blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
8da4417caf10ecf1121b42f4b888f8d03843988b | Python | fjanato/CurveFit | /src/curvefit/forecaster.py | UTF-8 | 7,159 | 3.40625 | 3 | [] | no_license | """
The Forecaster class is meant to fit regression models to the residuals
coming from evaluating predictive validity. We want to predict the residuals
forward with respect to how much data is currently in the model and how far out into the future.
"""
import numpy as np
import pandas as pd
import itertools
class ResidualModel:
    def __init__(self, data, outcome, covariates):
        """
        Base class for a residual model. Can fit and predict out.

        Args:
            data: (pd.DataFrame) data to use
            outcome: (str) outcome column name
            covariates: List[str] covariates to predict
        """
        self.data = data
        self.outcome = outcome
        self.covariates = covariates
        # isinstance is the idiomatic check and also accepts str/list subclasses.
        assert isinstance(self.outcome, str), "outcome must be a string column name"
        assert isinstance(self.covariates, list), "covariates must be a list of column names"
        # Fitted coefficients; populated by subclasses in fit().
        self.coef = None

    def fit(self):
        """Fit the residual model. Implemented by subclasses."""
        pass

    def predict(self, df):
        """Predict residuals for the rows of df. Implemented by subclasses."""
        pass
class LinearResidualModel(ResidualModel):
    def __init__(self, **kwargs):
        """
        A basic linear regression for the residuals.

        Args:
            **kwargs: keyword arguments to ResidualModel base class
        """
        super().__init__(**kwargs)

    def fit(self):
        """Fit OLS coefficients for outcome ~ intercept + covariates."""
        df = self.data.copy()
        df['intercept'] = 1
        pred = np.asarray(df[['intercept'] + self.covariates])
        out = np.asarray(df[[self.outcome]])
        # Solve the normal equations directly; np.linalg.solve is more
        # numerically stable than forming the explicit inverse (same result).
        self.coef = np.linalg.solve(pred.T.dot(pred), pred.T.dot(out))

    def predict(self, df):
        """
        Predict the outcome for each row of df.

        Args:
            df: (pd.DataFrame) frame containing all covariate columns

        Returns:
            (np.ndarray) predicted values, one row per row of df
        """
        # BUG FIX: the original wrote an 'intercept' column into the caller's
        # DataFrame as a side effect; operate on a copy instead.
        df = df.copy()
        df['intercept'] = 1
        pred = np.asarray(df[['intercept'] + self.covariates])
        return pred.dot(self.coef)
class Forecaster:
    def __init__(self, data, col_t, col_obs, col_group, all_cov_names):
        """
        A Forecaster will generate forecasts of residuals to create
        new, potential future datasets that can then be fit by the ModelPipeline

        Args:
            data: (pd.DataFrame) the model data
            col_t: (str) column of data that indicates time
            col_obs: (str) column of data that's in the same space
                as the forecast (linear space)
            col_group: (str) column of data that indicates group membership
            all_cov_names: List[str] list of all the covariate names that need
                to be copied forward
        """
        self.data = data
        self.col_t = col_t
        self.col_obs = col_obs
        self.col_group = col_group
        self.all_cov_names = all_cov_names
        assert type(self.all_cov_names) == list
        for l in self.all_cov_names:
            assert type(l) == str
        # Per-group summaries computed once up front from the input data.
        self.num_obs_per_group = self.get_num_obs_per_group()
        self.max_t_per_group = self.get_max_t_per_group()
        self.covariates_by_group = self.get_covariates_by_group()
        # Residual regressions; populated later by fit_residuals().
        self.mean_residual_model = None
        self.std_residual_model = None

    def get_num_obs_per_group(self):
        """
        Get the number of observations per group that will inform
        the amount of data going forwards.

        Returns:
            (dict) dictionary keyed by group with value num obs
        """
        # Only rows with a non-null observation count.
        non_nulls = self.data.loc[~self.data[self.col_obs].isnull()].copy()
        return non_nulls.groupby(self.col_group)[self.col_group].count().to_dict()

    def get_max_t_per_group(self):
        """
        Get the maximum t per group.

        Returns:
            (dict) dictionary keyed by group with value max t
        """
        non_nulls = self.data.loc[~self.data[self.col_obs].isnull()].copy()
        return non_nulls.groupby(self.col_group)[self.col_t].max().to_dict()

    def get_covariates_by_group(self):
        """
        Get the covariate entries for each group to fill in the data frame.

        Returns:
            (dict[dict]) dictionary keyed by covariate then keyed by group with value as covariate value
        """
        cov_dict = {}
        for cov in self.all_cov_names:
            cov_dict[cov] = self.data.groupby(self.col_group)[cov].unique().to_dict()
            # Covariates are assumed constant within a group; enforce that and
            # unwrap the single unique value.
            for k, v in cov_dict[cov].items():
                assert len(v) == 1, f"There is not a unique covariate value for group {k}"
                cov_dict[cov][k] = v[0]
        return cov_dict

    def fit_residuals(self, residual_data, mean_col, std_col,
                      residual_covariates, residual_model_type):
        """
        Run a regression for the mean and standard deviation
        of the scaled residuals.

        Args:
            residual_data: (pd.DataFrame) data frame of residuals
                that has the columns listed in the covariate
            mean_col: (str) the name of the column that has mean
                of the residuals
            std_col: (str) the name of the column that has the std
                of the residuals
            residual_covariates: (str) the covariates to include in the regression
            residual_model_type: (str) what type of residual model to it
                types include 'linear'
        """
        if residual_model_type == 'linear':
            # Separate models for the residual mean and residual spread,
            # sharing the same covariates.
            self.mean_residual_model = LinearResidualModel(
                data=residual_data, outcome=mean_col, covariates=residual_covariates
            )
            self.std_residual_model = LinearResidualModel(
                data=residual_data, outcome=std_col, covariates=residual_covariates
            )
        else:
            raise ValueError(f"Unknown residual model type {residual_model_type}.")
        self.mean_residual_model.fit()
        self.std_residual_model.fit()

    def predict(self, far_out, num_data):
        """
        Predict out the residuals for all combinations of far_out and num_data
        for both the mean residual and the standard deviation of the residuals.

        Args:
            far_out: (np.array) of how far out to predict
            num_data: (np.array) of numbers of data points

        Returns:
            (pd.DataFrame) one row per (far_out, num_data) pair with predicted
            'residual_mean' and 'residual_std' columns
        """
        data_dict = {'far_out': far_out, 'num_data': num_data}
        # Cartesian product of the two axes; dict insertion order (Py3.7+)
        # keeps the columns aligned with data_dict.keys().
        rows = itertools.product(*data_dict.values())
        new_data = pd.DataFrame.from_records(rows, columns=data_dict.keys())
        new_data['residual_mean'] = self.mean_residual_model.predict(df=new_data)
        new_data['residual_std'] = self.std_residual_model.predict(df=new_data)
        return new_data

    def copy_covariates(self, df):
        """
        Covariates are not time varying so we can copy them over
        for each location in the data frame.
        """
        # NOTE(review): unimplemented — the body is only this docstring.

    def simulate(self, far_out, num_simulations, predictions):
        """
        Simulate the residuals based on the mean and standard deviation of predicting
        into the future.

        Args:
            far_out: (int)
            num_simulations: number of simulations to take
            predictions:

        Returns:
            List[pd.DataFrame] list of data frames for each simulation
        """
        data = self.data.copy()
        # Attach the per-group summaries and covariates computed in __init__.
        data['max_obs'] = data[self.col_group].map(self.num_obs_per_group)
        data['max_t'] = data[self.col_group].map(self.max_t_per_group)
        for cov in self.all_cov_names:
            data[cov] = data[self.col_group].map(self.covariates_by_group[cov])
        # NOTE(review): appears incomplete — the docstring promises a list of
        # DataFrames but nothing is returned; confirm against the upstream file.
| true |
0d554e1e64110fea12e8cb2f462070db7e7e5844 | Python | maurob/timeperiod | /permissions.py | UTF-8 | 604 | 2.53125 | 3 | [
"MIT"
] | permissive | from rest_framework.permissions import BasePermission
class IsCurrentUser(BasePermission):
    """Object-level permission: allow access only to the user's own record."""
    def has_object_permission(self, request, view, obj):
        # True only when the object being accessed is the requesting user.
        return obj == request.user
class UserIsOwnerOrAdmin(BasePermission):
    """View access for any authenticated user; object access only for staff or the object's owner."""
    def has_permission(self, request, view):
        # View-level gate: any authenticated user may proceed.
        # NOTE(review): is_authenticated is *called* — this targets old Django
        # (< 1.10) where it was a method; confirm the project's Django version.
        return request.user and request.user.is_authenticated()
    def check_object_permission(self, user, obj):
        # Staff may access any object; everyone else only their own user object.
        # Short-circuit `and` returns the falsy operand itself (e.g. None),
        # not necessarily False — DRF only checks truthiness.
        return (user and user.is_authenticated() and
                (user.is_staff or obj == user))
    def has_object_permission(self, request, view, obj):
        return self.check_object_permission(request.user, obj)
07723b0269913001dfd08bfac7341f89314dfeb9 | Python | selbieh/gaded | /backend/users/models.py | UTF-8 | 1,747 | 2.53125 | 3 | [] | no_license | from django.db import models
# Create your models here.
from django.contrib.auth.models import AbstractUser,UserManager
class cutomUserManger(UserManager):
    # Custom user manager that authenticates by mobile number instead of
    # username. NOTE(review): class name has typos ("cutomUserManger") but is
    # kept as-is because `objects = cutomUserManger()` below references it.
    def _create_user(self, mobile, password, **extra_fields):
        """
        Create and save a user with the given username, email, and password.
        """
        # Normalize the mobile number the same way Django normalizes usernames.
        mobile = self.model.normalize_username(mobile)
        #username = self.model.normalize_username(username)
        user = self.model( mobile=mobile, **extra_fields)
        # set_password hashes the raw password before it is stored.
        user.set_password(password)
        user.save(using=self._db)
        return user
    def create_superuser(self, mobile, password, **extra_fields):
        # Superusers must have both flags; reject explicit False overrides.
        extra_fields.setdefault('is_staff', True)
        extra_fields.setdefault('is_superuser', True)
        if extra_fields.get('is_staff') is not True:
            raise ValueError('Superuser must have is_staff=True.')
        if extra_fields.get('is_superuser') is not True:
            raise ValueError('Superuser must have is_superuser=True.')
        return self._create_user( mobile, password, **extra_fields)
    def create_user(self, mobile=None, password=None, **extra_fields):
        # Regular users default to non-staff, non-superuser.
        extra_fields.setdefault('is_staff', False)
        extra_fields.setdefault('is_superuser', False)
        return self._create_user( mobile, password, **extra_fields)
class users(AbstractUser):
    # Phone number used as the login identifier (see USERNAME_FIELD below).
    mobile = models.CharField(max_length=13,unique=True)
    # One-time-password state for phone verification.
    otp=models.IntegerField(null=True,blank=True)
    otp_is_confirmed=models.BooleanField(default=False)
    otp_attempt=models.IntegerField(default=0)
    update_time=models.DateTimeField(auto_now=True)
    # Drop the inherited username column; mobile replaces it as the identifier.
    username = None
    USERNAME_FIELD = 'mobile'
    objects = cutomUserManger()
    def __str__(self):
        return self.mobile
| true |
964d035cfdf48b32da56d81c791285ea9047b3b3 | Python | TejaDev/Python | /New folder/Miscellaneous Scripts/sd.py | UTF-8 | 686 | 3.65625 | 4 | [] | no_license | node=input("Enter the alphabets").split()
print(node)
print(len(node))
# Build the adjacency list: one space-separated edge list per vertex.
graph = {}
for vertex in node:
    graph[vertex] = input("Enter edges").split()
print("")
print("Adjacency List")
print("")
# BUG FIX: the original nested loop iterated over graph[i] with the stale
# loop variable `i` from the input loop above (and never used `y`), printing
# each vertex's adjacency line once per edge of the *last* vertex entered.
# Each vertex should be reported exactly once.
for x in node:
    print("The vertex "+ x + " is connected to {}".format(graph[x]))
def dfs_path(graph, start, end):
    """Find every path from start to end with dfs() and print them."""
    found = []
    dfs(graph, start, end, [], found)
    print(found)
def dfs(graph, start, end, path, result):
    """Depth-first search collecting every acyclic route from start to end.

    Extends `path` in place with `start`; completed routes are appended
    to `result`. Recursive calls receive a copy of the current path.
    """
    path.append(start)
    if start == end:
        result.append(path)
        return
    for neighbor in graph[start]:
        if neighbor in path:
            continue  # skip vertices already on this route (no cycles)
        dfs(graph, neighbor, end, list(path), result)
# Demo run: print all DFS paths between vertices 'A' and 'D'
# (assumes both were entered in the prompts above).
dfs_path(graph,'A','D')
26d0441fac6e2a8f59f18e665034ca30945e7c36 | Python | RafaelSdm/Curso-de-Python | /Mundo 3/ex081.py | UTF-8 | 814 | 4.28125 | 4 | [
"MIT"
] | permissive | print("analise dos numeros da lista:")
print("-"*20)
contador =1
lista = list()
while True:
lista.append(int(input("informe um numero:")))
while True:
resposta = str(input("deseja informar mais algum numero:")).strip().upper()[0]
if resposta in "SN":
break
else:
print("dados informados invalidos!")
if resposta == "N":
break
else:
contador += 1
print(f"voce informou {contador} numeros na lista")
print("\n\nlista dos numeros informados:")
for c in lista:
print(f" {c} ",end="")
print("\n\nordem decrescente dos numeros:")
lista.sort(reverse=True)
print(f" {lista} ")
print("\n\nanalise do numero 5 na lista:")
if 5 in lista:
print("o numero 5 aparece na lista")
else:
print("o numero 5 nao aparece na lista")
| true |
d13450b79a7b3b2a22d558ce32eb91372f8886fc | Python | nkimran94/python-tutorial | /python fundamentals/ifelse.py | UTF-8 | 337 | 3.890625 | 4 | [] | no_license | marks = int(input("What is your grade: "))
def show_grade(grade):
    """Print the letter grade the user earned."""
    print("You got: {}".format(grade))

# The elif chain is evaluated top-down, so each branch's upper bound is
# already implied by the branch above — the redundant `marks < N and`
# conditions of the original are dropped (behavior unchanged).
if marks >= 80:
    show_grade("A+")
elif marks >= 70:
    show_grade("A")
elif marks >= 60:
    show_grade("A-")
elif marks >= 33:
    show_grade("Passed")
else:
    show_grade("F")
print("Finished")
58e77e5189d08fa383da591dfc5bbfe83d3ea170 | Python | forrestsheldon/memNets | /resistor_networks.py | UTF-8 | 20,855 | 3.390625 | 3 | [] | no_license | # resistor_network.py
# By Forrest Sheldon
# This is a transcription of the classes I have created to solve
# and display resistor networks from my notebook
# Resistor_Networks_V3.ipynb
import numpy as np
import scipy as sp
import scipy.sparse as sparse
from scipy.sparse.linalg import spsolve, cg
from scipy.sparse.csgraph import connected_components
import matplotlib.pyplot as plt
import itertools
#================================================================
# ResistorNetwork
#================================================================
class ResistorNetwork(object):
"""
This is a basic class for solving a resistor network. Initializing the network requires:
G - An NxN sparse CSR matrix containing the conductances in the network of N nodes
external_voltages - An Nx1 dense vector of external voltages. Nodes not set to an external voltages
contain a Nan. The shape (N,) is preferred
Other available data attributes are:
voltages - These are the voltages of the internal nodes in the network. They are initally set
to None and are filled in upon calling self.solve()
nodes - The number of nodes in the network
num_comp - The number of connected components in the graph
comp_labels - An array containing the connected component label for each node
interior - A boolean array of the interior nodes of the network
boundary - A boolean array of the boundary nodes set to an external voltage
percolating_nodes - A boolean array of the nodes in the percolating cluster
non_percolating_nodes - An array containing the labels of the connected components that do not percolate
interior_percolating - A boolean array of the interior nodes of the percolating cluster
boundary_percolating - A boolean array of the boundary nodes of the percolating cluster
"""
def __init__(self, G, external_voltages):
self.G = G
self.external_voltages = external_voltages
self.voltages = None
self.nodes, tmp = self.G.shape
self.num_comp, self.comp_labels = connected_components(self.G, directed=False)
self.interior = np.isnan(self.external_voltages)
self.boundary = np.logical_not(np.isnan(self.external_voltages))
self.percolating_nodes = None
self.non_percolating_comp = None
self.find_percolating_nodes()
self.interior_percolating = np.logical_and(self.interior, self.percolating_nodes)
if not np.any(self.interior_percolating):
print "No interior nodes in percolating cluster"
self.boundary_percolating = np.logical_and(self.boundary, self.percolating_nodes)
def find_percolating_nodes(self):
"""
This method creates an array of indices of percolating nodes in the network
"""
self.percolating_nodes = np.zeros_like(self.external_voltages, dtype='bool')
self.non_percolating_comp = []
#Loop over connected components in graph
for cc in range(self.num_comp):
# Pull a boolean array of the node indices and use that to get the external voltages
cc_nodes = (self.comp_labels == cc)
cc_external_voltages = self.external_voltages[cc_nodes]
# Find the maximum and minimum voltages on the component. Nan's will only be found if no other number
# is in the array
cc_max = np.nanmax(cc_external_voltages)
cc_min = np.nanmin(cc_external_voltages)
# If the component is set to at least one voltage and it does not equal some other, it percolates
if np.isnan(cc_max) or cc_max == cc_min:
self.non_percolating_comp.append(cc)
else:
# Add the connected component to the percolating nodes
self.percolating_nodes[cc_nodes] = True
# If no nodes percolate, give a message
if not np.any(self.percolating_nodes):
print "This graph does not percolate"
def solve_voltages_percolating(self, solver, V_0=None):
"""
This method solves for the node voltages of the percolating cluster. Current solvers are:
'spsolve' - Scipy's exact sparse solver
'cg' - Scipy's sparse conjugate gradient solver. Note that conj grad seems to do well for solving a
network with separate components without breaking it up first but I'm not sure how safe this
is if we begin changing it's initial state.
The second optional argument is
V_0 - (optional) An initial guess for the voltages in the network for the conjugate gradient solver. I
think this may be useful for memristor networks where the current and previous voltages are only
infinitesimally separated.
"""
# First we form the equations matrix. To do this, we remove the rows and columns of boundary nodes,
# trading our Neumann boundary conditions for Dirichlet
L = self.graph_laplacian()
D = L[self.interior_percolating, :][:, self.interior_percolating]
# The columns corresponding to boundary nodes give a constant vector on the interior nodes yielding
# the equation Dv = b (the -1 subtracts it to the other side of the equation)
b = -1. * L[self.interior_percolating, :][:, self.boundary_percolating].dot(self.external_voltages[self.boundary_percolating])
# Put our boundary values in for the voltages
self.voltages = np.zeros_like(self.external_voltages)
self.voltages[self.boundary] = self.external_voltages[self.boundary]
# and solve!
if solver == 'spsolve':
self.voltages[self.interior_percolating] = spsolve(D, b)
elif solver == 'cg':
#I'd like to include an optional parameter to give the initial guess for the voltages in the network
if V_0 == None:
self.voltages[self.interior_percolating], convergence = cg(D, b)
else:
self.voltages[self.interior_percolating], convergence = cg(D, b, V_0[self.interior_percolating])
#print "Conjugate Gradient converges with %d" % convergence
else:
print "Solver not specified. Try 'spsolve' or 'cg'"
def fill_nonpercolating_voltages(self):
"""
Uses the non-percolating components to fill in the appropriate voltages for elements of the network not solved for
"""
#for each nonpercolating component
for cc in self.non_percolating_comp:
# Pull a boolean array of the node indices and use that to get the external voltages
cc_nodes = (self.comp_labels == cc)
cc_external_voltages = self.external_voltages[cc_nodes]
# Find the maximum and minimum voltages on the component. Nan's will only be found if no other number
# is in the array
cc_max = np.nanmax(cc_external_voltages)
cc_min = np.nanmin(cc_external_voltages)
# If the component is not set to any voltage, set it to zero
if np.isnan(cc_max):
self.voltages[cc_nodes] = 0
# If it is set to a single external voltage, set all nodes to that value
elif cc_max == cc_min:
self.voltages[cc_nodes] = cc_max
def solve_voltages(self, solver, V_0=None):
"""
To solve the network, solve the percolating cluster and fill in the non_percolating components
"""
self.solve_voltages_percolating(solver, V_0)
self.fill_nonpercolating_voltages()
def graph_laplacian(self):
"""
Returns the graph laplacian for the resistor network. This is L = D - G where D is the 'degree' matrix
(for us a diagonal matrix of the sum of the incident conductances to each node) and G is the 'adjacency'
matrix (for us the conductance matrix G)
"""
# Note that for CSR matrices it is faster to sum across rows
return sparse.dia_matrix((self.G.sum(1).flat, [0]), shape=(self.nodes,self.nodes)).tocsr() - self.G
def power(self):
"""
Returns a sparse matrix in CSR form containing the power dissipated between nodes i and j. Requires that
self.solve() have been called to populate self.voltages
"""
# Pull nonzero values to iterate only over occupied bonds
# as G is symmetric we can take only the upper trianglular part
rows, cols = sparse.triu(self.G).nonzero()
# Fill in the entries in the power matrix
power = sparse.lil_matrix(self.G.shape)
for node_i, node_j in itertools.izip(rows, cols):
power[node_i, node_j] = self.G[node_i, node_j] * (self.voltages[node_i] - self.voltages[node_j])**2
power[node_j, node_i] = power[node_i, node_j]
return power.tocsr()
def voltage_drop(self):
"""
Return a sparse matrix in CSR form containing the voltage drop between nodes i and j. Requires that self.solve()
have been called to populate self.voltages
"""
rows, cols = sparse.triu(self.G).nonzero()
# fill in the entries in the voltage drop matrix
voltage_drop = sparse.lil_matrix(self.G.shape)
for node_i, node_j in itertools.izip(rows, cols):
voltage_drop[node_i, node_j] = self.voltages[node_j] - self.voltages[node_i]
voltage_drop[node_j, node_i] = -1 * voltage_drop[node_i, node_j]
return voltage_drop.tocsr()
def voltage_drop_abs(self):
"""
Return a sparse matrix in CSR form containing the voltage drop between nodes i and j. Requires that self.solve()
have been called to populate self.voltages
"""
rows, cols = sparse.triu(self.G).nonzero()
# fill in the entries in the voltage drop matrix
voltage_drop = sparse.lil_matrix(self.G.shape)
for node_i, node_j in itertools.izip(rows, cols):
voltage_drop[node_i, node_j] = abs(self.voltages[node_j] - self.voltages[node_i])
voltage_drop[node_j, node_i] = voltage_drop[node_i, node_j]
return voltage_drop.tocsr()
def external_current(self):
"""
Returns the currents entering the nodes on the boundary. These are calculated from,
graph_laplacian[boundary,:].dot(self.voltages)
and thus occur in the order specified by self.boundary
"""
return self.graph_laplacian()[self.boundary, :].dot(self.voltages)
def conductivity(self):
"""
The total conductivity of the network is calculated as the sum of the positive external currents divided
by the voltage difference across the network. In order for this to work,
"""
I_external = self.external_current()
return I_external[I_external > 0].sum() / (np.nanmax(self.external_voltages) - np.nanmin(self.external_voltages))
def display_ring(self, ax, display_variable, nodesize=5, bondwidth=2, colormin=None, colormax=None):
"""
This method displays a resistor network of N nodes on the unit circle with resistors displayed as bonds between the
nodes. Indexing begins with the node at the 3 o'clock position and procedes counter clockwise around the circle.
The variables that may be displayed are:
'voltage'
'power'
'conductance'
'voltage_drop'
'log_voltage_drop'
"""
delta_theta = 2 * np.pi / self.nodes
def node2xy_circle(node_idx):
"""
returns the x and y coordinates of a node index on the unit circle assuming that the 0 node is
"""
complex_coord = np.exp(node_idx * delta_theta * 1j)
return complex_coord.real, complex_coord.imag
# Pull nonzero values to plot bonds
rows, cols = sparse.triu(self.G).nonzero()
# Set up colormap normalization
if colormin != None:
norm = plt.Normalize(vmin=colormin, vmax=colormax)
elif display_variable == 'voltage':
norm = plt.Normalize()
norm.autoscale(self.voltages)
elif display_variable == 'power':
power = self.power()
norm = plt.Normalize(vmin=power.min(), vmax=power.max())
elif display_variable == 'conductance':
conductances = self.G[rows, cols]
# I'd like the OFF grey to be lighter than the minimum of hte color map
# so I'm setting it so that it falls 1/3 through the colormap
mincond = conductances.min()
maxcond = conductances.max()
low_colormap = maxcond - 1.5 * (maxcond-mincond)
norm = plt.Normalize(vmin=low_colormap, vmax=maxcond)
elif display_variable == 'voltage_drop':
voltage_drop = self.voltage_drop_abs()
norm = plt.Normalize(vmin=0, vmax=voltage_drop.max())
elif display_variable == 'log_voltage_drop':
voltage_drop = self.voltage_drop_abs()
norm = plt.Normalize(vmin=np.log(voltage_drop.data.min()),
vmax=np.log(voltage_drop.max()))
if display_variable == 'voltage':
colormap = plt.get_cmap('Reds')
elif display_variable == 'power':
colormap = plt.get_cmap('YlOrRd')
elif display_variable == 'conductance':
colormap = plt.get_cmap('RdGy_r')
elif display_variable == 'voltage_drop':
colormap = plt.get_cmap('jet')
elif display_variable == 'log_voltage_drop':
colormap = plt.get_cmap('jet')
else:
print 'Invalid display variable %s' % display_variable
# Draw the bonds between nodes
for node_i, node_j in itertools.izip(rows, cols):
x_i, y_i = node2xy_circle(node_i)
x_j, y_j = node2xy_circle(node_j)
if display_variable == 'voltage':
ax.plot([x_i, x_j], [y_i, y_j], 'k', lw = bondwidth)
elif display_variable == 'power':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(power[node_i, node_j])), lw=bondwidth)
elif display_variable == 'conductance':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(self.G[node_i, node_j])), lw=bondwidth)
elif display_variable == 'voltage_drop':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(voltage_drop[node_i, node_j])), lw=bondwidth)
elif display_variable == 'log_voltage_drop':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(np.log(voltage_drop[node_i, node_j]))), lw=bondwidth)
# Now draw the nodes
if display_variable == 'voltage':
for node, volt in enumerate(self.voltages):
x, y = node2xy_circle(node)
ax.plot(x, y, 'o', markersize=nodesize, color=colormap(norm(volt)))
elif display_variable == 'power' or 'conductance' or 'voltage_drop' or 'log_voltage_drop':
for node in range(self.nodes):
x, y = node2xy_circle(node)
ax.plot(x, y, 'wo', markersize=nodesize)
# And finally set the axes to be just outside the grid spacing and invert the y_axis
ax.set_xlim( -1.1, 1.1)
ax.set_ylim( -1.1, 1.1)
def display_grid(self, ax, lattice_shape, display_variable, nodesize=5, bondwidth=3, colormin=None, colormax=None,
colormap_name=None):
"""
This method displays a 2D cubic resistor lattice of shape lattice_shape = (y, x). The variables
that may be displayed are:
'voltage'
'power'
'conductance'
'log-power'
Nodes are indexed across rows such that the first row has nodes 0 through x-1. This is because I typically
like to set up networks with a vertical bus bar architecture and it makes setting the nodes as simple as possible.
"""
def node2xy(node_idx):
"""
returns the x and y coordinates of a node index in our grid supposing that the 0,0 point is in the upper left
and the positive y-axis points down
"""
return node_idx % lattice_shape[1], int(node_idx / lattice_shape[1])
# Pull nonzero values to plot bonds
rows, cols = sparse.triu(self.G).nonzero()
# Set up colormap normalization
if colormin != None:
norm = plt.Normalize(vmin=colormin, vmax=colormax)
elif display_variable == 'voltage':
norm = plt.Normalize()
norm.autoscale(self.voltages)
elif display_variable == 'power':
power = self.power()
norm = plt.Normalize(vmin=power.min(), vmax=power.max())
elif display_variable == 'conductance':
conductances = self.G[rows, cols]
# I'd like the OFF grey to be lighter than the minimum of hte color map
# so I'm setting it so that it falls 1/3 through the colormap
mincond = conductances.min()
maxcond = conductances.max()
low_colormap = maxcond - 1.5 * (maxcond-mincond)
norm = plt.Normalize(vmin=low_colormap, vmax=maxcond)
elif display_variable == 'voltage_drop':
voltage_drop = self.voltage_drop_abs()
norm = plt.Normalize(vmin=0, vmax=voltage_drop.max())
elif display_variable == 'log_voltage_drop':
voltage_drop = self.voltage_drop_abs()
norm = plt.Normalize(vmin=np.log(voltage_drop.data.min()),
vmax=np.log(voltage_drop.max()))
if colormap_name != None:
colormap = plt.get_cmap(colormap_name)
else:
if display_variable == 'voltage':
colormap = plt.get_cmap('Reds')
elif display_variable == 'power':
colormap = plt.get_cmap('YlOrRd')
elif display_variable == 'conductance':
colormap = plt.get_cmap('RdGy_r')
elif display_variable == 'voltage_drop':
colormap = plt.get_cmap('jet')
elif display_variable == 'log_voltage_drop':
colormap = plt.get_cmap('jet')
# Draw the bonds between nodes
for node_i, node_j in itertools.izip(rows, cols):
x_i, y_i = node2xy(node_i)
x_j, y_j = node2xy(node_j)
if display_variable == 'voltage':
ax.plot([x_i, x_j], [y_i, y_j], 'k', lw = bondwidth)
elif display_variable == 'power':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(power[node_i, node_j])), lw=bondwidth)
elif display_variable == 'conductance':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(self.G[node_i, node_j])), lw=bondwidth)
elif display_variable == 'voltage_drop':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(voltage_drop[node_i, node_j])), lw=bondwidth)
elif display_variable == 'log_voltage_drop':
ax.plot([x_i, x_j], [y_i, y_j], color=colormap(norm(np.log(voltage_drop[node_i, node_j]))), lw=bondwidth)
# Now draw the nodes
if display_variable == 'voltage':
for node, volt in enumerate(self.voltages):
x, y = node2xy(node)
ax.plot(x, y, 's', markersize=nodesize, color=colormap(norm(volt)))
elif display_variable == 'power' or 'conductance' or 'voltage_drop' or 'log_voltage_drop':
for node in range(self.nodes):
x, y = node2xy(node)
ax.plot(x, y, 'ws', markersize=nodesize)
# And finally set the axes to be just outside the grid spacing and invert the y_axis
ax.set_xlim( -1, lattice_shape[1])
ax.set_ylim( -1, lattice_shape[0])
ax.invert_yaxis()
ax.xaxis.set_tick_params(labelbottom='off', labeltop='on')
| true |
2e731e91267b79d610dba1607944abd4e1ac2bbb | Python | jthacker/jtmri | /jtmri/r2p_po2/model.py | UTF-8 | 17,919 | 2.765625 | 3 | [
"MIT"
] | permissive | from collections import namedtuple
import itertools
import logging
import time
from matplotlib import pyplot as pl
from numpy.random import uniform
from numpy.linalg import norm
import numpy as np
import scipy.stats
import jtmri.fit
import jtmri.np
log = logging.getLogger(__name__)
def unit_vec(a):
    """Return a / ||a||; a zero vector is returned unchanged."""
    vec = np.array(a)
    magnitude = norm(vec)
    if magnitude == 0:
        return vec
    return vec / magnitude
def proj(a, b):
    """Return the vector projection of a onto b."""
    b_unit = unit_vec(b)
    # Scalar component along b times the unit direction of b.
    return a.dot(b_unit) * b_unit
def rej(a, b):
    """Return the rejection of a onto b.

    Equivalent to the projection of a onto a plane orthogonal to b.
    """
    a_vec, b_vec = np.array(a), np.array(b)
    return a_vec - proj(a_vec, b_vec)
def angle(a, b):
    """Return the angle (radians) between vectors a and b."""
    # Clip guards against cosines marginally outside [-1, 1] from round-off.
    cos_theta = unit_vec(a).dot(unit_vec(b))
    return np.arccos(np.clip(cos_theta, -1.0, 1.0))
# Lightweight record for a spherical red blood cell: radius (scalar) and
# position (3-vector in the same frame as the field-evaluation points).
RBC = namedtuple('RBC', 'radius,position')
def rbc_magnetic_field_shift(pos, rbcs, Bo, delta_chi, theta=None):
    """Magnetic field shift due to a list of red blood cells

    Parameters
    ----------
    pos : (3x1) vector
        Position to evalute magnetic field at
    rbcs : list(RBC)
        Red Blood Cell to simulate magnetic field of
    Bo : (3x1) vector or scalar
        Main magnetic field vector
    delta_chi : scalar
        Suscetibility of red blood cell
    theta : scalar (default: None)
        Angle of magnetic field, `Bo` must be a scalar if this is specified

    Returns
    -------
    scalar
        Shift in magnetic field due to `rbc` at point `pos`.
    """
    if theta is not None:
        # Fixed-angle mode: Bo is a scalar magnitude and theta is constant.
        assert np.isscalar(Bo)
        theta_fn = lambda r: theta
        Bo_mag = Bo
    else:
        # Vector mode: angle is computed per-cell between r and Bo.
        Bo_mag = norm(Bo)
        theta_fn = lambda r: angle(r, Bo)
    fs = 0
    # NOTE(review): jtmri.utils is used but only jtmri.fit / jtmri.np are
    # imported at the top of the file — relies on the package exposing
    # `utils` as an attribute; confirm.
    for rbc in jtmri.utils.as_iterable(rbcs):
        r = pos - rbc.position
        r_mag = norm(r)
        # The dipole expression is only valid outside the cell; points at or
        # beyond the cell radius contribute, interior points are skipped.
        if r_mag >= rbc.radius:
            fs += Bo_mag \
                * (delta_chi / 3.) \
                * (rbc.radius / r_mag)**3 \
                * (3 * np.cos(theta_fn(r))**2 - 1)
    return fs
def linear_spherical_overlap(r1, r2, d):
    """Linear overlap measure for two spheres of radii r1 and r2 a distance d apart.

    Returns 1 when one sphere is within |r1 - r2| of the other's center,
    0 when the centers are separated by at least r1 + r2, and a linear
    ramp in between.
    """
    outer = r1 + r2
    inner = np.abs(r1 - r2)
    fraction = (outer - d) / (outer - inner)
    return np.clip(fraction, 0, 1)
def rbc_intersects(rbc, rbcs, max_overlap):
    """Return True if `rbc` overlaps any cell in `rbcs` by more than `max_overlap`."""
    if not rbcs:
        return False
    radii = np.array([other.radius for other in rbcs])
    centers = np.array([other.position for other in rbcs])
    # Euclidean center-to-center distances to every existing cell.
    dists = (((rbc.position - centers))**2).sum(axis=1) ** 0.5
    overlaps = linear_spherical_overlap(rbc.radius, radii, dists)
    return np.any(overlaps > max_overlap)
def fill_cylinder_with_rbcs(cylinder_radius, cylinder_len, hct, rbc_radius_fn, max_iter=1e5,
                            epsilon=1e-2, max_overlap=0.5):
    """Add red blood cells to a cylinder until the specified hematocrit has been reached

    Cylinder axis lies on z axis
    """
    total_sphere_volume = 0
    last_err = float('inf')
    total_cylinder_volume = np.pi * cylinder_radius**2 * cylinder_len
    rbcs = []
    intersected = 0  # count of rejected (overlapping) candidate cells
    for i in itertools.count(start=1):
        if i >= max_iter:
            raise Exception('Max iterations reached: {}'.format(max_iter))
        rc = rbc_radius_fn()
        # sqrt of a uniform variate gives a uniform areal density over the
        # cross-section; the (cylinder_radius - rc) margin keeps the whole
        # sphere inside the cylinder wall.
        r = (cylinder_radius - rc) * np.sqrt(uniform(0, 1))
        theta = uniform(0, 2 * np.pi)
        z = uniform(-cylinder_len / 2. + rc, cylinder_len / 2. - rc)
        position = [
            r * np.cos(theta),
            r * np.sin(theta),
            z]
        rbc = RBC(rc, np.array(position))
        # Check for intersecting spheres
        if rbc_intersects(rbc, rbcs, max_overlap):
            intersected += 1
            continue
        # Check if hematocrit has been reached
        rbc_volume = (4./3) * np.pi * rbc.radius**3
        curr_hct = (total_sphere_volume + rbc_volume) / total_cylinder_volume
        err = np.abs(curr_hct - hct)
        if err > last_err:
            # Adding this cell would overshoot the target hematocrit further
            # than the previous state did.
            raise Exception('error has increased, error: {} last_error: {} iteration: {}'
                            .format(err, last_err, i))
            # NOTE(review): unreachable — this break follows a raise.
            break
        last_err = err
        total_sphere_volume += rbc_volume
        rbcs.append(rbc)
        if err < epsilon:
            break
    log.debug('finished generating RBCs, i: %d error: %f hct: %f num_rbcs: %d dropped: %d efficiency: %f',
              i, err, curr_hct, len(rbcs), intersected, len(rbcs) / float(i))
    return rbcs
def extra_vascular_magnetic_field_shift(r, cylinder, Bo, delta_chi):
    """Returns the magnetic field at position `r` due to an infinite cylinder

    Parameters
    ----------
    r : 3-vector
        Point to estimate field at
    cylinder : Cylinder
        Parameters describing the infinite cylinder
    Bo : 3-vector
        Magnetic field vector
    delta_chi : float
        Susceptibility difference between inside and outside of cylinder

    Returns
    -------
    float
        Magnetic field offset in units of input parameter `Bo`
    """
    r = np.array(r)
    Bo = np.array(Bo)
    # phi: azimuthal angle between the field point and Bo, both projected
    # onto the plane orthogonal to the cylinder axis.
    phi = angle(rej(r, cylinder.axis), rej(Bo, cylinder.axis))
    # theta: tilt of the cylinder axis relative to Bo.
    theta = angle(Bo, cylinder.axis)
    r_mag = cylinder.distance_to_point(r)
    # Formula is only valid outside the cylinder wall.
    assert r_mag > cylinder.radius
    # Extra vascular
    return norm(Bo) \
        * (delta_chi / 2.) \
        * (cylinder.radius / r_mag)**2 \
        * np.cos(2 * phi) \
        * np.sin(theta)**2
class Cylinder(object):
    """An infinite cylinder defined by two distinct points on its axis
    (`x0`, `x1`) and a radius."""

    @classmethod
    def from_axis_offset(cls, axis, offset, radius):
        """Alternate constructor: build a cylinder from an axis direction
        vector and an offset (a point the axis passes through).

        Uses `cls` rather than a hard-coded class name so subclasses
        constructed through this factory get the subclass type.
        """
        axis = np.array(axis)
        offset = np.array(offset)
        x0 = axis + offset
        x1 = offset
        return cls(x0, x1, radius)

    def __init__(self, x0, x1, radius):
        """Create a cylinder whose axis runs through `x0` and `x1`.

        Raises AssertionError when either point is not a 3-vector or the
        two points coincide (the axis would be undefined).
        """
        assert len(x0) == 3, 'x0 should be a 3-vector'
        assert len(x1) == 3, 'x1 should be a 3-vector'
        self._x0 = np.array(x0)
        self._x1 = np.array(x1)
        assert not np.array_equal(self._x0, self._x1), 'x0 must be unique from x1'
        self._radius = radius
        self._axis = self._x1 - self._x0

    @property
    def x0(self):
        return self._x0

    @property
    def x1(self):
        return self._x1

    @property
    def radius(self):
        return self._radius

    @property
    def axis(self):
        return self._axis

    def distance_to_point(self, p):
        """Returns the minimum distance between cylinder axis and point `p`"""
        p = np.array(p)
        # |axis x (x1 - p)| / |axis| is the perpendicular distance from p
        # to the infinite line through x0 and x1.
        return norm(np.cross(self.axis, self.x1 - p)) / norm(self.axis)
def sample_voxel(voxel, N_samples):
    """Draw `N_samples` uniform random points inside a voxel centered at
    the origin.

    Parameters
    ----------
    voxel : 3-vector
        Edge lengths of the voxel along x, y, z
    N_samples : int
        Number of sample points

    Returns
    -------
    ndarray, shape (3, N_samples)
    """
    N_samples = int(N_samples)
    x_samples = uniform(low=-voxel[0]/2., high=voxel[0]/2., size=N_samples)
    y_samples = uniform(low=-voxel[1]/2., high=voxel[1]/2., size=N_samples)
    z_samples = uniform(low=-voxel[2]/2., high=voxel[2]/2., size=N_samples)
    # Stack the three coordinate rows directly into a 3 x N array. The
    # previous zip()/transpose construction fails on Python 3, where zip()
    # returns an iterator that np.array cannot materialize.
    return np.array([x_samples, y_samples, z_samples])
def sample_voxel_ev(voxel, N_samples, cylinders):
    """Sample a voxel's extra-vascular space by rejection sampling.

    Points are drawn one at a time and kept only when they fall outside
    every cylinder. Returns an ndarray of shape (3, N_samples).
    """
    samples = []
    while len(samples) < N_samples:
        # Flatten the 3x1 column returned by sample_voxel to a plain
        # 3-vector: Cylinder.distance_to_point broadcasts a (3, 1) point
        # against the (3,) axis into a (3, 3) array and returns a
        # meaningless Frobenius norm, making the rejection test wrong.
        sample = sample_voxel(voxel, 1)[:, 0]
        if not is_extravascular(sample, cylinders):
            continue
        samples.append(sample)
    return np.array(samples).T
def create_cylinders(target_volume_fraction, voxel_shape, cylinder_func,
                     epsilon=1e-2, max_iterations=None, N_samples=1e5):
    """Create a volume of cylinders
    Parameters
    ----------
    target_volume_fraction : float
        Target fraction of the cuboid volume to be occupied by cylinders
    voxel_shape : 3-vector
        Dimensions of the voxel being filled, voxel is assumed to be centered
        at the origin
    cylinder_func : function
        A cylinder generating function, encapsulates the distrubtion over the
        cylinder parameters. May be None, in which case `random_cylinder`
        with its defaults is used.
    epsilon : float
        Allowed error in reaching `target_volume_fraction`
    max_iterations : int
        Maximum number of iterations to perform when searching for target volume fraction
    N_samples : int
        Number of sample points to use when estimating cylinder volume
    Returns
    -------
    iterable of Cylinder
        An iterable of cylinder objects is returned, such that the RMS distance between
        the `target_volume_fraction` and the total volume fraction of `voxel_shape` occupied by
        the cylinders is less than `epsilon`.
    """
    assert 0 <= target_volume_fraction <= 1
    target_volume_fraction = float(target_volume_fraction)
    cylinder_func = cylinder_func or random_cylinder
    epsilon_sqd = epsilon**2
    N_samples = int(N_samples)
    # Monte Carlo sample points shared by all volume estimates; `mask`
    # accumulates which points are covered by any cylinder so far.
    samples = sample_voxel(voxel_shape, N_samples)
    mask = np.zeros(N_samples, dtype=bool)
    cylinders = []
    # Count from 1 in both branches so logged iteration numbers agree
    # (itertools.count() previously started at 0 while the bounded branch
    # started at 1), and use range() instead of the Python-2-only xrange.
    loop = itertools.count(start=1) if max_iterations is None else range(1, max_iterations + 1)
    last_err = float('inf')
    i = 0  # defined even when the loop body never runs (max_iterations == 0)
    for i in loop:
        cylinder = cylinder_func()
        mask |= cylinder_mask(cylinder, samples)
        volume_fraction = mask.sum() / float(N_samples)
        # Squared relative error against the target fraction.
        err = (1 - (volume_fraction / target_volume_fraction))**2
        if err > last_err:
            # Adding the last cylinder overshot the target; drop it and stop.
            log.debug('current iteration error (%f) greater than previous (%f), terminating',
                      err, last_err)
            break
        cylinders.append(cylinder)
        if err <= epsilon_sqd:
            log.debug('target error (%f) reached. err: %f iteration: %d '\
                      'target_volume_fraction: %f actual_volume_fractions: %f',
                      epsilon_sqd, err, i, target_volume_fraction, volume_fraction)
            break
        last_err = err
    if i == max_iterations:
        log.debug('max iterations (%d) reached, loop terminated', max_iterations)
    return cylinders
def random_unit_three_vector():
    """Draw a random 3D unit vector (direction) uniformly distributed on
    the unit sphere.

    Algorithm from
    http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
    """
    azimuth = np.random.uniform(0, np.pi * 2)
    # Sampling cos(polar) uniformly on [-1, 1] gives a uniform density
    # over the sphere's surface.
    cos_polar = np.random.uniform(-1, 1)
    polar = np.arccos(cos_polar)
    sin_polar = np.sin(polar)
    return np.array([sin_polar * np.cos(azimuth),
                     sin_polar * np.sin(azimuth),
                     np.cos(polar)])
def random_cylinder(radius_limits=(0, 1), x_limits=(-1, 1), y_limits=(-1, 1), z_limits=(-1, 1)):
    """Generate a randomly oriented cylinder with uniform direction vector distribution"""
    radius = uniform(*radius_limits)
    direction = random_unit_three_vector()
    # One uniform draw per spatial coordinate for the axis offset point.
    position = np.array([uniform(lo, hi) for lo, hi in (x_limits, y_limits, z_limits)])
    return Cylinder.from_axis_offset(direction, position, radius)
def cylinder_mask(cylinder, sample_grid):
    """Return a boolean array over `sample_grid` where True marks points
    inside `cylinder` and False marks points outside it.
    """
    px, py, pz = sample_grid
    axis_dir = unit_vec(cylinder.axis)
    ox, oy, oz = cylinder.x0[0], cylinder.x0[1], cylinder.x0[2]
    # Scalar projection of each point (relative to x0) onto the axis.
    proj = (px - ox) * axis_dir[0] + (py - oy) * axis_dir[1] + (pz - oz) * axis_dir[2]
    # Squared distance from each point to its closest point on the axis line.
    dist_sq = (ox + proj * axis_dir[0] - px)**2 \
        + (oy + proj * axis_dir[1] - py)**2 \
        + (oz + proj * axis_dir[2] - pz)**2
    return dist_sq <= cylinder.radius**2
def estimate_cylinder_volume(voxel_shape, cylinders, N_samples=1e5):
    """Estimate the volume consumed by the cylinders within a voxel using Monte Carlo integration
    Parameters
    ----------
    voxel_shape : 3 vector
        height, width, length of a voxel
    cylinders : list of Cylinder
        Cylinders to estimate the volume of
    N_samples : int
        Number of samples to use when estimating the volume
    Returns
    -------
    float
        Estimated volume of cylinders contained in the voxel
    """
    N_samples = int(N_samples)
    points = sample_voxel(voxel_shape, N_samples)
    # A point is covered when it falls inside at least one cylinder.
    covered = np.zeros(N_samples, dtype=bool)
    for cyl in cylinders:
        covered = covered | cylinder_mask(cyl, points)
    return covered.sum() / float(N_samples)
def diffuse(points, time_step, diffusion_coefficient):
    """Random-walk each point by one diffusion step.

    Every coordinate of every point moves by +/- sqrt(2*D*dt), each sign
    chosen with equal probability.
    """
    assert diffusion_coefficient >= 0
    assert points.shape[0] == 3, 'Must be a 3 x N vector'
    step_length = np.sqrt(2 * diffusion_coefficient * time_step)
    n_points = points.shape[1]
    # round(U(0,1)) yields 0 or 1; map it onto {-1, +1}.
    signs = 2 * np.round(uniform(0, 1, (3, n_points))) - 1
    return points + step_length * signs
def is_extravascular(point, cylinders):
    """True when `point` lies outside every cylinder in `cylinders`."""
    return all(cyl.distance_to_point(point) >= cyl.radius
               for cyl in cylinders)
def voxel_signal(phases):
    """Compute the complex voxel signal from per-proton phases.

    `phases` is 2-D with the second dimension indexing protons; the
    signal at each time point is the mean complex phasor over protons.
    """
    phasors = np.exp(1j * phases)
    return phasors.sum(axis=1) / phases.shape[1]
def sample_cylinder_center(max_radius, num_protons):
    """Sample `num_protons` points uniformly over the z = 0 disk
    cross-section of a cylinder of radius `max_radius`.

    Returns an ndarray of shape (3, num_protons); the third row is zero.
    """
    theta = uniform(0, 2*np.pi, num_protons)
    # Draw an independent radius per proton (sqrt gives uniform density
    # over the disk's area). Previously a single radius was drawn for all
    # protons, which placed every proton on the same circle rather than
    # filling the disk.
    radius = max_radius * np.sqrt(uniform(0, 1, num_protons))
    return np.array([
        radius * np.cos(theta),
        radius * np.sin(theta),
        np.zeros_like(theta)])
def sim_iv(params):
    """Run the intravascular (IV) phase-accumulation simulation.

    For each of `params.N_iv_cylinders` vessels: draw a radius, place
    `params.N_protons_iv` protons in its cross-section, fill the vessel
    with RBCs, then random-walk each proton while accumulating the
    RBC-induced field shift at every time point.

    Returns (time_points, phases) where phases has time along axis 0 and
    protons along axis 1. `params` is assumed to expose the attributes
    referenced below (time_total, time_step, Bo, Hct, ...) -- confirm
    against the caller.
    """
    start = time.time()
    log.info('Starting IV simulation')
    log.debug(params)
    time_points = np.arange(0, params.time_total + params.time_step, params.time_step)
    bs = []
    # NOTE: the inner enumerate index previously reused the name `i`,
    # shadowing this outer cylinder index; it is renamed below. An unused
    # `time_step_count` local was also removed.
    for cyl_idx in range(params.N_iv_cylinders):
        radius = uniform(*params.cylinder_radius_limits)
        protons = sample_cylinder_center(radius, params.N_protons_iv)
        log.debug('IV cylinder created, radius: %fm protons: %d', radius, protons.shape[1])
        for proton_idx, proton in enumerate(jtmri.np.iter_axes(protons, axes=1)):
            log.debug('simulating proton %d of %d', proton_idx, protons.shape[1])
            rbcs = fill_cylinder_with_rbcs(radius,
                                           params.iv_cylinder_len,
                                           params.Hct,
                                           lambda: uniform(*params.rbc_radius_limits),
                                           max_overlap=params.iv_rbc_overlap)
            log.debug('generated %d rbcs in a cylinder with radius: %f', len(rbcs), radius)
            # Cylinder axis lies along z-axis, force theta to follow sin(theta) distribution
            theta = np.arccos(1 - 2 * np.random.uniform())
            log.debug('simulating cylinder with angle %f', theta)
            b = []
            for _ in time_points:
                field_shift = rbc_magnetic_field_shift(proton,
                                                       rbcs,
                                                       norm(params.Bo),
                                                       params.delta_chi_rbc,
                                                       theta=theta)
                b.append(field_shift)
                # Advance the proton one diffusion step; diffuse expects a
                # 3 x N array, hence the newaxis round-trip.
                proton = diffuse(proton[:, np.newaxis],
                                 params.time_step,
                                 params.diffusion_coefficient_iv)[:, 0]
            bs.append(b)
    bs = np.array(bs).transpose()
    # Integrate angular frequency over time to obtain accumulated phase.
    freqs = params.gyromagnetic_ratio * bs
    phases = scipy.integrate.cumtrapz(freqs, dx=params.time_step, axis=0, initial=0)
    end = time.time()
    log.info('IV simulation finished in %d seconds', end - start)
    return time_points, phases
def sim_ev(params):
    """Run the extravascular (EV) phase-accumulation simulation.

    Fills the voxel with randomly oriented cylinders up to
    `params.vascular_fraction`, places protons in the extra-vascular
    space, then at every time point sums the extravascular field shift
    from each cylinder over all protons while the protons diffuse.

    Returns (time_points, phases) with time along axis 0 and protons
    along axis 1. `params` is assumed to expose the attributes
    referenced below -- confirm against the caller.
    """
    centered = lambda l: (-0.5 * l, 0.5 * l)
    time_step_count = int(np.ceil(params.time_total / params.time_step))
    time_points = np.arange(0, params.time_total + params.time_step, params.time_step)
    cylinder_func = lambda: random_cylinder(params.cylinder_radius_limits,
                                            centered(params.voxel_shape[0]),
                                            centered(params.voxel_shape[1]),
                                            centered(params.voxel_shape[2]))
    start = time.time()
    log.info('Starting EV simulation')
    log.info('Generating cylinders')
    cylinders = create_cylinders(params.vascular_fraction,
                                 params.voxel_shape,
                                 cylinder_func=cylinder_func)
    log.info('Generated %d cylinders with final vascular fraction: %f',
             len(cylinders),
             estimate_cylinder_volume(params.voxel_shape, cylinders))
    protons = sample_voxel_ev(params.voxel_shape, params.N_protons_ev, cylinders)
    # Calculate magnetic fields for each proton at each time point.
    # (A `positions` history and derived `dists` array were previously
    # accumulated here but never used; that dead work has been removed.)
    bs = []
    for i, t in enumerate(time_points):
        b = []
        log.debug('step: %d t: %0.2g steps_remaining: %d', i, t, time_step_count - i)
        for proton in jtmri.np.iter_axes(protons, 1):
            field_shift = 0
            for cylinder in cylinders:
                # The EV field expression is only valid outside a
                # cylinder; skip any cylinder containing this proton.
                if not is_extravascular(proton, [cylinder]):
                    continue
                field_shift += extra_vascular_magnetic_field_shift(proton,
                                                                   cylinder,
                                                                   params.Bo,
                                                                   params.delta_chi_blood)
            b.append(field_shift)
        bs.append(b)
        protons = diffuse(protons, params.time_step, params.diffusion_coefficient_ev)
    bs = np.array(bs)
    # Integrate angular frequency over time to obtain accumulated phase.
    freqs = params.gyromagnetic_ratio * bs
    phases = scipy.integrate.cumtrapz(freqs, dx=params.time_step, axis=0, initial=0)
    end = time.time()
    log.info('Simulation finished in %d seconds', end - start)
    return time_points, phases
def signal(params, phases_iv, phases_ev):
    """Combine IV and EV phase histories into signal magnitudes.

    Returns (sig_iv, sig_ev, sig): the intravascular magnitude, the
    extravascular magnitude, and their volume-fraction-weighted mix.
    """
    iv_magnitude = np.abs(voxel_signal(phases_iv))
    ev_magnitude = np.abs(voxel_signal(phases_ev))
    vf = params.vascular_fraction
    # NOTE(review): the intravascular term is scaled by the EV magnitude
    # as well (ev * iv) -- presumably intentional; worth confirming.
    combined = (1 - vf) * ev_magnitude \
        + vf * params.intrinsic_signal_ratio * ev_magnitude * iv_magnitude
    return iv_magnitude, ev_magnitude, combined
| true |
c182fdb6189ca3d1d8304a8a6b4f3509f875550b | Python | krteja97/data-structures-implementation-from-scratch-in-cpp | /result.py | UTF-8 | 510 | 3.078125 | 3 | [] | no_license | #python for result comparision
import numpy as np;
s1cr = 25;
s2cr = 23;
s3cr = 22;
s4cr = 23;
s5cr = 24;
total_credits = s1cr + s2cr + s3cr + s4cr + s5cr + 22;
s1g = 8.24;
s2g = 8.87
s3g = 9.32
s4g = 9.61;
s5g = 9.42;
sem4count = s1cr*s1g + s2cr*s2g + s3cr*s3g + s4cr*s4g + s5cr*s5g;
# for x in np.arange(7,10,0.04):
# tcredit = x*s5cr + (sem4count);
# print(x,tcredit/total_credits);
s5cg = 9.00;
for x in np.arange(8.0,10,0.01):
print(x,(sem4count + 22*x )/total_credits);
print(sem4count/117)
| true |
594b6d4303975daeef35bbf3bb5963de0a248c21 | Python | British-Oceanographic-Data-Centre/COAsT | /tests/config_parser_test.py | UTF-8 | 3,593 | 2.53125 | 3 | [
"MIT"
] | permissive | from datetime import datetime
import json
import os
from pathlib import Path
import pytest
from coast.data.config_parser import ConfigParser
from coast.data.config_structure import (
ConfigTypes,
ConfigKeys,
GriddedConfig,
IndexedConfig,
Dataset,
Domain,
CodeProcessing,
)
# Valid gridded config json.
gridded_json = {
"type": "gridded",
"grid_ref": {},
"dimensionality": 3,
"chunks": [],
"dataset": {"variable_map": {}, "dimension_map": {}, "coord_vars": [], "keep_all_vars": "False"},
"domain": {
"variable_map": {},
"dimension_map": {},
},
"static_variables": {"not_grid_vars": [], "coord_vars": [], "delete_vars": []},
"processing_flags": [],
}
# Valid indexed config json.
indexed_json = {
"type": "indexed",
"dimensionality": 3,
"chunks": [],
"dataset": {"variable_map": {}, "dimension_map": {}, "coord_vars": [], "keep_all_vars": "False"},
"processing_flags": [],
}
# Json with an invalid type value.
invalid_type_json = {"type": "invalid"}
@pytest.fixture
def json_file(input_json):
"""Write example json to file, for use with ConfigParser(). File auto deleted after test."""
dir_path = os.path.dirname(os.path.realpath(__file__))
cur_time = datetime.now().strftime("%Y%m%d%H%M%S")
tempfile = Path(f"{dir_path}/tempfile_{cur_time}.json")
with open(tempfile, "w") as temp:
json.dump(input_json, temp)
yield tempfile
tempfile.unlink()
def test__parse_gridded():
"""Test _parse_gridded method doesn't error on valid gridded json."""
gridded_obj = ConfigParser._parse_gridded(gridded_json)
assert type(gridded_obj) is GriddedConfig
assert gridded_obj.type is ConfigTypes.GRIDDED
def test__parse_indexed():
"""Test _parse_indexed method doesn't error on valid indexed json."""
indexed_obj = ConfigParser._parse_indexed(indexed_json)
assert type(indexed_obj) is IndexedConfig
assert indexed_obj.type is ConfigTypes.INDEXED
@pytest.mark.parametrize(
"config_json, object_key, object_type",
[
(gridded_json, ConfigKeys.DATASET, Dataset),
(gridded_json, ConfigKeys.DOMAIN, Domain),
],
)
def test__get_datafile_object(config_json, object_key, object_type):
"""Test _get_datafile_object method for both Dataset and Domain."""
data_obj = ConfigParser._get_datafile_object(config_json, object_key)
assert type(data_obj) is object_type
@pytest.mark.parametrize(
"config_json, , object_type",
[
(gridded_json, CodeProcessing),
],
)
def test__get_code_processing_object(config_json, object_type):
"""Test _get_code_processing_object method."""
data_obj = ConfigParser._get_code_processing_object(config_json)
assert type(data_obj) is object_type
# input_json argument indirectly links to json_file(input_json) method argument.
@pytest.mark.parametrize(
"input_json, config_class, config_type",
[
(gridded_json, GriddedConfig, ConfigTypes.GRIDDED),
(indexed_json, IndexedConfig, ConfigTypes.INDEXED),
],
)
def test_config_parser(json_file, config_class, config_type):
"""Test config parser init method with valid gridded and indexed json."""
config = ConfigParser(json_file).config
assert type(config) is config_class
assert config.type is config_type
@pytest.mark.parametrize("input_json", [invalid_type_json])
def test_config_parser_invalid_type(json_file):
"""Test config parser with an invalid type in json."""
with pytest.raises(ValueError) as e:
config = ConfigParser(json_file)
| true |
9f21602b83378cc5c2644a32fb4649a77add4514 | Python | daring-board/Recognition | /learning/create_data.py | UTF-8 | 1,488 | 3 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import json
def loadingJson(dirpath, f):
fpath = dirpath + '/' + f
fj = open(fpath,'r', encoding='utf-8')
json_data = json.load(fj)
fj.close()
return json_data
def output(data, mod):
with open('Utterance.txt', 'a', encoding='sjis') as f:
for i in range(len(data['turns'])):
if mod == "U" and data['turns'][i]['speaker'] == mod:
f.write(data['turns'][i]['utterance'])
f.write('\n')
elif mod == "S" and data['turns'][i]['speaker'] == mod and i != 0:
f.write(data['turns'][i]['utterance'])
f.write('\n')
else:
continue
if __name__ == "__main__":
argvs = sys.argv
_usage = """--
Usage:
python create_data.py [json] [speaker]
Args:
[json]: The argument is input directory that is contained files of json that is objective to convert to sql.
[speaker]: The argument is "U" or "S" that is speaker in dialogue.
""".rstrip()
if len(argvs) < 3:
print(_usage)
sys.exit(0)
# one file ver
'''
fj = open(argvs[1],'r')
json_data = json.load(fj)
fj.close()
output(json_data, mod)
'''
# more than two files ver
branch = os.walk(argvs[1])
mod = argvs[2]
for dirpath, dirs, files in branch:
for f in files:
json_data = loadingJson(dirpath, f)
output(json_data, mod)
| true |
47655a4c10bd7e4b0f65d4fce3d5ea3a1e2e3580 | Python | vietky-ct/werewolf | /assets/generate_json.py | UTF-8 | 796 | 2.734375 | 3 | [] | no_license | import requests
import re
import json
p = re.compile('dx-([a-zA-Z]+(-[a-zA-Z]*)*)')
arr = []
def format_image_name(url):
m = p.match(url.split('/')[-1].split('?')[0])
if m is None:
return None
# print('fileName', fileName)
return m.group(1)
# format_image_name('http://cdn.shopify.com/s/files/1/0740/4855/products/dx-seer_400x400.png?v=1555593941')
with open('cards.txt') as f:
id = 0
for line in f:
fileName = format_image_name(line).lower()
if fileName is None:
continue
id += 1
arr.append({
"id": id,
"name": '{}'.format(fileName),
"src": "./images/{}.png".format(fileName),
"score": 1,
"count": 1,
})
# print(arr)
print(json.dumps(arr)) | true |
2e84b55401770b7e35814ef023d5ac418cb1f5af | Python | SoumendraM/DataStructureInPython | /CHAP3-LINKLIST/SimpleHash.py | UTF-8 | 1,392 | 3.609375 | 4 | [] | no_license | # A simplest implementation of hashing using Pyhton hash() function
# Assumes, there is no collision
from random import randrange
from SingleLinkList import SingleLList
class SimpleHash:
def __init__(self, N):
self.nSize = N
self.arr = [None] * N
def isHashed(self, obj):
indx = hash(obj) % self.nSize
if self.arr[indx] == 1:
return True
else:
return False
def hashIt(self, obj):
indx = hash(obj) % self.nSize
if self.arr[indx] == None:
self.arr[indx] = 1
def getHashKey(self, obj):
return hash(obj) % self.nSize
if __name__ == '__main__':
ll = SingleLList()
lladdress = []
N = 10**4
for i in range(10):
ll.insertAtBeginning(randrange(1000))
ll.showSingleLL()
hObj = SimpleHash(10000)
t1 = ll.head
t2 = ll.head.getNext()
t3 = ll.head.getNext().getNext()
hObj.hashIt(t1)
hObj.hashIt(t2)
hObj.hashIt(t3)
t4 = ll.head.getNext().getNext().getNext().getNext()
print("t1 hashKey:", hObj.getHashKey(t1))
print("t2 hashKey:", hObj.getHashKey(t2))
print("t3 hashKey:", hObj.getHashKey(t3))
print("t4 hashKey:", hObj.getHashKey(t4))
print("t1 hash:", hObj.isHashed(t1))
print("t2 hash:", hObj.isHashed(t2))
print("t3 hash:", hObj.isHashed(t3))
print("t4 hash:", hObj.isHashed(t4))
| true |
fb65d6bf7532fffd8baf920eae567961e6e92ff8 | Python | flaviodipalo/RecSys-Project | /Python notebook/SLIM_BPR/Recommender.py | UTF-8 | 5,319 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Maurizio Ferrari Dacrema
"""
import multiprocessing
import time
import numpy as np
from SLIM_BPR.metrics import roc_auc, precision, recall, map, ndcg, rr
from SLIM_BPR.Recommender_utils import check_matrix
class Recommender(object):
"""Abstract Recommender"""
def __init__(self):
super(Recommender, self).__init__()
self.URM_train = None
self.sparse_weights = True
self.normalize = False
self.filterTopPop = False
self.filterTopPop_ItemsID = np.array([], dtype=np.int)
self.filterCustomItems = False
self.filterCustomItems_ItemsID = np.array([], dtype=np.int)
def fit(self, URM_train):
pass
def _filter_TopPop_on_scores(self, scores):
scores[self.filterTopPop_ItemsID] = -np.inf
return scores
def _filterCustomItems_on_scores(self, scores):
scores[self.filterCustomItems_ItemsID] = -np.inf
return scores
def _filter_seen_on_scores(self, user_id, scores):
seen = self.URM_train.indices[self.URM_train.indptr[user_id]:self.URM_train.indptr[user_id + 1]]
scores[seen] = -np.inf
return scores
def evaluateRecommendations(self, URM_test_new, at=5, minRatingsPerUser=1, exclude_seen=True,
mode='sequential'):
"""
Speed info:
- Sparse weighgs: batch mode is 2x faster than sequential
- Dense weighgts: batch and sequential speed are equivalent
:param URM_test_new: URM to be used for testing
:param at: 5 Length of the recommended items
:param minRatingsPerUser: 1 Users with less than this number of interactions will not be evaluated
:param exclude_seen: True Whether to remove already seen items from the recommended items
:param mode: 'sequential'
:return:
"""
# During testing CSR is faster
self.URM_test = check_matrix(URM_test_new, format='csr')
self.URM_train = check_matrix(self.URM_train, format='csr')
self.at = at
self.minRatingsPerUser = minRatingsPerUser
self.exclude_seen = exclude_seen
nusers = self.URM_test.shape[0]
# Prune users with an insufficient number of ratings
rows = self.URM_test.indptr
numRatings = np.ediff1d(rows)
mask = numRatings >= minRatingsPerUser
usersToEvaluate = np.arange(nusers)[mask]
usersToEvaluate = list(usersToEvaluate)
if mode=='sequential':
return self.evaluateRecommendationsSequential(usersToEvaluate)
else:
raise ValueError("Mode '{}' not available".format(mode))
def get_user_relevant_items(self, user_id):
return self.URM_test.indices[self.URM_test.indptr[user_id]:self.URM_test.indptr[user_id+1]]
def get_user_test_ratings(self, user_id):
return self.URM_test.data[self.URM_test.indptr[user_id]:self.URM_test.indptr[user_id+1]]
def evaluateRecommendationsSequential(self, usersToEvaluate):
start_time = time.time()
roc_auc_, precision_, recall_, map_, mrr_, ndcg_ = 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
n_eval = 0
for test_user in usersToEvaluate:
# Calling the 'evaluateOneUser' function instead of copying its code would be cleaner, but is 20% slower
# Being the URM CSR, the indices are the non-zero column indexes
relevant_items = self.get_user_relevant_items(test_user)
n_eval += 1
recommended_items = self.recommend(user_id=test_user, exclude_seen=self.exclude_seen,
n=self.at, filterTopPop=self.filterTopPop, filterCustomItems=self.filterCustomItems)
is_relevant = np.in1d(recommended_items, relevant_items, assume_unique=True)
# evaluate the recommendation list with ranking metrics ONLY
roc_auc_ += roc_auc(is_relevant)
precision_ += precision(is_relevant)
recall_ += recall(is_relevant, relevant_items)
map_ += map(is_relevant, relevant_items)
mrr_ += rr(is_relevant)
ndcg_ += ndcg(recommended_items, relevant_items, relevance=self.get_user_test_ratings(test_user), at=self.at)
if(n_eval % 10000 == 0):
print("Processed {} ( {:.2f}% ) in {:.2f} seconds. Users per second: {:.0f}".format(
n_eval,
100.0* float(n_eval)/len(usersToEvaluate),
time.time()-start_time,
float(n_eval)/(time.time()-start_time)))
if (n_eval > 0):
roc_auc_ /= n_eval
precision_ /= n_eval
recall_ /= n_eval
map_ /= n_eval
mrr_ /= n_eval
ndcg_ /= n_eval
else:
print("WARNING: No users had a sufficient number of relevant items")
results_run = {}
results_run["AUC"] = roc_auc_
results_run["precision"] = precision_
results_run["recall"] = recall_
results_run["map"] = map_
results_run["NDCG"] = ndcg_
results_run["MRR"] = mrr_
return (results_run)
| true |
c8421a9a2cb0d2ff019fea10947dab143f67a9fe | Python | taehoonkoo/tasa | /webserver/TASADemo/tasa_sql_templates.py | UTF-8 | 21,514 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
Srivatsan Ramanujam <sramanujam@gopivotal.com>
Jarrod Vawdrey <jvawdrey@gopivotal.com>
NLP Demo SQL templates
'''
def getTop20RelevantTweetsSQL(search_term):
'''
Top 20 Relevant Tweets (by score descending)
Columns: displayname, preferredusername, body, image
'''
sql = '''
SELECT displayname,
preferredusername,
body,
image
FROM
(
SELECT t3.displayname,
t3.preferredusername,
t2.body,
t3.image,
row_number() OVER (ORDER BY score DESC) AS _n_
FROM gptext.search(
TABLE(SELECT 1 SCATTER BY 1),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null,
'score desc') AS t1,
topicdemo.tweet_dataset AS t2,
sentimentdemo.actor_info AS t3
WHERE t1.id=t2.id AND t2.tweet_id=t3.tweet_id
) foo
WHERE _n_ <= 20
ORDER BY _n_
'''
return sql.format(search_term=search_term)
def getTopTweetIdsSQL(search_term):
return '''
select posted_date, array_agg(id) as tweet_ids
from (
select t1.id,
t1.score,
(t2.postedtime at time zone 'UTC')::date as posted_date,
row_number() over (partition by (t2.postedtime at time zone 'UTC')::date order by score desc) as index
from gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2
where t1.id = t2.id
) search
where index <= 20
group by posted_date
'''.format(search_term=search_term)
def getTopTweetDataSQL(search_term):
return '''
with id_to_attributes_map
as
(
select t1.id,
t3.displayname,
t3.preferredusername,
t2.body,
t3.image
from gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.actor_info t3
where t1.id = t2.id and
t2.tweet_id = t3.tweet_id
)
select id_to_attributes_map.*
from
(
select id
from
(
select t1.id,
t1.score,
(t2.postedtime at time zone 'UTC')::date as posted_date,
row_number() over (partition by (t2.postedtime at time zone 'UTC')::date order by score desc) as index
from gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2
where t1.id = t2.id
)q
where index <= 20
group by id
) tbl1,
id_to_attributes_map
where tbl1.id = id_to_attributes_map.id
'''.format(search_term=search_term)
def getTop20RelevantTweetsRangeSQL(search_term,min_timestamp,max_timestamp):
'''
Top 20 Relevant Tweets (by score descending) for a given date range
min_timestamp and max_timestamp to be of format: YYYY-MM-DDTHH:MM:SSZ (i.e. 2013-07-01T00:00:00Z)
Columns: displayname, preferredusername, body, image
'''
sql = '''
SELECT displayname,
preferredusername,
body,
image
FROM
(
SELECT t3.displayname,
t3.preferredusername,
t2.body,
t3.image,
row_number() OVER (ORDER BY score DESC) AS _n_
FROM gptext.search(
TABLE(SELECT 1 SCATTER BY 1),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
'{{postedtime:[{min_timestamp} TO {max_timestamp}]}}',
'score desc') AS t1,
topicdemo.tweet_dataset AS t2,
sentimentdemo.actor_info AS t3
WHERE t1.id=t2.id AND t2.tweet_id=t3.tweet_id
) foo
WHERE _n_ <= 20
ORDER BY _n_
'''
return sql.format(search_term=search_term, min_timestamp=min_timestamp, max_timestamp=max_timestamp)
def getTop20RelevantTweetsRangeSentSQL(search_term,min_timestamp,max_timestamp,sentiment):
'''
Top 20 Relevant Tweets (by score descending) for a given time range and sentiment
min_timestamp and max_timestamp to be of format: YYYY-MM-DDTHH:MM:SSZ (i.e. 2013-07-01T00:00:00Z)
sentiment: 'negative','positive', or 'neutral'
Columns: displayname, preferredusername, body, image
'''
sql = '''
SELECT displayname,
preferredusername,
body,
image,
sentiment
FROM
(
SELECT *,
row_number() OVER (PARTITION BY sentiment ORDER BY score DESC) AS _n_
FROM
(
SELECT t1.score,
t2.postedtime,
t3.displayname,
t3.preferredusername,
t2.body,
t3.image,
CASE WHEN t4.median_sentiment_index > 1 THEN 'positive'
WHEN t4.median_sentiment_index < -1 THEN 'negative'
WHEN t4.median_sentiment_index BETWEEN -1 AND 1 THEN 'neutral'
END AS sentiment
FROM gptext.search(TABLE(SELECT 1 SCATTER BY 1),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
'{{postedtime:[{min_timestamp} TO {max_timestamp}]}}',
'score desc') AS t1,
topicdemo.tweet_dataset AS t2,
sentimentdemo.actor_info AS t3,
sentimentdemo.training_data_scored AS t4
WHERE t1.id=t2.id AND t2.tweet_id=t3.tweet_id AND t2.tweet_id = t4.id AND t4.median_sentiment_index NOTNULL
) t5
) t6
WHERE sentiment = '{sentiment}'
AND _n_ <= 20
ORDER BY score DESC
'''
return sql.format(search_term=search_term, min_timestamp=min_timestamp, max_timestamp=max_timestamp, sentiment=sentiment)
def getTopTweetIdsWithSentimentSQL(search_term):
return '''
select posted_date,
sentiment,
array_agg(id) as tweet_ids
from (
select id,
score,
row_number() over(partition by posted_date, sentiment order by score desc) as index,
posted_date,
sentiment
from (
select t1.id,
t1.score,
(t2.postedtime at time zone 'UTC')::date as posted_date,
case when t3.median_sentiment_index > 1 then 'positive'
when t3.median_sentiment_index < -1 then 'negative'
else 'neutral'
end as sentiment
from
gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.training_data_scored t3
where t1.id = t2.id and t2.tweet_id = t3.id and t3.median_sentiment_index IS NOT NULL
) q1
) q2
where index <= 20
group by posted_date, sentiment
'''.format(search_term=search_term)
def getTopTweetDataWithSentimentSQL(search_term):
return '''
with id_to_attributes_map
as (
select t1.id,
t3.displayname,
t3.preferredusername,
t2.body,
t3.image
from
gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.actor_info t3
where t1.id = t2.id and t2.tweet_id = t3.tweet_id
)
select id_to_attributes_map.*
from (
select id
from (
select id,
score,
row_number() over(partition by posted_date, sentiment order by score desc) as index,
posted_date,
sentiment
from (
select t1.id,
t1.score,
(t2.postedtime at time zone 'UTC')::date as posted_date,
case when t3.median_sentiment_index > 1 then 'positive'
when t3.median_sentiment_index < -1 then 'negative'
else 'neutral'
end as sentiment
from
gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.training_data_scored t3
where t1.id = t2.id and t2.tweet_id = t3.id and t3.median_sentiment_index IS NOT NULL
) q1
) q2
where index <= 20
group by id
) tbl1,
id_to_attributes_map
where tbl1.id = id_to_attributes_map.id
'''.format(search_term=search_term)
def getHeatMapTweetIdsSQL(search_term):
return '''
with hmap
as (
select day_of_week,
hour_of_day,
id,
sentiment,
row_number() over (partition by day_of_week, hour_of_day, sentiment order by score desc) as index
from
(
select t1.id,
t1.score,
case
when t3.median_sentiment_index > 1 then 'positive'
when t3.median_sentiment_index < -1 then 'negative'
else 'neutral'
end as sentiment,
extract(DOW from (t2.postedtime at time zone 'UTC')) as day_of_week,
extract(HOUR from (t2.postedtime at time zone 'UTC')) as hour_of_day
from
gptext.search (
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.training_data_scored t3
where t1.id = t2.id and t2.tweet_id = t3.id and t3.median_sentiment_index IS NOT NULL
) q
)
select hmap_stats.day_of_week,
hmap_stats.hour_of_day,
id_arr.sentiment,
hmap_stats.num_tweets,
hmap_stats.num_positive,
hmap_stats.num_negative,
id_arr.id_arr
from (
select day_of_week,
hour_of_day,
count(id) as num_tweets,
count(id) filter(where sentiment='positive') as num_positive,
count(id) filter(where sentiment='negative') as num_negative,
count(id) filter(where sentiment='neutral') as num_neutral
from hmap
group by day_of_week, hour_of_day
) hmap_stats,
(
select day_of_week,
hour_of_day,
sentiment,
array_agg(id order by index) as id_arr
from hmap
where sentiment in ('positive', 'negative') and index <=10
group by day_of_week, hour_of_day, sentiment
) id_arr
where hmap_stats.day_of_week = id_arr.day_of_week and
hmap_stats.hour_of_day = id_arr.hour_of_day
'''.format(search_term=search_term)
def getHeatMapTweetDateSQL(search_term):
return '''
with hmap
as (
select day_of_week,
hour_of_day,
id,
sentiment,
row_number() over (partition by day_of_week, hour_of_day, sentiment order by score desc) as index
from (
select t1.id,
t1.score,
case
when t3.median_sentiment_index > 1 then 'positive'
when t3.median_sentiment_index < -1 then 'negative'
else 'neutral'
end as sentiment,
extract(DOW from (t2.postedtime at time zone 'UTC')) as day_of_week,
extract(HOUR from (t2.postedtime at time zone 'UTC')) as hour_of_day
from
gptext.search (
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.training_data_scored t3
where t1.id = t2.id and t2.tweet_id = t3.id and t3.median_sentiment_index IS NOT NULL
) q
),
id_to_attributes_map
as (
select t1.id,
t3.displayname,
t3.preferredusername,
t2.body,
t3.image
from
gptext.search(
TABLE(select * from topicdemo.tweet_dataset),
'vatsandb.topicdemo.tweet_dataset',
'{search_term}',
null
) t1,
topicdemo.tweet_dataset t2,
sentimentdemo.actor_info t3
where t1.id = t2.id and t2.tweet_id = t3.tweet_id
)
select id_to_attributes_map.*
from
(
select id
from hmap
where sentiment in ('positive', 'negative') and index <=10
group by id
) tbl1,
id_to_attributes_map
where tbl1.id = id_to_attributes_map.id
'''.format(search_term=search_term)
def getAdjectivesTweetIdsSQL(search_term):
    """Build SQL returning, for the top-100 adjectives (POS tag 'A') in
    tweets matching *search_term*, the frequency normalised by the most
    frequent adjective plus the ids of the top-20 tweets per adjective
    (ordered by relevance score).

    WARNING: search_term is spliced into the SQL via str.format -- do not
    pass untrusted input (SQL-injection risk).
    """
    return '''
    with token_freq_id_arr
    as
    (
        select token,
               count(*) as frequency,
               array_agg(id order by score desc) as id_arr
        from
        (
            select t1.id,
                   t1.score,
                   lower(t3.token) as token,
                   t3.indx
            from gptext.search(
                     TABLE(select * from topicdemo.tweet_dataset),
                     'vatsandb.topicdemo.tweet_dataset',
                     '{search_term}',
                     null
                 ) t1,
                 topicdemo.tweet_dataset t2,
                 sentimentdemo.training_data_pos_tagged t3
            where t1.id = t2.id and t2.tweet_id = t3.id and t3.tag = 'A'
        ) tbl
        group by token
    )
    select token,
           frequency*1.0/(select max(frequency) from token_freq_id_arr) as normalized_frequency,
           id_arr[1:20] -- Top 20 tweets per adjective
    from token_freq_id_arr
    order by normalized_frequency desc
    --Top-100 adjectives by normalized frequency
    limit 100
    '''.format(search_term=search_term)
def getAdjectivesTweetDataSQL(search_term):
    """Build SQL returning author/body/image attributes for the tweets behind
    the top-100 adjectives' top-20 tweet ids (companion query to
    getAdjectivesTweetIdsSQL).

    WARNING: search_term is spliced into the SQL via str.format -- do not
    pass untrusted input (SQL-injection risk).
    """
    return '''
    with id_to_attributes_map
    as
    (
        select t1.id,
               t3.displayname,
               t3.preferredusername,
               t2.body,
               t3.image
        from gptext.search(
                 TABLE(select * from topicdemo.tweet_dataset),
                 'vatsandb.topicdemo.tweet_dataset',
                 '{search_term}',
                 null
             ) t1,
             topicdemo.tweet_dataset t2,
             sentimentdemo.actor_info t3
        where t1.id = t2.id and
              t2.tweet_id = t3.tweet_id
    ),
    token_freq_id_arr
    as
    (
        select token,
               count(*) as frequency,
               array_agg(id order by score desc) as id_arr
        from
        (
            select t1.id,
                   t1.score,
                   lower(t3.token) as token,
                   t3.indx
            from gptext.search(
                     TABLE(select * from topicdemo.tweet_dataset),
                     'vatsandb.topicdemo.tweet_dataset',
                     '{search_term}',
                     null
                 ) t1,
                 topicdemo.tweet_dataset t2,
                 sentimentdemo.training_data_pos_tagged t3
            where t1.id = t2.id and t2.tweet_id = t3.id and t3.tag = 'A'
        ) tbl
        group by token
        order by frequency desc
        -- Top-100 adjectives only
        limit 100
    )
    select id_to_attributes_map.*
    from
    (
        select id
        from
        (
            select token,
                   frequency,
                   -- Top-20 tweets per adjective
                   unnest(id_arr[1:20]) as id
            from token_freq_id_arr
        )q
        group by id
    ) top_adj,
    id_to_attributes_map
    where id_to_attributes_map.id = top_adj.id
    '''.format(search_term=search_term)
def getCountOfRelevantTweetsSQL(search_term):
    """Return the GPText SQL that counts tweets matching *search_term*.

    WARNING: the term is interpolated directly into the SQL text; do not
    pass untrusted input.
    """
    template = '''
        SELECT * FROM gptext.search_count('vatsandb.topicdemo.tweet_dataset','{search_term}', null)
    '''
    return template.format(search_term=search_term)
def getCountOfRelevantTweetsRangeSQL(search_term, min_timestamp, max_timestamp):
    """Return GPText SQL counting tweets matching *search_term* posted within
    [min_timestamp, max_timestamp].

    Timestamps must use the format YYYY-MM-DDTHH:MM:SSZ
    (e.g. 2013-07-01T00:00:00Z).  The doubled braces in the template survive
    str.format as literal braces in the GPText filter expression.
    """
    template = '''
        SELECT * FROM gptext.search_count('vatsandb.topicdemo.tweet_dataset','{search_term}','{{postedtime:[{min_timestamp} TO {max_timestamp}]}}')
    '''
    return template.format(search_term=search_term,
                           min_timestamp=min_timestamp,
                           max_timestamp=max_timestamp)
def getStatsRelevantTweetsSQL(search_term,min_timestamp,max_timestamp):
    '''
    Grab count of relevant tweets, average median_sentiment_index, count of tweets for each sentiment label
    min_timestamp and max_timestamp to be of format: YYYY-MM-DDTHH:MM:SSZ (i.e. 2013-07-01T00:00:00Z)

    Sentiment buckets: > 1 positive, < -1 negative, [-1, 1] neutral.
    WARNING: all three arguments are interpolated via str.format (SQL-injection risk).
    '''
    sql = '''
        SELECT count(tweet_id) as num_tweets,
               avg(median_sentiment_index) as mean_sentiment_index,
               count(tweet_id) FILTER (WHERE median_sentiment_index > 1) AS positive_count,
               count(tweet_id) FILTER (WHERE median_sentiment_index < -1) AS negative_count,
               count(tweet_id) FILTER (WHERE median_sentiment_index BETWEEN -1 AND 1) AS neutral_count
        FROM
        (
            SELECT t1.*,
                   t2.tweet_id,
                   t2.postedtime,
                   t3.median_sentiment_index
            FROM gptext.search (
                     TABLE(SELECT 1 SCATTER BY 1),
                     'vatsandb.topicdemo.tweet_dataset',
                     '{search_term}',
                     '{{postedtime:[{min_timestamp} TO {max_timestamp}]}}'
                 ) t1,
                 topicdemo.tweet_dataset t2,
                 sentimentdemo.training_data_scored t3
            WHERE t1.id = t2.id AND t2.tweet_id = t3.id AND t3.median_sentiment_index NOTNULL
        ) q
    '''
    return sql.format(search_term=search_term, min_timestamp=min_timestamp, max_timestamp=max_timestamp)
dd4ad795a683d6e0f63eefaef57fa6e7bebe8f48 | Python | AlinaZankevich/python_course | /B_decrease.py | UTF-8 | 2,129 | 3.953125 | 4 | [] | no_license | import time
class RangeIterableIterator:
    """Iterator yielding '#' bars of strictly decreasing length.

    RangeIterableIterator(n) yields '#'*(n-1), '#'*(n-2), ..., '#'.
    For n <= 1 nothing is yielded.
    """

    def __init__(self, size):
        self.x = size

    def __iter__(self):
        # The object is its own iterator, so it simply returns itself.
        return self

    def __next__(self):
        self.x -= 1
        if self.x > 0:
            return '#' * self.x
        raise StopIteration
# 8. В файле «B_decrease.py» повторите главную программу
# из файла «A_increase.py» с итерируемым объектом нового
# класса RangeIterableIterator.
if __name__ == '__main__':
    # Demo: print a shrinking bar of '#' characters, one line every
    # quarter second.
    for item in RangeIterableIterator(10):
        print(item)
        time.sleep(0.25)
# 9. Запустите написанную в файле «B_decrease.py» программу,
# полюбуйтесь рисунком в консоли.
#
# 10. Дальнейшим упрощением работы с итерируемыми объектами является
# применение генераторов вместо итераторов. Для работы с генераторами
# создайте новый файл «C_sinus.py».
#
# 11. В файле «C_sinus.py» создайте для бесконечных последовательностей
# строк из решеток длиной, определяемой функцией синус, класс
# SinusIterableWithGenerator, который будет итерируемым объектом,
# возвращающим в методе __iter__ генератор с помощью оператора yield:
# 12. В файле «C_sinus.py» повторите главную программу
# из файла «A_increase.py» с итерируемым объектом нового класса
# SinusIterableWithGenerator.
#
# 13. Запустите написанную в файле «C_sinus.py» программу,
# полюбуйтесь рисунком в консоли.
# Для остановки используйте кнопку с изображением красного квадрата.
#
| true |
3c07f116df7a09352394dd9d65ab24b1eae58755 | Python | AmilaWeerasinghe/Data-Structures-and-Algorithms | /Lab 01/code/fib.py | UTF-8 | 1,530 | 4.03125 | 4 | [] | no_license |
#!/usr/bin/python
import timeit
import datetime as dt
import matplotlib.pyplot as plt
import numpy as np
def fib_r(x):
    """Recursively compute the x-th Fibonacci number (fib_r(1) == fib_r(2) == 1).

    Exponential time; intended only for the small x used in the timing demo.
    Values of x below 1 also return 1.
    """
    if x > 2:
        return fib_r(x - 1) + fib_r(x - 2)
    return 1
def fib_i(x):
    """Iteratively compute the x-th Fibonacci number (fib_i(1) == fib_i(2) == 1).

    Linear time.  Values of x below 1 return 1, matching fib_r.
    """
    prev, cur = 1, 1
    for _ in range(x - 2):
        prev, cur = cur, prev + cur
    return cur
"""
# measure time for recursion
a = []
time = []
for x in range(1, 41):
start_r = dt.datetime.now()
print("Fib of " + str(x) + " = " + str(fib_r(x)))
end_r = dt.datetime.now()
elapsed_r = (end_r - start_r).microseconds
elapsed_r = elapsed_r*1000
time.append(elapsed_r)
print("Python recursion, number =" + str(x) + " time = "+str(elapsed_r))
print(*time)
list1 = range(1, 41)
print(*list1)
plt.plot(list1, time)
plt.xlabel('number')
plt.ylabel('time taken (nanoseconds)')
plt.title('Fibbonacci Python implentation using recursion')
plt.grid(True)
plt.show()
"""
a = []
time = []
# measure time for iteration
# NOTE(review): elapsed is taken from timedelta.microseconds, which is only
# the sub-second component -- runs longer than 1 s would be misreported;
# consider .total_seconds().  The *1000 scales to nanoseconds for the y-axis.
for x in range(1, 41):
    start_i = dt.datetime.now()
    print("Fib of " + str(x) + " = " + str(fib_i(x)))
    end_i = dt.datetime.now()
    elapsed_i = (end_i - start_i).microseconds
    elapsed_i = elapsed_i*1000
    time.append(elapsed_i)
    print("Python iterations, number =" + str(x) + " time = "+str(elapsed_i))
print(*time)
list1 = range(1, 41)
print(*list1)
# Plot time-per-call against n for the iterative implementation.
plt.plot(list1, time)
plt.xlabel('number')
plt.ylabel('time taken (nanoseconds)')
plt.title('Fibbonacci Python implentation using iteration')
plt.grid(True)
plt.show()
| true |
d902772998eae91c16f3235bd0d2599443826bf9 | Python | GradyKurpasi/SSIE-519-RNN-WSD | /preprocess.py | UTF-8 | 6,543 | 2.71875 | 3 | [] | no_license | # Preprocessing to prepare datasets and write them to disk
# Only intended to run once
# from gensim.test.utils import datapath
from gensim import utils
import gensim.models
from nltk.corpus import semcor
import pickle
import random
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
####################################################################################################
#
# Create and Store Word2Vec distributional word vectors
#
# GENSIM code adapted from GENSIM documentation found at
# https://radimrehurek.com/gensim/auto_examples/tutorials/run_word2vec.html#sphx-glr-download-auto-examples-tutorials-run-word2vec-py
class MyCorpus(object):
    """Iterable of tokenised sentences (lists of str) for gensim Word2Vec."""

    def __iter__(self):
        # SemCor is a sense-tagged subset of the Brown Corpus; only its
        # plain tokenised sentences are used here, normalised through
        # gensim's simple_preprocess.
        for sentence in semcor.sents():
            yield utils.simple_preprocess(' '.join(list(sentence)))
print('Creating Word Vectors')
sentences = MyCorpus()
model = gensim.models.Word2Vec(sentences=sentences, size=300) #creates distributional word vectors with dimensionality = 300
import tempfile
# Persist the freshly-trained model to a uniquely-named temp file.
with tempfile.NamedTemporaryFile(prefix='gensim-model-', delete=False) as tmp:
    temporary_filepath = tmp.name #'C:\\Users\\gkurp\\AppData\\Local\\Temp\\gensim-model-zviw4kpu'
model.save(temporary_filepath)
# NOTE(review): this second temp file is created but ignored -- the load
# below uses a hard-coded filename from an earlier run; confirm intent.
with tempfile.NamedTemporaryFile(prefix='gensim-model-', delete=False) as tmp:
    temporary_filepath = 'gensim-model-zviw4kpu'
new_model = gensim.models.Word2Vec.load(temporary_filepath)
print('done')
####################################################################################################
#
# Create and Store SemCor word / synset dictionaries
#
# SEMCOR documentation found at
# https://www.nltk.org/_modules/nltk/corpus/reader/semcor.html
# NLTK Corpus howto found at
# http://www.nltk.org/howto/corpus.html
def processChunk(sentence_chunk):
    """Tally one SemCor sentence chunk into the module-global `lemmas` dict
    (lemmas[word][descriptor] -> count) and return the surface word.

    `descriptor` is either a Lemma/synset label or a plain POS label.
    Mutates the global `lemmas`; assumes `sentence_chunk` is an nltk Tree
    from semcor.tagged_sents(tag='both') -- confirm against caller.
    """
    if "Lemma" in str(sentence_chunk.label()):
        # if "Lemma" in chunk label, chunk represents a sysnset encoding
        descriptor = str(sentence_chunk.label())
        word = str(sentence_chunk[0][0]) if type(sentence_chunk[0][0]) == str else " ".join(sentence_chunk[0][0])
        # if synset encountered, sentence chunk is a tree w subtree
        # sentence_chunk[0] is a tree with POS and [lemma]
        # if [lemma] are only 1 word, type(sentence_chunk[0][0]) is str
        # if [lemma] are more than 1 word type(sentence_chunk[0][0]) is nltk.tree
        # if [lemma] are more than 1 word, str array is joined with " " separator
    else:
        # chunk represents a regular tree (POS : stop word or punctuation)
        word = str(sentence_chunk[0])
        descriptor = str(sentence_chunk.label())
    if word in lemmas:
        lemmas[word][descriptor] = lemmas[word][descriptor] + 1 if descriptor in lemmas[word] else 1
    else:
        lemmas[word] = {descriptor: 1} # this else statement prevents keyerror lookups on lemmas[word][synset]
    return word
print("Importing Lemma and Synsets")
lemmas = dict()
# lemmas is a dict of dict,
# lemmas[word] = dictionary of { synsets:frequency of synset when associated with a 'word' }
# lemmas[word][synset] is a count of how many times a synset appears for each word
# *** len(lemmas[word]) = the number of different senses a 'word' has in the corpus
taggedsentences = semcor.tagged_sents(tag='both')
# all sentences, fully tagged from SEMCOR
plaintextsentences = semcor.sents()
# all sentences from SEMCOR
targetsentences = {}
# sentences containing the target word 'back' (comment said 'point'; the code below filters on ' back ')
pos = dict()
# list of part of speech tags from the corpus
max_sentence_len = 0
lemmacount = {}
# find all sentences including exactly 1 occurence of 'back'
# not all of these sentences are related to the synsets we are looking for
# e.g. goes back relates to the verb go instead of back
for i, s in enumerate(plaintextsentences) :
    ss = ' '.join(list(s))
    if ss.count(' back ') == 1:
        targetsentences[ss] = i
    # temp = utils.simple_preprocess(' '.join((list(s))
# find all lemma and synsets associated with them.
for sentence in taggedsentences:
    # Prepare:
    # synset inventory and count by lemma
    # lemma inventory and count by synset
    for sentence_chunk in sentence:
        processChunk(sentence_chunk)
    if len(sentence) > max_sentence_len : max_sentence_len = len(sentence)
# find lemma with most different senses
# for lemma in lemmas:
#     lemmacount[lemma] = len(lemmas[lemma])
# high_lemma = {i:j for i, j in lemmacount.items() if j > 5}
# high_lemmas ={}
# for a in high_lemma.keys():
#     high_lemmas[a] = lemmas[a]
# Persist the lemma inventory and the synset counts for 'back'.
with open('lemma.p', 'wb') as f:
    pickle.dump(lemmas, f)
targetsynset = lemmas['back']
tgtsynsetlist = list(targetsynset)
with open('targetsynset.p', 'wb') as f:
    pickle.dump(targetsynset, f)
for tgt in targetsynset:
    print(tgt, targetsynset[tgt])
print("Done")
##################################################################################
#
# create and store training / test data
#
# train / test data is dictionary of
# { sentence : index of sense in target synset lists}
print("Creating Training/Testing data")
# train / test data is a dict { sentence : index of sense in tgtsynsetlist }.
trainingsentences = {}
notfound = 0
for i, sent in enumerate(targetsentences):
    idx = targetsentences[sent]
    tagsent = taggedsentences[idx]
    for token in tagsent:
        if str(token.label()) in tgtsynsetlist:
            trainingsentences[sent] = tgtsynsetlist.index(str(token.label()))
            #print(sent, str(token.label()))
            break
    else:
        # for/else: runs only when the inner loop finished without break,
        # i.e. no token of this sentence carried a target synset label.
        notfound += 1
print(notfound)
print(len(trainingsentences))
# 75/25 train/test split after shuffling the sentence order.
begintestdata = round(len(trainingsentences) * .75)
trainingsentenceslist = list(trainingsentences)
random.shuffle(trainingsentenceslist)
randtrainingsentences = {}
for rs in trainingsentenceslist:
    randtrainingsentences[rs] = trainingsentences[rs]
train_data = dict(list(randtrainingsentences.items())[:begintestdata])
test_data = dict(list(randtrainingsentences.items())[begintestdata:])
with open('train_data.p', 'wb') as f:
    pickle.dump(train_data, f)
with open('test_data.p', 'wb') as f:
    pickle.dump(test_data, f)
for s in train_data:
    print(s, tgtsynsetlist[train_data[s]])
print("Done")
3238d40ae5b5c4f6610d78eb326e25ea9c9bb336 | Python | nixizi/Weight-Vector-Grid-Based-Archive | /Code/support_method.py | UTF-8 | 15,033 | 2.546875 | 3 | [] | no_license | import math
import numpy as np
import copy
import random
import decomposition_method
import itertools
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.ticker as mticker
import sys
import pygmo as pg
import time
import pandas as pd
from pandas.plotting import parallel_coordinates
import problem_set
import operator
import functools
def update_archive(weight_vectors, archive, new_solution, refer_vector, update_type="shuffle"):
    """Dispatch to one of the archive-update strategies.

    update_type is one of "archive" (full scan), "shuffle" (shuffled scan
    with early exit) or "random" (partial shuffled scan).  Unknown values
    are silently ignored, matching the original behaviour.
    """
    strategies = {
        "archive": update_archive_origin,
        "shuffle": update_archive_shuffle,
        "random": update_archive_random,
    }
    handler = strategies.get(update_type)
    if handler is not None:
        handler(weight_vectors, archive, new_solution, refer_vector)
def update_archive_origin(weight_vectors, archive, new_solution, refer_vector):
    """Assign new_solution to the angularly closest weight vector and keep it
    there if it improves the scalarised (decomposition) fitness.

    Scans every weight vector in index order.
    `archive` maps weight-vector index -> best solution kept for that vector.

    Fix: the cosine is clamped into [-1, 1] before math.acos -- floating-point
    round-off can push it slightly outside for near-parallel vectors, which
    would raise ValueError.
    """
    minimum_angle = sys.maxsize
    nearby_vector_index = None
    # Find the closest weight vector (smallest angle to new_solution).
    for i in range(len(weight_vectors)):
        wv = weight_vectors[i, :]
        cos_value = (np.dot(wv, new_solution)) / \
            math.sqrt((wv.dot(wv.T)) * (new_solution.dot(new_solution.T)))
        angle = math.acos(max(-1.0, min(1.0, cos_value)))
        if angle < minimum_angle:
            nearby_vector_index = i
            minimum_angle = angle
    nearby_vector = weight_vectors[nearby_vector_index]
    # Keep the better of (existing, new) solution for that vector's cell.
    if nearby_vector_index not in archive:
        archive[nearby_vector_index] = new_solution
    else:
        original_solution = archive[nearby_vector_index]
        if decomposition(new_solution, nearby_vector, refer_vector, 5) < decomposition(original_solution, nearby_vector, refer_vector, 5):
            archive[nearby_vector_index] = new_solution
def update_archive_shuffle(weight_vectors, archive, new_solution, refer_vector):
    """Like update_archive_origin, but scans the weight vectors in shuffled
    order and stops early at the first vector within 0.01 rad.

    Fix: the cosine is clamped into [-1, 1] before math.acos -- floating-point
    round-off can push it slightly outside for near-parallel vectors, which
    would raise ValueError.
    """
    minimum_angle = sys.maxsize
    nearby_vector_index = None
    index_pool = [x for x in range(len(weight_vectors))]
    random.shuffle(index_pool)
    # Find the closest vectors
    for i in index_pool:
        wv = weight_vectors[i, :]
        cos_value = (np.dot(wv, new_solution)) / \
            math.sqrt((wv.dot(wv.T)) * (new_solution.dot(new_solution.T)))
        angle = math.acos(max(-1.0, min(1.0, cos_value)))
        if angle < 0.01:
            # Close enough: accept immediately instead of finishing the scan.
            minimum_angle = angle
            nearby_vector_index = i
            break
        if angle < minimum_angle:
            nearby_vector_index = i
            minimum_angle = angle
    nearby_vector = weight_vectors[nearby_vector_index]
    # Update archive
    if nearby_vector_index not in archive:
        archive[nearby_vector_index] = new_solution
    else:
        original_solution = archive[nearby_vector_index]
        if decomposition(new_solution, nearby_vector, refer_vector, 5) < decomposition(original_solution, nearby_vector, refer_vector, 5):
            archive[nearby_vector_index] = new_solution
def update_archive_random(weight_vectors, archive, new_solution, refer_vector):
    """Like update_archive_shuffle, but only scans a random half of the
    weight vectors (still exiting early within 0.01 rad).

    Fixes: cosine clamped into [-1, 1] before math.acos (float round-off can
    push it outside and raise ValueError); removed the unused `threadhold`
    local that the original computed and never read.
    """
    minimum_angle = sys.maxsize
    weight_vectors_size = len(weight_vectors)
    nearby_vector_index = None
    index_pool = [x for x in range(weight_vectors_size)]
    random.shuffle(index_pool)
    # Find the closest vector among a random half of the weight vectors.
    for i in index_pool[:int(weight_vectors_size / 2)]:
        wv = weight_vectors[i, :]
        cos_value = (np.dot(wv, new_solution)) / \
            math.sqrt((wv.dot(wv.T)) * (new_solution.dot(new_solution.T)))
        angle = math.acos(max(-1.0, min(1.0, cos_value)))
        if angle < 0.01:
            minimum_angle = angle
            nearby_vector_index = i
            break
        if angle < minimum_angle:
            nearby_vector_index = i
            minimum_angle = angle
    nearby_vector = weight_vectors[nearby_vector_index]
    # Update archive
    if nearby_vector_index not in archive:
        archive[nearby_vector_index] = new_solution
    else:
        original_solution = archive[nearby_vector_index]
        if decomposition(new_solution, nearby_vector, refer_vector, 5) < decomposition(original_solution, nearby_vector, refer_vector, 5):
            archive[nearby_vector_index] = new_solution
def update_EP(EP, new_solution):
    """Insert new_solution into the external population EP, keeping EP
    mutually non-dominated.

    If new_solution is dominated by (or equal to) any member, EP is left
    unchanged; otherwise every member it dominates is removed and it is
    appended.

    Fix: iterate over a snapshot of EP -- remove_from_EP deletes from EP,
    and deleting from a list while iterating it skips elements.
    """
    for cur_solution in list(EP):
        if is_dominate(cur_solution, new_solution) or np.all(cur_solution == new_solution):
            return None
        if is_dominate(new_solution, cur_solution):
            remove_from_EP(cur_solution, EP)
    EP.append(new_solution)
def update_BA(BA, new_solution, size_archive):
    """Insert new_solution into the bounded archive BA (capacity size_archive).

    Dominated/duplicate candidates are rejected; members the candidate
    dominates are removed.  When BA is full, the candidate replaces a random
    member with probability 0.5, otherwise it is discarded.

    Fix: iterate over a snapshot of BA -- remove_from_EP deletes from BA,
    and deleting from a list while iterating it skips elements.
    """
    for cur_solution in list(BA):
        if is_dominate(cur_solution, new_solution) or np.all(cur_solution == new_solution):
            return None
        if is_dominate(new_solution, cur_solution):
            remove_from_EP(cur_solution, BA)
    if len(BA) < size_archive:
        BA.append(new_solution)
    else:
        if random.random() > 0.5:
            index = random.randint(0, size_archive - 1)
            BA[index] = new_solution
def update_BAHVC(BA, new_solution, size_archive):
    """Insert new_solution into the bounded archive BA; when full, evict the
    least hypervolume contributor (reference point (2, ..., 2)) first.

    Fix: iterate over a snapshot of BA -- remove_from_EP deletes from BA,
    and deleting from a list while iterating it skips elements.
    """
    for cur_solution in list(BA):
        if is_dominate(cur_solution, new_solution) or np.all(cur_solution == new_solution):
            return None
        if is_dominate(new_solution, cur_solution):
            remove_from_EP(cur_solution, BA)
    if len(BA) < size_archive:
        BA.append(new_solution)
    else:
        m = len(BA[0])
        hv = pg.hypervolume(BA)
        index = hv.least_contributor([2 for i in range(m)])
        BA.pop(index)
        BA.append(new_solution)
def cal_uniform(archive):
    """Estimate how uniformly the points in `archive` (2-D ndarray, one point
    per row) are spread, via each point's mean distance to its angularly
    nearest len(archive)//100 neighbours.

    Returns (mean, variance, std, coefficient-of-variation) of those
    per-point mean distances.
    """
    closest_dis_arr = []
    length_archive = len(archive)
    # Number of nearest neighbours considered per point (1% of the archive).
    num = length_archive // 100
    for i in range(length_archive):
        vector_i = archive[i, :]
        min_angle_arr = [sys.maxsize for x in range(num)]
        vector_index_arr = [-1 for x in range(num)]
        for j in range(length_archive):
            vector_j = archive[j, :]
            if np.array_equal(vector_i, vector_j):
                # NOTE(review): `break` aborts the whole inner scan at the
                # first equal vector (normally j == i), so only vectors with
                # index < i are ever considered as neighbours of vector i;
                # `continue` was likely intended -- confirm.
                break
            else:
                cos_value = (np.dot(vector_i, vector_j)) / \
                    math.sqrt((vector_i.dot(vector_i.T))
                              * (vector_j.dot(vector_j.T)))
                angle = math.acos(cos_value)
                # Keep the `num` smallest angles seen so far.
                max_angle = max(min_angle_arr)
                max_index = min_angle_arr.index(max_angle)
                if angle < max_angle:
                    min_angle_arr[max_index] = angle
                    vector_index_arr[max_index] = j
        distance_arr = []
        for v_index in vector_index_arr:
            # NOTE(review): unfilled slots keep the -1 sentinel, which here
            # silently indexes the LAST row of archive -- confirm.
            vector_j = archive[v_index]
            distance = (sum((vector_i - vector_j)**2))**0.5
            distance_arr.append(distance)
        distance_arr = np.array(distance_arr)
        closest_dis_arr.append(np.mean(distance_arr))
    closest_dis_arr = np.array(closest_dis_arr)
    return np.mean(closest_dis_arr), np.var(closest_dis_arr), np.std(closest_dis_arr), np.std(closest_dis_arr) / np.mean(closest_dis_arr)
def remove_from_EP(remove_array, EP):
    """Remove the first element of EP equal (element-wise) to remove_array.

    Returns True if an element was removed, False otherwise.  (The original
    implicitly returned None on a miss; False is equivalent in boolean
    context, so callers are unaffected.)
    """
    for index, cur_array in enumerate(EP):
        if np.all(cur_array == remove_array):
            del EP[index]  # safe: we return immediately after mutating
            return True
    return False
def generate_init_population(a, b, dimension, size):
    """Return a (size x dimension) ndarray of uniform random values in [a, b).

    Fix: the original formula `(b - a) * random.random() - abs(a)` only
    produces [a, b) when a <= 0; for a > 0 it yielded [-a, b - 2a).  The
    standard `a + (b - a) * r` form is used instead (identical for a <= 0).
    """
    return np.array([[a + (b - a) * random.random()
                      for j in range(dimension)] for i in range(size)])
def decomposition(fx, coef_vector, refer_vector, theta):
    # Scalarise objective vector fx with the Tchebycheff approach.
    # NOTE(review): `theta` is accepted but never used (callers pass 5);
    # presumably a leftover from a PBI-style decomposition -- confirm.
    return decomposition_method.tchebycheff(fx, coef_vector, refer_vector)
def select_result_BHV(archive, remaining_size, refer_point):
    """Shrink `archive` down to `remaining_size` points by repeatedly
    dropping the smallest hypervolume contributor w.r.t. `refer_point`.

    Returns the surviving points as an ndarray; `archive` itself is not
    modified (a copy is trimmed).
    """
    survivors = list(archive)
    while len(survivors) > remaining_size:
        worst = pg.hypervolume(survivors).least_contributor(refer_point)
        survivors.pop(worst)
    return np.array(survivors)
def remove_dominated(result):
    """Return only the non-dominated rows of `result` (2-D ndarray of points)
    as a new ndarray."""
    n = len(result)
    keep = [True] * n
    # Mark every point that some other point dominates.
    for a in range(n):
        for b in range(n):
            if is_dominate(result[a, :], result[b, :]):
                keep[b] = False
    return np.array([result[i, :] for i in range(n) if keep[i]])
def is_dominate(x, y):
    """
    Check whether x dominates y (minimisation): x <= y component-wise with
    at least one strictly smaller component.

    Parameters
    ----------
    x: list or ndarray
    y: list or ndarray

    Returns
    -------
    True if x dominates y, False otherwise.
    """
    strictly_better = False
    for xi, yi in zip(x, y):
        if xi > yi:
            return False
        if xi < yi:
            strictly_better = True
    return strictly_better
def get_weighted_vectors(M, H):
    """Generate the simplex-lattice weight vectors (stars-and-bars scheme).

    Parameters
    ----------
    M: The number of objectives
    H: The number of divisions per objective

    Returns
    -------
    ndarray of shape (C(M+H-1, M-1), M); every row sums to 1 and each
    component is a multiple of 1/H.
    """
    weight_matrix = []
    # Each choice of M-1 divider positions in 1..M+H-1 encodes one vector.
    for dividers in itertools.combinations(range(1, M + H), M - 1):
        previous = 0
        row = []
        for divider in dividers:
            row.append((divider - previous - 1) / H)
            previous = divider
        row.append((M + H - previous - 1) / H)
        weight_matrix.append(row)
    return np.array(weight_matrix)
def mutation(x, rate, upper_bound, lower_bound):
    """Uniform reset mutation, in place: each gene is independently replaced
    with a uniform sample from [lower_bound, upper_bound) with probability
    `rate`.  Returns the same (mutated) sequence object.
    """
    span = upper_bound - lower_bound
    for i, _ in enumerate(x):
        if random.random() < rate:
            x[i] = lower_bound + random.random() * span
    return x
def crossover(a, b):
    """Arithmetic (midpoint) crossover: the child is the parents' average."""
    return 0.5 * (a + b)
def imporve(x):
    """Local-improvement hook; currently the identity function.

    (The misspelled name is kept as-is because callers use it.)
    """
    return x
def random_diff_int(a, b, n):
    """
    Generate n distinct random integers from the inclusive range [a, b].

    Parameters
    ----------
    a: Range start
    b: Range end (inclusive); a and b are swapped if a > b
    n: How many integers to draw

    Returns
    -------
    List of n distinct integers from [a, b] in random order.
    (Special case kept from the original: if a == b, returns [a] * n,
    i.e. the distinctness requirement is waived.)

    Raises
    ------
    ValueError: if n <= 0, or n exceeds the range size b - a + 1.
    """
    if n <= 0:
        raise ValueError("n should be positive")
    if a > b:
        a, b = b, a
    if a == b:
        return [a for x in range(n)]
    if n > b - a + 1:
        # Fix: the original message claimed the opposite of the condition
        # being checked ("n should be bigger than b - a").
        raise ValueError("n must not exceed the range size b - a + 1")
    generate_list = list(range(a, b + 1))
    random.shuffle(generate_list)
    return generate_list[:n]
def calculate_n(n, m):
    """Find the smallest simplex-lattice weight-vector count >= n for m
    objectives.

    Returns (count, h) where count = C(h + m - 1, m - 1) is the number of
    weight vectors produced by h divisions per objective.
    """
    def n_choose_r(total, r):
        r = min(r, total - r)
        if r == 0:
            return 0  # quirk kept from the original (mathematically C(t, 0) == 1)
        numerator = functools.reduce(operator.mul, range(total, total - r, -1))
        denominator = functools.reduce(operator.mul, range(1, r + 1))
        return numerator // denominator

    h = 1
    count = 0
    while count < n:
        h += 1
        count = n_choose_r(h + m - 1, m - 1)
    return count, h
def plot_2D_multi(x, m, name):
    """Scatter-plot the 2-D points in `x` (ndarray, one point per row) in
    blue, plus m reference points evenly spaced on the unit circle in red;
    saves a PDF under ../Testing_result/ (does not call plt.show)."""
    theta = math.pi * 2 / m
    x_p = []
    y_p = []
    result = []  # NOTE(review): never used -- dead local
    for i in range(m):
        x_p.append(math.sin(i * theta))
        y_p.append(math.cos(i * theta))
    fig = plt.figure(figsize=(9, 9), dpi=200)
    ax = fig.add_subplot(111)
    plt.xlim(-1.1, 1.1)
    plt.ylim(-1.1, 1.1)
    plt.xlabel("$x$", fontsize=20)
    plt.ylabel("$y$", fontsize=20)
    ax.scatter(x[:, 0], x[:, 1])
    ax.scatter(x_p[:], y_p[:], c='r')
    plt.savefig("../Testing_result/{0}_node{1}_1.pdf".format(
        name, len(x)), format='pdf', bbox_inches='tight')
def plot_3D_weight_vector(EP, weight_vectors, name):
    # Print 3D graph with EP and weight vectors
    # The enlarge ratio of weight_vectors should change
    # NOTE(review): `enlarge` is set but never used, and the weight vectors
    # are never actually drawn -- only their count appears in the title;
    # confirm whether drawing them was intended.
    enlarge = 1
    fig = plt.figure(figsize=(12, 9), dpi=200)
    ax = fig.add_subplot(111, projection='3d')
    plt.title('%s point number: %d weight vector number: %d' %
              (name, len(EP), len(weight_vectors)))
    temp = np.array(EP)
    ax.scatter(temp[:, 0], temp[:, 1], temp[:, 2], c='b')
    ax.view_init(20, 45)
def plot_parallel(polt_data, m, name):
    """Show a parallel-coordinates plot of the m-objective points in
    `polt_data` (one point per row; axes labelled 1..m)."""
    df = pd.DataFrame(data=polt_data, columns=[i + 1 for i in range(m)])
    # pandas' parallel_coordinates needs a class column; use a dummy one.
    df['0'] = pd.Series(1, index=df.index)
    fig = plt.figure(figsize=(12, 9), dpi=200)
    plt.title('%s point number: %d' % (name, len(polt_data)))
    parallel_coordinates(df, class_column='0')
    plt.show()
def plot_3D(EP, name):
    """Scatter-plot the 3-objective points in EP and save the figure as a
    PDF under ../Testing_result/ (does not call plt.show)."""
    fig = plt.figure(figsize=(9, 9), dpi=200)
    ax = fig.add_subplot(111, projection='3d')
    # Transparent panes for a cleaner print-friendly figure.
    ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0))
    ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0))
    ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0))
    ax.tick_params(axis='x', labelsize=20)
    ax.tick_params(axis='y', labelsize=20)
    ax.tick_params(axis='z', labelsize=20)
    plt.xlim(0, 1.2)
    plt.ylim(0, 1.2)
    ax.set_zlim(0, 1.2)
    plt.xlabel("Minimize $f_1(x)$", fontsize=25, labelpad=15)
    plt.ylabel("Minimize $f_2(x)$", fontsize=25, labelpad=15)
    ax.set_zlabel("Minimize $f_3(x)$", fontsize=25, labelpad=15)
    temp = np.array(EP)
    ax.scatter(temp[:, 0], temp[:, 1], temp[:, 2], c='b')
    ax.view_init(20, 45)
    plt.savefig("../Testing_result/{0}_node{1}_1.pdf".format(
        name, len(EP)), format='pdf', bbox_inches='tight')
    plt.close()
def write_file(write_str):
    # Append write_str (plus a tab and newline) to the shared results file.
    # NOTE(review): path is relative to the current working directory and
    # must already exist.
    with open("../Testing_result/Data_result.txt", "a") as f:
        f.write(write_str + "\t\n")
def write_addr(write_str, addr):
    """Append `write_str` verbatim to the file at path `addr` (created if
    missing)."""
    with open(addr, "a") as out:
        out.write(write_str)
def plot_seq(data):
    """For each row of `data` ([name, dimension, v0, v1, ...]) plot the
    values (normalised to percent of their max) against the percentage of
    run time; saves ../Testing_result/record.pdf."""
    def decide_shape(i):
        # Distinct line/marker style per series (supports up to 9 series).
        shape = ['-', '*-', '^-', 'o-', 'v-', '<-', '>--', '*--', '^--']
        return shape[i]
    data = np.array(data)
    # First two columns are labels; the rest are the recorded counts.
    length = len(data[0]) - 3
    x_axis = [1 / length * i * 100 for i in range(length + 1)]
    fig, ax = plt.subplots(figsize=(18, 9), dpi=200)
    plt.xlim(-2, 100)
    plt.ylim(0, 105)
    ax.tick_params(axis='x', labelsize=20)
    ax.tick_params(axis='y', labelsize=20)
    plt.xlabel("Percentage of recorded solution (%)", fontsize=25)
    plt.ylabel("Percentage of total solution (%)", fontsize=25)
    for i in range(len(data)):
        # NOTE(review): np.float is removed in modern numpy; use float.
        data_line = data[i][2:].astype(np.float)
        max_num = max(data_line)
        data_line = (data_line / max_num) * 100
        line_shape = decide_shape(i)
        ax.plot(x_axis, data_line, line_shape, label="{0} dimension:{1}".format(
            data[i][0], data[i][1]), markersize=15, markerfacecolor='none')
        print(data[i][0])
    legend = ax.legend(loc='lower right', shadow=True, fontsize=20)
    plt.savefig("../Testing_result/record.pdf",
                format='pdf', bbox_inches='tight')
def read_seq(addr):
    """Read a record file where each line holds ';'-separated fields with a
    trailing ';'.

    Returns a list of lists of field strings, one inner list per line; the
    text after the last ';' on each line (usually just the newline) is
    dropped.

    Fix: removed the unused `line` local and the redundant pre-assignment
    of `result`.
    """
    with open(addr, "r") as f:
        lines = f.readlines()
    # [:-1] drops the fragment after the final ';' (typically '\n').
    return [line.split(';')[:-1] for line in lines]
| true |
04de579fd23cfd2eb8624e213ac35d6c5a7167cc | Python | Oxidiz3/cse210-project | /forthequeen/__main__.py | UTF-8 | 506 | 2.609375 | 3 | [
"MIT"
] | permissive | # program entry point
#from game.director import Director
from game.start_view import StartView
import data.constants as constants
import arcade
def start_game():
    """Create the arcade window and show the start screen."""
    window = arcade.Window(constants.SCREEN_WIDTH, constants.SCREEN_HEIGHT,
                           constants.SCREEN_TITLE)
    window.show_view(StartView())
    arcade.run()
if __name__ == "__main__":
    # Script entry point: launch the game loop.
    start_game()
45dc9544879c142a09617b1edbd0f7c5e1942b7b | Python | wertt89/web-scraping-challenge | /scrape_mars.py | UTF-8 | 3,869 | 3.171875 | 3 | [] | no_license | from bs4 import BeautifulSoup
import pandas as pd
from splinter import Browser
import time
def init_browser():
    """Return a visible (non-headless) splinter Chrome browser instance."""
    # NOTE(review): the chromedriver path is hard-coded to one developer's
    # machine -- consider making it configurable (env var / argument).
    executable_path = {'executable_path': '/Users/konstajokipii/NU_BOOTCAMP/web-scraping-challenge/chromedriver'}
    return Browser('chrome', **executable_path, headless=False)
def scrape():
    """Scrape Mars news, a facts table and hemisphere images; return them
    all in one dict with keys news_title, news_paragraph, mars_fact_table
    and hemisphere_images.  Opens and quits its own browser session."""
    browser = init_browser()
    mars_dict ={}
    # Visiting Mars news URL
    NasaMarsNews = 'https://mars.nasa.gov/news/'
    browser.visit(NasaMarsNews)
    # Creating HTML object
    html = browser.html
    # Parsing HTML w/ BeautifulSoup
    soup = BeautifulSoup(html, 'html.parser')
    # Getting the first item in a list <li> under an unordered list <ul>
    latest_news = soup.find('li', class_='slide')
    # Saving the news item under a <div> container
    news_title = latest_news.find('div', class_='content_title').text
    # Saving the text paragraph in the container with an 'article_teaser_body' class
    news_paragraph = latest_news.find('div', class_='article_teaser_body').text
    # *********************************************************************************************************************
    # Visiting Mars facts URL
    MarsFacts = 'https://space-facts.com/mars/'
    browser.visit(MarsFacts)
    # Creating HTML object
    html = browser.html
    # Using pandas to scrape the table
    table = pd.read_html(html)
    # Slicing the table into a dataframe
    marsfacts_df = table[0]
    marsfacts_df.columns =['Description', 'Value']
    # Converting dataframe to HTML table and passing parameters for styling
    html_table = marsfacts_df.to_html(index=False, header=False, border=0, classes="table table-sm table-striped font-weight-light")
    # *********************************************************************************************************************
    # Visiting USGS Astrogeology URL
    MarsHemisphere = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(MarsHemisphere)
    # Creating HTML object
    html = browser.html
    # Parsing HTML w/ BeautifulSoup
    soup = BeautifulSoup(html, 'html.parser')
    # Retrieing parent containers for each hemisphere
    hemispheres = soup.find_all('div', class_="item")
    # Creating empty list for storing the python dictionary
    hemisphere_image_data = []
    # For loop through each container
    for hemisphere in range(len(hemispheres)):
        # Using Splinter to click on all links to get image data
        hemisphere_url = browser.find_by_css("a.product-item h3")
        hemisphere_url[hemisphere].click()
        # Creating a BeautifulSoup object with the image URL
        image_url = browser.html
        image_soup = BeautifulSoup(image_url, 'html.parser')
        # Storing prefix URL for fullsize image links
        prefix_url = 'https://astrogeology.usgs.gov'
        # Saving full resolution images into variable
        suffix_url = image_soup.find('img', class_="wide-image")['src']
        # Joining URLs
        full_image_url = prefix_url + suffix_url
        # Saving image title into a variable
        image_title = browser.find_by_css('.title').text
        # Adding key value pairs to python dictionary and appending to list
        hemisphere_image_data.append({"title": image_title, "img_url": full_image_url})
        # Returning to main page
        browser.back()
    # Closing browser session
    browser.quit()
    # *********************************************************************************************************************
    mars_dict = {
        "news_title": news_title,
        "news_paragraph": news_paragraph,
        "mars_fact_table": html_table,
        "hemisphere_images": hemisphere_image_data
    }
    # Returning results
    return mars_dict
57160519ea32f4ecf12ca99d6ce3309b6634992e | Python | saltedfish666/ClassicalAlgorithm | /0-1背包(动态规划)/python/0-1bag.py | UTF-8 | 1,090 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Knapsack
#n is the number of items,c is bag's capacity
def bag(n, c, w, v):
    """0/1 knapsack dynamic-programming table.

    n: number of items; c: capacity; w: weights; v: values.
    Returns the (n+1) x (c+1) table m where m[i][j] is the best total value
    achievable with the first i items and capacity j.
    """
    m = [[0] * (c + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        weight, value = w[i - 1], v[i - 1]
        for j in range(1, c + 1):
            best = m[i - 1][j]  # skip item i
            if j >= weight:
                with_item = m[i - 1][j - weight] + value
                if with_item > best:
                    best = with_item
            m[i][j] = best
    return m
#Traceback
def show(n, c, w, m):
    """Trace the DP table `m` (from bag) back and print the maximum value
    followed by the chosen item numbers.

    Fix: converted Python 2 `print` statements to print() calls so the
    function runs under Python 3.
    """
    print('Maximum value:', m[n][c])
    x = [0 for i in range(n)]
    j = c
    for i in reversed(range(1, n + 1)):
        # Item i was taken iff it improved the value at capacity j.
        if m[i][j] > m[i - 1][j]:
            x[i - 1] = 1
            j -= w[i - 1]
    print('item:')
    for i in range(n):
        if x[i]:
            print('NO.', i + 1)
if __name__=='__main__':
    '''
    n=5
    c=10
    w=[2,2,6,5,4]
    v=[6,3,5,4,6]
    '''
    # NOTE(review): this driver is Python 2 (raw_input, print statements,
    # list-returning map); it will not run under Python 3 as written.
    # Input file format: line 1 = n, line 2 = c, line 3 = weights,
    # line 4 = values (space-separated).
    path=raw_input('please input the path:')
    with open(path,'r') as f:
        temp=f.readlines()
    for i in range(len(temp)):
        temp[i]=temp[i].rstrip('\n')
    n=int(temp[0])
    c=int(temp[1])
    w=map(int,temp[2].split(' '))
    v=map(int,temp[3].split(' '))
    m=bag(n,c,w,v)
    print m
    '''
    for i in range(n+1): #换行打印
        print m[i]
        print '\n'
    '''
    show(n,c,w,m)
    #os.system("pause")
4d258679852f1c2507cf92fe77f86592fce7f02b | Python | Parya1112009/mytest | /end.py | UTF-8 | 88 | 2.546875 | 3 | [] | no_license | import re
p= re.compile("bad$")
match = p.search("cats are bad")
print match.group()
| true |
cfc30265cc853f82ccf4161c308ce8da69b5abcb | Python | jborelo/pylogtst | /mod2.py | UTF-8 | 464 | 2.671875 | 3 | [] | no_license | import logging
from plog import plog
# Module-level logger; report its configuration at import time via plog.
lgr = logging.getLogger(__name__)
plog(f"--- In mod2 logger name={lgr.name}, level={lgr.level}, parent logger: {str(lgr.parent)}")
# ---------------------------------
def sayHi():
    """Emit one line describing this module's logger state, both through
    plog and through the logger itself (at DEBUG level)."""
    message = f"------------- m2m2m2m2 logger name: {lgr.name} level={lgr.level}, parent logger: {str(lgr.parent)}"
    plog(message)
    lgr.debug(message)
if __name__ == "__main__":
    # Emit a startup line through both the logger and plog.
    lgr.info("Starting main in mod2")
    plog("Starting main in mod2")
a317bb9c04bea4fda88fa2f6b27934ec730d10e5 | Python | jabbber/worktools | /zypper_join.py | UTF-8 | 1,309 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
#author: zwj
#version: 0.2
#this script can join zypper pch and pa
import sys
import re
# Parse the saved `zypper pch` output: skip the four header lines and map
# each patch base name to (version, full line), keeping the highest version.
with open('pch.txt') as pchfile:
    pch = pchfile.read()
patchnames = {}
for line in pch.split("\n")[4:]:
    if re.match("^\s*$",line):  # skip blank lines
        continue
    patchname = re.findall("^[\w-]+\s+\|\s+\w+-((?:[\w+]+-)*[a-zA-Z][\w+]+)(?:-\d\w*)*-(\d+)\s+\|", line)
    if len(patchname) == 1:
        if patchname[0]:
            patch = patchname[0][0]
            version = patchname[0][1]
            # NOTE(review): versions are compared as strings, so "9" > "10";
            # confirm zypper's version fields are comparable this way.
            if patch in patchnames:
                if version > patchnames[patch][0]:
                    patchnames[patch] = (version,line)
            else:
                patchnames[patch] = (version,line)
            continue
    sys.stderr.write("This line in 'pch' can not match:\n'>>'%s\n"%line)
# Parse the saved `zypper pa` output and print each package line joined with
# the matching patch line collected above (Python 2 print statement).
with open('pa.txt') as pafile:
    pa = pafile.read()
for line in pa.split("\n")[4:]:
    if re.match("^\s*$",line):  # skip blank lines
        continue
    updatename = re.findall(".+\s+\|\s+.+\s+\|\s+((?:[\w+]+-)*[\w+]+)\s+\|", line)
    if len(updatename) == 1:
        if updatename[0]:
            #print updatename[0]
            if updatename[0] in patchnames:
                # pop() so each patch is printed at most once
                print line + patchnames.pop(updatename[0])[1]
            continue
    sys.stderr.write("This line in 'pa' can not match:\n'>>'%s\n"%line)
| true |
837a2dfbd419a22612a84464f8a5262d45414ff3 | Python | AlexSChapman/InteractiveProgramming | /model_tic_tac.py | UTF-8 | 12,251 | 3.484375 | 3 | [] | no_license | """
Model for Super Tic Tac tic_tac_toe
@Author Alex Chapman
3/6/17
"""
import ast
D = 'steelblue'
class model(object):
    """
    class which handles the actual functioning of the server. Includes
    handling new users connecting, as well as clients sending commands
    such as color changes and click values.
    Attributes:
        socket_list - List of addresses that have connected to server
        users       - List of user objects (at most two players)
        correlation - Dictionary mapping each address to its user object
    """
    def __init__(self):
        """Returns model object"""
        # list of addresses
        self.socket_list = []
        # list of user objects
        self.users = []
        # dictionary relating users to addresses
        self.correlation = {}
    def save_val(self, thruput, member):
        """
        function call which allows data to be passed into the game field.
        Commands:   'hb' - heartbeat message sent on loop to sync
                    'uCOLOR' - sets the color of the user that calls it
                    '[x, y]' - click value of given
        Input:  thruput -> command or whatnot to be processed
                member -> address from which the message originated
        """
        # figures out which user sent the message
        to_use = self.correlation[member]
        # if the command is the color set
        if thruput[0] == 'u':
            to_use.set_color(thruput[1:])
            print(member, thruput[1:])
        # on the click command check if it is that user's turn
        elif to_use.turn_state:
            try:
                # if hb is appended to the end of the click, remove it
                if 'hb' in thruput and len(thruput) > 2:
                    thruput = thruput[:len(thruput)-2]
                # interperet point input as list
                ls = ast.literal_eval(thruput)
                # passes the point value down to the user and returns if
                # the turn was successfully passed
                if to_use.input(ls):
                    # change whos turn it is
                    self.change_turns()
                    print('Turns Changed.')
                else:
                    print('Invalid Click. Still your turn.')
            # literal_eval raises on anything that is not a Python literal
            # (e.g. a bare 'hb' heartbeat), which is silently ignored.
            except (SyntaxError, ValueError) as e:
                if(thruput == 'hb'):
                    pass
                else:
                    print('didnt work' + thruput)
                pass
        else:
            # not this user's turn; heartbeats are ignored quietly
            if(thruput == 'hb'):
                pass
            else:
                print('not your turn ' + str(member))
            pass
    def update_socket_list(self, thruput):
        """
        called upon clients connection, creates and establishes list of
        addresses which are in turn paired with user objects.
        inputs: thruput -> address of most recently connected machine
        """
        if thruput not in self.socket_list:  # if address not already listed
            self.socket_list.append(thruput)
            # print(len(self.socket_list))
            # the first user has a predefined 'x' char (now irrelevant)
            if len(self.socket_list) == 1:
                # first user to connect gets first turn
                self.users.append(user('x', True))
                # adds new user object to the dictionary so it can be found
                self.correlation[thruput] = self.users[0]
            # see above
            elif len(self.socket_list) == 2:
                self.users.append(user('o', False))
                self.correlation[thruput] = self.users[1]
            else:
                print('Invalid number of connections: 2 players expected.')
                print('Ignoring Extra Connection')
        # print(self.users)
    # changes the turns, excecution above
    def change_turns(self):
        """Toggles turn_state on every registered user."""
        for u in self.users:
            u.flip_turn()
class tic_tac_toe(object):
    """
    Class which handles the creation of game objects for each of the 9
    major game tiles. Contains a 3x3 matrix of color vals, with the
    default color being defined at the top of the file.
    Attributes:
        focus - whether or not the game matrix is in focus (playable)
        state - 3x3 matrix holding the values of that specific board
    """
    def __init__(self, f, x=0, y=0):
        """Returns new tic_tac_toe object.

        NOTE(review): the x and y parameters are accepted but never used.
        """
        self.focus = f
        self.state = [[D, D, D],
                      [D, D, D],
                      [D, D, D]]
    def __str__(self):
        """Returns string representation of tic_tac_tow object"""
        to_return = (str(self.state[0]) + '\n' +
                     str(self.state[1]) + '\n' +
                     str(self.state[2]) + '\n')
        return to_return
    def add_char(self, char, i, j):
        """
        Function which handles an attempted click: i.e. adding a new color
        to the game matrix if the box clicked on is empty.
        """
        # Identity test works because every empty cell holds the one
        # module-level default string object D.
        if self.state[i][j] is D:
            self.state[i][j] = char
            print('Clicked Valid Box.')
            return True
        else:
            print('Box already taken.')
            return False
    def on_click(self, x, y, char):
        """
        Called by the user on click of the specific sub-game board.
        Inputs:
            x    - decimal representation of click on the bigger board
            y    - decimal representation of click on the bigger board
            char - color string of calling user
        Returns True when the click was applied (and the big-board focus
        was moved to the matching tile), False otherwise.
        """
        # x and y arrive as offsets within this tile (range [0, 1/3)),
        # so scaling by 3 maps them back onto [0, 1).
        x = x * 3
        y = y * 3
        # converts the decimal point to row and column clicked
        j = helper_func(x)
        i = helper_func(y)
        # only excecutes if the square clicked is unoccupied and in focus
        if self.add_char(char, i, j):
            # changes the big-board focus to the equivalent of the square clkd.
            change_focus(int(j), int(i))
            return True
        else:
            return False
    def check_if_won(self):
        """
        Method to check the state matrix of the board to evaluate wheteher
        or not it has been won. If won, it returns the winning string. If
        not, returns the default string (D)
        """
        # checks if the rows have a winner
        for row in self.state:
            if(row[0] == row[1] and row[0] == row[2]):
                return row[0]
        # checks if the columns have a winner
        for column in range(0, len(self.state)):
            one = self.state[0][column]
            two = self.state[1][column]
            three = self.state[2][column]
            if(one == two and one == three):
                return one
        # checks if the upper-left bottom-right diagonal has a winner
        one = self.state[0][0]
        two = self.state[1][1]
        three = self.state[2][2]
        if(one == two and one == three):
            return one
        # checks if the other diagonal has a winner
        # (the center cell `two` is deliberately reused from above)
        one = self.state[0][2]
        three = self.state[2][0]
        if(one == two and one == three):
            return one
        return D
    def get_to_send(self):
        """
        Returns nested list to send to clients
        Format:
        [[ROW],[ROW],[ROW]]
            L ROW = [focus, < 9 length list of all square values >]
        """
        ls = []
        ls.append(self.focus)
        # print(self.state)
        if self.state is not None:
            for i in self.state:
                for q in i:
                    ls.append(q)
        return ls
class user(object):
    """
    User object designed to create a profile for each client that connects
    to the game. Also handles all high-level click functionality.
    Attributes:
        char       - string representing the color of the user, set by
                     uCOLOR command upon client initialization.
        turn_state - Boolean representing whether or not it is this user's
                     turn.
    """
    def __init__(self, char=D, turn_state=False):
        """Returns a user object"""
        # sets the color to default, to be set by first client operation.
        self.char = char
        self.turn_state = turn_state
    def input(self, ls):
        """
        Accepts the point value of the clicked position as a decimal fro 0-1
        in both the x and y direction. Calculates which large tile is
        clicked on, and then calls that tile with the same clicked point.
        Input: point of click in the form [x, y]
        Returns True when the click landed on the focused tile and was
        accepted by it, False otherwise.
        """
        click_x = ls[0]
        click_y = ls[1]
        # converts the decimal point to row and column clicked
        i = helper_func(click_x)
        j = helper_func(click_y)
        # print('box clicked on:', i, ':', j)
        # print('box in focus:', get_board_focus())
        if main_board[j][i].focus:
            # HIGHEST LEVEL CLICK MANAGEMENT
            # attempts to add color to the clicked-on board;
            # subtracting i/3 and j/3 converts to tile-local coordinates
            return main_board[j][i].on_click(click_x - i/3,
                                             click_y - j/3,
                                             self.char)
        else:
            return False
    def flip_turn(self):
        # Toggle whose turn it is.
        self.turn_state = not self.turn_state
    def set_color(self, color):
        # Called when the client sends the 'uCOLOR' command.
        self.char = color
def helper_func(val):
    """Map a fraction in [0, 1] to a 3-cell index (0, 1 or 2).

    Values below 1/3 fall in cell 0, values above 2/3 in cell 2, and
    everything in between (boundaries included) in cell 1.
    """
    if val < 1 / 3:
        return 0
    return 2 if val > 2 / 3 else 1
def change_focus(row, column):
    """Make (column, row) the only sub-board in focus.

    Both indices must be integers in the range 0-2.
    """
    # Clear every board's focus flag, then raise exactly one of them.
    for board in (cell for line in main_board for cell in line):
        board.focus = False
    main_board[column][row].focus = True
    print('focus on:', column, row)
# Initializes the base variable which holds the game boards needed:
# a 3 x 3 grid of sub-games; only the top-left one starts in focus.
main_board = [[tic_tac_toe(True), tic_tac_toe(False), tic_tac_toe(False)],
              [tic_tac_toe(False), tic_tac_toe(False), tic_tac_toe(False)],
              [tic_tac_toe(False), tic_tac_toe(False), tic_tac_toe(False)]]
def check_if_won(board):
    """
    Function used to check if the main board has been won.
    Inputs:     board - main game board to be checked (3 x 3 grid of
                        tic_tac_toe sub-games).
    Output:     string representing the winning color, or the default
                string D when nobody has won the big board yet.
    """
    # Reduce each sub-game to its winner color (D while still undecided).
    # BUGFIX: the original appended the bound method `v.check_if_won`
    # instead of calling it, and never appended `ts` to `ls`, so `ls`
    # stayed empty and the function unconditionally returned D.
    ls = []
    for i in board:
        ts = []
        for v in i:
            ts.append(v.check_if_won())
        ls.append(ts)
    # checks if the rows have a winner
    for row in ls:
        if(row[0] == row[1] and row[0] == row[2]):
            return row[0]
    # checks if the columns have a winner
    for column in range(0, len(ls)):
        one = ls[0][column]
        two = ls[1][column]
        three = ls[2][column]
        if(one == two and one == three):
            return one
    # checks if the upper-left bottom-right diagonal has a winner
    one = ls[0][0]
    two = ls[1][1]
    three = ls[2][2]
    if(one == two and one == three):
        return one
    # checks if the other diagonal has a winner
    # (the shared center cell `two` is reused from above)
    one = ls[0][2]
    three = ls[2][0]
    if(one == two and one == three):
        return one
    return D
def get_board_state():
    """Serialise the whole grid into nested lists suitable for sending to clients.

    Format: one inner list per row, each element being the output of a
    sub-board's get_to_send().
    """
    return [[cell.get_to_send() for cell in row] for row in main_board]
def get_board_focus():
    """Return 'row:col' of the focused sub-board, or 'none in focus'."""
    for r, line in enumerate(main_board):
        for c, cell in enumerate(line):
            if cell.focus == 1:
                return '{}:{}'.format(r, c)
    return 'none in focus'
| true |
7cd64663da81096afd56943aec788151fc685fae | Python | ChaseTheodos/CSC-132-Final | /facial_req.py | UTF-8 | 2,478 | 2.84375 | 3 | [] | no_license | #! /usr/bin/python3
from imutils.video import VideoStream
from imutils.video import FPS
import face_recognition
import imutils
import pickle
import time
import cv2
#Initialize 'name'
name = "unknown"
#use encodings.pickle file model created from train_model.py (kept in current working directory)
pickledInfo = "encodings.pickle"
print("[INFO] loading encodings + face detector...")
# NOTE(review): the file handle from open() is never closed; consider a
# `with` block.
data = pickle.loads(open(pickledInfo, "rb").read())
# initialize the video stream and allow the camera sensor to warm up
vs = VideoStream(usePiCamera=True).start()
time.sleep(2.0)
# loop over frames from the video file stream
while True:
    # grab the frame from the threaded video stream and resize it
    # to 500px (to speedup processing)
    frame = vs.read()
    frame = imutils.resize(frame, width=500)
    # Detect the face boxes
    boxes = face_recognition.face_locations(frame)
    # compute the facial embeddings for each face bounding box
    encodings = face_recognition.face_encodings(frame, boxes)
    names = []
    # loop over the encoded data
    for encoding in encodings:
        # attempt to match each face
        matches = face_recognition.compare_faces(data["encodings"],
            encoding)
        name = "Unknown" #if face is not recognized, then print Unknown
        # check to see if we have found a match
        if True in matches:
            # find the indexes of all matched faces then initialize a
            # dictionary to count the total number of times each face
            # was matched
            matchedIdxs = [i for (i, b) in enumerate(matches) if b]
            counts = {}
            # loop over the matched indexes and maintain a count for
            # each recognized face
            for i in matchedIdxs:
                name = data["names"][i]
                counts[name] = counts.get(name, 0) + 1
            # determine the recognized face
            name = max(counts, key=counts.get)
            #print name test
            # NOTE(review): `name != name` is always False, so this
            # branch (and its print) is dead code.
            if name != name:
                name = name
                print(name)
        # update the list of names
        names.append(name)
    # loop over the recognized faces
    for ((top, right, bottom, left), name) in zip(boxes, names):
        # draw the predicted face name on the image - color is in BGR
        cv2.rectangle(frame, (left, top), (right, bottom),
            (0, 255, 225), 2)
        y = top - 15 if top - 15 > 15 else top + 15
        cv2.putText(frame, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX,
            .8, (0, 255, 255), 2)
    # display the image to our screen
    cv2.imshow("Facial Recognition is Running", frame)
    key = cv2.waitKey(1) & 0xFF
    # quit when 'q' key is pressed
    if key == ord("q"):
        break
# cleanup
cv2.destroyAllWindows()
vs.stop()
| true |
055e92ea1738563ef2bf850d45c7a8c07845e617 | Python | theelous3/noio_ws | /tests/minimal_client_server.py | UTF-8 | 4,266 | 3 | 3 | [] | no_license | '''
This file outlays the basic requirements for creating a
websocket client and a websocket server, dealing strictly with
the websocket frame side of the protocol. This means, that it
does not detail the use of the opening handshake utilities
nor does it deal with adding extensibility.
'''
import noio_ws as ws
class WsClient:
    # Minimal demo websocket client driving a noio_ws CLIENT connection.
    # NOTE(review): this is illustrative pseudocode — the `socket` module
    # is never imported in this file, and incoming_message_manager below
    # is defined without `self` while being invoked as a method.
    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.ws_conn = ws.Connection(role='CLIENT')
    def main(self, location):
        self.sock.connect(location)
        # spawn a task for sending messages
        self.incoming_message_manager()
        # spawn a task for incoming messages
    def incoming_message_manager():
        # Dispatch incoming websocket events forever.
        while True:
            event = self.next_event()
            if event.type == 'text':
                ...
                # display the message or whatever
            elif event.type == 'binary':
                ...
                # do some binary-ish work
            elif event.type == 'ping':
                ...
                # send the pong, like:
                # self.ws_send(event.message, 'pong')
            elif event.type == 'pong':
                ...
                # confirmed, connection isn't pointless :)
            elif event.type == 'close':
                ...
                # feel free to get the status code or w/e
                # then send your side of the close:
                # self.ws_send('', 'close')
                # at this point, we can exit the client.
    def ws_send(self, message, type, fin=True, status_code=None):
        # Frame the payload through the noio_ws state machine and send it.
        self.sock.sendall(
            self.ws_conn.send(ws.Data(message, type, fin, status_code)))
    def next_event(self):
        # Pump socket bytes into the connection until a full event is ready.
        while True:
            event = self.ws_conn.next_event()
            if event is ws.Information.NEED_DATA:
                self.ws_conn.recv(self.sock.recv(2048))
                continue
            return event
# Demo driver: connect a client (blocks; the endpoint is a placeholder).
websock_client = WsClient()
websock_client.main(('some_location.com', 80))
class WsServer:
    # Minimal demo accept-loop server; each accepted socket is handed to a
    # WsClientHandler.  NOTE(review): `socket` is never imported here.
    def __init__(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    def main(self, location):
        self.sock.bind(location)
        self.sock.listen(5)
        while True:
            client_sock, addr = self.sock.accept()
            # Here we spawn something to handle a connected client,
            # like an async task or threaded handler.
            handler = WsClientHandler(client_sock, addr)
            handler.main()
class WsClientHandler:
    """Serve the websocket frame protocol for a single connected client."""
    def __init__(self, sock, addr):
        self.sock = sock
        self.addr = addr
        self.ws_conn = ws.Connection(role='SERVER')
    def main(self):
        # here we'll just spawn an x for the message manager
        self.incoming_message_manager()
    def incoming_message_manager(self):
        # Dispatch incoming websocket events forever.
        # BUGFIX: the original defined this method without `self` (while
        # calling it as self.incoming_message_manager()) and started the
        # dispatch chain with `elif`, which is a SyntaxError.
        while True:
            event = self.next_event()
            if event.type == 'text':
                ...
                # print the message or whatever
            elif event.type == 'binary':
                ...
                # do some binary-ish things
            elif event.type == 'ping':
                ...
                # send the pong, like:
                # self.ws_send(event.message, 'pong')
            elif event.type == 'pong':
                ...
                # confirmed, connection isn't pointless :)
            elif event.type == 'close':
                ...
                # feel free to get the status code or w/e
                # then send your side of the close:
                # self.ws_send('', 'close')
                # at this point, we can exit the client.
    def ws_send(self, message, type, fin=True, status_code=None):
        # BUGFIX: `ws_conn` was referenced as a bare (undefined) name.
        self.sock.sendall(
            self.ws_conn.send(ws.Data(message, type, fin, status_code)))
    def next_event(self):
        # Pump socket bytes into the connection until a full event is ready.
        while True:
            event = self.ws_conn.next_event()
            if event is ws.Information.NEED_DATA:
                self.ws_conn.recv(self.sock.recv(2048))
                continue
            return event
# Demo driver: start the accept loop (blocks; the endpoint is a placeholder).
websock_server = WsServer()
websock_server.main(('some_location.com', 80))
| true |
89297074f74f855faf515de2b715e7dbc7c65503 | Python | heedong0612/June-30-day-LeetCoding-Challenge | /Day9/isSubsequence_Donghee.py | UTF-8 | 301 | 2.984375 | 3 | [] | no_license | # Donghee Lee
# 06/09/2020
class Solution:
    """LeetCode 392 variant: decide whether `t` occurs as a subsequence of `s`."""

    def isSubsequence(self, t: str, s: str) -> bool:
        # Scan `s` once; each `in` test advances the iterator past the
        # match, consuming the characters of `t` greedily and in order.
        remaining = iter(s)
        return all(ch in remaining for ch in t)
| true |
6bb1cf487a5529dfff6d8fe2412dc443e2affc99 | Python | No-bb-just-do-it/timetask_spider | /china_canton_railway/utils/city_data.py | UTF-8 | 321 | 2.90625 | 3 | [
"MIT"
] | permissive |
def get_city_dict():
    """Load the city-name -> city-id mapping from the GB2312-encoded data file."""
    city_dict = {}
    # Build a dict mapping each city name to its numeric id; each line of
    # the file is expected to be "city,id" (malformed lines raise IndexError).
    with open('城市编号id.txt', 'r', encoding='GB2312')as f:
        city_data = f.read()
    for each_info in city_data.split('\n'):
        city_dict[each_info.split(',')[0]] = each_info.split(',')[1]
    return city_dict | true
3a601ca0e9f2ab1c4a0a659e86049117b67c9e58 | Python | j16949/Programming-in-Python-princeton | /1.4/27.py | UTF-8 | 629 | 2.5625 | 3 | [] | no_license | import sys
def had(ho, n):
    """Double an n x n boolean Hadamard-style matrix `ho` to size 2n x 2n.

    The result is [[H, H], [H, not H]] in block form, where `not H`
    negates every entry of H.
    """
    size = 2 * n
    hn = [[None] * size for _ in range(size)]
    for i in range(size):
        for j in range(size):
            cell = ho[i % n][j % n]
            # Only the bottom-right quadrant is negated.
            hn[i][j] = (not cell) if (i >= n and j >= n) else cell
    return hn
#n=eval(sys.argv[1])
n=8  # target half-size; builds Hadamard matrices up to order 2*n
g=[[] for i in range(2*n+1)]  # g[k] holds the k x k matrix (powers of two only)
g[1]=[[True]]
i=1
# Repeatedly double the matrix: fills g[2], g[4], ... g[2*n].
while i<=n:
    g[i*2]=had(g[i],i)
    i*=2
print('g{}:\n{}'.format(2*n,g[2*n]))
| true |
7f190c4ff92d169392f3f4ac23d204e4eb8337ba | Python | vipinkr1212/module_learning | /linearsearch.py | UTF-8 | 395 | 3.5625 | 4 | [] | no_license | pos = -1 # pos- position, pos=-1 means till now it does not have any position
def search(list, n):
    """Linear scan for `n` in `list`.

    On success, records the match index in the module-level global `pos`
    and returns True; otherwise returns False.
    """
    for index, item in enumerate(list):
        if item == n:
            globals()['pos'] = index
            return True
    return False
# Demo driver: search for 9 and report its 1-based position.
list =[5,8,4,6,9,2]
n = 9
if search(list,n):
    # `pos` (module-level) was set by search(); report it 1-based.
    print("found at",pos+1)
else:
    print("not found")
| true |
9cf58417b38cdc18a4b59a60c02080844588c643 | Python | Patrickctyyx/my_dl_utils | /postprocessing/det_visualize_from_txt.py | UTF-8 | 1,509 | 3.203125 | 3 | [] | no_license | """
将 txt 转换为 COCO JSON
txt 格式为:文件名 x01,y01,x02,y02,c0 x11,y11,x12,y12,c1 ...
@Author: patrickcty
@filename: det_visualize_from_txt.py
"""
import os
import cv2
import numpy as np
from dataset.make_dir_if_not_exist import make_dir_if_not_exists
def visualize_from_txt(txt_file, target_dir):
    """
    Visualize the images described by a txt annotation file.

    Each line of `txt_file` is "<image path> x1,y1,x2,y2,c ..."; every
    image is loaded, its boxes are drawn, and the annotated copy is saved
    under `target_dir` with the same base name.
    """
    make_dir_if_not_exists(target_dir)
    with open(txt_file, 'r') as f:
        for line in f:
            total = line.split()
            filepath = total[0]  # first token is the image path
            anns = total[1:]     # remaining tokens are box annotations
            anns = list(map(str_ann_to_list, anns))
            img = cv2.imread(filepath)
            img = draw_bbox(img, anns)
            basename = os.path.basename(filepath)
            cv2.imwrite(os.path.join(target_dir, basename), img)
            print('Save {} successfully.'.format(basename))
def str_ann_to_list(strann):
    """Convert an "x1,y1,x2,y2,c" string into the integer list [x1, y1, x2, y2, c]."""
    return [int(field) for field in strann.split(',')]
def draw_bbox(image, bboxes):
    """
    Draw every bounding box on `image` in red (BGR) and return the image.

    bboxes: a list whose elements are [x_min, y_min, x_max, y_max, cls_id];
    cls_id is currently ignored when drawing.
    """
    image_h, image_w, _ = image.shape
    for i, bbox in enumerate(bboxes):
        coor = np.array(bbox[:4], dtype=np.int32)
        bbox_color = [0, 0, 255]  # BGR red
        # Line thickness scales with the image size.
        bbox_thick = int(0.6 * (image_h + image_w) / 600)
        c1, c2 = (coor[0], coor[1]), (coor[2], coor[3])
        cv2.rectangle(image, c1, c2, bbox_color, bbox_thick)
    return image | true
453b8e0b0eb3cca15768e64e7fe1b6a330859b4e | Python | Kargina/stepic_python_intro | /4/4_6_1.py | UTF-8 | 3,398 | 3.140625 | 3 | [] | no_license | from calendar import TextCalendar
class BookIOErrors(Exception):
    # Base class for every custom error raised by the Page/Book types below.
    pass
class PageNotFoundError(BookIOErrors):
    # Raised by Book indexing when a 1-based page index is out of range.
    pass
class TooLongTextError(BookIOErrors):
    # Raised by Page.__add__ when appending would exceed max_sign characters.
    pass
class PermissionDeniedError(BookIOErrors):
    # Not raised in this module; presumably reserved for write protection.
    pass
class NotExistingExtensionError(BookIOErrors):
    # Not raised in this module; presumably reserved for file-extension checks.
    pass
class Page:
    """One page of text, comparable to other pages/strings by LENGTH.

    All six comparison operators compare text lengths (not contents) and
    raise TypeError for operands that are neither Page nor str.  Note
    that `page + text` mutates the left page in place and returns it.
    """
    def __init__(self, text=None, max_sign=2000):
        self._text = text if text is not None else ''
        self.max_sign = max_sign  # capacity limit enforced by __add__
    def __str__(self):
        return self._text
    def _len_of(self, obj):
        # Shared operand validation for the comparison protocol.
        if isinstance(obj, (Page, str)):
            return len(obj)
        raise TypeError
    def __eq__(self, obj):
        return len(self._text) == self._len_of(obj)
    def __lt__(self, obj):  # <
        return len(self._text) < self._len_of(obj)
    def __le__(self, obj):  # <=
        return len(self._text) <= self._len_of(obj)
    def __ne__(self, obj):  # !=
        return len(self._text) != self._len_of(obj)
    def __gt__(self, obj):  # >
        return len(self._text) > self._len_of(obj)
    def __ge__(self, obj):  # >=
        return len(self._text) >= self._len_of(obj)
    def __len__(self):
        return len(self._text)
    def __add__(self, obj):
        if not isinstance(obj, (Page, str)):
            raise TypeError
        # Refuse to grow past the page capacity.
        if len(self._text) + len(obj) > self.max_sign:
            raise TooLongTextError
        self._text += str(obj)
        return self
    def __radd__(self, obj):
        # Supports "prefix" + page; returns a plain string.
        if not isinstance(obj, (Page, str)):
            raise TypeError
        return str(obj) + self._text
class Book:
    """A titled, ordered collection of pages addressed with 1-BASED indices.

    Comparison operators compare page COUNTS and are only defined between
    two Book instances (TypeError otherwise).  Indexing outside
    1..len(book) raises PageNotFoundError.
    """
    def __init__(self, title, content=None):
        self.title = title
        self._content = content if content is not None else []
    def _count_of(self, obj):
        # Shared operand validation for the comparison protocol.
        if isinstance(obj, Book):
            return len(obj)
        raise TypeError
    def __eq__(self, obj):
        return len(self._content) == self._count_of(obj)
    def __lt__(self, obj):  # <
        return len(self._content) < self._count_of(obj)
    def __le__(self, obj):  # <=
        return len(self._content) <= self._count_of(obj)
    def __ne__(self, obj):  # !=
        return len(self._content) != self._count_of(obj)
    def __gt__(self, obj):  # >
        return len(self._content) > self._count_of(obj)
    def __ge__(self, obj):  # >=
        return len(self._content) >= self._count_of(obj)
    def __getitem__(self, index):
        # Pages are numbered from 1, hence the shifted bounds check.
        if not 1 <= index <= len(self):
            raise PageNotFoundError
        return self._content[index - 1]
    def __setitem__(self, index, value):
        if not 1 <= index <= len(self):
            raise PageNotFoundError
        self._content[index - 1] = value
    def __len__(self):
        return len(self._content)
| true |
5c3d66c8cabe5653e01dd5c93467a47294cd4360 | Python | kartikeya-shandilya/project-euler | /python/129.py | UTF-8 | 382 | 3 | 3 | [] | no_license |
def gcd_10(n):
    """Return True when n is coprime with 10 (i.e. has no factor of 2 or 5)."""
    return n % 2 != 0 and n % 5 != 0

def long_div(n):
    """Project Euler 129: A(n), the length of the smallest repunit divisible by n.

    Returns 0 when no repunit can be divisible by n (gcd(n, 10) != 1).
    """
    if not gcd_10(n):
        return 0
    remainder, length = 1, 1
    # Keep appending a digit '1' to the repunit (mod n) until it divides evenly.
    while remainder:
        remainder = (10 * remainder + 1) % n
        length += 1
    return length
if __name__ == "__main__":
    # Search for the first n > 10**6 with A(n) > 10**6 (Project Euler 129).
    # NOTE: Python 2 code (xrange / print statement).
    for i in xrange(10**6, 10**6+10000):
        k = long_div(i)
        if k>10**6:
            print i, k
            break
| true |
88ad72bab616ad133584ca364c84423f4d41091e | Python | Kupcor4000/Python_Algorithms | /Searching_binary.py | UTF-8 | 1,721 | 4.125 | 4 | [] | no_license | #lista musi byc posortowana!
def binary_search(list, target):
    """Binary search over a sorted list; return the index of `target` or None.

    Elements are converted with int() before comparing, so the list may
    hold numeric strings.  Prints a (Polish) trace line for every probe.
    """
    low, high = 0, len(list) - 1
    step = 0
    while low <= high:
        midpoint = (low + high) // 2  # integer midpoint (rounded down)
        step += 1
        print("Przejscie: {}, midpoint: {} , aktualna wartosc na midpoincie: {}, first: {}, last: {}".format(step, midpoint, list[midpoint], low, high))
        value = int(list[midpoint])
        if value == target:
            return midpoint
        if value < target:
            low = midpoint + 1  # discard everything left of the midpoint
        else:
            high = midpoint - 1  # discard everything right of the midpoint
    return None
def verify(wartosc):
    """Report a search result: `wartosc` is a found index or None."""
    if wartosc is None:
        print("Szukana liczba nie znajduje sie w zbiorze")
    else:
        print("Szukana liczba znajduje sie na pozycji: {}".format(wartosc))
# bubble sort (elements are compared numerically via int())
def sorting(a_list):
    """Bubble-sort `a_list` in place (ascending, numeric comparison) and return it."""
    swapped = True
    while swapped:
        swapped = False
        for j in range(len(a_list) - 1):
            if int(a_list[j]) > int(a_list[j + 1]):
                a_list[j], a_list[j + 1] = a_list[j + 1], a_list[j]
                swapped = True
    return a_list
# Interactive driver: read numbers, sort them, then binary-search one of them.
lista = input("Please enter some number seperate by coma: ")
lista = lista.split(" ")
print("Posortowana lista: {}".format(sorting(lista)))
ta = int(input("Ktora z podanych wyzej liczb chcesz znaleźć? "))
wynik = binary_search(sorting(lista),ta)
verify(wynik)
| true |
5f3157a4561fb730700bf19c4e936bde0aee1340 | Python | cmavian/pfb2019 | /Pearson-lecture/BLAST_script.py | UTF-8 | 659 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python3
# to run ./BLAST_script.py blast*.txt because it has a for loop to run many files
import sys
import re
# Tab-separated BLAST outfmt-7 column names for one hit line.
field_str = 'qseqid, sseqid, percid, alen, mismat, gaps, q_start, q_end, s_start, s_end, evalue, bits'
fields = field_str.split(', ')
hitlist = []
for hit_file in sys.argv[1:]: # : opens all files
    with open(hit_file, 'r') as fin:
        for line in fin:
            if line[0]=='#':  # skip BLAST comment/header lines
                continue
            hit_data = dict(zip(fields, line.strip('\n').split('\t')))
            hit_data['file'] = hit_file
            hitlist.append(hit_data)
    # BUGFIX: a stray `break` here stopped the loop after the first file,
    # defeating the script's stated purpose of processing every file given
    # on the command line.  (The unused `hit_files` list was removed too.)
# Print a summary column subset for every collected hit.
for hit in hitlist:
    print('\t'.join([hit[x] for x in ('file','percid','alen','evalue')]))
| true |
94098d2d183f58bb81f5f5bb0ffd45485b114d97 | Python | yukai-chiu/CodingPractice | /LeetCode/Explore/Binary Search Tree/Validate Binary Search Tree.py | UTF-8 | 2,850 | 3.65625 | 4 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
# My first try: bottom-up check that also propagates each subtree's max/min.
class Solution:
    def is_valid(self, node, max_val, min_val):
        # Returns (is_bst, subtree_max, subtree_min); (False, -1, -1) on failure.
        # max_val/min_val carry the running extremes handed down by the caller.
        if not node.left and not node.right:
            return (True, max(max_val, node.val), min(min_val, node.val))
        max_l = max_r = max_val
        min_l = min_r = min_val
        l = r = True
        if node.left:
            l, max_l, min_l = self.is_valid(node.left, max_val, min_val)
        if node.right:
            r, max_r, min_r = self.is_valid(node.right, max_val, min_val)
        if not l or not r:
            return (False, -1, -1)
        else:
            # Classic condition: everything on the left is smaller and
            # everything on the right is larger than the current node.
            if max_l < node.val and node.val < min_r:
                return (True, max(max_r, node.val), min(min_l, node.val))
            else:
                return (False, -1, -1)
    def isValidBST(self, root: TreeNode) -> bool:
        if not root:
            return True
        ans, _max, _min = self.is_valid(root, float('-inf'), float('inf'))
        print(ans, _max, _min)  # debug output left in by the author
        return ans
#Recursive
#Time: O(n)
#Space: O(n)
class Solution:
    """Recursive bounds-propagation BST check (exclusive upper/lower limits)."""
    def is_valid(self, node, max_val, min_val):
        # An empty subtree never violates its bounds.
        if not node:
            return True
        # The node's value must lie strictly inside (min_val, max_val).
        if not (min_val < node.val < max_val):
            return False
        # Each side inherits one bound and tightens the other with node.val.
        return (self.is_valid(node.left, node.val, min_val)
                and self.is_valid(node.right, max_val, node.val))
    def isValidBST(self, root: TreeNode) -> bool:
        return self.is_valid(root, float('inf'), float('-inf')) if root else True
#Iterative
#Time: O(n)
#Space: O(n)
class Solution:
    """Iterative DFS version of the BST bounds check (explicit stack)."""
    def isValidBST(self, root: TreeNode) -> bool:
        pending = [(root, float('inf'), float('-inf'))] if root else []
        while pending:
            node, hi, lo = pending.pop()
            # Every value must lie strictly between its inherited bounds.
            if not (lo < node.val < hi):
                return False
            # Children inherit one bound and tighten the other with node.val.
            if node.left:
                pending.append((node.left, node.val, lo))
            if node.right:
                pending.append((node.right, hi, node.val))
        return True
# Inorder traversal: a tree is a valid BST iff its inorder value sequence
# is strictly increasing.
class Solution:
    def isValidBST(self, root: TreeNode) -> bool:
        if not root:
            return True
        stack = []
        last = float('-inf')  # value of the previously visited node
        while stack or root:
            # Walk down to the leftmost unvisited node.
            while root:
                stack.append(root)
                root = root.left
            root = stack.pop()
            # Equal or decreasing values break the strict BST ordering.
            if root.val <= last:
                return False
            last = root.val
            root = root.right
        return True | true
ad34a12a56d2417dc7a86f1905be8ef0d981181a | Python | Algo-Goer/master_course_Arch_2019 | /Project1/MIPSim.py | UTF-8 | 27,436 | 2.796875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# @Time : 2019/10/24 9:42 下午
# @Author : Zhou Liang
# @File : MIPSim.py
# @Software: PyCharm
# On my honor, I have neither given nor received unauthorized aid on this assignment.
import sys
START_ADDRESS = 256  # address of the first instruction
INSTRUCTION_SEQUENCE = {}  # decoded instruction sequence
INSTRUCTION_COUNT = 0  # number of instructions
MACHINE_WORD_LENGTH = 32  # machine word length in bits
MIPS_STATUS = {
    'CycleNumber': 0,  # cycle count of the currently executing instruction
    'PC': START_ADDRESS - 4,  # program counter (current instruction)
    'NPC': START_ADDRESS,  # program counter (next instruction)
    'Registers': [0] * 32,  # the 32 MIPS general-purpose registers
    'Data': {},  # simulated data memory
    'END': False,  # set when the program has finished running
}
def twos_complement_to_value(input_str):
    """Convert a two's-complement binary string to its signed integer value.

    Works for ANY bit width.  BUGFIX: the original hard-coded
    `range(31)` magnitude bits, so the 18-bit sign-extended branch
    offsets passed in by the disassembler raised IndexError whenever the
    offset was negative.
    """
    value = int(input_str, 2)
    if input_str[0] == '1':  # sign bit set -> subtract 2**width
        value -= 1 << len(input_str)
    return value
def value_to_twos_complement(value):
    """Convert a signed integer to its two's-complement binary string.

    `value` must be representable in MACHINE_WORD_LENGTH-bit two's
    complement (-2**31 .. 2**31 - 1 for the default 32-bit word).
    """
    global MACHINE_WORD_LENGTH
    # Reducing modulo 2**N maps negative values onto their two's-complement
    # bit pattern directly; this also covers the -2**31 edge case that the
    # original hand-rolled invert-and-add-one code special-cased.
    return format(value % (1 << MACHINE_WORD_LENGTH),
                  '0{}b'.format(MACHINE_WORD_LENGTH))
def shift(mode, shamt, input_value):
    """Shift `input_value` by `shamt` bits at the two's-complement bit level.

    mode: 'SLL' (logical left), 'SRL' (logical right) or 'SRA'
    (arithmetic right: the sign bit is replicated).  Returns the signed
    value of the shifted pattern, or None for an unknown mode.
    """
    binary_str = value_to_twos_complement(input_value)
    if shamt == 0:
        # BUGFIX: with shamt == 0 the original sliced binary_str[:-0],
        # which is the EMPTY string and crashed SRL/SRA.  A zero shift
        # leaves the value unchanged.
        if mode in ('SLL', 'SRL', 'SRA'):
            return twos_complement_to_value(binary_str)
        return None
    if mode == 'SLL':  # logical left shift: zeros enter on the right
        binary_str = binary_str[shamt:] + '0' * shamt
        return twos_complement_to_value(binary_str)
    elif mode == 'SRL':  # logical right shift: zeros enter on the left
        binary_str = '0' * shamt + binary_str[:-shamt]
        return twos_complement_to_value(binary_str)
    elif mode == 'SRA':  # arithmetic right shift: sign bit is replicated
        sign = binary_str[0]
        binary_str = sign * shamt + binary_str[:-shamt]
        return twos_complement_to_value(binary_str)
def disassembler_instruction(input_file_name, output_file_name,
                             start_address):
    """Disassembler pass 1: decode 32-bit machine words into instructions.

    Reads binary lines from *input_file_name*, decodes each word until a
    BREAK instruction, and writes "<bits>\\t<address>\\t<text>" lines to
    *output_file_name* (disassembly.txt).  Returns a tuple
    (instruction_count, {address: instruction_text}).
    """
    instruction_count = 0
    instruction_sequence = {}
    current_address = start_address
    input_file_pointer = open(input_file_name)
    output_file_pointer = open(output_file_name, 'w')
    input_line = input_file_pointer.readline()
    while input_line:  # the instruction segment ends at BREAK
        # print(input_line[0:32], end='\t')
        if input_line[0:2] == '01':  # Category-1
            if input_line[2:6] == '0000':  # J target
                instruction = 'J #' + str(int(input_line[6:32] + '00', 2))
            elif input_line[2:6] == '0001':  # JR rs
                instruction = 'JR ' + 'R' + str(int(input_line[6:11], 2))
            elif input_line[2:6] == '0010':  # BEQ rs, rt, offset
                sign_extend_offset = input_line[16:32] + '00'  # 18-bit sign-extended offset
                if sign_extend_offset[0] == '0':  # sign bit 0: offset is positive
                    target_offset = int(sign_extend_offset, 2)
                elif sign_extend_offset[0] == '1':  # sign bit 1: offset is negative
                    target_offset = twos_complement_to_value(sign_extend_offset)
                instruction = 'BEQ ' + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2)) + ", " \
                              + '#' + str(target_offset)
            elif input_line[2:6] == '0011':  # BLTZ rs, offset
                sign_extend_offset = input_line[16:32] + '00'  # 18-bit sign-extended offset
                if sign_extend_offset[0] == '0':  # sign bit 0: offset is positive
                    target_offset = int(sign_extend_offset, 2)
                elif sign_extend_offset[0] == '1':  # sign bit 1: offset is negative
                    target_offset = twos_complement_to_value(sign_extend_offset)
                instruction = 'BLTZ ' + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + '#' + str(target_offset)
            elif input_line[2:6] == '0100':  # BGTZ rs, offset
                sign_extend_offset = input_line[16:32] + '00'  # 18-bit sign-extended offset
                if sign_extend_offset[0] == '0':  # sign bit 0: offset is positive
                    target_offset = int(sign_extend_offset, 2)
                elif sign_extend_offset[0] == '1':  # sign bit 1: offset is negative
                    target_offset = twos_complement_to_value(sign_extend_offset)
                instruction = 'BGTZ ' + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + '#' + str(target_offset)
            elif input_line[2:6] == '0101':  # BREAK
                instruction = 'BREAK'
            elif input_line[2:6] == '0110':  # SW rt, offset(base)
                if input_line[16] == '0':  # sign bit 0: offset is positive
                    decimal_offset = int(input_line[16:32], 2)
                elif input_line[16] == '1':  # sign bit 1: offset is negative
                    decimal_offset = twos_complement_to_value(input_line[16:32])
                instruction = 'SW ' + 'R' + str(int(input_line[11:16], 2)) + ", " \
                              + str(decimal_offset) + "(R" + str(int(input_line[6:11], 2)) + ')'
            elif input_line[2:6] == '0111':  # LW rt, offset(base)
                if input_line[16] == '0':  # sign bit 0: offset is positive
                    decimal_offset = int(input_line[16:32], 2)
                elif input_line[16] == '1':  # sign bit 1: offset is negative
                    decimal_offset = twos_complement_to_value(input_line[16:32])
                instruction = 'LW ' + 'R' + str(int(input_line[11:16], 2)) + ", " \
                              + str(decimal_offset) + "(R" + str(int(input_line[6:11], 2)) + ')'
            elif input_line[2:6] == '1000':  # SLL rd, rt, sa [Shift Word Left Logical]
                instruction = 'SLL ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2)) + ", " \
                              + '#' + str(int(input_line[21:26], 2))
            elif input_line[2:6] == '1001':  # SRL rd, rt, sa [Shift Word Right Logical]
                instruction = 'SRL ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2)) + ", " \
                              + '#' + str(int(input_line[21:26], 2))
            elif input_line[2:6] == '1010':  # SRA rd, rt, sa [Shift Word Right Arithmetic]
                instruction = 'SRA ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2)) + ", " \
                              + '#' + str(int(input_line[21:26], 2))
            elif input_line[2:6] == '1011':  # NOP(No Operation)
                instruction = 'NOP'
        elif input_line[0:2] == '11':  # Category-2
            if input_line[2:6] == '0000':  # ADD rd, rs, rt
                instruction = 'ADD ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0001':  # SUB
                instruction = 'SUB ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0010':  # MUL rd, rs, rt [rd ← rs × rt]
                instruction = 'MUL ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0011':  # AND rd, rs, rt
                instruction = 'AND ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0100':  # OR rd, rs, rt
                instruction = 'OR ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                             + 'R' + str(int(input_line[6:11], 2)) + ", " \
                             + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0101':  # XOR rd, rs, rt
                instruction = 'XOR ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0110':  # NOR
                instruction = 'NOR ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '0111':  # SLT
                instruction = 'SLT ' + 'R' + str(int(input_line[16:21], 2)) + ", " \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + 'R' + str(int(input_line[11:16], 2))
            elif input_line[2:6] == '1000':  # ADDI rt, rs, immediate
                decimal_imm = int(input_line[16:32], 2) if input_line[16] == '0' else twos_complement_to_value(
                    input_line[16:32])
                instruction = 'ADDI ' + 'R' + str(int(input_line[11:16], 2)) + ', ' \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + '#' + str(decimal_imm)
            elif input_line[2:6] == '1001':  # ANDI rt, rs, immediate
                decimal_imm = int(input_line[16:32], 2) if input_line[16] == '0' else twos_complement_to_value(
                    input_line[16:32])
                instruction = 'ANDI ' + 'R' + str(int(input_line[11:16], 2)) + ', ' \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + '#' + str(decimal_imm)
            elif input_line[2:6] == '1010':  # ORI rt, rs, immediate
                decimal_imm = int(input_line[16:32], 2) if input_line[16] == '0' else twos_complement_to_value(
                    input_line[16:32])
                instruction = 'ORI ' + 'R' + str(int(input_line[11:16], 2)) + ', ' \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + '#' + str(decimal_imm)
            elif input_line[2:6] == '1011':  # XORI rt, rs, immediate
                decimal_imm = int(input_line[16:32], 2) if input_line[16] == '0' else twos_complement_to_value(
                    input_line[16:32])
                instruction = 'XORI ' + 'R' + str(int(input_line[11:16], 2)) + ', ' \
                              + 'R' + str(int(input_line[6:11], 2)) + ", " \
                              + '#' + str(decimal_imm)
        # print(input_line[0:32] + '\t' + str(current_address) + '\t' + instruction)
        output_file_pointer.write(input_line[0:32] + '\t' + str(current_address) + '\t' + instruction + '\n')
        instruction_count = instruction_count + 1
        instruction_sequence[current_address] = instruction
        current_address = current_address + 4
        if instruction == 'BREAK':
            break
        input_line = input_file_pointer.readline()
    output_file_pointer.close()
    input_file_pointer.close()
    return instruction_count, instruction_sequence
def disassembler_memory(input_file_name, output_file_name,
                        instruction_count):
    """Disassembler pass 2: decode the data segment.

    The words that follow the first *instruction_count* lines are treated
    as signed two's-complement data, appended to the disassembly file and
    collected into an {address: value} dict, which is returned.
    """
    memory_space = {}
    address = START_ADDRESS + instruction_count * 4
    with open(input_file_name) as src, open(output_file_name, 'a') as dst:
        # Skip the instruction segment; everything after it is data.
        for raw_line in src.readlines()[instruction_count:]:
            word_value = twos_complement_to_value(raw_line)
            # print(raw_line[0:32] + '\t' + str(address) + '\t' + str(word_value))
            dst.write(raw_line[0:32] + '\t' + str(address) + '\t' + str(word_value) + '\n')
            memory_space[address] = word_value
            address += 4
    return memory_space
def instruction_operation(instruction, old_status):
    """Execute one decoded instruction and return the updated machine state.

    NOTE(review): `temp_status = old_status` aliases (does not copy) the
    incoming state dict, so the caller's dict is mutated in place; the
    return value is the same object.
    """
    temp_status = old_status
    temp_status['CycleNumber'] = temp_status['CycleNumber'] + 1
    temp_status['PC'] = temp_status['NPC']
    temp_status['NPC'] = temp_status['PC'] + 4  # default for non-branch instructions: PC = PC + 4
    op = instruction.split(' ')[0]
    if op == 'J':  # J target
        target = instruction[3:]
        temp_status['NPC'] = int(target)
    elif op == 'JR':  # JR rs [PC ← rs]
        rs_index = int(instruction[4:])
        temp_status['NPC'] = temp_status['Registers'][rs_index]
    elif op == 'BEQ':  # BEQ rs, rt, offset [if rs = rt then branch]
        rs_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        offset = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        if temp_status['Registers'][rs_index] == temp_status['Registers'][rt_index]:
            temp_status['NPC'] = temp_status['NPC'] + offset
    elif op == 'BLTZ':  # BLTZ rs, offset [if rs < 0 then branch]
        rs_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        offset = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        if temp_status['Registers'][rs_index] < 0:
            temp_status['NPC'] = temp_status['NPC'] + offset
    elif op == 'BGTZ':  # BGTZ rs, offset [if rs > 0 then branch]
        rs_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        offset = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        if temp_status['Registers'][rs_index] > 0:
            temp_status['NPC'] = temp_status['NPC'] + offset
    elif op == 'BREAK':
        temp_status['END'] = True  # end of program
    elif op == 'SW':  # SW rt, offset(base) [memory[base+offset] ← rt]
        rt_index = int(instruction[3:].replace(" ", "").split(',')[0][1:])
        comma_index = int(instruction[3:].replace(" ", "").index(','))
        left_parenthesis_index = int(instruction[3:].replace(" ", "").index('('))
        offset = int(instruction[3:].replace(" ", "")[comma_index + 1:left_parenthesis_index])
        base = int(instruction[3:].replace(" ", "")[left_parenthesis_index + 2:-1])
        temp_status['Data'][offset + temp_status['Registers'][base]] = temp_status['Registers'][rt_index]
    elif op == 'LW':  # LW rt, offset(base) [rt ← memory[base+offset]]
        rt_index = int(instruction[3:].replace(" ", "").split(',')[0][1:])
        comma_index = int(instruction[3:].replace(" ", "").index(','))
        left_parenthesis_index = int(instruction[3:].replace(" ", "").index('('))
        offset = int(instruction[3:].replace(" ", "")[comma_index + 1:left_parenthesis_index])
        base = int(instruction[3:].replace(" ", "")[left_parenthesis_index + 2:-1])
        temp_status['Registers'][rt_index] = temp_status['Data'][offset + temp_status['Registers'][base]]
    elif op == 'SLL':  # SLL rd, rt, sa [rd ← rt << sa]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        sa = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = shift('SLL', sa, temp_status['Registers'][rt_index])
    elif op == 'SRL':  # SRL rd, rt, sa [rd ← rt >> sa]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        sa = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = shift('SRL', sa, temp_status['Registers'][rt_index])
    elif op == 'SRA':  # SRA rd, rt, sa [rd ← rt >> sa (arithmetic)]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        sa = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = shift('SRA', sa, temp_status['Registers'][rt_index])
    elif op == 'NOP':
        pass  # no operation
    elif op == 'ADD':  # ADD rd, rs, rt [rd ← rs + rt]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = temp_status['Registers'][rs_index] + temp_status['Registers'][rt_index]
    elif op == 'SUB':  # SUB rd, rs, rt [rd ← rs - rt]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = temp_status['Registers'][rs_index] - temp_status['Registers'][rt_index]
    elif op == 'MUL':  # MUL rd, rs, rt [rd ← rs × rt]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = temp_status['Registers'][rs_index] * temp_status['Registers'][rt_index]
    elif op == 'AND':  # AND rd, rs, rt [rd ← rs AND rt] (bitwise AND)
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = temp_status['Registers'][rs_index] & temp_status['Registers'][rt_index]
    elif op == 'OR':  # OR rd, rs, rt [rd ← rs OR rt] (bitwise OR)
        rd_index = int(instruction[3:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[3:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[3:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = temp_status['Registers'][rs_index] | temp_status['Registers'][rt_index]
    elif op == 'XOR':  # XOR rd, rs, rt [rd ← rs XOR rt] (bitwise XOR)
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = temp_status['Registers'][rs_index] ^ temp_status['Registers'][rt_index]
    elif op == 'NOR':  # NOR rd, rs, rt [rd ← rs NOR rt] (bitwise NOR)
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = ~ (temp_status['Registers'][rs_index] | temp_status['Registers'][rt_index])
    elif op == 'SLT':  # SLT rd, rs, rt [rd ← (rs < rt)]
        rd_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        rt_index = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rd_index] = 1 if temp_status['Registers'][rs_index] < temp_status['Registers'][
            rt_index] else 0
    elif op == 'ADDI':  # ADDI rt, rs, immediate [rt ← rs + immediate]
        rt_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        imm = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rt_index] = temp_status['Registers'][rs_index] + imm
    elif op == 'ANDI':  # ANDI rt, rs, immediate [rt ← rs AND immediate]
        rt_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        imm = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rt_index] = temp_status['Registers'][rs_index] & imm
    elif op == 'ORI':  # ORI rt, rs, immediate [rt ← rs OR immediate]
        rt_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        imm = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rt_index] = temp_status['Registers'][rs_index] | imm
    elif op == 'XORI':  # XORI rt, rs, immediate [rt ← rs XOR immediate]
        rt_index = int(instruction[4:].replace(" ", "").split(',')[0][1:])
        rs_index = int(instruction[4:].replace(" ", "").split(',')[1][1:])
        imm = int(instruction[4:].replace(" ", "").split(',')[2][1:])
        temp_status['Registers'][rt_index] = temp_status['Registers'][rs_index] ^ imm
    return temp_status
def print_status(mips_status, output_file_name):  # append one cycle's state to the output file
    """Append the state of one cycle (PC, current instruction, all 32
    registers and the data memory) to *output_file_name*."""
    output_file_pointer = open(output_file_name, 'a')
    output_file_pointer.write("--------------------" + '\n')
    # print('--------------------')
    output_file_pointer.write(
        "Cycle:" + str(mips_status['CycleNumber']) + '\t' + str(mips_status['PC']) + '\t' + INSTRUCTION_SEQUENCE[
            mips_status['PC']] + '\n')
    # print("Cycle:" + str(mips_status['CycleNumber']) + '\t' + str(mips_status['PC']) + '\t' + INSTRUCTION_SEQUENCE[mips_status['PC']])
    output_file_pointer.write('\n')
    # print('')
    output_file_pointer.write("Registers" + '\n')
    # print("Registers")
    for i in range(32):  # print the state of the 32 registers, 8 per row
        if i % 8 == 0:
            # First column carries the zero-padded register label.
            if i < 9:
                output_file_pointer.write('R0' + str(i) + ':\t' + str(mips_status['Registers'][i]) + '\t')
                # print('R0' + str(i) + ':\t' + str(mips_status['Registers'][i]), end='\t')
            else:
                output_file_pointer.write('R' + str(i) + ':\t' + str(mips_status['Registers'][i]) + '\t')
                # print('R' + str(i) + ':\t' + str(mips_status['Registers'][i]), end='\t')
        elif i % 8 == 7:
            output_file_pointer.write(str(mips_status['Registers'][i]) + '\n')
            # print(str(mips_status['Registers'][i]))
        else:
            output_file_pointer.write(str(mips_status['Registers'][i]) + '\t')
            # print(str(mips_status['Registers'][i]), end='\t')
    # print("")
    output_file_pointer.write('\n')
    # print("Data")
    output_file_pointer.write("Data" + '\n')
    word_number = len(mips_status['Data'])  # number of words held in data memory
    data_start_address = sorted(mips_status['Data'])[0]
    for i in range(word_number):  # print the data memory, 8 words per row
        current_address = data_start_address + i * 4
        if i % 8 == 0:
            output_file_pointer.write(
                str(current_address) + ":" + '\t' + str(mips_status['Data'][current_address]) + '\t')
            # print(str(current_address) + ":" + '\t' + str(mips_status['Data'][current_address]), end='\t')
        elif i % 8 == 7:
            output_file_pointer.write(str(mips_status['Data'][current_address]) + '\n')
            # print(str(mips_status['Data'][current_address]))
        else:
            output_file_pointer.write(str(mips_status['Data'][current_address]) + '\t')
            # print(str(mips_status['Data'][current_address]), end='\t')
    # print('')
    output_file_pointer.write('\n')
    output_file_pointer.close()
def run():
    """Run the simulator until BREAK, appending each cycle's state to
    simulation.txt."""
    global MIPS_STATUS
    # Start from an empty output file (print_status appends).
    out = open('simulation.txt', 'w')
    out.truncate()
    out.close()
    while not MIPS_STATUS['END']:
        next_instruction = INSTRUCTION_SEQUENCE[MIPS_STATUS['NPC']]
        MIPS_STATUS = instruction_operation(next_instruction, MIPS_STATUS)
        print_status(MIPS_STATUS, 'simulation.txt')
if __name__ == '__main__':
    # sys.argv[1] is expected to be the input machine-code file name
    INSTRUCTION_COUNT, INSTRUCTION_SEQUENCE = disassembler_instruction(sys.argv[1], 'disassembly.txt', START_ADDRESS)
    MIPS_STATUS['Data'] = disassembler_memory(sys.argv[1], 'disassembly.txt', INSTRUCTION_COUNT)
    # Hard-coded input file variant (kept for local debugging):
    # INSTRUCTION_COUNT, INSTRUCTION_SEQUENCE = disassembler_instruction('sample.txt', 'disassembly.txt', START_ADDRESS)
    # MIPS_STATUS['Data'] = disassembler_memory(sys.argv[1], 'disassembly.txt', INSTRUCTION_COUNT)
    # print(INSTRUCTION_COUNT)
    # print(INSTRUCTION_SEQUENCE)
    # print(MIPS_STATUS['Data'])
    # print("\t")
    run()
| true |
ff500959386d2cb9ef8a5a4444cb3935610e8386 | Python | GuangYao-Dou/C | /studytest/hello.py | UTF-8 | 692 | 2.90625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
figsize(12.5, 4)
import scipy.stats as stats
a = np.arange(16)
poi = stats.poisson
lambda_ = [1.5, 4.25]
colors = ["#348ABD", "#A60628"]
plt.bar(a, poi.pmf(a, lambda_[0]), color=colors[0], label="$\lambda = %.1f$" % lambda_[0], alpha=0.60,
edgecolor=colors[0], lw="3")
plt.bar(a, poi.pmf(a, lambda_[1]), color=colors[1],
label="$\lambda = %.1f$" % lambda_[1], alpha=0.60,
edgecolor=colors[1], lw="3")
plt.xticks(a + 0.4, a)
plt.legend()
plt.ylabel("Probability of $k$")
plt.xlabel("$k$")
plt.title("Probability mass function of a Poisson random variable,\
differing \$\lambda$ values");
| true |
255142b4c3a8d2b88563b7d1b46b3d5be9aa69d3 | Python | katryo/tfidf_with_sklearn | /web_page.py | UTF-8 | 1,827 | 3.046875 | 3 | [] | no_license | import requests
import cchardet
import re
import utils
import pdb
from term import Term
class WebPage():
    """A fetched web page: raw HTML, extracted plain text and term counts."""

    def __init__(self, url=''):
        self.url = url
        self.terms = {}  # word -> Term instance

    def fetch_html(self):
        """Download the page; on connection failure fall back to an empty body."""
        try:
            response = requests.get(self.url)
            self.set_html_body_with_cchardet(response)
        except requests.exceptions.ConnectionError:
            self.html_body = ''

    def set_html_body_with_cchardet(self, response):
        """Decode the response body using the encoding cchardet detects."""
        response.encoding = cchardet.detect(response.content)['encoding']
        self.html_body = response.text

    def remove_html_tags(self):
        """Strip markup from html_body, leaving plain text in self.text.

        The substitutions run in a fixed order; the generic tag pattern
        is applied last.
        """
        text = re.sub(';\n', '', self.html_body)
        text = re.sub(r'<script.*?</script>', '', text)
        text = re.sub('\t', '', text)
        text = re.sub('\n', '', text)
        text = re.sub(r'\{.*?\}', '', text)
        self.text = re.sub(r'<.*?>', '', text)

    def term_count_up(self, word):
        """Increment (creating on first sight) the Term counter for *word*."""
        term = self.terms.setdefault(word, Term(word))  # terms == {'薬': Term('薬')}
        term.count_up()

    def set_words_from_text(self):
        self.words = utils.words(self.text)

    def set_terms_from_words(self):
        # Precondition: remove_html_tags() has already populated self.text.
        self.words_count = len(self.words)
        for w in self.words:
            self.term_count_up(w)

    def set_terms_tf(self):
        """Compute each term's tf against the page's total word count."""
        for w in self.terms:
            self.terms[w].set_tf(self.words_count)
5fe67e13eda2f403c43ee9a45a093c185c8a467e | Python | thiagonache/flask-simple-api | /api/api.py | UTF-8 | 2,759 | 2.765625 | 3 | [] | no_license | import flask
from flask import request, jsonify
import psycopg2
import os
# Flask application object; DEBUG enables the interactive debugger/reloader.
app = flask.Flask(__name__)
app.config["DEBUG"] = True
@app.route('/', methods=['GET'])
def home():
    """Landing page: static HTML describing the API."""
    return '''<h1>Distant Reading Archive</h1>
<p>A prototype API for distant reading of science fiction novels.</p>'''
@app.route('/api/v1/resources/books/all', methods=['GET'])
def api_all():
    """Return every book row from the database as a JSON array."""
    return jsonify(read_db())
@app.errorhandler(404)
def page_not_found(e):
    """Custom 404 response body."""
    body = "<h1>404</h1><p>The resource could not be found.</p>"
    return body, 404
@app.route('/api/v1/resources/books/new', methods=['POST'])
def api_new():
    """Insert a new book from the JSON request body.

    Security fix: the original interpolated untrusted request fields
    straight into the SQL string (SQL injection); this version uses a
    parameterized INSERT executed through psycopg2 placeholders.
    """
    payload = request.json
    published = payload.get("published")
    author = payload["author"]
    title = payload["title"]
    sentence = payload["sentence"]
    connection = connect_db()
    cursor = connection.cursor()
    try:
        cursor.execute("INSERT INTO books VALUES(NULL,%s,%s,%s,%s)",
                       (published, author, title, sentence))
        connection.commit()
    finally:
        cursor.close()
        connection.close()
    # Response format kept identical to the original endpoint.
    return '{"published": "%s", "author": "%s", "sentence": "%s"}' % (published, author, sentence)
def connect_db():
    """Open a PostgreSQL connection configured via DB_* environment
    variables (with local-development defaults)."""
    try:
        connection = psycopg2.connect(
            user=os.getenv("DB_USER", "postgres"),
            password=os.getenv("DB_PASS", "postgres"),
            host=os.getenv("DB_HOST", "127.0.0.1"),
            port=os.getenv("DB_PORT", 5432),
            database=os.getenv("DB_SCHEMA", "books"))
    except (Exception, psycopg2.Error) as error:
        raise Exception("Error while connecting to PostgreSQL", error)
    return connection
def read_db():
    """Fetch every row of the books table.

    Returns a list of row tuples; returns [] if the query fails (the
    failure is printed, matching the original best-effort behaviour).
    """
    connection = connect_db()
    cursor = connection.cursor()
    # Print PostgreSQL Connection properties
    print(connection.get_dsn_parameters(), "\n")
    # Fix: `records` was previously unbound when the SELECT raised, so the
    # final `return` crashed with a NameError that masked the real error.
    records = []
    try:
        cursor.execute("SELECT * FROM books;")
        records = cursor.fetchall()
    except (Exception, psycopg2.Error) as error:
        print("Error while reading database data", error)
    finally:
        # closing database connection.
        if(connection):
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
    return records
def write_db(query, params=None):
    """Execute a write query against the books database.

    Backward compatible: existing callers passing a fully-formed query
    string keep working.  New callers should put %s placeholders in
    *query* and the values in *params* so psycopg2 escapes them (avoids
    SQL injection).
    """
    connection = connect_db()
    cursor = connection.cursor()
    # Print PostgreSQL Connection properties
    print(connection.get_dsn_parameters(), "\n")
    try:
        if params is None:
            cursor.execute(query)
        else:
            cursor.execute(query, params)
        connection.commit()
    except (Exception, psycopg2.Error) as error:
        print("Error to insert data on PostgreSQL", error)
    finally:
        # closing database connection.
        if(connection):
            cursor.close()
            connection.close()
            print("PostgreSQL connection is closed")
app.run()
| true |
64ae4cdb27f65dfe33f465ac98773dce818c6a53 | Python | ramonvaleriano/python- | /Livros/Livro-Introdução à Programação-Python/Capitulo 4/Exemplos 4/Listagen4_2.py | UTF-8 | 338 | 3.859375 | 4 | [
"MIT"
] | permissive | # Program: Listagen4_2.py
# Author: Ramon R. Valeriano
# Description:
# Dveloped: 23/03/2020 - 07:58
# Update:
number1 = int(input("Enter with the first number: "))
number2 = int(input("Enter with the second number: "))
if number1 > number2:
print("O primeiro número é maior!")
if number2 > number1:
print("O segundo número é maior!")
| true |
001f07c1e6ca59863d41607261f8d80264bb46dc | Python | chondromalasia/Visualization-Project | /pp-gordon/practice_classifier/tester_thing.py | UTF-8 | 688 | 2.84375 | 3 | [] | no_license | import sys
import codecs
import json
"""
This goes through the annotated folder and deletes the duplicates
"""
def main():
unique_tweets = {}
with codecs.open("annotated_for_entities.dat", 'r', 'utf-8') as file_open:
for line in file_open:
tweet = json.loads(line)
if tweet['id'] not in unique_tweets:
unique_tweets[tweet['id']] = tweet
print len(unique_tweets)
with codecs.open("annotated_for_entities_new.dat", 'w', 'utf-8') as out_file:
for thing in unique_tweets:
json.dump(unique_tweets[thing], out_file)
out_file.write("\n")
# Script entry point.
if __name__ == '__main__':
    main()
| true |
b2c473758008e7850d0e5adc588d8377a94eefbb | Python | uuep0x7/xsys.py | /xsys.py | UTF-8 | 2,403 | 2.984375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import os, string, sys
#===========================================
# Váriavies com as Cores
#===========================================
NONE="\033[0m" # Eliminar as Cores, deixar padrão)
NN="\033[0m" # Eliminar as Cores, deixar padrão)
## Cores de Fonte
K="\033[0;30m" # Black (Preto)
R="\033[0;31m" # Red (Vermelho)
G="\033[0;32m" # Green (Verde)
Y="\033[0;33m" # Yellow (Amarelo)
B="\033[0;34m" # Blue (Azul)
M="\033[0;35m" # Magenta (Vermelho Claro)
C="\033[0;36m" # Cyan (Ciano - Azul Claro)
W="\033[0;37m" # White (Branco)
## Efeito Negrito (bold) e cores
BK="\033[1;30m" # Bold+Black (Negrito+Preto)
BR="\033[1;31m" # Bold+Red (Negrito+Vermelho)
BG="\033[1;32m" # Bold+Green (Negrito+Verde)
BY="\033[1;33m" # Bold+Yellow (Negrito+Amarelo)
BB="\033[1;34m" # Bold+Blue (Negrito+Azul)
BM="\033[1;35m" # Bold+Magenta (Negrito+Vermelho Claro)
BC="\033[1;36m" # Bold+Cyan (Negrito+Ciano - Azul Claro)
BW="\033[1;37m" # Bold+White (Negrito+Branco)
## Cores de fundo (backgroud)
BGK="\[\033[40m\]" # Black (Preto)
BGR="\[\033[41m\]" # Red (Vermelho)
BGG="\[\033[42m\]" # Green (Verde)
BGY="\[\033[43m\]" # Yellow (Amarelo)
BGB="\[\033[44m\]" # Blue (Azul)
BGM="\[\033[45m\]" # Magenta (Vermelho Claro)
BGC="\[\033[46m\]" # Cyan (Ciano - Azul Claro)
BGW="\[\033[47m\]" # White (Branco)
## Cores Piscante
PCK="\033[5;30m" #Piscante+Black (Preto)
PCR="\033[5;31m" #Piscante+Red (Vermelho)
PCG="\033[5;32m" #Piscante+Green (Verde)
PCY="\033[5;33m" #Piscante+Yellow (Amarelo)
PCB="\033[5;34m" #Piscante+Blue (Azul)
PCM="\033[5;35m" #Piscante+Magenta (Vermelho Claro)
PCC="\033[5;36m" #Piscante+Cyan (Ciano - Azul Claro)
PCW="\033[5;37m" #Piscante+White (Branco)
##### Variaveis & Limpar tela
y = os.name
os.system('cls' if os.name == 'nt' else 'clear')
##### Identificação do sitema operacional
if y == "nt":
print("======================================")
print("["+BR+"+"+NN+"] Sistema Windows NT \n["+BB+"+"+NN+"] Bem vindo ao xSys")
print("======================================")
while True:
cmmd = raw_input("\n["+BB+"+"+NN+"] xSys > ")
os.system(cmmd)
else:
print("======================================")
print("["+BR+"+"+NN+"] Bem vindo ao xSys")
print("======================================")
while True:
cmmd = raw_input("\n["+PCG+"+"+NN+"] xSys > ")
os.system(cmmd)
| true |
7c3a6858f9b461cffd7f7c2583c8ca7eeeb8519f | Python | lnassabain/Neural-Net-for-Point-Classification | /utility.py | UTF-8 | 1,342 | 3.21875 | 3 | [] | no_license | import numpy as np
from scipy.special import softmax # use built-in function to avoid numerical instability
class Utility:
    """Stateless collection of activation and cost functions.

    Each activation returns a pair (A, df): the activated outputs and the
    element-wise derivative evaluated at the same inputs.
    """

    @staticmethod
    def identity(Z):
        """Identity activation; the derivative is the constant 1."""
        return Z, 1

    @staticmethod
    def tanh(Z):
        """Hyperbolic tangent activation.

        Z : non-activated outputs (2d ndarray).
        Returns (A, df) with df computed component-wise as 1 - A**2.
        """
        A = 2.0 / (1 + np.exp(-2.0 * Z)) - 1  # algebraically np.tanh(Z)
        return A, 1 - A ** 2

    @staticmethod
    def sigmoid(Z):
        """Logistic sigmoid activation; df = A * (1 - A)."""
        A = 1.0 / (1 + np.exp(-Z))
        return A, A * (1 - A)

    @staticmethod
    def relu(Z):
        """Rectified linear unit; df is 1 where Z > 0, else 0."""
        A = np.maximum(0, Z)
        return A, (Z > 0).astype(int)

    @staticmethod
    def softmax(Z):
        """Column-wise softmax (scipy's implementation avoids numerical
        instability)."""
        return softmax(Z, axis=0)

    @staticmethod
    def cross_entropy_cost(y_hat, y):
        """Average cross-entropy over the batch (columns of y_hat)."""
        n = y_hat.shape[1]
        return -np.sum(y * np.log(y_hat + 1e-9)) / n

    """
    Graphical explanation of MSE:
    https://towardsdatascience.com/coding-deep-learning-for-beginners-linear-regression-part-2-cost-function-49545303d29f
    """
    @staticmethod
    def MSE_cost(y_hat, y):
        """Mean squared error between predictions y_hat and targets y."""
        return np.square(np.subtract(y_hat, y)).mean()
e926e4d1eaf0bd5b15d27da9645a3e51d2332fb7 | Python | tjhubert/cs412 | /q3_htheodo2.py | UTF-8 | 1,317 | 2.96875 | 3 | [] | no_license | import numpy as np
import scipy as sp
import statistics as st
filename = "./data/data.online.scores"
with open(filename) as f_in:
lines = filter(None, (line.rstrip().split('\t') for line in f_in))
student_ids = []
midterm_scores = []
final_scores = []
for line in lines:
student_ids.append(line[0])
midterm_scores.append(int(line[1]))
final_scores.append(int(line[2]))
def zscore(data, mean, std):
    """Return the z-score of *data* given a mean and standard deviation."""
    deviation = data - mean
    return deviation / std
def mean_score(ls):
    """Arithmetic mean of the numbers in *ls*, always as a float.

    Idiom fix: uses the built-in sum() instead of a manual accumulation
    loop; float() preserves the original float result for integer inputs
    (and true division under Python 2).
    """
    return float(sum(ls)) / len(ls)
def sample_variance(ls):
    """Sample variance (n - 1 denominator) of the numbers in *ls*.

    Uses the same sum / sum-of-squares identity as before, but with
    built-in sum() instead of manual accumulation loops.  Raises
    ZeroDivisionError for fewer than two values, like the original.
    """
    n = len(ls)
    total = float(sum(ls))
    total_sq = float(sum(x * x for x in ls))
    return (total_sq - total * total / n) / (n - 1)
# Normalize midterm scores to z-scores using numpy's mean/std (np.std is the
# population standard deviation), then report statistics before and after.
mean = np.mean(midterm_scores)
std = np.std(midterm_scores)
midterm_z = [zscore(score, mean, std) for score in midterm_scores]
print "Question 3a"
print "Mean before normalization is", round(mean_score(midterm_scores), 3)
print "Sample variance before normalization is", round(sample_variance(midterm_scores), 3)
print "Mean after normalization is", round(mean_score(midterm_z), 3)
print "Sample variance after normalization is", round(sample_variance(midterm_z), 3), "\n"
print "Question 3b"
# z-score of a raw midterm score of 90 under the same mean/std.
print "Score after normalization", round(zscore(90, mean, std), 3) , "\n\n\n"
| true |
2437d600630a2252d7e13ed54c5ce566ae7e4080 | Python | SNAP-SAPIENT/Collector-Reflector | /pi_stuff/MotorHAT/picVidTest/StepTest2.py | UTF-8 | 1,723 | 2.8125 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | #!/usr/bin/python
#import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_Stepper
from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_DCMotor, Adafruit_StepperMotor
import sys
import picamera
import time
import atexit
# create a default Motor HAT object, no changes to I2C address or frequency
mh = Adafruit_MotorHAT()
# create the Raspberry Pi camera object
camera = picamera.PiCamera()
# recommended for auto-disabling motors on shutdown!
def turnOffMotors():
    """Release all four motor channels so none keeps drawing current."""
    mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)
    mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)
# Release the motors automatically when the interpreter exits.
atexit.register(turnOffMotors)
myStepper = mh.getStepper(2048, 1)  # 2048 steps/rev, motor port #1
myStepper.setSpeed(60)  # 60 RPM (NOTE(review): stale comment previously said 30)
# Camera configuration and file-name counters.
camera.resolution = '720p'
vidCount = 1
picCount = 1
"""
while picCount <= 4:
#rotate platform by 45 degrees and take a picture (repeat 3 more times)
print ("rotating to pic {}".format(picCount))
myStepper.step(512, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.DOUBLE)
print ("Taking picture {}".format(picCount))
#take pic
camera.capture('image{}.jpg'.format(picCount))
time.sleep(.5)
picCount += 1
"""
def Test2():
    """Continuously spin the platform 360 degrees while recording video.

    Note: the trailing sys.exit() is unreachable because the loop never
    breaks; it is kept for parity with the original script.
    """
    # Fix: vidCount is a module-level counter; without this declaration
    # the `vidCount += 1` below made it a local variable and the first
    # read raised UnboundLocalError.
    global vidCount
    while (True):
        # rotate platform by 360 degrees while taking video
        print ("Spinning 360 degrees")
        camera.start_recording('video{}.h264' .format(vidCount))
        myStepper.step(2048, Adafruit_MotorHAT.FORWARD, Adafruit_MotorHAT.DOUBLE)
        camera.stop_recording()
        vidCount += 1
    sys.exit()
| true |
4e56622c8e5b8db346ca622ee8a5280676532cce | Python | krmbzds/gpa-calculator | /gpa_calculator.py | UTF-8 | 3,297 | 3.4375 | 3 | [] | no_license |
def initialize_db(db_name="grades.db"):
conn = sqlite3.connect(db_name)
conn.close()
print("\nDatabase initialized.")
def create_tables(db_name='grades.db', table='grades'):
    """Create the grades table (className, semesterHours, letterGrade)."""
    conn = sqlite3.connect(db_name)
    # conn.execute uses an implicit cursor; the statement is identical.
    conn.execute('CREATE TABLE %s (className text, semesterHours int, letterGrade text)' % table)
    conn.commit()
    conn.close()
    print("\nCreated '%s' table on '%s' database." % (table, db_name))
def insert_into_table(className, semesterHours, letterGrade, db_name='grades.db', table='grades'):
conn = sqlite3.connect(db_name)
c = conn.cursor()
values = [className, semesterHours, letterGrade]
statement = "INSERT INTO %s VALUES (?,?,?)" % table
c.execute(statement, values)
conn.commit()
conn.close()
def fetch_results(db_name='grades.db', table='grades'):
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute("SELECT * FROM %s" % table)
rows = c.fetchall()
conn.close()
t = prettytable.PrettyTable(["Class Name", "Credit", "Grade"])
for row in rows:
t.add_row([row[0], row[1], row[2]])
g = prettytable.PrettyTable(["Quality Points", "Total Credit", "Cumulative GPA"])
g.add_row(cumulative_gpa())
print(t.get_string(hrules=prettytable.ALL))
print(g.get_string(hrules=prettytable.ALL))
def cumulative_gpa(db_name='grades.db', table='grades'):
conn = sqlite3.connect(db_name)
c = conn.cursor()
c.execute("SELECT * FROM %s" % table)
rows = c.fetchall()
conn.close()
total_hours, quality_points = 0, 0
catalog = {"A+": 4.3, "A": 4.0, "A-": 3.7,
"B+": 3.3, "B": 3.0, "B-": 2.7,
"C+": 2.3, "C": 2.0, "C-": 1.7,
"D+": 1.3, "D": 1.0, "D-": 0.7,
"F": 0, "U": 0, "IA": 0}
for row in rows:
total_hours += int(row[1])
quality_points += int(row[1]) * catalog[row[2]]
gpa = math.ceil((quality_points / total_hours) * 100) / 100
return [quality_points, total_hours, gpa]
def collect_input():
while True:
print("")
insert_into_table(input("Class Name: "), input("Semester Hours: "), input("Letter Grade: "))
response = input("\nAdd another line? (Y/n) ").lower()
if response:
if response[0] == "n":
break
elif response[0] == "y":
pass
else:
raise Exception("Wrong Input")
break
def collect_input_add():
response = input("\nAdd more grades? (y/N) ").lower()
if response:
if response[0] == "n":
pass
elif response[0] == "y":
collect_input()
else:
raise Exception("Wrong Input")
def db_exists(db_name="grades.db"):
return os.path.isfile(db_name)
def main():
if db_exists():
print("\nFound an existing database...")
collect_input_add()
fetch_results()
else:
print("\nNo grades database found...")
input("\nPress any key to intialize the database and start adding grades.")
initialize_db()
create_tables()
collect_input()
fetch_results()
if __name__ == "__main__":
import os
import math
import sqlite3
import prettytable
main()
| true |
7457d1a0c5388cebe8ec1b38341a5f90334dc722 | Python | jacobhamblin/little-challenges | /lib/search.py | UTF-8 | 1,342 | 3.234375 | 3 | [] | no_license | def bin_recur(list, value):
if not len(list):
return -1
mid = len(list) // 2
if list[mid] == value:
return mid
if list[mid] > value:
return bin_recur(list[0:mid], value)
else:
rest = bin_recur(list[mid + 1 : len(list)], value)
if rest is -1:
return -1
return mid + 1 + rest
def _bin_recur_indices(list, value, start, end):
if end < start:
return -1
mid_index = (end - start // 2) + start
mid = list[mid_index]
if mid == value:
return mid_index
if mid > value:
return _bin_recur_indices(list, value, start, mid_index - 1)
else:
return _bin_recur_indices(list, value, mid_index + 1, end)
def bin_recur_indices(list, value):
return _bin_recur_indices(list, value, 0, len(list) - 1)
def bin_recur_iter(list, value):
start = 0
end = len(list) - 1
while end > start:
mid_index = (end - start // 2) + start
if list[mid_index] == value:
return mid_index
elif list[mid_index] > value:
end = mid_index - 1
else:
start = mid_index + 1
if len(list) and list[end] == value:
return end
return -1
BINARY = {
"recur": bin_recur,
"recur_with_indices": bin_recur_indices,
"recur_iter": bin_recur_iter,
}
| true |
639dcf5e7c6064652541728d55043ba09a5da48c | Python | harshita26/Contact-Management-System | /cont_ms.py | UTF-8 | 8,769 | 2.78125 | 3 | [] | no_license | import tkinter as tk
from tkinter import ttk
from tkinter import messagebox,font
import sqlite3
root=tk.Tk()
root.title('Contact List')
root.geometry('700x530')
root.resizable(0,0)
# database connection
def Database():
conn=sqlite3.connect('data.db')
mycursor=conn.cursor()
mycursor.execute("CREATE TABLE IF NOT EXISTS 'MEMBER' (mem_id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT, firstname TEXT,lastname TEXT, gender TEXT, age TEXT, address TEXT, contact TEXT )")
mycursor.execute("SELECT * FROM 'MEMBER' ORDER BY 'lastname' ASC")
fetch=mycursor.fetchall()
for row in fetch:
tree.insert('','end',values=(row))
mycursor.close()
conn.close()
# add new contact functionality
def new_contact_action():
add_window=tk.Toplevel()
add_window.geometry('400x400')
add_window.title('Add Contact')
add_window.resizable(0,0)
# label frame
add_frame=ttk.LabelFrame(add_window,text='ADDING NEW CONTACT')
add_frame.grid(row=0,column=0,padx=20,pady=20)
# label loop
add_labels=['First Name: ','Last Name: ','Gender: ','Age: ','Address: ','Contact: ']
for i in range(len(add_labels)):
current_label='label'+str(i)
current_label=ttk.Label(add_frame,text=add_labels[i])
current_label.grid(row=i,column=0,padx=10,pady=10,sticky=tk.W)
# entry
fname=tk.StringVar()
lname=tk.StringVar()
gender=tk.StringVar()
age=tk.StringVar()
address=tk.StringVar()
contact=tk.StringVar()
fname_entry=ttk.Entry(add_frame,width=20,textvariable=fname)
fname_entry.focus_set()
lname_entry=ttk.Entry(add_frame,width=20,textvariable=lname)
gender_entry=ttk.Combobox(add_frame,width=18,textvariable=gender,state='readonly')
gender_entry['values']=('Male','Female','Other')
gender_entry.current(1)
age_entry=ttk.Entry(add_frame,width=20,textvariable=age)
address_entry=ttk.Entry(add_frame,width=20,textvariable=address)
contact_entry=ttk.Entry(add_frame,width=20,textvariable=contact)
# entry grid
fname_entry.grid(row=0,column=1,padx=10,pady=10)
lname_entry.grid(row=1,column=1,padx=10,pady=10)
gender_entry.grid(row=2,column=1,padx=10,pady=10)
age_entry.grid(row=3,column=1,padx=10,pady=10)
address_entry.grid(row=4,column=1,padx=10,pady=10)
contact_entry.grid(row=5,column=1,padx=10,pady=10)
def new_save_action():
if fname.get()=='' or lname.get()=='' or gender.get()=='' or age.get()=='' or address.get()=='' or contact.get()=='':
result=messagebox.showwarning('Warning','Please Complete The Required Field')
else:
tree.delete(*tree.get_children())
conn=sqlite3.connect('data.db')
mycursor=conn.cursor()
mycursor.execute("INSERT INTO 'MEMBER' (firstname, lastname, gender, age, address, contact) VALUES (?, ?, ?, ?, ?, ?)", (str(fname.get()), str(lname.get()), str(gender.get()), int(age.get()), str(address.get()), str(contact.get())))
conn.commit()
mycursor.close()
conn.close()
Database()
fname_entry.delete(0,tk.END)
lname_entry.delete(0,tk.END)
gender_entry.delete(0,tk.END)
age_entry.delete(0,tk.END)
address_entry.delete(0,tk.END)
contact_entry.delete(0,tk.END)
# save button
save_btn=ttk.Button(add_frame,text='SAVE',command=new_save_action)
save_btn.grid(row=7,columnspan=2,padx=4)
add_window.mainloop()
def OnSelected(event):
global mem_id
update_window=tk.Toplevel()
update_window.geometry('400x400')
update_window.title('Update Contact')
update_window.resizable(0,0)
current_item=tree.focus()
content=(tree.item(current_item))
selecteditem=content['values']
mem_id = selecteditem[0]
# label frame
update_frame=ttk.LabelFrame(update_window,text='UPDATING CONTACT')
update_frame.grid(row=0,column=0,padx=20,pady=20)
# label loop
add_labels=['First Name: ','Last Name: ','Gender: ','Age: ','Address: ','Contact: ']
for i in range(len(add_labels)):
current_label='label'+str(i)
current_label=ttk.Label(update_frame,text=add_labels[i])
current_label.grid(row=i,column=0,padx=10,pady=10,sticky=tk.W)
# entry
fname=tk.StringVar()
lname=tk.StringVar()
gender=tk.StringVar()
age=tk.StringVar()
address=tk.StringVar()
contact=tk.StringVar()
contact.set(selecteditem[6])
fname.set(selecteditem[1])
lname.set(selecteditem[2])
gender.set(selecteditem[3])
age.set(selecteditem[4])
address.set(selecteditem[5])
fname_entry=ttk.Entry(update_frame,width=20,textvariable=fname)
fname_entry.focus_set()
lname_entry=ttk.Entry(update_frame,width=20,textvariable=lname)
gender_entry=ttk.Combobox(update_frame,width=18,textvariable=gender,state='readonly')
gender_entry['values']=('Male','Female','Other')
gender_entry.current(1)
age_entry=ttk.Entry(update_frame,width=20,textvariable=age)
address_entry=ttk.Entry(update_frame,width=20,textvariable=address)
contact_entry=ttk.Entry(update_frame,width=20,textvariable=contact)
# entry grid
fname_entry.grid(row=0,column=1,padx=10,pady=10)
lname_entry.grid(row=1,column=1,padx=10,pady=10)
gender_entry.grid(row=2,column=1,padx=10,pady=10)
age_entry.grid(row=3,column=1,padx=10,pady=10)
address_entry.grid(row=4,column=1,padx=10,pady=10)
contact_entry.grid(row=5,column=1,padx=10,pady=10)
# update contact funtionality
def update_action():
tree.delete(*tree.get_children())
conn=sqlite3.connect('data.db')
mycursur=conn.cursor()
# mycursur.execute("UPDATE 'MEMBER' SET `firstname` = ?, `lastname` = ?, `gender` =?, `age` = ?, `address` = ?, `contact` = ? WHERE `mem_id` = ?", (str(fname.get()), str(lname.get()), str(gender.get()), str(age.get()), str(address.get()), str(contact.get()), int(mem_id)))
mycursur.execute("UPDATE `MEMBER` SET `firstname` = ?, `lastname` = ?, `gender` =?, `age` = ?, `address` = ?, `contact` = ? WHERE `mem_id` = ?", (str(fname.get()), str(lname.get()), str(gender.get()), str(age.get()), str(address.get()), str(contact.get()), int(mem_id)))
conn.commit()
mycursur.close()
conn.close()
Database()
# update button
update_btn=ttk.Button(update_frame,text='UPDATE',command=update_action)
update_btn.grid(row=7,columnspan=2,padx=4)
update_window.mainloop()
# delete contact functionaility
def delete_contact_action():
if not tree.selection():
result=messagebox.showwarning('Warning','You not selected any contact')
else:
result=messagebox.askquestion('Delete','Are you sure you want to delete this record?')
if result=='yes':
current_item=tree.focus()
contents=(tree.item(current_item))
selecteditem = contents['values']
tree.delete(current_item)
conn=sqlite3.connect("data.db")
mycursur=conn.cursor()
mycursur.execute("DELETE FROM 'MEMBER' WHERE `mem_id` = %d" % selecteditem[0])
conn.commit()
mycursur.close()
conn.close()
# label name
heading=tk.Label(root,text='CONTACT MANAGEMENT SYSTEM',font=('Arial',16,'bold'),width=800,padx=50)
heading.pack(fill=tk.X)
# label
input_label=tk.Label(root,bg='#6666ff',width=800)
input_label.pack(fill=tk.X)
# add and delete button
adds=tk.Button(input_label,text='ADD NEW CONTACT',bg="#28A745",fg='#ffffff',command=new_contact_action)
delete=tk.Button(input_label,text='DELETE CONTACT',bg="#DC3545",fg='#ffffff',command=delete_contact_action)
# grid
adds.grid(row=0,columnspan=3,padx=120)
delete.grid(row=0,column=6,padx=50)
# label frame
input_frame=tk.LabelFrame(root,relief=tk.FLAT,bg='#6666ff',width=800)
input_frame.pack(fill=tk.X)
tree = ttk.Treeview(input_frame, columns=("MemberID", "First Name", "Last Name", "Gender", "Age", "Address", "Contact"), height=400, selectmode="extended")
tree.heading('MemberID', text="MemberID")
tree.heading('First Name', text="First Name")
tree.heading('Last Name', text="Last Name")
tree.heading('Gender', text="Gender")
tree.heading('Age', text="Age")
tree.heading('Address', text="Address")
tree.heading('Contact', text="Contact")
tree.column('#0', minwidth=0, width=0)
tree.column('#1', minwidth=0, width=0)
tree.column('#2', minwidth=0, width=80)
tree.column('#3', minwidth=0, width=120)
tree.column('#4', minwidth=0, width=90)
tree.column('#5', minwidth=0, width=80)
tree.column('#6', minwidth=0, width=120)
tree.column('#7', minwidth=0, width=120)
tree.pack()
tree.bind('<Double-Button-1>',OnSelected)
if __name__ == '__main__':
Database()
root.mainloop() | true |
bd96db2e12184cadece4c526a06defe5af68f44e | Python | ku-ya/occ_grid_map | /src/DDA.py | UTF-8 | 453 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
class DDA:
def __init__(self,start,end):
self.start = start
self.end = end
self.path = []
self.direction = end[1]-start[1]/(end[0]-start[0])
def step(self):
self.t_max_x
self.t_max_y
while :
if(t_max_x<t_max_y):
t_max_x += t_delta_x
x += step_x
else:
t_max_y += t_delta_y
y += step_y
nextVoxel(x,y)
| true |
a8c0fcccfbc2417508af8d3936dd3cea4dc0dd39 | Python | Philipg99/leetcode | /2. Add Two Numbers.py | UTF-8 | 492 | 3.015625 | 3 | [] | no_license | class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
a = ""
a += str(l1.val)
while l1.next != None:
l1 = l1.next
a += str(l1.val)
b = ""
b += str(l2.val)
while l2.next != None:
l2 = l2.next
b += str(l2.val)
na=int(a[::-1])
nb=int(b[::-1])
c=list(map(int,list(str(na+nb))))
return c[::-1]
| true |
aaf59a6d9cb3caf88aaa581aaa55de2332312a69 | Python | hi0t/Outtalent | /Leetcode/459. Repeated Substring Pattern/solution1.py | UTF-8 | 205 | 2.796875 | 3 | [
"MIT"
] | permissive | class Solution:
def repeatedSubstringPattern(self, s: str) -> bool:
for p in range(2, len(s) + 1):
if len(s) % p == 0 and s[:len(s) // p] * p == s: return True
return False
| true |
a1b0b46d55d90ef30fbad6a59f1e0860235cfd13 | Python | Oscar1305/Python | /Python/Actividad 5.py | UTF-8 | 209 | 3.953125 | 4 | [] | no_license | """
5. Realizar un algoritmo que calcule la suma de los enteros entre 1 y 10, ambos inclusive.
"""
contador = 0
suma = 0
while contador <= 10:
suma += contador
contador += 1
print(suma)
| true |
deeab5860f55dbcfb34ed477ab618732d614254d | Python | ngubenkov/ETL | /test.py | UTF-8 | 356 | 2.515625 | 3 | [] | no_license | import collections
import os
import pprint
s = "http://newsapi.org/v2/everything?q=RUSSIA&from={DATA_DATE}&to={DATA_DATE}&sortBy=popularity&apiKey={TOKEN}"
os.environ['DATA_DATE'] = '2020-10-01'
os.environ['TEST'] = 'TEST'
#for k, v in os.environ.items(): print(f'{k}={v}')
d = collections.defaultdict(str)
d.update(os.environ)
pprint.pprint(d)
| true |
6e8667c10290da7e615ec6ab2bae987d018024bc | Python | chinaCQbai/scrapyCar | /carhome/carhome/spiders/carLink.py | UTF-8 | 968 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
import scrapy
from selenium import webdriver
from carhome.items import carList
class carLink(scrapy.Spider):
name = 'carLink'
def __init__(self):
super(carLink, self).__init__()
self.allowed_domains = ["bitauto.com"]
self.start_urls = ["http://car.bitauto.com/"]
self.driver = webdriver.Firefox()
# 获取所有的1级链接
#self.driver.find_elements_by_class_name('mainBrand'):
#self.driver.find_element_by_xpath('//*[@id="treeList"]//a[@class="mainBrand"]'):
def parse(self, response):
self.driver.get(response.url)
item = carList()
for list1 in self.driver.find_elements_by_xpath('//*[@id="treeList"]//a[@class="mainBrand"]'):
item['url']=list1.get_attribute('href')
item['name']=list1.get_attribute('innerText')
#list1.click()
yield item
self.driver.close()
| true |
2ff5997337aff1ffd34637edf9bf89d3a58efb37 | Python | OnMyTeam/Algorithm | /Python/baekjoon_10814.py | UTF-8 | 375 | 3.640625 | 4 | [] | no_license | # 나이순 정렬(10814) 백준
#
import sys
from operator import itemgetter
t = int(sys.stdin.readline())
member_list = []
for i in range(t):
info = sys.stdin.readline().split()
age, name = int(info[0]), info[1]
member_list.append([i, age, name])
member_list.sort(key=itemgetter(1,0))
for member in member_list:
print(str(member[1]) + " " +member[2])
| true |
c03bca11eb422f69e6a43d232ffcf0d76e842b00 | Python | jo-nas/progressbar_plug | /progressbar_plug/__init__.py | UTF-8 | 3,420 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
""" Progressbar for other Plugs
This module contains a plug that setup a progressbar and updates it.
To use this plug you must also update the frontend. Installation see README.md.
Example:
If you want to use this plugs inside a test, import it.
> from plugs import progress_bar
"""
from openhtf import plugs
import threading
import uuid
import logging
from tqdm import tqdm
_LOG = logging.getLogger(__name__)
class ProgressBarNotCreatedException(Exception):
pass
class ProgressBar(plugs.FrontendAwareBasePlug):
def __init__(self):
super(ProgressBar, self).__init__()
self._id = None
self._progress = None
self._status = None
self._desc = None
self._message = None
self._progressbar_tqdm = None
self._cond = threading.Condition()
@property
def id(self):
return self._id
@id.setter
def id(self, value):
if value != self.id:
self._id = value
self.notify_update()
@property
def status(self):
return self._status
@status.setter
def status(self, value):
if value != self.status:
self._status = value
self.notify_update()
@property
def progress(self):
return self._progress
@progress.setter
def progress(self, value):
if self._progressbar_tqdm is None:
raise ProgressBarNotCreatedException("Created a progressbar before assigning.")
if value != self.progress:
self._progress = value
self._progressbar_tqdm.n = value
self._progressbar_tqdm.refresh()
self.notify_update()
@property
def desc(self):
return self._desc
@desc.setter
def desc(self, value):
if value != self.desc:
self._desc = value
self._progressbar_tqdm.set_description(value)
self.notify_update()
@property
def message(self):
return self._message
@message.setter
def message(self, value):
if value != self.message:
self._message = value
self.notify_update()
def _asdict(self):
"""Return a dict representation of the current progressbar."""
with self._cond:
if self.id is None:
return None
return {
'progressbar': {
'id': self.id.hex,
'message': self.message,
'status': self.status,
'desc': self.desc,
'progress': self.progress
}
}
def create_progressbar(self, desc, progress=0, message="", status="", total=100):
import sys
self._id = uuid.uuid4()
_LOG.debug('Displaying progress (%s): "%s"', self.id, 0)
self._progress = progress
self._desc = desc+":"
self._message = message
self._status = status
self._progressbar_tqdm = tqdm(
file=sys.stdout,
total=total,
desc=desc,
ncols=80,
bar_format="{desc}|{bar}|{percentage:3.0f}%",
smoothing=0.5
)
self.notify_update()
return self.id
def tearDown(self):
self._progressbar_tqdm.close()
self._progressbar_tqdm = None
self._id = None
self.notify_update()
| true |
ab48a406daed5b0585f8baba989177ca26b53bf3 | Python | xf97/myLeetCodeSolutions | /src/572.py | UTF-8 | 956 | 3.484375 | 3 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
'''
头大
time defeat: 98.9%
space defeat: 11.14%
'''
class Solution:
def isSubtree(self, s: TreeNode, t: TreeNode) -> bool:
#遍历,拼接
#递归
#边界情况
if s.left == None and s.right == None and t.left == None and t.right == None:
return s.val == t.val
sRes = []
tRes = []
def dfs(root, _result):
if not root:
return _result.append("a") #a是空节点标志
_result.append(str(root.val))
dfs(root.left, _result)
dfs(root.right, _result)
#拼接字符串
dfs(s, sRes)
dfs(t, tRes)
sStr = "".join(sRes)
tStr = "".join(tRes)
print(sStr, tStr)
return tStr in sStr | true |
f5cd0e5089b1c845837f165ecc8dbfa499da14b0 | Python | yusokk/algorithm | /week2/8983-사냥꾼.py | UTF-8 | 972 | 3.03125 | 3 | [] | no_license | import sys
sys.stdin = open("../test/hunter.txt", "r")
first_input = tuple(map(int, sys.stdin.readline().split()))
M = first_input[0] # 사대의 수
N = first_input[1] # 동물의 수
L = first_input[2] # 사정거리
m_list = sorted(map(int, sys.stdin.readline().split())) # 사대 위치 좌표들
count = 0
def binary(left, right):
global count
pl = 0
pr = len(m_list)-1
while True:
pc = (pl+pr) // 2
if left <= m_list[pc] <= right:
count += 1
break
elif m_list[pc] < left:
pl = pc + 1
else:
pr = pc - 1
if pr < pl:
break
for _ in range(N):
temp = tuple(map(int, sys.stdin.readline().split()))
if L - temp[1] >= 0:
left = temp[0] - (L - temp[1])
if left <= 0:
left = 1
right = temp[0] + (L - temp[1])
if right > m_list[-1]:
right = m_list[-1]
binary(left, right)
print(count) | true |
fbd792e58df818ada1027a5ae5d3ac6dd29c6d01 | Python | Maryam-ask/Python_Tutorial | /Iterators/StopIteration.py | UTF-8 | 348 | 3.796875 | 4 | [] | no_license | # StopIteration:
class MyNumbers:
def __iter__(self):
self.number = 1
return self
def __next__(self):
if self.number <= 20:
x = self.number
self.number +=1
return x
else:
raise StopIteration
ob1 = MyNumbers()
obIter = iter(ob1)
for i in obIter:
print(i) | true |
16dd83bf1de5ed61bb8cc075bfdd8c0ea39d0739 | Python | pagrus/galvanize-magonote | /test/magonote-update-tag-count.py | UTF-8 | 988 | 2.71875 | 3 | [
"MIT"
] | permissive | import psycopg2
import requests
import time
import magonote_functions as mf
# from db_info import db_name, db_user, db_pw, db_host
"""
reads list of tags from db, updates count from first tag listing page
"""
db_name = 'itchbot'
db_user = 'itchbot'
db_pw = 'IB!0502'
db_host = '127.0.0.1'
db_connection = psycopg2.connect( database=db_name, user=db_user, host=db_host, password=db_pw )
# tag_id | slug | tag_desc
cur = db_connection.cursor()
cur.execute('SELECT tag_id, slug FROM itch_tag')
tag_list = cur.fetchall()
for tag in tag_list:
print("sleeping for 2s...")
time.sleep(2)
tag_url = "https://itch.io/games/newest/tag-{}".format(tag[1])
r = requests.get(tag_url)
rt = r.text
game_count = mf.get_game_count_from_tag_html(rt)
print("game count for {}: {}".format(tag[1], game_count))
cur.execute("INSERT INTO itch_tag_count (tag_id, count) VALUES (%s, %s)", (tag[0], game_count))
db_connection.commit()
cur.close()
db_connection.close() | true |
2b63a367af64a5ca1a797e3a3684f75a401d0c79 | Python | lyphilong/Test_Adaptive_CFA_Forensics | /src/shin_variance.py | UTF-8 | 8,527 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
"""
Implementation of
Hyun Jun Shin, Jong Ju Jeon, and Il Kyu Eom "Color filter array pattern identification using variance of color difference image," Journal of Electronic Imaging 26(4), 043015 (7 August 2017). https://doi.org/10.1117/1.JEI.26.4.043015
Usage:
python3 shin_variance.py (-j JPEG_QUALITY) (-b BLOCK_SIZE) (-o OUT) images
--jpeg, -j
If specified, must be a number between 0 and 100. Images will be processed with JPEG compression before detection.
If unspecified, no JPEG compression is applied
--block, -b
The image is split in blocks of the specified size before being studied. Default size: 32
--out -o
Path to the output file. Default: out_choi.npz
images
All following arguments should be paths to images to analyse.
Output:
Output will be written on the specified file. The file is in a npz NumPy format, and contains one 2D array named after each input image. Each pixel in an array represent the confidence that the block at this location is forged, with 0 representing high-confidence and 1 low-confidence (or high-confidence that the block is authentic).
"""
import argparse
import io
import os
import sys
import numpy as np
import PIL
from matplotlib import pyplot as plt
from tqdm import tqdm
from utils import jpeg_compress
def decompose_in_grids(img):
"""
Decompose the 4 possible CFA positions of an image.
:param img: np.ndarray, shape (2Y, 2X, 3)
:return grids: np.ndarray, shape (4, Y, X, 3)
"""
return np.asarray(
[img[::2, ::2], img[::2, 1::2], img[1::2, ::2], img[1::2, 1::2]])
def remove_background(grids):
"""
Background removal as performed in the original article.
:param grids: np.ndarray, shape (4, Y, X, 3)
:return grids (background removed): same shape
"""
grids = grids - .25 * (grids + np.roll(grids, 1, axis=1) + np.roll(
grids, 1, axis=2) + np.roll(grids, (1, 1), axis=(1, 2)))
return grids
def compute_df(grids):
"""
Difference of colours, red-green (d) and blue-green (f).
:param grids: np.ndarray, shape (4, Y, X, 3)
:return d, f: two np.ndarray, both of shape (4, Y, X, 3)
Input shape: (4, Y, X, 3)
Output shape: ((4, Y, X), (4, Y, X))
"""
d, f = grids[:, :, :, 0] - grids[:, :, :, 1], grids[:, :, :,
2] - grids[:, :, :, 1]
return d, f
def grids_to_blocks(grids, block_size=32):
"""
Split a grid into blocks
:param grids: np.ndarray, shape (4, block_size*Y+t, block_size*X+t)
:param block_size: default 32, block size
:return blocks: np.ndarray, shape (Y, X, 4, block_size, block_size)
"""
_, Y, X = grids.shape
Y -= Y % block_size
X -= X % block_size
grids = grids[:, :Y, :X]
n_Y, n_X = Y // block_size, X // block_size
blocks = np.asarray(
np.split(np.asarray(np.split(grids, n_X, axis=2)), n_Y, axis=2))
return blocks
def blocks_to_variance(blocks):
"""
Compute the spatial variance of each block.
:param blocks: np.ndarray, shape (Y, X, 4, block_size, block_size)
:return var: np.ndarray, shape (Y, X, 4)
"""
var = np.var(blocks, axis=(-1, -2))
return var
def determine_candidates(var_d, var_f):
"""
Find the mot likely grid
:param var_d, var_f: two lists, tuples or np.ndarray, both of shape (4,)
:return grid, confidence: grid is 0 for RG/GB, 1 for GR/BG, 2 for GB/RG, 3 for BG/GR. Confidence ranges from 0 (very confident) to 1 (not confident).
Returns:
"""
v_d_RGGB_BGGR = np.abs(var_d[0] - var_d[3])
v_d_GRBG_GBRG = np.abs(var_d[1] - var_d[2])
v_f_RGGB_BGGR = np.abs(var_f[0] - var_f[3])
v_f_GRBG_GBRG = np.abs(var_f[1] - var_f[2])
v_RGGB_BGGR = v_d_RGGB_BGGR + v_f_RGGB_BGGR
v_GRBG_GBRG = v_d_GRBG_GBRG + v_d_GRBG_GBRG
if v_RGGB_BGGR > v_GRBG_GBRG: # Candidates are RGGB and BGGR
v_RGGB = var_d[3] + var_f[
0] # Where neither R/B nor G are original in that grid. A lower value thus means a higher probability
v_BGGR = var_d[0] + var_f[3]
ratio_green = v_GRBG_GBRG / v_RGGB_BGGR if v_RGGB_BGGR > 0 else None
if v_RGGB < v_BGGR:
ratio_rb = v_RGGB / v_BGGR if v_BGGR > 0 else None
grid = 0
else:
ratio_rb = v_BGGR / v_RGGB if v_BGGR > 0 else None
grid = 3
else: # Candidates are GRBG and GBRG
v_GRBG = var_d[2] + var_f[1]
v_GBRG = var_d[1] + var_f[2]
ratio_green = v_RGGB_BGGR / v_GRBG_GBRG if v_GRBG_GBRG > 0 else None
if v_GRBG > v_GBRG:
ratio_rb = v_GBRG / v_GRBG if v_GRBG > 0 else None
grid = 1
else:
ratio_rb = v_GRBG / v_GBRG if v_GBRG > 0 else None
grid = 2
if ratio_green is None:
if ratio_rb is None:
confidence = 1.
else:
confidence = ratio_rb
else:
if ratio_rb is None:
confidence = ratio_green
else:
confidence = min(ratio_green, ratio_rb)
return grid, confidence
def find_forgeries(img, block_size=32):
"""
Given an image, find forged regions.
:param img: np.ndarray, shape (Y, X, 3)
:param block_size: int, default:32. Size of the blocks in which forgeries are sought
:return forged_confdence: np.ndarray, shape (Y//block_size, X//block_size). Confidence that each block is forged, from 0 (very confident) to 1 (not confident that the block is forged, or confident that it is authentic).
"""
block_size //= 2 # we will be working at half-resolution after decomposing into grids, block size must account for this
grids = decompose_in_grids(img)
grids = remove_background(grids)[:, 2:-2, 2:-2]
d, f = compute_df(grids)
global_var_d, global_var_f = np.var(d,
axis=(-1, -2)), np.var(f,
axis=(-1, -2))
global_grid, _ = determine_candidates(global_var_d, global_var_f)
blocks_d, blocks_f = grids_to_blocks(d, block_size), grids_to_blocks(
f, block_size)
var_d, var_f = blocks_to_variance(blocks_d), blocks_to_variance(blocks_f)
n_Y, n_X, _ = var_d.shape
forged_confidence = np.ones((n_Y, n_X))
for y in range(n_Y):
for x in range(n_X):
grid, confidence = determine_candidates(var_d[y, x], var_f[y, x])
if grid != global_grid:
forged_confidence[y, x] = confidence
return forged_confidence
def get_parser():
parser = argparse.ArgumentParser(
description=
"""Detect forgeries through CFA estimation using the variance of colour difference method. For more details see
Hyun Jun Shin, Jong Ju Jeon, and Il Kyu Eom "Color filter array pattern identification using variance of color difference image," Journal of Electronic Imaging 26(4), 043015 (7 August 2017). https://doi.org/10.1117/1.JEI.26.4.043015"""
)
parser.add_argument(
"-j",
"--jpeg",
type=int,
default=None,
help=
"JPEG compression quality. Default: no compression is done before analysis."
)
parser.add_argument("-b",
"--block-size",
type=int,
default=32,
help="Block size. Default: 32.")
parser.add_argument(
"-o",
"--out",
type=str,
default="out_shin.png",
help="Path to output detected forgeries. Default: out_shin.png")
parser.add_argument("input", type=str, help="Images to analyse.")
return parser
if __name__ == "__main__":
parser = get_parser()
args = parser.parse_args(sys.argv[1:])
out = args.out
block_size = args.block_size
quality = args.jpeg
image_name = args.input
confidences = {}
img = plt.imread(image_name)
Y_o, X_o, C = img.shape
img = img[:Y_o, :X_o, :3]
if quality is not None:
img = jpeg_compress(img, quality)
forged_confidence = find_forgeries(img, block_size)
error_map = 1 - forged_confidence # highest values (white) correspond to suspected forgeries
# Resample the output to match the original image size
error_map = np.repeat(np.repeat(error_map, block_size, axis=0),
block_size,
axis=1)
output = np.zeros((Y_o, X_o))
output[:error_map.shape[0], :error_map.shape[1]] = error_map
plt.imsave(out, output)
| true |
07e4f302de72eb4cd963e6c686c70fac25b99057 | Python | juntaek118/py | /django/카톡 리퀘스트 리스폰스.py | UHC | 3,721 | 2.59375 | 3 | [] | no_license | #Views.py
from django.shortcuts import render
# Create your views here.
from django.http import JsonResponse
from django.views.decorators.csrf import csrf_exempt
import json
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "pytoon.settings")
import django
django.setup()
from pytoon.models import User
# Create your views here.
def keyboard(request):
return JsonResponse({
'type' : 'buttons',
'buttons' : ['ʱ ', '߰', '']
})
@csrf_exempt
def answer(request):
json_str = ((request.body).decode('utf-8'))
received_json_data = json.loads(json_str)
content= received_json_data['content']
userid= received_json_data['user_key']
if content=='ʱ ':
create_user_db(request)
return JsonResponse({
'message': {
'text': 'ʱ Ǿϴ' + get_user_db(request)
},
'keyboard': {
'type': 'buttons',
'buttons': ['ʱ ', '', '߰/']
}
})
elif content=='':
return JsonResponse({
'message': {
'text': ' Դϴ.'+get_user_db(request) # db
},
'keyboard': {
'type': 'buttons',
'buttons': ['ʱ ', '', '߰/']
}
})
elif content=='߰/':
return JsonResponse({
'message': {
'text': content + ' Էּ' # db
},
'keyboard': {
'type': 'buttons',
'buttons': ['ʱ ', '', '߰/']
}
})
else: # ȭ̸ Է
append_delete_db(request)
return JsonResponse({
'message': {
'text': ' Դϴ.'+ get_user_db(request) # db
},
'keyboard': {
'type': 'buttons',
'buttons': ['ʱ ', '', '߰/']
}
# ?
def create_user_db(request):
json_str = ((request.body).decode('utf-8'))
received_json_data = json.loads(json_str)
content= received_json_data['content']
userid= received_json_data['user_key']
content=[content] # , Ʈ
Menu.objects.create(
user_id=user_id,
user_toon=content,
)
def get_user_db(request):
json_str = ((request.body).decode('utf-8'))
received_json_data = json.loads(json_str)
userid= received_json_data['user_key']
toon_name = User.objects.get(user_id=userid).toon_name
return ' Դϴ' + toon_name
def append_delete_db(request):
json_str = ((request.body).decode('utf-8'))
received_json_data = json.loads(json_str)
content= received_json_data['content']
a=User.objects.get(user_id=userid).user_toon
#Models.py
from django.db import models
# Create your models here.
# DB
class User(models.Model):
user_id = models.CharField(max_length=30, default="")
user_toon= models.CharField(max_length=30, default="")
def __str__(self):
return self.user_id
if content in a:
a.remove(content)
else:
a.append(content)
| true |
723091f900a479b6538984b5240915b70c2b0ad6 | Python | JoseCarlos33/Pyhton | /Questões Resolvidas & Resumos de Assuntos/loop for.py | UTF-8 | 589 | 3.796875 | 4 | [] | no_license |
"""
for no python:
for item in iteravel:
range(1,10)
1
2
3
4
5
6
7
8
9
"""
qtd = int(input('Digite a quantidade de loops: '))
soma = 0
num = 0
for l in range(1, qtd+1):
print(f'{l}')
print('')
for n in range(1, qtd+1):
num = int(input(f'Digite o número {n} de {qtd}:'))
soma = soma + num
print(f'A soma foi igual a {soma}')
"""
------------------------------------------------------
COMO NAO PULAR LINHA DEPOIS DE UM PRINT:
------------------------------------------------------
"""
# print('nome', end='')
#Observaçao: para
| true |
398c768ac8eb1fffc56b9cb4f5932602d3d5524f | Python | AmeliaWen/316-ImageProcessing | /fft.py | UTF-8 | 13,353 | 2.96875 | 3 | [] | no_license | import cv2
import math
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import time
import argparse
from tqdm import tqdm
# this is the fast fourier transform base case
def sfft_1d(a):
    """Direct O(N^2) discrete Fourier transform of a 1-D sequence.

    Serves as the base case for the recursive FFT.
    """
    signal = np.asarray(a, dtype=complex)
    length = signal.shape[0]
    out = np.zeros(length, dtype=complex)
    for freq in range(length):
        acc = 0j
        for idx in range(length):
            # accumulate signal[idx] * e^{-2*pi*i*freq*idx/length}
            acc += signal[idx] * np.exp(-2j * np.pi * freq * idx / length)
        out[freq] = acc
    return out
# this is the inverse fast fourier transform in 1 dimension (base case)
def ifft_1d(a):
    """Direct O(N^2) inverse DFT of a 1-D sequence, including the 1/N
    normalization. Base case for the recursive inverse FFT below."""
    a = np.asarray(a, dtype=complex)
    N = a.shape[0]
    res = np.zeros(N, dtype=complex)
    for n in range(N):
        for k in range(N):
            res[n] += a[k] * np.exp(2j * np.pi * k * n / N)
        res[n] /= N
    return res
# this is the inverse fast fourier transform called from ifft_2d
def ifft(a):
    """Recursive radix-2 inverse FFT.

    Bug fix: each recursive half is already normalized by 2/N, so the
    combined result must be divided by 2 at every level. The original
    omitted this and returned values scaled by N/16 for all N > 16.

    Raises AssertionError when the length is not even (the code expects a
    power of 2; sizes <= 16 fall back to the direct inverse DFT).
    """
    a = np.asarray(a, dtype=complex)
    N = a.shape[0]
    # check size validation
    if N % 2 != 0:
        raise AssertionError("size of a must be a power of 2")
    # run base case
    elif N <= 16:
        return ifft_1d(a)
    # recursive call: split into even/odd samples, combine with twiddles
    else:
        even = ifft(a[::2])
        odd = ifft(a[1::2])
        # twiddle factors e^{2*pi*i*n/N}
        res = np.exp(2j * np.pi * np.arange(N) / N).astype(np.complex64)
        return np.concatenate((even + res[:N // 2] * odd,
                               even + res[N // 2:] * odd), axis=0) / 2
# this is the inverse fast fourier transform in 2 dimension
def ifft_2d(a):
    """2-D inverse FFT: 1-D inverse transform of every row, then every column."""
    grid = np.asarray(a, dtype=complex)
    n_rows, n_cols = grid.shape
    out = np.zeros((n_rows, n_cols), dtype=complex)
    for r in range(n_rows):
        out[r, :] = ifft(grid[r, :])
    for c in range(n_cols):
        out[:, c] = ifft(out[:, c])
    return out
# this is the fast fourier transform in 1 dimension
def fft_1d(x):
    """Recursive radix-2 Cooley-Tukey FFT; falls back to the direct DFT
    (sfft_1d) for sizes <= 16. Raises AssertionError on odd sizes."""
    x = np.asarray(x, dtype=complex)
    size = x.shape[0]
    if size % 2 > 0:
        raise AssertionError("size of a must be a power of 2")
    if size <= 16:
        return sfft_1d(x)
    evens = fft_1d(x[::2])
    odds = fft_1d(x[1::2])
    # twiddle factors e^{-2*pi*i*k/size}
    twiddle = np.exp(-2j * np.pi * np.arange(size) / size)
    half = size // 2
    return np.concatenate([evens + twiddle[:half] * odds,
                           evens + twiddle[half:] * odds])
# this is the fast fourier transform in 2 dimension
def fft_2d (img):
    """2-D FFT: 1-D transform of every column, then every row of the result.

    NOTE(review): the original unpacking named shape[0] `w` and shape[1] `h`;
    the loop bounds below keep that exact behavior.
    """
    grid = np.asarray(img, dtype=complex)
    n_rows, n_cols = grid.shape
    out = np.empty_like(img, dtype=complex)
    for col in range(n_cols):
        out[:, col] = fft_1d(grid[:, col])
    for row in range(n_rows):
        out[row, :] = fft_1d(out[row, :])
    return out
# this is the helper method for mode 2
# we investigated three denoising methods
# 1. remove high frequency
# 2. width and height have different fraction
# 3. threshold everything, threshold is 0.9
# it prints in the command line the number of non-zeros
def denoise(img, type, precentage, test):
    """Denoise an FFT image by one of three strategies and plot the result.

    img:        2-D array of Fourier coefficients (not modified; copied).
    type:       1 = zero the mid (high) frequencies per axis using `precentage`,
                2 = same idea with fixed, different fractions per axis,
                3 = clip all coefficients to +/- 0.9 * max(real part).
    precentage: low-frequency fraction kept on each edge (type 1 only).
    test:       0 plots into subplot 122; otherwise into 131/132/133 so the
                three strategies can be compared side by side.
    Prints the non-zero coefficient count; no return value (plots only).
    """
    fft_img = img.copy()
    h, w = fft_img.shape
    if type == 1:
        print("remove high frequency")
        # zero every coefficient whose row or column index lies in the
        # middle band (the high frequencies in unshifted FFT layout)
        for r in tqdm(range(h)):
            for c in range(w):
                if r > h * precentage and r < h*(1-precentage):
                    fft_img[r, c]= 0
                if c > w * precentage and c < w*(1-precentage):
                    fft_img[r, c] = 0
        non_zero_count = np.count_nonzero(fft_img)
        print("amount of non-zeros: ", non_zero_count)
        print("fraction of non-zero coefficient: ", non_zero_count / fft_img.size)
        denoised = ifft_2d(fft_img)
        #not in test mode
        if test == 0:
            plt.subplot(122)
        else:
            plt.subplot(131)
        plt.imshow(np.abs(denoised), norm=colors.LogNorm())
    elif type == 2:
        print("width and height have different fraction")
        # keep 10% of rows and 15% of columns at each edge (vectorized)
        h_fraction = 0.1
        fft_img[int(h_fraction * h):int(h * (1 - h_fraction)), :] = 0.0
        w_fraction = 0.15
        fft_img[:, int(w_fraction * w):int(w * (1 - w_fraction))] = 0.0
        non_zero_count = np.count_nonzero(fft_img)
        print("amount of non-zeros: ", non_zero_count)
        print("fraction of non-zero coefficient: ", non_zero_count / fft_img.size)
        denoised = ifft_2d(fft_img)
        if test == 0:
            plt.subplot(122)
        else:
            plt.subplot(132)
        plt.imshow(np.abs(denoised), norm=colors.LogNorm())
    elif type == 3:
        print("threshold everything, threshold is 0.9 ")
        threshold = fft_img.real.max() * 0.9
        # NOTE(review): comparing complex coefficients with < / <= raises
        # TypeError in Python 3 if fft_img is complex (fft_2d output is) —
        # confirm this branch is only used with real input.
        for r in tqdm(range(h)):
            for c in range(w):
                if fft_img[r, c] < threshold and fft_img[r, c] > -threshold:
                    fft_img[r, c] = fft_img[r, c]
                elif fft_img[r, c] <= -threshold:
                    fft_img[r, c] = -threshold
                else :
                    fft_img[r, c] = threshold
        non_zero_count = np.count_nonzero(fft_img)
        print("amount of non-zeros: ", non_zero_count)
        print("fraction of non-zero coefficient: ", non_zero_count / fft_img.size)
        denoised = ifft_2d(fft_img)
        if test == 0:
            plt.subplot(122)
        else:
            plt.subplot(133)
        plt.imshow(np.abs(denoised), norm=colors.LogNorm())
# this is the helper method for mode 3
# it keeps the value for a certain percentage of image file and make others 0
def compress_f (img, filename, precentage):
    """Compress an FFT image by zeroing all but a centered low-frequency band.

    Keeps sqrt(1 - precentage) of each axis' coefficients (split between the
    two ends), saves the sparse coefficient matrix to "<filename>_<p>.csv",
    and returns the real part of the reconstructed (inverse-FFT) image.
    """
    fft_img = img.copy()
    # NOTE(review): this unpack is immediately overwritten below and the
    # names are reused for the per-axis keep-counts.
    w, h = fft_img.shape
    h = int (math.sqrt(1-precentage) * (fft_img.shape[0] / 2))
    w = int (math.sqrt(1-precentage) * (fft_img.shape[1] / 2))
    # zero the middle (high-frequency) band of each axis
    fft_img[h:-h, :] = 0
    fft_img[:, w:-w] = 0
    print("compressing ", precentage, " percentage of the image")
    print("nonzero values: ", np.count_nonzero(fft_img))
    name = filename+"_"+str(precentage) + ".csv"
    # side effect: persists the (complex) coefficient matrix as CSV
    np.savetxt(name, fft_img, delimiter=",")
    return ifft_2d(fft_img).real
# this method is the slow version of fft algorithm
def sfft (a):
    """Direct O((N*M)^2) 2-D DFT — the deliberately slow reference
    implementation used by mode_4 for runtime comparison."""
    grid = np.asarray(a, dtype=complex)
    n_rows, n_cols = grid.shape
    out = np.zeros((n_rows, n_cols), dtype=complex)
    for u in range(n_rows):
        for v in range(n_cols):
            for m in range(n_cols):
                for n in range(n_rows):
                    out[u, v] += grid[n, m] * np.exp(
                        -2j * np.pi * ((float(u * n) / n_rows) + (float(v * m) / n_cols)))
    return out
# this method is called when using mode 4
# we produce plots that summarize the runtime complexity of your algorithms.
# It prints in the command line the means and variances of the runtime of your algorithms versus the problem size.
def mode_4():
    """Benchmark the fast FFT against the slow DFT and plot runtimes.

    For sizes 32/64/128, runs 10 trials each of fft_2d (fast) and sfft
    (slow), then plots means with 2-standard-deviation error bars against
    the power-of-two size. Prints every trial's timing.
    """
    print("mode 4 is triggered")
    size = [32, 64, 128]
    slow_time = list()
    fast_time = list()
    # NOTE(review): the four lists below are never used.
    dft_mean = list()
    dft_std = list()
    fft_mean = list()
    fft_std = list()
    x = 32
    for j in range(3):
        # per-size trial lists (dft_list holds FAST times, fft_list SLOW
        # times — the names are swapped but used consistently below)
        dft_list = list()
        fft_list = list()
        for i in range(10):
            y = np.random.rand(x, x).astype(np.float32)
            startTime = time.time()
            fft_2d(y)
            endTime = time.time()
            diffTime = endTime - startTime
            print("Fast time: {}".format(diffTime))
            dft_list.append(diffTime)
            slow_start = time.time()
            sfft(y)
            slow_end = time.time()
            diffTimeSlow = slow_end-slow_start
            print("Slow time: {}".format(diffTimeSlow))
            fft_list.append(diffTimeSlow)
        slow_time.append(fft_list)
        fast_time.append(dft_list)
        x *= 2
    slow_time = np.array(slow_time)
    fast_time = np.array(fast_time)
    # error bars are two standard deviations (~95% interval)
    slow_mean = slow_time.mean(axis=1)
    slow_std = slow_time.std(axis=1) * 2
    fast_mean = fast_time.mean(axis=1)
    fast_std = fast_time.std(axis=1) * 2
    plt.figure("Mode_4")
    power = np.arange(5, 8)
    plt.subplot(133)
    plt.errorbar(power, slow_mean, yerr=slow_std, label="slow")
    plt.errorbar(power, fast_mean, yerr=fast_std, label="fast")
    plt.xlabel("size of test data (power of 2)")
    plt.ylabel("runtime (second)")
    plt.xticks(power)
    plt.title("Runtime for slow FT against fast FT")
    plt.legend(loc='best')
    plt.show()
# after experiment, we found type2 denoise method produces the best result.
# this method output a one by two subplot.
# In this subplot we include the original image next to its denoised version.
def mode_2 (iname, type, precentage):
    """Denoise mode: show the original image next to its denoised version.

    Loads the image, pads its dimensions up to powers of two (required by the
    radix-2 FFT), takes the 2-D FFT, and delegates to denoise() for the
    chosen strategy/fraction.
    """
    img = cv2.imread(iname, cv2.IMREAD_UNCHANGED)
    vertical = img.shape[0]
    horizontal = img.shape[1]
    new_shape = (changeSize(vertical), changeSize(horizontal))
    img = cv2.resize(img, new_shape)
    img_FFT = fft_2d(img)
    plt.subplot(121)
    plt.imshow(img)
    # test=0 -> denoise() draws into subplot 122 next to the original
    denoise(img_FFT, type, precentage, 0)
    plt.show()
# this method is used for the test mode
# it produces 3 subplots using different denoise methods
def mode_2_test (iname, precentage):
    """Test mode for denoising: show all three strategies side by side.

    The last argument to denoise() doubles as the subplot selector (131/132/133)
    when non-zero.
    """
    img = cv2.imread(iname, cv2.IMREAD_UNCHANGED)
    vertical = img.shape[0]
    horizontal = img.shape[1]
    new_shape = (changeSize(vertical), changeSize(horizontal))
    img = cv2.resize(img, new_shape)
    img_FFT = fft_2d(img)
    denoise(img_FFT, 1, precentage, 1)
    denoise(img_FFT, 2, precentage, 2)
    denoise(img_FFT, 3, precentage, 3)
    plt.show()
# Firstly, we take the FFT of the image to compress it.
# The compression comes from setting some Fourier coefficients to zero calling compress_f.
# we experiment on various parameters from compression
def mode_3 (iname):
    """Compression mode: show the image at six compression levels.

    Takes the FFT once, then calls compress_f at 0/25/40/60/80/95 percent
    (which also writes one CSV of coefficients per level) and arranges the
    reconstructions in a 3x2 grid.
    """
    filename = iname.split('.')[0]
    img = cv2.imread(iname, cv2.IMREAD_UNCHANGED)
    vertical = img.shape[0]
    horizontal = img.shape[1]
    new_shape = (changeSize(vertical), changeSize(horizontal))
    img = cv2.resize(img, new_shape)
    img_FFT = fft_2d(img)
    compress_1 = compress_f(img_FFT, filename, 0)
    compress_2 = compress_f(img_FFT, filename, 0.25)
    compress_3 = compress_f(img_FFT, filename, 0.4)
    compress_4 = compress_f(img_FFT, filename, 0.6)
    compress_5 = compress_f(img_FFT, filename, 0.8)
    compress_6 = compress_f(img_FFT, filename, 0.95)
    plt.subplot(321), plt.imshow(compress_1.real, cmap='gray')
    plt.title("0% compression"), plt.xticks([]), plt.yticks([])
    plt.subplot(322), plt.imshow(compress_2.real, cmap='gray')
    plt.title("25% compression"), plt.xticks([]), plt.yticks([])
    plt.subplot(323), plt.imshow(compress_3.real, cmap='gray')
    plt.title("40% compression"), plt.xticks([]), plt.yticks([])
    plt.subplot(324), plt.imshow(compress_4.real, cmap='gray')
    plt.title("60% compression"), plt.xticks([]), plt.yticks([])
    plt.subplot(325), plt.imshow(compress_5.real, cmap='gray')
    plt.title("80% compression"), plt.xticks([]), plt.yticks([])
    plt.subplot(326), plt.imshow(compress_6.real, cmap='gray')
    plt.title("95% compression"), plt.xticks([]), plt.yticks([])
    plt.show()
# this method is used to resize the image
def changeSize (n):
    """Return the smallest power of two >= n (pads image dims for the radix-2 FFT).

    Fix: the original float-log version always doubled exact powers of two
    (e.g. 64 -> 128), wasting work, and was exposed to floating-point
    rounding; the integer bit_length form avoids both.
    """
    return 1 << (n - 1).bit_length()
# simply perform the FFT and output a one by two subplot
# of the original image and next to it its Fourier transform.
def mode_1 (iname) :
    """Fast mode: show the grayscale image next to its 2-D FFT.

    Dimensions are padded to powers of two; the transform magnitude is shown
    on a log scale (FFT magnitudes span many orders of magnitude).
    """
    img = cv2.imread(iname, cv2.IMREAD_GRAYSCALE)
    vertical = img.shape[0]
    horizontal = img.shape[1]
    new_shape = (changeSize(vertical), changeSize(horizontal))
    img = cv2.resize(img, new_shape)
    img_FFT = fft_2d(img)
    plt.figure("Mode_1")
    plt.subplot(121)
    plt.imshow(img)
    plt.subplot(122)
    plt.imshow(np.abs(img_FFT), norm=colors.LogNorm())
    plt.show()
# this produces the two subplots
# of the Fourier transform we implemented and next to it the built in fft2 function in numpy.
def mode_1_test (iname):
    """Test mode: show this module's FFT next to numpy's fft2 for comparison."""
    img = cv2.imread(iname, cv2.IMREAD_GRAYSCALE)
    vertical = img.shape[0]
    horizontal = img.shape[1]
    new_shape = (changeSize(vertical), changeSize(horizontal))
    img = cv2.resize(img, new_shape)
    img_FFT = fft_2d(img)
    plt.figure("Mode_1_test")
    plt.subplot(121)
    plt.imshow(np.abs(img_FFT), norm=colors.LogNorm())
    # reference implementation for visual verification
    img_FFT_2 = np.fft.fft2(img)
    plt.subplot(122)
    plt.imshow(np.abs(img_FFT_2), norm=colors.LogNorm())
    plt.show()
def parseArgs():
    """Build the command-line interface and return the parsed namespace.

    Options: -m mode (1..4, default 1), -i image path, -t test mode flag.
    """
    parser = argparse.ArgumentParser()
    mode_help = ''.join([
        "[1] (Default) for fast mode where ther image is converted into its FFT form and displayed",
        "[2] for denoising where the image is denoised by applying an FFT, truncating high frequencies and then displyed",
        "[3] for compressing and saving the image",
        "[4] for plotting the runtime graphs for the report",
    ])
    parser.add_argument('-m', dest='mode', type=int, default=1, help=mode_help)
    parser.add_argument('-i', dest='image', type=str, default='moonlanding.png',
                        help='image to process')
    parser.add_argument('-t', dest='test', type=int, default=0,
                        help='this mode is used to test the program')
    return parser.parse_args()
if __name__ == '__main__':
    # Defaults used only if parsing fails before the assignments below.
    mode = 1
    image = "moonlanding.png"
    try :
        result = parseArgs()
    except BaseException as e:
        # argparse exits via SystemExit on bad input; report and quit
        print("ERROR\tIncorrect input syntax: Please check arguments and try again")
        exit(1)
    mode = result.mode
    image = result.image
    test = result.test
    # Test flags take priority over the regular mode dispatch.
    if (test == 1):
        mode_1_test(image)
    elif (test == 2):
        mode_2_test(image, 0.1)
    elif (mode ==1):
        mode_1(image)
    elif (mode == 2):
        # defaults: denoise strategy 1, keep fraction 0.1
        mode_2(image, 1, 0.1)
    elif (mode == 3):
        mode_3(image)
    elif (mode == 4):
        mode_4()
    else:
        print("mode not recognized")
        exit(1)
06ffc6e519de28b93869e1e970bd7aea82f1b816 | Python | IvanPiankov/EPAM_Piankov_Ivan | /homework3/test/test_cache2.py | UTF-8 | 529 | 3.09375 | 3 | [] | no_license | import random
import pytest
from homework3.task1.cache_2.cache_2 import cache
@cache(times=2)
def random_choice_func(a, b, c):
    # Random value in range(a, b, c); wrapped so the cached result can be
    # reused `times` times before a fresh draw.
    return random.randrange(a, b, c)
def test_not_integer_error():
    # cache() must reject a non-integer `times` argument.
    with pytest.raises(TypeError, match="times should be integer"):
        cache("a")
def test_of_random_choice_function():
    # With times=2, the first two calls should share one cached draw and the
    # third should be recomputed.
    first_result = random_choice_func(5, 6, 1)
    second_result = random_choice_func(5, 6, 1)
    third_result = random_choice_func(5, 6, 1)
    # NOTE(review): randrange(5, 6, 1) can only return 5, so this compares
    # True != 5 and passes trivially rather than exercising the cache — confirm.
    assert (first_result == second_result) != third_result
| true |
def slices(in_string, size):
    """Return every contiguous run of `size` digits in `in_string`, each as a
    list of ints.

    Raises ValueError when size is < 1 or exceeds the string length.
    """
    if size < 1 or size > len(in_string):
        raise ValueError('Slice size must be a positive integer less than or equal to the string length')
    window_count = len(in_string) - size + 1
    return [[int(ch) for ch in in_string[start:start + size]]
            for start in range(window_count)]
| true |
11c406c8df73d2268ab6cfa25116610aec1f6657 | Python | DmitryKgit/algorithms | /7. Цикл while/7Z_onedir_memory_better.py | UTF-8 | 3,508 | 3.578125 | 4 | [] | no_license | '''
Задача №3667. Исполнитель Водолей
У исполнителя “Водолей” есть два сосуда, первый объемом A литров,
второй объемом B литров, а также кран с водой. Водолей может
выполнять следующие операции:
Наполнить сосуд A (обозначается >A).
Наполнить сосуд B (обозначается >B).
Вылить воду из сосуда A (обозначается A>).
Вылить воду из сосуда B (обозначается B>).
Перелить воду из сосуда A в сосуд B (обозначается как A>B).
Перелить воду из сосуда B в сосуд A (обозначается как B>A).
Команда переливания из одного сосуда в другой приводят к тому,
что либо первый сосуд полностью опустошается,
либо второй сосуд полность наполняется.
Входные данные
Программа получает на вход три натуральных числа A, B, N, не превосходящих 10^4.
Выходные данные
Необходимо вывести алгоритм действий Водолея, который позволяет получить в
точности N литров в одном из сосудов, если же такого алгоритма не существует,
то программа должна вывести текст Impossible.
Количество операций в алгоритме не должно превышать 10^5.
Гарантируется, что если задача имеет решение, то есть решение,
которое содержит не более, чем 10^5 операций.
Тесты к этой задаче закрытые.
Примеры
входные данные
3
5
1
выходные данные
>A
A>B
>A
A>B
входные данные
3
5
6
выходные данные
Impossible
Цикл while - из большего в меньшее.
Максимальное процессорное время 0.212
Максимальный расход памяти 30982144
Максимальное астрономическое время 0.214
LIMIT = 10^5
100%
'''
# "Vodoley" water-pouring solver: vessels of sizes A and B, target amount N.
# Strategy: repeatedly fill the smaller vessel and pour it into the larger one
# (emptying the larger whenever it overflows) until either vessel holds N.
A = int(input())
B = int(input())
N = int(input())
capA = 0  # current amount in the smaller vessel
capB = 0  # current amount in the larger vessel
LIMIT = 100000
count = 0
print_list = []
Min = 'A'
Max = 'B'
if A < N and B < N:  # N exceeds both vessels: unreachable
    print('Impossible')
elif A == N:
    print('>A')
elif B == N:
    print('>B')
else:
    if A > B:  # keep A as the smaller vessel (swap the labels as well)
        Min, Max = Max, Min
        A, B = B, A
    while count < LIMIT and capA != N and capB != N:
        capB += A
        print_list.append('>' + Min + '\n' + Min + '>' + Max)
        if capB > B:
            capA = capB % B  # after filling B, the remainder fits back in A
            if capA != N:
                print_list.append(Max + '>' + '\n' + Min + '>' + Max)
            capB = capA
        count += 1
    if count == LIMIT:  # no solution found within 10^5 operations
        print('Impossible')
    else:
        print('\n'.join(print_list))
| true |
091ec09e7f30ecbda36ee76f0a74564883100da5 | Python | Ranger-222/Jarvis-AI | /jarvis.py | UTF-8 | 3,147 | 2.78125 | 3 | [] | no_license | import pyttsx3
import speech_recognition as sr
import datetime
import wikipedia
import webbrowser
import os
import random
# Text-to-speech engine (Windows SAPI5 backend).
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
# NOTE(review): assumes at least two installed voices — index 1 may not exist.
engine.setProperty('voice',voices[1].id)
# Song index chosen once at startup, used by the 'play music' command below.
randNo = random.randint(0 , 7)
i = randNo
def speak(audio):
    # Say the given text aloud via the TTS engine (blocks until finished).
    engine.say(audio)
    engine.runAndWait()
def wishme():
    # Greet the user according to the local hour, then introduce the assistant.
    hour = int(datetime.datetime.now().hour)
    if hour>=0 and hour<12:
        speak("Good Morning !")
    elif hour>=12 and hour<18:
        speak("Good After Noon !")
    else:
        speak("Good Evening !")
    speak(" I am Jarvis. How May I help you?")
def takeCommand():
    # Capture one spoken command from the microphone and return it as text.
    # On any recognition failure, returns the literal string "None"
    # (not the None object) after apologizing to the user.
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("Listening...")
        # r.adjust_for_ambient_noise(source,duration=1)
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recogninzing..")
        query = r.recognize_google(audio, language='en-in')
        print(f"User Said : {query} \n")
    except Exception as e:
        # print(e)
        print("Say that aagain please")
        speak("Sorry! Say that again please.")
        return "None"
    return query
if __name__ == '__main__':
    wishme()
    # while True :
    if 1:  # single-shot: one command per run (the loop was left commented out)
        query = takeCommand().lower()
        if 'wikipedia' in query:
            speak('Searching Wikipedia...')
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=2)
            speak("According to Wikipedia")
            print(results)
            speak(results)
        # NOTE(review): the trailing space in 'open youtube ' (and in
        # 'the time ' below) means a bare "open youtube" query never matches
        # this branch — likely unintended.
        elif 'open youtube ' in query:
            webbrowser.open("youtube.com")
            speak(" Okay , Opening Youtube")
        elif 'open google' in query:
            webbrowser.open('google.com')
            speak(" Okay , Opening Google")
        elif 'open whatsapp' in query:
            webbrowser.open('https://web.whatsapp.com/')
            speak(" Okay , Opening Whatsapp")
        elif 'play music' in query:
            music_dir = 'D:\\adobe\\PYTHON\\JARVIS\\songs'
            songs = os.listdir(music_dir)
            # print(songs)
            # plays the song at the random index chosen at startup;
            # NOTE(review): IndexError if the folder has fewer than 8 files.
            os.startfile(os.path.join(music_dir,songs[i]))
        elif 'the time ' in query:
            strTime = datetime.datetime.now().strftime("%H:%M:%S")
            speak(f"The time now is {strTime}")
        elif 'open visual studio code' in query:
            codePath = "C:\\Users\\NIKHIL PC\\AppData\\Local\\Programs\\Microsoft VS Code\\Code.exe"
            os.startfile(codePath)
            speak(" Okay , Opening VS code")
        elif 'open zoom' in query:
            zoomPath = "C:\\Users\\NIKHIL PC\\AppData\\Roaming\\Zoom\\bin\\Zoom.exe"
            os.startfile(zoomPath)
            speak(" Okay , Opening zoom")
        elif 'open microsoft word' in query:
            msWord = "C:\\Program Files\\Microsoft Office\\root\\Office16\\WINWORD.EXE"
            os.startfile(msWord)
            speak(" Okay , Opening MS word")
67bd9ce9e77d07dbc1610a80aef1ac4f8289abe2 | Python | Sanket-Mathur/CodeChef-Practice | /HRDSEQ.py | UTF-8 | 260 | 2.984375 | 3 | [] | no_license | T = int(input())
for t in range(T):
    # Build the first N terms of a Van Eck-style sequence: each new term is
    # the distance back to the previous occurrence of the last term
    # (0 when the last term has not appeared before).
    N = int(input())
    L = [0]
    while(len(L) < N):
        x = L[-1]
        k = -1
        # scan backwards for the previous occurrence of x
        for i in range(len(L)-2, -1, -1):
            if L[i] == x:
                k = i
                break
        if(k==-1):
            L.append(0)
        else:
            L.append(len(L)-1-k)
    # answer: how many times the final term appears in the whole sequence
    print(L.count(L[-1]))
| true |
799cd66ad8e23c810f8a3f7090d6d4c4fd0d7425 | Python | NguyenPhuTrang/BT-V-Bi-u- | /File/vidu8.py | UTF-8 | 280 | 3.265625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
# Draw squares and cubes into two cells of a 2x2 subplot grid.
x = np.arange(1, 5)
y = x**3
fig, ax = plt.subplots(nrows=2, ncols=2, figsize=(6, 6))  # fix: was `fix` (typo)
ax[0, 1].plot([1, 2, 3, 4], [1, 4, 9, 16], "go")  # squares, green dots
ax[1, 0].plot(x, y, 'r^')                         # cubes, red triangles
ax[0, 1].set_title("Squares")  # fix: title was misspelled "Squeres"
ax[1, 0].set_title("Cubes")
plt.show()
f5b154ee2cb3dde8fe0ed5cd534359b89349ddbe | Python | jeanyego/Atm | /atm.py | UTF-8 | 1,212 | 3.46875 | 3 | [] | no_license | name = input("Enter your Name:\n")
# Toy ATM: authenticate against parallel name/password tuples, then offer
# withdraw / deposit / report options.
users =('Jean','Lynn','Jy')
balance = 50000  # shared starting balance (never updated — see notes below)
userspwd =('Jeanpwd','Lynnpwd','Jypwd')
if (name in users):
    password = input ("Enter password:\n")
    # name and password tuples are aligned by index
    userID = users.index(name)
    if (password ==userspwd[userID] ):
        print('Welcome %s' %name)
        from datetime import datetime
        current_datetime = datetime.now()
        print(current_datetime)
        print('These are the available options:')
        print('1. Withdraw')
        print('2. Deposit?')
        print('3. Report an issue')
        Option = int (input('Select an option:'))
        if (Option == 1):
            # NOTE(review): no balance check and `balance` is not reduced.
            withdrawcash = input('How much would you like to withdraw?\n')
            print('Take your cash', withdrawcash)
        elif (Option == 2):
            # NOTE(review): deposit is read but `balance` is never increased;
            # the unchanged starting balance is printed.
            depositcash = input('How much would you like to deposit?\n')
            print(balance)
        elif (Option == 3):
            report = input('What issue will you like to report?\n')
            print('Thank you for contacting us')
        else:
            print('Invalid Option')
    else:
        print("Password incorrect, Try again")
else:
    print("Name not found, Visit our branch to create an account")
| true |
7b20b76610c36c2cd8d491f1a32505585d0571de | Python | REX51/competitive-programming | /shift.py | UTF-8 | 52 | 3.59375 | 4 | [] | no_license | s = 'ABCZ'
s = list(s)  # split the string into a list of single characters
for i in s:
    # ord() gives each character's Unicode code point (e.g. 'A' -> 65)
    print(ord(i))
4c6fe09f65979f5afa3f762f350cc714756c921b | Python | MahshidZ/python-recepies | /test/find_range_bst_tests.py | UTF-8 | 1,172 | 3.25 | 3 | [] | no_license | from nose.tools import *
from basic_bst import *
from find_range_bst import *
def test_range_count():
    # All three range-finder strategies must agree on the number of BST
    # values inside [2, 4] inclusive: here 2, 3 and 4 -> 3.
    my_bst = BST()
    my_bst.insert(3)
    my_bst.insert(5)
    my_bst.insert(4)
    my_bst.insert(2)
    range_finder1 = RangeFinderInOrderStrategy(my_bst, 2, 4)
    count = range_finder1.count_range()
    assert_equal(count, 3)
    range_finder2 = RangeFinderCompareStrategy(my_bst, 2, 4)
    count = range_finder2.count_range()
    assert_equal(count, 3)
    range_finder3 = RangeFinderGetNextStrategy(my_bst, 2, 4)
    count = range_finder3.count_range()
    assert_equal(count, 3)
def test_print_range():
    # Smoke test: printing the values in [13, 23] must not raise for any
    # strategy (output itself is not asserted, only exercised).
    my_bst = BST()
    my_bst.insert(20)
    my_bst.insert(14)
    my_bst.insert(18)
    my_bst.insert(15)
    my_bst.insert(19)
    my_bst.insert(17)
    my_bst.insert(16)
    my_bst.insert(23)
    my_bst.insert(26)
    my_bst.insert(22)
    my_bst.insert(21)
    my_bst.insert(30)
    my_bst.insert(24)
    my_bst.insert(25)
    range_finder1 = RangeFinderInOrderStrategy(my_bst, 13, 23)
    range_finder1.print_range()
    range_finder2 = RangeFinderCompareStrategy(my_bst, 13, 23)
    range_finder2.print_range()
    range_finder3 = RangeFinderGetNextStrategy(my_bst, 13, 23)
    range_finder3.print_range()
| true |
3266ba5f5b79d74a3161c2562699402235822083 | Python | knesto/Zipf-Distribution | /zipf.py | UTF-8 | 2,316 | 3.375 | 3 | [] | no_license | import math
import random
alpha=1  # Zipf shape (exponent) parameter
n=10  # number of distinct ranks in the distribution
x = -1  # LCG state; set by the first rand_val(seed) call with seed > 0
m_bFirst = True #// Static first time flag: normalization constant not yet computed
c=0 #Normalization constant
def nextZipf():
    """Draw one Zipf(alpha)-distributed rank in [0, n-1] by inverse-CDF sampling.

    Uses module globals: alpha and n define the distribution; x is the LCG
    state advanced via rand_val; m_bFirst / c lazily hold the normalization
    constant 1 / sum(1/i^alpha).
    """
    global alpha, n, x, m_bFirst, c
    zipf_value = 0
    # Compute normalization constant on first call only
    if m_bFirst:
        for i in range(1, n + 1):
            c = c + (1.0 / math.pow(float(i), alpha))
        c = 1.0 / c
        m_bFirst = False
    # Pull a uniform random number, rejecting the endpoints (0 < z < 1).
    # Bug fix: the original tested (z != 0) or (z != 1), which is always
    # true, so z == 0 / z == 1 were never actually rejected.
    while True:
        z = rand_val(0)
        if z != 0 and z != 1:
            break
    # Map z to the smallest rank whose cumulative probability reaches z.
    sum_prob = 0
    for i in range(1, n + 1):
        sum_prob = sum_prob + c / math.pow(float(i), alpha)
        if sum_prob >= z:
            zipf_value = i
            break
    # shift the 1-based rank to 0-based for use as a list index
    return int(zipf_value) - 1
"""//=========================================================================
// = Multiplicative LCG for generating uniform(0.0, 1.0) random numbers =
// = - x_n = 7^5*x_(n-1)mod(2^31 - 1) =
// = - With x seeded to 1 the 10000th x value should be 1043618065 =
// = - From R. Jain, "The Art of Computer Systems Performance Analysis," =
// = John Wiley & Sons, 1991. (Page 443, Figure 26.2) =
//========================================================================="""
def rand_val(seed):
    """Park-Miller multiplicative LCG: x_n = 7^5 * x_(n-1) mod (2^31 - 1).

    Call once with seed > 0 to seed the generator (returns 0.0); thereafter
    call with 0 to get the next uniform(0, 1) value. With x seeded to 1 the
    10000th x value is 1043618065 (R. Jain, "The Art of Computer Systems
    Performance Analysis", fig. 26.2).
    """
    global x
    a = 16807         # Multiplier
    m = 2147483647    # Modulus (2^31 - 1)
    q = 127773        # m div a
    r = 2836          # m mod a
    # Set the seed if argument is non-zero and then return zero
    if seed > 0:
        x = seed
        return 0.0
    # Schrage's algorithm using integer arithmetic only.
    # Bug fix: the original used x / q, which is *float* division in
    # Python 3 and silently corrupts the generator state; Schrage's
    # method requires the integer quotient x // q.
    x_div_q = x // q   # x divided by q (integer quotient)
    x_mod_q = x % q    # x modulo q
    x_new = (a * x_mod_q) - (r * x_div_q)  # New x value
    if x_new > 0:
        x = x_new
    else:
        x = x_new + m
    # Return a random value between 0.0 and 1.0
    return float(x) / m
def main():
    # Draw 1000 Zipf samples and print the hit count for each rank 0..9.
    pin= [0] * 10
    count=0
    nRand= random.random()
    # NOTE(review): rand_val expects a positive *integer* seed; seeding with
    # a float in (0, 1) leaves the LCG state as a float and degrades the
    # stream — confirm intended.
    rand_val(nRand)
    for i in range(1000):
        nZipf = int(nextZipf())
        pin[nZipf]=(pin[nZipf]+1)
    for j in range(len(pin)):
        print(pin[j]," ")
if __name__=="__main__":
    main()
| true |
d6f36f66b92dc506ff96dd81ff08c8017e9d7993 | Python | jangui/archive | /Python-Scripts/base_changer/run.py | UTF-8 | 1,017 | 3.3125 | 3 | [] | no_license | #!/usr/bin/env python3
import code.basechanger as bc
import argparse
import sys
class MyParser(argparse.ArgumentParser):
    """Argument parser that shows the full help text whenever parsing fails."""

    def error(self, message):
        """Write the error to stderr, dump the usage help, and exit with status 1."""
        print('error: %s' % message, file=sys.stderr)
        self.print_help()
        sys.exit(1)
def main():
    # Parse the three positional arguments, convert the number between bases
    # via the project helper, and print a human-readable result line.
    parser = MyParser(description="Change a number from one counting system (base) to another.")
    parser.add_argument("input_number", help="Desired number you wish to change base",type=str)
    parser.add_argument("input_base", help="What base is your input number in?",type=int)
    parser.add_argument("output_base", help="Desired base you wish to change the number to",type=int)
    args = parser.parse_args()
    answer = bc.nbase_change(input_number = args.input_number,
            input_base = args.input_base, output_base = args.output_base)
    print("\n" + args.input_number + "(Base-" + str(args.input_base) +
            ") in Base-" + str(args.output_base) + " is: "+ str(answer))
if __name__ == "__main__":
    main()
| true |
1da44791e57e2ff8f11ee7e210b086ea3b5aa578 | Python | mikeboydbrowne/py_reranker | /rerank | UTF-8 | 2,103 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
from numuntrans_russian import numUntrans
import optparse
import sys
# CLI options: the k-best list file plus the three model weights and the
# untranslated-word weight. (This file is Python 2.)
optparser = optparse.OptionParser()
optparser.add_option("-k", "--kbest-list", dest="input", default="data/dev+test.100best", help="100-best translation lists")
optparser.add_option("-l", "--lm", dest="lm", default=-0.01221363, type="float", help="Language model weight")
optparser.add_option("-t", "--tm1", dest="tm1", default=-0.01734393, type="float", help="Translation model p(e|f) weight")
optparser.add_option("-s", "--tm2", dest="tm2", default=-0.02476659, type="float", help="Lexical translation model p_lex(f|e) weight")
optparser.add_option("-u", "--ut", dest="ut", default=.5 , type="float", help="Number of untranslated words")
(opts, _) = optparser.parse_args()
# feature-name -> weight, matching the "k=v" feature strings in the input
weights = {'p(e)' : float(opts.lm) ,
           'p(e|f)' : float(opts.tm1),
           'p_lex(f|e)' : float(opts.tm2)}
# each input line is "num ||| hypothesis ||| features", 100 lines per sentence
all_hyps = [pair.split(' ||| ') for pair in open(opts.input)]
num_sents = len(all_hyps) / 100 # of 100 sentence groupings (Py2 integer division)
# Rerank: for each group of 100 hypotheses, emit the highest-scoring one.
for s in xrange(0, num_sents):
    hyps_for_one_sent = all_hyps[s * 100:s * 100 + 100]  # the 100 hypotheses for sentence s
    (best_score, best) = (-1e300, '')                    # running best (score, hypothesis)
    for (num, hyp, feats) in hyps_for_one_sent:
        score = 0.0
        # weighted sum of the model features (p(e), p(e|f), p_lex(f|e))
        for feat in feats.split(' '):
            (k, v) = feat.split('=')
            score += weights[k] * float(v)
        # Bug fix: the original did `numUntrans = numUntrans(hyp)`, which
        # shadowed the imported function and raised TypeError on the next
        # hypothesis; the stray debug prints ("hi", hyp) also corrupted the
        # reranked output written to stdout.
        untrans_count = numUntrans(hyp)
        score += untrans_count * opts.ut  # weight for untranslated words
        if score > best_score:
            (best_score, best) = (score, hyp)
    try:
        sys.stdout.write("%s\n" % best)  # best hypothesis for this sentence
    except (Exception):
        sys.exit(1)
| true |
ccaf1dae975bee298e2fe8c4d545e6b481638ba8 | Python | umbc-hackafe/salt-rules | /_modules/dns.py | UTF-8 | 1,603 | 2.8125 | 3 | [] | no_license | from copy import deepcopy
import socket
def is_listdict(d):
    """Return True when d is a list whose every element is a single-entry dict
    (the shape Salt pillar data uses for ordered mappings)."""
    if not isinstance(d, list):
        return False
    return all(isinstance(item, dict) and len(item) == 1 for item in d)
def resolve(hostname):
    # Resolve a hostname to an IPv4 address via the system resolver.
    # NOTE: performs a live DNS/hosts lookup; raises socket.gaierror on failure.
    return socket.gethostbyname(hostname)
def merge_listdict(a, b):
    """Merge two lists of single-entry dicts.

    Flattens each list into one plain dict, merges b's dict into a's via
    merge(), and re-wraps the result as a list of single-entry dicts.
    NOTE: duplicate keys within one list collapse to the last occurrence,
    and the inputs themselves are not mutated (unlike merge()).
    """
    a_dict = {}
    b_dict = {}
    for elm in a:
        a_dict.update(elm)
    for elm in b:
        b_dict.update(elm)
    res_dict = merge(a_dict, b_dict)
    return [{k: v} for k, v in res_dict.items()]
def merge(a, b, path=None):
    """Recursively merge b into a and return the merged structure.

    Mutates `a` in place for the dict case; lists of single-entry dicts are
    merged via merge_listdict (which returns a new list). Conflicting leaf
    values are resolved in favor of b. `path` accumulates the key path but
    is currently unused — presumably intended for error reporting (TODO).
    """
    if path is None: path = []
    if is_listdict(a) and is_listdict(b):
        return merge_listdict(a, b)
    else:
        for key in b:
            if key in a:
                if isinstance(a[key], dict) and isinstance(b[key], dict):
                    merge(a[key], b[key], path + [str(key)])
                elif a[key] == b[key]:
                    pass # same leaf value
                else:
                    # conflict: b wins
                    a[key] = b[key]
            else:
                a[key] = b[key]
        return a
def static_resolve(host):
    """Resolve a host to its statically-pillared IP, falling back to the name.

    Walks the `dns:networks` pillar (each network merged over `dns:defaults`),
    and if `host` is qualified with a network's domain and listed under that
    network's hosts, returns its configured 'ip'. Otherwise returns `host`
    unchanged. `__salt__` is injected by the Salt loader (not defined here).
    """
    if host == 'localhost':
        return host
    defaults = __salt__['pillar.get']("dns:defaults", {})
    for name, network in __salt__['pillar.get']("dns:networks", {}).items():
        # deepcopy so merging per-network settings never mutates the defaults
        network = merge(deepcopy(defaults), network)
        domain = network['options']['domain-name']
        if host.endswith('.' + domain):
            unqualified_host = host[:-len(domain)-1]
            if unqualified_host in network.get('hosts', {}):
                return network['hosts'][unqualified_host].get('ip', host)
    return host
| true |
d20988754b881942168841e821b0ddb4cd7d6750 | Python | QHJ2017/MyOwnMachineLearning | /AdaBoost.py | UTF-8 | 1,956 | 3.1875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
这个是我自己写的,可以看到,几乎不是AdaBoost,只是人工模拟了一下。
'''
import math
def Step(TestSet, Labels, E_in, g):
    """Run one AdaBoost round over the 10 fixed samples.

    TestSet/Labels: the 10 inputs and their +/-1 labels.
    E_in: current sample weights — mutated in place to the reweighted,
          normalized distribution.
    g:    weak classifier mapping an input to +1 / -1.
    Returns (alpha, E_in): the classifier weight and the updated weights.
    """
    weights = E_in  # alias: updates below intentionally mutate the caller's list
    # +1 where the weak learner agrees with the label, -1 where it errs
    signs = [1 if Labels[i] == g(TestSet[i]) else -1 for i in range(10)]
    # weighted error = total weight of the misclassified samples
    err = sum(weights[i] for i in range(10) if signs[i] == -1)
    alpha = 0.5 * math.log((1 - err) / err)
    # reweight: shrink correct samples, grow misclassified ones, renormalize
    unnorm = [weights[i] * math.exp(-alpha * signs[i]) for i in range(10)]
    total = sum(unnorm)
    for i in range(10):
        weights[i] = unnorm[i] / total
    return alpha, weights
def AdaBoost(x, TestSet, Labels):
    # x is the number to classify; runs three fixed boosting rounds with the
    # weak learners G1, G2, G3 and returns the weighted vote sum
    # (its sign is the predicted label).
    E1 = [] # initial sample weights: uniform 1/10
    for i in range(10):
        E1.append(0.1)
    alpha_1, E2 = Step(TestSet, Labels, E1, G1)
    alpha_2, E3 = Step(TestSet, Labels, E2, G2)
    alpha_3, E4 = Step(TestSet, Labels, E3, G3)
    # print 'alpha_3: ', alpha_3
    return (alpha_1 * G1(x)) + (alpha_2 * G2(x)) + (alpha_3 * G3(x))
def G1(x):
    """Weak classifier #1: +1 when x is below 2.5, otherwise -1."""
    return 1 if x < 2.5 else -1
def G2(x):
    """Weak classifier #2: +1 when x is below 8.5, otherwise -1."""
    return 1 if x < 8.5 else -1
def G3(x):
    """Weak classifier #3: -1 when x is below 5.5, otherwise +1."""
    return -1 if x < 5.5 else 1
def AdaBoostCalculate(x, layer=3, step=1):
    # Demo driver over the fixed 10-sample training set.
    # NOTE(review): `x`, `layer` and `step` are never actually used for
    # classification — the loop prints the ensemble score for every i in
    # 0..9 and the function always returns 0. (Python 2 print statements.)
    TestSet = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
    Labels = [1, 1, 1, -1, -1, -1, 1, 1, 1, -1]
    AdaBoost(1, TestSet, Labels)
    for i in range(10):
        print AdaBoost(i, TestSet, Labels)
    return 0
print AdaBoostCalculate(3)
| true |
82ad625163f769fa83267c3f29949f489982eae1 | Python | lingler412/UDEMY_PYTHON | /second_string_method_exercise.py | UTF-8 | 322 | 3.75 | 4 | [] | no_license | the_string = "North Dakota"
print(the_string.rjust(17))  # right-justify to width 17 (space padding)
print(the_string.ljust(17, "*"))  # left-justify to width 17, padded with '*'
center_plus = (the_string.center(16, "+"))  # center to width 16, padded with '+'
print(center_plus)
# NOTE: lstrip("North") strips any leading chars from the SET {N,o,r,t,h},
# not the literal prefix — and strip methods are case sensitive.
print(the_string.lstrip("North")) # strip method is case sensitive
print(center_plus.rstrip("+"))  # remove trailing '+'
print(center_plus.strip("+"))  # remove '+' from both ends
print(the_string.replace("North", "South"))
1473233883e915139f4035aadefe02eee7725f2d | Python | cnokello/esb | /lib/refunite_etl/writers/notification_writer.py | UTF-8 | 9,610 | 2.765625 | 3 | [] | no_license | """NotificationWriter: Sends notifications using SMS and E-mail channels
@author: okello
@created: 26-aug-2015
"""
import logging
import re
import json
import time, datetime
class MessageTemplate(object):
    # One notification message template, parsed from a pipe-delimited record:
    # "type | channel | language | subject | message". The message text may
    # contain [[placeholder]] fields filled in by get_composed_msg().
    def __init__(self, line):
        """Parse a pipe-delimited template record into this object's fields.

        Raises IndexError if the line has fewer than five fields.
        """
        self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
        self.logger.info("Creating a message template object...")
        # maxsplit=-1 is the default (split on every "|")
        record = line.strip().split("|", -1)
        self.notification_type = record[0].strip()
        self.channel = record[1].strip()
        self.language = record[2].strip()
        self.subject = record[3].strip()
        self.message = record[4].strip()
        self.composed_message = None
        # creation timestamp in UTC, "YYYY-mm-dd HH:MM:SS"
        self.created = datetime.datetime.utcfromtimestamp(
            time.time()).strftime("%Y-%m-%d %H:%M:%S")
    def get_key(self):
        """Return the lookup key "TYPE.CHANNEL.LANGUAGE" (all upper-cased)."""
        return self.notification_type.upper() + "." + \
            self.channel.upper() + "." + self.language.upper()
    def get_composed_msg(self, template_params):
        """Fill every [[placeholder]] in the template with template_params values.

        Stores and returns the composed message string.
        NOTE(review): returns False (not a string) when any placeholder is
        missing from template_params, leaving composed_message unset —
        callers must handle the mixed return type.
        """
        self.logger.debug("Composing message with params: %s..." % template_params)
        # non-greedy match so adjacent placeholders are found separately
        fields = re.findall('\[\[.+?\]\]', self.message, re.DOTALL)
        msg = self.message
        for field in fields:
            clean_field = field.replace("[[", "").replace("]]", "")
            field_value = template_params.get(clean_field, None)
            self.logger.info(
                ("Message Template; Template Params: %s, Field Name: %s, Value: %s" %
                    (template_params, field, field_value)))
            if(field_value is None):
                return False
            msg = msg.replace(field, str(field_value))
        self.composed_message = msg
        self.logger.info("Composed Message: %s" % msg)
        return msg
    def to_db(self):
        """Serialize this template as the DB writer's JSON payload.

        Returns a JSON string: a list with one {"notification": [field, ...]}
        table, each field a {"data_type", "name", "value"} dict. The message
        field carries composed_message (None unless get_composed_msg ran).
        """
        fields = []
        notification_type = {}
        notification_type["data_type"] = "string"
        notification_type["name"] = "notification_type"
        notification_type["value"] = self.notification_type
        fields.append(notification_type)
        channel = {}
        channel["data_type"] = "string"
        channel["name"] = "channel"
        channel["value"] = self.channel
        fields.append(channel)
        language = {}
        language["data_type"] = "string"
        language["name"] = "language"
        language["value"] = self.language
        fields.append(language)
        subject = {}
        subject["data_type"] = "string"
        subject["name"] = "subject"
        subject["value"] = self.subject
        fields.append(subject)
        message = {}
        message["data_type"] = "string"
        message["name"] = "message"
        message["value"] = self.composed_message
        fields.append(message)
        created = {}
        created["data_type"] = "string"
        created["name"] = "created"
        created["value"] = self.created
        fields.append(created)
        table = {}
        table["notification"] = fields
        tables = []
        tables.append(table)
        self.logger.debug("DB Tables: %s" % json.dumps(tables))
        return json.dumps(tables)
    def to_str(self):
        """Return a multi-line human-readable summary of the raw template."""
        return (
            "Notification Type: %s\nChannel: %s\nLanguage: %s\nSubject: %s\nMessage: %s\n" %\
            (self.notification_type, self.channel, self.language, self.subject, self.message))
class Notification(object):
    """Notification model class

    Holds one parsed notification request and knows how to compose the
    per-channel messages and publish them through the broker services.
    """
    def __init__(self, msg):
        """Parse the raw request dict *msg* (expects keys: language, params)."""
        self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
        self.logger.info("Processing notification request: %s..." % msg)
        self.language = msg.get("language").strip().upper()
        params = msg.get("params")
        # Each channel entry carries its own recipient key and template params.
        self.channels = params.get("channels")
        self.notification_type = params.get("type")
        self.recipient_id = params.get("recipient_id")
    def compose_email(self, subject, msg, recipient, sender):
        """Build the e-mail payload dict that gets published to the e-mail topic."""
        self.logger.debug(
            "Composing E-mail Message with params; Subject: %s, \
            Message: %s, Recipient: %s, Sender: %s..." %\
            (subject, msg, recipient, sender))
        email_msg = {}
        email_msg["sender"] = sender
        email_msg["recipient"] = recipient
        email_msg["subject"] = subject
        email_msg["message"] = msg
        return email_msg
    def compose_sms(self, msg, recipient):
        """Build the SMS payload dict that gets published to the SMS topic."""
        self.logger.debug(
            "Composing SMS message with the params; Message: %s,\
            Recipient: %s..." % (msg, recipient))
        sms_msg = {}
        sms_msg["recipient"] = recipient
        sms_msg["message"] = msg
        return sms_msg
    def send(self, msg_templates, parser_broker_service, transformer_broker_service, topic_email, topic_sms):
        """Compose and publish a message for every requested channel.

        Templates are looked up by the key TYPE.CHANNEL.LANGUAGE; channels
        with no matching template are skipped silently. Every matched
        template is also forwarded to the DB writer via
        *transformer_broker_service*.
        """
        self.logger.debug("Sending out notification...")
        for channel_cfg in self.channels:
            channel = channel_cfg.get("channel")
            email_phone = channel_cfg.get("key")
            template_params = channel_cfg.get("template_params")
            key = self.notification_type.upper() + "." + channel.upper() + "." + self.language.upper()
            self.logger.debug(
                "Sending notification; Channel: %s, Email_Phone: %s, Template Params: %s, Key: %s..." %\
                (channel, email_phone, template_params, key))
            msg_template = msg_templates.get(key)
            if msg_template:
                self.logger.debug("Matched Message Template: %s" % msg_template.to_str())
                # NOTE(review): "WEB" channel messages are published to the
                # e-mail topic — confirm this is intended.
                if channel.strip().upper() == "WEB":
                    composed_email_msg = msg_template.get_composed_msg(template_params)
                    composed_email = self.compose_email(msg_template.subject,
                                                        composed_email_msg, email_phone,
                                                        "on@refunite.org")
                    # get_composed_msg returns False when a placeholder is
                    # missing; only publish fully composed messages.
                    if composed_email_msg and len(composed_email_msg.strip()) > 0:
                        parser_broker_service.publish(
                            json.dumps(composed_email), topic_email)
                        self.logger.info(
                            "Published notification message to topic: %s, message: %s" %\
                            (topic_email, composed_email))
                elif channel.strip().upper() == "SMS" or channel.strip().upper() == "USSD":
                    composed_sms_msg = msg_template.get_composed_msg(template_params)
                    composed_sms = self.compose_sms(
                        composed_sms_msg, email_phone)
                    if composed_sms_msg and len(composed_sms_msg.strip()) > 0:
                        parser_broker_service.publish(json.dumps(composed_sms), topic_sms)
                        self.logger.debug("SMS and USSD Composed Message: %s" % composed_sms)
                        self.logger.info("Published notification message to topic: %s, message: %s" %\
                            (topic_sms, composed_sms))
                transformer_broker_service.publish(msg_template.to_db())
class NotificationWriter(object):
    """Processes notification requests

    Consumes raw request messages from the consumer service, strips any
    non-JSON prefix, and hands each request to a Notification instance
    for composition and publishing.
    """
    def __init__(self, cfg, msg_broker_services, msg_consumer_service):
        self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
        self.logger.info("Starting NotificationWriter...")
        # Captures any leading garbage before the first '[' or '{' so it
        # can be stripped prior to json.loads.
        self.find_non_json = re.compile(r"^([^(\[|{)?]*).*")
        self.cfg = cfg
        self.msg_broker_services = msg_broker_services
        self.msg_consumer_service = msg_consumer_service
        self.msg_templates = self.load_notification_templates(
            cfg.get("notifications", "message_templates").strip())
    def load_notification_templates(self, file_path):
        """Load one MessageTemplate per line of *file_path*, keyed by template key."""
        msg_templates = {}
        # NOTE(review): the file is opened in binary mode, so each line is
        # bytes — confirm MessageTemplate expects bytes input.
        with open(file_path, 'rb') as file:
            for line in file:
                msg_template = MessageTemplate(line)
                msg_templates[msg_template.get_key()] = msg_template
        self.logger.debug("Message Templates: %s" % msg_templates)
        return msg_templates
    def process_msg(self, msg):
        """Parse one raw message and dispatch the resulting notification."""
        self.logger.debug("Processing message: %s..." % msg)
        # Strip whatever the prefix regex captured, then parse the JSON body.
        notification_request = json.loads(msg.replace(re.search(
            self.find_non_json, msg).group(1), ""))
        parser_broker_service = self.msg_broker_services.get("parser")
        transformer_broker_service = self.msg_broker_services.get("transformer")
        Notification(notification_request).send(
            self.msg_templates, parser_broker_service, transformer_broker_service, self.cfg.get(
                "global", "topic_email").strip(), self.cfg.get("global", "topic_sms").strip())
    def run(self):
        """Block forever, consuming messages through process_msg."""
        self.msg_consumer_service.consume(self.process_msg)
| true |
5b6e6a9ee40cfb2c8c16b99992b00a238a312ce2 | Python | kag253/SpyOptionsDataCollector | /database_initialize_script.py | UTF-8 | 1,246 | 3 | 3 | [] | no_license | import sqlite3
from sqlite3 import Error
DATABASE_FILE = './options_data.db'
def main(db_file):
    """Create (if necessary) the SQLite database at *db_file* and its
    ``options`` table.

    Args:
        db_file: path to the SQLite database file (``":memory:"`` works too).
    """
    options_table_sql = """
        CREATE TABLE IF NOT EXISTS options (
                                        symbol text NOT NULL,
                                        root_symbol text NOT NULL,
                                        option_type text NOT NULL,
                                        strike real NOT NULL,
                                        expiration text NOT NULL,
                                        quote_timestamp text NOT NULL,
                                        bid real NOT NULL,
                                        ask real NOT NULL,
                                        bidsize int NOT NULL,
                                        asksize int NOT NULL,
                                        stock_quote real NOT NULL,
                                        PRIMARY KEY (symbol, quote_timestamp))
    """
    conn = create_connection(db_file)
    if conn is not None:
        try:
            create_table(conn, options_table_sql)
        finally:
            # The original leaked the connection; always release the handle.
            conn.close()
def create_connection(db_file):
    """ create a database connection to a SQLite database

    Args:
        db_file: path to the database file (``":memory:"`` for in-memory).

    Returns:
        An open ``sqlite3.Connection``, or ``None`` if the connection
        could not be established.
    """
    conn = None  # fixes UnboundLocalError on `return conn` when connect() raises
    try:
        conn = sqlite3.connect(db_file)
        print(sqlite3.version)
    except Error as e:
        print(e)
    return conn
def create_table(conn, create_table_sql):
    """ create a table from the create_table_sql statement
    :param conn: Connection object
    :param create_table_sql: a CREATE TABLE statement
    :return:
    """
    try:
        cursor = conn.cursor()
        cursor.execute(create_table_sql)
    except Error as e:
        # Report the failure but let the caller carry on.
        print(e)
if __name__ == '__main__':
main(DATABASE_FILE) | true |
9c6baef3a407a24f65ec0d549ae94768ff9df2b3 | Python | CassieButter/purple_unicorn | /bot.py | UTF-8 | 7,354 | 3.15625 | 3 | [] | no_license | #!/usr/bin/python3.4
# -*- coding: utf-8 -*-
import random
from dice_parser import DiceParser
from dndbeyond_websearch import Searcher
class PurpleBot:
    """Telegram bot logic: canned replies, dice rolling, initiative, D&D search."""
    def __init__(self):
        self.dice_parser = DiceParser()
        self.dnd_searcher = Searcher()
    def _get_rand(self, max):
        # Uniform integer in [0, max] inclusive.
        return random.randint(0, max)
    def _get_roll(self, die):
        """Roll one *die*-sided die, returning a value in [1, die]."""
        return 1 + self._get_rand(die - 1)
    def get_random_greetings(self, username):
        """Return a random canned greeting; one variant addresses *username*."""
        msgs = ["I'm crazy purple unicorn!!!!!",
                "Tell me 'bout the raaaaabits",
                "I am fluffy! Fluffy-fluffy-fluffy WOLF!",
                "Let me be a leader and I shall endeavor not to get all of us killed.",
                username + ', why are you talking to me?!']
        r = self._get_rand(len(msgs) - 1)
        return msgs[r]
    def get_random_sticker(self):
        """Return a random Telegram sticker file id from the 'Unicorn Stella' pack."""
        stickers = ['CAADAgADOgAD7sShCiK3hMJMvtbhAg',
                    'CAADAgADXwAD7sShCnji8rK8rHETAg',
                    'CAADAgADPgAD7sShChzV1O0OvX5KAg',
                    'CAADAgADPAAD7sShCkDkzhbVa_89Ag',
                    'CAADAgADNAAD7sShCuKlu6OUNCgmAg',
                    'CAADAgADQAAD7sShCgjoFTxdY7vVAg',
                    'CAADAgADpgIAAu7EoQpMlrIZAAFx37kC']
        r = self._get_rand(len(stickers)-1)
        return stickers[r]
    def get_help_message(self):
        """Return the HTML-formatted one-line-per-command help summary."""
        return '<code>/hi</code> - bot will say something\n' + \
               '<code>/roll</code> - roll dices. E.g.: /roll 2d6 + 5\n' + \
               '<code>/r</code> - shortcut for roll command\n' + \
               '<code>/percent</code> - equals to /roll 1d100\n' + \
               '<code>/fc</code> - roll 1d2 and translate results in "head or tails"\n' + \
               '<code>/init</code> - roll dices for initiative (or any saves), result will be sorted; you may also pass your bonuses with your names, e.g.: /init barbarian=2 cleric=0 orc1=1 orc2=1\n' + \
               '<code>/search</code> - look for given query on dndbeyond.com\n' + \
               '<code>/help</code> - this message'
    def get_current_help(self, command):
        """Return detailed HTML help text for a single *command* name."""
        if command == 'hi':
            return '<code>/hi</code> - as an answer to this command bot will send random message OR random sticker from \'Unicorn Stella\'-pack'
        elif command == 'roll' or command == 'r':
            # /roll and /r share one help text parameterised by the alias used.
            return '<code>/' + command + ' [expression - optional]</code> - bot will try to execute given expression.\n' + \
                   'Examples:\n' + \
                   '<code>/' + command + '</code> - roll 1d20\n' + \
                   '<code>/' + command + ' 3d6</code> - roll 1d6 die 3 times and summarize the result\n' + \
                   '<code>/' + command + ' (2+5)*3*(14-2)</code> - just calculate this expression\n' + \
                   '<code>/' + command + ' (2+1)d(17+4) + 2</code> - roll 1d21 die 3 times and add 2 to the sum\n' + \
                   '<code>/' + command + ' 3d6H2</code> - roll 1d6 die 3 times and get only 2 highest results (and sum them)\n' + \
                   '<code>/' + command + ' 4d8L1</code> - roll 1d8 die 4 times and get only 1 lowest result\n' + \
                   '<code>/' + command + ' d</code> - roll 1d20\n' + \
                   '<code>/' + command + ' (1d3)d(5d4H2)L(1d3+1)</code> - any allowed expressions can be combined\n'
        elif command == 'percent':
            return '<code>/percent</code> - roll 1d100'
        elif command == 'init':
            return '<code>/init [list of characters - required, their bonuses - optional]</code> - roll initiative and sort results\n' + \
                   'Examples:\n' + \
                   '<code>/init player1 player2</code> - roll initiative (1d20) for each, initiative bonus is 0 for both\n' + \
                   '<code>/init player1=5 player2 player3=-1</code> - roll initiative (1d20) for each, add initiative bonus: 5 for player1, 0 for player2 and -1 for player3\n' + \
                   'Results look like this:\n' + \
                   '<code>player4 : 23 (18 5 [2])</code> - it means that total result for <code>player4</code> is <code>23</code>: <code>18</code> was rolled and <code>5</code> is a bonus. <code>[2]</code> is additional roll for cases when 2 or more players have similar results and we need just to order them.'
        elif command == 'search':
            return '<code>/search [query - required]</code> - go to dndbeyond.com and look for results'
        elif command == 'help':
            return '<code>/help [command - optional]</code> - get help\n' + \
                   'Examples:\n' + \
                   '<code>/help</code> - get list of all commands with tiny descriptions\n' + \
                   '<code>/help roll</code> - get more help about \'/roll\' command (you can use any of allowed command names)'
        else:
            return 'Nobody can help you, you are doomed.'
    def generate_init(self, participants):
        """Roll initiative for *participants* ((name, bonus) pairs) and return
        sorted HTML results. Ties are broken by bonus, then by an extra 1d10.
        """
        parts_tuples = []
        width = 1
        for p in participants:
            roll = self._get_roll(20)
            # name | result | bonus | roll | additional roll
            char = (p[0], roll+int(p[1]), int(p[1]), roll, self._get_roll(10))
            parts_tuples.append(char)
            # Track the widest name so the <code> column lines up.
            if len(p[0]) > width:
                width = len(p[0])
        res = "Results:"
        for char in sorted(parts_tuples, key=lambda pt: (-pt[1], -pt[2], -pt[4])):
            res += '\n' + \
                   '<code>' + '{0: <{width}}'.format(char[0], width=width) + '</code> : ' + \
                   '<b>' + str(char[1]) + '</b> ' + \
                   '(' + str(char[3]) + ' ' + str(char[2]) + ' [' + str(char[4]) + '])'
        return res
    def roll_msg(self, username, expression):
        """Evaluate a dice *expression* and return an HTML result line."""
        try:
            dice_result = self.dice_parser.parse(expression)
            answer = username + ' rolls:\n' + \
                     dice_result.string + ' = <b>' + str(dice_result.value) + '</b>'
            return answer
        except KeyError:
            return "I will not follow your commands!"
        except Exception:
            return "Oh, c'mon, sweety, stop doing this"
    def flip_coin(self, username):
        """Flip a coin; the answer text is Russian for heads/tails."""
        dice_result = self._get_roll(2)
        return username + ": " + ("орёл" if dice_result == 1 else "решка")
    def execute_search(self, query):
        """Search dndbeyond.com for *query*, skipping forum results, and
        return an HTML summary of up to five compendium hits."""
        if query is None or len(query) == 0:
            return "I don't know what you are looking for"
        results = self.dnd_searcher.search(query)
        compendium_results = [r for r in results if not r.breadcrumbs.upper().startswith("FORUM")]
        if len(compendium_results) == 0:
            return "I've found nothing"
        return 'Found ' + str(len(compendium_results)) + ' result(s)\n\n' + \
               self._search_result_short(compendium_results[0]) + '\n' + \
               self._search_result_snippet(compendium_results[0]) + '\n\n' + \
               '\n'.join(self._search_result_short(sr) for sr in compendium_results[1:5] if sr is not None)
    @staticmethod
    def _search_result_snippet(search_result):
        """Join a result's snippet lines into one block of text."""
        return '\n'.join(str(snippet) for snippet in search_result.snippets)
    @staticmethod
    def _search_result_short(search_result):
        """Render one result as an HTML link line; the apostrophe is
        percent-encoded so Telegram accepts the URL."""
        return '<a href="' + search_result.url.replace("’", "%E2%80%99") + '">' + search_result.title + '</a> ' + \
               '(' + search_result.breadcrumbs + ')\n'
| true |
cba0e305516586e9ee8e0cb1a2ed5b07e4ab1d5e | Python | silenttemplar/tensorflow_example_project | /example/visualization/ch04/multiple_bar_plot/multiple_bar_plot2.py | UTF-8 | 1,429 | 2.90625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
# Overlaid bar chart: two random data series drawn in front of a fixed
# hatched backdrop, each bar labelled with its rounded height.
np.random.seed(0)

n_data = 10
background = 50 * np.ones(n_data)
data1 = np.random.uniform(20, 40, (n_data, ))
data2 = np.random.uniform(10, 20, (n_data, ))

data_idx = np.arange(n_data)
colors = ['tab:blue', 'tab:red']

fig, ax = plt.subplots(figsize=(10, 7))
ax.tick_params(labelsize=15)

# Grey hatched backdrop bars (fixed height 50) behind the data bars.
ax.bar(data_idx, background,
       hatch='/',
       facecolor='whitesmoke',
       edgecolor='silver')
rects1 = ax.bar(data_idx, data1,
                color='tab:blue',
                label=colors[0])
rects2 = ax.bar(data_idx, data2,
                color='tab:red',
                label=colors[1])

y_ticks = ax.get_yticks()
ytick_interval = y_ticks[1] - y_ticks[0]


def _annotate_bars(rects, color, ha):
    """Write each bar's rounded height just above it, rotated vertically."""
    for rect in rects:
        x = rect.get_x()
        width = rect.get_width()
        height = rect.get_height()
        ax.text(x + width/2, height + ytick_interval*0.2,
                s=str(round(height)),
                rotation=90,
                ha=ha,
                fontsize=20,
                color=color)


# The two series share one annotation routine; only the colour and the
# horizontal anchor ('left' vs 'right') differ so the labels don't overlap.
_annotate_bars(rects1, colors[0], 'left')
_annotate_bars(rects2, colors[1], 'right')

ax.grid(axis='y')
plt.legend()
plt.show()
| true |
bcaef3614539f7503ecc9c750096deb0f32618dc | Python | bogbond/FourGarbage | /Route Planning/Algorithm.py | UTF-8 | 8,779 | 2.859375 | 3 | [] | no_license | import pygame, sys, random, heapq
from pygame.locals import *
from enum import Enum
# colours (RGB tuples used by pygame drawing calls)
BLACK = (0, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
BROWN = (153, 76, 0)
WHITE = (255, 255,255)
# tile types: ids index into the `textures` dict built below
HOME = 0
BIN_PLASTIC = 1
BIN_PAPER = 2
BIN_GLASS = 3
BIN_ORGANIC = 4
BIN_WASTE = 5
# NOTE(review): trash_Pos/trashcounter are initialised but never updated
# in this file (only read/printed) — confirm they are still needed.
trash_Pos =[]
trashcounter=0
class Background(pygame.sprite.Sprite):
    """A static sprite showing *image_file* anchored at *location*."""

    def __init__(self, image_file, location):
        # Initialise the Sprite machinery before touching sprite state.
        super().__init__()
        self.image = pygame.image.load(image_file)
        self.rect = self.image.get_rect()
        # Anchor the image's top-left corner at the requested location.
        self.rect.topleft = location
BackGround = Background('background.jpg', [0,0])
# Tile-type id -> sprite image lookup used when drawing the map.
textures = {
    HOME : pygame.image.load('home.png'),
    BIN_PLASTIC : pygame.image.load('plastic.png'),
    BIN_PAPER : pygame.image.load('paper.png'),
    BIN_GLASS : pygame.image.load('glass.png'),
    BIN_ORGANIC : pygame.image.load('organic.png'),
    BIN_WASTE : pygame.image.load('waste.png')
}
TILWSIZE =100
MAPWIDTH = 8
MAPHIGHT = 8
PLAYER = pygame.image.load('GT.png')
# Truck starts in the bottom-right corner (the garage).
playerPos = [7, 7]
resourses = [HOME, BIN_PLASTIC, BIN_PAPER, BIN_GLASS, BIN_ORGANIC, BIN_WASTE]
# Per-tile-type counters, incremented while the random map is generated.
Q_homes=0
Q_glass=0
Q_paper=0
Q_plastic=0
Q_organic=0
Q_waste=0
tilemap = [ [HOME for w in range(MAPWIDTH)] for h in range(MAPHIGHT)]
# Randomly populate the 8x8 map: each bin type gets 2/15 probability,
# everything else becomes a house.
for rw in range(MAPHIGHT):
    for cl in range(MAPWIDTH):
        randomNumber = random.randint(0, 14)
        if randomNumber == 1 or randomNumber == 2:
            title = BIN_GLASS
            Q_glass +=1
        elif randomNumber == 3 or randomNumber == 4:
            title = BIN_PAPER
            Q_paper +=1
        elif randomNumber ==5 or randomNumber ==6:
            title = BIN_PLASTIC
            Q_plastic +=1
        elif randomNumber ==7 or randomNumber ==8:
            title = BIN_ORGANIC
            Q_organic +=1
        elif randomNumber ==9 or randomNumber ==10:
            title = BIN_WASTE
            Q_waste +=1
        else:
            title = HOME
            Q_homes +=1
        tilemap[rw][cl] = title
# Force free corners: (0,0) is the landfill, (7,7) is the garage.
tilemap[0][0] = HOME
tilemap[7][7] = HOME
pygame.init()
pygame.display.set_caption("Garbage truck")
# The window is wider than the map so the stats panel fits on the right.
TILWSIZE1 = 125
DISPLAYSURF = pygame.display.set_mode((MAPWIDTH*TILWSIZE1, MAPHIGHT*TILWSIZE))
DISPLAYSURF.fill((255, 255, 255))
pygame.font.init() # you have to call this at the start,
# if you want to use this module.
myfont = pygame.font.SysFont('Comic Sans MS', 30)
# Pre-render the stats panel labels.
textsurface0 = myfont.render('Quantity of', False, (0, 0, 0))
textsurface = myfont.render("HOMES: "+str(Q_homes),True,BLACK)
textsurface1 = myfont.render("GLASS: "+str(Q_glass),True,BLACK)
textsurface2 = myfont.render("PAPER: "+str(Q_paper),True,BLACK)
textsurface3 = myfont.render("PLASTIC: "+str(Q_plastic),True,BLACK)
textsurface4 = myfont.render("ORGANIC: "+str(Q_organic),True,BLACK)
textsurface5 = myfont.render("WASTE: "+str(Q_waste),True,BLACK)
textsurface7 = myfont.render("GARAGE",True,BLACK)
textsurface8 = myfont.render("LANDFILL",True,BLACK)
Q_player=0
# Draw every map tile at its grid position.
for row in range(MAPHIGHT):
    for column in range(MAPWIDTH):
        #trash_Pos
        DISPLAYSURF.blit(textures[tilemap[row][column]], (column*TILWSIZE, row*TILWSIZE))
        #if title == BIN_GLASS or title == BIN_PAPER:
            #trash_Pos.append(row)
            #trash_Pos[column]= column
TILWSIZE2 = 10
pygame.time.delay(10)
# Draw the truck at its starting tile.
DISPLAYSURF.blit(PLAYER, (playerPos[0]*TILWSIZE, playerPos[1]*TILWSIZE))
#DISPLAYSURF.blit(PLAYER, (playerPos[0]*400, playerPos[1]*500))
# print('Player Position: ', playerPos)
print(trashcounter)
# textsurface6 = myfont.render("STEPS: "+str(len(path)),True,BLACK)
# Blit the stats panel down the right edge of the window.
DISPLAYSURF.blit(textsurface0,(805,5))
DISPLAYSURF.blit(textsurface,(805,35))
DISPLAYSURF.blit(textsurface1,(805,65))
DISPLAYSURF.blit(textsurface2,(805,95))
DISPLAYSURF.blit(textsurface3,(805,125))
DISPLAYSURF.blit(textsurface4,(805,155))
DISPLAYSURF.blit(textsurface5,(805,185))
DISPLAYSURF.blit(textsurface7,(700,700))
DISPLAYSURF.blit(textsurface8,(0,5))
pygame.draw.rect(DISPLAYSURF, WHITE, (805, 248, 180, 42))
# DISPLAYSURF.blit(textsurface6,(805,250))
pygame.display.update()
class Action(Enum):
    """The four moves the truck can make on the grid."""
    UP = 0
    DOWN = 1
    LEFT = 2
    RIGHT = 3
class State():
    """An (x, y) grid position used by the search.

    Instances compare equal when both coordinates match. A matching
    ``__hash__`` is provided (the original defined ``__eq__`` alone,
    which made instances unhashable), plus a ``__repr__`` for debugging.
    """

    def __init__(self, coordX, coordY):
        self.x = coordX
        self.y = coordY

    def __eq__(self, other):
        return self.x == other.x and self.y == other.y

    def __hash__(self):
        # Keep the hash contract consistent with __eq__.
        return hash((self.x, self.y))

    def __repr__(self):
        return "State(%r, %r)" % (self.x, self.y)
class Node():
    """Search-tree node wrapping a state plus its parent link and action.

    Equality compares states (for duplicate detection); ordering compares
    costs (so heapq pops the cheapest node first).
    """

    def __init__(self, parent, state, action):
        self.parent = parent
        self.state = state
        self.action = action
        self.cost = 0  # filled in by the caller via f(node)

    def __eq__(self, other):
        return self.state == other.state

    def __lt__(self, other):
        return self.cost < other.cost

    def __le__(self, other):
        return self.cost <= other.cost
def goal_test(state):
    """Return True when *state* is the landfill corner at (0, 0)."""
    return state == State(0,0)
def succ(state):
    """Return the legal (next_state, action) pairs reachable from *state*.

    The bounds checks keep moves inside the 8x8 grid (indices 0..7).
    """
    x, y = state.x, state.y
    moves = []
    if 0 < x <= 7:
        moves.append((State(x - 1, y), Action.UP))
    if 0 <= x < 7:
        moves.append((State(x + 1, y), Action.DOWN))
    if 0 < y <= 7:
        moves.append((State(x, y - 1), Action.LEFT))
    if 0 <= y < 7:
        moves.append((State(x, y + 1), Action.RIGHT))
    return moves
def f(node):
    """Evaluate *node* as g + h: accumulated path cost plus a heuristic."""
    sx, sy = node.state.x, node.state.y
    # Stepping onto a house is heavily penalised; any other tile costs 1.
    step_cost = 50 if tilemap[sx][sy] == HOME else 1
    g = node.parent.cost + step_cost
    # Heuristic: squared distance from the goal at (0, 0).
    # NOTE(review): a squared distance can overestimate the remaining cost,
    # so this heuristic is not admissible — confirm that is intended.
    h = sx ** 2 + sy ** 2
    return g + h
def graphsearch(fringe, istate, explored):
    """Best-first graph search from *istate* to the goal cell (0, 0).

    Args:
        fringe: an empty list, used as a heapq priority queue of Nodes.
        istate: the starting State.
        explored: an empty list that collects expanded Nodes.

    Returns:
        The list of Actions from the goal back toward *istate* (callers
        reverse it), or False when the fringe empties without a solution.
    """
    heapq.heappush(fringe, Node(None, istate, None))
    while True:
        if len(fringe) == 0:
            return False
        # Pop the cheapest node (Node.__lt__ orders by cost).
        elem = heapq.heappop(fringe)
        if goal_test(elem.state):
            # Walk the parent chain back to the start, collecting actions.
            actions = []
            while elem.state != istate:
                actions.append(elem.action)
                elem = elem.parent
            return actions
        explored.append(elem)
        for (state, action) in succ(elem.state):
            x = Node(elem, state, action)
            x.cost = f(x)
            in_explored = any(n.state == state for n in explored)
            duplicate = next((n for n in fringe if n.state == state), None)
            if duplicate is None and not in_explored:
                heapq.heappush(fringe, x)
            elif duplicate is not None and not in_explored and duplicate.cost > x.cost:
                # Replace the worse duplicate. BUG FIX: list.remove breaks
                # the heap invariant, so re-heapify before pushing the
                # replacement node (the original pushed onto a broken heap).
                fringe.remove(duplicate)
                heapq.heapify(fringe)
                heapq.heappush(fringe, x)
def main():
    """Run the search from the garage (7, 7), animate the truck along the
    resulting path, then idle until the window is closed."""
    fringe = []
    explored = []
    actions = graphsearch(fringe,State(7,7), explored)
    # graphsearch returns actions goal->start, so reverse for playback.
    actions.reverse()
    # path = astar(maze, start, end)
    print(len(actions))
    print(actions)
    textsurface6 = myfont.render("STEPS: "+str(len(actions)),True,BLACK)
    DISPLAYSURF.blit(textsurface6,(805,250))
    # NOTE(review): here pos appears to be (column, row) screen order while
    # State uses (row, column); the symmetric (7, 7) start hides any
    # mismatch — confirm the mapping.
    pos = (7,7)
    for act in actions:
        if act == Action.UP:
            pos = tuple(map(lambda x,y: x-y, pos,(0,1)))
        elif act == Action.LEFT:
            pos = tuple(map(lambda x,y: x-y, pos,(1,0)))
        elif act == Action.RIGHT:
            pos = tuple(map(lambda x,y: x+y, pos,(1,0)))
        elif act == Action.DOWN:
            pos = tuple(map(lambda x,y: x+y, pos,(0,1)))
        else:
            pos = pos
        # Redraw the truck at its new tile, pausing so the motion is visible.
        DISPLAYSURF.blit(PLAYER, (pos[0]*TILWSIZE, pos[1]*TILWSIZE))
        pygame.display.update()
        pygame.time.delay(200)
    # Event loop: keep the window open until the user quits.
    while True:
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
if __name__ == '__main__':
main()
| true |
a048a9fbae88d438ae8528b7502718e498b10aba | Python | Markczy/Leecode | /分类/背包问题/Subset Sum.py | UTF-8 | 943 | 4.28125 | 4 | [] | no_license | """
给定一组正数数组,然后给定一个数,判断这组数中有没有和为这个数
Example 1: #
Input: {1, 2, 3, 7}, S=6
Output: True
The given set has a subset whose sum is '6': {1, 2, 3}
Example 2: #
Input: {1, 2, 7, 1, 5}, S=10
Output: True
The given set has a subset whose sum is '10': {1, 2, 7}
Example 3: #
Input: {1, 3, 4, 8}, S=6
Output: False
The given set does not have any subset whose sum is equal to '6'.
"""
# DP approach: dp[j] is True when some subset of nums sums to j;
# recurrence: dp[j] = dp[j] or dp[j - num].
def subset_sum(nums, S):
    """Return True when some subset of *nums* sums exactly to *S*."""
    reachable = [True] + [False] * S  # reachable[0]: the empty subset
    for num in nums:
        # Walk j downwards so each number is used at most once.
        for j in range(S, num - 1, -1):
            reachable[j] = reachable[j] or reachable[j - num]
    return reachable[-1]
# Demo runs (expected output: True, True, False).
print(subset_sum([1, 2, 3, 7], 6))
print(subset_sum([1, 2, 7, 1, 5], 10))
print(subset_sum([1, 3, 4, 8], 6))
| true |
5e47639864586a60d660a20450d76843f68f1a7e | Python | CurryMentos/algorithm | /exercise/综合/5.5.py | UTF-8 | 984 | 3.5625 | 4 | [] | no_license | # !/usr/bin/env python
# -*-coding:utf-8 -*-
"""
# File : 5.5.py
# Time :2021/7/27 14:17
# Author :zyz
# version :python 3.7
"""
"""
写一个小程序:控制台输入邮箱地址(格式为 username@companyname.com), 程序识别用户名和公司名后,将用户名和公司名输出到控制台。
要求:
校验输入内容是否符合规范(xx@yy.com), 如是进入下一步,如否则抛出提 示"incorrect email format"。注意必须以.com 结尾
可以循环“输入--输出判断结果”这整个过程
按字母 Q(不区分大小写)退出循环,结束程序
"""
import re
# Read e-mail addresses from stdin; print the username and company name of
# each valid "user@company.com" address. Q/q exits.
while True:
    email = input()
    if email == "q" or email == "Q":
        quit()
    # Username and company: up to 20 alphanumerics each, ending in ".com".
    # BUG FIX: the original extracted the company with the pattern
    # "@(.*?).com" whose unescaped '.' matched any character, producing
    # wrong results for names containing "com"; a single anchored match
    # with two groups validates and extracts in one pass.
    m = re.match(r'^([0-9a-zA-Z]{0,19})@([0-9a-zA-Z]{0,19})\.com$', email)
    if m:
        print(m.group(1))  # username
        print(m.group(2))  # company name
    else:
        print("incorrect email format")
27551249fabf955e80df54757b554ff2b9401e9e | Python | BigDataMalaysia/MYGovDataSet | /rank_visit_count.py | UTF-8 | 1,031 | 3.015625 | 3 | [] | no_license | #!/usr/bin/python
import mygovdataset
FAILS_IN_A_ROW_LIMIT = 30
datasets = list()
dataset_id = 0
fails_in_a_row = 0
while fails_in_a_row < FAILS_IN_A_ROW_LIMIT:
url = "http://data.gov.my/view.php?view={0}".format(dataset_id)
try:
some_data_set = mygovdataset.MYGovDataSet(url)
fails_in_a_row = 0
datasets.append({"dataset" : some_data_set, "dataset_id" : dataset_id})
print "id {0} added to list".format(dataset_id, e)
except Exception, e:
print "WARNING: id {0} didn't work out; exception: {1}".format(dataset_id, e)
fails_in_a_row += 1
dataset_id += 1
print "Got {0} fails in a row; assuming there are no more valid data sets (count: {1})".format(FAILS_IN_A_ROW_LIMIT, len(datasets))
# sort by view count, highest to lowest
datasets.sort(key=lambda x: x["dataset"].view_count, reverse=True)
rank = 1
print "#rank, id, view_count"
for dataset in datasets:
print "{0}, {1}, {2}".format(rank, dataset["dataset_id"], dataset["dataset"].view_count)
rank += 1
| true |
e2b3f3494524adeede5d074dad85582aa893ccca | Python | sreeharsha2002/Brick-Breaker | /canon.py | UTF-8 | 553 | 2.625 | 3 | [] | no_license | import random
from colorama import Fore
import numpy as np
import config
from ball import Ball
import os
class Canon(Ball):
    """A cannon power-up: a Ball that sticks to the paddle's top or bottom."""

    def __init__(self, x, y, xlength, ylength, xvel, yvel, ctype):
        super().__init__(x, y, xlength, ylength, xvel, yvel)
        # ctype 0 selects the top-mounted cannon, anything else the bottom one.
        self._type = "CANNON0" if ctype == 0 else "CANNON1"
        self.isCollided = False

    def attach(self, paddleobj):
        """Snap this cannon onto *paddleobj*'s top or bottom edge."""
        if self._type == "CANNON0":
            self._y = paddleobj._y
        else:
            self._y = paddleobj._y + paddleobj._ylength - 1
d5877fac0aa89404d3722b64dd9d99d320538331 | Python | StefanFNorling/Misc-Projects | /venv/pd4.py | UTF-8 | 3,018 | 3.578125 | 4 | [] | no_license | # Stefan Norling
# sfn2mc
# This program finds the highest-value path to the middle of a grid from
# each of the four corners of the grid
# Input for this is a line of numbers, rows separated by semicolons and
# each element of them separated by a comma
file = open("programaday2.txt", "r")
# Best path found so far from each corner: [move string, path value].
besttlpath = ["", 0]
besttrpath = ["", 0]
bestblpath = ["", 0]
bestbrpath = ["", 0]
def tlpath(x, y, currpath, currval):
    """Explore down ('d') / right ('r') moves from the top-left corner.

    Updates the module-level ``besttlpath`` ([path string, value]) whenever
    a path reaching (middlex, middley) beats the best value seen so far.
    ``currval`` already includes the value of cell (x, y).
    """
    if x == middlex and y == middley:
        global besttlpath
        if currval > besttlpath[1]:
            besttlpath = [currpath, currval]
    elif x <= middlex and y <= middley:
        # Recurse one row down and one column right, accumulating values.
        tlpath(x + 1, y, currpath + "d ", currval + graph[x + 1][y])
        tlpath(x, y + 1, currpath + "r ", currval + graph[x][y + 1])
def trpath(x, y, currpath, currval):
    """Explore down ('d') / left ('l') moves from the top-right corner.

    Updates the module-level ``besttrpath`` ([path string, value]) whenever
    a path reaching (middlex, middley) beats the best value seen so far.
    """
    if x == middlex and y == middley:
        global besttrpath
        if currval > besttrpath[1]:
            besttrpath = [currpath, currval]
    elif x <= middlex and y >= middley:
        trpath(x + 1, y, currpath + "d ", currval + graph[x + 1][y])
        trpath(x, y - 1, currpath + "l ", currval + graph[x][y - 1])
def blpath(x, y, currpath, currval):
    """Explore up ('u') / right ('r') moves from the bottom-left corner.

    Updates the module-level ``bestblpath`` ([path string, value]) whenever
    a path reaching (middlex, middley) beats the best value seen so far.
    """
    if x == middlex and y == middley:
        global bestblpath
        if currval > bestblpath[1]:
            bestblpath = [currpath, currval]
    elif x >= middlex and y <= middley:
        blpath(x - 1, y, currpath + "u ", currval + graph[x - 1][y])
        blpath(x, y + 1, currpath + "r ", currval + graph[x][y + 1])
def brpath(x, y, currpath, currval):
    """Explore up ('u') / left ('l') moves from the bottom-right corner.

    Updates the module-level ``bestbrpath`` ([path string, value]) whenever
    a path reaching (middlex, middley) beats the best value seen so far.
    """
    if x == middlex and y == middley:
        global bestbrpath
        if currval > bestbrpath[1]:
            bestbrpath = [currpath, currval]
    elif x >= middlex and y >= middley:
        brpath(x - 1, y, currpath + "u ", currval + graph[x - 1][y])
        brpath(x, y - 1, currpath + "l ", currval + graph[x][y - 1])
# Input format: first line is the number of grids; each following line is
# one grid (rows separated by ';', cells by ',').
numgraphs = int(file.readline())
for each in range(numgraphs):
    info = file.readline().split(";")
    rows = len(info)
    middlex = rows // 2
    graph = []
    i = 0
    while i < rows:
        graph.append(list(map(int, info[i].split(","))))
        i += 1
    cols = len(graph[0])
    middley = cols // 2
    # Search toward the middle from each of the four corners; each call
    # seeds currval with the corner cell's own value.
    # NOTE(review): best*path are never reset between graphs, so results
    # from earlier grids can carry over into later ones — confirm intended.
    tlpath(0, 0, "", graph[0][0])
    trpath(0, cols - 1, "", graph[0][cols - 1])
    blpath(rows - 1, 0, "", graph[rows - 1][0])
    brpath(rows - 1, cols - 1, "", graph[rows - 1][cols - 1])
    best = max(besttlpath[1], besttrpath[1], bestblpath[1], bestbrpath[1])
    # Echo the grid, then report whichever corner produced the best value.
    j = 0
    while j < rows:
        print(graph[j])
        j += 1
    if best == besttlpath[1]:
        print("The best path is taken from the top-left and is", besttlpath[0]+"with value", besttlpath[1])
    elif best == besttrpath[1]:
        print("The best path is taken from the top-right and is", besttrpath[0]+"with value", besttrpath[1])
    elif best == bestblpath[1]:
        print("The best path is taken from the bottom-left and is", bestblpath[0]+"with value", bestblpath[1])
    elif best == bestbrpath[1]:
        print("The best path is taken from the bottom-right and is", bestbrpath[0]+"with value", bestbrpath[1])
4c0b465cb098cdcc8c3fad28615e1f3c9c5817f7 | Python | chenxu0602/LeetCode | /1749.maximum-absolute-sum-of-any-subarray.py | UTF-8 | 1,348 | 3.375 | 3 | [] | no_license | #
# @lc app=leetcode id=1749 lang=python3
#
# [1749] Maximum Absolute Sum of Any Subarray
#
# https://leetcode.com/problems/maximum-absolute-sum-of-any-subarray/description/
#
# algorithms
# Medium (50.76%)
# Likes: 178
# Dislikes: 4
# Total Accepted: 6.6K
# Total Submissions: 12.9K
# Testcase Example: '[1,-3,2,3,-4]'
#
# You are given an integer array nums. The absolute sum of a subarray [numsl,
# numsl+1, ..., numsr-1, numsr] is abs(numsl + numsl+1 + ... + numsr-1 +
# numsr).
#
# Return the maximum absolute sum of any (possibly empty) subarray of nums.
#
# Note that abs(x) is defined as follows:
#
#
# If x is a negative integer, then abs(x) = -x.
# If x is a non-negative integer, then abs(x) = x.
#
#
#
# Example 1:
#
#
# Input: nums = [1,-3,2,3,-4]
# Output: 5
# Explanation: The subarray [2,3] has absolute sum = abs(2+3) = abs(5) = 5.
#
#
# Example 2:
#
#
# Input: nums = [2,-5,1,-4,3,-2]
# Output: 8
# Explanation: The subarray [-5,1,-4] has absolute sum = abs(-5+1-4) = abs(-8)
# = 8.
#
#
#
# Constraints:
#
#
# 1 <= nums.length <= 10^5
# -10^4 <= nums[i] <= 10^4
#
#
#
# @lc code=start
import itertools
class Solution:
    def maxAbsoluteSum(self, nums: List[int]) -> int:
        """Return the maximum absolute subarray sum of *nums*.

        Any subarray sum equals the difference of two prefix sums, so the
        answer is max(prefix) - min(prefix) over all prefixes (including
        the empty prefix 0). The prefix sums are computed once; the
        original traversed the list twice.
        """
        prefix = list(itertools.accumulate(nums, initial=0))
        return max(prefix) - min(prefix)
# @lc code=end
| true |
e76734e17409d6d0418226d7e28d22ad816ea999 | Python | CatalinAnton/toponym-resolution-in-scientific-papers | /w10_validator/schema_validator.py | UTF-8 | 2,254 | 2.65625 | 3 | [] | no_license | import json
import jsonschema
from jsonschema import ValidationError
import p1_file_management
# Draft-04 JSON Schema: a document holds "sentences", each pairing the raw
# sentence text with the list of resolved toponyms ("words") found in it.
schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "title": "Senteces",
    "description": "Analized sentences that contain toponyms",
    "type": "object",
    "properties": {
        "sentences": {
            "type": "array",
            "items": {
                "allOf": [
                    {
                        "type": "object",
                        "properties": {
                            "sentence": {"type": "string"},
                            "words": {
                                "type": "array",
                                "items": {
                                    "allOf": [
                                        {
                                            "type": "object",
                                            "properties": {
                                                "toponymId": {"type": "number"},
                                                "toponym": {"type": "string"},
                                                # Coordinates constrained to valid lat/lon ranges.
                                                "latitude": {"type": "number", "minimum": -90, "maximum": 90},
                                                "longitude": {"type": "number", "minimum": -180, "maximum": 180}
                                            },
                                            "required": ["toponymId", "toponym", "latitude", "longitude"]
                                        }
                                    ]
                                }
                            }
                        },
                        "required": ["sentence", "words"]
                    }
                ]
            }
        }
    },
    "required": ["sentences"]
}
# Validate every output file's JSON content against the schema, reporting
# per-file results to the console.
files = p1_file_management.get_file_list_output_final()
dict_file_content = p1_file_management.get_dictionary_file_content(files)

for (file_path, content) in dict_file_content.items():
    print("Validating " + file_path + " ...")
    try:
        # json.loads is inside the try so a malformed file no longer
        # aborts the whole run (the original parsed before the try).
        json_object = json.loads(content)
        jsonschema.validate(json_object, schema, format_checker=jsonschema.FormatChecker())
        print("\tJSON file is valid")
    except ValidationError as e:
        # Schema violation: catch the specific error type that was
        # imported for this purpose instead of a blanket Exception.
        print("\t" + str(e))
    except json.JSONDecodeError as e:
        # The file content is not valid JSON at all.
        print("\t" + str(e))
    print("\n")
| true |
e5c5dc31472d4fca4d0d5640bd25cd2306e48d2f | Python | Abhishek-2193/autoRigger | /createSecondaryWindow.py | UTF-8 | 5,649 | 2.53125 | 3 | [] | no_license | import maya.cmds as cmds
from math import pow, sqrt, cos, acos, radians
class secLocators():
    def __init__(self):
        # Building the window is the only construction step.
        self.createSecLocatorWindows()
    def createSecLocatorWindows(self):
        """Build and show the 'Secondary Locators' tool window in Maya."""
        cmds.window("Secondary Controllers", h = 500, w = 200, rtf = True, title = 'SECONDARY LOCATORS', titleBar = True)
        cmds.rowColumnLayout(nc = 1)
        # NOTE(review): hard-coded absolute path — breaks on other machines.
        cmds.image(i = "/Users/abhishekravi/Desktop/NEWMAYAWORK/Header2.png")
        cmds.separator(h = 10)
        cmds.button(l = "Create Reverse Footroll", w = 100, c = self.createReverseFootroll)
        cmds.separator(st = 'single', h = 10)
        #cmds.text("Twist Amount", l = "Amount of twist joints")
        #armTwist = cmds.intField(minValue = 2, maxValue = 10, value = 3)
        # Slider read later by armTwist() to size the twist-joint chain.
        self.armTwistCount = cmds.intSliderGrp(l = "Number of twist joints", min = 4, max = 10, value = 4, step = 1, field = True)
        cmds.separator(h = 10)
        cmds.button(l = "Create Forearm Twist", w = 100, c = self.armTwist)
        cmds.separator(st = 'single', h = 10)
        # NOTE(review): self.deleteSecondary is not defined in the visible
        # code — confirm it exists elsewhere in the class.
        cmds.button(l = "Delete Locators", w = 100, c = self.deleteSecondary)
        self.checkGroup(self)
        cmds.showWindow()
    def checkGroup(self, void):
        """Ensure the SECONDARY group node exists, creating it when missing.

        *void* is unused; it absorbs the extra argument these callbacks pass.
        """
        if cmds.objExists('SECONDARY'):
            print "Secondary Group Exists"
        else:
            cmds.group(em = True, n = 'SECONDARY')
        # NOTE(review): setColors is not defined in the visible code —
        # confirm it exists elsewhere in the class.
        self.setColors(self)
def createReverseFootroll(self, void):
#Left
#heel
cmds.select(deselect = True)
l_rev_heel = cmds.spaceLocator(n = 'Loc_L_inv_heel')
cmds.scale(0.05, 0.05, 0.05, l_rev_heel)
cmds.move(0.15, -0.5, 0, l_rev_heel)
cmds.parent(l_rev_heel, 'SECONDARY')
#toes
l_rev_toes = cmds.spaceLocator(n = 'Loc_L_inv_toes')
cmds.scale(0.05, 0.05, 0.05, l_rev_toes)
cmds.move(0.15, -0.5, 0.3, l_rev_toes)
cmds.parent(l_rev_toes, 'Loc_L_inv_heel')
#ball
l_rev_ball = cmds.spaceLocator(n = 'Loc_L_inv_ball')
cmds.scale(0.05, 0.05, 0.05, l_rev_ball)
cmds.move(0.15, -0.5, 0.15, l_rev_ball)
cmds.parent(l_rev_ball, 'Loc_L_inv_toes')
#ankle
l_rev_ankle = cmds.spaceLocator(n = 'Loc_L_inv_ankle')
cmds.scale(0.05, 0.05, 0.05, l_rev_ankle)
cmds.move(0.15, -0.4, 0, l_rev_ankle)
cmds.parent(l_rev_ankle, 'Loc_L_inv_ball')
#Right
#heel
cmds.select(deselect = True)
r_rev_heel = cmds.spaceLocator(n = 'Loc_R_inv_heel')
cmds.scale(0.05, 0.05, 0.05, r_rev_heel)
cmds.move(-0.15, -0.5, 0, r_rev_heel)
cmds.parent(r_rev_heel, 'SECONDARY')
#toes
r_rev_toes = cmds.spaceLocator(n = 'Loc_R_inv_toes')
cmds.scale(0.05, 0.05, 0.05, r_rev_toes)
cmds.move(-0.15, -0.5, 0.3, r_rev_toes)
cmds.parent(r_rev_toes, 'Loc_R_inv_heel')
#ball
r_rev_ball = cmds.spaceLocator(n = 'Loc_R_inv_ball')
cmds.scale(0.05, 0.05, 0.05, r_rev_ball)
cmds.move(-0.15, -0.5, 0.15, r_rev_ball)
cmds.parent(r_rev_ball, 'Loc_R_inv_toes')
#ankle
r_rev_ankle = cmds.spaceLocator(n = 'Loc_R_inv_ankle')
cmds.scale(0.05, 0.05, 0.05, r_rev_ankle)
cmds.move(-0.15, -0.4, 0, r_rev_ankle)
cmds.parent(r_rev_ankle, 'Loc_R_inv_ball')
def armTwist(self, void):
_amount = cmds.intSliderGrp(self.armTwistCount, q = True, v = True)
self.createForeArmTwist(self, _amount)
def createForeArmTwist(self, void, amount):
cmds.select(deselect = True)
L_elbowPos = cmds.xform(cmds.ls('Loc_L_elbow'), q = True, t = True, ws = True)
L_wristPos = cmds.xform(cmds.ls('Loc_L_wrist'), q = True, t = True, ws = True)
L_vectorX = L_wristPos[0] - L_elbowPos[0]
L_vectorY = L_wristPos[1] - L_elbowPos[1]
L_vectorZ = L_wristPos[2] - L_elbowPos[2]
print amount
for i in range(amount -1):
L_twistLoc = cmds.spaceLocator(n = 'Loc_L_armTwist_' + str(i))
cmds.move(L_elbowPos[0] + (L_vectorX / amount) + ((L_vectorX / amount) * i), L_elbowPos[1] + (L_vectorY / amount) + ((L_vectorY / amount) * i), L_elbowPos[2] + (L_vectorZ / amount) + ((L_vectorZ / amount) * i), L_twistLoc)
cmds.scale(0.05, 0.05, 0.05, L_twistLoc)
cmds.parent(L_twistLoc, 'SECONDARY')
R_elbowPos = cmds.xform(cmds.ls('Loc_R_elbow'), q = True, t = True, ws = True)
R_wristPos = cmds.xform(cmds.ls('Loc_R_wrist'), q = True, t = True, ws = True)
R_vectorX = R_wristPos[0] - R_elbowPos[0]
R_vectorY = R_wristPos[1] - R_elbowPos[1]
R_vectorZ = R_wristPos[2] - R_elbowPos[2]
for j in range(amount - 1):
r_twistLoc = cmds.spaceLocator(n = 'Loc_R_armTwist_' + str(j))
cmds.move(R_elbowPos[0] + (R_vectorX / amount) + ((R_vectorX / amount) * j), R_elbowPos[1] + (R_vectorY / amount) + ((R_vectorY / amount) * j), R_elbowPos[2] + (R_vectorZ / amount) + ((R_vectorZ / amount) * i), r_twistLoc)
cmds.scale(0.05, 0.05, 0.05, r_twistLoc)
cmds.parent(r_twistLoc, 'SECONDARY')
def setColors(self, void):
cmds.setAttr('SECONDARY.overrideEnabled', 1)
cmds.setAttr('SECONDARY.overrideRGBColors', 1)
cmds.setAttr('SECONDARY.overrideColorRGB', 1, 1, 1)
def deleteSecondary(self, void):
cmds.delete(cmds.ls('SECONDARY'))
| true |
def josephus_survivor(l, k):
    """Return the survivor's 1-based position in the Josephus problem.

    ``l`` people stand in a circle numbered 1..l; counting starts at person 1
    and every ``k``-th person is eliminated until one remains.

    Uses the O(l) Josephus recurrence f(1) = 0, f(n) = (f(n-1) + k) % n
    instead of simulating list removals. The original simulation failed to
    advance the counter after a wrap-around removal, so it returned wrong
    survivors (e.g. (7, 3) -> 7 instead of the correct 4).
    """
    survivor = 0  # 0-based survivor position for a circle of size 1
    for n in range(2, l + 1):
        survivor = (survivor + k) % n
    return survivor + 1
| true |
d409189337d5839fcf002d56e878f918f3e5e04f | Python | LJ1234com/scikit-learn | /Examples/Cross_validation2.py | UTF-8 | 684 | 2.953125 | 3 | [] | no_license | import numpy as np
import sklearn
import sklearn.model_selection
import sklearn.datasets
import sklearn.svm
import matplotlib.pyplot as plt
# Hand-written digits dataset: 8x8 images flattened into 64 features.
digits = sklearn.datasets.load_digits()
x = digits.data
y = digits.target

# Sweep the SVM regularisation strength C over ten log-spaced values,
# recording the mean and standard deviation of the 5-fold CV score.
svc = sklearn.svm.SVC(kernel='linear')
Cs = np.logspace(-10, 0, 10)

scores = []
score_std = []
for C in Cs:
    svc.C = C
    fold_scores = sklearn.model_selection.cross_val_score(svc, x, y, cv=5)
    scores.append(np.mean(fold_scores))
    score_std.append(np.std(fold_scores))

# Plot the mean CV score together with a +/- one-standard-deviation band.
mean_scores = np.array(scores)
band = np.array(score_std)
plt.semilogx(Cs, scores)
plt.semilogx(Cs, mean_scores + band, 'b--')
plt.semilogx(Cs, mean_scores - band, 'b--')
plt.xlabel('C')
plt.ylabel('CV Score')
plt.show()
| true |
5160615c43b31edeff18565b6a1af0241effe483 | Python | inteljack/EL6183-Digital-Signal-Processing-Lab-2015-Fall | /project/Examples/Examples/PP2E/Dstruct/Basic/timer2.py | UTF-8 | 498 | 2.90625 | 3 | [] | no_license | def test(reps, func):
import time
start_wall = time.time() # current real seconds
start_cpu = time.clock() # current processor secs
for i in xrange(reps): # call it 'reps' times
x = func(i)
cpu_time = time.clock() - start_cpu
wall_time = time.time() - start_wall # total = stop - start time
return {'cpu': cpu_time, 'wall': wall_time}
| true |
8ed06ec2e73868204be876e7abe0cda4b26a49b4 | Python | mesmerus/synapse | /synapse/metrics/metric.py | UTF-8 | 9,945 | 2.96875 | 3 | [
"Apache-2.0"
] | permissive | # -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from itertools import chain
import logging
import re
logger = logging.getLogger(__name__)
def flatten(items):
    """Flatten one level of nesting.

    Args:
        items: iterable[iterable[X]]

    Returns:
        list[X]: every element of every inner iterable, in order.
    """
    return [element for inner in items for element in inner]
class BaseMetric(object):
    """Base class for metrics that report one numeric value per label set."""

    def __init__(self, name, labels=[], alternative_names=[]):
        """
        Args:
            name (str): principal name for this metric
            labels (list(str)): names of the labels which will be reported
                for this metric
            alternative_names (iterable(str)): other names this metric is
                also exported under, e.g. to ease a rename migration.
        """
        self._names = [name] + list(alternative_names)
        self.labels = labels  # OK not to clone as we never write it

    def dimension(self):
        """Return the number of labels this metric carries."""
        return len(self.labels)

    def is_scalar(self):
        """Return True when the metric has no labels at all."""
        return len(self.labels) == 0

    def _render_labelvalue(self, value):
        """Quote and escape a single label value."""
        return '"%s"' % (_escape_label_value(value),)

    def _render_key(self, values):
        """Render the '{label="value",...}' suffix for one set of values.

        Scalar metrics have no labels, so they render an empty suffix.
        """
        if self.is_scalar():
            return ""
        pairs = [
            "%s=%s" % (label, self._render_labelvalue(value))
            for label, value in zip(self.labels, values)
        ]
        return "{%s}" % (",".join(pairs),)

    def _render_for_labels(self, label_values, value):
        """Render this metric for a single set of labels.

        Args:
            label_values (list[object]): values for each of the labels
                (stringified on the way through).
            value: value of the metric with these labels

        Returns:
            iterable[str]: one rendered line per exported name
        """
        suffix = self._render_key(label_values)
        return (
            "%s%s %.12g" % (name, suffix, value)
            for name in self._names
        )

    def render(self):
        """Render every label set of this metric; implemented by subclasses.

        Each line looks like:

            name{label1="val1",label2="val2"} value

        https://prometheus.io/docs/instrumenting/exposition_formats/#text-format-details

        Returns:
            iterable[str]: rendered metric lines
        """
        raise NotImplementedError()
class CounterMetric(BaseMetric):
    """The simplest kind of metric: a monotonically-increasing count.

    Typical uses: number of requests processed, items queued, total bytes
    handled. Counters only ever go up (and reset when the process restarts).
    """

    def __init__(self, *args, **kwargs):
        super(CounterMetric, self).__init__(*args, **kwargs)

        # Map from a tuple of label values (in self.labels order) to the
        # running count for that label set. A scalar metric uses the
        # single key ().
        self.counts = {}

        if self.is_scalar():
            # Scalar metrics always report a value, even before any inc().
            self.counts[()] = 0.

    def inc_by(self, incr, *values):
        if len(values) != self.dimension():
            raise ValueError(
                "Expected as many values to inc() as labels (%d)" % (self.dimension())
            )

        # TODO: should assert that the tag values are all strings
        self.counts[values] = self.counts.get(values, 0) + incr

    def inc(self, *values):
        self.inc_by(1, *values)

    def render(self):
        return flatten(
            self._render_for_labels(label_values, self.counts[label_values])
            for label_values in sorted(self.counts)
        )
class GaugeMetric(BaseMetric):
    """A metric whose value can be set up or down arbitrarily."""

    def __init__(self, *args, **kwargs):
        super(GaugeMetric, self).__init__(*args, **kwargs)

        # Map from a tuple of label values (in self.labels order) to the
        # most recently set value. A scalar metric uses the single key ().
        # NOTE: attribute name kept as-is ('guages') for compatibility.
        self.guages = {}

    def set(self, v, *values):
        if len(values) != self.dimension():
            raise ValueError(
                "Expected as many values to inc() as labels (%d)" % (self.dimension())
            )

        # TODO: should assert that the tag values are all strings
        self.guages[values] = v

    def render(self):
        return flatten(
            self._render_for_labels(label_values, self.guages[label_values])
            for label_values in sorted(self.guages)
        )
class CallbackMetric(BaseMetric):
    """A metric that returns the numeric value returned by a callback whenever
    it is rendered. Typically this is used to implement gauges that yield the
    size or other state of some in-memory object by actively querying it."""

    def __init__(self, name, callback, labels=[]):
        super(CallbackMetric, self).__init__(name, labels=labels)

        self.callback = callback

    def render(self):
        try:
            value = self.callback()
        except Exception:
            # BUG FIX: BaseMetric stores names in self._names and never sets
            # self.name, so the old code raised AttributeError while trying
            # to report the callback failure instead of logging it.
            logger.exception("Failed to render %s", self._names[0])
            return ["# FAILED to render " + self._names[0]]

        if self.is_scalar():
            return list(self._render_for_labels([], value))

        # Labelled callbacks are expected to return a dict keyed by label
        # value tuples.
        return flatten(
            self._render_for_labels(k, value[k])
            for k in sorted(value.keys())
        )
class DistributionMetric(object):
    """An event counter combined with an accumulator.

    Tracks both the number of events and their accumulated total value, e.g.
    for method-running times or other distributions of discrete occurrences.

    TODO(paul): Try to export some heatmap-style stats?
    """

    def __init__(self, name, *args, **kwargs):
        self.counts = CounterMetric(name + ":count", **kwargs)
        self.totals = CounterMetric(name + ":total", **kwargs)

    def inc_by(self, inc, *values):
        self.counts.inc(*values)
        self.totals.inc_by(inc, *values)

    def render(self):
        rendered = self.counts.render()
        rendered.extend(self.totals.render())
        return rendered
class CacheMetric(object):
    """Tracks hit/miss/eviction counts and the current size of a named cache."""

    __slots__ = (
        "name", "cache_name", "hits", "misses", "evicted_size", "size_callback",
    )

    def __init__(self, name, size_callback, cache_name):
        self.name = name
        self.cache_name = cache_name

        self.hits = 0
        self.misses = 0
        self.evicted_size = 0

        self.size_callback = size_callback

    def inc_hits(self):
        self.hits += 1

    def inc_misses(self):
        self.misses += 1

    def inc_evictions(self, size=1):
        self.evicted_size += size

    def render(self):
        # One line per statistic, all tagged with the cache's name.
        stats = [
            ("hits", self.hits),
            ("total", self.hits + self.misses),
            ("size", self.size_callback()),
            ("evicted_size", self.evicted_size),
        ]
        return [
            """%s:%s{name="%s"} %d""" % (self.name, stat, self.cache_name, count)
            for stat, count in stats
        ]
class MemoryUsageMetric(object):
    """Keeps track of the current memory usage, using psutil.

    Keeps the min/max/sum/count of RSS samples over the last WINDOW_SIZE_SEC
    seconds, sampling UPDATE_HZ times per second.
    """

    UPDATE_HZ = 2  # number of times to get memory per second
    WINDOW_SIZE_SEC = 30  # the size of the window in seconds

    def __init__(self, hs, psutil):
        clock = hs.get_clock()
        self.memory_snapshots = []

        self.process = psutil.Process()

        clock.looping_call(self._update_curr_values, 1000 / self.UPDATE_HZ)

    def _update_curr_values(self):
        """Sample the current RSS and trim the window to its maximum length."""
        window = self.UPDATE_HZ * self.WINDOW_SIZE_SEC
        self.memory_snapshots.append(self.process.memory_info().rss)
        # Keep only the newest `window` samples (in place, same list object).
        self.memory_snapshots[:] = self.memory_snapshots[-window:]

    def render(self):
        snapshots = self.memory_snapshots
        if not snapshots:
            return []

        return [
            "process_psutil_rss:max %d" % max(snapshots),
            "process_psutil_rss:min %d" % min(snapshots),
            "process_psutil_rss:total %d" % sum(snapshots),
            "process_psutil_rss:count %d" % len(snapshots),
        ]
def _escape_character(m):
    """Replace a single matched character with its escape sequence.

    Args:
        m (re.MatchObject): a match object whose first group is the single
            character to replace

    Returns:
        str: the escape sequence for backslash, double quote and newline;
            any other character is returned unchanged.
    """
    replacements = {
        "\\": "\\\\",
        "\"": "\\\"",
        "\n": "\\n",
    }
    c = m.group(1)
    return replacements.get(c, c)
def _escape_label_value(value):
    """Escape backslashes, double quotes and newlines in a label value.

    The value is coerced to ``str`` first, so non-string label values
    (ints, etc.) are handled too.
    """
    text = str(value)
    return re.sub(r"([\n\"\\])", _escape_character, text)
| true |
f6d4307abf5d8778a41e3aef1a540311c4b6d268 | Python | Shacklebolt13/hedge | /fileEncDec.py | UTF-8 | 1,564 | 2.796875 | 3 | [
"CC0-1.0"
] | permissive | import base64
import logging
import os
import processKiller
def encodeFile(filepath :str,retries=5):
'''
encodes a file in base64 with a displacement text, it as filename.ncod
'''
_DISPLACEMENT_TEXT=b"You ARE NOt SuPpOSed To Read ThiS!!"
_file=None
for _ in range(0,retries):
try:
_file=open(filepath,'r+b')
_material=_DISPLACEMENT_TEXT+base64.b64encode(_file.read())
_file.seek(0,0)
_file.truncate()
_file.write(_material)
_file.close()
os.rename(filepath,filepath+".ncod")
del _material
break
except Exception as e:
logging.warn("occured {e}")
_file.close()
processKiller.killPID(filepath)
def decodeFile(filepath: str, retries=5):
    '''
    Decodes a file in place that was encoded by encodeFile (displacement
    text followed by base64), then renames ``filepath + ".ncod"`` back to
    ``filepath``.

    Args:
        filepath (str): target path without the ".ncod" suffix.
        retries (int): attempts to make; after each failure the process
            holding the file open is killed via processKiller, then we retry.
    '''
    _DISPLACEMENT_TEXT = b"You ARE NOt SuPpOSed To Read ThiS!!"
    for _ in range(retries):
        _file = None
        try:
            _file = open(filepath + ".ncod", 'r+b')
            # Skip the displacement prefix; the rest is base64 payload.
            _file.seek(len(_DISPLACEMENT_TEXT), 0)
            _material = base64.b64decode(_file.read())
            _file.seek(0, 0)
            _file.truncate()
            _file.write(_material)
            _file.close()
            del _material
            os.rename(filepath + ".ncod", filepath)
            break
        except Exception as e:
            # BUG FIX: was logging.warn("occured {e}") -- deprecated alias
            # and a missing f-prefix, so the exception was never shown.
            logging.warning("error occurred while decoding %s: %s", filepath, e)
            # BUG FIX: _file is None when open() itself failed; the old
            # unconditional close() raised AttributeError here.
            if _file is not None:
                _file.close()
            # Kill whatever process holds the file open, then retry.
            processKiller.killPID(filepath)
| true |