repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
DamienPond001/Udemy_API | refs/heads/master | # Make an array of translated impact forces: translated_force_b
translated_force_b = force_b - np.mean(force_b) + 0.55
# Take bootstrap replicates of Frog B's translated impact forces: bs_replicates
bs_replicates = draw_bs_reps(translated_force_b, np.mean, 10000)
# Compute fraction of replicates that are less than the observed Frog B force: p
p = np.sum(bs_replicates <= np.mean(force_b)) / 10000
# Print the p-value
print('p = ', p)
# Compute mean of all forces: mean_force
mean_force = np.mean(forces_concat)
# Generate shifted arrays
force_a_shifted = force_a - np.mean(force_a) + mean_force
force_b_shifted = force_b - np.mean(force_b) + mean_force
# Compute 10,000 bootstrap replicates from shifted arrays
bs_replicates_a = draw_bs_reps(force_a_shifted, np.mean, 10000)
bs_replicates_b = draw_bs_reps(force_b_shifted, np.mean, 10000)
# Get replicates of difference of means: bs_replicates
bs_replicates = bs_replicates_a-bs_replicates_b
# Compute and print p-value: p
p = np.sum(bs_replicates >= (np.mean(force_a)-np.mean(force_b))) / 10000
print('p-value =', p) | Python | 31 | 33.870968 | 80 | /Datacamp/hypothesis_testing_with_one_dataset.py | 0.727778 | 0.697222 |
DamienPond001/Udemy_API | refs/heads/master | #Sometimes we may want multiple row indexes in a heirachical order
# Set the index to be the columns ['state', 'month']: sales
sales = sales.set_index(['state', 'month'])
# Sort the MultiIndex: sales
sales = sales.sort_index()
sales =
eggs salt spam
state month
CA 1 47 12.0 17
2 110 50.0 31
NY 1 221 89.0 72
2 77 87.0 20
TX 1 132 NaN 52
2 205 60.0 55
# Look up data for NY in month 1: NY_month1
NY_month1 = sales.loc[('NY', 1)]
# Look up data for CA and TX in month 2: CA_TX_month2
CA_TX_month2 = sales.loc[(['CA', 'TX'], 2),:]
# Look up data for all states in month 2: all_month2
all_month2 = sales.loc[(slice(None), 2),:] | Python | 25 | 29.4 | 66 | /Datacamp/multi_indexing.py | 0.561265 | 0.480896 |
DamienPond001/Udemy_API | refs/heads/master |
for i in range(50):
# Generate bootstrap sample: bs_sample
bs_sample = np.random.choice(rainfall, size=len(rainfall))
# Compute and plot ECDF from bootstrap sample
x, y = ecdf(bs_sample)
_ = plt.plot(x=x, y=y, marker='.', linestyle='none',
color='gray', alpha=0.1)
# Compute and plot ECDF from original data
x, y = ecdf(rainfall)
_ = plt.plot(x=x, y=y, marker='.')
# Make margins and label axes
plt.margins(0.02)
_ = plt.xlabel('yearly rainfall (mm)')
_ = plt.ylabel('ECDF')
# Show the plot
plt.show()
def draw_bs_reps(data, func, size=1):
"""Draw bootstrap replicates."""
# Initialize array of replicates: bs_replicates
bs_replicates = np.empty(size)
# Generate replicates
for i in range(size):
bs_replicates[i] = bootstrap_replicate_1d(data, func) #applies func to bootstrap sample
return bs_replicates
# Take 10,000 bootstrap replicates of the mean: bs_replicates
bs_replicates = draw_bs_reps(rainfall,np.mean,10000)
# Compute and print SEM
sem = np.std(rainfall) / np.sqrt(len(rainfall))
print(sem)
# Compute and print standard deviation of bootstrap replicates
bs_std = np.std(bs_replicates)
print(bs_std)
# Make a histogram of the results
_ = plt.hist(bs_replicates, bins=50, normed=True)
_ = plt.xlabel('mean annual rainfall (mm)')
_ = plt.ylabel('PDF')
# Show the plot
plt.show()
# Draw bootstrap replicates of the mean no-hitter time (equal to tau): bs_replicates
bs_replicates = draw_bs_reps(nohitter_times, np.mean, 10000)
# Compute the 95% confidence interval: conf_int
conf_int = np.percentile(bs_replicates, [2.5, 97.5])
# Print the confidence interval
print('95% confidence interval =', conf_int, 'games')
# Plot the histogram of the replicates
_ = plt.hist(bs_replicates, bins=50, normed=True)
_ = plt.xlabel(r'$\tau$ (games)')
_ = plt.ylabel('PDF')
# Show the plot
plt.show()
| Python | 71 | 25.535212 | 95 | /Datacamp/bootstrapping.py | 0.68435 | 0.664721 |
DamienPond001/Udemy_API | refs/heads/master | # Merge revenue with managers on 'city': merge_by_city
merge_by_city = pd.merge(revenue, managers, on='city')
# Print merge_by_city
print(merge_by_city)
# Merge revenue with managers on 'branch_id': merge_by_id
merge_by_id = pd.merge(revenue, managers, on='branch_id')
# Print merge_by_id
print(merge_by_id)
# Add 'state' column to revenue: revenue['state']
revenue['state'] = ['TX','CO','IL','CA']
# Add 'state' column to managers: managers['state']
managers['state'] = ['TX','CO','CA', 'MO']
# Merge revenue & managers on 'branch_id', 'city', & 'state': combined
combined = pd.merge(revenue, managers, on=['branch_id', 'city','state'])
# Print combined
print(combined)
#matching columns are suffixed with _x, _y. This can be changed with 'suffixes = [..., ...]' arg
o2o = pd.merge(left=site, right=visited, left_on='name', right_on='site')
#This will handle 1-to-1, many-to-1 and many-to-many merges
# Merge revenue and sales: revenue_and_sales
revenue_and_sales = pd.merge(revenue, sales, how='right',on=['city', 'state'])
# Print revenue_and_sales
print(revenue_and_sales)
# Merge sales and managers: sales_and_managers
sales_and_managers = pd.merge(sales, managers, how='left',left_on=['city', 'state'], right_on=['branch', 'state'])
# Print sales_and_managers
print(sales_and_managers) | Python | 41 | 30.853659 | 114 | /Datacamp/merging.py | 0.689655 | 0.68659 |
DamienPond001/Udemy_API | refs/heads/master | # Create a select query: stmt
stmt = select([census])
# Add a where clause to filter the results to only those for New York
stmt = stmt.where(census.columns.state == 'New York')
# Execute the query to retrieve all the data returned: results
results = connection.execute(stmt).fetchall()
# Loop over the results and print the age, sex, and pop2008
for result in results:
print(result.age, result.sex, result.pop2008)
# Create a query for the census table: stmt
stmt = select([census])
# Append a where clause to match all the states in_ the list states
stmt = stmt.where(census.columns.state.in_(states))
# Loop over the ResultProxy and print the state and its population in 2000
for i in connection.execute(stmt).fetchall():
print(i.state, i.pop2000)
# Import and_
from sqlalchemy import and_
# Build a query for the census table: stmt
stmt = select([census])
# Append a where clause to select only non-male records from California using and_
stmt = stmt.where(
# The state of California with a non-male sex
and_(census.columns.state == 'California',
census.columns.sex != 'M'
)
)
# Loop over the ResultProxy printing the age and sex
for result in connection.execute(stmt).fetchall():
print(result.age, result.sex)
# Build a query to select the state column: stmt
stmt = select([census.columns.state])
# Order stmt by the state column
stmt = stmt.order_by(census.columns.state) #desc(census.columns.state)
## Build a query to select state and age: stmt
#stmt = select([census.columns.state, census.columns.age])
#
## Append order by to ascend by state and descend by age
#stmt = stmt.order_by(census.columns.state, desc(census.columns.age))
# Execute the query and store the results: results
results = connection.execute(stmt).fetchall()
# Print the first 10 results
print(results[:10]) | Python | 58 | 31 | 82 | /Datacamp/sqlalchemy_more_statements.py | 0.726146 | 0.715364 |
DamienPond001/Udemy_API | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 7 11:20:39 2018
@author: Damien
"""
| Python | 6 | 13.166667 | 35 | /API/Section7/code/models/__init__.py | 0.55814 | 0.418605 |
DamienPond001/Udemy_API | refs/heads/master | # Import create_engine
from sqlalchemy import create_engine
# Create an engine that connects to the census.sqlite file: engine
engine = create_engine('sqlite:///census.sqlite')
connection = engine.connect()
# Build select statement for census table: stmt
stmt = "SELECT * FROM census"
# Execute the statement and fetch the results: results
results = connection.execute(stmt).fetchall()
# Print results
print(results)
#ALTERNATIVELY
# Import select
from sqlalchemy import select
# Reflect census table via engine: census
census = Table('census', metadata, autoload=True, autoload_with=engine)
# Build select statement for census table: stmt
stmt = select([census])
# Print the emitted statement to see the SQL emitted
print(stmt)
# Execute the statement and print the results
print(connection.execute(stmt).fetchall())
#
#Recall the differences between a ResultProxy and a ResultSet:
#
# ResultProxy: The object returned by the .execute() method. It can be used in a variety of ways to get the data returned by the query.
# ResultSet: The actual data asked for in the query when using a fetch method such as .fetchall() on a ResultProxy.
#This separation between the ResultSet and ResultProxy allows us to fetch as much or as little data as we desire.
results = connection.execute(stmt).fetchall()
# Get the first row of the results by using an index: first_row
first_row = results[0]
# Print the first row of the results
print(first_row)
# Print the first column of the first row by using an index
print(first_row[0])
# Print the 'state' column of the first row by using its name
print(first_row['state']) | Python | 54 | 29.166666 | 138 | /Datacamp/sqlalchemy_statements.py | 0.761671 | 0.760442 |
DamienPond001/Udemy_API | refs/heads/master | # Seed random number generator
np.random.seed(42)
# Compute mean no-hitter time: tau
tau = np.mean(nohitter_times)
# Draw out of an exponential distribution with parameter tau: inter_nohitter_time
inter_nohitter_time = np.random.exponential(tau, 100000)
# Plot the PDF and label axes
_ = plt.hist(inter_nohitter_time,
bins=50, normed=True, histtype='step')
_ = plt.xlabel('Games between no-hitters')
_ = plt.ylabel('PDF')
# Show the plot
plt.show()
#Verigy using cdf
# Create an ECDF from real data: x, y
x, y = ecdf(nohitter_times)
# Create a CDF from theoretical samples: x_theor, y_theor
x_theor, y_theor = ecdf(inter_nohitter_time)
# Overlay the plots
plt.plot(x=x_theor, y=y_theor)
plt.plot(x=x, y=y, marker='.', linestyle='none')
# Margins and axis labels
plt.margins(0.02)
plt.xlabel('Games between no-hitters')
plt.ylabel('CDF')
# Show the plot
plt.show()
# Plot the theoretical CDFs
plt.plot(x_theor, y_theor)
plt.plot(x, y, marker='.', linestyle='none')
plt.margins(0.02)
plt.xlabel('Games between no-hitters')
plt.ylabel('CDF')
# Take samples with half tau: samples_half
samples_half = np.random.exponential(tau/2,10000)
# Take samples with double tau: samples_double
samples_double = np.random.exponential(2*tau,10000)
# Generate CDFs from these samples
x_half, y_half = ecdf(samples_half)
x_double, y_double = ecdf(samples_double)
# Plot these CDFs as lines
_ = plt.plot(x_half, y_half)
_ = plt.plot(x_double, y_double)
# Show the plot
plt.show()
| Python | 61 | 23.409836 | 81 | /Datacamp/parameter_optimisation.py | 0.71323 | 0.694426 |
DamienPond001/Udemy_API | refs/heads/master |
# Import numpy as np
import numpy as np
# Create array using np.linspace: x
x = np.linspace(0,5,100)
# Create array using np.cos: y
y = np.cos(x)
# Add circles at x and y
p.circle(x,y)
# Specify the name of the output file and show the result
output_file('numpy.html')
show(p)
#pandas
# Import pandas as pd
import pandas as pd
# Read in the CSV file: df
df = pd.read_csv('auto.csv')
# Import figure from bokeh.plottin
from bokeh.plotting import figure
# Create the figure: p
p = figure(x_axis_label='HP', y_axis_label='MPG')
# Plot mpg vs hp by color
p.circle( df['hp'], df['mpg'], color=df['color'], size=10)
# Specify the name of the output file and show the result
output_file('auto-df.html')
show(p)
#ColumnDataSource
# Import the ColumnDataSource class from bokeh.plotting
from bokeh.plotting import ColumnDataSource
# Create a ColumnDataSource from df: source
source = ColumnDataSource(df)
# Add circle glyphs to the figure p
p.circle('Year', 'Time', source=source, color='color',size=8)
# Specify the name of the output file and show the result
output_file('sprint.html')
show(p)
| Python | 51 | 20.627451 | 61 | /Datacamp/bokeh_numpy_pandas.py | 0.722826 | 0.71558 |
DamienPond001/Udemy_API | refs/heads/master | np.random.binomial(trails, probablity_of_success, size=number_of_reps)
np.random.poisson(average_rate, size=number_of_reps)
np.random.normal(mean, std, size=)
# Draw 100000 samples from Normal distribution with stds of interest: samples_std1, samples_std3, samples_std10
samples_std1 = np.random.normal(20,1,size=100000)
samples_std3 = np.random.normal(20,3,size=100000)
samples_std10 = np.random.normal(20,10,size=100000)
# Make histograms
_ = plt.hist(samples_std1, normed=True, histtype='step', bins=100)
_ = plt.hist(samples_std3, normed=True, histtype='step', bins=100)
_ = plt.hist(samples_std10, normed=True, histtype='step', bins=100)
# Make a legend, set limits and show plot
_ = plt.legend(('std = 1', 'std = 3', 'std = 10'))
plt.ylim(-0.01, 0.42)
plt.show()
# Compute mean and standard deviation: mu, sigma
mu = np.mean(belmont_no_outliers)
sigma = np.std(belmont_no_outliers)
# Sample out of a normal distribution with this mu and sigma: samples
samples = np.random.normal(mu, sigma, size=10000)
# Get the CDF of the samples and of the data
x_theor, y_theor = ecdf(samples)
x, y = ecdf(belmont_no_outliers)
# Plot the CDFs and show the plot
_ = plt.plot(x_theor, y_theor)
_ = plt.plot(x, y, marker='.', linestyle='none')
_ = plt.xlabel('Belmont winning time (sec.)')
_ = plt.ylabel('CDF')
plt.show()
np.randm.exponential(mean, size=)
def successive_poisson(tau1, tau2, size=1):
"""Compute time for arrival of 2 successive Poisson processes."""
# Draw samples out of first exponential distribution: t1
t1 = np.random.exponential(tau1, size=size)
# Draw samples out of second exponential distribution: t2
t2 = np.random.exponential(tau2, size=size)
return t1 + t2 | Python | 50 | 33.220001 | 111 | /Datacamp/EDA_distributions.py | 0.71345 | 0.665497 |
DamienPond001/Udemy_API | refs/heads/master | #If a df is indexed by date-time, we can perform resampling.
#Downsampling is when we go to a lower unit, lower unit being one with fewer units in a period (lowere frequency)
#Downsample from hours to days
#Upsampling is the opposite and will introduce Nana, unless otherwise catered for through filling methods
# Downsample to 6 hour data and aggregate by mean: df1
df1 = df.Temperature.resample('6h').mean()
# Downsample to daily data and count the number of data points: df2
df2 = df.Temperature.resample('D').count()
# Extract temperature data for August: august
august = df.Temperature.loc['2010-08']
# Downsample to obtain only the daily highest temperatures in August: august_highs
august_highs = august.resample('D').max()
# Extract temperature data for February: february
february = df.Temperature.loc['2010-02']
# Downsample to obtain the daily lowest temperatures in February: february_lows
february_lows = february.resample('D').min()
# Extract data from 2010-Aug-01 to 2010-Aug-15: unsmoothed
unsmoothed = df['Temperature']['2010-Aug-01':'2010-Aug-15']
# Apply a rolling mean with a 24 hour window: smoothed
smoothed = unsmoothed.rolling(window=24).mean()
# Create a new DataFrame with columns smoothed and unsmoothed: august
august = pd.DataFrame({'smoothed':smoothed, 'unsmoothed':unsmoothed})
# Plot both smoothed and unsmoothed data using august.plot().
august.plot()
plt.show()
| Python | 36 | 38.083332 | 113 | /Datacamp/resampling.py | 0.764037 | 0.731343 |
DamienPond001/Udemy_API | refs/heads/master | #IF a table has an already defined relationship:
# Build a statement to join census and state_fact tables: stmt
stmt = select([census.columns.pop2000, state_fact.columns.abbreviation])
# Execute the statement and get the first result: result
result = connection.execute(stmt).first()
# Loop over the keys in the result object and print the key and value
for key in result.keys():
print(key, getattr(result, key))
# Build a statement to select the census and state_fact tables: stmt
stmt = select([census, state_fact])
# Add a select_from clause that wraps a join for the census and state_fact
# tables where the census state column and state_fact name column match
stmt = stmt.select_from(
census.join(state_fact, census.columns.state == state_fact.columns.name))
# Execute the statement and get the first result: result
result = connection.execute(stmt).first()
# Loop over the keys in the result object and print the key and value
for key in result.keys():
print(key, getattr(result, key))
# Build a statement to select the state, sum of 2008 population and census
# division name: stmt
stmt = select([
census.columns.state,
func.sum(census.columns.pop2008),
state_fact.columns.census_division_name
])
# Append select_from to join the census and state_fact tables by the census state and state_fact name columns
stmt = stmt.select_from(
census.join(state_fact, census.columns.state == state_fact.columns.name)
)
# Append a group by for the state_fact name column
stmt = stmt.group_by(state_fact.columns.name)
# Execute the statement and get the results: results
results = connection.execute(stmt).fetchall() | Python | 46 | 35.304348 | 109 | /Datacamp/sqlalchemy_joins.py | 0.743559 | 0.736369 |
DamienPond001/Udemy_API | refs/heads/master | SELECT * FROM table
SELECT COUNT(*) FROM table
#counts number of rows
SELECT DISTINCT row FROM table
#selects unique entries in row
SELECT COUNT(row) FROM table
#counts non-null entries
SELECT COUNT(DISTINCT row) FROM table
#returns count of distinct entries
SELECT * FROM table
WHERE column_value = 'some_value' #Use boolean operators, note that <> is !=
SELECT * FROM table
WHERE column1 = 'some_value' AND/OR column2 > some_value;
SELECT * FROM table
WHERE column BETWEEN value1 AND value2;
#Returns a range (inclusive)
SELECT * FROM table
WHERE column IN ('...', '....', '....')
#use this instead of multiple ORs
SELECT * FROM table
WHERE column IS NULL\IS NOT NULL
#filter column on null\not null values
SELECT * FROM table
WHERE column LIKE 'Data%'
# % wildcard matches none, one or many
SELECT * FROM table
WHERE column NOT LIKE 'Data%'
# % wildcard matches none, one or many. Here we return all entrie that DON'T match
SELECT * FROM table
WHERE column LIKE 'Data_'
# _ wildcard matches a single char
###AGGREGATION####
SELECT SUM(column) FROM table #AVG, MIN, MAX
SELECT (col1 + col2)*3 AS new_col FROM table #Note: (3/2) = 1, (3.0/2.0) = 1.5
#Can combine aggregations with arithmetic
####ORDERING####
SELECT column FROM table
ORDER BY col1 DESC
#NOTE comes after WHERE clauses
###GROUPING###
SELECT col1, COUNT(col2) FROM table
GROUP BY col1
#NOTE can't SELECT a column that isn't the GROUP BY, unless we aggregate it
###HAVING###
SELECT column FROM table
HAVING AVG(col1) > ...
###FULL EG###
SELECT release_year, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross FROM films
WHERE release_year > 1990
GROUP BY release_year
HAVING AVG(budget) > 60000000
ORDER BY avg_gross DESC
SELECT country, AVG(budget) AS avg_budget, AVG(gross) AS avg_gross FROM films
GROUP BY country
HAVING COUNT(title) > 10
ORDER BY country
LIMIT 5 | Python | 85 | 20.952942 | 82 | /Datacamp/SQL.py | 0.725469 | 0.706166 |
DamienPond001/Udemy_API | refs/heads/master | kind='scatter' uses a scatter plot of the data points
kind='reg' uses a regression plot (default order 1)
kind='resid' uses a residual plot
kind='kde' uses a kernel density estimate of the joint distribution
kind='hex' uses a hexbin plot of the joint distribution
# Generate a joint plot of 'hp' and 'mpg'
sns.jointplot(x='hp', y='mpg', data=auto)
# Generate a joint plot of 'hp' and 'mpg' using a hexbin plot
sns.jointplot(x='hp', y='mpg', data=auto, kind='hex')
# Display the plot
plt.show()
#Plot of all numeric columns against one another
# Print the first 5 rows of the DataFrame
print(auto.head())
# Plot the pairwise joint distributions from the DataFrame
sns.pairplot(auto, hue='origin', kind='reg')
# Display the plot
plt.show()
# Print the covariance matrix
print(cov_matrix)
# Visualize the covariance matrix using a heatmap
sns.heatmap(cov_matrix)
# Display the heatmap
plt.show() | Python | 33 | 26.363636 | 67 | /Datacamp/seaborn_multivariate.py | 0.740577 | 0.738359 |
DamienPond001/Udemy_API | refs/heads/master | #indexing as:
df[['...', '....']]
#returns a DataFrame
p_counties = election.loc['Perry':'Potter', :]
# Slice the row labels 'Potter' to 'Perry' in reverse order: p_counties_rev
p_counties_rev = election.loc['Potter':'Perry':-1, :]
# Slice the columns from the starting column to 'Obama': left_columns
left_columns = election.loc[:, :'Obama']
# Print the output of left_columns.head()
print(left_columns.head())
# Slice the columns from 'Obama' to 'winner': middle_columns
middle_columns = election.loc[:, 'Obama':'winner']
# Print the output of middle_columns.head()
print(middle_columns.head())
# Slice the columns from 'Romney' to the end: 'right_columns'
right_columns = election.loc[:, 'Romney':]
#inddexes are immutables, therefore to change it the whole index needs to be overwritten;
# Create the list of new indexes: new_idx
new_idx = [ind.upper() for ind in sales.index]
# Assign new_idx to sales.index
sales.index = new_idx
# Assign the string 'MONTHS' to sales.index.name
sales.index.name = 'MONTHS'
# Print the sales DataFrame
print(sales)
# Assign the string 'PRODUCTS' to sales.columns.name
sales.columns.name = 'PRODUCTS'
# Print the sales dataframe again
print(sales) | Python | 41 | 28.219513 | 89 | /Datacamp/indexing.py | 0.717627 | 0.716792 |
DamienPond001/Udemy_API | refs/heads/master | #Melting data is the process of turning columns of your data into rows of data.
airquality_melt = pd.melt(airquality_melt, id_vars=['Month', 'Day'])
#id_vars = columns not wishing to melt
#value_vars = columns wishing to melt (deafult to all not in id_vars)
#Pivoting data is the opposite of melting it.
airquality_pivot = airquality_melt.pivot_table(index=["Month", "Day"], columns="measurement", values="reading")
#columns="measurement" : columns to pivot
#values="reading" : values to fill columns with
#the above create a heirarchical header format. To fix this:
airquality_pivot_reset = airquality_pivot.reset_index()
#Often there are duplicate values, these can be handled as follows:
airquality_pivot = airquality_dup.pivot_table(index=['Month', 'Day'], columns='measurement', values='reading', aggfunc=np.mean)
#where the mean is taken
#Note in the below that Series atributes and functions are accessed on the .str function
# Melt ebola: ebola_melt
ebola_melt = pd.melt(ebola, id_vars=['Date', 'Day'], var_name='type_country', value_name='counts')
# Create the 'str_split' column
ebola_melt['str_split'] = ebola_melt.type_country.str.split('_')
# Create the 'type' column
ebola_melt['type'] = ebola_melt.str_split.str.get(0)
# Create the 'country' column
ebola_melt['country'] = ebola_melt.str_split.str.get(1)
# Print the head of ebola_melt
print(ebola_melt.head()) | Python | 32 | 42.28125 | 127 | /Datacamp/tidy_data.py | 0.739162 | 0.737717 |
DamienPond001/Udemy_API | refs/heads/master | # Construct arrays of data: dems, reps
dems = np.array([True] * 153 + [False] * 91)
reps = np.array([True] * 136 + [False] * 35)
def frac_yea_dems(dems, reps):
"""Compute fraction of Democrat yea votes."""
frac = np.sum(dems) / len(dems)
return frac
# Acquire permutation samples: perm_replicates
perm_replicates = draw_perm_reps(dems, reps, frac_yea_dems, 10000)
# Compute and print p-value: p
p = np.sum(perm_replicates <= 153/244) / len(perm_replicates)
print('p-value =', p)
# Compute the difference in mean sperm count: diff_means
diff_means = diff_of_means(control, treated)
# Compute mean of pooled data: mean_count
mean_count = np.mean(np.concatenate([control, treated]))
# Generate shifted data sets
control_shifted = control - np.mean(control) + mean_count
treated_shifted = treated - np.mean(treated) + mean_count
# Generate bootstrap replicates
bs_reps_control = draw_bs_reps(control_shifted,
np.mean, size=10000)
bs_reps_treated = draw_bs_reps(treated_shifted,
np.mean, size=10000)
# Get replicates of difference of means: bs_replicates
bs_replicates = bs_reps_control- bs_reps_treated
# Compute and print p-value: p
p = np.sum(bs_replicates >= np.mean(control) - np.mean(treated)) \
/ len(bs_replicates)
print('p-value =', p) | Python | 41 | 31.219513 | 66 | /Datacamp/A_B_testing.py | 0.680303 | 0.656818 |
DamienPond001/Udemy_API | refs/heads/master | #EG:
id treatment gender response
0 1 A F 5
1 2 A M 3
2 3 B F 8
3 4 B M 9
df.pivot(index = "treatment", columns = "gender", values = "response")
#pivot
gender F M
treatment
A 5 3
B 8 9
#Not specifying the values will pivot all columns | Python | 15 | 23.266666 | 70 | /Datacamp/pivoting_tables.py | 0.451791 | 0.407714 |
DamienPond001/Udemy_API | refs/heads/master | # Import package
from urllib.request import urlretrieve
# Import pandas
import pandas as pd
# Assign url of file: url
url = 'https://s3.amazonaws.com/assets.datacamp.com/production/course_1606/datasets/winequality-red.csv'
# Save file locally
urlretrieve(url, 'winequality-red.csv')
# Read file into a DataFrame and print its head
df = pd.read_csv('winequality-red.csv', sep=';')
#Alternatively
df = pd.read_csv(url, sep = ";") #does not save the file locally
#If file is an excel file
xl = pd.read_excel(url, sheetname = None)
# Print the sheetnames to the shell
print(xl.keys())
# Print the head of the first sheet (using its name, NOT its index)
print(xl['1700'].head())
##HTTP requests
# Import packages
from urllib.request import urlopen, Request
# Specify the url
url = "http://www.datacamp.com/teach/documentation"
# This packages the request: request
request = Request(url)
# Sends the request and catches the response: response
response = urlopen(request)
# Print the datatype of response
print(type(response))
# Extract the response: html
html = response.read()
# Be polite and close the response!
response.close()
#The requests package simplifies this:
# Import package
import requests
# Specify the url: url
url = "http://www.datacamp.com/teach/documentation"
# Packages the request, send the request and catch the response: r
r = requests.get(url)
# Extract the response: text
text = r.text
#NO NEED TO CLOSE
# Print the html
print(text) | Python | 68 | 20.67647 | 104 | /Datacamp/web_import.py | 0.741344 | 0.735234 |
DamienPond001/Udemy_API | refs/heads/master |
# Read in the data file with header=None: df_headers
df_headers = pd.read_csv(data_file, header=None)
# Print the output of df_headers.head()
print(df_headers.head())
# Split on the comma to create a list: column_labels_list
column_labels_list = column_labels.split(",")
# Assign the new column labels to the DataFrame: df.columns
df.columns = column_labels_list
# Remove the appropriate columns: df_dropped
df_dropped = df.drop(list_to_drop, axis = 'columns')
# Print the output of df_dropped.head()
print(df_dropped.head())
# Convert the date column to string: df_dropped['date']
df_dropped['date'] = df_dropped['date'].astype(str)
# Pad leading zeros to the Time column: df_dropped['Time']
df_dropped['Time'] = df_dropped['Time'].apply(lambda x:'{:0>4}'.format(x))
# Concatenate the new date and Time columns: date_string
date_string = df_dropped.date + df_dropped.Time
# Convert the date_string Series to datetime: date_times
date_times = pd.to_datetime(date_string, format='%Y%m%d%H%M')
# Set the index to be the new date_times container: df_clean
df_clean = df_dropped.set_index(date_times)
# Print the dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011
print(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren'])
# Convert the dry_bulb_faren column to numeric values: df_clean['dry_bulb_faren']
df_clean['dry_bulb_faren'] = pd.to_numeric(df_clean['dry_bulb_faren'], errors='coerce')
# Print the transformed dry_bulb_faren temperature between 8 AM and 9 AM on June 20, 2011
print(df_clean.loc['2011-Jun-20 08:00':'2011-Jun-20 09:00', 'dry_bulb_faren'])
# Convert the wind_speed and dew_point_faren columns to numeric values
df_clean['wind_speed'] = pd.to_numeric(df_clean['wind_speed'], errors='coerce')
df_clean['dew_point_faren'] = pd.to_numeric(df_clean['dew_point_faren'], errors='coerce') | Python | 45 | 40.066666 | 89 | /Datacamp/readin_and_cleaning.py | 0.723485 | 0.6921 |
DamienPond001/Udemy_API | refs/heads/master | import numpy as np
np.mean(data)
np.median(data)
np.var(versicolor_petal_length)
np.std(versicolor_petal_length)
#covariance matrix:
# returns a 2D array where entries [0,1] and [1,0] are the covariances.
# Entry [0,0] is the variance of the data in x, and entry [1,1] is the variance of the data in y
np.cov(versicolor_petal_length, versicolor_petal_width)
def pearson_r(x, y):
"""Compute Pearson correlation coefficient between two arrays."""
# Compute correlation matrix: corr_mat
corr_mat = np.corrcoef(x,y)
# Return entry [0,1]
return corr_mat[0,1]
# Compute Pearson correlation coefficient for I. versicolor: r
r = pearson_r(versicolor_petal_length, versicolor_petal_width)
# Print the result
print(r)
# Specify array of percentiles: percentiles
percentiles = np.array([2.5, 25, 50, 75, 97.5])
# Compute percentiles: ptiles_vers
ptiles_vers= np.percentile(versicolor_petal_length, percentiles)
# Print the result
print(ptiles_vers)
# Create box plot with Seaborn's default settings
_ = sns.boxplot(x='species', y='petal length (cm)', data=df)
# Label the axes
plt.xlabel('species')
plt.ylabel('petal length (cm)')
# Show the plot
plt.show()
| Python | 46 | 24.695652 | 96 | /Datacamp/EDA_boxplot_percentile.py | 0.72335 | 0.703046 |
DamienPond001/Udemy_API | refs/heads/master | from flask_restful import Resource, reqparse
from flask_jwt import jwt_required
import sqlite3
class Item(Resource):
parser = reqparse.RequestParser() #This prevents code duplication and now belongs to the Item class
parser.add_argument('price',
type = float,
required = True,
help = "This field cannot be left blank")
@jwt_required()
def get(self, name):
item = self.find_by_name(name)
#http://127.0.0.1:5000/item/wine?price=17 will pass 17 to the args
#args = Item.parser.parse_args()
#print(args['price'])
if item is not None:
return item, 200
else:
return {"message" : "Item not found"}, 404
@classmethod
def find_by_name(cls, name):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
select_query = "SELECT * FROM items WHERE name = ?"
result = cursor.execute(select_query, (name,))
item_in_db = result.fetchone()
connection.close()
if item_in_db is not None:
return {'item' : {'name' : item_in_db[0], 'price': item_in_db[1]}}
#We could use the get() method but that requires a JWT
#Thus we use the alternative class method
def post(self, name):
item = self.find_by_name(name)
if item is not None:
return {"message":"item already in database"}, 400
data = Item.parser.parse_args()
item = {'name' : name, 'price': data['price']}
try:
self.insert_item(item)
except:
return {"message" : "An error occurred"}, 500
return {'name' : name, 'price' : data['price']}, 201 #201 is code for created
@classmethod
def insert_item(cls, item):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
insert_query = "INSERT INTO items VALUES (?, ?)"
cursor.execute(insert_query, (item['name'], item['price']))
connection.commit()
connection.close()
def delete(self, name):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
delete_query = "DELETE FROM items WHERE name = ?"
cursor.execute(delete_query, (name,))
connection.commit()
connection.close()
return {"message" : "Item deleted"}
def put(self, name):
item = self.find_by_name(name)
data = Item.parser.parse_args()
updated_item = {'name' : name, 'price': data['price']}
if item is None:
try:
self.insert_item(updated_item)
except:
{"message" : "an error occurred"}, 500
else:
try:
self.update(updated_item)
except:
{"message" : "an error occurred"}, 500
return updated_item, 201 #201 is code for created
@classmethod
def update(cls, item):
connection = sqlite3.connect('data.db')
cursor = connection.cursor()
insert_query = "UPDATE items SET price = ? WHERE name = ?"
cursor.execute(insert_query, (item['price'], item['name']))
connection.commit()
connection.close()
class ItemList(Resource):
    """Resource exposing the full contents of the items table."""
    def get(self):
        """Return every stored item, or a message when the table is empty.

        Fix: cursor.fetchall() returns a list and never None, so the
        original ``is not None`` check made the "No items" branch
        unreachable; an emptiness test restores the intended behaviour.
        """
        connection = sqlite3.connect('data.db')
        cursor = connection.cursor()
        items = cursor.execute("SELECT * FROM items").fetchall()
        connection.close()

        if items:
            return {'items' : items}
        return {"message" : "No items in database"}
| Python | 132 | 27.121212 | 103 | /API/Section6/code/UseDB/item.py | 0.55433 | 0.540344 |
DamienPond001/Udemy_API | refs/heads/master | import sqlite3
# Small sqlite3 walkthrough: create a users table, insert rows, query them.
connection = sqlite3.connect('data.db')
cursor = connection.cursor() #similar to a screen cursor, it allows us to select and run things. It executes the queries

# NOTE(review): CREATE TABLE without IF NOT EXISTS fails on a second run.
create_table = "CREATE TABLE users (id int, username text, password text)"
cursor.execute(create_table)

user = (1, "damien", "bitches")
insert_query = "INSERT INTO users VALUES (?, ?, ?)"
cursor.execute(insert_query, user)

users = [
    (2, "not damien", "notbitches"),
    (3, "other", "otherps")
]
cursor.executemany(insert_query, users)

select_query = "SELECT * from users"
a = cursor.execute(select_query)  # execute() returns the cursor itself (iterable)

# connection.execute opens a fresh implicit cursor, so `a` keeps its pending rows
res = connection.execute("SELECT name FROM sqlite_master WHERE type='table';")
for name in res:
    print (name[0])

print(next(a))  # first row of the users SELECT

connection.commit()
connection.close()
DamienPond001/Udemy_API | refs/heads/master | # Import row from bokeh.layouts
# NOTE: DataCamp exercise snippets kept as reference material. `figure`,
# `source`, `output_file`, `show` and the plot variables (p1..p4, mpg_hp,
# mpg_weight, avg_mpg) are supplied by the exercise environment, so this
# file is not runnable on its own.

# Import row from bokeh.layouts
from bokeh.layouts import row, column
# Create the first figure: p1
p1 = figure(x_axis_label='fertility (children per woman)', y_axis_label='female_literacy (% population)')
# Add a circle glyph to p1
p1.circle('fertility', 'female_literacy', source=source)
# Create the second figure: p2
p2 = figure(x_axis_label='population', y_axis_label='female_literacy (% population)')
# Add a circle glyph to p2
p2.circle('population', 'female_literacy', source=source)
# Put p1 and p2 into a horizontal row: layout
layout = row(p1,p2)
#layout = column(p1, p2)
# Specify the name of the output_file and show the result
output_file('fert_row.html')
show(layout)

# Import column and row from bokeh.layouts
from bokeh.layouts import row, column
# Make a column layout that will be used as the second row: row2
row2 = column([mpg_hp, mpg_weight], sizing_mode='scale_width')
# Make a row layout that includes the above column layout: layout
layout = row([avg_mpg, row2], sizing_mode='scale_width')
# Specify the name of the output_file and show the result
output_file('layout_custom.html')
show(layout)

# Import gridplot from bokeh.layouts
from bokeh.layouts import gridplot
# Create a list containing plots p1 and p2: row1
row1 = [p1, p2]
# Create a list containing plots p3 and p4: row2
row2 = [p3, p4]
# Create a gridplot using row1 and row2: layout
layout = gridplot([row1, row2])
# Specify the name of the output_file and show the result
output_file('grid.html')
show(layout)

#TABS
# Import Panel from bokeh.models.widgets
from bokeh.models.widgets import Panel
# Create tab1 from plot p1: tab1
tab1 = Panel(child=p1, title='Latin America')
# Create tab2 from plot p2: tab2
tab2 = Panel(child=p2, title='Africa')
# Create tab3 from plot p3: tab3
tab3 = Panel(child=p3, title='Asia')
# Create tab4 from plot p4: tab4
tab4 = Panel(child=p4, title='Europe')
# Import Tabs from bokeh.models.widgets
from bokeh.models.widgets import Tabs
# Create a Tabs layout: layout
layout = Tabs(tabs=[tab1, tab2, tab3, tab4])
# Specify the name of the output_file and show the result
output_file('tabs.html')
show(layout)
vgrichina/ios-autocomplete | refs/heads/master | import os
import sqlite3
# Build the autocomplete index: one row per full name, a deduplicated table
# of name parts, and a join table linking parts to names.
db_path = "Autocomplete/names.sqlite"

# Rebuild from scratch; tolerate the database not existing on the first run
# (the original unconditional os.remove raised FileNotFoundError then).
if os.path.exists(db_path):
    os.remove(db_path)

db = sqlite3.connect(db_path)
# Speed over durability: this is a throwaway build of a static index.
db.execute("pragma synchronous=off")
db.execute("pragma journal_mode=memory")
db.execute("pragma temp_store=memory")

db.execute("create table names (name text)")
db.execute("create table parts (part text collate nocase)")
db.execute("""create table names_parts (part_id integer, name_id integer,
        foreign key(name_id) references names(rowid),
        foreign key(part_id) references parts(rowid))
    """)
db.execute("create index parts_idx on parts (part)")
db.execute("create index names_parts_idx on names_parts (part_id, name_id)")

c = db.cursor()
all_parts = {}
for name in open("Autocomplete/fake-full-names.txt", "r"):
    name = name.replace("\n", "")
    c.execute("insert into names values (?)", (name,))
    name_id = c.lastrowid
    for part in name.split(" "):
        # Skip single-character parts (initials, stray separators).
        if len(part) > 1:
            if part in all_parts:
                part_id = all_parts[part]
            else:
                c.execute("insert into parts values(?)", (part,))
                part_id = c.lastrowid
                # Fix: remember the rowid so repeated parts are deduplicated.
                # The original never populated all_parts, so every occurrence
                # of a part inserted a fresh (duplicate) parts row.
                all_parts[part] = part_id
            c.execute("insert into names_parts values (?, ?)", (part_id, name_id))

db.commit()
db.close()
| Python | 42 | 28.261906 | 82 | /gen_index.py | 0.634662 | 0.632221 |
zhouyichen/PGCN | refs/heads/master | import numpy as np
import pandas as pd
import os
from random import shuffle
def generate_proposals(start_gt, end_gt, label, n_frame, alpha=5, beta=2.5, n_to_generate=100):
    """Sample *n_to_generate* proposal windows around one ground-truth segment.

    For each proposal a target IoU is drawn from Beta(alpha, beta); the
    [start_gt, end_gt) segment is then extended past one randomly chosen end
    by exactly enough frames to realize that IoU. Candidates falling outside
    [0, n_frame] or whose kept overlap cannot reach the target IoU are
    rejected and re-drawn.

    Returns a list of [label, iou, intersection/proposal_length, start, end].
    """
    duration = end_gt - start_gt
    proposals = []
    while n_to_generate:
        iou = np.random.beta(alpha, beta)  # target IoU for this proposal
        not_success = True
        while not_success:
            is_start = np.random.randint(2)  # 1: move the start, 0: move the end
            endpoint1 = np.random.randint(start_gt, end_gt)  # endpoint kept inside the GT
            if is_start:
                start_ps = endpoint1
                intersection = end_gt - start_ps
                # The retained overlap alone must already reach the target IoU.
                if intersection / duration < iou:
                    continue
                # Solve iou = intersection / (duration + x) for the extension x
                # beyond end_gt (union grows by x, intersection is fixed).
                x = (intersection - duration * iou) / iou
                end_ps = round(end_gt + x)
                if end_ps > n_frame:
                    continue
            else:
                end_ps = endpoint1
                intersection = end_ps - start_gt
                x = (intersection - duration * iou) / iou
                if intersection / duration < iou:
                    continue
                start_ps = round(start_gt - x)
                if start_ps < 0:
                    continue
            not_success = False
            n_to_generate = n_to_generate - 1
            # Third field: fraction of the proposal covered by the GT segment.
            proposals.append([label, iou, intersection/(end_ps - start_ps), start_ps, end_ps])
    return proposals
def generate_proposal_file_per_video(index, video_path, gt_path, mapping, f, n_ps_per_gt):
    """Write one video's ground-truth segments and sampled proposals to *f*.

    Reads the per-frame label CSV at *gt_path*, converts each run of equal
    labels into a [label, start, end] segment (background label 0 is
    skipped), samples *n_ps_per_gt* proposals per segment via
    generate_proposals(), shuffles them and writes everything in the PGCN
    proposal-file format.

    :param index: video index written after the '#' marker
    :param video_path: path written into the proposal file
    :param mapping: dict from label string to integer class id (0 = background)
    :param f: open text file to append to
    """
    video = pd.read_csv(gt_path, header=None)
    video = video[video.columns[0]].values.tolist()
    n_frame = len(video)

    current_label = video[0]
    start_idx = 0
    n_gt = 0
    gt=[]
    proposals = []
    for i in range(n_frame):
        if video[i] == current_label:
            continue
        else:
            end_idx = i - 1
            label = mapping[current_label]
            if label != 0:
                n_gt = n_gt + 1
                gt.append([label, start_idx, end_idx])
                print(current_label, mapping[current_label], start_idx, end_idx)
            start_idx = i
            current_label = video[i]

    # Fix: the loop above only emits a segment when the label changes, so the
    # final run of frames (start_idx .. n_frame-1) was silently dropped
    # whenever a video ended on a non-background action.
    label = mapping[current_label]
    if label != 0:
        n_gt = n_gt + 1
        gt.append([label, start_idx, n_frame - 1])
        print(current_label, mapping[current_label], start_idx, n_frame - 1)

    print(len(proposals))
    f.write("#%s\n" %index)
    f.write(video_path + "\n")
    f.write(str(n_frame)+"\n" + "1" + "\n")
    f.write(str(n_gt) + "\n")
    for i in range(n_gt):
        f.write(str(gt[i][0]) + " " + str(gt[i][1]) + " "+ str(gt[i][2]) + "\n")
        ps = generate_proposals(start_gt=gt[i][1], end_gt=gt[i][2], label=gt[i][0], n_frame=n_frame,
                                n_to_generate=n_ps_per_gt)
        proposals.extend(ps)
    shuffle(proposals)
    f.write(str(len(proposals)) + "\n")
    for i in range(len(proposals)):
        f.write(str(proposals[i][0]) + " " + str(proposals[i][1]) + " " + str(proposals[i][2]) + " " +
                str(proposals[i][3]) + " " + str(proposals[i][4]) + "\n")
def main():
    """Build the Breakfast proposal file for every ground-truth video."""
    base = "CS6101/"
    mapping_filepath = base + "splits/mapping_bf.txt"
    mapping_df = pd.read_csv(mapping_filepath, header=None, sep=" ")
    # Map label string (column 1) -> integer class id (column 0).
    mapping = dict(zip(mapping_df[mapping_df.columns[1]], mapping_df[mapping_df.columns[0]]))
    print(mapping)

    videos = os.listdir(base + "groundtruth")
    print()
    print(len(videos))

    output_filepath = "data/breakfast_proposal.txt"
    with open(output_filepath, "w") as f:
        for idx, video in enumerate(videos):
            generate_proposal_file_per_video(idx, video_path=base + "groundtruth/" + video,
                                             gt_path=base + "groundtruth/" + video,
                                             mapping=mapping,
                                             f=f,
                                             n_ps_per_gt=100)
# Script entry point: build the proposal file when run directly.
if __name__ == '__main__':
    main()
| Python | 110 | 32.518181 | 102 | /generate_proposal.py | 0.505688 | 0.494854 |
EdgarOPG/Second-Partial-Proyect-Data-Mining | refs/heads/master | """
Author: Normando Ali Zubia Hernández
This file is created to explain the use of dimensionality reduction
with different tools in sklearn library.
Every function contained in this file belongs to a different tool.
"""
from sklearn import datasets
from sklearn.decomposition import PCA
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.feature_selection import SelectFromModel
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeRegressor
from sklearn import metrics
import pandas as pd
import numpy
def get_feacture_subset(data, *args):
    """Return the sub-DataFrame of *data* restricted to the named columns.

    :param data: pandas DataFrame
    :param args: column labels to keep, in the order given
    """
    wanted = list(args)
    return data[wanted]
def attribute_subset_selection_with_trees(data):
    """Rank features with an ExtraTreesClassifier and print the reduced matrix.

    *data* is a numeric matrix where column 0 is an id column and the last
    column holds the (integer) target; the columns in between are features.
    """
    # Split into features and targets (id and target columns excluded).
    X = data[:,1:-1]
    Y = numpy.asarray(data[:,-1], dtype="int16")

    # First 20 rows
    print('Training Data:\n\n' + str(X[:20]))
    print('\n')
    print('Targets:\n\n' + str(Y[:20]))

    # Model declaration
    extra_tree = ExtraTreesClassifier()

    # Model training
    extra_tree.fit(X, Y)

    # Model information:
    print('\nModel information:\n')

    # display the relative importance of each attribute
    print('Importance of every feature:\n' + str(extra_tree.feature_importances_))

    # prefit=True because the model was trained above.
    model = SelectFromModel(extra_tree, prefit = True)

    # Keep only the features whose importance passes the selector's threshold.
    new_feature_vector = model.transform(X)

    # First 10 rows of new feature vector
    print('\nNew feature vector:\n')
    print(new_feature_vector[:10])
def principal_components_analysis(data, columns, n_components):
    """Fit PCA on the feature columns of *data* and print its summary.

    :param data: numeric matrix; column 0 is an id, the last column the target
    :param columns: original column labels (used only by the commented-out
        component dump below)
    :param n_components: if < 1, the fraction of variance to retain
        (requires svd_solver='full'); otherwise the number of components
    """
    # Split into features and targets.
    X = data[:,1:-1]
    Y = numpy.asarray(data[:,-1], dtype="int16")

    # First 10 rows
    print('Training Data:\n\n' + str(X[:10]))
    print('\n')
    print('Targets:\n\n' + str(Y[:10]))

    # Model declaration: fractional n_components means "keep this much variance".
    if n_components < 1:
        pca = PCA(n_components = n_components, svd_solver = 'full')
    else:
        pca = PCA(n_components = n_components)

    # Model training
    pca.fit(X)

    # Model transformation
    new_feature_vector = pca.transform(X)

    # Model information:
    print('\nModel information:\n')
    print('Number of components elected: ' + str(pca.n_components))
    print('New feature dimension: ' + str(pca.n_components_))
    print('Variance of every feature: \n' + str(pca.explained_variance_ratio_))

    # First 10 rows of new feature vector
    #print('\nNew feature vector:\n')
    #print(new_feature_vector[:10])

    #print(pd.DataFrame(pca.components_,columns=columns[1:-1]))

    # Print complete dictionary
    # print(pca.__dict__)
def z_score_normalization(data):
    """Standardize the feature columns of *data* (zero mean, unit variance) and print them."""
    print('----- z_score_normalization -------\n')
    # Split into features and targets.
    X = data[:,1:-1]
    Y = numpy.asarray(data[:,-1], dtype="int16")

    # First 10 rows
    print('Training Data:\n\n' + str(X[:10]))
    print('\n')
    print('Targets:\n\n' + str(Y[:10]))

    # Data standarization: per-column (x - mean) / std.
    standardized_data = preprocessing.scale(X)

    # First 10 rows of new feature vector
    print('\nNew feature vector:\n')
    print(standardized_data[:10])
def min_max_scaler(data):
    """Rescale every feature of *data* to the [0, 1] range and print the result."""
    print('----- min_max_scaler -------\n')

    # Split into features and (integer) targets.
    X = data[:,1:-1]
    Y = numpy.asarray(data[:,-1], dtype="int16")

    print('Training Data:\n\n' + str(X[:10]))
    print('\n')
    print('Targets:\n\n' + str(Y[:10]))

    # Fit the scaler (renamed locally so it no longer shadows this function).
    scaler = preprocessing.MinMaxScaler()
    scaler.fit(X)

    print('\nModel information:\n')
    print('Data min: \n' + str(scaler.data_min_))
    print('Data max: \n' + str(scaler.data_max_))

    scaled = scaler.transform(X)

    print('\nNew feature vector:\n')
    print(scaled[:10])
def fill_missing_values_with_constant(data, column, constant):
    """Fill NaNs in *column* with *constant*; mutates *data* and returns it."""
    data[column] = data[column].fillna(constant)
    return data
def fill_missing_values_with_mean(data, column):
    """Fill NaNs in *column* with the column mean; mutates *data* and returns it."""
    data[column] = data[column].fillna(data[column].mean())
    return data
def fill_missing_values_with_mode(data, column):
    """Fill NaNs in *column* with the most frequent value; mutates *data* and returns it."""
    data[column] = data[column].fillna(data[column].mode()[0])
    return data
def convert_data_to_numeric(data):
    """Encode every column of *data* as integer codes 0..n_uniques-1.

    Each column's distinct values are ordered by numpy.unique and replaced by
    their rank. Values that cannot be matched in the code table (e.g. NaN,
    which never compares equal) are left unchanged, matching the original's
    behaviour. Returns the encoded numpy array.

    Fix: the original rewrote the column in place one unique value at a
    time, so columns whose values collide with already-written codes were
    corrupted (e.g. a column containing -1, 0 and 1 collapsed to a single
    code) — exactly the situation this script creates by filling NaNs
    with -1.
    """
    numpy_data = data.values
    for i in range(len(numpy_data[0])):
        column = numpy_data[:, i]
        # Build the full value -> code table first, then encode in one pass.
        codes = {value: code for code, value in enumerate(numpy.unique(column))}
        numpy_data[:, i] = [codes.get(value, value) for value in column]
    return numpy_data
# Pipeline: clean train.csv, encode it numerically, run the preprocessing
# demos, then fit and score a decision-tree regressor on a 75/25 split.
if __name__ == '__main__':
    data = pd.read_csv('train.csv')

    # NOTE(review): this targets the literal string 'NaN', not real NaN
    # values — the float-NaN LotFrontage entries are handled by the global
    # fillna('NaN') below; confirm this ordering is intended.
    data['LotFrontage'] = data['LotFrontage'].replace('NaN', -1, regex=False)
    #Outlier
    data = fill_missing_values_with_constant(data, 'MasVnrArea', 0)
    #Outlier
    data = fill_missing_values_with_constant(data, 'GarageYrBlt', -1)

    # Remaining missing values become the literal string 'NaN' so the
    # categorical encoder treats "missing" as one more category.
    data = data.fillna('NaN')
    columns = data.columns
    #print(columns)
    data = convert_data_to_numeric(data)

    #z_score_normalization(data)
    min_max_scaler(data)
    attribute_subset_selection_with_trees(data)
    principal_components_analysis(data,columns,.9)

    # Drop the id column (0) and split off the target (last column).
    feature_vector = data[:,1:-1]
    targets = data[:,-1]

    data_features_train, data_features_test, data_targets_train, data_targets_test = \
        train_test_split(feature_vector,
                         targets,
                         test_size=0.25)

    # Model declaration
    """
    Parameters to select:
        criterion: "mse"
        max_depth: maximum depth of tree, default: None
    """
    # NOTE(review): criterion='mse' was renamed in newer scikit-learn
    # releases — confirm against the pinned sklearn version.
    dec_tree_reg = DecisionTreeRegressor(criterion='mse', max_depth=7)
    dec_tree_reg.fit(data_features_train, data_targets_train)

    # Model evaluation
    test_data_predicted = dec_tree_reg.predict(data_features_test)
    error = metrics.mean_absolute_error(data_targets_test, test_data_predicted)

    print('Total Error: ' + str(error))
| Python | 218 | 27.688074 | 86 | /primera_iteracion.py | 0.649344 | 0.637032 |
EdgarOPG/Second-Partial-Proyect-Data-Mining | refs/heads/master | """
Author: Normando Ali Zubia Hernández
This file is created to explain the use of normalization
with different tools in sklearn library.
Every function contained in this file belongs to a different tool.
"""
from sklearn import preprocessing
import pandas as pd
import numpy
def z_score_normalization(data):
    """Standardize the feature columns of *data* and print them.

    NOTE(review): X excludes the last TWO columns while Y is the last
    column, so the second-to-last column is dropped entirely — the sibling
    script slices [:,1:-1]; confirm which split is intended.
    """
    X = data[:,0:-2]
    Y = numpy.asarray(data[:,-1], dtype="int16")

    # First 10 rows
    print('Training Data:\n\n' + str(X[:10]))
    print('\n')
    print('Targets:\n\n' + str(Y[:10]))

    # Data standarization: per-column (x - mean) / std.
    standardized_data = preprocessing.scale(X)

    # First 10 rows of new feature vector
    print('\nNew feature vector:\n')
    print(standardized_data[:10])
def min_max_scaler(data):
    """Rescale the feature columns of *data* to [0, 1] and print the result.

    NOTE(review): X excludes the last TWO columns while Y is the last
    column — see z_score_normalization above; confirm the intended slice.
    """
    X = data[:,0:-2]
    Y = numpy.asarray(data[:,-1], dtype="int16")

    # First 10 rows
    print('Training Data:\n\n' + str(X[:10]))
    print('\n')
    print('Targets:\n\n' + str(Y[:10]))

    # Data normalization: fit per-column min/max, then transform.
    min_max_scaler = preprocessing.MinMaxScaler()
    min_max_scaler.fit(X)

    # Model information:
    print('\nModel information:\n')
    print('Data min: ' + str(min_max_scaler.data_min_))
    print('Data max: ' + str(min_max_scaler.data_max_))

    new_feature_vector = min_max_scaler.transform(X)

    # First 10 rows of new feature vector
    print('\nNew feature vector:\n')
    print(new_feature_vector[:10])
def convert_data_to_numeric(data):
    """Encode every column of *data* as integer codes 0..n_uniques-1.

    Distinct values are ordered by numpy.unique and replaced by their rank;
    unmatched values (e.g. NaN) are left unchanged. Returns the encoded
    numpy array.

    Fix: the original rewrote the column in place one unique value at a
    time, corrupting columns whose values overlap the codes already
    written (e.g. a numeric column containing -1, 0 and 1).
    """
    numpy_data = data.values
    for i in range(len(numpy_data[0])):
        column = numpy_data[:, i]
        # Build the full value -> code table first, then encode in one pass.
        codes = {value: code for code, value in enumerate(numpy.unique(column))}
        numpy_data[:, i] = [codes.get(value, value) for value in column]
    return numpy_data
# Demo driver: encode train.csv, then show both normalization strategies.
if __name__ == '__main__':
    data = pd.read_csv('train.csv')
    data = convert_data_to_numeric(data)
    z_score_normalization(data)
    min_max_scaler(data)
| Python | 77 | 24.38961 | 66 | /normalization.py | 0.615345 | 0.599488 |
EdgarOPG/Second-Partial-Proyect-Data-Mining | refs/heads/master | import pandas as pd
import matplotlib.pyplot as ptl
import math as mt
def open_file(fileName):
    """Read the CSV file at *fileName* into a pandas DataFrame."""
    return pd.read_csv(fileName)
def show_data_info(data):
    """Print dataset dimensions, the full frame, and its summary statistics."""
    rows, cols = data.shape
    print("Number of instance:" + str(rows))
    print("Number of features:" + str(cols))
    print("------------------------------------------")
    print("Initial instance:\n")
    print(data)
    print("Numerical info:\n")
    print(data.iloc[:, :cols].describe())
def count_words(data, column):
    """Overwrite *column* with the per-row count of space-separated tokens.

    Mutates *data* and returns it.
    """
    data[column] = [len(data.iloc[idx][column].split(' '))
                    for idx in range(len(data))]
    return data
def save(data):
    """Write *data* to 'clean.csv' in the working directory, without the row index."""
    data.to_csv('clean.csv', index = False)
# Entry point: load train.csv and print its summary; saving is opt-in.
if __name__ == '__main__':
    data = open_file('train.csv')
    show_data_info(data)
    #save(data);
| Python | 36 | 23.5 | 55 | /clean.py | 0.582766 | 0.579365 |
EdgarOPG/Second-Partial-Proyect-Data-Mining | refs/heads/master | """
*This module was create for Data Mining subject in Universidad Autonóma de Chihuahua
*Professor: M.I.C Normando Ali Zubia Hernández
Module information:
The principal functions of this module are:
*Create violin graphs
*Create box-Graphs
*Create Histograms
Information contact:
email: azubiah@uach.mx
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from pandas.tools.plotting import scatter_matrix
def open_file(fileName):
    '''
    This method will open a file in JSON format (pandas.read_json —
    despite the CSV phrasing used elsewhere in this module).

    :param fileName: file to open (Complete PATH)
    :return: Pandas Data Frame
    '''
    #TODO input-file validation (note: the input is JSON, not CSV)
    data = pd.read_json(fileName)

    return data
def create_histogram(data, column='bedrooms'):
    """Plot a histogram of one DataFrame column and display it.

    :param data: pandas DataFrame
    :param column: column to plot; defaults to 'bedrooms' to preserve the
        original hard-coded behaviour.
    """
    data.hist(column=column)
    plt.show()
def create_density_plot(data):
    """Draw per-column density (KDE) plots in a 3x3 grid and display them."""
    data.plot(kind='density', subplots=True, layout=(3, 3), sharex=False)
    plt.show()
def create_whisker_plots(data):
    """Draw per-column box-and-whisker plots in a 3x3 grid and display them."""
    data.plot(kind='box', subplots=True, layout=(3, 3), sharex=False, sharey=False)
    plt.show()
def show_data_info(data):
    """Print dataset dimensions, the first ten rows, and summary statistics."""
    rows, cols = data.shape
    print("Number of instance: " + str(rows))
    print("Number of fetures: " + str(cols))
    print('------------------------------------------')
    print("Initial instances:\n")
    print(data.head(10))
    print("Numerical Information:\n")
    print(data.iloc[:, :cols].describe())
def get_feature_subset(data, *args):
    """Return the sub-DataFrame of *data* restricted to the named columns."""
    wanted = list(args)
    return data[wanted]
def delete_column(data, *args):
    """Return a copy of *data* with each named column dropped.

    Fix: uses the keyword form ``drop(columns=...)`` — the positional
    ``drop(arg, 1)`` axis argument was deprecated and then removed in
    pandas 2.0.
    """
    for column in args:
        data = data.drop(columns=column)
    return data
def delete_missing_objects(data, type):
    """Drop rows or columns of *data* that contain missing values.

    :param type: 'instance' drops rows (axis 0); any other value drops
        columns (axis 1). (The parameter name shadows the builtin ``type``
        but is kept for caller compatibility; it is no longer rebound.)
    """
    axis = 0 if type == 'instance' else 1
    return data.dropna(axis = axis)
def replace_missing_values_with_constant(data, column, constant):
    """Fill NaNs in *column* with *constant*; mutates *data* and returns it."""
    data[column] = data[column].fillna(constant)
    return data
def replace_missing_values_with_mean(data, column):
    """Fill NaNs in *column* with the column mean; mutates *data* and returns it."""
    data[column] = data[column].fillna(data[column].mean())
    return data
def numero_banios_influye_precio(data):
    """Bar-plot mean listing price per distinct bathroom count.

    Iterates over the distinct 'bathrooms' values (in value_counts order,
    i.e. most frequent first), averages 'price' for each, then shows a bar
    chart of the means.
    """
    numbBath = data['bathrooms'].value_counts()
    numbBathKeys = numbBath.keys()

    priceArray = []
    for number in numbBathKeys:
        # All listings with exactly this bathroom count.
        subset = data.loc[data['bathrooms'] == number]
        print('Numero de banios:' + str(number))
        print(subset['price'])
        priceArray.append(subset["price"].mean())

    print(numbBathKeys)
    print(priceArray)
    width = .2
    plt.bar(numbBathKeys, priceArray, width, color="blue")
    plt.ylabel('precio')
    plt.xlabel('#banios')
    plt.title('banios inlfuye precio')
    # Axis ticks: bathrooms in 0.5 steps, price in 5000 steps up to 60000.
    plt.xticks(np.arange(0, max(numbBathKeys), .5))
    plt.yticks(np.arange(0, 60000, 5000))
    plt.show()
def numero_habitaciones_influye_precio(data):
    """Bar-plot mean listing price per distinct bedroom count.

    Same structure as numero_banios_influye_precio, applied to 'bedrooms'.
    """
    numbHab = data['bedrooms'].value_counts()
    numbHabKeys = numbHab.keys()

    priceArray = []
    for number in numbHabKeys:
        # All listings with exactly this bedroom count.
        subset = data.loc[data['bedrooms'] == number]
        print('Numero de habitaciones:' + str(number))
        print(subset['price'])
        priceArray.append(subset["price"].mean())

    print(numbHabKeys)
    print(priceArray)
    width = .2
    plt.bar(numbHabKeys, priceArray, width, color="blue")
    plt.ylabel('precio')
    plt.xlabel('#habitaciones')
    plt.title('Habitaciones influye precio')
    # Axis ticks: bedrooms in 0.5 steps, price in 1000 steps up to 15000.
    plt.xticks(np.arange(0, max(numbHabKeys), .5))
    plt.yticks(np.arange(0, 15000, 1000))
    plt.show()
# Entry point: load the JSON listings and plot price vs. bedroom count.
# The commented-out calls below are alternative exploratory views.
if __name__ == '__main__':
    filePath = "train.json"
    data = open_file(filePath)
    #headers = [x for x in data]
    #print(headers)
    #for head in headers:
    #    if head != 'description' and head != 'features' and head != 'photos':
    #        print(data[head].value_counts())
    #print(data.head)
    #show_data_info(data)
    #print(data[0:10])
    #numero_banios_influye_precio(data)
    numero_habitaciones_influye_precio(data)
    #create_histogram(data)
    #create_density_plot(data)
    #create_whisker_plots(data)
| Python | 168 | 22.797619 | 84 | /a.py | 0.644322 | 0.634067 |
isabellaleehs/Data_Visualization | refs/heads/master | # Create choropleth map
#
# Date: Dec 2017
import plotly as py
import pandas as pd
import pycountry
def get_data(filename):
    '''
    Loads data from file and cleans it.

    The CSV's first data row holds the real column names; it is promoted to
    the header. The '2016' estimate column is renamed and normalized to a
    bare digit string (range brackets, spaces, '<'/'>' markers and the
    'Nodata' placeholder removed).

    Inputs:
        filename: file directory

    Returns: a cleaned dataframe
    '''
    df = pd.read_csv(filename)

    # Reset header row
    df.columns = df.iloc[0]
    df = df[1:]

    # Rename column
    df = df.rename(index=str, columns={"2016": "Estimated no. w/ HIV"})

    # Remove all parenthesis and square brackets
    df['Country'] = df.Country.apply(lambda x: x.replace(' (',', ').replace(')',''))
    # Alternative to above: df['Country'] = df['Country'].str.replace(r"\s+\((.*)\)", r", \1")
    # Fix: regex=True is required here — pandas >= 2.0 treats str.replace
    # patterns as literals by default, which silently left the
    # " [low - high]" ranges in the estimate column.
    df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(r"\s+\[.*\]", "", regex=True)

    # Lower case, remove spaces between numbers, remove strings and set to 0
    df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace(" ","")
    df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.strip("<>")
    df['Estimated no. w/ HIV'] = df['Estimated no. w/ HIV'].str.replace("Nodata","")

    # Modify names of countries not recognized by pycountry
    df['Country'] = df['Country'].replace('Democratic Republic of the Congo','Congo, the Democratic Republic of the')
    df['Country'] = df['Country'].replace('Republic of Korea',"Korea, Democratic People's Republic of")

    return df
def get_country_code(x):
    '''
    Finds the 3 letter alpha code for a country.

    Inputs:
        x: country name

    Returns: alpha_3 code for the country (raises LookupError for an
    unknown name)

    Fix: pycountry's lookup() raises LookupError rather than returning
    None, so the original ``!= None`` guard was dead code and the lookup
    ran twice per call.
    '''
    return pycountry.countries.lookup(x).alpha_3
# Get and clean data
df = get_data('data.csv')
df['Code'] = df['Country'].apply(get_country_code)

# Make choropleth map using data: one trace keyed by ISO alpha-3 code,
# colored by the estimated number of people living with HIV.
data = [ dict(
        type = 'choropleth',
        locations = df['Code'],
        z = df['Estimated no. w/ HIV'],
        text = df['Country'],
        colorscale = [[0,"#c6dbef"],[0.2,"#6baed6"],[0.4,"#4292c6"],\
            [0.6,"#2171b5"],[0.8,"#0e5693"],[1,"#013e7c"]],
        autocolorscale = False,
        reversescale = False,
        marker = dict(
            line = dict (
                color = 'rgb(180,180,180)',
                width = 0.5
            ) ),
        # NOTE(review): `autotick` was removed in newer plotly releases;
        # confirm against the pinned plotly version.
        colorbar = dict(
            autotick = False,
            title = 'Estimated no.<br>w/ HIV'),
      ) ]

layout = dict(
    title = 'Number of people (all ages) living with HIV<br>Estimates by country<br><br>\
[Source:<a href="http://apps.who.int/gho/data/node.main.620?lang=en"> World Health Organization</a>]',
    margin = dict(
        l=10,
        r=10,
        b=50,
        t=150,
        pad=4
    ),
    geo = dict(
        showframe = False,
        showcoastlines = False,
        projection = dict(
            type = 'Mercator'
        )
    )
)

# Display map: render to d3-world-map.html and open it in the browser.
fig = dict( data=data, layout=layout )
py.offline.plot( fig, validate=False, filename='d3-world-map' )
| Python | 101 | 28.445545 | 117 | /Choropleth map/make_map.py | 0.570612 | 0.546402 |
chsoftworld/S-SEC-demo- | refs/heads/master | import requests
from lxml import etree
import pymysql
# Scrape the top stocks from 10jqka's big-deal page, keep the first 20
# distinct (name, code) pairs, and persist them into the MySQL `stacks` table.
url = 'http://data.10jqka.com.cn/funds/ddzz/#refCountId=db_50741cd6_397,db_509381c1_860'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1;WOW64; rv:6.0) '
                  'Gecko/20100101 Firefox/6.0',
}
html = requests.get(url,headers=headers).text
parse_html = etree.HTML(html)
# Table columns: td[2] = stock code, td[3] = stock name.
num_list = parse_html.xpath('//tbody/tr/td[2]/a/text()')
name_list = parse_html.xpath('//tbody/tr/td[3]/a/text()')
stacks = []
count = 0
# Collect the first 20 unique [name, code] pairs (the page repeats rows).
for i in range(len(num_list)):
    if count==20:
        break
    demo = [name_list[i],num_list[i],]
    if demo not in stacks:
        count+=1
        stacks.append(demo)
    else:
        continue
print(stacks)
print(len(stacks))
# [['300785', 'N值得买'], ['002105', '信隆健康'], ['002453', '华软科技'], ['300167', '迪威迅'], ['600078', '澄星股份'], ['002473', '圣莱达'], ['002225', '濮耐股份'], ['000586', '汇源通信'], ['002124', '天邦股份'], ['300527', '中国应急'], ['603189', '网达软件'], ['300378', '鼎捷软件'], ['300417', '南华仪器'], ['300632', '光莆股份'], ['300424', '航新科技'], ['002915', '中欣氟材'], ['300769', '德方纳米'], ['603068', '博通集成'], ['002312', '三泰控股'], ['300253', '卫宁健康']]

db = pymysql.connect('localhost','root','123456','SSEC',charset='utf8')
cursor = db.cursor()
count = 0
for i in stacks:
    # Stop once the table already holds 20 rows.
    cursor.execute('select count(id) from stacks')
    res = cursor.fetchall()
    if res[0][0] == 20:
        print('数据已满')
        break
    try:
        cursor.execute('insert into stacks values(Null,%s,%s)',[i[0],i[1]])
        db.commit()
        count += 1
        print(count/20*100,'%--完成')
    except Exception as e:
        # On failure, let the operator choose to roll back and abort ('r')
        # or skip this row and continue.
        print(e)
        result = input('>>r键返回')
        if result == 'r':
            db.rollback()
            break
        else:
            continue
cursor.execute('select * from stacks')
res = cursor.fetchall()
print(res)
print(len(res))
cursor.close()
db.close()

# Preview of the 5x4 grid positions used by the UI button layout.
for i in range(20):
    print(i//4+1,i%4+1,end=' ')
| Python | 63 | 29.063492 | 401 | /Stock SEC/demo.py | 0.566684 | 0.465999 |
chsoftworld/S-SEC-demo- | refs/heads/master | import tkinter as tk
from threading import Thread
from tkinter import messagebox
import pymysql as sql
import requests
import time
from lxml import etree
import json
from stack_detail import *
from gevent import monkey # monkey 插件
from queue import Queue
import os
class SSEC:
    """
    Main window: shows the 20 tracked stocks as a colored button grid.
    """
    def __init__(self,window):
        self.window = window
        # Full-window container with the dark theme background.
        self.table = tk.Label(self.window,bg='#2c3842')
        self.table.pack(fill='both', expand=1)
        self.image = tk.PhotoImage(file='stacks_SEG.png')
        # MySQL connection holding the tracked stock list (table `stacks`).
        self.db = sql.connect('localhost', 'root', '123456', 'SSEC', charset='utf8')
        self.cursor = self.db.cursor()
        self.index()

    def index(self):
        """
        Home page: fetch live state for the 20 default stocks and lay out
        one button per stock, colored by its current rise/fall.
        :return:
        """
        messagebox.showwarning(title='SSEC',message='准备获取实时数据,这会占用您几秒钟,\n点击[ok]开始')
        self.label = tk.Label(self.table,bg='#2c3842')
        self.label.pack()
        self.cursor.execute('select * from stacks') # rows are (id, name, number)
        self.res = self.cursor.fetchall()
        count = -1
        stack_box = {}
        self.url = 'http://www.aigaogao.com/tools/action.aspx?act=apr'
        ths = []
        self.colors = {}
        for i in self.res:
            """
            Fetch each stock's current rise/fall state on its own thread.
            """
            name = i[1]
            number = i[2]
            t = Thread(target=self.get_color,args=(name,number))
            ths.append(t)
            t.start()
        for i in ths:
            i.join()
        for i in self.res:
            """
            Color every stock button according to its rise/fall state.
            """
            count += 1
            name = i[1]
            number = i[2]
            # One cell per stock, arranged in a 4-wide grid.
            stack_box[str(count)] = tk.Label(self.label, bg='#2c3842')
            stack_box[str(count)].grid(row=count // 4 + 1, column=count % 4 + 1, pady=6, padx=3)
            tk.Button(stack_box[str(count)], bd=1, text=name, width=10, height=2, font=('黑体', '12', 'bold'), bg=self.colors[name],
                      fg='white', command=lambda num=number, name=name: self.detail(num, name)).grid(row=1, column=1)
            tk.Button(stack_box[str(count)], bd=1, text='X', bg='#f84b4c', font=('黑体', '12', 'bold'), fg='white',
                      height=2).grid(row=1, column=2)
        self.entry = tk.Entry(self.table, width=30, font=('黑体', '12', 'bold'))
        self.entry.place(x=140, y=420)
        btn = tk.Button(self.table, width=20, text='搜索其他股票', fg='white', bg='#25a9e1')
        btn.place(x=420, y=420)

    def get_color(self,name,number):
        """
        Worker-thread body: fetch one stock's current change and store its
        button color in self.colors[name].
        :param name: display name (dict key)
        :param number: stock code posted to the quote endpoint
        :return:
        """
        headers = {
            'Accept': '*/*',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Connection': 'keep-alive',
            'Content-Length': '11',
            'Content-type': 'application/x-www-form-urlencoded',
            'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; __utmt=1; s_histo=601678; __utmb=90353546.12.10.1563262167; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',
            'Host': 'www.aigaogao.com',
            'Origin': 'http://www.aigaogao.com',
            'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(number),
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
            'X-Prototype-Version': '1.4.0',
            'X-Requested-With': 'XMLHttpRequest',
        }
        data = {'s': str(number)}
        html = requests.post(self.url, headers=headers, data=data).text
        # HACK(review): eval() on a remote response is dangerous — the
        # endpoint returns a dict-like literal; json.loads would be safer.
        d = eval(html)
        num = float(d['data'][0]['change'])
        if num > 0:
            self.colors[name] = '#da7252'   # up
        elif num == 0:
            self.colors[name] = '#747474'   # flat
        else:
            self.colors[name] = '#2db67a'   # down

    def detail(self,num,name):
        """
        Fork a child process that opens the detail (trend) window for one
        stock; the parent just idles.
        NOTE(review): os.fork is POSIX-only — confirm target platform.
        :param num:
        :param name:
        :return:
        """
        monkey.patch_all()
        pid = os.fork()
        if pid<0:
            print('子进程创建失败')
        elif pid==0:
            Details(num,name)
        else:
            while True:
                time.sleep(0.1)

    def back_to_index(self):
        """
        Return to the home page.
        NOTE(review): os._exit(0) terminates the process immediately, so the
        destroy()/index() lines below never run in the same process.
        :return:
        """
        os._exit(0)  # end the child process
        self.label.destroy()
        self.index()

    def views(self):
        # Secondary view showing the static stacks image with a back button.
        self.label = tk.Label(self.table, bg='#2c3842',image=self.image)
        tk.Button(self.table,bg='#25a9e1',command=self.back_to_index)
# Entry point: create the 720x500 main window and start the Tk event loop.
if __name__=='__main__':
    window = tk.Tk(className='S-SEC')
    window.geometry('720x500')
    SSEC(window)
    window.mainloop()
| Python | 157 | 30.21656 | 345 | /Stock SEC/UI.py | 0.534472 | 0.470205 |
chsoftworld/S-SEC-demo- | refs/heads/master | # from lxml import etree
# import requests
# import numpy as np
# import matplotlib.dates as md
# import matplotlib.pyplot as mp
# from UI import *
# def details(num,name):
# """
# 获取并绘制数据
# :param num:
# :return:
# """
# print('start get')
#
# """
# 获取阶段
# """
# url = 'http://www.aigaogao.com/tools/history.html?s={}'.format(num)
# headers = {
# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
# 'Accept-Encoding': 'gzip, deflate',
# 'Accept-Language': 'zh-CN,zh;q=0.9',
# 'Cache-Control': 'max-age=0',
# 'Connection': 'keep-alive',
# 'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',
# 'Host': 'www.aigaogao.com',
# 'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(num),
# 'Upgrade-Insecure-Requests': '1',
# 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
#
# }
# print('web start get')
# html = requests.get(url,headers).text
# print('web get over')
# dates =[]
# opening_prices=[]
# hightest_prices=[]
# lowerest_price=[]
# closing_prices=[]
# volumns = []
# for i in range(90,0,-1):
# res = etree.HTML(html).xpath('//div[@id="ctl16_contentdiv"]//tr[{}]//text()'.format(i+1))
#
# str_list = res[0].split('/')
# date = '-'.join([str_list[-1],str_list[0],str_list[1]])
# dates.append(date)
# opening_prices.append(float(res[1].replace(',','')))
# hightest_prices.append(float(res[2].replace(',','')))
# lowerest_price.append(float(res[3].replace(',','')))
# closing_prices.append(float(res[4].replace(',','')))
# volumns.append(float(res[5].replace(',','')))
# dates = np.array(dates,dtype='M8[D]')
# opening_prices = np.array(opening_prices)
# hightest_prices=np.array(hightest_prices)
# lowerest_price=np.array(lowerest_price)
# closing_prices=np.array(closing_prices)
# volumns = np.array(volumns)
# print('start draw')
# """
# 绘制阶段
# """
# mp.figure('S-SEC', facecolor='lightgray') # 设定窗口标题,窗口背景色
# mp.title(num, fontsize=18) # 设定窗口内标题
#
# mp.xlabel('Date', fontsize=14) # 设定x轴标题
# mp.ylabel('Price', fontsize=14) # 设定y轴标题
# mp.grid(linestyle=':') # 设定图标网格线
# mp.tick_params(labelsize=10) # 设定刻度参数文字大小
# # 设置可定定位器
# ax = mp.gca() # 获取当前坐标轴
# maloc = md.WeekdayLocator(byweekday=md.MO) # 每周一 一个主刻度
# miloc = md.DayLocator() # 每天一个子刻度
# ax.xaxis.set_major_locator(maloc)
# # 设置主刻度日期的格式
# ax.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))
#
# ax.xaxis.set_minor_locator(miloc)
# dates = dates.astype(md.datetime.datetime) # 转日期格式
#
# # 收盘走势线
# mp.plot(dates, closing_prices, label='Closing_prices', linewidth=2, color='black', alpha=1.0)
# # 绘制蜡烛图
# # 调整颜色
# rise = closing_prices >= opening_prices
# color = [('white' if x else 'green') for x in rise]
# ecolor = [('red' if x else 'green') for x in rise]
# # 绘制实体
# heights = closing_prices - opening_prices
# mp.bar(dates, heights, 0.8, opening_prices, color=color, edgecolor=ecolor, align='center',zorder=-4)
# # 绘制影线
# mp.vlines(dates,lowerest_price, hightest_prices, color=ecolor, zorder=-5)
#
# # 实现加权卷积
# # 通过指数函数,寻求一组卷积核
# kernel = np.exp(np.linspace(-1, 0, 5))
# kernel = kernel[::-1]
#
# # 绘制5日均线-加权卷积运算
# sma53 = np.convolve(closing_prices, kernel, 'valid') / kernel.sum()
# mp.plot(dates[4:], sma53, label='SMA-5days+', linewidth=2, color='gray', alpha=0.7, zorder=-4)
# # print('sma5+:',sma53[-5:])
# # 求5日布林带
# stds = np.zeros(sma53.size)
# for i in range(stds.size):
# stds[i] = closing_prices[i:i + 5].std()
# lowers = sma53 - 2 * stds
# mp.plot(dates[4:], lowers, label='lowers', linewidth=2, color='gray', alpha=0.2)
# # print('lowers:',lowers[-5:])
# uppers = sma53 + 2 * stds
# mp.plot(dates[4:], uppers, label='uppers', linewidth=2, color='gray', alpha=0.2)
# # print('uppers:',uppers[-5:])
# mp.fill_between(dates[4:], uppers, lowers, uppers > lowers, color='gray', alpha=0.2, zorder=-1)
#
# mp.legend(loc='lower right', fontsize=10, )
# mp.gcf().autofmt_xdate() # 自动斜化
#
# mp.show()
#
#
# if __name__=='__main__':
# details(600745,'实验')
#
#
#
from lxml import etree
import requests
import numpy as np
import matplotlib.dates as md
import matplotlib.pyplot as mp
from UI import *
class Details:
    """Scrape ~90 days of OHLCV history for one stock code from
    aigaogao.com and plot a candlestick chart with a weighted 5-day SMA
    and Bollinger bands.
    """

    def __init__(self, num, name):
        self.num = num    # stock code, e.g. 600745
        self.name = name  # display name (not used in the plot itself)
        self.dates = []
        self.opening_prices = []
        self.hightest_prices = []  # (sic) highest prices
        self.lowerest_price = []   # (sic) lowest prices
        self.closing_prices = []
        self.volumns = []          # (sic) volumes
        self.plan = 0              # progress counter (rows parsed so far)
        # Rows are scraped concurrently. BUG FIX: the original had each
        # thread append directly to the parallel lists above, so the lists
        # could end up in different (thread-completion) orders; store each
        # row in its own slot instead and assemble the lists afterwards.
        self._rows = [None] * 90
        self.details()

    def details(self):
        """Download the history page, parse 90 rows concurrently, then plot."""
        print('start get')
        url = 'http://www.aigaogao.com/tools/history.html?s={}'.format(self.num)
        headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9',
            'Cache-Control': 'max-age=0',
            'Connection': 'keep-alive',
            'Cookie': 'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',
            'Host': 'www.aigaogao.com',
            'Referer': 'http://www.aigaogao.com/tools/history.html?s={}'.format(self.num),
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
        }
        print('web start get')
        # BUG FIX: headers must be passed by keyword -- the second
        # positional argument of requests.get() is `params`, so the
        # original request was sent WITHOUT these headers.
        self.html = requests.get(url, headers=headers).text
        print('web get over')
        self.jobs = []
        for i in range(90, 0, -1):
            tt = Thread(target=self.get_msg, args=(i,))
            self.jobs.append(tt)
            tt.daemon = True  # Thread.setDaemon() is deprecated
            tt.start()
        # BUG FIX: the join was commented out, so shows() raced with the
        # scraper threads and could plot partially-filled data.
        for job in self.jobs:
            job.join()
        # Assemble the ordered per-slot rows into the parallel lists.
        for row in self._rows:
            if row is None:
                continue
            date, opening, highest, lowest, closing, volume = row
            self.dates.append(date)
            self.opening_prices.append(opening)
            self.hightest_prices.append(highest)
            self.lowerest_price.append(lowest)
            self.closing_prices.append(closing)
            self.volumns.append(volume)
        self.shows()

    def get_msg(self, i):
        """Parse table row i+1 of the history page and store it in slot
        90-i, preserving the original top-to-bottom ordering.
        """
        res = etree.HTML(self.html).xpath('//div[@id="ctl16_contentdiv"]//tr[{}]//text()'.format(i + 1))
        # Dates come as month/day/year; rebuild as year-month-day.
        str_list = res[0].split('/')
        date = '-'.join([str_list[-1], str_list[0], str_list[1]])
        self._rows[90 - i] = (
            date,
            float(res[1].replace(',', '')),
            float(res[2].replace(',', '')),
            float(res[3].replace(',', '')),
            float(res[4].replace(',', '')),
            float(res[5].replace(',', '')),
        )
        # NOTE(review): unsynchronised increment across threads; fine for a
        # progress display, not for exact accounting.
        self.plan += 1
        print('进度:%.2f' % (self.plan / 90 * 100) + '%')
        return

    def shows(self):
        """Draw the candlestick chart with SMA-5 and Bollinger bands."""
        self.dates = np.array(self.dates, dtype='M8[D]')
        self.opening_prices = np.array(self.opening_prices)
        self.hightest_prices = np.array(self.hightest_prices)
        self.lowerest_price = np.array(self.lowerest_price)
        self.closing_prices = np.array(self.closing_prices)
        self.volumns = np.array(self.volumns)
        print('start draw')
        mp.figure('S-SEC', facecolor='lightgray')  # window title / background
        mp.title(self.num, fontsize=18)
        mp.xlabel('Date', fontsize=14)
        mp.ylabel('Price', fontsize=14)
        mp.grid(linestyle=':')
        mp.tick_params(labelsize=10)
        # Tick locators: a major tick every Monday, a minor tick every day.
        ax = mp.gca()
        maloc = md.WeekdayLocator(byweekday=md.MO)
        miloc = md.DayLocator()
        ax.xaxis.set_major_locator(maloc)
        ax.xaxis.set_major_formatter(md.DateFormatter('%Y-%m-%d'))
        ax.xaxis.set_minor_locator(miloc)
        # NOTE(review): md.datetime was removed in newer matplotlib
        # releases; switch to the stdlib datetime module there.
        dates = self.dates.astype(md.datetime.datetime)
        # Closing-price trend line.
        mp.plot(dates, self.closing_prices, label='Closing_prices', linewidth=2, color='black', alpha=1.0)
        # Candlesticks: white body / red edge for rising days, green otherwise.
        rise = self.closing_prices >= self.opening_prices
        color = [('white' if x else 'green') for x in rise]
        ecolor = [('red' if x else 'green') for x in rise]
        heights = self.closing_prices - self.opening_prices
        mp.bar(dates, heights, 0.8, self.opening_prices, color=color, edgecolor=ecolor, align='center', zorder=-4)
        # Wicks (high/low shadow lines).
        mp.vlines(dates, self.lowerest_price, self.hightest_prices, color=ecolor, zorder=-5)
        # Exponentially weighted kernel for the 5-day moving average.
        kernel = np.exp(np.linspace(-1, 0, 5))
        kernel = kernel[::-1]
        sma53 = np.convolve(self.closing_prices, kernel, 'valid') / kernel.sum()
        mp.plot(dates[4:], sma53, label='SMA-5days+', linewidth=2, color='gray', alpha=0.7, zorder=-4)
        # 5-day Bollinger bands: SMA +/- 2 rolling standard deviations.
        stds = np.zeros(sma53.size)
        for i in range(stds.size):
            stds[i] = self.closing_prices[i:i + 5].std()
        lowers = sma53 - 2 * stds
        mp.plot(dates[4:], lowers, label='lowers', linewidth=2, color='gray', alpha=0.2)
        uppers = sma53 + 2 * stds
        mp.plot(dates[4:], uppers, label='uppers', linewidth=2, color='gray', alpha=0.2)
        mp.fill_between(dates[4:], uppers, lowers, uppers > lowers, color='gray', alpha=0.2, zorder=-1)
        mp.legend(loc='lower right', fontsize=10, )
        mp.gcf().autofmt_xdate()  # slant the date labels
        mp.show()
if __name__=='__main__':
Details(600745,'实验') | Python | 282 | 36.255318 | 301 | /Stock SEC/stack_detail.py | 0.576487 | 0.525464 |
chsoftworld/S-SEC-demo- | refs/heads/master | dic = {
'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
'Accept-Encoding':'gzip, deflate',
'Accept-Language':'zh-CN,zh;q=0.9',
'Cache-Control':'max-age=0',
'Connection':'keep-alive',
'Cookie':'Hm_lvt_85261bbccca7731cac0375109980ddf5=1563243079; __utmc=90353546; __utmz=90353546.1563243079.1.1.utmcsr=baidu|utmccn=(organic)|utmcmd=organic; __utma=90353546.1687968940.1563243079.1563243079.1563262167.2; s_histo=601678; Hm_lpvt_85261bbccca7731cac0375109980ddf5=1563264268',
'Host':'www.aigaogao.com',
'Referer':'http://www.aigaogao.com/tools/history.html?s=604675',
'Upgrade-Insecure-Requests':'1',
'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
} | Python | 15 | 53.466667 | 288 | /Stock SEC/dictset.py | 0.75 | 0.520833 |
okdshin/mutelang | refs/heads/master | import pretty_midi
from scipy.io import wavfile
def main(midi_filename, wav_filename):
    """Synthesise a MIDI file with FluidSynth and save it as 44.1 kHz WAV."""
    score = pretty_midi.PrettyMIDI(midi_filename)
    samples = score.fluidsynth()
    wavfile.write(wav_filename, 44100, samples)
# CLI entry point: `python midi2wav.py <midi> <wav>` via python-fire.
if __name__ == '__main__':
    import fire
    fire.Fire(main)
| Python | 13 | 20.461538 | 48 | /midi2wav.py | 0.681004 | 0.663082 |
okdshin/mutelang | refs/heads/master | import subprocess
class EOL(Exception):
    """Raised internally when the parser consumes past the last character."""
    pass
class Parser:
    """Parse a .mute source file and orchestrate the sequencer scripts.

    Recognised directives: ``bpm = N``, ``velocity = N``,
    ``instrument = Name`` (or ``instrument = {A, B, ...}`` after ``drum``)
    and ``drum``.  Lines starting with '|' accumulate note code; whenever
    the instrument changes (and at end of input) the accumulated code is
    piped to drum_seq.py / chord_bass_seq.py to render one intermediate
    MIDI part, and all parts are finally merged by stack_midi.py.
    """

    def __init__(self, filename, code):
        self.filename = filename
        self.cur = 0
        self.code = code
        self.look_ahead = code[0]
        self.bpm = 120             # default tempo
        self.velocity = 90         # default note velocity
        self.instrument = 'Cello'  # default melodic instrument
        self.note_code = ''
        self.note_code_reset_flag = False  # kept for compatibility; never read
        self.middle_midi_list = []         # intermediate per-part MIDI files
        self.drum_mode = False
        self.instruments = []              # drum-kit piece names in drum mode

    def _match(self, x):
        """Consume the current character if it equals x, else fail."""
        if self.look_ahead == x:
            self._consume()
        else:
            raise RuntimeError("not match {}".format(x))

    def _match_str(self, xs):
        """Consume an exact run of characters."""
        for x in xs:
            self._match(x)

    def _ignore_ws(self):
        """Skip any whitespace."""
        while self.look_ahead in ' \t\n':
            self._consume()

    def _ws(self):
        """Require at least one whitespace character, then skip it all."""
        if self.look_ahead not in ' \t\n':
            raise RuntimeError("not match white space")
        self._ignore_ws()

    def _int(self):
        """Consume and return a decimal integer literal."""
        int_str = ''
        while self.look_ahead in '0123456789':
            int_str += self.look_ahead
            self._consume()
        return int(int_str)

    def _str(self):
        """Consume and return an alphanumeric identifier."""
        s = ''
        while self.look_ahead.isalpha() or self.look_ahead in "0123456789":
            s += self.look_ahead
            self._consume()
        return s

    def _consume(self):
        """Advance one character; raise EOL past the end of the source."""
        self.cur += 1
        if len(self.code) == self.cur:
            raise EOL
        self.look_ahead = self.code[self.cur]

    def process_note_code(self):
        """Render the accumulated note code of the current part to MIDI."""
        print('note code', self.note_code)
        filename = '{0}-{2}-{1}.mid'.format(self.filename, self.instrument,
                                            len(self.middle_midi_list))
        print("process", self.instrument)
        # NOTE(review): note code and names are interpolated straight into
        # a shell command -- fine for trusted local sources, unsafe for
        # untrusted input.
        # BUG FIX: both command templates contained the literal "(unknown)"
        # where the {filename} placeholder belongs, so .format()'s
        # filename= argument was ignored and the sequencer scripts never
        # received their output path (their 3rd CLI argument).
        if '-' in self.instrument:
            subprocess.call(
                'echo \'{code}\' | python3 drum_seq.py \'[{insts}]\' {bpm} {filename} {velocity}'
                .format(code=self.note_code,
                        insts=','.join(['"' + s + '"' for s in self.instruments]),
                        bpm=self.bpm,
                        velocity=self.velocity,
                        filename=filename),
                shell=True)
        else:
            subprocess.call(
                'echo \'{code}\' | python3 chord_bass_seq.py \'{inst}\' {bpm} {filename} {velocity}'
                .format(code=self.note_code,
                        inst=self.instrument,
                        bpm=self.bpm,
                        velocity=self.velocity,
                        filename=filename),
                shell=True)
        self.middle_midi_list.append(filename)
        self.note_code = ''

    def parse(self):
        """Walk the whole source; on EOL flush the last part and merge all.

        NOTE(review): an unrecognised leading character makes this loop
        spin forever (there is no else branch); the sequencer parsers
        raise on bad input instead.
        """
        try:
            while True:
                self._ignore_ws()
                if self.look_ahead == 'b':
                    # bpm = <int>
                    self._match_str('bpm')
                    self._ignore_ws()
                    self._match('=')
                    self._ignore_ws()
                    self.bpm = self._int()
                    print('bpm', self.bpm)
                    self._ws()
                elif self.look_ahead == 'v':
                    # velocity = <int>
                    self._match_str('velocity')
                    self._ignore_ws()
                    self._match('=')
                    self._ignore_ws()
                    self.velocity = self._int()
                    print('velocity', self.velocity)
                    self._ws()
                elif self.look_ahead == 'i':
                    # instrument = <name> | { <name>, ... }.
                    # Flush the previous part before switching instruments.
                    if self.note_code != '':
                        self.process_note_code()
                    self._match_str('instrument')
                    self._ignore_ws()
                    self._match('=')
                    self._ignore_ws()
                    if self.drum_mode:
                        self._match('{')
                        self._ignore_ws()
                        instruments = []
                        instruments.append(self._str())
                        self._ignore_ws()
                        while self.look_ahead == ',':
                            self._consume()
                            self._ignore_ws()
                            instruments.append(self._str())
                            self._ignore_ws()
                        self._match('}')
                        self.instruments = instruments
                        # The '-'-joined name doubles as the drum-mode
                        # marker tested in process_note_code().
                        self.instrument = '-'.join(instruments)
                        print('instrument detected', self.instrument)
                    else:
                        self.instrument = self._str()
                        print('instrument detected', self.instrument)
                    self._ws()
                elif self.look_ahead == 'd':
                    print()
                    print(self.code[self.cur:])
                    self._match_str('drum')
                    self.drum_mode = True
                    print("drum_mode on")
                elif self.look_ahead == '|':
                    # A music line: accumulate verbatim up to end of line.
                    print('note code detect')
                    while self.look_ahead != '\n':
                        self.note_code += self.look_ahead
                        self._consume()
        except EOL:
            print("end")
            if self.note_code != '':
                print('note code', self.note_code)
                self.process_note_code()
            print("stack", self.middle_midi_list)
            subprocess.call('python3 stack_midi.py \'[{0}]\' {1}.mid'.format(
                ','.join(['"' + s + '"' for s in self.middle_midi_list]),
                self.filename),
                            shell=True)
def main(filename):
    """Compile the given .mute source file into '<filename>.mid'."""
    with open(filename, 'r') as src:
        source = src.read()
    parser = Parser(filename, source)
    try:
        parser.parse()
    except RuntimeError as e:
        print('"{}"'.format(parser.look_ahead))
        raise e


if __name__ == "__main__":
    import fire
    fire.Fire(main)
| Python | 174 | 32.609196 | 100 | /mutec.py | 0.432969 | 0.426813 |
okdshin/mutelang | refs/heads/master | import sys
import math
import pretty_midi
class Note:
    """One pitched note, rendered as '<base><accidental><octave>'."""

    def __init__(self, base: str, accidental: str, octave_num: int):
        self.base = base
        self.accidental = accidental
        self.octave_num = octave_num

    def name(self):
        """Return the pretty_midi-style note name, e.g. 'C#4'."""
        return '{}{}{}'.format(self.base, self.accidental, self.octave_num)

    def __repr__(self):
        return self.name()
class MidiGenerator:
    """Accumulate notes on a single pretty_midi instrument track.

    Durations derive from the quarter-note length ``dt4`` (milliseconds):
    a note of type t (1=whole .. 16=sixteenth) lasts dt4 * 4 / t.
    """

    def __init__(self, instrument, bpm, velocity):
        self.dt4 = int((60 * 1000) / bpm)  # quarter-note duration in ms
        self.t = 0                         # current cursor time in ms
        self.velocity = velocity
        program = pretty_midi.instrument_name_to_program(instrument)
        self.inst = pretty_midi.Instrument(program=program)

    def append_rest(self, rest_type):
        """Advance the cursor by a rest of the given type (1, 2, 4, 8, 16)."""
        self.t += self.dt4 * 2 ** (2 - math.log2(rest_type))

    def append_note(self, note_type, note_list):
        """Append every Note in note_list as a chord of the given duration."""
        dt = self.dt4 * 2 ** (2 - math.log2(note_type))
        print(note_list, dt)
        for note in note_list:
            pitch = pretty_midi.note_name_to_number(note.name())
            self.inst.notes.append(pretty_midi.Note(velocity=self.velocity,
                                                    pitch=pitch,
                                                    start=self.t / 1000,
                                                    end=(self.t + dt) / 1000))
        self.t += dt

    def finish_bar(self):
        """Pad the cursor to the next 4/4 bar boundary.

        BUG FIX: the original added ``t % bar`` instead of the remaining
        ``bar - t % bar``, so '|' did not land the cursor on a bar boundary.
        """
        bar = 4 * self.dt4
        left = self.t % bar
        if left != 0:
            self.t += bar - left

    def write(self, filename):
        """Write the accumulated track out as a standalone MIDI file."""
        midi = pretty_midi.PrettyMIDI()
        midi.instruments.append(self.inst)
        midi.write(filename)
class EOL(Exception):
    """Raised internally when the parser consumes past the last character."""
    pass
class Parser:
    """Parse melodic note code and drive a MidiGenerator.

    Grammar (one character at a time):
      a-g       note base letter, optionally followed by '#'/'!' (sharp,
                presumably flat -- verify pretty_midi's accidental syntax)
                and a single-digit octave
      . *       sixteenth / quarter rest
      i h q o x emit the pending chord as whole/half/quarter/eighth/sixteenth
      |         pad to the next bar boundary
    """
    def __init__(self, midi_gen, code):
        self.cur = 0                # index of the current character
        self.midi_gen = midi_gen    # MidiGenerator receiving notes/rests
        self.code = code
        self.look_ahead = code[0]   # current (unconsumed) character
        self.note_list = []         # chord being accumulated
        self.note_list_reset_flag = False  # next note starts a fresh chord
        self.last_note_base = 'c'   # previous base letter (octave inference)
        self.last_octave = 3        # previous octave (default when omitted)
    def _match(self, x):
        # Consume the current character if it equals x, else fail.
        if self.look_ahead == x:
            self._consume()
        else:
            raise RuntimeError("not match {}".format(x))
    def _consume(self):
        # Advance one character; raise EOL past the end of the source.
        self.cur += 1
        if len(self.code) == self.cur:
            raise EOL
        self.look_ahead = self.code[self.cur]
    def parse(self):
        """Scan the whole source, emitting notes/rests into the generator."""
        try:
            while True:
                if self.look_ahead == '|':
                    print('finish bar')
                    self.midi_gen.finish_bar()
                    self._consume()
                elif self.look_ahead in (' ', '\t', '\n'):
                    print('ignore')
                    self._consume()
                elif self.look_ahead in "abcdefg":
                    print('set note', self.look_ahead)
                    # A duration letter was just emitted: start a new chord.
                    if self.note_list_reset_flag:
                        self.note_list = []
                        self.note_list_reset_flag = False
                    note_base = self.look_ahead
                    self._consume()
                    # Optional accidental ('#' or '!').
                    if self.look_ahead in "!#":
                        accidental = self.look_ahead
                        self._consume()
                    else:
                        accidental = ''
                    # Optional explicit octave digit; otherwise reuse the
                    # previous octave, bumping it when the base letter goes
                    # "backwards" (e.g. g -> c), i.e. the melody wrapped up.
                    if self.look_ahead in "0123456789":
                        octave = int(self.look_ahead)
                        self._consume()
                    else:
                        octave = int(self.last_octave)
                        if (ord(self.last_note_base) - ord(note_base)) > 0:
                            print("+1 octave")
                            octave += 1
                    self.note_list.append(
                        Note(note_base.capitalize(), accidental, octave))
                    self.last_note_base = note_base
                    self.last_octave = octave
                elif self.look_ahead in ".*":
                    # Rests advance time immediately.
                    print('rest')
                    if self.look_ahead == '.':
                        self.midi_gen.append_rest(16)
                    elif self.look_ahead == '*':
                        self.midi_gen.append_rest(4)
                    self._consume()
                elif self.look_ahead in "ihqox":
                    # Duration letter: emit the pending chord with that length.
                    self.note_list_reset_flag = True
                    if self.look_ahead == 'i':
                        self.midi_gen.append_note(1, self.note_list)
                    elif self.look_ahead == 'h':
                        self.midi_gen.append_note(2, self.note_list)
                    elif self.look_ahead == 'q':
                        self.midi_gen.append_note(4, self.note_list)
                    elif self.look_ahead == 'o':
                        self.midi_gen.append_note(8, self.note_list)
                    elif self.look_ahead == 'x':
                        self.midi_gen.append_note(16, self.note_list)
                    self._consume()
                else:
                    raise RuntimeError("invalid charactor: ", self.look_ahead)
        except EOL:
            # Normal termination: the whole source has been consumed.
            print("end")
def main(instrument: str, bpm: int, filename: str, velocity: int):
    """Read note code from stdin and write it to `filename` as MIDI."""
    generator = MidiGenerator(instrument, bpm, velocity)
    Parser(generator, sys.stdin.read()).parse()
    generator.write(filename)


if __name__ == '__main__':
    import fire
    fire.Fire(main)
| Python | 151 | 33.42384 | 78 | /chord_bass_seq.py | 0.471528 | 0.461331 |
okdshin/mutelang | refs/heads/master | import pretty_midi
def main(src_filename_list, dst_filename):
    """Merge the instrument tracks of several MIDI files into one file."""
    merged = pretty_midi.PrettyMIDI()
    for src_filename in src_filename_list:
        merged.instruments.extend(
            pretty_midi.PrettyMIDI(src_filename).instruments)
    merged.write(dst_filename)


if __name__ == '__main__':
    import fire
    fire.Fire(main)
| Python | 14 | 24.071428 | 57 | /stack_midi.py | 0.672365 | 0.672365 |
okdshin/mutelang | refs/heads/master | import sys
import math
import pretty_midi
class Note:
    """One pitched note, rendered as '<base><accidental><octave>'.

    NOTE: unused by the drum parser below, which works on integer indices;
    kept for parity with chord_bass_seq.py.
    """

    def __init__(self, base: str, accidental: str, octave_num: int):
        self.base = base
        self.accidental = accidental
        self.octave_num = octave_num

    def name(self):
        """Return the full note name, e.g. 'C#4'."""
        return f"{self.base}{self.accidental}{self.octave_num}"

    def __repr__(self):
        return self.name()
class MidiGenerator:
    """Accumulate drum hits on a single pretty_midi drum track.

    ``instrument_list`` maps the pattern's digit indices to drum names;
    durations derive from the quarter-note length ``dt4`` (milliseconds).
    """

    def __init__(self, instrument_list, bpm, velocity):
        self.dt4 = int((60 * 1000) / bpm)  # quarter-note duration in ms
        self.t = 0                         # current cursor time in ms
        self.velocity = velocity
        self.instrument_list = instrument_list
        # The program number is irrelevant on a drum track; pretty_midi
        # only needs is_drum=True.
        program = 20  # pretty_midi.instrument_name_to_program(instrument)
        self.inst = pretty_midi.Instrument(program=program, is_drum=True)

    def append_rest(self, rest_type):
        """Advance the cursor by a rest of the given type (1, 2, 4, 8, 16)."""
        self.t += self.dt4 * 2 ** (2 - math.log2(rest_type))

    def append_note(self, note_type, index_list):
        """Emit one hit per index in index_list, all with the same duration."""
        dt = self.dt4 * 2 ** (2 - math.log2(note_type))
        print(index_list, dt)
        for index in index_list:
            pitch = pretty_midi.drum_name_to_note_number(
                self.instrument_list[index])
            self.inst.notes.append(pretty_midi.Note(velocity=self.velocity,
                                                    pitch=pitch,
                                                    start=self.t / 1000,
                                                    end=(self.t + dt) / 1000))
        self.t += dt

    def finish_bar(self):
        """Pad the cursor to the next 4/4 bar boundary.

        BUG FIX: the original added ``t % bar`` instead of the remaining
        ``bar - t % bar``, so '|' did not align the cursor to the bar.
        """
        bar = 4 * self.dt4
        left = self.t % bar
        if left != 0:
            self.t += bar - left

    def write(self, filename):
        """Write the accumulated drum track as a standalone MIDI file."""
        midi = pretty_midi.PrettyMIDI()
        midi.instruments.append(self.inst)
        midi.write(filename)
class EOL(Exception):
    """Raised internally when the parser consumes past the last character."""
    pass
class Parser:
    """Parse drum-pattern source and drive a MidiGenerator.

    Grammar (one character at a time):
      0-9       add that instrument index to the pending hit list
      . *       sixteenth / quarter rest
      i h q o x emit the pending hits as whole/half/quarter/eighth/sixteenth
      |         pad to the next bar boundary
      ;         stop parsing
    """

    def __init__(self, midi_gen, code):
        self.cur = 0
        self.midi_gen = midi_gen
        self.code = code
        self.look_ahead = code[0]
        self.index_list = []
        # After a duration letter, the next digit starts a fresh hit list.
        self.index_list_reset_flag = False
        # NOTE(review): initialised to 'c' (a string) but only ever
        # overwritten with ints and never read back.
        self.last_index = 'c'

    def _match(self, x):
        """Consume the current character if it equals x, else fail."""
        if self.look_ahead == x:
            self._consume()
        else:
            raise RuntimeError("not match {}".format(x))

    def _consume(self):
        """Advance one character; raise EOL past the end of the source."""
        self.cur += 1
        if len(self.code) == self.cur:
            raise EOL
        self.look_ahead = self.code[self.cur]

    def parse(self):
        """Scan the whole source, emitting hits/rests into the generator."""
        try:
            while True:
                if self.look_ahead == ';':
                    print('end')
                    return
                elif self.look_ahead == '|':
                    print('finish bar')
                    self.midi_gen.finish_bar()
                    self._consume()
                elif self.look_ahead in (' ', '\t', '\n'):
                    print('ignore')
                    self._consume()
                elif self.look_ahead in "0123456789":
                    print('set index', self.look_ahead)
                    if self.index_list_reset_flag:
                        self.index_list = []
                        self.index_list_reset_flag = False
                    index = int(self.look_ahead)
                    self._consume()
                    self.index_list.append(index)
                    self.last_index = index
                elif self.look_ahead in ".*":
                    print('rest')
                    if self.look_ahead == '.':
                        self.midi_gen.append_rest(16)
                    elif self.look_ahead == '*':
                        self.midi_gen.append_rest(4)
                    self._consume()
                elif self.look_ahead in "ihqox":
                    self.index_list_reset_flag = True
                    if self.look_ahead == 'i':
                        self.midi_gen.append_note(1, self.index_list)
                    elif self.look_ahead == 'h':
                        self.midi_gen.append_note(2, self.index_list)
                    elif self.look_ahead == 'q':
                        self.midi_gen.append_note(4, self.index_list)
                    elif self.look_ahead == 'o':
                        self.midi_gen.append_note(8, self.index_list)
                    elif self.look_ahead == 'x':
                        self.midi_gen.append_note(16, self.index_list)
                    self._consume()
                else:
                    print(self.look_ahead)
                    # BUG FIX: the bare `raise` here had no active exception
                    # and itself crashed with "No active exception to
                    # re-raise"; raise a descriptive error like the melodic
                    # parser (chord_bass_seq.py) does instead.
                    raise RuntimeError("invalid charactor: ", self.look_ahead)
        except EOL:
            print("end")
def main(instrument_list: str, bpm: int, filename: str, velocity: int):
    """Read drum code from stdin and write it to `filename` as MIDI."""
    generator = MidiGenerator(instrument_list, bpm, velocity)
    Parser(generator, sys.stdin.read()).parse()
    generator.write(filename)


if __name__ == '__main__':
    import fire
    fire.Fire(main)
| Python | 141 | 32.212765 | 73 | /drum_seq.py | 0.485373 | 0.474482 |
KagenLH/forme-app | refs/heads/main | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import Email, ValidationError, InputRequired, Length, EqualTo
from app.models import User
def user_exists(form, field):
    """WTForms validator: fail when the email is already registered."""
    existing = User.query.filter(User.email == field.data).first()
    if existing:
        raise ValidationError('Email address is already in use.')
def username_exists(form, field):
    """WTForms validator: fail when the username is already taken."""
    existing = User.query.filter(User.username == field.data).first()
    if existing:
        raise ValidationError('Username is already in use.')
class SignUpForm(FlaskForm):
    """Registration form: unique username and email, confirmed password."""
    # Username: required, <= 40 chars, must not already exist.
    username = StringField(
        'username', validators=[InputRequired(message='Input Required'), Length(max=40, message='Must be less than 40 characters'), username_exists])
    # Email: required, <= 40 chars, valid format, must not already exist.
    email = StringField('email', validators=[InputRequired(), Length(
        max=40, message='Must be less than 40 characters'), Email(message='Invalid'), user_exists])
    # Password must equal the 'confirm' field for the form to validate.
    password = PasswordField('password', validators=[
        InputRequired(), EqualTo('confirm', message='Passwords must match')])
    confirm = PasswordField('confirm')
| Python | 30 | 39.400002 | 149 | /app/forms/signup_form.py | 0.712871 | 0.706271 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: fa590b961f4f
Revises: ffdc0a98111c
Create Date: 2021-08-16 13:55:52.581549
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'fa590b961f4f'
down_revision = 'ffdc0a98111c'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `forms` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('forms',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.String(length=50), nullable=True),
    sa.Column('owner_id', sa.Integer(), nullable=False),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('label_align', sa.String(length=10), nullable=True),
    sa.Column('description_align', sa.String(length=10), nullable=True),
    sa.Column('title_align', sa.String(length=10), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the `forms` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('forms')
    # ### end Alembic commands ###
| Python | 37 | 27.243244 | 72 | /migrations/versions/20210816_135552_.py | 0.662201 | 0.635407 |
KagenLH/forme-app | refs/heads/main | from app.models import db, Form
def seed_forms():
    """Insert a single placeholder form for local development."""
    demo_form = Form(
        title="Test Form Render",
        owner_id=1,
        description="",
        label_placement="",
        description_align="",
        title_align="",
    )
    db.session.add(demo_form)
    db.session.commit()
def undo_forms():
    """Empty the forms table and reset its primary-key sequence."""
    truncate_sql = 'TRUNCATE forms RESTART IDENTITY CASCADE;'
    db.session.execute(truncate_sql)
    db.session.commit()
| Python | 20 | 20.6 | 66 | /app/seeds/forms.py | 0.537037 | 0.534722 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: 94f5eda37179
Revises: b3e721c02f48
Create Date: 2021-08-20 17:15:46.455809
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '94f5eda37179'
down_revision = 'b3e721c02f48'
branch_labels = None
depends_on = None
def upgrade():
    """Make fields.label and forms.title NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'label',
               existing_type=sa.VARCHAR(length=55),
               nullable=False)
    op.alter_column('forms', 'title',
               existing_type=sa.VARCHAR(length=50),
               nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert fields.label and forms.title to nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('forms', 'title',
               existing_type=sa.VARCHAR(length=50),
               nullable=True)
    op.alter_column('fields', 'label',
               existing_type=sa.VARCHAR(length=55),
               nullable=True)
    # ### end Alembic commands ###
| Python | 38 | 25.710526 | 65 | /migrations/versions/20210820_171546_.py | 0.611823 | 0.568473 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: d0c387e43ca4
Revises: 94f5eda37179
Create Date: 2021-08-21 11:33:10.206199
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd0c387e43ca4'
down_revision = '94f5eda37179'
branch_labels = None
depends_on = None
def upgrade():
    """Add forms.field_id with a foreign key to fields.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('forms', sa.Column('field_id', sa.Integer(), nullable=True))
    # Name the constraint explicitly so downgrade() can drop it; this is
    # also PostgreSQL's default name for the constraint, as confirmed by
    # the later migration 2453c767d036 which drops it by this name.
    op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])
    # ### end Alembic commands ###


def downgrade():
    """Drop the forms.field_id column and its foreign key.

    BUG FIX: the autogenerated ``op.drop_constraint(None, ...)`` cannot
    execute -- Alembic requires the real constraint name.
    """
    op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')
    op.drop_column('forms', 'field_id')
    # ### end Alembic commands ###
| Python | 30 | 25.166666 | 78 | /migrations/versions/20210821_113310_.py | 0.670064 | 0.606369 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: b05fdd14ae4f
Revises: 4563136888fd
Create Date: 2021-08-20 10:34:08.171553
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b05fdd14ae4f'
down_revision = '4563136888fd'
branch_labels = None
depends_on = None
def upgrade():
    """Make fields.label nullable and fields.required NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'label',
               existing_type=sa.VARCHAR(length=55),
               nullable=True)
    op.alter_column('fields', 'required',
               existing_type=sa.BOOLEAN(),
               nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert the nullability of fields.required and fields.label."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'required',
               existing_type=sa.BOOLEAN(),
               nullable=True)
    op.alter_column('fields', 'label',
               existing_type=sa.VARCHAR(length=55),
               nullable=False)
    # ### end Alembic commands ###
| Python | 38 | 25.657894 | 65 | /migrations/versions/20210820_103408_.py | 0.613031 | 0.57157 |
KagenLH/forme-app | refs/heads/main | from app.models import db, User
# Adds a demo user, you can add other users here if you want
def seed_users():
    """Insert three development users (Demo, marnie, bobbie)."""
    seed_data = [
        ('Demo', 'demo@aa.io'),
        ('marnie', 'marnie@aa.io'),
        ('bobbie', 'bobbie@aa.io'),
    ]
    for username, email in seed_data:
        db.session.add(
            User(username=username, email=email, password='password'))
    db.session.commit()
# Uses a raw SQL query to TRUNCATE the users table.
# SQLAlchemy doesn't have a built in function to do this
# TRUNCATE Removes all the data from the table, and RESET IDENTITY
# resets the auto incrementing primary key, CASCADE deletes any
# dependent entities
def undo_users():
    """Empty the users table and reset its primary-key sequence."""
    truncate_sql = 'TRUNCATE users RESTART IDENTITY CASCADE;'
    db.session.execute(truncate_sql)
    db.session.commit()
| Python | 27 | 30.25926 | 66 | /app/seeds/users.py | 0.684834 | 0.684834 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: beeeac90e4ba
Revises: d25f4d1b7ea0
Create Date: 2021-08-20 10:00:09.924819
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'beeeac90e4ba'
down_revision = 'd25f4d1b7ea0'
branch_labels = None
depends_on = None
def upgrade():
    """Make fields.required nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'required',
               existing_type=sa.BOOLEAN(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Make fields.required NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'required',
               existing_type=sa.BOOLEAN(),
               nullable=False)
    # ### end Alembic commands ###
| Python | 32 | 22.9375 | 65 | /migrations/versions/20210820_100009_.py | 0.631854 | 0.601828 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: 4df12f583573
Revises: 2453c767d036
Create Date: 2021-08-21 16:10:57.556468
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4df12f583573'
down_revision = '2453c767d036'
branch_labels = None
depends_on = None
def upgrade():
    """Make fields.form_id nullable."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'form_id',
               existing_type=sa.INTEGER(),
               nullable=True)
    # ### end Alembic commands ###
def downgrade():
    """Make fields.form_id NOT NULL again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'form_id',
               existing_type=sa.INTEGER(),
               nullable=False)
    # ### end Alembic commands ###
| Python | 32 | 23.0625 | 65 | /migrations/versions/20210821_161057_.py | 0.625974 | 0.596104 |
KagenLH/forme-app | refs/heads/main | from .db import db
from .user import User
from .form import Form
from .field import Field
| Python | 4 | 21.5 | 24 | /app/models/__init__.py | 0.777778 | 0.777778 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: b8ec5632d693
Revises: beeeac90e4ba
Create Date: 2021-08-20 10:05:24.638509
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b8ec5632d693'
down_revision = 'beeeac90e4ba'
branch_labels = None
depends_on = None
def upgrade():
    """Make fields.label NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'label',
               existing_type=sa.VARCHAR(length=55),
               nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Make fields.label nullable again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'label',
               existing_type=sa.VARCHAR(length=55),
               nullable=True)
    # ### end Alembic commands ###
| Python | 32 | 23.1875 | 65 | /migrations/versions/20210820_100524_.py | 0.635659 | 0.583979 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: 2453c767d036
Revises: d0c387e43ca4
Create Date: 2021-08-21 14:53:11.208418
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '2453c767d036'
down_revision = 'd0c387e43ca4'
branch_labels = None
depends_on = None
def upgrade():
    """Drop the forms.field_id column and its foreign-key constraint."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('forms_field_id_fkey', 'forms', type_='foreignkey')
    op.drop_column('forms', 'field_id')
    # ### end Alembic commands ###
def downgrade():
    """Re-add forms.field_id and its foreign key to fields.id."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('forms', sa.Column('field_id', sa.INTEGER(), autoincrement=False, nullable=True))
    op.create_foreign_key('forms_field_id_fkey', 'forms', 'fields', ['field_id'], ['id'])
    # ### end Alembic commands ###
| Python | 30 | 26.766666 | 99 | /migrations/versions/20210821_145311_.py | 0.671068 | 0.618247 |
KagenLH/forme-app | refs/heads/main | from .db import db
class Form(db.Model):
    """A user-created form: title/description, layout options, and fields."""
    __tablename__ = 'forms'
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(50), nullable=False)
    # Every form belongs to a user.
    owner_id = db.Column(db.Integer, db.ForeignKey('users.id'), nullable=False)
    description = db.Column(db.Text)
    # Layout options -- presumably alignment/placement keywords chosen by
    # the client (e.g. 'left'/'right'); verify against the frontend.
    label_placement = db.Column(db.String(10))
    description_align = db.Column(db.String(10))
    title_align = db.Column(db.String(10))
    # creates a pseudo-column (you won't see it) in the 'fields' table called 'form' that can be assigned a Form instance when creating a Field instance -- 'form' is not the name of this table
    fields = db.relationship('Field', backref='form')
    # field_id = db.Column(db.Integer, db.ForeignKey('fields.id'))
    # fields = db.relationship("Field", foreign_keys=field_id ,back_populates="forms", lazy="joined")
    def to_dict(self):
        """Serialize the form, including its child fields, to a JSON-safe dict."""
        # convert associated fields to serializable dictionaries
        form_fields = [field.to_dict() for field in self.fields]
        return {
            'id': self.id,
            'fields': form_fields,
            'title': self.title,
            'owner_id': self.owner_id,
            'description': self.description,
            'label_placement': self.label_placement,
            'description_align': self.description_align,
            'title_align': self.title_align
        }
    def __repr__(self):
        # Debug representation: the serialized dict as a string.
        return str(self.to_dict())
| Python | 38 | 36.473682 | 192 | /app/models/form.py | 0.627107 | 0.621489 |
KagenLH/forme-app | refs/heads/main | from flask import Blueprint, jsonify, request, session
from flask_login import login_required, current_user
from app.models import Form, db, Field
form_routes = Blueprint("forms", __name__)
# get all forms --- remove this route?
@form_routes.route('/')
# @login_required
def get_forms():
    """List every form in the database (no ownership filter)."""
    all_forms = Form.query.all()
    return {'forms': [f.to_dict() for f in all_forms]}
@form_routes.route('/<int:id>', methods=['GET', 'DELETE'])
@login_required
def forms(id):
    """Fetch (GET) or delete (DELETE) a single form by primary key."""
    form = Form.query.get(id)
    # BUG FIX: Form.query.get() returns None for an unknown id; the
    # original then crashed on None.to_dict() / db.session.delete(None).
    if form is None:
        return "The form you're trying to access does not exist.", 404
    if request.method == 'GET':
        return form.to_dict()
    # DELETE: only the owner may remove a form (consistent with edit_form,
    # which already enforces ownership).
    if form.owner_id != current_user.id:
        return "You do not own the form you are trying to delete.", 401
    db.session.delete(form)
    db.session.commit()
    return form.to_dict()
# (GET) allow user to access a form without being logged in, i.e. SHARED form
# @form_routes.route('/<int:id>/shared')
# def shared_form(id):
# form = Form.query.get(id)
# return form.to_dict()
# get forms by owner_id (i.e. all forms owned by a specific user)
@form_routes.route('/users/<int:id>')
def user_forms(id):
    """List every form owned by the user with the given id."""
    owned = Form.query.filter_by(owner_id=id).all()
    return {'forms': [form.to_dict() for form in owned]}
@form_routes.route('/build', methods=['POST'])
@login_required
def create_form():
    """Create a form (plus its fields) for the logged-in user from JSON."""
    # print('***** REQUEST DATA INFO *****', request.get_json())
    user_id = session['_user_id']
    # pull JSON data from request body
    data = request.get_json()
    form_fields = []
    form = Form(
        title=data["title"],
        owner_id=user_id,
        description=data["description"],
        label_placement=data["labelPlacement"],
        description_align=data["descriptionAlignment"],
        title_align=data["titleAlignment"],
    )
    db.session.add(form)
    # Persist the form first; each Field below references it through the
    # `form` relationship, which fills in form_id.
    db.session.commit()
    # print('FORM FORM FORM:', form)
    for field_info in data["fields"]:
        # all of the columns in the fields table (except id)
        expected_keys = [
            "type",
            "label",
            "maxLength",
            "required",
            "placeholder",
            "instructions",
            "choices"
        ]
        # check whether field_info["maxLength"] exists
        if "maxLength" in field_info:
            # convert the value from string to integer
            # NOTE(review): int() raises on '' or None -- assumes the
            # client only sends maxLength as a numeric string; verify.
            field_info["maxLength"] = int(field_info["maxLength"])
        for key in expected_keys:
            if key not in field_info:
                # create the key and set the default value to None
                field_info.setdefault(key)
        # print('******* FIELD INFO ********', field_info)
        # Choices are persisted as a single '&&'-delimited string.
        # NOTE(review): if the payload omitted 'choices' it was defaulted
        # to None above, and iterating it raises TypeError -- verify the
        # client always sends a (possibly empty) list.
        field_choices = field_info['choices']
        choices_string = ""
        for choice in field_choices:
            choices_string += (str(choice) + '&&')
        field = Field(
            type=field_info["type"],
            label=field_info["label"],
            max_length=field_info["maxLength"],
            required=field_info["required"],
            placeholder=field_info["placeholder"],
            instructions=field_info["instructions"],
            choices=choices_string,
            form=form  # handles the form_id
        )
        # db.session.add(field)
        form_fields.append(field)
    db.session.add_all(form_fields)
    db.session.commit()
    # test_form = Form.query.filter_by(title='To Test Fields').first()
    # print("*** FORM.FIELDS ***", type(test_form.fields))
    # print("*** FIELD.FORMS ***", form_fields[0].form)
    # # ...so we can use the dict.update() method
    # return_form = form.to_dict()
    # # add an entry in 'form' contaning its related fields
    # return_form.update({"fields": [field.to_dict() for field in form_fields]})
    # print('**** FORM WITH FIELDS ****', form.to_dict())
    return form.to_dict()
@form_routes.route('/<int:id>', methods=['PUT'])
@login_required
def edit_form(id):
    """Replace a form's settings and fields with the JSON payload.

    Only the owner may edit; all existing fields are deleted and rebuilt
    from the payload.  NOTE(review): the field-building loop duplicates
    create_form() -- candidate for a shared helper.
    """
    form = Form.query.get(id)
    if form:
        if form.owner_id == current_user.id:
            data = request.get_json()
            form.title= data["title"]
            form.description= data["description"]
            form.label_placement= data["labelPlacement"]
            form.description_align= data["descriptionAlignment"]
            form.title_align= data["titleAlignment"]
            # Remove any fields on the form that previously existed
            for field in form.fields:
                db.session.delete(field)
            db.session.commit()
            # Re-add all the fields to the form
            form_fields = []
            for field_info in data["fields"]:
                # all of the columns in the fields table (except id)
                expected_keys = [
                    "type",
                    "label",
                    "maxLength",
                    "required",
                    "placeholder",
                    "instructions",
                    "choices"
                ]
                # check whether field_info["maxLength"] exists
                if "maxLength" in field_info:
                    # convert the value from string to integer
                    # NOTE(review): int() raises on '' or None -- assumes
                    # the client sends a numeric string here; verify.
                    field_info["maxLength"] = int(field_info["maxLength"])
                for key in expected_keys:
                    if key not in field_info:
                        # create the key and set the default value to None
                        field_info.setdefault(key)
                # print('******* FIELD INFO ********', field_info)
                # Choices are persisted as a single '&&'-delimited string.
                field_choices = field_info['choices']
                choices_string = ""
                for choice in field_choices:
                    choices_string += (str(choice) + '&&')
                field = Field(
                    type=field_info["type"],
                    label=field_info["label"],
                    max_length=field_info["maxLength"],
                    required=field_info["required"],
                    placeholder=field_info["placeholder"],
                    instructions=field_info["instructions"],
                    choices=choices_string,
                    form=form  # handles the form_id
                )
                # db.session.add(field)
                form_fields.append(field)
            db.session.add_all(form_fields)
            db.session.commit()
            return form.to_dict()
        else:
            return "You do not own the form you are trying to edit.", 401
    else:
        return "The form you're trying to edit does not exist.", 400
# ! currently causes error "405 method not allowed"
# ! when not bundled with `user_forms(id)` above
# delete a specific form by primary key
# @form_routes.route('/<int:id>', methods=['DELETE'])
# def delete_form(id):
# if request.method == 'DELETE':
# form = Form.query.get(id)
# db.session.delete(form)
# db.session.commit()
# return form.to_dict()
# @form_routes.route('/<int:id>')
# def get_form(id):
# form = Form.query.filter(Form.id == id).first()
# # fields = Field.query.filter(Field.form_id == form.id).all()
# print('FORM IS HERE!!! ', form.to_dict())
# # print('FIELD IS HERE!!!!! ***',
# # {'fields': [field.to_dict() for field in fields]})
# # form["fields"] = {'fields': [field.to_dict() for field in fields]}
# return form.to_dict()
@form_routes.route('/<int:id>/shared', methods=['GET'])
@login_required
def get_share_forms(id):
    """Return a single form (by primary key) for share-link viewing.

    Returns the serialized form, or a 404 message when no form with the
    given id exists.  (The original dereferenced ``form.to_dict()`` without
    a None check and crashed with AttributeError on unknown ids; it also
    carried a redundant ``request.method`` check on a GET-only route and a
    leftover debug print.)
    """
    form = Form.query.get(id)
    if form is None:
        return "The form you're trying to view does not exist.", 404
    return form.to_dict()
| Python | 243 | 30.843622 | 80 | /app/api/form_routes.py | 0.549496 | 0.548204 |
KagenLH/forme-app | refs/heads/main | """empty message
Revision ID: b3e721c02f48
Revises: 9aec744a6b98
Create Date: 2021-08-20 13:35:16.871785
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b3e721c02f48'
down_revision = '9aec744a6b98'
branch_labels = None
depends_on = None
def upgrade():
    """Apply migration b3e721c02f48: make fields.form_id NOT NULL."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'form_id',
               existing_type=sa.INTEGER(),
               nullable=False)
    # ### end Alembic commands ###
def downgrade():
    """Revert migration b3e721c02f48: allow NULL in fields.form_id again."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('fields', 'form_id',
               existing_type=sa.INTEGER(),
               nullable=True)
    # ### end Alembic commands ###
| Python | 32 | 22.875 | 65 | /migrations/versions/20210820_133516_.py | 0.636126 | 0.570681 |
KagenLH/forme-app | refs/heads/main | # from flask import Blueprint, jsonify, request
# from flask_login import login_required
# from app.models import Field, db
# field_routes = Blueprint('fields', __name__)
# @field_routes.route('/', methods=['POST'])
# def fields():
# if request.method == 'POST':
# # get fields data from request body
# data = request.get_json()
# form_fields = []
# for field_info in data:
# field = Field(
# type=field_info["type"],
# label=field_info["label"],
# max_length=field_info["max_length"],
# required=field_info["required"],
# placeholder=field_info["placeholder"],
# instructions=field_info["instructions"],
# choices=field_info["choices"],
# form_id=field_info["form_id"]
# )
# # db.session.add(field)
# form_fields.append(field)
# # adds each instance individually, so list format is ok
# db.session.add_all(form_fields)
# db.session.commit()
# # must return dictionary, tuple, or string
# return {"fields": [field.to_dict for field in form_fields]}
# @field_routes.route('/forms/<int:id>')
# def form_fields(id):
# fields = Field.query.filter_by(form_id=id).all()
# return {'fields': [field.to_dict for field in fields]}
| Python | 42 | 32.119049 | 69 | /app/api/field_routes.py | 0.557153 | 0.557153 |
KagenLH/forme-app | refs/heads/main | from .db import db
class Field(db.Model):
    """SQLAlchemy model for one input field belonging to a form.

    Choice options are persisted as a single string delimited (and
    terminated) by '&&'; to_dict() reverses that encoding.
    """
    __tablename__ = 'fields'

    id = db.Column(db.Integer, primary_key=True)
    # Widget type, e.g. "text".
    type = db.Column(db.String(255), nullable=False)
    label = db.Column(db.String(55), nullable=False)
    # Optional maximum input length; NULL means unlimited.
    max_length = db.Column(db.Integer)
    required = db.Column(db.Boolean, nullable=False)
    placeholder = db.Column(db.String(255))
    instructions = db.Column(db.String(255))
    # '&&'-delimited choice list, always ending in '&&' when non-empty.
    choices = db.Column(db.Text)
    form_id = db.Column(db.Integer, db.ForeignKey("forms.id"))
    # forms = db.relationship("Form", foreign_keys=form_id, lazy="joined") # redundant
    def to_dict(self):
        """Serialize the field, decoding `choices` back into a list."""
        return {
            'id': self.id,
            'form_id': self.form_id,
            'type': self.type,
            'label': self.label,
            'max_length': self.max_length,
            'required': self.required,
            'placeholder': self.placeholder,
            'instructions': self.instructions,
            # splits choices into a list, removes empty list entry at the end
            # (the stored string ends with '&&', so [:-2] drops the trailer).
            # NOTE(review): raises TypeError if choices is NULL -- confirm all
            # rows are written with a (possibly empty) string.
            'choices': self.choices[:-2].split('&&')
        }
| Python | 30 | 34.900002 | 86 | /app/models/field.py | 0.592386 | 0.581244 |
KagenLH/forme-app | refs/heads/main | from app.models import db, Field
from app.models import Form
def seed_fields():
    """Insert a demo form owning a single text field (development seed)."""
    demo_form = Form(
        title='To Test Fields',
        owner_id=1
    )
    db.session.add(demo_form)
    demo_field = Field(
        type="text",
        label="Test Field",
        required=False,
        form=demo_form,  # association back-populates form_id
        choices='Some Stuff&&Another choice&&Hello from hell&&'
    )
    db.session.add(demo_field)
    db.session.commit()
def undo_fields():
    """Empty the fields table and reset its id sequence (cascades to FKs)."""
    db.session.execute('TRUNCATE fields RESTART IDENTITY CASCADE;')
    db.session.commit()
| Python | 26 | 20.846153 | 67 | /app/seeds/fields.py | 0.612676 | 0.610915 |
NLeSC/cwltool-service | refs/heads/master | #!/usr/bin/env python
import os
import sys
import setuptools.command.egg_info as egg_info_cmd
import shutil
from setuptools import setup, find_packages
SETUP_DIR = os.path.dirname(__file__)
README = os.path.join(SETUP_DIR, 'README')
# Read the long description once, with an explicit encoding and a context
# manager so the file handle is closed deterministically (the original used
# an inline open(README).read(), which leaks the handle).
with open(README, encoding='utf-8') as readme_file:
    long_description = readme_file.read()

setup(name='cwltool_service',
      version='2.0',
      description='Common workflow language runner service',
      long_description=long_description,
      author='Common workflow language working group',
      author_email='common-workflow-language@googlegroups.com',
      url="https://github.com/common-workflow-language/cwltool-service",
      download_url="https://github.com/common-workflow-language/cwltool-service",
      license='Apache 2.0',
      # NOTE(review): py_modules ships "cwltool_client" but the console script
      # below imports "cwl_client:main" -- one of the two names is likely a
      # typo; confirm against the actual module filename before releasing.
      py_modules=["cwltool_stream", "cwl_flask", "cwltool_client"],
      install_requires=[
          'Flask',
          'requests',
          'PyYAML'
      ],
      entry_points={
          'console_scripts': [ "cwltool-stream=cwltool_stream:main",
                               "cwl-server=cwl_flask:main",
                               "cwl-client=cwl_client:main"]
      },
      zip_safe=True
)
| Python | 34 | 30.382353 | 81 | /setup.py | 0.614808 | 0.611059 |
orianao/cssproj | refs/heads/master | from db import DatabaseController as DbC
def get_results():
    """Fetch all stored admission results for exam session 1."""
    return DbC.get_admission_results(1)
def calculate_results():
    """Allocate every candidate to a specialization and return the results.

    Final score = 0.3 * max(info, math) + 0.2 * high-school average
    + 0.5 * admission grade.  Candidates are processed per specialization
    in descending score order; scores below 5 are rejected outright.
    The last two seats of each specialization are reserved for fee-paying
    status (free_spots > 2 -> FREE, otherwise FEE); if the first option is
    full, the second option is tried once, then the candidate is rejected.
    """
    specializations = DbC.get_all_specializations()
    candidates = DbC.get_all_candidates()
    repartition = []
    specs = {}
    opt_arr = {}
    # Index specialization name/capacity by identifier; free_spots is the
    # mutable seat counter consumed during allocation below.
    for item in specializations:
        specs[item.identifier] = {}
        specs[item.identifier]["name"] = item.name
        specs[item.identifier]["capacity"] = item.capacity
        specs[item.identifier]["free_spots"] = item.capacity
    # Build one unprocessed AdmissionResult per candidate and remember the
    # candidate's two options keyed by CNP (string) for the fallback step.
    for item in candidates:
        r = DbC.AdmissionResult()
        r.candidate_cnp = item.cnp
        r.final_score = max(item.info_grade, item.math_grade)*0.3 + item.high_school_avg_grade*0.2 + 0.5*item.admission_grade
        r.specialization_id = item.first_option
        r.allocation = DbC.AdmissionStatus.UNPROCESSED
        repartition.append(r)
        opt_arr[str(item.cnp)] = {}
        opt_arr[str(item.cnp)]["first_option"] = item.first_option
        opt_arr[str(item.cnp)]["second_option"] = item.second_option
    # Group by specialization, best score first within each group.
    repartition = sorted(repartition, key = lambda x: (x.specialization_id, (-1)*x.final_score, ))
    for item in repartition:
        if item.final_score < 5:
            item.allocation = DbC.AdmissionStatus.REJECTED
            continue
        if specs[item.specialization_id]["free_spots"] > 2:
            item.allocation = DbC.AdmissionStatus.FREE
            specs[item.specialization_id]["free_spots"] -= 1
        elif specs[item.specialization_id]["free_spots"] > 0:
            item.allocation = DbC.AdmissionStatus.FEE
            specs[item.specialization_id]["free_spots"] -= 1
        else:
            # First option exhausted: retry once with the second option.
            # NOTE(review): second-option candidates compete out of score
            # order here (they are seated in first-option pass order).
            item.specialization_id = opt_arr[str(item.candidate_cnp)]["second_option"]
            if specs[item.specialization_id]["free_spots"] > 2:
                item.allocation = DbC.AdmissionStatus.FREE
                specs[item.specialization_id]["free_spots"] -= 1
            elif specs[item.specialization_id]["free_spots"] > 0:
                item.allocation = DbC.AdmissionStatus.FEE
                specs[item.specialization_id]["free_spots"] -= 1
            else:
                item.allocation = DbC.AdmissionStatus.REJECTED
    # print("Candidate CNP: ", item.candidate_cnp)
    # print("Admission Grade: ", item.final_score)
    # print("AdmissionResult: ", item.allocation)
    # print("Specialization: ", specs[item.specialization_id]["name"])
    # print("Specialization ID: ", item.specialization_id)
    return repartition
def set_results():
    """Compute the repartition and persist each result.

    Raises RuntimeError if the database layer reports a failure for any
    candidate.  (The original used ``raise "..."`` with a bare string,
    which is itself a TypeError in Python 3 -- string exceptions were
    removed; a real exception class is required.)
    """
    results = calculate_results()
    for item in results:
        if DbC.save_admission_result_for_candidate(item) != "OK":
            raise RuntimeError("Error in repartition processing!")
    print("Repartition completed successfully.")
# set_results()
| Python | 70 | 34.542858 | 119 | /app/utils.py | 0.709807 | 0.702974 |
folmez/Handsfree-KGS | refs/heads/master | from pynput.mouse import Button, Controller
import cv2
import imageio
import matplotlib.pyplot as plt
import threading
import time
import queue
import os
import numpy as np
import src
frames = queue.Queue(maxsize=10)
class frameGrabber(threading.Thread):
    """Background thread that captures a webcam frame every 30 seconds.

    Each frame is saved under images/game_log/ and its sequence number is
    pushed onto the shared module-level `frames` queue for consumers, which
    rebuild the filename from the number.
    """
    def __init__(self):
        # Constructor
        threading.Thread.__init__(self)
    def run(self):
        # Device 0 webcam; the loop only ends when a frame read fails.
        cam = cv2.VideoCapture(0)
        img_counter = 0
        while True:
            ret, frame = cam.read()
            if not ret:
                break
            img_name = f"images/game_log/opencv_frame_{img_counter}.png"
            cv2.imwrite(img_name, frame)
            print("{} written!".format(img_name))
            # NOTE(review): put() blocks when the queue (maxsize=10) is full,
            # pausing capture until a consumer drains it.
            frames.put(img_counter)
            img_counter += 1
            time.sleep(30)
        cam.release()
def verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, color, i, j):
    """Interactively confirm the color thresholds detect a stone at (i, j).

    Prompts the user to place a stone, shows each captured frame, and once
    the user confirms, asserts the detector finds the stone.  Consumes
    frames from the shared `frames` queue and reads the module-level goban
    boundary `ob`.

    Bug fix: the confirmation test used ``ans is 'y'`` -- identity
    comparison against a string literal, which only works by accident of
    CPython interning.  It now uses equality.
    """
    # Display a message to the user to put a stone
    print(f"\nPlease put a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}...")
    # Assert the stone with desired color is on the goban at the exact spot
    while True:
        time.sleep(5)
        frame_num = frames.get()
        img_name = f"images/game_log/opencv_frame_{frame_num}.png"
        rgb = imageio.imread(img_name)
        plt.imshow(rgb)
        plt.title(f"This board should have a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}.")
        plt.show()
        ans = input(f"Did you put a {color} stone at {src.convert_physical_board_ij_to_str(i,j)}? [y/n]: ")
        if ans == 'y':
            rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
            assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \
                red_scale_th, blue_scale_th, color, i, j, plot_stuff=True)
            remove_this_frame(img_name)
            frames.task_done()
            remove_unused_frames()
            break
        else:
            remove_this_frame(img_name)
            frames.task_done()
def remove_this_frame(img_name):
    """Delete a saved camera frame from disk and log the removal."""
    os.remove(img_name)
    print('Frame', img_name, 'removed.')
def remove_unused_frames():
    """Drain the shared `frames` queue, deleting each queued frame file.

    Bug fix: the original called ``frames.get()`` a *second* time inside
    the else-branch, which discarded the item fetched by the non-blocking
    get (its file was never deleted), paired one task_done() with two
    gets, and could block forever when the queue had exactly one item.
    """
    print('Removing unused frames...')
    while True:
        time.sleep(1)
        try:
            frame_num = frames.get(False)
        except queue.Empty:
            # Queue fully drained.
            break
        else:
            img_name = f"images/game_log/opencv_frame_{frame_num}.png"
            remove_this_frame(img_name)
            frames.task_done()
    print('Unused frames removed...')
board_corners = []
def onclick(event):
    """Matplotlib click handler: append x then y of each click to the
    module-level `board_corners` list (flat [x0, y0, x1, y1, ...] layout)."""
    print(event.xdata, event.ydata)
    board_corners.append(event.xdata)
    board_corners.append(event.ydata)
if __name__ == '__main__':
    # Main interactive driver: calibrate the physical goban from webcam
    # frames, then replay physical moves onto the digital (KGS) board.
    #
    # Fixes relative to the original:
    #   * `mouse = Controller()` is created BEFORE the star-point test loop
    #     that uses it (the original referenced `mouse` 7 lines before
    #     assigning it -> NameError).
    #   * `ans is 'y'` identity comparisons replaced with `ans == 'y'`.
    #   * The star-point loop variable no longer shadows the builtin `str`.

    # Initiate the frame grabber thread for goban pictures
    my_frame_grabber = frameGrabber()
    # Start running the threads!
    my_frame_grabber.start()
    print('Frame grabbing has started...')

    # MANUAL BOARD EDGE DETECTION FOR THE PYHSICAL BOARD
    # Show a plot frames and ask user to input boundaries
    while True:
        time.sleep(5)
        frame_num = frames.get()
        img_name = f"images/game_log/opencv_frame_{frame_num}.png"
        rgb = imageio.imread(img_name)
        fig = plt.figure()
        plt.imshow(rgb)
        plt.title("Please click on UL-UR-BL-BR corners or close plot...")
        fig.canvas.mpl_connect('button_press_event', onclick)
        plt.show()
        if not board_corners:
            # Skip if nothing is clicked
            remove_this_frame(img_name)
            frames.task_done()
        else:
            # Read goban corners (flat [ULx, ULy, URx, URy, BLx, BLy, BRx, BRy])
            ob = board_corners
            assert ob[2] > ob[0] and ob[6] > ob[4] and \
                ob[7] > ob[4] and ob[5] > ob[1]
            # Remove this filename as it served its purpose and break out of loop
            remove_this_frame(img_name)
            frames.task_done()
            break
    # Remove all unused frames at the end
    remove_unused_frames()
    # Remove non-goban part from the RGB matrix and make it a square matrix
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    # Find the indices of board points in the new square RGB matrix
    x_idx, y_idx = src.find_board_points(rgb, plot_stuff=False)

    # CALIBRATION OF PYHSICAL BOARD
    # Ask the user to put black and white stones on the board
    print('\nPlease put black stones on corners and a white stone at center')
    bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
    while True:
        time.sleep(5)
        frame_num = frames.get()
        img_name = f"images/game_log/opencv_frame_{frame_num}.png"
        rgb = imageio.imread(img_name)
        plt.imshow(rgb)
        plt.title('Did you put black on corners and white at center?')
        plt.show()
        ans = input('Did you put black stones on corners and a white stone at center? [y/n]: ')
        if ans == 'y':
            # Remove non-goban part from the RGB matrix and make it a square matrix
            rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
            # Calibrate
            red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
            # Refind stones using the above thresholds
            bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \
                red_scale_th1, blue_scale_th1, plot_stuff=False)
            remove_this_frame(img_name)
            frames.task_done()
            remove_unused_frames()
            break
        else:
            remove_this_frame(img_name)
            frames.task_done()

    print('\nPlease put white stones on corners and a black stone at center')
    wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
    while True:
        time.sleep(5)
        frame_num = frames.get()
        img_name = f"images/game_log/opencv_frame_{frame_num}.png"
        rgb = imageio.imread(img_name)
        plt.imshow(rgb)
        plt.title('Did you put white on corners and black at center?')
        plt.show()
        ans = input('Did you put white stones on corners and a black stone at center? [y/n]: ')
        if ans == 'y':
            # Remove non-goban part from the RGB matrix and make it a square matrix
            rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
            # Calibrate
            red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
            # Refind stones using the above thresholds
            bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \
                red_scale_th2, blue_scale_th2, plot_stuff=False)
            remove_this_frame(img_name)
            frames.task_done()
            remove_unused_frames()
            break
        else:
            remove_this_frame(img_name)
            frames.task_done()

    # Average the two calibration passes.
    red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2)
    blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2)

    # VERIFY CALIBRATION OF PHYSICAL BOARD
    print('  [PLEASE KEEP IN MIND THAT YOUR LOWER-LEFT CORNER IS (1,1)]')
    verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'black', 3, 4)
    verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'white', 1, 1)
    verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'black', 10, 10)
    verify_calibration(x_idx, y_idx, red_scale_th, blue_scale_th, 'white', 19, 19)
    print("CALIBRATION IS VERIFIED\n" + 50*"-")

    # DIGITAL BOARD DETECTION
    # Ask the user to open a KGS board
    print('\n OPEN A KGS BOARD/GAME NOW')
    input('ENTER when the digital board is open: ')
    # Get the user to click on come corners to get to know the digital board
    UL_x, UL_y, goban_step = src.get_goban_corners()
    # Obtain the mouse controller BEFORE it is first used below.
    mouse = Controller()
    # Test by moving to the star points on the board
    for pos in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']:
        i, j = src.str_to_integer_coordinates(pos)
        x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)
        src.make_the_move(mouse, x, y, no_click=True)

    # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD
    # Plan - 1) check frames continously until a move is made by you
    #        2) check digital board until a move is made by your opponent
    # First, remove all unused frames
    remove_unused_frames()
    # Scan the frames for moves every five seconds
    bxy, wxy = [], [] # empty board in the beginning
    while True:
        time.sleep(5)
        frame_num = frames.get()
        img_name = f"images/game_log/opencv_frame_{frame_num}.png"
        color, i, j = src.scan_next_move(img_name, ob, x_idx, y_idx, \
            red_scale_th, blue_scale_th, bxy, wxy)
        if color is not None:
            # Play the move and update the stone lists
            bxy, wxy = src.play_next_move_on_digital_board(mouse, color, \
                i, j, bxy, wxy, UL_x, UL_y, goban_step)
            # Start checking the digital board for new moves
        else:
            # Remove this frame and start waiting for the next frame
            remove_this_frame(img_name)
            frames.task_done()

    # Wait for the threads to finish...
    my_frame_grabber.join()
    print('Main Terminating...')
| Python | 244 | 37.307377 | 108 | /play_handsfree_GO.py | 0.587033 | 0.5758 |
folmez/Handsfree-KGS | refs/heads/master | import imageio
import pytest
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src
def test_get_digital_goban_state():
    """Stones on the reference screenshot are detected at known coordinates."""
    board_rgb = imageio.imread('images/digital_goban.png')
    expected = {(1, 1, 1), (1, 1, 14), (2, 19, 19)}
    assert src.get_digital_goban_state(board_rgb) == expected
| Python | 12 | 31.833334 | 82 | /tests/test_screenshot_actions.py | 0.649746 | 0.616751 |
folmez/Handsfree-KGS | refs/heads/master | from pynput.mouse import Button, Controller
import src
import time
def get_goban_corners():
    """Interactively measure the on-screen goban geometry.

    Asks the user to park the cursor on the A19 (upper-left) and T1
    (bottom-right) intersections for five seconds each, then returns
    (UL_x, UL_y, goban_step) where goban_step is the average pixel
    distance between adjacent intersections (18 gaps per axis).
    """
    # Obtain mouse controller
    mouse = Controller()
    # Ask the user to define goban corners
    print('Move cursor to upper-left (A19) corner of Goban and keep it there five seconds')
    time.sleep(5)
    (UL_x, UL_y) = mouse.position
    print(f"Upper-Left: ({UL_x},{UL_y})")
    print()
    print('Move cursor to bottom-right (T1) corner of Goban and keep it there five seconds')
    time.sleep(5)
    (BR_x, BR_y) = mouse.position
    print(f"Bottom-Right: ({BR_x},{BR_y})")
    print()
    # Compute goban step sizes: mean of the horizontal and vertical spacing.
    goban_step = 0.5 * (BR_x - UL_x) * 1/18 + 0.5 * (BR_y - UL_y) * 1/18
    print(f"Goban-steps is {goban_step}")
    return UL_x, UL_y, goban_step
def make_the_move(mouse, x, y, no_click=False):
    """Glide the cursor to screen point (x, y); left-click unless no_click."""
    current_x, current_y = mouse.position
    time.sleep(0.5)
    mouse.move(x - current_x, y - current_y)
    time.sleep(0.2)
    if no_click:
        return
    mouse.click(Button.left, 1)
def int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step):
    """Map 1-based board coordinates (i, j) to absolute screen pixels."""
    return (UL_x + (i - 1) * goban_step,
            UL_y + (j - 1) * goban_step)
def str_to_integer_coordinates(coord):
    """Convert a goban coordinate string like 'D16' to 1-based (col, row).

    The upper-left corner 'A19' maps to (1, 1) and the bottom-right 'T1'
    to (19, 19).  Goban column labels skip the letter 'I'.

    The parameter was renamed from ``str``, which shadowed the builtin;
    all callers in this project pass it positionally, so the rename is
    call-compatible.
    """
    row = 19 - int(coord[1:3]) + 1
    letter = coord[0]
    if ord(letter) < ord('I'):
        col = ord(letter) - ord('A') + 1
    else:
        # Labels J..T: skipping 'I' means no +1 correction is needed.
        col = ord(letter) - ord('A')
    return col, row
def int_coords_to_str(i, j):
    """Convert 1-based (col, row) board coordinates to a string like 'D16'.

    The upper-left corner (1, 1) is 'A19'; goban column labels skip 'I',
    so columns 9..19 map to letters 'J'..'T'.
    """
    letter_offset = i - 1 if i <= ord('I') - ord('A') else i
    return f"{chr(ord('A') + letter_offset)}{20 - j}"
| Python | 57 | 28.807018 | 92 | /src/mouse_actions.py | 0.584461 | 0.55621 |
folmez/Handsfree-KGS | refs/heads/master | import imageio
def get_pyhsical_goban_state(rgb_pix):
    # TODO: not implemented -- intended to return the stones detected on a
    # physical-goban image (counterpart of get_digital_goban_state).
    pass
def picture_to_rgb(path):
    """Load the image at `path` and return it as an RGB pixel array.

    Bug fix: the original body was ``return misc.imageio(path)`` -- ``misc``
    is never imported (only ``imageio`` is) and raised NameError on every
    call.  ``imageio.imread`` is the loader used throughout this project.
    """
    return imageio.imread(path)
| Python | 7 | 16.285715 | 38 | /src/cam_actions.py | 0.719008 | 0.719008 |
folmez/Handsfree-KGS | refs/heads/master | import src
import time
# Announce new moves: poll the on-screen KGS goban every 5 seconds and print
# which player just placed a stone and where.
UL_x, UL_y, goban_step = src.get_goban_corners()
prev_stone_set = set()
print("Started scanning the board for moves every 5 seconds...")
while True:
    # wait between screenshots
    time.sleep(5)
    # get board screenshot
    board_rgb_screenshot = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)
    # find the stones on the board
    # NOTE(review): screenshot_actions defines get_digital_goban_state --
    # confirm src actually exports a get_goban_state alias.
    current_stone_set = src.get_goban_state(board_rgb_screenshot)
    # is there a new stone on the board?  (strict superset check)
    if current_stone_set > prev_stone_set:
        # find the new stone
        stone = current_stone_set - prev_stone_set
        # IN THE FUTURE, ALLOW FOR OPPONENT TO MAKE A QUICK MOVE!!!
        assert len(stone) == 1
        # say the new moves on the board; stone tuples are (player, i, j)
        player = list(stone)[0][0] # 1-black, 2-white
        i, j = list(stone)[0][1], list(stone)[0][2]
        pos = src.int_coords_to_str(i,j)
        if player==1:
            update_msg = "Black played at " + pos
        elif player==2:
            update_msg = "White played at " + pos
        print(update_msg)
        prev_stone_set = current_stone_set
    else:
        print("No moves made!")
folmez/Handsfree-KGS | refs/heads/master | import pyscreeze
import numpy as np
import matplotlib.pyplot as plt
import src
def get_digital_goban_state(rgb_pix, plot_stuff=False):
    """Detect stones on a square screenshot of a 19x19 digital goban.

    Returns a set of (player, col, row) tuples with player 1 = black and
    2 = white, coordinates 1-based from the upper-left corner.  The input
    array is mutated in place (detected stone areas are painted solid) --
    pass a copy if the caller still needs the original pixels.
    """
    # RGB of Black = [ 0, 0, 0]
    # RGB of White = [255, 255, 255]
    # RGB of Orange = [255, 160, 16]
    # Use red scale to find out black stones, blue scale to find out white stones
    # (1, 1, 1) - Black A1 (upper corner)
    # (2, 19, 19) - White T10 (lower corner)
    idx = np.arange(19)+1
    m, n, z = rgb_pix.shape
    assert m == n
    # Approximate diameter of a stone in terms of pixels
    stone_diam = n/19
    # Calculate pixels where stone centers will be positioned
    stone_centers = np.round(stone_diam*idx) - 0.5 * np.round(stone_diam) - 1
    stone_centers = stone_centers.astype(int)
    # For every stone center, we will check a square matrix centered around
    # the stone center and find the average color. If it is black, then the
    # stone is black, if it is white, then the stone is white, otherwise no stone
    square_length_in_a_stone = int(np.round((n/19) / np.sqrt(2)))
    if square_length_in_a_stone % 2 == 0:
        d = square_length_in_a_stone / 2
    else:
        d = (square_length_in_a_stone-1) / 2
    d = int(d-1) # just in case, make square smaller and integer
    # Calculate the mean of a small matrix around every board point to find out
    # if there is a black stone or white stone or nothing
    stones = set()
    for posi, i in enumerate(stone_centers, start=1):
        for posj, j in enumerate(stone_centers, start=1):
            # Find black stones (mean of red channel well below mid-gray)
            # NOTE(review): the sampled window [i:i+d+1, j:j+d+1] starts AT the
            # center pixel rather than being centered on it -- confirm this
            # offset is intentional (it works because stones dwarf the window).
            mat = rgb_pix[:,:,0]
            color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))
            if color < 125:
                stones.add((1, posj, posi)) # black stone
                rgb_pix[i-d+1:i+d, j-d+1:j+d, :] = 0
            # Find white stones (mean of blue channel well above mid-gray)
            mat = rgb_pix[:,:,2]
            color = np.mean(np.mean(mat[i:i+d+1, j:j+d+1]))
            if color > 125:
                stones.add((2, posj, posi)) # white stone
                rgb_pix[i-d+1:i+d, j-d+1:j+d] = 255
    # Plot for debugging
    if plot_stuff:
        plt.imshow(rgb_pix)
        plt.show()
    return stones
def KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step):
    """Screenshot the on-screen KGS goban (plus a half-step margin on every
    side) and return it as an RGB pixel array."""
    left = UL_x - 0.5*goban_step
    top = UL_y - 0.5*goban_step
    right = UL_x + 18*goban_step + 0.5*goban_step
    bottom = UL_y + 18*goban_step + 0.5*goban_step
    im = pyscreeze.screenshot(region=(left, top, right - left, bottom - top))
    # Drop any alpha channel: keep only R, G, B.
    return np.array(im)[..., :3]
| Python | 72 | 35.541668 | 82 | /src/screenshot_actions.py | 0.582288 | 0.549981 |
folmez/Handsfree-KGS | refs/heads/master | import matplotlib.pyplot as plt
# Scratch script: record (x, y) data coordinates of every click on a plot.
xy=[]
def onclick(event):
    """Matplotlib handler: print and collect the clicked data coordinates."""
    print(event.xdata, event.ydata)
    xy.append((event.xdata, event.ydata))
fig = plt.figure()
plt.plot(range(10))
# Route button presses to the collector above; show() blocks until closed.
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
print(xy)
folmez/Handsfree-KGS | refs/heads/master | import matplotlib.pyplot as plt
import imageio
import numpy as np
import src
# Manual goban-detection experiment: show a saved goban photo, let the user
# click its four outer corners, then rescale and locate the 19x19 grid.
IMG_PATH = 'images/empty_pyshical_goban1.png'
board_corners = []
def onclick(event):
    """Collect each clicked (x, y) pair into board_corners."""
    print(event.xdata, event.ydata)
    board_corners.append((event.xdata, event.ydata))
# Get RGB matrix of the picture with goban
rgb = imageio.imread(IMG_PATH)
fig = plt.figure()
plt.imshow(rgb)
plt.title("Please click on UL-UR-BL-BR corners...")
fig.canvas.mpl_connect('button_press_event', onclick)
plt.show()
# Unpack the four clicked corners (order: UL, UR, BL, BR).
UL_outer_x, UL_outer_y = board_corners[0]
UR_outer_x, UR_outer_y = board_corners[1]
BL_outer_x, BL_outer_y = board_corners[2]
BR_outer_x, BR_outer_y = board_corners[3]
# Remove non-goban part from the RGB matrix and make it a square matrix
# NOTE(review): elsewhere rescale_pyhsical_goban_rgb is called with a single
# flat corner list `ob`; this 9-argument call looks like an older signature --
# confirm which form the current src module expects.
rgb = src.rescale_pyhsical_goban_rgb(rgb, \
    UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
    BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)
# Find the indices of board points in the new square RGB matrix
x_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)
# Mark board points
src.mark_board_points(rgb, x_idx, y_idx)
#bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]
#src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)
#red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
#bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)
#src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \
#    'black', 16,4)
| Python | 45 | 32.066666 | 83 | /auto_goban_detection.py | 0.659946 | 0.646505 |
folmez/Handsfree-KGS | refs/heads/master | import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from pynput.mouse import Button, Controller
import pytest
import imageio
import src
# Write a test of play_handsfree_GO.py using already existing frames
img_name = []
folder_name = 'images/sample_game_log/ex1/'
# empty board for outer board boundary detection
img_name.append(folder_name + 'opencv_frame_1.png')
UL_outer_x, UL_outer_y = 376.27419354838713, 91.34516129032261
UR_outer_x, UR_outer_y = 962.08064516129020, 101.66774193548395
BL_outer_x, BL_outer_y = 120.79032258064518, 641.0225806451613
BR_outer_x, BR_outer_y = 1265.3064516129032, 652.6354838709677
# black stones on corners and a white stone at center
img_name.append(folder_name + 'opencv_frame_3.png')
# white stones on corners and a black stone at center
img_name.append(folder_name + 'opencv_frame_4.png')
# verifying calibration
img_name.append(folder_name + 'opencv_frame_b_1_1.png') # black at (1,1)
img_name.append(folder_name + 'opencv_frame_b_1_19.png') # black at (1,19)
img_name.append(folder_name + 'opencv_frame_b_19_19.png') # black at (19,19)
img_name.append(folder_name + 'opencv_frame_b_19_1.png') # black at (19,1)
img_name.append(folder_name + 'opencv_frame_b_10_10.png') # black at (10,10)
img_name.append(folder_name + 'opencv_frame_b_4_4.png') # black at (4,4)
img_name.append(folder_name + 'opencv_frame_b_4_10.png') # black at (4,10)
img_name.append(folder_name + 'opencv_frame_b_4_16.png') # black at (4,16)
img_name.append(folder_name + 'opencv_frame_b_16_16.png') # black at (16,16)
img_name.append(folder_name + 'opencv_frame_w_1_1.png') # white at (1,1)
img_name.append(folder_name + 'opencv_frame_w_10_10.png') # white at (10,10)
img_name.append(folder_name + 'opencv_frame_w_16_16.png') # white at (16,16)
img_name.append(folder_name + 'opencv_frame_w_19_19.png') # white at (19,19)
#opencv_frame_b_10_4.png
#opencv_frame_b_10_16.png
#opencv_frame_b_16_4.png
#opencv_frame_b_16_10.png
#opencv_frame_b_19_1.png
#opencv_frame_w_1_19.png
#opencv_frame_w_4_4.png
#opencv_frame_w_4_10.png
#opencv_frame_w_4_16.png
#opencv_frame_w_10_16.png
#opencv_frame_w_16_4.png
#opencv_frame_w_16_10.png
#opencv_frame_w_19_1.png
def test_play_handsfree_GO():
    """End-to-end replay of the calibration pipeline on saved frames.

    Mirrors play_handsfree_GO.py using the pre-recorded images listed in
    `img_name` and the hard-coded corner constants above, then drives one
    move onto a live KGS board.  NOTE(review): this test calls input() and
    moves the real mouse -- it is interactive and will hang under a plain
    CI pytest run; consider a skip marker.
    """
    ps = False
    # STEP 0 - EMPTY GOBAN
    # Get outer boundaries of pyhsical goban -- skipped for speed
    ob = [UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
        BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y]
    # Remove non-goban part from the RGB matrix and make it a square matrix
    # Find the indices of board points in the new square RGB matrix
    #UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
    #    BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = \
    #    src.get_pyhsical_board_outer_corners(img_name[0])
    rgb = imageio.imread(img_name[0])
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    x_idx, y_idx = src.find_board_points(rgb, plot_stuff=ps)
    # STEP 1 - GOBAN WITH BLACK STONES ON CORNERS AND A WHITE STONE AT CENTER
    rgb = imageio.imread(img_name[1])
    bxy, wxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    red_scale_th1, blue_scale_th1 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
    _, _ = src.mark_stones(rgb, x_idx, y_idx, \
        red_scale_th1, blue_scale_th1, plot_stuff=ps)
    # STEP 2 - GOBAN WITH WHITE STONES ON CORNERS AND A BLACK STONE AT CENTER
    rgb = imageio.imread(img_name[2])
    wxy, bxy = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    red_scale_th2, blue_scale_th2 = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
    _, _ = src.mark_stones(rgb, x_idx, y_idx, \
        red_scale_th2, blue_scale_th2, plot_stuff=ps)
    # Average the two calibration passes.
    red_scale_th = 0.5 * (red_scale_th1 + red_scale_th2)
    blue_scale_th = 0.5 * (blue_scale_th1 + blue_scale_th2)
    # STEP 3 - VERIFY CALIBRATION (one saved frame per stone/position)
    verify_calibration_for_test_purposes(img_name[3], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 1, 1, ps)
    verify_calibration_for_test_purposes(img_name[4], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 1, 19, ps)
    verify_calibration_for_test_purposes(img_name[5], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 19, 19, ps)
    verify_calibration_for_test_purposes(img_name[6], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 19, 1, ps)
    verify_calibration_for_test_purposes(img_name[7], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 10, 10, ps)
    verify_calibration_for_test_purposes(img_name[8], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 4, 4, ps)
    verify_calibration_for_test_purposes(img_name[9], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 4, 10, ps)
    verify_calibration_for_test_purposes(img_name[10], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 4, 16, ps)
    verify_calibration_for_test_purposes(img_name[11], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'black', 16, 16, ps)
    verify_calibration_for_test_purposes(img_name[12], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 1, 1, ps)
    verify_calibration_for_test_purposes(img_name[13], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 10, 10, ps)
    verify_calibration_for_test_purposes(img_name[14], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 16, 16, ps)
    verify_calibration_for_test_purposes(img_name[15], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, 'white', 19, 19, ps)
    # DIGITAL BOARD DETECTION
    # Ask the user to open a KGS board
    print('\n OPEN A KGS BOARD/GAME NOW')
    input('ENTER when the digital board is open: ')
    # Get the user to click on come corners to get to know the digital board
    UL_x, UL_y, goban_step = src.get_goban_corners()
    # START REPLAYING PYHSICAL BOARD MOVES ON THE DIGITAL BOARD
    mouse = Controller() # obtain mouse controller
    print("Placing a black stone at (10,10)")
    bxy, wxy = [], [] # empty board in the beginning
    color, i, j = src.scan_next_move(img_name[7], ob, x_idx, y_idx, \
        red_scale_th, blue_scale_th, bxy, wxy, plot_stuff=ps)
    _, _ = src.play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy, \
        UL_x, UL_y, goban_step)
def verify_calibration_for_test_purposes(img, ob, x, y, r, b, c, i, j, ps):
    """Assert that a stone of color `c` is detected at board point (i, j)
    in the saved frame `img`, using thresholds r (red) and b (blue)."""
    frame = src.rescale_pyhsical_goban_rgb(imageio.imread(img), ob)
    print(f"Verifying a {c} stone at {src.convert_physical_board_ij_to_str(i,j)}...")
    assert src.is_this_stone_on_the_board(frame, x, y, r, b, c, i, j, ps)
| Python | 135 | 49.940742 | 85 | /tests/test_play_handsfree_GO.py | 0.637342 | 0.581213 |
folmez/Handsfree-KGS | refs/heads/master | from pynput.mouse import Button, Controller
import time
import sys
import os
import pytest
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src
def test_str_to_integer_coordinates():
    # Fixture points: the four board corners and the centre star (tengen).
    # 'A19' is the upper-left corner -> (1, 1); 'T1' the lower-right -> (19, 19).
    assert src.str_to_integer_coordinates('A19') == (1, 1)
    assert src.str_to_integer_coordinates('D16') == (4, 4)
    assert src.str_to_integer_coordinates('D10') == (4, 10)
    assert src.str_to_integer_coordinates('T1') == (19, 19)
    assert src.str_to_integer_coordinates('K10') == (10, 10)
def test_integer_coordinates_to_str():
    # Inverse of test_str_to_integer_coordinates: same fixture points,
    # integer pair back to KGS letter+number notation.
    assert src.int_coords_to_str(1, 1) == 'A19'
    assert src.int_coords_to_str(4, 4) == 'D16'
    assert src.int_coords_to_str(4, 10) == 'D10'
    assert src.int_coords_to_str(19, 19) == 'T1'
    assert src.int_coords_to_str(10, 10) == 'K10'
@pytest.mark.slow
def test_place_stones_on_all_stars():
print()
# Get goban corners
UL_x, UL_y, goban_step = src.get_goban_corners()
# Obtain mouse controller
mouse = Controller()
# Place stones on stars
print('\n', 41*'-')
print(5*'-', 'Placing stones on all stars', 5*'-')
print(41*'-', '\n')
for str in ['D16', 'K16', 'Q16', 'D10', 'K10', 'Q10', 'D4', 'K4', 'Q4']:
i, j = src.str_to_integer_coordinates(str)
x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)
src.make_the_move(mouse, x, y)
# Get KGS goban as a square grayscale
rgb_pix = src.KGS_goban_rgb_screenshot(UL_x, UL_y, goban_step)
| Python | 43 | 33.930233 | 82 | /tests/test_mouse_actions.py | 0.622503 | 0.575899 |
folmez/Handsfree-KGS | refs/heads/master | from setuptools import setup, find_packages
# Minimal packaging metadata for the project.
# NOTE(review): description reads "Pay Handsfree Go" — likely meant "Play";
# left unchanged here since it is a runtime string.
setup( name='Handsfree-KGS',
       version='0.0',
       description='Pay Handsfree Go on KGS',
       author='Fatih Olmez',
       author_email='folmez@gmail.com',
       packages=find_packages())
| Python | 7 | 32.57143 | 46 | /setup.py | 0.608511 | 0.6 |
folmez/Handsfree-KGS | refs/heads/master | import matplotlib.pyplot as plt
import numpy as np
from scipy.signal import argrelmin
import imageio
import src
def play_next_move_on_digital_board(mouse, color, i, j, bxy, wxy, \
                                    UL_x, UL_y, goban_step):
    """Replay a newly scanned physical move on the digital (KGS) board.

    If color is None there is nothing to play; otherwise the (i, j) point is
    appended to the matching stone list and the mouse clicks the translated
    screen coordinate. Returns the (possibly updated) bxy, wxy lists.
    """
    if color is not None:
        print(f"New move: {color} played at {convert_physical_board_ij_to_str(i,j)}")
        # Fix: compare string values with ==, not identity (`is`) — `is` on
        # str literals is implementation-dependent and emits a SyntaxWarning.
        if color == 'black':
            bxy.append((i,j))
        elif color == 'white':
            wxy.append((i,j))
        # make the move
        x, y = src.int_coords_to_screen_coordinates(UL_x, UL_y, i, j, goban_step)
        src.make_the_move(mouse, x, y)
    return bxy, wxy
def convert_physical_board_ij_to_str(i,j):
    """Render a physical-board point as the display string "(i,j)".

    The physical board labels the upper-left corner (1,1) and the
    bottom-right corner (19,19); this helper keeps that labelling
    consistent in log messages.
    """
    return "({},{})".format(i, j)
def scan_next_move(img_name, ob, x_idx, y_idx, red_scale_th, blue_scale_th, \
                   bxy, wxy, plot_stuff=False):
    """Diff a freshly scanned board photo against the known position.

    Returns (color, i, j) of the single new stone, or (None, None, None)
    when the position is unchanged. Raises ValueError if the scan differs
    from the known position by anything other than exactly one new stone.
    Fix: removed a leftover debug print of the four stone lists.
    """
    rgb = imageio.imread(img_name)
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \
                       red_scale_th, blue_scale_th, plot_stuff=plot_stuff)
    if set(bxy_new) == set(bxy) and set(wxy_new) == set(wxy):
        color, i, j = None, None, None
        print('No new moves')
    elif len(set(bxy_new)-set(bxy)) == 1 and set(wxy_new) == set(wxy):
        color = 'black'
        [(i,j)] = list(set(bxy_new)-set(bxy))
    elif len(set(wxy_new)-set(wxy)) == 1 and set(bxy_new) == set(bxy):
        color = 'white'
        [(i,j)] = list(set(wxy_new)-set(wxy))
    else:
        raise ValueError('Move scanner error!')
    return color, i, j
# Module-level accumulator for corner pixels clicked on the displayed photo;
# consumed by get_pyhsical_board_outer_corners below.
BOARD_CORNERS = []
def onclick(event):
    # Matplotlib button-press callback: echo and record the click position.
    print(event.xdata, event.ydata)
    BOARD_CORNERS.append((event.xdata, event.ydata))
def get_pyhsical_board_outer_corners(img_name):
    """Show the photo and let the user click the four outer board corners.

    Clicks must be made in UL, UR, BL, BR order; the eight pixel coordinates
    are returned once the plot window is closed. Relies on the module-level
    BOARD_CORNERS list filled by the onclick() callback.
    """
    rgb = imageio.imread(img_name)
    fig = plt.figure()
    plt.imshow(rgb)
    plt.title("Please click on UL-UR-BL-BR corners or close plot...")
    fig.canvas.mpl_connect('button_press_event', onclick)
    plt.show()
    # The first four recorded clicks are taken as the corners, in click order.
    UL_outer_x, UL_outer_y = BOARD_CORNERS[0]
    UR_outer_x, UR_outer_y = BOARD_CORNERS[1]
    BL_outer_x, BL_outer_y = BOARD_CORNERS[2]
    BR_outer_x, BR_outer_y = BOARD_CORNERS[3]
    return UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
           BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y
def find_board_points(rgb, plot_stuff=False):
    """
    You have the RGB matrix of the goban as a square matrix but you don't
    know which entries correspond to the points on the board. This code finds
    the board points by plotting average red, green and blue scales and
    calculating the 19 local minima. Why? Because board points are
    intersections of black lines and RGB value of black color is [0,0,0].

    Returns (x_idx, y_idx): pixel indices of the 19 column and 19 row lines.
    The same detection runs on all three channels; make_indices_agree picks
    the most plausible of the three results.
    """
    if plot_stuff:
        # Plot-enabled path: identical computation, interleaved with subplots.
        plt.subplot(221)
        plt.imshow(rgb)
        plt.subplot(222)
        x1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=0), 'r', plot_stuff)
        plt.subplot(223)
        x2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=0), 'g', plot_stuff)
        plt.subplot(224)
        x3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=0), 'b', plot_stuff)
        plt.show()
        plt.subplot(221)
        plt.imshow(rgb)
        plt.subplot(222)
        y1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=1), 'r', plot_stuff)
        plt.subplot(223)
        y2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=1), 'g', plot_stuff)
        plt.subplot(224)
        y3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=1), 'b', plot_stuff)
        plt.show()
    else:
        # axis=0 averages columns (vertical lines), axis=1 averages rows.
        x1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=0), 'r', plot_stuff)
        x2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=0), 'g', plot_stuff)
        x3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=0), 'b', plot_stuff)
        y1_idx = find_custom_local_minima(np.mean(rgb[:,:,0],axis=1), 'r', plot_stuff)
        y2_idx = find_custom_local_minima(np.mean(rgb[:,:,1],axis=1), 'g', plot_stuff)
        y3_idx = find_custom_local_minima(np.mean(rgb[:,:,2],axis=1), 'b', plot_stuff)
    # Sometimes indices found by red, green and blue scales don't agree
    x_idx = src.make_indices_agree(x1_idx, x2_idx, x3_idx)
    y_idx = src.make_indices_agree(y1_idx, y2_idx, y3_idx)
    return x_idx, y_idx
def rescale_pyhsical_goban_rgb(rgb, ob):
    """Crop/warp the photographed goban into a square n x n RGB matrix.

    ob is the 8-tuple (UL_x, UL_y, UR_x, UR_y, BL_x, BL_y, BR_x, BR_y) of
    outer-corner pixel coordinates. The board interior is resampled by
    scanning n horizontal segments between the left and right edges.
    """
    # Get outer boundaries from ob
    UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
    BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y = ob
    # Rescale to n by n matrix
    n = 300
    # find n points on the left and on the right boundaries
    x_left_vals, y_left_vals, rgb, _ = \
        src.return_int_pnts(n, rgb, BL_outer_x, BL_outer_y, UL_outer_x, UL_outer_y)
    x_right_vals, y_right_vals, rgb, _ = \
        src.return_int_pnts(n, rgb, BR_outer_x, BR_outer_y, UR_outer_x, UR_outer_y)
    # Calculate a new RGB matrix only for the board, by removing outside the board
    new_rgb = np.zeros([n,n,3])
    for i in range(n):
        x1, y1 = x_left_vals[i], y_left_vals[i]
        x2, y2 = x_right_vals[i], y_right_vals[i]
        _, _, rgb, v = src.return_int_pnts(n, rgb, x1, y1, x2, y2)
        # Edges were sampled bottom-up, so row n-i-1 flips the image upright.
        for j in range(n):
            new_rgb[n-i-1, j, :] = v[j]
    return new_rgb.astype(np.uint8)
def plot_goban_rgb(rgb, bxy=None, wxy=None):
    """Display the goban RGB matrix with 1-based board-index axis labels.

    bxy / wxy are accepted for API compatibility but are currently unused.
    Fix: replaced the mutable default arguments ([]) with None — a shared
    list default is a classic Python pitfall.
    """
    plt.imshow(rgb)
    plt.ylabel('1st index = 1, ..., 19')
    plt.xlabel('2nd index = 1, ..., 19')
    plt.show()
def average_RGB(rgb, xMAX, yMAX, x, y, w):
    """Average R, G and B over a (2w+1)-wide window centred at point (x, y).

    Used for stone detection around a board point. The window is clamped to
    the image bounds (NOTE(review): the upper clamp is xMAX-1 / yMAX-1, which
    excludes the very last row/column — mark_stones uses the same convention,
    so it is kept for consistency; confirm intended).
    Fix: a single np.mean over the 2-D window already averages all pixels;
    the original nested np.mean(np.mean(...)) was redundant.
    """
    xL, xR = np.maximum(0, x-w), np.minimum(x+w+1, xMAX-1)
    yL, yR = np.maximum(0, y-w), np.minimum(y+w+1, yMAX-1)
    return [np.mean(rgb[yL:yR, xL:xR, ch]) for ch in range(3)]
def make_indices_agree(x1, x2, x3):
    """Pick the most plausible of three board-line index candidates.

    Board points are determined from local extrema of the R, G and B
    channels, which sometimes disagree. Board lines are (nearly) evenly
    spaced, so the candidate whose second difference is closest to constant
    (smallest max |diff(diff(x))|) is chosen; on ties the later argument wins.
    Fix: the original used ``x = 0`` / ``assert x is not 0`` — an identity
    check against an int literal (SyntaxWarning, CPython-dependent); a None
    sentinel is used instead.
    """
    a1 = np.amax(abs(np.diff(np.diff(x1))))
    a2 = np.amax(abs(np.diff(np.diff(x2))))
    a3 = np.amax(abs(np.diff(np.diff(x3))))
    x = None
    x = x1 if a1 <= a2 and a1 <= a3 else x
    x = x2 if a2 <= a1 and a2 <= a3 else x
    x = x3 if a3 <= a1 and a3 <= a2 else x
    assert x is not None
    return x
def calibrate(rgb, x_idx, y_idx, bxy=None, wxy=None):
    """
    Depending on light, laptop angle etc. the board may have different RGB
    values at different times. So how do we distinguish black and white
    stones? RGB of black = [0,0,0]; RGB of white = [255,255,255]. We use the
    red scale to distinguish black stones and the blue scale to distinguish
    white stones.

    If bxy / wxy are empty, the user is prompted interactively for known
    stone positions. Returns (red_scale_th, blue_scale_th).
    Fix: the original had mutable default arguments ([]) that were mutated
    by bxy.append(...) — the shared defaults accumulated stones across
    calls. None sentinels are used instead; callers passing lists see no
    change.
    """
    if bxy is None:
        bxy = []
    if wxy is None:
        wxy = []
    xMAX, yMAX, _ = rgb.shape
    roll_w = int(np.round(0.01*xMAX))
    # BLACK STONE CALIBRATION
    # Input black stone indices if bxy is empty
    if not bxy:
        msg = 'Enter black stone indices (e.g. 1 14 and 0 for end): '
        while True:
            input_text = input(msg)
            if input_text == '0':
                break
            else:
                j,i = list(map(int, input_text.split()))
                bxy.append((i,j))
                RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
                print('RGB = ', RGB)
    # Find maximum red scale of black stones
    RMAX = 0
    for j,i in bxy:
        RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
        print(f"Black stone at ({i},{j}) with RGB = ", RGB)
        RMAX = np.maximum(RMAX, RGB[0])
    # Find the min red scale of the rest to distinguish
    RMIN_rest = 255
    for i,x in enumerate(x_idx, start=1):
        for j,y in enumerate(y_idx, start=1):
            if (j,i) not in bxy:
                RGB = src.average_RGB(rgb, xMAX, yMAX, x, y, roll_w)
                RMIN_rest = np.minimum(RMIN_rest, RGB[0])
    print('\nBlack stones have a maximum red scale =', RMAX)
    print('Rest of the board have a minimum red scale', RMIN_rest)
    print('Black stone red scale threshold will be average of these two.\n')
    # Red scale threshold for black stone detection
    assert RMAX < RMIN_rest
    red_scale_th = 0.5 * RMAX + 0.5 * RMIN_rest
    # WHITE STONE CALIBRATION
    # Input white stone indices if wxy is empty
    if not wxy:
        msg = 'Enter white stone indices (e.g. 1 14 and 0 for end): '
        while True:
            input_text = input(msg)
            if input_text == '0':
                break
            else:
                j,i = list(map(int, input_text.split()))
                wxy.append((i,j))
                RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
                print('RGB = ', RGB)
    # Find minimum blue scale of white stones
    BMIN = 255
    for (j,i) in wxy:
        RGB = src.average_RGB(rgb, xMAX, yMAX, x_idx[i-1], y_idx[j-1], roll_w)
        print(f"White stone at ({i},{j}) with RGB = ", RGB)
        BMIN = np.minimum(BMIN, RGB[2])
    # Find the max blue scale of the rest to distinguish
    BMAX_rest = 0
    for i,x in enumerate(x_idx, start=1):
        for j,y in enumerate(y_idx, start=1):
            if (j,i) not in wxy:
                RGB = src.average_RGB(rgb, xMAX, yMAX, x, y,roll_w)
                BMAX_rest = np.maximum(BMAX_rest, RGB[2])
    print('\nWhite stones have a minimum blue scale >', BMIN)
    print('Rest of the board have a maximum blue scale', BMAX_rest)
    print('White stone blue scale threshold will be average of these two.\n')
    # Blue scale threshold for white stone detection
    assert BMIN > BMAX_rest
    blue_scale_th = 0.5 * BMIN + 0.5 * BMAX_rest
    return red_scale_th, blue_scale_th
def is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \
                               color, i, j, plot_stuff=False):
    """Return True iff a stone of `color` is detected at board point (i, j).

    Detection averages the red channel (black stones: red < red_scale_th)
    or the blue channel (white stones: blue > blue_scale_th) over a small
    window around the point.
    """
    i,j = j,i # RGB matrix is messed up so this needs to be done
    x, y = x_idx[i-1], y_idx[j-1]
    if plot_stuff:
        fig = plt.figure()
        plt.imshow(rgb)
        plt.ylabel('1st index = 1, ..., 19')
        plt.xlabel('2nd index = 1, ..., 19')
        plt.title(f"Checking if there is a {color} stone at ({j},{i})")
        plt.plot(x, y, 'ro', markersize=20, fillstyle='none')
        plt.show()
    xMAX, yMAX, _ = rgb.shape
    # Window half-width: ~1% of the image size, same as in calibrate().
    roll_w = int(np.round(0.01*xMAX))
    xL, xR = np.maximum(0, x-roll_w), np.minimum(x+roll_w+1, xMAX-1)
    yL, yR = np.maximum(0, y-roll_w), np.minimum(y+roll_w+1, yMAX-1)
    red_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 0]))
    blue_scale = np.mean(np.mean(rgb[yL:yR, xL:xR, 2]))
    msg = f"There is {color} stone at {src.int_coords_to_str(i,j)} = ({i},{j})"
    if color == 'black' and red_scale < red_scale_th:
        print(msg)
        return True
    elif color == 'white' and blue_scale > blue_scale_th:
        print(msg)
        return True
    else:
        return False
def mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, plot_stuff=True):
    """Classify every board point as black stone, white stone or empty.

    For each intersection, the red and blue channels are averaged over a
    small window; blue above blue_scale_th means a white stone, otherwise
    red below red_scale_th means a black stone. Returns (bxy, wxy) lists of
    (row, col) pairs, 1-based. When plot_stuff is True an annotated copy of
    the image is shown (white / yellow / red squares).
    """
    n_rows, n_cols, _ = rgb.shape
    half = int(np.round(0.01*n_rows))
    overlay = np.copy(rgb)
    black_pts, white_pts = [], []
    for col_no, cx in enumerate(x_idx, start=1):
        for row_no, cy in enumerate(y_idx, start=1):
            x0 = np.maximum(0, cx-half)
            x1 = np.minimum(cx+half+1, n_rows-1)
            y0 = np.maximum(0, cy-half)
            y1 = np.minimum(cy+half+1, n_cols-1)
            red = np.mean(rgb[y0:y1, x0:x1, 0])
            blue = np.mean(rgb[y0:y1, x0:x1, 2])
            if blue > blue_scale_th:
                white_pts.append((row_no, col_no))
                overlay[y0:y1, x0:x1,:] = 255, 255, 255 # white stone
            elif red < red_scale_th:
                black_pts.append((row_no, col_no))
                overlay[y0:y1, x0:x1,:] = 255, 255, 0 # black stone
            else:
                overlay[y0:y1, x0:x1,:] = 255, 0, 0 # empty
    if plot_stuff:
        src.plot_goban_rgb(overlay)
    return black_pts, white_pts
def mark_board_points(rgb, x_idx, y_idx, bxy=[], wxy=[]):
    """
    Mark board points with red squares. Use yellow color for black stones and
    white color for white stones that are inputted. (Display only; the input
    image is not modified — an annotated copy is plotted.)
    """
    n_rows, n_cols, _ = rgb.shape
    half = int(np.round(0.01*n_rows))
    overlay = np.copy(rgb)
    for col_no, cx in enumerate(x_idx, start=1):
        for row_no, cy in enumerate(y_idx, start=1):
            x0, x1 = np.maximum(0, cx-half), np.minimum(cx+half+1, n_rows-1)
            y0, y1 = np.maximum(0, cy-half), np.minimum(cy+half+1, n_cols-1)
            if (row_no, col_no) in bxy: # black stone
                overlay[y0:y1, x0:x1,:] = 255, 255, 0 # yellow color
            elif (row_no, col_no) in wxy: # white stone
                overlay[y0:y1, x0:x1,:] = 255, 255, 255 # white color
            else: # empty board point
                overlay[y0:y1, x0:x1,:] = 255, 0, 0 # red color
    src.plot_goban_rgb(overlay)
def find_custom_local_minima(ar1, color, plot_stuff):
    """Detrend `ar1` by subtracting a rolling mean (window ~1% of length),
    then return the indices of the 19 board-line minima found in the
    detrended signal; optionally plot the signal and the minima."""
    window = int(np.round(len(ar1)/100))
    detrended = subtract_rolling_sum(window, ar1)
    minima = find_local_minima(detrended)
    if plot_stuff:
        plt.plot(detrended, color)
        for m in minima:
            plt.plot(m, detrended[m], 'k*')
    return minima
def find_local_minima(ar):
# Try to find the optional cut-off that may help determine the 19 points on
# the go board. Start with an interval [min_val, max_val] and squeeze until
# it hits exactly 19 points
# Find indices that correspond to local minima
x = argrelmin(ar)
idx_list = x[0]
target = 19
min_val, max_val = np.amin(ar), 100.0
# Assert that above choices are good
assert sum(ar[i] <= min_val for i in idx_list) < target
assert sum(ar[i] <= max_val for i in idx_list) > target
# Find the cut-off below which there are exactly 19 local minima
while True:
new_val = 0.5 * min_val + 0.5 * max_val
if sum(ar[i] <= new_val for i in idx_list) < target:
min_val = new_val
elif sum(ar[i] <= new_val for i in idx_list) > target:
max_val = new_val
elif sum(ar[i] <= new_val for i in idx_list) == target:
break
# Find the indices
return [i for i in idx_list if ar[i] <= new_val]
def rolling_sum(w, ar):
    """Centred rolling *mean* (despite the name) with half-width `w`.

    NOTE(review): the edges are asymmetric — the left edge averages
    ar[0:i+1] (trailing window only) while the right edge averages ar[i:];
    confirm this is intended.
    """
    n = len(ar)
    out = np.zeros(n)
    for i in range(n):
        if i < w:
            out[i] = np.mean(ar[0:i+1])
        elif i > n-w-1:
            out[i] = np.mean(ar[i:n+1])
        else:
            out[i] = np.mean(ar[i-w:i+w+1])
    assert len(out) == n
    return out
def subtract_rolling_sum(w, ar):
    # Detrend: subtract the centred rolling mean (see rolling_sum) from ar.
    return ar - rolling_sum(w,ar)
def return_int_pnts(num, rgb, x1, y1, x2, y2):
    """Sample `num` evenly spaced integer pixel coordinates along the segment
    (x1, y1)-(x2, y2) and return (x_vals, y_vals, rgb, rgb_samples) where
    rgb_samples holds the RGB triple at each sampled pixel."""
    xs = np.round(np.linspace(x1, x2, num=num, endpoint=True)).astype(int)
    ys = np.round(np.linspace(y1, y2, num=num, endpoint=True)).astype(int)
    # At least one axis must advance monotonically without duplicates,
    # otherwise rounding collapsed distinct sample points.
    assert len(set(xs)) == len(xs) or len(set(ys)) == len(ys)
    samples = [rgb[yy, xx, 0:3] for xx, yy in zip(xs, ys)]
    return xs, ys, rgb, samples
| Python | 399 | 38.42857 | 87 | /src/picture_actions.py | 0.57507 | 0.553458 |
folmez/Handsfree-KGS | refs/heads/master | from .mouse_actions import get_goban_corners, str_to_integer_coordinates
from .mouse_actions import int_coords_to_screen_coordinates, make_the_move
from .mouse_actions import int_coords_to_str
from .screenshot_actions import KGS_goban_rgb_screenshot, get_digital_goban_state
from .picture_actions import plot_goban_rgb, average_RGB, make_indices_agree
from .picture_actions import return_int_pnts, subtract_rolling_sum
from .picture_actions import rolling_sum, find_custom_local_minima
from .picture_actions import mark_board_points, is_this_stone_on_the_board
from .picture_actions import mark_stones, calibrate
from .picture_actions import find_board_points, rescale_pyhsical_goban_rgb
from .picture_actions import get_pyhsical_board_outer_corners
from .picture_actions import convert_physical_board_ij_to_str
from .picture_actions import play_next_move_on_digital_board, scan_next_move
| Python | 13 | 67.384613 | 81 | /src/__init__.py | 0.817773 | 0.817773 |
folmez/Handsfree-KGS | refs/heads/master | import pytest
import imageio
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import src
# stones - upper-left corner is (1,1), lower-left corner is (19,1)
IMG_PATH = ['images/pyshical_goban_pic1.png', 'images/pyshical_goban_pic2.png', \
            'images/pyshical_goban_pic3.png', 'images/pyshical_goban_pic4.png', \
            'images/pyshical_goban_pic5.png']
# Expected black/white stone (row, col) positions for each fixture photo.
bxy0, wxy0 = [(4,4), (16,4)], [(4,16),(16,16)]
bxy1, wxy1 = [(1,9), (16,8)], [(10,1),(13,19)]
bxy2, wxy2 = [(1,19), (17,3)], [(1,3),(19,19)]
bxy3, wxy3 = [(1,19), (19,1), (5,4), (6,16), (12,8), (14,6), (16,10), (19,13)], \
             [(1,1), (4,10), (7,7), (10,4), (10,10), (12,11), (15,7), (19,19)]
bxy4, wxy4 = [(1,1), (19,19), (1,19), (19,1)], [(10,10)]
# Hand-measured outer-corner pixel coordinates: set 0 is used for pictures
# 1-3, set 3 for picture 4 and set 4 for picture 5 (see the tests below).
UL_outer_x0, UL_outer_y0 = 315, 24
UR_outer_x0, UR_outer_y0 = 999, 40
BL_outer_x0, BL_outer_y0 = 3, 585
BR_outer_x0, BR_outer_y0 = 1273, 621
UL_outer_x3, UL_outer_y3 = 321, 235
UR_outer_x3, UR_outer_y3 = 793, 244
BL_outer_x3, BL_outer_y3 = 92, 603
BR_outer_x3, BR_outer_y3 = 933, 608
UL_outer_x4, UL_outer_y4 = 414, 256
UR_outer_x4, UR_outer_y4 = 962, 269
BL_outer_x4, BL_outer_y4 = 217, 659
BR_outer_x4, BR_outer_y4 = 1211, 679
@pytest.mark.skip
def test_board_outer_corner():
    # Interactive check (skipped by default): the user clicks the corners on
    # picture 1 and the first click must land within 5 pixels of the
    # recorded upper-left corner.
    UL_outer_x0_click, UL_outer_y0_click, _, _, _, _, _, _ = \
        src.get_pyhsical_board_outer_corners(IMG_PATH[0])
    assert abs(UL_outer_x0_click - UL_outer_x0) < 5 # five pixels
    assert abs(UL_outer_y0_click - UL_outer_y0) < 5
def test_board_state_detection_from_camera_picture():
    # End-to-end scanner check on all five fixture photos: each call asserts
    # the expected stone sets are recovered and that one named stone is
    # individually detectable. Corner set 0 covers pictures 1-3, set 3
    # picture 4 and set 4 picture 5.
    assert_board_state(IMG_PATH[4], bxy4, wxy4, 'black', bxy4[0], \
                       UL_outer_x4, UL_outer_y4, UR_outer_x4, UR_outer_y4, \
                       BL_outer_x4, BL_outer_y4, BR_outer_x4, BR_outer_y4, \
                       plot_stuff=False)
    assert_board_state(IMG_PATH[0], bxy0, wxy0, 'black', bxy0[1], \
                       UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
                       BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)
    assert_board_state(IMG_PATH[1], bxy1, wxy1, 'white', wxy1[0], \
                       UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
                       BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0, \
                       plot_stuff=True)
    assert_board_state(IMG_PATH[2], bxy2, wxy2, 'black', bxy2[0], \
                       UL_outer_x0, UL_outer_y0, UR_outer_x0, UR_outer_y0, \
                       BL_outer_x0, BL_outer_y0, BR_outer_x0, BR_outer_y0)
    assert_board_state(IMG_PATH[3], bxy3, wxy3, 'white', wxy3[6], \
                       UL_outer_x3, UL_outer_y3, UR_outer_x3, UR_outer_y3, \
                       BL_outer_x3, BL_outer_y3, BR_outer_x3, BR_outer_y3)
def assert_board_state(IMG_PATH, bxy, wxy, color, ij_pair, \
                       UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
                       BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y, \
                       plot_stuff=False):
    """End-to-end check: rescale the photo, calibrate thresholds from the
    known stones, then assert the scanner recovers exactly bxy/wxy and that
    the stone `color` at `ij_pair` is individually detected."""
    # Get RGB matrix of the picture with goban
    rgb = imageio.imread(IMG_PATH)
    # Remove non-goban part from the RGB matrix and make it a square matrix.
    # Fix: src.rescale_pyhsical_goban_rgb takes (rgb, ob) where ob is the
    # 8-tuple of outer-corner coordinates — it was being called with the
    # eight values unpacked as positional arguments (TypeError).
    ob = (UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
          BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)
    rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
    # Find the indices of board points in the new square RGB matrix
    x_idx, y_idx = src.find_board_points(rgb, plot_stuff=plot_stuff)
    # Find color thresholds for stone detection
    red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
    # Refind stones using the above thresholds
    bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, \
                       red_scale_th, blue_scale_th, plot_stuff=plot_stuff)
    assert set(bxy) == set(bxy_new)
    assert set(wxy) == set(wxy_new)
    assert src.is_this_stone_on_the_board(rgb, x_idx, y_idx, \
           red_scale_th, blue_scale_th, color, ij_pair[0], ij_pair[1], \
           plot_stuff=True)
| Python | 84 | 47.940475 | 83 | /tests/test_picture_actions.py | 0.556069 | 0.48504 |
folmez/Handsfree-KGS | refs/heads/master | import matplotlib.pyplot as plt
import imageio
import numpy as np
import src
# Ad-hoc pipeline: load a goban photo, rescale it to a square board matrix,
# locate the 19x19 grid, calibrate stone thresholds and re-detect the stones.
IMG_PATH = 'images/pyshical_goban_pic1.png'
#IMG_PATH = 'images/pyshical_goban_pic2.png'
#IMG_PATH = 'images/pyshical_goban_pic3.png'
UL_outer_x, UL_outer_y = 315, 24
UR_outer_x, UR_outer_y = 999, 40
BL_outer_x, BL_outer_y = 3, 585
BR_outer_x, BR_outer_y = 1273, 621
#IMG_PATH = 'images/pyshical_goban_pic4.png'
#UL_outer_x, UL_outer_y = 321, 235
#UR_outer_x, UR_outer_y = 793, 244
#BL_outer_x, BL_outer_y = 92, 603
#BR_outer_x, BR_outer_y = 933, 608
# Get RGB matrix of the picture with goban
rgb = imageio.imread(IMG_PATH)
plt.imshow(rgb)
plt.show()
# Remove non-goban part from the RGB matrix and make it a square matrix.
# Fix: rescale_pyhsical_goban_rgb expects the corner coordinates packed in a
# single 8-tuple `ob`, not as eight positional arguments.
ob = (UL_outer_x, UL_outer_y, UR_outer_x, UR_outer_y, \
      BL_outer_x, BL_outer_y, BR_outer_x, BR_outer_y)
rgb = src.rescale_pyhsical_goban_rgb(rgb, ob)
# Find the indices of board points in the new square RGB matrix
x_idx, y_idx = src.find_board_points(rgb, plot_stuff=True)
bxy, wxy = [(4,4), (16,4)], [(4,16),(16,16)]
src.mark_board_points(rgb, x_idx, y_idx, bxy, wxy)
red_scale_th, blue_scale_th = src.calibrate(rgb, x_idx, y_idx, bxy, wxy)
bxy_new, wxy_new = src.mark_stones(rgb, x_idx, y_idx, red_scale_th, blue_scale_th)
src.is_this_stone_on_the_board(rgb, x_idx, y_idx, red_scale_th, blue_scale_th, \
                               'black', 16,4)
| Python | 41 | 33.219513 | 82 | /temp/process_pyhsical_goban_pic.py | 0.649323 | 0.604419 |
hoichunlaw/EventDriven | refs/heads/master | import eikon as ek
import numpy as np
import pandas as pd
import os
import shutil
import zipfile
import datetime
import cufflinks as cf
import configparser as cp
import platform
import pickle
import nltk
nltk.download('stopwords')
from copy import deepcopy
import collections
from nltk.tokenize import TreebankWordTokenizer
from nltk.corpus import stopwords
stoppingWordSet = set(stopwords.words('english'))
import tensorflow_hub as hub
import re
import tensorflow as tf
import tensorflow.keras as keras
# Local data / model / scratch locations (Windows paths on this machine).
dataRootPath = r"D:/Eikon_Data/"
dataRootPathNews = r"D:/Eikon_Data/News/"
dataRootPathMarketData = r"D:/Eikon_Data/Market_Data/"
dataRootPathDB = r"D:/Database/"
modelPath = r"D:/python/PROD_Model/"
zipFolderPath = r"D:/Zip_Folder/"
tf_hub_path = r"C:/Users/hc_la/AppData/Local/Temp/tfhub_modules/"
# Canonical date string format used throughout this module.
date_format = "%Y-%m-%d"
# ELMo sentence-embedding module from TF Hub (downloaded/cached at import).
elmo = hub.Module("https://tfhub.dev/google/elmo/3", trainable=True)
def createFullNameDict():
    """Load the underlying -> full-name-list mapping from the database CSV
    (full_name_list is stored as a comma-joined string per underlying)."""
    df = pd.read_csv(dataRootPathDB+"Underlying_Database/full_name.csv")
    mapping = {}
    for undl, names in zip(df["undlName"].values, df["full_name_list"].values):
        mapping[undl] = names.split(",")
    return mapping
def getUndlNameList(criterion=""):
    """Return underlyings matching `criterion`.

    "" -> every underlying; "HK"/"AX"/"SI" -> those whose RIC contains the
    exchange suffix; anything else is treated as a sector cluster name
    (None if the cluster is unknown).
    """
    if criterion == "":
        df = pd.read_csv(dataRootPathDB + "Underlying_Database/undlNameList.csv")
        return df.undlName.values
    if criterion == "HK" or criterion == "AX" or criterion == "SI":
        df = pd.read_csv(dataRootPathDB + "Underlying_Database/undlNameList.csv")
        return [name for name in df.undlName.values if criterion in name]
    df = pd.read_csv(dataRootPathDB + "Underlying_Database/sector.csv")
    sector_map = {k: v.split(",") for k, v in zip(df["Cluster"], df["undlNameList"])}
    return sector_map.get(criterion)
# create undlName full name dict
undlNameFullNameDict = createFullNameDict()
# Invert the sector table (cluster -> "a,b,c" string) into underlying -> cluster.
df = pd.read_csv(dataRootPathDB + "Underlying_Database/sector.csv")
undlSectorDict = {}
for cluster, l in zip(df["Cluster"], df["undlNameList"]):
    for u in l.split(","):
        undlSectorDict[u] = cluster
def getSector(undlName):
    """Return the sector cluster name for `undlName` (None if unmapped)."""
    return undlSectorDict.get(undlName)
# Script-run timestamp and canonical date string format.
today = datetime.datetime.now()
date_format = "%Y-%m-%d"
def checkFolderExist(path):
    """Return True if *path* refers to an existing directory."""
    return os.path.isdir(path)
def checkFileExist(path):
    """Return True if *path* refers to an existing regular file."""
    return os.path.isfile(path)
def createFolder(rootPath, folderName):
    """Create rootPath/folderName if it does not exist.

    Returns True on creation, or the string "Folder already exist" otherwise.
    NOTE(review): mixed return types (bool vs str) — callers must treat any
    truthy value as success; confirm intended.
    """
    target = rootPath + "/" + folderName
    if checkFolderExist(target):
        return "Folder already exist"
    os.mkdir(target)
    return True
def formatDate(date, fm=date_format):
    """Format a datetime/date object as a string (default "%Y-%m-%d")."""
    return date.strftime(fm)
def convertToDateObj(date, fm="%Y-%m-%d"):
    """Parse the string `date` into a datetime using format `fm`.

    Fix: the original ignored `fm` and always parsed with the module-level
    date_format; the format argument is now honoured. The default is the
    same value as the module-level date_format constant.
    """
    return datetime.datetime.strptime(date, fm)
def moveDate(date, dayDelta=0, hourDelta=0):
    """Shift `date` by dayDelta days and hourDelta hours.

    Accepts either a "%Y-%m-%d" string or a datetime object; always returns
    a datetime. Fix: hourDelta was accepted but silently ignored — it is
    now applied (default 0, so existing callers are unaffected).
    """
    delta = datetime.timedelta(days=dayDelta, hours=hourDelta)
    if type(date) == str:
        return datetime.datetime.strptime(date, date_format) + delta
    return date + delta
def PreviousBusinessDay(date, businessDateList):
    """Walk backwards one day at a time until the date (formatted
    "%Y-%m-%d") appears in businessDateList; return that date string.

    NOTE(review): loops forever if no earlier date is in the list — confirm
    callers always pass a list covering the range.
    """
    if type(date) == str:
        myDate = datetime.datetime.strptime(date, date_format)
    else:
        myDate = date
    while formatDate(myDate) not in businessDateList:
        myDate = moveDate(formatDate(myDate), -1)
    return formatDate(myDate)
def convertTimestampsToDateStr(timestamp):
    """Return the leading "YYYY-MM-DD" portion of an ISO timestamp string."""
    return timestamp[:10]
def normalize_headline(row):
    """Lower-case a headline, turn each literal '...' into a single space and
    drop every character outside a small lower-case/digit/punctuation
    whitelist."""
    allowed = set('abcdefghijklmnopqrstuvwxyz 0123456789.,;\'-:?')
    text = row.lower().replace('...', ' ')
    return ''.join(ch for ch in text if ch in allowed)
def removeStoppingWords(sent):
    """Drop NLTK English stop-words from a space-separated sentence.

    Every kept word is followed by a single space, so the result carries a
    trailing space (matching downstream expectations).
    """
    kept = [w + " " for w in sent.split(" ") if w not in stoppingWordSet]
    return "".join(kept)
def removeHeading(sent):
    """Strip known Reuters headline prefixes (matched case-insensitively,
    anywhere in the string) and return the lower-cased result.

    NOTE(review): "REFILE-BRIEF-" can never match — "BRIEF-" and "REFILE-"
    are stripped earlier in the list; confirm the ordering is intended.
    """
    prefixes = ["BRIEF-", "BUZZ -", "BUZZ-", "REFILE-", "REFILE-BRIEF-", "UPDATE ", "EXCLUSIVE-", "Reuters Insider - ", "BREAKINGVIEWS-"]
    cleaned = sent.lower()
    for prefix in prefixes:
        needle = prefix.lower()
        if needle in cleaned:
            cleaned = cleaned.replace(needle, "")
    return cleaned
def removeOthers(sent):
    """Strip corporate suffix words from a (lower-case) headline.

    NOTE(review): because "holding" is stripped before "holdings" is
    checked, "holdings" is reduced to a stray "s" rather than removed
    whole — confirm this is intended.
    """
    cleaned = sent
    for word in ["holding", "holdings", "ltd"]:
        if word in cleaned:
            cleaned = cleaned.replace(word, "")
    return cleaned
def precision(y_true, y_pred):
    """Precision of binary predictions: TP / (TP + FP).

    Returns -1 when nothing was predicted positive (avoids a zero
    division); labels/predictions are compared against the literal 1.
    """
    truths_at_predicted_positive = [t for t, p in zip(y_true, y_pred) if p == 1]
    if not truths_at_predicted_positive:
        return -1
    true_positive = sum(1 for t in truths_at_predicted_positive if t == 1)
    return true_positive / len(truths_at_predicted_positive)
def iaGetTimeSeries(undlName, field, dateFrom, dateTo):
    """Load one column (`field`) for `undlName` between dateFrom and dateTo
    (inclusive) from the local market-data CSVs; returns a Date-indexed
    single-column DataFrame. Datetime bounds are formatted to strings first.
    """
    if type(dateFrom) != str:
        dateFrom = formatDate(dateFrom)
    if type(dateTo) != str:
        dateTo = formatDate(dateTo)
    # RIC "ABC.XY" maps to file "ABC_XY.csv" under the market-data root.
    df = pd.read_csv(dataRootPathMarketData+undlName.split('.')[0] + '_'+undlName.split('.')[1]+'.csv')
    df = df[(df.Date >= dateFrom) & (df.Date <= dateTo)]
    df = df.set_index(["Date"])
    return pd.DataFrame(df[field])
def createUndlDataFrame(undlName, undlNameFullNameList, newsSource, filterFuncList, dateFrom, dateTo,
                        benchmark = ""):
    """Build a per-underlying news/return DataFrame for the date range.

    Concatenates the daily headline CSVs, attaches 1/2/3-day forward log
    returns (benchmark-relative when `benchmark` is given), then keeps only
    `newsSource` headlines that, after applying every function in
    `filterFuncList`, contain one of the names in `undlNameFullNameList`.
    Returns columns: date, undlName, sourceCode, storyId, text,
    oneDayReturn, twoDayReturn, threeDayReturn (empty frame if no data).
    """
    print("Loading", undlName, dateFrom, dateTo, end=" ")
    # get news headlines
    df_list = []
    dateRef = datetime.datetime.strptime(dateFrom, date_format)
    while dateRef <= datetime.datetime.strptime(dateTo, date_format):
        df_list.append(pd.read_csv(dataRootPathNews + formatDate(dateRef) + "/" + undlName + "_headlines.csv"))
        dateRef = moveDate(dateRef, 1)
    news_df = pd.concat(df_list, axis=0)
    # rename and sort columns
    cols = news_df.columns
    news_df.columns = ["timestamp"] + list(cols[1:])
    news_df = news_df.sort_values(["timestamp"])
    news_df.loc[:,"date"] = news_df["versionCreated"].apply(convertTimestampsToDateStr)
    # return empty df if no data
    if news_df.shape[0] == 0:
        print(" done")
        return pd.DataFrame({"date": [], "undlName":[], "sourceCode": [], "storyId":[], "text": [], "oneDayReturn": [], "twoDayReturn": [], "threeDayReturn": []})
    # get market data (padded 10 calendar days either side of the news range)
    start = min(news_df.date)
    end = max(news_df.date)
    spot_df = iaGetTimeSeries(undlName, "CLOSE", moveDate(start, -10), moveDate(end, 10))
    if benchmark != "":
        spot_df_benchmark = iaGetTimeSeries(benchmark, "CLOSE", moveDate(start, -10), moveDate(end, 10))
        spot_df_benchmark = spot_df_benchmark.loc[spot_df.index]
    # truncate news_df when stock has limited historical data
    news_df = news_df[(news_df.date >= min(spot_df.index))]
    # create one day, two day and three day change columns
    # (benchmark branch: stock log return minus benchmark log return)
    if benchmark != "":
        spot_df.loc[:,"Future-1"] = spot_df.CLOSE.shift(-1)
        spot_df.loc[:,"Future-2"] = spot_df.CLOSE.shift(-2)
        spot_df.loc[:,"Future-3"] = spot_df.CLOSE.shift(-3)
        spot_df = spot_df.iloc[:-3,]
        spot_df_benchmark.loc[:,"Future-1"] = spot_df_benchmark.CLOSE.shift(-1)
        spot_df_benchmark.loc[:,"Future-2"] = spot_df_benchmark.CLOSE.shift(-2)
        spot_df_benchmark.loc[:,"Future-3"] = spot_df_benchmark.CLOSE.shift(-3)
        spot_df_benchmark = spot_df_benchmark.iloc[:-3,]
        spot_df.loc[:,"oneDayReturn"] = \
            np.log(spot_df["Future-1"].values / spot_df["CLOSE"].values)-np.log(spot_df_benchmark["Future-1"].values / spot_df_benchmark["CLOSE"].values)
        spot_df.loc[:,"twoDayReturn"] = \
            np.log(spot_df["Future-2"].values / spot_df["CLOSE"].values)-np.log(spot_df_benchmark["Future-2"].values / spot_df_benchmark["CLOSE"].values)
        spot_df.loc[:,"threeDayReturn"] = \
            np.log(spot_df["Future-3"].values / spot_df["CLOSE"].values)-np.log(spot_df_benchmark["Future-3"].values / spot_df_benchmark["CLOSE"].values)
    else:
        spot_df.loc[:,"Future-1"] = spot_df.CLOSE.shift(-1)
        spot_df.loc[:,"Future-2"] = spot_df.CLOSE.shift(-2)
        spot_df.loc[:,"Future-3"] = spot_df.CLOSE.shift(-3)
        spot_df = spot_df.iloc[:-3,]
        spot_df.loc[:,"oneDayReturn"] = np.log(spot_df["Future-1"].values / spot_df["CLOSE"].values)
        spot_df.loc[:,"twoDayReturn"] = np.log(spot_df["Future-2"].values / spot_df["CLOSE"].values)
        spot_df.loc[:,"threeDayReturn"] = np.log(spot_df["Future-3"].values / spot_df["CLOSE"].values)
    oneDayReturnDict = {d:v for d,v in zip(spot_df.index, spot_df["oneDayReturn"])}
    twoDayReturnDict = {d:v for d,v in zip(spot_df.index, spot_df["twoDayReturn"])}
    threeDayReturnDict = {d:v for d,v in zip(spot_df.index, spot_df["threeDayReturn"])}
    # create concat df, news and log-chg: each headline gets the returns of
    # the nearest business day at or before its date
    businessDateList = list(spot_df.index)
    d = news_df.date.values
    oneDay = []
    twoDay = []
    threeDay = []
    for i in range(len(news_df)):
        oneDay.append(oneDayReturnDict[PreviousBusinessDay(d[i], businessDateList)])
        twoDay.append(twoDayReturnDict[PreviousBusinessDay(d[i], businessDateList)])
        threeDay.append(threeDayReturnDict[PreviousBusinessDay(d[i], businessDateList)])
    news_df.loc[:,"oneDayReturn"] = oneDay
    news_df.loc[:,"twoDayReturn"] = twoDay
    news_df.loc[:,"threeDayReturn"] = threeDay
    # data preprocessing: keep one source, lower-case, apply filters, then
    # keep only headlines mentioning one of the company's full names
    fil_df = news_df[news_df["sourceCode"]==newsSource]
    fil_df.loc[:,"text"] = fil_df.text.apply(lambda x: x.lower())
    for f in filterFuncList:
        fil_df.loc[:,"text"] = fil_df.text.apply(f).values
    tmp = []
    for name in undlNameFullNameList:
        tmp.append(fil_df[fil_df.text.apply(lambda x: name in x)])
    fil_df = pd.concat(tmp, axis=0)
    if fil_df.shape[0] == 0:
        df = pd.DataFrame({"date": [], "undlName":[], "sourceCode": [], "storyId":[], "text": [], "oneDayReturn": [], "twoDayReturn": [], "threeDayReturn": []})
    else:
        fil_df["undlName"] = [undlName for i in range(len(fil_df))]
        df = fil_df[["date", "undlName", "sourceCode", "storyId", "text", "oneDayReturn", "twoDayReturn", "threeDayReturn"]]
    print(" done")
    return df
def elmo_vector(x):
    """Embed sentences with the module-level ELMo hub module, mean-pooled
    over tokens (downstream build_model consumes 1024-dim vectors).

    Accepts a list of strings or anything with .tolist() (e.g. ndarray).
    NOTE(review): opens a fresh TF session per call — expensive; confirm
    acceptable for the batch sizes used.
    """
    if type(x) == list:
        embeddings = elmo(x, signature="default", as_dict=True)["elmo"]
    else:
        embeddings = elmo(x.tolist(), signature="default", as_dict=True)["elmo"]
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        sess.run(tf.tables_initializer())
        # Average over the token axis -> one vector per sentence.
        return sess.run(tf.reduce_mean(embeddings, 1))
def build_model():
    """Binary classifier head over a 1024-dim ELMo sentence embedding:
    dropout(0.2) -> single sigmoid unit; Adam + binary cross-entropy,
    tracking precision."""
    input_layer = keras.layers.Input(shape=(1024,))
    h = keras.layers.Dropout(rate=0.2)(input_layer)
    prediction = keras.layers.Dense(1, activation="sigmoid")(h)
    model = keras.Model(inputs=[input_layer], outputs=prediction)
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=[keras.metrics.Precision()])
    return model
def createNewsHeadlinePrediction(ex, sector_list):
undlNameList = getUndlNameList(ex)
for sector in sector_list:
undlNameList += getUndlNameList(sector)
start_date = formatDate(today)
end_date = formatDate(today)
#resultDict = {"undlName":[], "bull_signals":[], "bear_signals":[]}
# load model
#market_bull_model = build_model()
#market_bull_model.reset_states()
#market_bull_model.load_weights(modelPath + ex + "_market_bull_model.h5")
#market_bear_model = build_model()
#market_bear_model.reset_states()
#market_bear_model.load_weights(modelPath + ex + "_market_bear_model.h5")
#sectorBullModelDict = {}
#sectorBearModelDict = {}
#for sector in sector_list:
#model = build_model()
#model.reset_states()
#model.load_weights(modelPath + sector + "_bull_model.h5")
#sectorBullModelDict[sector] = model
#model = build_model()
#model.reset_states()
#model.load_weights(modelPath + sector + "_bear_model.h5")
#sectorBearModelDict[sector] = model
tmp = []
for undlName in undlNameList:
tmp_df = createUndlDataFrame(undlName, undlNameFullNameDict[undlName], "NS:RTRS",
[removeHeading, normalize_headline, removeOthers],
start_date, end_date, "")
tmp_df = tmp_df.drop_duplicates(subset='storyId')
tmp_df = tmp_df.sort_values(["date"])
if len(tmp_df) != 0: tmp.append(tmp_df)
if len(tmp) != 0:
df = pd.concat(tmp, axis=0)
else:
print("No News Headlines")
return True
print(df.shape)
# create ELMo Vector
#batch = [df["text"].values[i:i+100] for i in range(0, df.shape[0], 100)]
#batch_elmo = [elmo_vector(x) for x in batch]
#elmo_vector_list = np.concatenate(batch_elmo, axis=0)
#market_bull_model_result = market_bull_model.predict(elmo_vector_list).reshape(-1)
#market_bear_model_result = market_bear_model.predict(elmo_vector_list).reshape(-1)
#sector_bull_model_result = []
#sector_bear_model_result = []
#i = 0
#for undlName in df["undlName"].values:
# sector_bull_model = sectorBullModelDict[getSector(undlName)]
# sector_bear_model = sectorBearModelDict[getSector(undlName)]
# sector_bull_model_result += list(sector_bull_model.predict(elmo_vector_list[i].reshape(1, -1)).reshape(-1))
# sector_bear_model_result += list(sector_bear_model.predict(elmo_vector_list[i].reshape(1, -1)).reshape(-1))
# i += 1
#sector_bull_model_result = np.array(sector_bull_model_result)
#sector_bear_model_result = np.array(sector_bear_model_result)
#resultDict["undlName"] += list(df["undlName"].values)
#resultDict["bull_signals"] += [1 if i > 1 else 0 for i in market_bull_model_result + sector_bull_model_result]
#resultDict["bear_signals"] += [1 if i > 1 else 0 for i in market_bear_model_result + sector_bear_model_result]
#result_df = pd.DataFrame.from_dict(resultDict)
#to_drop = [i for i in range(result_df.shape[0]) if result_df.iloc[i, 1] == 0 and result_df.iloc[i, 2] == 0]
#result_df = result_df.drop(to_drop)
result_df = df.loc[:,["undlName", "text"]]
result_df.to_csv(r"D:/python/EventDriven/result/" + formatDate(today) + "_" + ex + ".csv")
return True
def main():
sector_list = ["Tencent", "Chinese_Bank", "Chinese_Insurance", "Chinese_Oil", "Chinese_Auto",
"Chinese_Telecom", "Chinese_Industrial", "HK_Property", "HK_Bank"]
createNewsHeadlinePrediction(ex="HK", sector_list=[])
sector_list = ["AX_Bank"]
createNewsHeadlinePrediction(ex="AX", sector_list=[])
createNewsHeadlinePrediction(ex="SI", sector_list=[])
if __name__=="__main__":
main()
| Python | 400 | 35.645 | 162 | /News_Headlines_Prediction.py | 0.639241 | 0.631123 |
hoichunlaw/EventDriven | refs/heads/master | import eikon as ek
import numpy as np
import pandas as pd
import os
import zipfile
import datetime
import cufflinks as cf
import configparser as cp
ek.set_app_key('e4ae85e1e08b47ceaa1ee066af96cabe6e56562a')
dataRootPath = r"D:/Eikon_Data/"
dataRootPathNews = r"D:/Eikon_Data/News/"
dataRootPathMarketData = r"D:/Eikon_Data/Market_Data/"
databasePath = r"D:/Database/"
zipFolderPath = r"D:/Zip_Folder/"
date_format = "%Y-%m-%d"
def checkFolderExist(path):
return os.path.isdir(path)
def checkFileExist(path):
return os.path.isfile(path)
def createFolder(rootPath, folderName):
if rootPath[-1] == "/":
myRootPath = rootPath[:-1]
else:
myRootPath = rootPath
if not checkFolderExist(myRootPath+"/"+folderName):
os.mkdir(myRootPath+"/"+folderName)
return True
else:
return "Folder already exist"
def formatDate(date, fm=date_format):
return date.strftime(fm)
def moveDate(date, dayDelta=0, hourDelta=0):
if type(date) == str:
return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta) + datetime.timedelta(hours=hourDelta)
else:
return date + datetime.timedelta(days=dayDelta) + + datetime.timedelta(hours=hourDelta)
def zipdir(path, ziph):
# ziph is zipfile handle
for root, dirs, files in os.walk(path):
for file in files:
ziph.write(os.path.join(root, file))
def iaZipFolder(path):
if path[-1] == '/':
zipFileName = path.split("/")[-2] + "_zip.zip"
else:
zipFileName = path.split("/")[-1] + "_zip.zip"
if checkFileExist(zipFolderPath + zipFileName): os.remove(zipFolderPath + zipFileName)
zipf = zipfile.ZipFile(zipFolderPath + zipFileName, 'w', zipfile.ZIP_DEFLATED)
zipdir(path, zipf)
zipf.close()
def downloadNews(undlName, date, savePath):
if not checkFolderExist(savePath + formatDate(date)):
createFolder(savePath, formatDate(date))
# download data
df = ek.get_news_headlines("R:"+undlName+" and english",
date_from=formatDate(moveDate(date,-1)) + "T16:00:00",
date_to=formatDate(moveDate(date)) + "T16:00:00",
count=100)
# move date back to HK time
df.index = moveDate(np.array(list(df.index)),0,8)
df.versionCreated = moveDate(np.array(list(df.versionCreated)),0,8)
# save data
df.to_csv(savePath + formatDate(date) + "/" + undlName + "_headlines.csv")
def downloadHistoricalNews(undlName, dateFrom, dateTo, savePath):
if type(dateFrom) == str:
myDateFrom = datetime.datetime.strptime(dateFrom, date_format)
else:
myDateFrom = dateFrom
if type(dateTo) == str:
myDateTo = datetime.datetime.strptime(dateTo, date_format)
else:
myDateTo = dateTo
dateRef = myDateFrom
while dateRef <= myDateTo:
print("Download", undlName, dateRef)
downloadNews(undlName, dateRef, savePath)
dateRef = moveDate(dateRef, 1)
def downloadMarketData(undlName, date, savePath):
# download data
try:
df_new = ek.get_timeseries(undlName, fields=["CLOSE", "HIGH", "LOW", "OPEN", "VOLUME"],
start_date=formatDate(date), end_date=formatDate(date), interval="daily", corax="adjusted")
except:
df_new = []
if type(df_new) == pd.core.frame.DataFrame:
myUndlName = undlName.split('.')[0] + '_' + undlName.split('.')[1]
df_new.index = pd.Series(df_new.index).apply(formatDate)
if checkFileExist(savePath + myUndlName + ".csv"):
df = pd.read_csv(savePath + myUndlName + ".csv")
df = df.set_index("Date")
if df_new.index[0] not in list(df.index):
df = pd.concat([df, df_new], axis=0)
df.to_csv(savePath + myUndlName + ".csv")
else:
df_new.to_csv(savePath + myUndlName + ".csv")
def downloadHistoricalMarketData(undlName, dateFrom, dateTo, savePath):
# download data
df = ek.get_timeseries(undlName, fields=["CLOSE", "HIGH", "LOW", "OPEN", "VOLUME"],
start_date=dateFrom, end_date=dateTo, interval="daily", corax="adjusted")
df.index = pd.Series(df.index).apply(formatDate)
myUndlName = undlName.split('.')[0] + '_' + undlName.split('.')[1]
df.to_csv(savePath + myUndlName + ".csv")
def main():
today = datetime.datetime.now()
df = pd.read_csv(r'D:/Database/Underlying_Database/undlNameList.csv')
undlNameList = list(df.undlName.values)
# download News Headlines
for undlName in undlNameList:
print("Download", undlName, today)
downloadNews(undlName, today, dataRootPathNews)
if __name__=="__main__":
main()
| Python | 136 | 34.051472 | 134 | /Downloader.py | 0.632893 | 0.621565 |
hoichunlaw/EventDriven | refs/heads/master | import numpy as np
import pandas as pd
import os
import win32com.client as win32
import datetime
path = r"D:/python/EventDriven/result/"
date_format = "%Y-%m-%d"
def formatDate(date, fm=date_format):
return date.strftime(fm)
def moveDate(date, dayDelta=0, hourDelta=0):
if type(date) == str:
return datetime.datetime.strptime(date, date_format) + datetime.timedelta(days=dayDelta) + datetime.timedelta(hours=hourDelta)
else:
return date + datetime.timedelta(days=dayDelta) + + datetime.timedelta(hours=hourDelta)
def email(to, sub, HTMLBody, attachmentURLList):
outlook = win32.Dispatch('outlook.application')
mail = outlook.CreateItem(0)
mail.To = to
mail.Subject = sub
mail.HTMLBody = HTMLBody
for url in attachmentURLList:
if os.path.exists(url): mail.Attachments.Add(url)
mail.Send()
def main():
today = datetime.datetime.now()
url1 = path + formatDate(today) + "_HK.csv"
url2 = path + formatDate(today) + "_AX.csv"
url3 = path + formatDate(today) + "_SI.csv"
email("isaac.law@rbccm.com", "_News_", "", [url1, url2, url3])
if __name__=="__main__":
main()
| Python | 37 | 29.837837 | 134 | /Email.py | 0.667835 | 0.654689 |
teslaworksumn/munchi-pi-api | refs/heads/master | import time
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins
GPIO.setup(8,GPIO.OUT) #pin 8, GPIO15
while True:
'''test pin8 GPIO15'''
GPIO.output(8,1) #output high to pin 8
time.sleep(0.5) #delay 0.5 sec
GPIO.output(8,0) #output low to pin 8
time.sleep(0.5)
| Python | 12 | 24.833334 | 69 | /port_test.py | 0.714744 | 0.653846 |
teslaworksumn/munchi-pi-api | refs/heads/master | import time
import RPi.GPIO as GPIO
import Adafruit_ADS1x15
THERMISTORVALUE 100000
SERIESRESISTOR 100000 #series resistor to thermistor
BCOEFFICIENT 4072
thermistorR2Temp = {3.2575:0, 2.5348:5, 1.9876:10, 1.5699:15, 1.2488:20, 1.0000:25, 0.80594:30, 0.65355:35, 0.53312:40, 0.43735:45, 0.36074:50, 0.29911:55, 0.24925:60, 0.20872:65, 0.17558:70, 0.14837:75, 0.12592:80, 0.10731:85, 0.091816:90, 0.078862:95, 0.067988:100, 0.058824:105, 0.051071:110}
GPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins
GPIO.setup(19,GPIO.out) #pin 19, GPIO12 output
GPIO.setup(26,GPIO.out) #pin 26, GPIO07 output
adc = Adafruit_ADS1x15.ADS1015() #create an ADS1015 ADC (12-bit) instance.
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
GAIN = 1
while True:
reading = adc.read_adc(0, gain=GAIN) #read A0, 12 bit signed integer, -2048 to 2047 (0=GND, 2047=4.096*gain)
voltReading = reading * 4.096 / 2047.0 #convert adc to voltage
thermoR = SERIESRESISTOR / ((4.0/voltReading) - 1)#convert voltage to thermoster resistance
#7002 thermistor
#temp =
print ("reading: " + reading)
print ("thermistor resistance: " + thermoR)
#print ("temp: " + temp)
| Python | 36 | 39.083332 | 295 | /munchi_rasp_pi.py | 0.690922 | 0.474012 |
teslaworksumn/munchi-pi-api | refs/heads/master | import time
import RPi.GPIO as GPIO
import Adafruit_ADS1x15
GPIO.setmode(GPIO.BOARD) #pin numbering scheme uses board header pins
GPIO.setup(8,GPIO.OUT) #pin 8, GPIO15
adc = Adafruit_ADS1x15.ADS1015() #create an ADS1015 ADC (12-bit) instance.
# Choose a gain of 1 for reading voltages from 0 to 4.09V.
# Or pick a different gain to change the range of voltages that are read:
# - 2/3 = +/-6.144V
# - 1 = +/-4.096V
# - 2 = +/-2.048V
# - 4 = +/-1.024V
# - 8 = +/-0.512V
# - 16 = +/-0.256V
# See table 3 in the ADS1015/ADS1115 datasheet for more info on gain.
GAIN = 1
while True:
'''test adc'''
reading = adc.read_adc(0, gain=GAIN) #read A0, 12 bit signed integer, -2048 to 2047 (0=GND, 2047=4.096*gain)
#reading = adc.read_adc(0) #gain defaults to 1
print(reading)
if reading < 1000:
GPIO.output(8,0)
else:
GPIO.output(8.1)
time.sleep(0.5)
| Python | 30 | 27.966667 | 109 | /adc_test.py | 0.662831 | 0.547756 |
romkof/CarND-Behavioral-Cloning-P3 | refs/heads/master | import os
import csv
import cv2
import numpy as np
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
data_path = 'record'
samples = []
with open( data_path + '/driving_log.csv') as csvfile:
reader = csv.reader(csvfile)
for line in reader:
samples.append(line)
train_samples, validation_samples = train_test_split(samples, test_size=0.2)
def generator(samples, batch_size=32):
num_samples = len(samples)
while 1: # Loop forever so the generator never terminates
shuffle(samples)
for offset in range(0, num_samples, batch_size):
batch_samples = samples[offset:offset+batch_size]
images = []
angles = []
for batch_sample in batch_samples:
def get_image_path(row):
return data_path + '/IMG/'+batch_sample[row].split('/')[-1]
def read_image(path):
img = cv2.imread(path)
return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
center_image_path = get_image_path(0)
left_image_path = get_image_path(1)
right_image_path = get_image_path(2)
center_image = read_image(center_image_path)
left_image = read_image(left_image_path)
right_image = read_image(right_image_path)
correction = 0.25 # this is a parameter to tune
center_angle = float(batch_sample[3])
left_angle = center_angle + correction
right_angle = center_angle - correction
fliped_center_image = cv2.flip(center_image, 1)
fliped_center_angle = center_angle*-1.0
images.extend((center_image, left_image, right_image, fliped_center_image))
angles.extend((center_angle, left_angle, right_angle, fliped_center_angle))
# trim image to only see section with road
X_train = np.array(images)
y_train = np.array(angles)
yield sklearn.utils.shuffle(X_train, y_train)
# compile and train the model using the generator function
train_generator = generator(train_samples, batch_size=32)
validation_generator = generator(validation_samples, batch_size=32)
ch, row, col = 3, 80, 320 # Trimmed image format
from keras.models import Sequential, Model
from keras.layers import Cropping2D, Lambda, Convolution2D, Flatten, Dense, Dropout
import tensorflow as tf
import cv2
def resize_image(x):
from keras.backend import tf as ktf
return ktf.image.resize_images(x, (66, 200))
model = Sequential()
model.add(Lambda(lambda x: x/255.0 - 0.5, input_shape=(160,320,3)))
model.add(Cropping2D(cropping=((70,25),(0,0))))
model.add(Lambda(resize_image))
model.add(Convolution2D(24,5,5, subsample=(2,2), activation ="relu"))
model.add(Convolution2D(36,5,5, subsample=(2,2), activation ="relu"))
model.add(Convolution2D(48,5,5, subsample=(2,2), activation ="relu"))
model.add(Convolution2D(64,3,3, activation ="relu"))
model.add(Convolution2D(64,3,3, activation ="relu"))
model.add(Flatten())
model.add(Dense(100))
model.add(Dense(50))
model.add(Dropout(0.7))
model.add(Dense(10))
model.add(Dropout(0.7))
model.add(Dense(1))
model.compile(loss='mse', optimizer='adam')
model.summary()
history_object = model.fit_generator(train_generator, samples_per_epoch=
len(train_samples), validation_data=validation_generator,
nb_val_samples=len(validation_samples), nb_epoch=40)
model.save("model.h5") | Python | 102 | 34.92157 | 91 | /model.py | 0.627012 | 0.597817 |
Jmbac0n/randomiser | refs/heads/master | # Simple script that generates a random
# combination of words from separate strings
colours = ['red','blue','green'.'yellow']
shapes = ['circle','square','triangle','star']
import random
x = random.randint(0, 2)
y = random.randint(0, 2)
combination = colours[x] + (" ") + shapes[y]
print(combination)
| Python | 13 | 22.615385 | 46 | /randomiser.py | 0.65625 | 0.64375 |
prateeksahu10/web-api | refs/heads/master | import requests
response=requests.get("https://api.forismatic.com/api/1.0/?method=getQuote&lang=en&format=text")
print(response.content)
| Python | 3 | 44.666668 | 96 | /assignment13.py | 0.79562 | 0.781022 |
Alfredjoy/Ecom_project | refs/heads/master | from django.urls import path, include
from store import views
urlpatterns = [
path('',views.store,name='store'),
path('cart',views.cart, name='cart'),
path('checkout',views.checkout, name='checkout')
]
| Python | 10 | 20.700001 | 52 | /store/urls.py | 0.677419 | 0.677419 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class ServerToClientHandshakePacket(DataPacket):
NID = ProtocolInfo.SERVER_TO_CLIENT_HANDSHAKE_PACKET
jwt = None
def canBeSentBeforeLogin():
return True
def decodePayload(self):
self.jwt = self.getString()
def encodePayload(self):
self.putString(self.jwt)
| Python | 29 | 29.206896 | 77 | /src/podrum/network/protocol/ServerToClientHandshakePacket.py | 0.584475 | 0.583333 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
class Facing:
AXIS_Y = 0
AXIS_Z = 1
AXIS_X = 2
FLAG_AXIS_POSITIVE = 1
DOWN = AXIS_Y << 1
UP = (AXIS_Y << 1) | FLAG_AXIS_POSITIVE
NORTH = AXIS_Z << 1
SOUTH = (AXIS_Z << 1) | FLAG_AXIS_POSITIVE
WEST = AXIS_X << 1
EAST = (AXIS_X << 1) | FLAG_AXIS_POSITIVE
ALL = [
DOWN,
UP,
NORTH,
SOUTH,
WEST,
EAST
]
HORIZONTAL = [
NORTH,
SOUTH,
WEST,
EAST
]
CLOCKWISE = {
AXIS_Y: {
NORTH: EAST,
EAST: SOUTH,
SOUTH: WEST,
WEST: NORTH
},
AXIS_Z: {
UP: EAST,
EAST: DOWN,
DOWN: WEST,
WEST: UP
},
AXIS_X: {
UP: NORTH,
NORTH: DOWN,
DOWN: SOUTH,
SOUTH: UP
}
}
@staticmethod
def axis(direction):
return direction >> 1
@staticmethod
def is_positive(direction):
return (direction & Facing.FLAG_AXIS_POSITIVE) == Facing.FLAG_AXIS_POSITIVE
@staticmethod
def opposite(direction):
return direction ^ Facing.FLAG_AXIS_POSITIVE
@staticmethod
def rotate(direction, axis, clockwise):
if not Facing.CLOCKWISE[axis]:
raise ValueError("Invalid axis {}".format(axis))
if not Facing.CLOCKWISE[axis][direction]:
raise ValueError("Cannot rotate direction {} around axis {}".format(direction, axis))
rotated = Facing.CLOCKWISE[axis][direction]
return rotated if clockwise else Facing.opposite(rotated)
@staticmethod
def validate(facing):
if facing in Facing.ALL:
raise ValueError("Invalid direction {}".format(facing))
| Python | 91 | 23.32967 | 97 | /src/podrum/math/Facing.py | 0.500903 | 0.495483 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class ClientToServerHandshakePacket(DataPacket):
NID = ProtocolInfo.CLIENT_TO_SERVER_HANDSHAKE_PACKET
def canBeSentBeforeLogin():
return True
def encodePayload(): pass
def decodePayload(): pass
| Python | 25 | 30.719999 | 77 | /src/podrum/network/protocol/ClientToServerHandshakePacket.py | 0.586381 | 0.58512 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
#!/usr/bin/env python3
import sys
import inspect
from os import getcwd, path
from threading import Thread
sys.path.insert(0, path.dirname(path.dirname(path.abspath(inspect.getfile(inspect.currentframe())))))
from podrum.Server import Server
if __name__ == "__main__":
if len(sys.argv) >= 3:
if sys.argv[1] == "--no_wizard" and sys.argv[2] == "-travis":
serverThread = Thread(target=Server, args=(getcwd(), False, True))
else:
print("[!] None valid args selected.")
serverThread = Thread(target=Server, args=(getcwd(), True))
elif len(sys.argv) == 2:
if sys.argv[1] == "--no_wizard":
serverThread = Thread(target=Server, args=(getcwd(), False))
else:
print("[!] None valid args selected.")
serverThread = Thread(target=Server, args=(getcwd(), True))
else:
serverThread = Thread(target=Server, args=(getcwd(), True))
serverThread.start()
| Python | 38 | 36.394737 | 101 | /src/podrum/Podrum.py | 0.540781 | 0.535298 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.ClientToServerHandshakePacket import ClientToServerHandshakePacket
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.DisconnectPacket import DisconnectPacket
from podrum.network.protocol.LoginPacket import LoginPacket
from podrum.network.protocol.PlayStatusPacket import PlayStatusPacket
from podrum.network.protocol.ResourcePacksInfoPacket import ResourcePacksInfoPacket
from podrum.network.protocol.ServerToClientHandshakePacket import ServerToClientHandshakePacket
class PacketPool:
packetPool = {}
def __init__(self):
self.registerPackets()
def registerPacket(packet):
self.pool[packet.NID] = packet.copy()
def registerPackets(self):
self.registerPacket(ClientToServerHandshakePacket)
self.registerPacket(DisconnectPacket)
self.registerPacket(LoginPacket)
self.registerPacket(PlayStatusPacket)
self.registerPacket(ResourcePacksInfoPacket)
self.registerPacket(ServerToClientHandshakePacket)
| Python | 37 | 39.945946 | 95 | /src/podrum/network/PacketPool.py | 0.683727 | 0.683071 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import base64
import binascii
import json
import os
import signal
import sys
import socket
import time
import urllib
import hmac
import hashlib
class Utils:
def getOS():
if sys.platform == 'linux' or sys.platform == 'linux2':
return 'linux'
elif sys.platform == 'darwin':
return 'osx'
elif sys.platform == 'win32' or sys.platform == 'win64':
return 'windows'
def killServer():
os.kill(os.getpid(), signal.SIGTERM)
def getPrivateIpAddress():
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ip = s.getsockname()[0]
return ip
def getPublicIpAddress():
ip = urllib.request.urlopen('https://ident.me').read().decode('utf8')
return ip
def microtime(get_as_float = False) :
if get_as_float:
return time.time()
else:
return '%f %d' % math.modf(time.time())
def substr(string, start, length = None):
if start < 0:
start = start + len(string)
if not length:
return string[start:]
elif length > 0:
return string[start:start + length]
else:
return string[start:length]
def hex2bin(hexdec):
if hexdec == 'x':
return False
if hexdec == '':
return False
dec = int(hexdec, 16)
b = binascii.unhexlify('%x' % dec)
return b
def binToHex(b):
return binascii.hexlify(b)
def HMACSHA256(data, secret):
encodedData = data.encode()
byteSecret = secret.encode()
return hmac.new(byteSecret, encodedData, hashlib.sha256).hexdigest().upper()
def base64UrlEncode(data):
return base64.urlsafe_b64encode(data.encode()).replace(b"=", b"").decode()
def base64UrlDecode(data):
return base64.urlsafe_b64decode(data).decode()
def encodeJWT(header, payload, secret):
body = Utils.base64UrlEncode(json.dumps(header)) + "." + Utils.base64UrlEncode(json.dumps(payload))
secret = Utils.HMACSHA256(body, secret)
return body + "." + Utils.base64UrlEncode(secret)
def decodeJWT(token: str):
[headB64, payloadB64, sigB64] = token.split(".")
rawPayloadJSON = Utils.base64UrlDecode(payloadB64)
if rawPayloadJSON == False:
raise Exception("Payload base64 is invalid and cannot be decoded")
decodedPayload = json.loads(rawPayloadJSON)
if isinstance(decodedPayload, str):
decodedPayload = json.loads(decodedPayload)
if not isinstance(decodedPayload, dict):
raise Exception("Decoded payload should be dict, " + str(type(decodedPayload).__name__) + " received")
return decodedPayload
| Python | 102 | 31.637255 | 115 | /src/podrum/utils/Utils.py | 0.565635 | 0.547612 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.utlis.Binary import Binary
from podrum.utlis.UUID import UUID
class BinaryStream:
buffer = ""
offset = None
def __int__(self, buffer = "", offset = 0):
self.buffer = buffer
self.offset = offset
def reset(self):
self.buffer = ""
self.offset = 0
def setBuffer(self, buffer = "", offset = 0):
self.buffer = buffer
self.offset = int(offset)
def getOffset(self):
return self.offset
def getBuffer(self):
return self.buffer
def get(self, len):
if len < 0:
self.offset = len(self.buffer) - 1;
return ""
elif len == True:
str = self.buffer[0:self.offset]
self.offset = len(self.buffer)
return str
buffer = self.buffer[self.offset:self.offset+len]
self.offset += length
return buffer
def put(self, str):
self.buffer += str
def getBool(self):
return self.get(1) != b'\x00'
def putBool(self, v):
self.buffer += (b"\x01" if v else b"\x00")
def getByte(self):
self.offset += 1
return ord(self.buffer[self.offset])
def putByte(self, v):
self.buffer += chr(v)
def getLong(self):
return Binary.readLong(self.get(8))
def putLong(self, v):
self.buffer += Binary.writeLong(v)
def getLLong(self):
return Binary.readLLong(self.get(8))
def putLLong(self, v):
self.buffer += Binary.writeLLong(v)
def getInt(self):
return Binary.readInt(self.get(4))
def putInt(self, v):
self.buffer += Binary.writeInt(v)
def getLInt(self):
return Binary.readLInt(self.get(4))
def putLInt(self, v):
self.buffer += Binary.writeLInt(v)
def getShort(self):
return Binary.readShort(self.get(2))
def putShort(self, v):
self.buffer += Binary.writeShort(v)
def getLShort(self):
return Binary.readLShort(self.get(2))
def putLShort(self, v):
self.buffer += Binary.writeLShort(v)
def getSignedShort(self):
return Binary.readSignedShort(self.get(2))
def getSignedLShort(self):
return Binary.readSignedLShort(self.get(4))
def getFloat(self):
return Binary.readFloat(self.get(4))
def putFloat(self, v):
self.buffer += Binary.writeFloat(v)
def getLFloat(self):
return Binary.readLFloat(self.get(4))
def putLFloat(self, v):
self.buffer += Binary.writeLFloat(v)
def getRoundedFloat(self, accuracy):
return Binary.readRoundedFloat(self.get(4), accuracy)
def getRoundedLFloat(self, accuracy):
return Binary.readRoundedLFloat(self.get(4), accuracy)
def getTriad(self):
return Binary.readTriad(self.get(3))
def putTriad(self, v):
self.buffer += Binary.writeTriad(v)
def getLTriad(self):
return Binary.readLTriad(self.get(3))
def putLTriad(self, v):
self.buffer += Binary.writeLTriad(v)
def getUnsignedVarInt(self):
return Binary.readUnsignedVarInt(self.buffer, self.offset)
def putUnsignedVarInt(self, v):
self.put(Binary.writeUnsignedVarInt(v))
def getVarInt(self):
return Binary.readVarInt(self.buffer, self.offset)
def putVarInt(self, v):
self.put(Binary.writeVarInt(v))
def getUnsignedVarLong(self):
return Binary.readUnsignedVarLong(self.buffer, self.offset)
def putUnsignedVarLong(self, v):
self.put(Binary.writeUnsignedVarLong(v))
def getVarLong(self):
return Binary.readVarLong(self.buffer, self.offset)
def putVarLong(self, v):
self.put(Binary.writeVarLong(v))
def getString(self):
self.get(self.getUnsignedVarInt())
def putString(self, v):
self.putUnsignedVarInt(len(v))
self.put(v)
def getUUID(self):
part1 = self.getLInt()
part0 = self.getLInt()
part3 = self.getLInt()
part2 = self.getLInt()
return UUID(part0, part1, part2, part3)
def putUUID(self, uuid: UUID):
self.putLInt(uuid.getPart(1))
self.putLInt(uuid.getPart(0))
self.putLInt(uuid.getPart(3))
self.putLInt(uuid.getPart(2))
def feof(self):
try:
self.buffer[self.offset]
return True
except IndexError:
return False
| Python | 188 | 26.143618 | 77 | /src/podrum/utils/BinaryStream.py | 0.560847 | 0.552812 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from struct import unpack, pack, calcsize
from re import match
import decimal
import sys
from .bcmath import bcmath
class Binary:
def checkLength(string, expect):
length = len(string)
assert (length == expect), 'Expected ' + str(expect) + 'bytes, got ' + str(length)
@staticmethod
def signByte(value: int):
if calcsize == 8:
return (int(value) & 0xffffffff) >> 56
else:
return (int(value) & 0xffffffff) >> 24
@staticmethod
def unsignByte(value: int):
return int(value) & 0xff
@staticmethod
def signShort(value: int):
if calcsize == 8:
return (int(value) & 0xffffffff) >> 48
else:
return (int(value) & 0xffffffff) >> 16
@staticmethod
def unsignShort(value: int):
return int(value) & 0xffff
@staticmethod
def signInt(value: int):
if calcsize == 8:
return (int(value) & 0xffffffff) >> 32
else:
return (int(value) & 0xffffffff) >> 31
@staticmethod
def unsignInt(value: int):
return int(value) & 0xffffffff
@staticmethod
def readTriad(str: bytes) -> int:
Binary.checkLength(str, 3)
return unpack('>L', b'\x00' + str)[0]
@staticmethod
def writeTriad(value: int) -> bytes:
return pack('>L', value)[1:]
@staticmethod
def readLTriad(str: bytes) -> int:
Binary.checkLength(str, 3)
return unpack('<L', b'\x00' + str)[0]
@staticmethod
def writeLTriad(value: int) -> bytes:
return pack('<L', value)[0:-1]
@staticmethod
def readBool(b: bytes) -> int:
return unpack('?', b)[0]
@staticmethod
def writeBool(b: int) -> bytes:
return b'\x01' if b else b'\x00'
@staticmethod
def readByte(c: bytes) -> int:
Binary.checkLength(c, 1)
return unpack('>B', c)[0]
@staticmethod
def readSignedByte(c: bytes) -> int:
Binary.checkLength(c, 1)
return unpack('>b', c)[0]
@staticmethod
def writeByte(c: int) -> bytes:
return pack(">B", c)
@staticmethod
def readShort(str: bytes) -> int:
Binary.checkLength(str, 2)
return unpack('>H', str)[0]
@staticmethod
def readSignedShort(str: bytes) -> int:
Binary.checkLength(str, 2)
return Binary.signShort(Binary.readShort(str))
@staticmethod
def writeShort(value: int) -> bytes:
return pack('>H', value)
@staticmethod
def readLShort(str: bytes) -> int:
Binary.checkLength(str, 2)
return unpack('<H', str)[0]
@staticmethod
def readSignedLShort(str: bytes) -> int:
Binary.checkLength(str, 2)
return Binary.signShort(Binary.readLShort(str))
@staticmethod
def writeLShort(value: int) -> bytes:
return pack('<H', value)
@staticmethod
def readInt(str: bytes) -> int:
Binary.checkLength(str, 4)
return unpack('>L', str)[0]
@staticmethod
def writeInt(value: int) -> bytes:
return pack('>L', value)
@staticmethod
def readLInt(str: bytes) -> int:
Binary.checkLength(str, 4)
return unpack('<L', str)[0]
@staticmethod
def writeLInt(value: int) -> bytes:
return pack('<L', value)
@staticmethod
def readFloat(str: bytes) -> int:
Binary.checkLength(str, 4)
return unpack('>f', str)[0]
@staticmethod
def readRoundedFloat(str, accuracy):
return round(Binary.readFloat(str), accuracy)
@staticmethod
def writeFloat(value: int) -> bytes:
return pack('>f', value)
@staticmethod
def readLFloat(str: bytes) -> int:
Binary.checkLength(str, 4)
return unpack('<f', str)[0]
@staticmethod
def readRoundedLFloat(str, accuracy):
return round(Binary.readLFloat(str), accuracy)
@staticmethod
def writeLFloat(value: int) -> bytes:
return pack('<f', value)
@staticmethod
def printFloat(value):
return match(r"/(\\.\\d+?)0+$/", "" + value).group(1)
@staticmethod
def readDouble(str: bytes) -> int:
Binary.checkLength(str, 8)
return unpack('>d', str)[0]
@staticmethod
def writeDouble(value: int) -> bytes:
return pack('>d', value)
@staticmethod
def readLDouble(str: bytes) -> int:
Binary.checkLength(str, 8)
return unpack('<d', str)[0]
@staticmethod
def writeLDouble(value: int) -> bytes:
return pack('<d', value)
@staticmethod
def readLong(str: bytes) -> int:
Binary.checkLength(str, 8)
return unpack('>L', str)[0]
@staticmethod
def writeLong(value: int) -> bytes:
return pack('>L', value)
@staticmethod
def readLLong(str: bytes) -> int:
Binary.checkLength(str, 8)
return unpack('<L', str)[0]
@staticmethod
def writeLLong(value: int) -> bytes:
return pack('<L', value)
    @staticmethod
    def readUnsignedVarInt(buffer, offset):
        # Decode an unsigned VarInt (7 data bits per byte, high bit set =
        # continuation) from `buffer`, accumulating into a decimal string via
        # bcmath so arbitrarily large values can be represented.
        #
        # NOTE(review): `offset` is incremented BEFORE each read, so the caller
        # apparently passes the index of the byte *before* the varint -- confirm
        # against callers. The advanced offset is also never returned.
        # NOTE(review): the shift `i` is advanced to 7 before the first group is
        # accumulated, so the first 7-bit group is scaled by 2**7 rather than
        # 2**0 -- looks like an off-by-one in the port; verify round-trips
        # against writeUnsignedVarInt before relying on this.
        value = "0";
        buffer = str(buffer)
        i = 0
        while i <= 35:
            i += 7
            offset += 1
            b = ord(buffer[offset])
            # value += (b & 0x7f) * 2**i, carried out on decimal strings
            value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow("2", str(i))))
            if (b & 0x80) == 0:
                # High bit clear: this was the final byte of the varint.
                return value
            elif (len(buffer) - 1) < int(offset):
                raise TypeError('Expected more bytes, none left to read')
        raise TypeError('Varint did not terminate after 5 bytes!')
@staticmethod
def readVarInt(buffer, offset):
raw = Binary.readUnsignedVarInt(buffer, offset)
temp = bcmath.bcdiv(raw, "2")
if bcmath.bcmod(raw, "2") == "1":
temp = bcmath.bcsub(bcmath.bcmul(temp, "-1"), "1")
return temp
    @staticmethod
    def writeUnsignedVarInt(value):
        # Encode `value` as an unsigned VarInt text string (7 data bits per
        # byte, high bit = continuation) using bcmath decimal-string math.
        #
        # NOTE(review): `return buffer` sits at loop-body level, so the
        # function returns after emitting the FIRST byte; multi-byte values
        # are truncated. The return was presumably meant to live under the
        # `else:` branch -- confirm against readUnsignedVarInt before fixing.
        # NOTE(review): the final `raise` is unreachable because of that early
        # return, and `chr(...)` builds a str rather than bytes -- verify the
        # caller expects text.
        buffer = ""
        value = value & 0xffffffff  # mask to 32 bits (assumes an int input)
        if bcmath.bccomp(value, "0") == -1:
            value = bcmath.bcadd(value, "18446744073709551616")
        i = 0
        while i <= 5:
            i = i + 1
            byte = int(bcmath.bcmod(value, "128"))
            value = bcmath.bcdiv(value, "128")
            if value != 0:
                buffer += chr(byte | 0x80)
            else:
                buffer += chr(byte)
            return buffer
        raise TypeError('Value too large to be encoded as a varint')
@staticmethod
def writeVarInt(value):
value = bcmath.bcmod(bcmath.bcmul(value, "2"), "18446744073709551616")
if bcmath.bccomp(value, "0") == -1:
value = bcmath.bcsub(bcmath.bcmul(value, "-1"), "1")
return Binary.writeUnsignedVarInt(value)
    @staticmethod
    def readUnsignedVarLong(buffer, offset):
        # Decode an unsigned VarLong (7 data bits per byte, high bit set =
        # continuation) from `buffer` via bcmath decimal-string arithmetic.
        #
        # NOTE(review): same caveats as readUnsignedVarInt -- `offset` is
        # pre-incremented (caller passes the index before the first byte, and
        # the advanced offset is never returned), and the shift `i` reaches 7
        # before the first group is accumulated, scaling the whole value by
        # 2**7; verify round-trips against the writer.
        value = "0"
        buffer = str(buffer)
        i = 0
        while i <= 63:
            i += 7
            offset += 1
            b = ord(buffer[offset])
            # value += (b & 0x7f) * 2**i, on decimal strings
            value = bcmath.bcadd(value, bcmath.bcmul(str(b & 0x7f), bcmath.bcpow("2", str(i))))
            if (b & 0x80) == 0:
                # High bit clear: final byte of the varlong.
                return value
            elif (len(buffer) - 1) < int(offset):
                raise TypeError("Expected more bytes, none left to read")
        raise TypeError("VarLong did not terminate after 10 bytes!")
@staticmethod
def readVarLong(buffer, offset):
raw = Binary.readUnsignedVarLong(buffer, offset)
temp = bcmath.bcdiv(raw, "2")
if bcmath.bcmod(raw, "2") == "1":
temp = bcmath.bcsub(bcmath.bcmul(temp, "-1"), "1")
return temp
    @staticmethod
    def writeUnsignedVarLong(value):
        # Encode `value` as an unsigned VarLong text string (7 data bits per
        # byte, high bit = continuation) using bcmath decimal-string math.
        #
        # NOTE(review): `return buffer` is at loop-body level, so the function
        # always returns after the FIRST byte -- multi-byte values are
        # truncated; the return presumably belongs under `else:`. The final
        # `raise` is unreachable because of it, and `chr(...)` yields str, not
        # bytes -- confirm the caller expects text.
        buffer = ""
        if bcmath.bccomp(value, "0") == -1:
            value = bcmath.bcadd(value, "18446744073709551616")
        i = 0
        while i <= 10:
            i = i + 1
            byte = int(bcmath.bcmod(value, "128"))
            value = bcmath.bcdiv(value, "128")
            if value != 0:
                buffer += chr(byte | 0x80)
            else:
                buffer += chr(byte)
            return buffer
        raise TypeError("Value too large to be encoded as a VarLong")
@staticmethod
def writeVarLong(value):
value = bcmath.bcmod(bcmath.bcmul(value, "2"), "18446744073709551616")
if bcmath.bccomp(value, "0") == -1:
value = bcmath.bcsub(bcmath.bcmul(value, "-1"), "1")
return Binary.writeUnsignedVarLong(value)
@staticmethod
def flipShortEndianness(value):
return Binary.readLShort(Binary.writeShort(value))
@staticmethod
def flipIntEndianness(value):
return Binary.readLInt(Binary.writeInt(value))
@staticmethod
def flipLongEndianness(value):
return Binary.readLLong(Binary.writeLong(value))
| Python | 320 | 28.021875 | 95 | /src/podrum/utils/Binary.py | 0.553462 | 0.528481 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import os
from podrum.lang import Base
class Parser:
    """Static helpers for parsing wizard console input."""
    def checkYesNo(str):
        """Interpret a yes/no console answer (case-insensitive).

        Returns True for "y"/"yes", False for "n"/"no", None otherwise.
        NOTE: the parameter shadows the builtin `str`; kept for interface
        compatibility with existing callers.
        """
        str = str.lower()
        if str in ('y', 'yes'):
            return True
        if str in ('n', 'no'):
            return False
        return None
    def checkIfLangExists(str):
        """Return True when a language named *str* is present under
        src/podrum/lang/ (relative to the current working directory)."""
        path = os.getcwd() + '/src/podrum/lang/'
        allLangs = Base.Base.getLangNames(path)
        # Membership test replaces the old if/else that returned True/False.
        return str in allLangs
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from abc import ABCMeta, abstractmethod
from podrum.nbt.NBTStream import NBTStream
from podrum.nbt.ReaderTracker import ReaderTracker
class NamedTag:
    """Base class for named NBT tags.

    NOTE(review): most methods are written without `self` and operate on the
    class attributes (NamedTag.name / NamedTag.cloning) rather than instance
    state -- this mirrors the original port and is kept for interface
    compatibility; audit subclasses before relying on it.
    """
    __metaclass__ = ABCMeta  # Python-2 style metaclass hook; a no-op on Python 3
    # Tag name (at most 32767 bytes).
    name = None
    # Re-entrancy guard used by safeClone() to detect recursive tag cycles.
    cloning = False
    def __init__(self, name = ''):
        # Bug fix: was `len(name > 32767)`, which compared str to int (always
        # raising TypeError) instead of checking the name's length.
        if len(name) > 32767:
            raise ValueError("Tag name cannot be more than 32767 bytes, got length " + str(len(name)))
        self.name = name
    def getName():
        """Return the tag name stored on the class."""
        return NamedTag.name
    def setName(name):
        """Store *name* as the tag name on the class."""
        NamedTag.name = name
    def getValue(): pass
    def getType(): pass
    # String annotations keep the class definable without eager imports.
    def write(nbt: 'NBTStream'): pass
    def read(nbt: 'NBTStream', tracker: 'ReaderTracker'): pass
    def toString(indentation = 0):
        """Render as `<indent>NamedTag: name='...', value='...'`.

        Bug fix: the original concatenated `type(object)` (a type object,
        TypeError on `str + type`) and emitted the literal text
        "NamedTag.name" instead of interpolating the name.
        """
        namePart = ("name='" + str(NamedTag.name) + "', ") if (NamedTag.name != "") else ""
        return (" " * indentation) + NamedTag.__name__ + ": " + namePart + "value='" + str(NamedTag.getValue()) + "'"
    def safeClone() -> 'NamedTag':
        """Clone the tag via copy(), guarding against recursive tag cycles.

        Bug fix: annotations referring to NamedTag are now strings -- the bare
        name raised NameError while the class body was still being evaluated.
        NOTE(review): relies on a copy() method that is not defined here;
        presumably supplied by subclasses -- confirm.
        """
        if NamedTag.cloning:
            raise ValueError("Recursive NBT tag dependency detected")
        NamedTag.cloning = True
        retval = NamedTag.copy()
        NamedTag.cloning = False
        retval.cloning = False
        return retval
    def equals(that: 'NamedTag'):
        """True when *that* has the same name and an equal value."""
        return NamedTag.name == that.name and NamedTag.equalsValue(that)
    def equalsValue(that: 'NamedTag'):
        """True when *that* is a NamedTag with an equal value.

        Bug fix: was `isinstance(that, NamedTag())`, which instantiated the
        class instead of passing the type to isinstance.
        """
        return isinstance(that, NamedTag) and NamedTag.getValue() == that.getValue()
| Python | 60 | 30.433332 | 166 | /src/podrum/nbt/tag/NamedTag.py | 0.563627 | 0.557264 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import hashlib
import os
import random
import time
from podrum.utils.Binary import Binary
from podrum.utils.Utils import Utils
class UUID:
    """128-bit UUID stored as four 32-bit unsigned integer parts."""
    # Class-level defaults kept for backward compatibility; __init__ assigns
    # per-instance values.
    parts = [0, 0, 0, 0]
    version = None
    def __init__(self, part1 = 0, part2 = 0, part3 = 0, part4 = 0, version = None):
        # Bug fix: the four assignments used to mutate the single class-level
        # `parts` list, so every UUID instance shared (and clobbered) the same
        # four values. Each instance now owns its own list.
        self.parts = [int(part1), int(part2), int(part3), int(part4)]
        # Default version comes from the high nibble of part 2's low half.
        self.version = (self.parts[1] & 0xf000) >> 12 if version is None else int(version)
    def getVersion(self):
        """Return the UUID version number."""
        return self.version
    def equals(self, uuid: 'UUID'):
        """True when all four 32-bit parts match.

        Bug fix: the annotation is now a string; the bare `UUID` name raised
        NameError while the class body was still being evaluated.
        """
        return uuid.parts[0] == self.parts[0] and uuid.parts[1] == self.parts[1] and uuid.parts[2] == self.parts[2] and uuid.parts[3] == self.parts[3]
    def fromBinary(self, uuid, version = None):
        """Build a UUID from exactly 16 bytes (four big-endian 32-bit ints)."""
        if len(uuid) != 16:
            raise Exception("Must have exactly 16 bytes")
        return UUID(Binary.readInt(Utils.substr(uuid, 0, 4)), Binary.readInt(Utils.substr(uuid, 4, 4)), Binary.readInt(Utils.substr(uuid, 8, 4)), Binary.readInt(Utils.substr(uuid, 12, 4)), version)
    def fromString(self, uuid, version = None):
        """Parse a textual UUID (dashes optional) into a UUID object."""
        return self.fromBinary(Utils.hex2bin(uuid.strip().replace("-", "")), version)
    def fromData(self, data):
        """Derive a version-3 (md5-based) UUID from *data*.

        Bug fix: `hashlib.new("md5").update(...)` returns None, so the old
        code always handed None to fromBinary; the 16-byte digest is used now.
        NOTE(review): assumes *data* is an iterable of bytes chunks (as passed
        by fromRandom) -- confirm any other callers.
        """
        digest = hashlib.md5(b"".join(data)).digest()
        return self.fromBinary(digest, 3)
    def fromRandom(self):
        """Build a pseudo-random UUID from time, pids and random ints.

        Bug fix: the five chunks are now bundled into one tuple -- fromData()
        takes a single iterable, so the old five-argument call raised
        TypeError. NOTE(review): os.geteuid() is POSIX-only.
        """
        return self.fromData((Binary.writeInt(int(time.time())), Binary.writeShort(os.getpid()), Binary.writeShort(os.geteuid()), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff)), Binary.writeInt(random.randint(-0x7fffffff, 0x7fffffff))))
    def toBinary(self):
        """Serialize the four parts as 16 big-endian bytes."""
        return Binary.writeInt(self.parts[0]) + Binary.writeInt(self.parts[1]) + Binary.writeInt(self.parts[2]) + Binary.writeInt(self.parts[3])
    def toString(self):
        """Render as 8-4-4-4-12 hex, embedding the version digit when set.

        Bug fix: the version is now formatted as a hex digit via
        `format(self.version, "x")`; the old `int(self.version, 16)` raised
        TypeError (int with a base on an int) and could not be concatenated
        to str in any case.
        """
        hexstr = Utils.bin2hex(self.toBinary())
        if self.version is not None:
            return Utils.substr(hexstr, 0, 8) + "-" + Utils.substr(hexstr, 8, 4) + "-" + format(self.version, "x") + Utils.substr(hexstr, 13, 3) + "-8" + Utils.substr(hexstr, 17, 3) + "-" + Utils.substr(hexstr, 20, 12)
        return Utils.substr(hexstr, 0, 8) + "-" + Utils.substr(hexstr, 8, 4) + "-" + Utils.substr(hexstr, 12, 4) + "-" + Utils.substr(hexstr, 16, 4) + "-" + Utils.substr(hexstr, 20, 12)
    def getPart(self, partNumber: int):
        """Return one of the four 32-bit parts (index 0-3)."""
        if partNumber < 0 or partNumber > 3:
            raise Exception("Invalid UUID part index" + str(partNumber))
        return self.parts[partNumber]
| Python | 66 | 43.5 | 244 | /src/podrum/utils/UUID.py | 0.59176 | 0.558393 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.protocol.DataPacket import DataPacket
from podrum.network.protocol.ProtocolInfo import ProtocolInfo
class ResourcePacksInfoPacket(DataPacket):
    """ResourcePacksInfo packet: advertises behavior/resource packs to the
    client and decodes the client's view of the same structure."""
    NID = ProtocolInfo.RESOURCE_PACKS_INFO_PACKET
    # Whether the client must accept the packs to join.
    mustAccept = False
    hasScripts = False
    # NOTE(review): class-level mutable lists are shared between every packet
    # instance; callers should assign fresh lists per instance.
    behaviorPackEntries = []
    resourcePackEntries = []
    def _skipPackEntries(self, count):
        # Read and discard *count* pack-info entries (fields are not kept,
        # matching the original decode behavior).
        while count > 0:
            self.getString()  # pack id
            self.getString()  # pack version
            self.getLLong()   # pack size
            self.getString()  # encryption key
            self.getString()  # subpack name
            self.getString()  # content identity
            self.getBool()    # has scripts
            count -= 1
    def decodePayload(self):
        """Decode flags, then skip the behavior- and resource-pack tables."""
        self.mustAccept = self.getBool()
        self.hasScripts = self.getBool()
        self._skipPackEntries(self.getLShort())  # behavior packs
        self._skipPackEntries(self.getLShort())  # resource packs
    def _putPackEntries(self, entries):
        # Write one length-prefixed table of pack entries.
        self.putLShort(len(entries))
        for entry in entries:
            self.putString(entry.getPackId())
            self.putString(entry.getPackVersion())
            self.putLLong(entry.getPackSize())
            self.putString("") # TODO: encryption key
            self.putString("") # TODO: subpack name
            self.putString("") # TODO: content identity
            self.putBool(False) # TODO: has scripts (?)
    def encodePayload(self):
        """Encode flags followed by both pack tables."""
        self.putBool(self.mustAccept)
        self.putBool(self.hasScripts)
        self._putPackEntries(self.behaviorPackEntries)
        self._putPackEntries(self.resourcePackEntries)
| Python | 71 | 35.15493 | 77 | /src/podrum/network/protocol/ResourcePacksInfoPacket.py | 0.577328 | 0.57538 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
import time
import os
from podrum.lang.Base import Base
from podrum.utils.Logger import Logger
from podrum.utils.ServerFS import ServerFS
from podrum.utils.Utils import Utils
from podrum.wizard.Wizard import Wizard
from pyraklib.server.PyRakLibServer import PyRakLibServer
from pyraklib.server.ServerHandler import ServerHandler
class Server:
    """Bootstraps the Podrum server and runs the blocking console loop."""
    path = None
    withWizard = None
    # Default Minecraft Bedrock (RakNet) listen port.
    port = 19132
    podrumLogo = """
     ____           _                      
    |  _ \ ___   __| |_ __ _   _ _ __ ___  
    | |_) / _ \ / _` | '__| | | | '_ ` _ \ 
    |  __/ (_) | (_| | |  | |_| | | | | | |
    |_|   \___/ \__,_|_|  \__,_|_| |_| |_| 
    """
    def __init__(self, path, withWizard, isTravisBuild = False):
        """Start the server.

        path: server root directory.
        withWizard: when True, verify server data files on disk; when False,
            skip the first-run wizard.
        isTravisBuild: CI mode -- validate server.json and exit.
        """
        super().__init__()
        startTime = Utils.microtime(True)
        self.path = path
        self.withWizard = withWizard
        if withWizard:
            ServerFS.checkAllFiles(path)
        else:
            Wizard.skipWizard(path, True)
        port = self.port
        print(str(self.podrumLogo))
        Wizard.isInWizard = False
        Logger.log('info', str(Base.get("startingServer")).replace("{ip}", str(Utils.getPrivateIpAddress())).replace("{port}", str(port)))
        Logger.log('info', str(Base.get("extIpMsg")).replace("{ipPublic}", str(Utils.getPublicIpAddress())))
        Logger.log('info', str(Base.get("license")))
        # Bug fix: the port was hard-coded to 19132 here, silently ignoring
        # self.port read into `port` above.
        server = PyRakLibServer(port=port)
        handler = ServerHandler(server, None)
        handler.sendOption("name", "MCPE;Podrum powered server;407;1.16.0;0;0;0;PodrumPoweredServer;0")
        doneTime = Utils.microtime(True)
        finishStartupSeconds = "%.3f" % (doneTime - startTime)
        Logger.log('info', f'Done in {str(finishStartupSeconds)}s. Type "help" to view all available commands.')
        if isTravisBuild:
            Server.checkTravisBuild(path)
        else:
            # Blocking console REPL; exits via the 'stop' command.
            while Wizard.isInWizard == False:
                cmd = input('> ')
                Server.command(cmd, True)
            cmd = None
            ticking = True
            while ticking:
                time.sleep(0.002)
    def command(string, fromConsole):
        """Dispatch one console command.

        NOTE: defined without `self` on purpose -- invoked as
        Server.command(cmd, True) from the console loop.
        """
        if string.lower() == 'stop':
            Logger.log('info', 'Stopping server...')
            Utils.killServer()
        elif string.lower() == '':
            return
        elif string.lower() == 'help':
            Logger.log('info', '/stop: Stops the server')
        else:
            Logger.log('error', str(Base.get("invalidCommand")))
    def checkTravisBuild(path):
        """CI helper: verify server.json exists and is non-empty, then exit
        the process (0 on success, 1 on failure)."""
        if not ServerFS.checkForFile(path, "server.json"):
            Logger.log("error", "Couldn't find server.json file.")
            os._exit(1)
        if os.path.getsize(f'{path}/server.json') == 0:
            Logger.log("error", "The server.json file is empty.")
            os._exit(1)
        print("Build success.")
        os._exit(0)
| Python | 91 | 35.923077 | 139 | /src/podrum/Server.py | 0.517531 | 0.508548 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
class ResourcePack:
    """Interface that concrete resource-pack implementations fill in.

    Every method here is a stub that returns None; implementations are
    expected to override them. NOTE(review): methods are declared without
    `self`, matching this codebase's static-style ports -- confirm how
    implementations invoke them before adding instance state.
    """
    # Expected: filesystem path of the pack archive.
    def getPath(): pass
    # Expected: human-readable pack name.
    def getPackName(): pass
    # Expected: the pack's UUID string.
    def getPackId(): pass
    # Expected: total pack size in bytes.
    def getPackSize(): pass
    # Expected: pack version string.
    def getPackVersion(): pass
    # Expected: SHA-256 digest of the pack file.
    def getSha256(): pass
    # Expected: `length` bytes of the pack file starting at offset `start`.
    def getPackChunk(start, length): pass
| Python | 27 | 24.25926 | 77 | /src/podrum/resourcepacks/ResourcePack.py | 0.510264 | 0.504399 |
jessehylton/Podrum | refs/heads/master | """
* ____ _
* | _ \ ___ __| |_ __ _ _ _ __ ___
* | |_) / _ \ / _` | '__| | | | '_ ` _ \
* | __/ (_) | (_| | | | |_| | | | | | |
* |_| \___/ \__,_|_| \__,_|_| |_| |_|
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
"""
from podrum.network.PacketPool import PacketPool
class Player:
    """Per-connection player session state.

    The class-level attributes act as defaults; __init__ overwrites
    connection/address/logger/server per instance, the rest are assigned as
    the session progresses.
    """
    connection = None
    server = None
    logger = None
    # Bug fix: was `Nome` (typo for None), which raised NameError as soon as
    # the module was imported.
    address = None
    name = None
    locale = None
    randomId = None
    uuid = None
    xuid = None
    skin = None
    viewDistance = None
    gamemode = 0
    pitch = 0
    yaw = 0
    headYaw = 0
    onGround = False
    platformChatId = ''
    deviceOS = None
    deviceModel = None
    # Bug fix: was `Nome` (typo for None).
    deviceId = None
    def __init__(self, connection, address, logger, server):
        self.connection = connection
        self.address = address
        self.logger = logger
        self.server = server
| Python | 42 | 24.642857 | 77 | /src/podrum/Player.py | 0.532962 | 0.528319 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.