repo_name
stringclasses 400
values | branch_name
stringclasses 4
values | file_content
stringlengths 16
72.5k
| language
stringclasses 1
value | num_lines
int64 1
1.66k
| avg_line_length
float64 6
85
| max_line_length
int64 9
949
| path
stringlengths 5
103
| alphanum_fraction
float64 0.29
0.89
| alpha_fraction
float64 0.27
0.89
|
|---|---|---|---|---|---|---|---|---|---|
Jmitch13/Senior-Honors-Project
|
refs/heads/main
|
import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Module-level connection to the SQLite database holding one table per year
Top100 = sqlite3.connect('Top100Prospects.db')
# Years for which a Top-100 prospects table is created and scraped
yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
def top_100_table(year):
    """Create the _<year>Top100Prospects table if it does not already exist.

    The leading underscore is needed because a SQLite table name cannot
    start with a digit.
    """
    schema = '(Rank INTEGER, Player_Name TEXT, Team TEXT, Organization_Rank TEXT, Age INTEGER, Position TEXT, MLB_Est TEXT)'
    cursor = Top100.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS _%sTop100Prospects%s' % (year, schema))
    cursor.close()
def data_entry(year, rank, player_name, team, organization_rank, age, position, mlb_est):
    """Insert one prospect row into the _<year>Top100Prospects table."""
    cursor = Top100.cursor()
    sql = ("INSERT INTO _%sTop100Prospects "
           "(Rank, Player_Name, Team, Organization_Rank, Age, Position, MLB_Est) "
           "VALUES(?, ?, ?, ?, ?, ?, ?)" % year)
    cursor.execute(sql, (rank, player_name, team, organization_rank, age, position, mlb_est))
    Top100.commit()
    cursor.close()
#Function to web scrape The Baseball Cube for the top 100 prospects
def web_scrape(playerList, year):
    # Download and parse the yearly Baseball America ranking page
    source = requests.get('http://www.thebaseballcube.com/prospects/years/byYear.asp?Y=' + year + '&Src=ba').text
    soup = BeautifulSoup(source, "html.parser")
    # 'grid2' is the prospects grid on the page
    table = soup.find('table', id = 'grid2')
    table_rows = table.find_all('tr')
    for tr in table_rows:
        td = tr.find_all('td')
        row = [i.text for i in td]
        #Manipulates the data that is not needed
        # Rows with more than 9 cells look like full player rows; shorter
        # (header/label) rows are appended unmodified and removed below.
        if len(row) > 9:
            # keep only the first 4 characters — presumably a year value; TODO confirm
            row[9] = row[9][:4]
            row[13] = row[13][:4]
            # drop columns that are not stored in the database
            del row[-2:]
            del row[10:13]
            del row[5:9]
        playerList.append(row)
    #removes the table labels that are not needed
    # The first two rows are headers; the grid appears to repeat a label row
    # every 25 players, so each del removes the next label at its shifted
    # post-deletion index — NOTE(review): verify against the live page layout.
    del playerList[:2]
    del playerList[25]
    del playerList[50]
    del playerList[75]
    del playerList[100]
def main():
    """Create one Top-100 table per year and load it with scraped rows."""
    for year in yearList:
        # make sure the destination table exists before inserting
        top_100_table(year)
        players = []
        web_scrape(players, year)
        for row in players:
            # age at list time is derived from column 5 (presumably a birth
            # year) relative to the list year — TODO confirm column meaning
            data_entry(year, int(row[0]), row[1], row[2], row[3],
                       int(year) - int(row[5]) + 1, row[4], row[6])


if __name__ == "__main__":
    main()
|
Python
| 68
| 38.205883
| 194
|
/Top100prospects.py
| 0.626189
| 0.578639
|
Jmitch13/Senior-Honors-Project
|
refs/heads/main
|
import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Module-level connection to the international prospects database
International = sqlite3.connect('InternationalProspects.db')
# Years for which an international class table is created
yearList = ['2015', '2016', '2017', '2018', '2019']
def international_table(year):
    """Create the _<year>TopInternationalClass table if it does not exist.

    The underscore prefix is required: SQLite identifiers cannot begin
    with a digit.
    """
    schema = '(Rank INTEGER, Player_Name TEXT, Position TEXT, Age INTEGER, Projected_Team TEXT, Future_Value TEXT)'
    cursor = International.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS _%sTopInternationalClass%s' % (year, schema))
    cursor.close()
#Enter the data of a player into the respective table
def data_entry(year, rank, player_name, position, age, proj_team, fut_val):
    """Insert one international prospect into _<year>TopInternationalClass.

    Bug fix: the INSERT previously targeted a table that is never created
    (_<year>International_Prospects) and listed the 7 columns of the
    Top-100 schema while binding only 6 values. It now matches the schema
    created by international_table().
    """
    ip = International.cursor()
    #need the underscore because a table can't start with a number
    insertStatement = ("INSERT INTO _" + year + "TopInternationalClass "
                       "(Rank, Player_Name, Position, Age, Projected_Team, Future_Value) "
                       "VALUES(?, ?, ?, ?, ?, ?)")
    statTuple = (rank, player_name, position, age, proj_team, fut_val)
    ip.execute(insertStatement, statTuple)
    International.commit()
    ip.close()
#Scrapes Fangraphs for the international class of a given year
def web_scrape(playerList, year):
    """Append one row (list of cell texts) per <tr> of every table on the page."""
    url = ('https://www.fangraphs.com/prospects/the-board/' + year +
           '-international/summary?sort=-1,1&type=0&pageitems=200&pg=0')
    page = BeautifulSoup(requests.get(url).text, "html.parser")
    for tbl in page.find_all('table'):
        for tr in tbl.find_all('tr'):
            cells = tr.find_all('td')
            playerList.append([cell.text for cell in cells])
#main function to create the database of all the top international free agents from 2015-2019
def main():
    """Create one TopInternationalClass table per year.

    NOTE(review): only table creation is wired up here; web_scrape and
    data_entry are defined but never invoked.
    """
    for year in yearList:
        international_table(year)


if __name__ == "__main__":
    main()
|
Python
| 52
| 40.096153
| 169
|
/InternationalProspects.py
| 0.674897
| 0.652949
|
Jmitch13/Senior-Honors-Project
|
refs/heads/main
|
import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Module-level connection to the draft-class database (one table per year)
PlayerDraft = sqlite3.connect('PlayerDraft.db')
yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
def player_draft_table(year):
    """Create the _<year>Draft_Class table if it does not already exist."""
    schema = '(Player_Name TEXT, Rank INTEGER, Position TEXT, School TEXT)'
    cursor = PlayerDraft.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS _%sDraft_Class%s' % (year, schema))
    cursor.close()
def data_entry(year, player_name, rank, position, school):
    """Insert one drafted player into the _<year>Draft_Class table."""
    cursor = PlayerDraft.cursor()
    sql = ("INSERT INTO _%sDraft_Class (Player_Name, Rank, Position, School) "
           "VALUES(?, ?, ?, ?)" % year)
    cursor.execute(sql, (player_name, rank, position, school))
    PlayerDraft.commit()
    cursor.close()
#Scrapes the draft table from Baseball Almanac
def web_scrape(draftList, year):
    """Collect draft rows for *year*.

    Stops once 202 rows are buffered: 2 header rows (popped by the caller)
    plus the top 200 picks.
    """
    page = BeautifulSoup(
        requests.get('https://www.baseball-almanac.com/draft/baseball-draft.php?yr=' + year).text,
        "html.parser")
    for tr in page.find('table').find_all('tr'):
        if len(draftList) > 201:
            break
        draftList.append([cell.text for cell in tr.find_all('td')])
#main function to create a database for the top prospects from 2012-2019
def main():
    for i in range(len(yearList)):
        # ensure the destination table exists before inserting
        player_draft_table(yearList[i])
        draftList = []
        web_scrape(draftList, yearList[i])
        #removes the heading of the table due to the structure on Baseball Almanac
        draftList.pop(0)
        draftList.pop(0)
        for j in range(len(draftList)):
            # column mapping: [3]=player name, [1]=rank, [5]=position,
            # [6]=school — assumed from the Baseball Almanac layout; TODO confirm
            data_entry(yearList[i], draftList[j][3], draftList[j][1], draftList[j][5], draftList[j][6])
if __name__ == "__main__":
    main()
|
Python
| 56
| 35.75
| 117
|
/PlayerDraftProspects.py
| 0.637181
| 0.610691
|
Jmitch13/Senior-Honors-Project
|
refs/heads/main
|
import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Module-level connection to the cumulative team-stats database
CTeamStats = sqlite3.connect('CumulativeTeamStats.db')
# Seasons whose team statistics are collected into the single table
yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
def cumulative_team_stats_table():
    """Create the single Cumulative_Team_Stats table if it does not exist.

    Batting columns carry a B prefix, pitching columns a P prefix; the row
    key Team_Name is '<year><team>'.
    """
    schema = '(Team_Name TEXT, Wins INTEGER, Runs INTEGER, Run_Differential INTEGER, WAR INTEGER, WPA INTEGER, Dollars REAL, Batter TEXT, AVG REAL, OBP REAL, SLG REAL, OPS REAL, wOBA REAL, wRCplus REAL, BBperc TEXT, Kperc TEXT, Spd REAL, Def REAL, BWAR REAL, BWPA REAL, BDollars TEXT, Pitcher TEXT, ERA REAL, ERAminus REAL, WHIP REAL, FIPx REAL, FIPxminus REAL, Kper9 REAL, Kper9plus REAL, HRper9 REAL, GBperc REAL, PWAR REAL, PWPA REAL, PDollars TEXT)'
    cursor = CTeamStats.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS Cumulative_Team_Stats' + schema)
    cursor.close()
#Function used to enter the data of a team into the cts database
def data_entry(year, team_name, wins, runs, rd, war, wpa, dollar, batter, avg, obp, slg, ops, woba, wrc, bb, k, spd, defense, bwar, bwpa, bdollar, pitcher, era, eramin, whip, fipx, fipxmin, kper9, kper9plus, hrper9, gbperc, pwar, pwpa, pdollar):
    """Insert one season's combined team line; the row key is year+team_name."""
    sql = "INSERT INTO Cumulative_Team_Stats (Team_Name, Wins, Runs, Run_Differential, WAR, WPA, Dollars, Batter, AVG, OBP, SLG, OPS, wOBA, wRCplus, BBperc, Kperc, Spd, Def, BWAR, BWPA, BDollars, Pitcher, ERA, ERAminus, WHIP, FIPx, FIPxminus, Kper9, Kper9plus, HRper9, GBperc, PWAR, PWPA, PDollars) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    values = (year + team_name, wins, runs, rd, war, wpa, dollar, batter, avg, obp, slg, ops, woba, wrc, bb, k, spd, defense, bwar, bwpa, bdollar, pitcher, era, eramin, whip, fipx, fipxmin, kper9, kper9plus, hrper9, gbperc, pwar, pwpa, pdollar)
    cursor = CTeamStats.cursor()
    cursor.execute(sql, values)
    CTeamStats.commit()
    cursor.close()
#Function used to scrape fangraphs to get all of the desired team statistics
def web_scrape(teamList, year):
    """Scrape Fangraphs team pitching then team batting stats for *year*.

    Appends 30 pitching rows followed by 30 batting rows to *teamList*.

    Bug fix: the pitcher branch used to execute ``row = '$1.9'`` for the
    value '($1.9)', rebinding the whole row to a string and crashing on the
    next item assignment; the intent (shared with the batter branch, which
    hard-coded '($20.6)' and '($19.0)') was to normalize a parenthesized
    negative dollar figure. Both branches now strip the parentheses
    generically before dropping the leading '$'.
    """
    #adds all the pitcher stats from the teams
    source = requests.get('https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,6,117,62,119,36,301,40,48,63,60,4,59,32,17,42&season=' + year + '&month=0&season1=' + year + '&ind=0&team=0,ts&rost=0&age=0&filter=&players=0&startdate=2019-01-01&enddate=2019-12-31&sort=1,a').text
    soup = BeautifulSoup(source, "html.parser")
    #use the identifier class to scrape the right table
    table = soup.find('table', class_ = 'rgMasterTable')
    table_rows = table.find_all('tr')
    #Scrape all the data from the table
    for tr in table_rows:
        td = tr.find_all('td')
        row = [i.text for i in td]
        del row[:1]
        # header rows contain no <td> cells and collapse to an empty list
        if len(row) != 0:
            row[8] = row[8][:-1]  # drop trailing character (presumably '%') — TODO confirm
            # negative dollar values arrive as '($X.Y)'; normalize to '$X.Y'
            if row[10].startswith('(') and row[10].endswith(')'):
                row[10] = row[10][1:-1]
            row[10] = row[10][1:]  # drop the leading '$'
            teamList.append(row)
    #adds all the batter stats to the teams
    source = requests.get('https://www.fangraphs.com/leaders.aspx?pos=all&stats=bat&lg=all&qual=0&type=c,12,34,35,23,37,38,50,61,199,58,62,59,60,13,39&season=' + year + '&month=0&season1=' + year + '&ind=0&team=0,ts&rost=0&age=0&filter=&players=0&startdate=2019-01-01&enddate=2019-12-31&sort=1,a').text
    soup = BeautifulSoup(source, "html.parser")
    table = soup.find('table', class_ = 'rgMasterTable')
    table_rows = table.find_all('tr')
    #Scrape all the data from the table
    for tr in table_rows:
        td = tr.find_all('td')
        row = [i.text for i in td]
        del row[:2]
        if len(row) != 0:
            row[1] = row[1][:-1]
            row[2] = row[2][:-1]
            # same negative-dollar normalization as the pitcher branch
            if row[11].startswith('(') and row[11].endswith(')'):
                row[11] = row[11][1:-1]
            row[11] = row[11][1:]
            teamList.append(row)
#Main Program
def main():
    # the single cumulative table holds every year's rows
    cumulative_team_stats_table()
    #for every year in the vector yearList
    for i in range(len(yearList)):
        teamList = []
        #Scrape the table for the entire year
        web_scrape(teamList, yearList[i])
        #Enter the data for all 30 major league teams
        # teamList[j] is the pitching row and teamList[j+30] the batting row
        # for the same team; the index mapping below is assumed from the
        # Fangraphs column order requested in web_scrape — TODO confirm
        for j in range(30):
            data_entry(yearList[i], teamList[j][0], teamList[j][11], int(teamList[j][13]), int(teamList[j+30][13]) - int(teamList[j][14]), round(float(teamList[j][12]) + float(teamList[j+30][9]), 3), round(float(teamList[j][9]) + float(teamList[j+30][10]), 3), round(float(teamList[j][10]) + float(teamList[j+30][11]), 3), '-', float(teamList[j+30][3]), float(teamList[j+30][4]), float(teamList[j+30][5]), float(teamList[j+30][14]), float(teamList[j+30][6]), int(teamList[j+30][7]), float(teamList[j+30][1]), float(teamList[j+30][2]), float(teamList[j+30][12]), float(teamList[j+30][8]), float(teamList[j+30][9]), float(teamList[j+30][10]), float(teamList[j+30][11]), '-', float(teamList[j][1]), int(teamList[j][2]), float(teamList[j][15]), float(teamList[j][3]), float(teamList[j][4]), float(teamList[j][5]), float(teamList[j][6]), float(teamList[j][7]), float(teamList[j][8]), float(teamList[j][12]), float(teamList[j][9]), float(teamList[j][10]))
if __name__ == "__main__":
    main()
|
Python
| 84
| 65.023811
| 949
|
/CumulativeTeamStats.py
| 0.623268
| 0.566963
|
Jmitch13/Senior-Honors-Project
|
refs/heads/main
|
import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Module-level connection to the free-agency database (one table per year)
FreeAgency = sqlite3.connect('FreeAgency.db')
# Years for which a free-agent class table is created and scraped
yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
def free_agency_table(year):
    """Create the _<year>FA_Class table if it does not already exist."""
    schema = '(Player_Name TEXT, Age INTEGER, Position TEXT, FA_Type TEXT, Rank INTEGER, Years INTEGER, Amount TEXT)'
    cursor = FreeAgency.cursor()
    cursor.execute('CREATE TABLE IF NOT EXISTS _%sFA_Class%s' % (year, schema))
    cursor.close()
def data_entry(year, player_name, age, position, fa_type, rank, years, amount):
    """Insert one free agent into the _<year>FA_Class table."""
    cursor = FreeAgency.cursor()
    sql = ("INSERT INTO _%sFA_Class "
           "(Player_Name, Age, Position, FA_Type, Rank, Years, Amount) "
           "VALUES(?, ?, ?, ?, ?, ?, ?)" % year)
    cursor.execute(sql, (player_name, age, position, fa_type, rank, years, amount))
    FreeAgency.commit()
    cursor.close()
#Scrapes ESPN for all of the Free Agents for a given year
def web_scrape(playerList, year):
    """Collect ESPN free-agent rows for *year*.

    Header rows ('PLAYER' / 'Free Agents') are skipped and the two team
    columns (old team / new team) are stripped from every row.
    """
    page = BeautifulSoup(requests.get('http://www.espn.com/mlb/freeagents/_/year/' + year).text,
                         "html.parser")
    for tr in page.find('table').find_all('tr'):
        row = [cell.text for cell in tr.find_all('td')]
        if row[0] != 'PLAYER' and row[0] != 'Free Agents':
            playerList.append(row)
    #Remove 2011 team and new team
    for entry in playerList:
        del entry[4:6]
#Function to modify the player list since some of the data from ESPN is not ideal for sorting purposes
def modifyPlayerList(playerList, i, j):
    """Normalize row *j* of *playerList* in place for year index *i*.

    i -- index into the module-level yearList
    j -- index of the row to normalize

    Fix: a redundant second assignment of the rank (a ternary that
    re-checked for 'NR' after the value could no longer be 'NR') was
    removed; behavior is unchanged.
    """
    row = playerList[j]
    # compensation type: 'Signed (A)'/'Signed (B)' -> 'A'/'B', otherwise 'None'
    if row[3] == 'Signed (A)':
        row[3] = 'A'
    elif row[3] == 'Signed (B)':
        row[3] = 'B'
    else:
        row[3] = 'None'
    # ESPN lists the current age; convert it to the player's age in the listed season
    row[2] = int(row[2]) - (2020 - int(yearList[i]))
    # rank: 'NR' (not ranked) becomes the sentinel 51, otherwise numeric
    if row[5] == 'NR':
        row[5] = 51
    else:
        row[5] = int(row[5])
    # unsigned or minor-league deals carry no contract-years value
    if row[6] == '--' or row[6] == 'Minor Lg':
        row[4] = '0'
    if row[6] == '--':
        row[6] = 'Not Signed'
#Main function to create the free agent database which contains every free agent from 2012 to 2019
def main():
    """Create and populate one FA_Class table per year in yearList.

    Fix: a leftover debug ``print(playerList)`` was removed.
    """
    for i in range(len(yearList)):
        #call the method to create the yearly table
        free_agency_table(yearList[i])
        #stores the data of all available free agents
        playerList = []
        #call web_scrape method
        web_scrape(playerList, yearList[i])
        for j in range(len(playerList)):
            # normalize the scraped row in place before inserting
            modifyPlayerList(playerList, i, j)
            #insert the free agent data
            data_entry(yearList[i], playerList[j][0], int(playerList[j][2]), playerList[j][1], playerList[j][3], playerList[j][5], int(playerList[j][4]), playerList[j][6])
if __name__ == "__main__":
    main()
|
Python
| 88
| 39.477272
| 171
|
/FreeAgent.py
| 0.61863
| 0.586849
|
pyfaddist/yafcorse
|
refs/heads/main
|
from flask import Flask, Response
from flask.testing import FlaskClient
def test_simple_request(client: FlaskClient):
    """A plain GET with an Origin header gets the origin echoed, but no max-age."""
    origin = 'https://test.org'
    response: Response = client.get('/some-request', headers={'Origin': origin})
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in headers
    assert 'Access-Control-Max-Age'.lower() not in headers
    allowed_origin = headers.get('Access-Control-Allow-Origin')
    assert allowed_origin is not None
    assert allowed_origin == origin
|
Python
| 13
| 42.923077
| 84
|
/tests/test_simple_request.py
| 0.709282
| 0.704028
|
pyfaddist/yafcorse
|
refs/heads/main
|
from flask.app import Flask
from yafcorse import Yafcorse
def test_extension(app: Flask):
    """init_app must register the extension instance under the 'yafcorse' key."""
    extension = app.extensions.get('yafcorse')
    assert extension is not None
    assert isinstance(extension, Yafcorse)
|
Python
| 8
| 25.375
| 63
|
/tests/test_ceate_extensions.py
| 0.758294
| 0.758294
|
pyfaddist/yafcorse
|
refs/heads/main
|
from flask import Response
from flask.testing import FlaskClient
# def test_with_origin(client: FlaskClient):
# response: Response = client.options('/some-request', headers={
# 'Access-Control-Request-Method': 'POST',
# 'Access-Control-Request-Headers': 'Content-Type, X-Custom',
# 'Origin': 'https://test.org'
# })
# assert response.status_code == 404
# assert 'Access-Control-Max-Age' in response.headers
# assert response.headers.get('Access-Control-Allow-Origin') == 'https://test.org'
def test_with_origin(client: FlaskClient):
    """OPTIONS with only an Origin header: origin is echoed and a max-age is set."""
    origin = 'https://test.org'
    response: Response = client.options('/some-request', headers={'Origin': origin})
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in headers
    assert 'Access-Control-Max-Age'.lower() in headers
    allowed_origin = headers.get('Access-Control-Allow-Origin')
    assert allowed_origin is not None
    assert allowed_origin == origin
    max_age = headers.get('Access-Control-Max-Age')
    assert max_age is not None
    assert max_age != ''
def test_without_origin(client: FlaskClient):
    """OPTIONS without an Origin header must not receive any CORS headers."""
    response: Response = client.options('/some-request', headers={})
    assert response.status_code == 404
    for header_name in ('Access-Control-Allow-Origin', 'Access-Control-Max-Age',
                        'Access-Control-Allow-Methods', 'Access-Control-Allow-Headers'):
        assert header_name.lower() not in response.headers
def test_allow_method(client: FlaskClient):
    """Preflight for an allowed method lists the methods but no allow-headers."""
    origin = 'https://test.org'
    response: Response = client.options('/some-request', headers={
        'Access-Control-Request-Method': 'POST',
        'Origin': origin
    })
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Methods'.lower() in headers
    assert 'POST' in headers.get('Access-Control-Allow-Methods')
    assert 'Access-Control-Max-Age'.lower() in headers
    assert headers.get('Access-Control-Allow-Origin') == origin
    assert 'Access-Control-Allow-Headers'.lower() not in headers
def test_dont_allow_method(client: FlaskClient):
    """Preflight for a method outside the allowed list omits allow-methods."""
    origin = 'https://test.org'
    response: Response = client.options('/some-request', headers={
        'Access-Control-Request-Method': 'PATCH',
        'Origin': origin
    })
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Methods'.lower() not in headers
    assert 'Access-Control-Max-Age'.lower() in headers
    assert headers.get('Access-Control-Allow-Origin') == origin
    assert 'Access-Control-Allow-Headers'.lower() not in headers
def test_allow_headers(client: FlaskClient):
    """Preflight with only allowed request headers echoes them back."""
    origin = 'https://test.org'
    response: Response = client.options('/some-request', headers={
        'Access-Control-Request-Headers': 'Content-Type, X-Test-Header',
        'Origin': origin
    })
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Headers'.lower() in headers
    allowed_headers = headers.get('Access-Control-Allow-Headers')
    assert 'Content-Type' in allowed_headers
    assert 'X-Test-Header' in allowed_headers
    assert 'Access-Control-Max-Age'.lower() in headers
    assert headers.get('Access-Control-Allow-Origin') == origin
    assert 'Access-Control-Allow-Methods'.lower() not in headers
def test_dont_allow_headers(client: FlaskClient):
    """Preflight including a disallowed header omits allow-headers entirely."""
    origin = 'https://test.org'
    response: Response = client.options('/some-request', headers={
        'Access-Control-Request-Headers': 'Content-Type, X-Test-Header, X-Not-Allowed',
        'Origin': origin
    })
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Headers'.lower() not in headers
    assert 'Access-Control-Max-Age'.lower() in headers
    assert headers.get('Access-Control-Allow-Origin') == origin
    assert 'Access-Control-Allow-Methods'.lower() not in headers
|
Python
| 87
| 46.804596
| 91
|
/tests/test_preflight_request.py
| 0.687906
| 0.682856
|
pyfaddist/yafcorse
|
refs/heads/main
|
import pytest
from flask import Flask, Response
from flask.testing import FlaskClient
from yafcorse import Yafcorse
@pytest.fixture()
def local_app():
    """Flask app whose origin check is a lambda instead of a static list."""
    application = Flask(__name__)
    extension = Yafcorse({
        'allowed_methods': ['GET', 'POST', 'PUT'],
        'allowed_headers': ['Content-Type', 'X-Test-Header'],
        'origins': lambda origin: origin == 'https://from_lambda'
    })
    extension.init_app(application)
    return application
@pytest.fixture()
def local_client(local_app: Flask):
    """Test client bound to the lambda-origin app fixture."""
    return local_app.test_client()
def test_origin_function(local_client: FlaskClient):
    """An origin accepted by the configured lambda receives CORS headers."""
    origin = 'https://from_lambda'
    response: Response = local_client.options('/some-request', headers={'Origin': origin})
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() in headers
    assert 'Access-Control-Max-Age'.lower() in headers
    allowed_origin = headers.get('Access-Control-Allow-Origin')
    assert allowed_origin is not None
    assert allowed_origin == origin
    max_age = headers.get('Access-Control-Max-Age')
    assert max_age is not None
    assert max_age != ''
def test_origin_function_fail(local_client: FlaskClient):
    """An origin rejected by the configured lambda gets no CORS headers."""
    response: Response = local_client.options('/some-request', headers={
        'Origin': 'https://other_than_lambda'
    })
    headers = response.headers
    assert response.status_code == 404
    assert 'Access-Control-Allow-Origin'.lower() not in headers
    assert 'Access-Control-Max-Age'.lower() not in headers
|
Python
| 46
| 32.586956
| 87
|
/tests/test_origins_function.py
| 0.678317
| 0.674434
|
pyfaddist/yafcorse
|
refs/heads/main
|
# def test_no_cors_enabled():
# assert False
|
Python
| 2
| 23.5
| 29
|
/tests/test_default_configuration.py
| 0.645833
| 0.645833
|
pyfaddist/yafcorse
|
refs/heads/main
|
import re
from typing import Callable, Iterable
from flask import Flask, Response, request
# Yet Another Flask CORS Extension
# --------------------------------
# Based on https://developer.mozilla.org/de/docs/Web/HTTP/CORS
# DEFAULT_CONFIGURATION = {
# 'origins': '*',
# 'allowed_methods': ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE'],
# 'allowed_headers': '*',
# 'allow_credentials': True,
# 'cache_max_age': str(60 * 5)
# }
# Fallback values used by Yafcorse when a key is missing from the supplied
# configuration dict. NOTE(review): with these defaults, init_app() calls
# ', '.join(None) for allowed_headers and would raise — confirm callers
# always pass explicit lists.
DEFAULT_CONFIGURATION = {
    'origins': None,
    'allowed_methods': [],
    'allowed_headers': None,
    'allow_credentials': False,
    'cache_max_age': None
}
class Yafcorse(object):
    """Yet Another Flask CORS Extension.

    Registers an after_request hook on the Flask app that appends CORS
    response headers based on the configured origins, methods and headers.
    """
    def __init__(self, configuration: dict = DEFAULT_CONFIGURATION, app: Flask = None) -> None:
        # NOTE(review): a shared dict as default argument is an anti-pattern,
        # but it is only read here (never mutated), so behavior is unaffected.
        super().__init__()
        self.__initialized = False
        # origins may be: a list/tuple of exact origins, a callable predicate,
        # '*' for allow-all, or None combined with 'origin_patterns' regexes
        self.__origins = configuration.get('origins', DEFAULT_CONFIGURATION.get('origins'))
        self.__regex_origin_patterns = configuration.get('origin_patterns', None)
        self.__allowed_methods = configuration.get('allowed_methods', DEFAULT_CONFIGURATION.get('allowed_methods'))
        self.__allowed_headers = configuration.get('allowed_headers', DEFAULT_CONFIGURATION.get('allowed_headers'))
        self.__allow_credentials = configuration.get('allow_credentials', DEFAULT_CONFIGURATION.get('allow_credentials'))
        self.__max_age = configuration.get('cache_max_age', DEFAULT_CONFIGURATION.get('cache_max_age'))
        # precomputed header values, filled in by init_app()
        self.__allowed_methods_value = ''
        self.__allowed_headers_value = ''
        self.init_app(app)

    def init_app(self, app: Flask):
        """Attach the after_request hook to *app*; no-op when app is None
        or the extension was already initialized."""
        if not self.__initialized and app:
            # header values are joined before the lists are lower-cased for
            # comparisons. NOTE(review): raises TypeError when
            # allowed_methods/allowed_headers is None (the defaults) —
            # confirm explicit lists are always configured.
            self.__allowed_methods_value = ', '.join(self.__allowed_methods)
            self.__allowed_methods = [m.strip().lower() for m in self.__allowed_methods]
            self.__allowed_headers_value = ', '.join(self.__allowed_headers)
            self.__allowed_headers = [h.strip().lower() for h in self.__allowed_headers]
            # choose the origin-validation strategy once, up front
            if not isinstance(self.__origins, str) and isinstance(self.__origins, (list, tuple, Iterable)):
                self.__validate_origin = _check_if_contains_origin(self.__origins)
            elif isinstance(self.__origins, Callable):
                self.__validate_origin = self.__origins
            elif self.__regex_origin_patterns is not None:
                self.__validate_origin = _check_if_regex_match_origin(self.__regex_origin_patterns)
            else:
                # only accepts anything when origins == '*'
                self.__validate_origin = _check_if_asterisk_origin(self.__origins)
            app.after_request(self.__handle_response)
            app.extensions['yafcorse'] = self
            self.__initialized = True

    def __append_headers(self, response: Response, origin: str, is_preflight_request: bool = False):
        """Add the CORS headers for *origin* to *response*."""
        response.headers.add_header('Access-Control-Allow-Origin', origin)
        # echo the allowed methods only when the requested method is permitted
        if 'Access-Control-Request-Method' in request.headers \
            and request.headers.get('Access-Control-Request-Method', '').strip().lower() in self.__allowed_methods:
            response.headers.add_header('Access-Control-Allow-Methods', self.__allowed_methods_value)
        # echo the allowed headers only when every requested header is permitted
        if 'Access-Control-Request-Headers' in request.headers \
            and _string_list_in(request.headers.get('Access-Control-Request-Headers').split(','), self.__allowed_headers):
            response.headers.add_header('Access-Control-Allow-Headers', self.__allowed_headers_value)
        if self.__allow_credentials:
            response.headers.add_header('Access-Control-Allow-Credentials', 'true')
        if is_preflight_request:
            # NOTE(review): __max_age may be None here; add_header expects a
            # string value — confirm cache_max_age is always configured.
            response.headers.add_header('Access-Control-Max-Age', self.__max_age)

    def __handle_response(self, response: Response):
        """after_request hook: append CORS headers when the origin validates."""
        is_preflight_request = request.method == 'OPTIONS'
        # non-preflight requests without an Origin header are left untouched
        if not is_preflight_request and 'Origin' not in request.headers:
            return response
        origin = request.headers.get('Origin')
        if not self.__validate_origin(origin):
            return response
        self.__append_headers(response, origin, is_preflight_request)
        return response
def _string_list_in(target: list[str], source: list[str]):
contained = [element for element in target if element.strip().lower() in source]
return contained == target
def _check_if_regex_match_origin(patterns):
compiled_patterns = [re.compile(p) for p in patterns]
def execute_check(origin):
for matcher in compiled_patterns:
if matcher.match(origin):
return True
return False
execute_check.__name__ = _check_if_regex_match_origin.__name__
return execute_check
def _check_if_contains_origin(origins):
def execute_check(origin):
for o in origins:
if o == origin:
return True
return False
execute_check.__name__ = _check_if_contains_origin.__name__
return execute_check
def _check_if_asterisk_origin(origins):
allow_all = origins == '*'
def execute_check(origin):
return allow_all and origin is not None
execute_check.__name__ = _check_if_asterisk_origin.__name__
return execute_check
|
Python
| 129
| 38.906979
| 122
|
/src/yafcorse/__init__.py
| 0.633256
| 0.632673
|
pyfaddist/yafcorse
|
refs/heads/main
|
import pytest
from flask import Flask
from yafcorse import Yafcorse
@pytest.fixture()
def app():
    """Flask app with a permissive Yafcorse configuration for the test suite."""
    application = Flask(__name__)
    configuration = {
        'origins': '*',
        'allowed_methods': ['GET', 'POST', 'PUT'],
        'allowed_headers': ['Content-Type', 'X-Test-Header'],
        'allow_credentials': True,
        'cache_max_age': str(60 * 5)
    }
    extension = Yafcorse(configuration)
    extension.init_app(application)
    return application
@pytest.fixture()
def client(app: Flask):
    """Test client bound to the shared app fixture."""
    return app.test_client()
|
Python
| 25
| 18.08
| 61
|
/tests/conftest.py
| 0.580713
| 0.574423
|
hjnewman3/PDF-Text-Extractor
|
refs/heads/master
|
'''
PDF Text Extractor Module
This module will extract the text from a .pdf file and return the
contents as a string.
'''
from io import StringIO
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
from pdfminer.pdfpage import PDFPage
import getopt
class PDFExtractor(object):
    """Extracts the text of a .pdf file as a single string (pdfminer-based)."""

    def pdf_to_text(self, pdf_file, pages=None):
        """Return the text content of *pdf_file*.

        pdf_file -- path to the PDF file
        pages -- optional iterable of page numbers to extract; all pages
                 are processed when omitted

        Fix: the input file handle used to be opened with a bare open()
        and leaked if pdfminer raised mid-parse; it is now managed with a
        'with' block, and the converter is closed in a 'finally'.
        """
        # allows multiple pages to be passed in as a parameter
        num_of_pages = set(pages) if pages else set()
        output = StringIO()
        manager = PDFResourceManager()
        # TextConverter requires a resource manager and an output text stream
        converter = TextConverter(manager, output, laparams=LAParams())
        # the interpreter feeds each page through the converter
        interpreter = PDFPageInterpreter(manager, converter)
        try:
            with open(pdf_file, 'rb') as input_file:
                for page in PDFPage.get_pages(input_file, num_of_pages):
                    interpreter.process_page(page)
        finally:
            converter.close()
        text = output.getvalue()
        output.close()
        return text
|
Python
| 44
| 28.84091
| 73
|
/src/extractor.py
| 0.671494
| 0.671494
|
hjnewman3/PDF-Text-Extractor
|
refs/heads/master
|
'''
PDF Text Extractor Main Module
This module will read every .pdf file within a directory. It will
use the PDFExtractor to extract its contents to a string. That
string will then be passed to TextFormatter where it will be
properly formatted to the desired format.
The module will ask the user for a desired output file name, but
if one is not provided then a default name will be used.
The .exe file must be within the same directory as the .pdf files.
'''
import os
import pymsgbox
from extractor import PDFExtractor
from formatter import TextFormatter
# returns the name of the output file chosen by the user
def get_user_input():
    """Prompt for an output file name; exits the program on Cancel.

    Fix: the None comparison now uses 'is None' instead of '== None'.
    """
    user_input = pymsgbox.prompt('Enter name', default=add_txt_ext(''), title='FBPI .pdf Text Extractor')
    # pymsgbox.prompt returns None when the user clicks Cancel
    if user_input is None:
        exit(0)
    return user_input
# ensure the output file has a name
def add_txt_ext(user_input):
    """Fall back to the default base name '_output' when *user_input* is empty."""
    return '_output' if len(user_input) < 1 else user_input
# main function, runs on program startup
def main():
    """Extract text from every .pdf in the current directory into one .txt file.

    Fix: the output file is now opened in a 'with' block so it is closed
    even when extraction or formatting raises.
    """
    #create a pdf extractor
    extractor = PDFExtractor()
    # create a text formatter
    formatter = TextFormatter()
    # stores the base name of the output file
    user_input = get_user_input()
    with open(add_txt_ext(user_input) + '.txt', 'w') as output_file:
        # iterate through all the files in the current directory
        for entry in os.listdir(os.getcwd()):
            # will only process .pdf files
            if entry.endswith('.pdf'):
                # convert the contents of the pdf file to a string
                name_badge = extractor.pdf_to_text(entry)
                # formats the string as 'name TAB title'
                name_badge = formatter.name_tab_title(name_badge)
                output_file.write(name_badge)
if __name__ == '__main__':
    main()
|
Python
| 69
| 28.144928
| 105
|
/src/main.py
| 0.669816
| 0.668821
|
hjnewman3/PDF-Text-Extractor
|
refs/heads/master
|
'''
Text Formatter Module
This module will format the string input to match the desired output.
'''
class TextFormatter(object):
    """Formats extracted badge text for output."""

    def name_tab_title(self, text):
        """Return 'name<TAB>title<NL>' built from *text*.

        The last non-blank line of *text* is treated as the title and the
        second-to-last non-blank line as the name.
        """
        # strip whitespace from every line and drop the blank ones
        entries = [piece.strip() for piece in text.split('\n') if piece.strip() != '']
        title = entries.pop()
        name = entries.pop()
        return '%s\t%s\n' % (name, title)
|
Python
| 34
| 27.352942
| 69
|
/src/formatter.py
| 0.608921
| 0.608921
|
janetyc/JaneDiary
|
refs/heads/master
|
# -*- coding: utf-8 -*-
from datetime import datetime
#from googletrans import Translator
from translate import Translator
from TwitterSearch import *
import configparser
import random
import re
import io
# weather labels in English and Traditional Chinese (currently unused here)
weather = [u"Sunny", u"Rainy", u"Cloudy"]
weather_tw = [u"晴天",u"雨天", u"陰天"]
# translate-library translator targeting Traditional Chinese
translator= Translator(to_lang='zh-TW')
#translator= Translator()
# Twitter credentials are read from janediary.conf
cf = configparser.ConfigParser()
cf.read('janediary.conf')
ts = TwitterSearch(
    consumer_key = cf.get('twitter', 'consumer_key'),
    consumer_secret = cf.get('twitter', 'consumer_secret'),
    access_token = cf.get('twitter', 'access_token'),
    access_token_secret = cf.get('twitter', 'access_token_secret')
)
data_path = cf.get('data', 'data_path')
# shared search order object, configured per call in get_tweets()
tso = TwitterSearchOrder()
def get_tweets(keyword_list, num=20, lang='en'):
    """Fetch up to *num* non-retweet tweets matching *keyword_list*.

    Uses the module-level TwitterSearch client (ts) and search order (tso).
    Returns a list of tweet dicts; on a search error the error is printed
    and whatever was collected so far is returned.
    """
    collected = []
    try:
        tso.set_keywords(keyword_list)
        tso.set_language(lang)
        count = 0
        for status in ts.search_tweets_iterable(tso):
            if count == num:
                break
            # Skip retweets entirely; they do not count toward the limit.
            if status['retweeted']:
                continue
            collected.append(status)
            count += 1
    except TwitterSearchException as err:
        print(err)
    return collected
def generate_jane_story(num=20, lang='en'):
    """Concatenate the text of tweets mentioning 'jane' into one string.

    Each tweet's text is prefixed with a single space, matching the
    original accumulation order; returns "" when no tweets are found.
    """
    pieces = [u" %s" % tweet['text'] for tweet in get_tweets(['jane'], num, lang)]
    return "".join(pieces)
def clear_up_text(text):
    """Strip retweet markers, URLs and emoji from *text*, then trim it."""
    without_rt = re.sub(r'RT @\S+: ', '', text)      # drop "RT @user: " prefixes
    without_links = re.sub(r'http\S+', '', without_rt)  # drop URLs
    return remove_emoji(without_links).strip()
def remove_emoji(text):
    """Remove common emoji characters from *text*.

    BUG FIX: the previous pattern was written with UTF-16 surrogate pairs
    (e.g. u"\\ud83d\\ude00"). Python 3 strings store emoji as single code
    points, so those surrogate sequences never matched anything and the
    function was a no-op. The character class below uses the real
    code-point ranges instead.
    """
    emoji_pattern = re.compile(
        u"["
        u"\U0001F600-\U0001F64F"  # emoticons
        u"\U0001F300-\U0001F5FF"  # symbols & pictographs
        u"\U0001F680-\U0001F6FF"  # transport & map symbols
        u"\U0001F1E0-\U0001F1FF"  # flags (regional indicators)
        u"]+",
        flags=re.UNICODE)
    return emoji_pattern.sub(r'', text)
def get_translation(input_text, lang='zh-TW'):
    """Translate *input_text* with the module-level translator.

    Returns the translated string, or "" when the translation call fails.
    NOTE: *lang* is currently unused -- the target language is fixed when
    the translator object is constructed at module level.
    """
    try:
        #output = translator.translate(input_text, dest=lang)
        return translator.translate(input_text)
    except Exception as e:
        print(e)
        return ""
def save_story(filename, text):
    """Write *text* to *filename* encoded as UTF-8.

    The explicit f.close() that used to sit inside the `with` block was
    redundant (the context manager already closes the file) and has been
    removed.
    """
    with io.open(filename, 'w', encoding='utf8') as f:
        f.write(text)
if __name__ == '__main__':
    # Build an English "story" from tweets mentioning 'jane', clean it,
    # translate it to Traditional Chinese, prepend a timestamp plus a
    # randomly chosen weather label, and save both versions to one dated
    # file under data_path.
    jane_story_en = ""
    clear_story = ""
    translated_story = ""

    jane_story_en = generate_jane_story(10, 'en')
    clear_story = clear_up_text(jane_story_en)
    print("---")
    print(clear_story)

    # Only the first 500 characters are translated; the remainder of the
    # story is dropped from the translated version.
    translated_story = get_translation(clear_story[:500])
    print("----")
    print(translated_story)

    current_time = datetime.now()
    # Random index into the parallel weather / weather_tw lists.
    weather_idx = random.randrange(3)
    y, m, d, h = current_time.year, current_time.month, current_time.day, current_time.hour

    clear_story = u"%s %s\n%s" % (current_time.strftime('%Y-%m-%d %H:00'), weather[weather_idx], clear_story)
    translated_story = u"%d年%d月%d日%d時 %s\n%s" % (y, m, d, h, weather_tw[weather_idx], translated_story)
    print(clear_story)
    print("\n")
    print(translated_story)

    print("save file")
    # One file per day, named YYYYMMDD.txt, containing both versions.
    save_story("%s/%s.txt" %(data_path, current_time.strftime("%Y%m%d")), clear_story+"\n\n"+translated_story)
    #save_story("%s/%s_en.txt" % (data_path, current_time.strftime("%Y%m%d")), clear_story)
    #save_story("%s/%s_tw.txt" % (data_path, current_time.strftime("%Y%m%d")), translated_story)
|
Python
| 120
| 29.808332
| 110
|
/get_twitter_story.py
| 0.615468
| 0.60411
|
ritikrath/NLP
|
refs/heads/master
|
# BUG FIX: numpy was imported as `pd` (import numpy as pd), an alias that
# was then silently shadowed by the pandas import below. numpy now gets
# its conventional alias.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Load the dataset from the working directory.
dataset = pd.read_csv('music.csv')
|
Python
| 5
| 20
| 32
|
/temp.py
| 0.788462
| 0.788462
|
harshagr18/Gesture-Cursor
|
refs/heads/master
|
import mediapipe as mp
import numpy as np
import cv2

# Open the default webcam (device 0).
cap = cv2.VideoCapture(0)

facmesh = mp.solutions.face_mesh
face = facmesh.FaceMesh(static_image_mode=True, min_tracking_confidence=0.6, min_detection_confidence=0.6)
draw = mp.solutions.drawing_utils

while True:
    # BUG FIX: the original printed the first frame's shape and then hit an
    # unconditional `break`, so every line below it was dead code. The debug
    # print/break pair is removed and the read result is now checked.
    ret, frm = cap.read()
    if not ret:
        break

    # MediaPipe expects RGB input; OpenCV captures BGR.
    rgb = cv2.cvtColor(frm, cv2.COLOR_BGR2RGB)
    op = face.process(rgb)

    if op.multi_face_landmarks:
        for i in op.multi_face_landmarks:
            print(i.landmark[0].y*480)
            draw.draw_landmarks(frm, i, facmesh.FACE_CONNECTIONS, landmark_drawing_spec=draw.DrawingSpec(color=(0, 255, 255), circle_radius=1))

    cv2.imshow("window", frm)

    # ESC (key code 27) exits the loop and releases the camera.
    if cv2.waitKey(1) == 27:
        cap.release()
        cv2.destroyAllWindows()
        break
|
Python
| 30
| 23
| 134
|
/mesh.py
| 0.725
| 0.686111
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
import os
import json
import boto3
import requests
from logger import log, logError
from dotenv import load_dotenv

# Credentials and configuration come from the environment / .env file.
load_dotenv()
# Module-level S3 client shared by upload_file().
s3 = boto3.client("s3",aws_access_key_id=os.environ.get('S3_ACCESS_KEY'),aws_secret_access_key=os.environ.get('S3_SECRET_ACCESS_KEY'))

# Archive-server endpoint and auth token used by register_post().
API_BASE_URL = "https://archive-server.tattle.co.in"
# API_BASE_URL = "https://postman-echo.com/post"
ARCHIVE_TOKEN = os.environ.get('ARCHIVE_TOKEN')
def register_post(data):
    """
    registers a post on archive server

    Serializes *data* to JSON and POSTs it to the archive server's
    /api/posts endpoint, logging the outcome. Failures are logged and
    swallowed so callers are never interrupted (same best-effort
    semantics as before), but the bare `except:` that hid the failure
    reason now records what actually went wrong.
    """
    url_to_post_to = API_BASE_URL+"/api/posts"
    payload = json.dumps(data)
    headers = {
        'token': ARCHIVE_TOKEN,
        'Content-Type': "application/json",
        'cache-control': "no-cache",
    }
    try:
        r = requests.post(url_to_post_to, data=payload, headers=headers)
        if r.status_code==200:
            log('STATUS CODE 200 \n'+json.dumps(r.json(), indent=2))
        else:
            log('STATUS CODE '+str(r.status_code)+'\n '+r.text)
    except Exception as e:
        # Include the exception so failed calls are diagnosable from logs.
        log('error with API call: ' + repr(e))
def upload_file(file_name, s3=s3 ,acl="public-read"):
    # Upload a local file to the TGM S3 bucket and return its public URL.
    # NOTE(review): an identical copy of this function is defined again
    # immediately below; in Python the later definition wins, so this
    # first copy is dead code and should be deleted.
    # NOTE(review): ContentType is set to the raw file extension (e.g.
    # "txt"), not a real MIME type -- confirm whether anything downstream
    # relies on that.
    bucket_name = os.environ.get('TGM_BUCKET_NAME')
    #opens file, reads it, and uploads it to the S3 bucket.
    try:
        with open(file_name, 'rb') as data:
            s3.upload_fileobj(data,bucket_name,file_name,ExtraArgs={"ACL": acl,"ContentType": file_name.split(".")[-1]})
    except:
        logError('ERROR_S3_UPLOAD of '+file_name)
    # NOTE(review): the URL is returned even when the upload failed above.
    file_url = "https://s3.ap-south-1.amazonaws.com/"+bucket_name+"/"+file_name
    return file_url
def upload_file(file_name, s3=s3, acl="public-read"):
    """Upload a local file to the TGM S3 bucket and return its public URL.

    Fixes: ContentType used to be set to the raw file extension (e.g.
    "jpeg"), which is not a MIME type; it is now derived with mimetypes.
    The bare `except:` is narrowed to Exception. As before, failures are
    logged and the URL is still returned -- callers treat this as
    best-effort and expect a string back.
    """
    import mimetypes  # local import: keeps the module's top imports unchanged
    bucket_name = os.environ.get('TGM_BUCKET_NAME')
    # Proper MIME type (falls back to a generic binary type).
    content_type = mimetypes.guess_type(file_name)[0] or 'application/octet-stream'
    # Open the file and stream it to the S3 bucket.
    try:
        with open(file_name, 'rb') as data:
            s3.upload_fileobj(data, bucket_name, file_name,
                              ExtraArgs={"ACL": acl, "ContentType": content_type})
    except Exception:
        logError('ERROR_S3_UPLOAD of '+file_name)
    # NOTE: the URL is returned even if the upload failed (pre-existing behaviour).
    file_url = "https://s3.ap-south-1.amazonaws.com/"+bucket_name+"/"+file_name
    return file_url
|
Python
| 60
| 30.15
| 134
|
/tattle_helper.py
| 0.688437
| 0.672912
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
# Stand-alone example of POSTing a payload to the archive server.
# NOTE(review): this snippet is not runnable as-is -- `d`, `json`,
# `requests` and `logging` are assumed to be defined/imported elsewhere.
# NOTE(review): a real-looking token is committed in source below; it
# should be rotated and loaded from the environment instead.
token = "78a6fc20-fa83-11e9-a4ad-d1866a9a3c7b" # add your token here
url = "<base-api-url>/api/posts"
try:
    payload = d
    payload = json.dumps(payload)
    headers = {
        'token': token,
        'Content-Type': "application/json",
        'cache-control': "no-cache",
    }
    r = requests.post(url, data=payload, headers=headers)
    if r.ok:
        print ('success')
    else:
        print ('something went wrong')
except:
    logging.exception('error in POST request')
    raise
# Example payload shape expected by the server (bare expression, for reference only):
{
    "type" : "image", # can be image, text, video
    "data" : "",
    "filename": "4bf4b1cc-516b-469d-aa38-be6762d417a5", #filename you put on s3
    "userId" : 169 # for telegram_bot this should be 169
}
|
Python
| 26
| 26.461538
| 79
|
/post_request.py
| 0.56662
| 0.530154
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
# Smoke-test script: upload a local file via tattle_helper and print the
# resulting S3 URL. Requires denny.txt in the working directory and S3
# credentials in the environment.
from tattle_helper import register_post, upload_file

# Example payload for register_post (currently unused -- see commented call below).
data = {
    "type" : "image",
    "data" : "",
    "filename": "asdf",
    "userId" : 169
}

response = upload_file(file_name='denny.txt')
print(response)
# register_post(data)
|
Python
| 13
| 16.923077
| 52
|
/test.py
| 0.625
| 0.612069
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
import os
import sys
import json
import requests
import telegram
import logging
import re
from threading import Thread
from telegram.ext import CommandHandler, MessageHandler, Updater, Filters, InlineQueryHandler
from telegram import InlineQueryResultArticle, InputTextMessageContent
from telegram.ext.dispatcher import run_async
from dotenv import load_dotenv
from pymongo import MongoClient
from logger import log, logError
from tattle_helper import upload_file

# loads all environment variables
load_dotenv()
log('STARTING APP v1')

TOKEN = os.environ.get('ACCESS_TOKEN')
PORT = int(os.environ.get('PORT', '8443'))
# SECURITY FIX: the bot token was previously printed to stdout here
# (print(TOKEN)), leaking a credential into process logs; removed.
# logging.basicConfig(filename='telegram_bot_log.log',filemode='a',format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)
def insert_document(document, required_collection):
    """Insert *document* into the named Mongo collection; returns the InsertOneResult."""
    return db[required_collection].insert_one(document)
def find_document(query, required_collection):
    """Return the first document matching *query* in the named collection, or None."""
    return db[required_collection].find_one(query)
def update_document(find_query, update_query, required_collection, upsert=False):
    """Apply *update_query* to the first document matching *find_query*.

    NOTE(review): *upsert* is forwarded positionally to pymongo's
    update_one -- confirm it lands on the `upsert` parameter for the
    pymongo version in use.
    """
    return db[required_collection].update_one(find_query, update_query, upsert)
def delete_document(find_query, required_collection):
    """Delete the first document matching *find_query* from the named collection."""
    return db[required_collection].delete_one(find_query)
# Calls for Database modification and reads end
@run_async
def start(update, context):
    """Handler for /start: replies with the bot's usage instructions."""
    # start message
    context.bot.send_message(chat_id=update.effective_chat.id, text="Hey! \n\nI'm the Tattle Bot. Here are some instructions to use me:\n\n1. You can send whatever content to me that you'd like. All mediums : Text, Video, and Photos are allowed.\n2. You can tag your content using hashtags. When uploading photos or videos you can mention the tags in the caption, with text you can just tag it at the end or in the beginning(anywhere else in the text will also work).\n3. You can edit your messages after you've sent them, we'll update them in our database accordingly.\n 4. In case you miss tagging a message, you can reply to that message and insert the tags required. Only tags will be extracted, so please don't write text while replying to messages.")
def determine_type(message_json):
    """Return which kind of content the message carries.

    Checks text, photo, video and document in that order and returns the
    first non-empty one's name; returns '' when none is present.
    """
    for attr in ('text', 'photo', 'video', 'document'):
        if getattr(message_json, attr):
            return attr
    return ''
def entity_extraction(all_entities, message_content):
    """Extract the unique hashtags from a message via its entity metadata.

    Each entity with type 'hashtag' is sliced out of *message_content*
    using its offset/length. Returns a de-duplicated list of tags, or
    None when there are no entities or no hashtags among them.
    """
    if not all_entities:
        return None
    tags = set()
    for entity in all_entities:
        if entity['type'] == 'hashtag':
            start = entity['offset']
            tags.add(message_content[start:start + entity['length']])
    return list(tags) if tags else None
def new_tags(message_json, current_document, all_tags):
    """Overwrite the reply_tags of the replied-to message with *all_tags*.

    NOTE: *current_document* is accepted for interface compatibility but
    is not used.
    """
    update_document(
        {'message_id': message_json.reply_to_message.message_id},
        {"$set": {"reply_tags": all_tags}},
        'messages')
def error_message(message_json=None, context=None):
    """Best-effort notification that tag registration failed.

    BUG FIX: the original referenced an undefined name `context` and was
    invoked with no arguments (see reply_to_messages), so every call
    raised. Both parameters are now optional (backward compatible with
    the old one-argument signature) and the send is skipped when either
    is missing.
    """
    if context is not None and message_json is not None:
        context.bot.send_message(chat_id=message_json.chat.id,
                                 text="Something went wrong with registering these tags, apologies for the same.")
def reply_to_messages(message_json, edit_flag):
    """Attach the hashtags found in a reply to the message being replied to.

    NOTE(review): *edit_flag* is accepted but never used in this body.
    """
    all_tags = entity_extraction(message_json.entities, message_json.text)
    if(all_tags is not None):
        # first finds the document that the reply is being done to
        current_document = find_document(
            {'message_id': message_json.reply_to_message.message_id}, 'messages')
        try:
            # add reply tags with a new key called reply_tags
            new_tags(message_json, current_document, all_tags)
        except:
            # or, throw an error message and log
            # NOTE(review): error_message() is called with no arguments even
            # though its definition takes parameters -- as written this call
            # itself raises, and the bare `raise` below re-raises.
            error_message()
            raise
def edit_message(message_json, final_dict, content_type, context):
    """Re-archive an edited message.

    Re-processes the edited content (re-uploading text to S3 under the
    same key so it replaces the old object), carries over any tags /
    reply_tags from the stored document, then deletes the old document
    and inserts the rebuilt one.
    """
    tags = []
    # check content type before processing the data
    if(content_type == 'text'):
        # In case of edits, we need to replace file on S3. Replacing happens automatically as long as file name is same.
        file_name = str(message_json.message_id) + '.txt'
        with open(file_name, 'w') as open_file:
            open_file.write(message_json['text'])
        upload_file(file_name)
        os.remove(file_name)
        final_dict = process_text(
            message_json, final_dict, message_json['text'], False)
    else:
        final_dict = process_media(
            message_json, final_dict, content_type, context, False)
    # in case message is being edited, we first find the document being edited
    current_document = find_document(
        {'message_id': message_json.message_id}, 'messages')
    # we check if the document had any existing tags, if so we store them before deleting the document
    # FLAW IN CODE : If existing tags are being edited, it doesn't reflect this way. NEED TO FIX.
    try:
        tags = current_document['tags']
    except KeyError:
        tags = None
    try:
        reply_tags = current_document['reply_tags']
    except KeyError:
        reply_tags = None
    if(reply_tags is not None):
        final_dict['reply_tags'] = reply_tags
    # add tags to final dict for new, edited document
    if(tags is not None):
        final_dict['tags'] = tags
    # delete the document
    delete_document({'message_id': message_json.message_id}, 'messages')
    # insert edited document
    insert_document(final_dict, 'messages')
def process_text(message_json, final_dict, message_content, caption_flag):
    """Extract hashtags and cleaned text from a message or caption.

    Tags (if any) go into final_dict['tags']; the content with hashtags
    stripped and whitespace collapsed goes into final_dict['caption']
    when *caption_flag* is set, else final_dict['text']. Returns the
    updated dict.
    """
    # Captions carry their entities under a different key.
    entities_key = 'caption_entities' if caption_flag else 'entities'
    all_tags = entity_extraction(message_json[entities_key], message_content)
    if all_tags is not None:
        final_dict['tags'] = all_tags
    if message_content:
        # Remove the hashtags themselves, then collapse the extra spacing
        # they leave behind, then trim the ends.
        without_tags = re.sub(r'#\w+', '', message_content)
        cleaned = re.sub(' +', ' ', without_tags).strip()
        final_dict['caption' if caption_flag else 'text'] = cleaned
    return final_dict
# just for testing
# BASE_URL = "http://archive-telegram-bot.tattle.co.in.s3.amazonaws.com/"
# print("{}{}".format(BASE_URL, file_name))
def make_post_request(dict_to_post):
    """POST *dict_to_post* as JSON to the archive server's /api/posts endpoint.

    The payload and the response object are logged/printed; nothing is
    returned.
    """
    log('***')
    log(dict_to_post)
    api_base = "https://archive-server.tattle.co.in"
    access_token = os.environ.get('ARCHIVE_TOKEN')
    endpoint = api_base + "/api/posts"
    body = json.dumps(dict_to_post)
    headers = {
        'token': access_token,
        'Content-Type': "application/json",
        'cache-control': "no-cache",
    }
    response = requests.post(endpoint, data=body, headers=headers)
    print('API response')
    print(response)
def construct_dict(file_name, file_type):
    """Build the archive-server post payload for an uploaded file.

    userId 169 is the fixed identifier used for the telegram bot.
    """
    return {
        "type": file_type,
        "data": "",
        "filename": file_name,
        "userId": 169,
    }
def process_media(message_json, final_dict, content_type, context, creation_flag):
    """Record a photo/video message's metadata and (for new messages)
    download it, upload it to S3 and register it with the archive server.

    NOTE(review): only the 'photo' and 'video' branches assign file_id /
    file_name / post_request_type -- calling this with any other
    content_type and creation_flag=True would hit an unbound local.
    """
    # check if content type is photo, and constructs dict and file_name appropriately
    if(content_type == 'photo'):
        final_dict['photo'] = [{'file_id': each_photo.file_id, 'width': each_photo.width,
                                'height': each_photo.height, 'file_size': each_photo.file_size} for each_photo in message_json.photo]
        # Last entry is the largest resolution of the photo.
        file_id = message_json.photo[-1].file_id
        file_name = str(message_json.message_id)+'.jpeg'
        post_request_type = 'image'
    # same with video as above
    elif(content_type == 'video'):
        final_dict['video'] = {'file_id': message_json.video.file_id, 'width': message_json.video.width, 'height': message_json.video.height, 'duration': message_json.video.duration, 'thumb': {'file_id': message_json.video.thumb.file_id,
                                                                                                                                                'width': message_json.video.thumb.width, 'height': message_json.video.thumb.height, 'file_size': message_json.video.thumb.file_size}, 'mime_type': message_json.video.mime_type, 'file_size': message_json.video.file_size}
        file_id = message_json.video.file_id
        # File extension derived from the MIME subtype (e.g. video/mp4 -> mp4).
        file_type = str(message_json.video.mime_type).split("/")[-1]
        file_name = str(message_json.message_id)+"."+file_type
        post_request_type = 'video'
    # process_media is only called from two places, one of which is when message is edited. Since we don't want duplicates, we set a flag to differentiate.
    if(creation_flag):
        try:
            new_file = context.bot.get_file(file_id)
            new_file.download(file_name)  # downloads the file
            final_dict['file_name'] = file_name
            file_url = upload_file(file_name)  # uploads to S3
            final_dict['s3_url'] = file_url
            os.remove(file_name)  # removes it from local runtime
            request_dict = construct_dict(file_name, post_request_type)
            make_post_request(request_dict)
        except:
            logging.exception(
                "The file_name when the error happened is: {}".format(file_name))
    # process any caption or text found
    final_dict = process_text(message_json, final_dict,
                              message_json.caption, True)
    return final_dict
@run_async
def storing_data(update, context):
    """Main message handler: archive every incoming/edited message.

    Routes the update to the edit/reply helpers when applicable,
    otherwise uploads the content (text file or media) to S3, registers
    it with the archive server, and stores a document in Mongo.
    """
    log(update)
    final_dict = {}
    # print(update)
    # selects just the effective_message part
    relevant_section = update.effective_message
    # some general data appended to each dict
    final_dict['message_id'] = relevant_section['message_id']
    final_dict['date'] = relevant_section['date']
    # final_dict['from'] = {'id':relevant_section.from_user.id,'type':relevant_section.chat.type,'first_name':relevant_section.from_user.first_name,'last_name':relevant_section.from_user.last_name,'username':relevant_section.from_user.username,'is_bot':relevant_section.from_user.is_bot}
    content_type = determine_type(relevant_section)
    final_dict['content_type'] = content_type
    # checks if the request is that of an edition
    if(relevant_section.edit_date):
        # if yes, checks if the edited message was replying to another message
        if(relevant_section.reply_to_message):
            # if yes, then deals with it by setting edit flag to True
            reply_to_messages(relevant_section, True)
            return
        else:
            # else, just edits the message normally
            edit_message(relevant_section, final_dict, content_type, context)
            return
    # if the message is a reply, then respond appropriately
    if(relevant_section.reply_to_message):
        # edit flag is set to false because we're just handling simple reply
        reply_to_messages(relevant_section, False)
        return
    if(content_type == 'text'):
        # creates file with message ID, then writes the text into the file and uploads it to S3
        try:
            file_name = str(relevant_section.message_id) + '.txt'
            with open(file_name, 'w') as open_file:
                open_file.write(relevant_section['text'])
            file_url = upload_file(file_name)
            final_dict['s3_url'] = file_url
            os.remove(file_name)
            request_dict = construct_dict(file_name, content_type)
            r = make_post_request(request_dict)
        except Exception as e:
            logging.exception(
                "The file_name when the error happened is: {}".format(file_name))
            logging.exception(e)
        # if new text message, process it and then insert it in the database
        final_dict = process_text(
            relevant_section, final_dict, relevant_section['text'], False)
        insert_document(final_dict, 'messages')
    else:
        final_dict = process_media(
            relevant_section, final_dict, content_type, context, True)
        insert_document(final_dict, 'messages')
    # Acknowledge to the user that the message was archived.
    context.bot.send_message(
        chat_id=update.effective_chat.id, text='message archived')
# context.bot.send_message(chat_id=update.effective_chat.id, text=update.message.text)
def stop_and_restart():
    """Gracefully stop the Updater and replace the current process with a new one"""
    # os.execl re-executes the current interpreter with the same argv,
    # replacing this process image in place.
    updater.stop()
    os.execl(sys.executable, sys.executable, *sys.argv)
def restart(update, context):
    """Handler for /r: restart the bot (registration below restricts it to one user)."""
    # Restart on a separate thread so the confirmation reply can be sent first.
    update.message.reply_text('Bot is restarting...')
    Thread(target=stop_and_restart).start()
# Connect to MongoDB Atlas with credentials from the environment; `db` is
# the module-level handle used by the document helper functions above.
try:
    client = MongoClient("mongodb+srv://"+os.environ.get("TGM_DB_USERNAME")+":"+os.environ.get("TGM_DB_PASSWORD") +
                         "@tattle-data-fkpmg.mongodb.net/test?retryWrites=true&w=majority&ssl=true&ssl_cert_reqs=CERT_NONE")
    db = client[os.environ.get("TGM_DB_NAME")]
except Exception as db_err:
    # BUG FIX: this previously read `except error_message:`, which names a
    # function -- not an exception class -- and would itself raise
    # TypeError if the body ever failed.
    print('error connecting to db')
    print(db_err)

updater = Updater(token=TOKEN, use_context=True, workers=32)
dispatcher = updater.dispatcher

start_handler = CommandHandler('start', start)
storing_data_handler = MessageHandler(Filters.all, storing_data)
# /r restart command is restricted to the bot owner's username.
restart_handler = CommandHandler(
    'r', restart, filters=Filters.user(username='@thenerdyouknow'))

dispatcher.add_handler(restart_handler)
dispatcher.add_handler(start_handler)
dispatcher.add_handler(storing_data_handler)

# updater.start_webhook(listen="0.0.0.0",
#                       port=PORT,
#                       url_path=TOKEN)
# updater.bot.set_webhook("https://services-server.tattle.co.in/" + TOKEN)

# Long-polling mode; idle() blocks until the process is stopped.
updater.start_polling()
updater.idle()

log('STARTING SERVER v1.0')
|
Python
| 354
| 40.141243
| 756
|
/prototype.py
| 0.654285
| 0.652362
|
tattle-made/archive-telegram-bot
|
refs/heads/master
|
from datetime import datetime
def log(data):
    """Emit *data* to stdout beneath a timestamped '----' separator line."""
    print(f"---- {datetime.now()} ----")
    print(data)
def logError(error):
    """Emit *error* to stdout beneath a timestamped '****' separator line."""
    print(f"**** {datetime.now()} ****")
    print(error)
|
Python
| 10
| 17.6
| 41
|
/logger.py
| 0.556757
| 0.556757
|
madjar/blog
|
refs/heads/master
|
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
# Pelican static-site configuration for the blog.
from __future__ import unicode_literals

AUTHOR = 'Georges Dubus'
SITENAME = 'Compile-toi toi même'
SITESUBTITLE = u'(Georges Dubus)'  # TODO: remove in next version ?
SITEURL = ''
ABSOLUTE_SITEURL = SITEURL  # TODO: remove

TIMEZONE = 'Europe/Paris'
DEFAULT_LANG = 'en'
LOCALE = ('en_US.UTF-8', 'fr_FR.UTF8')  # TODO: still relevant?

THEME = 'stolenidea'

# Feed generation is usually not desired when developing
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None

MENUITEMS = (
    ('Archives', SITEURL + '/archives.html'),
    ('Tags', SITEURL + '/tags.html')
)

# Social widget
SOCIAL = (
    ('Github', 'https://github.com/madjar'),
    ('Twitter', 'http://twitter.com/georgesdubus'),
    ('Google+', 'https://plus.google.com/u/0/104750974388692229541'),
)

# TWITTER_USERNAME = 'georgesdubus'

DEFAULT_PAGINATION = 10  # TODO: check whether anything needs changing for this

# NOTE: the parentheses do not make a tuple -- PATH is the string 'content'.
PATH = ('content')
STATIC_PATHS = ['CNAME', 'images', 'slides', '.well-known', '_config.yml']
ARTICLE_EXCLUDES = ['slides']

# TODO : use buildout to handle the plugin deps ?
PLUGIN_PATHS = ['plugins']
PLUGINS = ['pelican_youtube']

# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
|
Python
| 48
| 27.125
| 96
|
/pelicanconf.py
| 0.648148
| 0.628148
|
ericfourrier/auto-clean
|
refs/heads/develop
|
import seaborn as sns
import matplotlib.pyplot as plt
def plot_corrmatrix(df, square=True, linewidths=0.1, annot=True,
                    size=None, figsize=(12, 9), *args, **kwargs):
    """
    Plot correlation matrix of the dataset
    see doc at https://stanford.edu/~mwaskom/software/seaborn/generated/seaborn.heatmap.html#seaborn.heatmap
    """
    sns.set(context="paper", font="monospace")
    # Create the figure the heatmap will be drawn into.
    fig, axis = plt.subplots(figsize=figsize)
    correlations = df.corr()
    sns.heatmap(correlations, vmax=1, square=square, linewidths=linewidths,
                annot=annot, annot_kws={"size": size}, *args, **kwargs)
|
Python
| 15
| 38.333332
| 108
|
/autoc/utils/corrplot.py
| 0.662712
| 0.652542
|
ericfourrier/auto-clean
|
refs/heads/develop
|
from setuptools import setup, find_packages
def readme():
    """Return the contents of README.md from the current directory."""
    with open('README.md') as fh:
        return fh.read()
# Package metadata for the autoc distribution (setuptools entry point).
setup(name='autoc',
      version="0.1",
      description='autoc is a package for data cleaning exploration and modelling in pandas',
      long_description=readme(),
      author=['Eric Fourrier'],
      author_email='ericfourrier0@gmail.com',
      license='MIT',
      url='https://github.com/ericfourrier/auto-cl',
      packages=find_packages(),
      test_suite='test',
      keywords=['cleaning', 'preprocessing', 'pandas'],
      install_requires=[
          'numpy>=1.7.0',
          'pandas>=0.15.0',
          'seaborn>=0.5',
          'scipy>=0.14']
      )
|
Python
| 24
| 26.75
| 93
|
/setup.py
| 0.572072
| 0.551051
|
ericfourrier/auto-clean
|
refs/heads/develop
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Get data from https://github.com/ericfourrier/autoc-datasets
"""
import pandas as pd
def get_dataset(name, *args, **kwargs):
    """Fetch '<name>.csv' from the online autoc-datasets repository
    (https://github.com/ericfourrier/autoc-datasets; requires internet).

    Parameters
    ----------
    name : str
        Name of the dataset 'name.csv'
    """
    url = ("https://raw.githubusercontent.com/ericfourrier/autoc-datasets"
           "/master/{0}.csv").format(name)
    return pd.read_csv(url, *args, **kwargs)
|
Python
| 22
| 24.636364
| 102
|
/autoc/utils/getdata.py
| 0.654255
| 0.650709
|
ericfourrier/auto-clean
|
refs/heads/develop
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Automated test suites with unittest
run "python -m unittest -v test" in the module directory to run the tests
The clock decorator in utils will measure the run time of the test
"""
#########################################################
# Import Packages and helpers
#########################################################
import unittest
# internal helpers
# from autoc.utils.helpers import clock, create_test_df, removena_numpy, cserie
from autoc.utils.helpers import random_pmf, clock, create_test_df, cserie, simu, removena_numpy
from autoc.utils.getdata import get_dataset
from autoc.explorer import DataExploration
from autoc.naimputer import NaImputer
from autoc.outliersdetection import OutliersDetection
import pandas as pd
import numpy as np
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists into one flat list.

    Non-list inputs are returned wrapped in a single-element list.
    Rewritten from a lambda bound to a name (PEP 8 discourages that
    form); the commented-out duplicate copy was removed.
    """
    if isinstance(x, list):
        return [item for sub in x for item in flatten_list(sub)]
    return [x]
#########################################################
# Writing the tests
#########################################################
class TestDataExploration(unittest.TestCase):
    """Tests for autoc.explorer.DataExploration on the create_test_df() fixture.

    NOTE(review): `test_count_unique` was defined twice; the second
    definition silently shadowed the first, so the first never ran. The
    first copy is renamed `test_count_unique_basic` below so both execute.
    """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls._test_df = create_test_df()
        cls._test_dc = DataExploration(data=cls._test_df)

    @clock
    def test_to_lowercase(self):
        df_lower = self._test_dc.to_lowercase()
        self.assertNotEqual(id(df_lower), id(self._test_dc.data))
        self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['c'] * 300)==
                        df_lower.loc[:, 'character_variable_up1']).all())
        self.assertTrue((pd.Series(['a'] * 500 + ['b'] * 200 + ['d'] * 300)==
                        df_lower.loc[:, 'character_variable_up2']).all())

    @clock
    def test_copy(self):
        exploration_copy = DataExploration(data=create_test_df(), copy=True)
        self.assertEqual(id(self._test_df), id(self._test_dc.data))
        self.assertNotEqual(id(self._test_df), id(exploration_copy.data))

    @clock
    def test_cserie(self):
        char_var = cserie(self._test_dc.data.dtypes == "object")
        self.assertIsInstance(char_var, list)
        self.assertIn('character_variable', char_var)

    @clock
    def test_removena_numpy(self):
        test_array = np.array([np.nan, 1, 2, np.nan])
        self.assertTrue((removena_numpy(test_array) == np.array([1, 2])).all())

    @clock
    def test_sample_df(self):
        self.assertEqual(len(self._test_dc.sample_df(pct=0.061)),
                         0.061 * float(self._test_dc.data.shape[0]))

    @clock
    def test_nrow(self):
        self.assertEqual(self._test_dc._nrow, self._test_dc.data.shape[0])

    @clock
    def test_col(self):
        self.assertEqual(self._test_dc._ncol, self._test_dc.data.shape[1])

    @clock
    def test_is_numeric(self):
        self.assertTrue(self._test_dc.is_numeric("num_variable"))
        self.assertTrue(self._test_dc.is_numeric("many_missing_70"))
        self.assertFalse(self._test_dc.is_numeric("character_variable"))

    @clock
    def test_is_int_factor(self):
        self.assertFalse(self._test_dc.is_int_factor("num_variable"))
        self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.01))
        self.assertTrue(self._test_dc.is_int_factor("int_factor_10", 0.1))
        self.assertFalse(self._test_dc.is_int_factor("int_factor_10", 0.005))
        self.assertFalse(self._test_dc.is_int_factor("character_variable"))

    @clock
    def test_where_numeric(self):
        self.assertEqual(cserie(self._test_dc.where_numeric().all()), self._test_dc._dfnum)

    @clock
    def test_total_missing(self):
        self.assertEqual(self._test_dc.total_missing,
                         self._test_dc.data.isnull().sum().sum())

    @clock
    def test_None_count(self):
        nacolcount = self._test_dc.nacolcount()
        self.assertEqual(nacolcount.loc['None_100', 'Napercentage'], 0.1)
        self.assertEqual(nacolcount.loc['None_100', 'Nanumber'], 100)
        self.assertEqual(nacolcount.loc['None_na_200', 'Napercentage'], 0.2)
        self.assertEqual(nacolcount.loc['None_na_200', 'Nanumber'], 200)

    @clock
    def test_nacolcount_capture_na(self):
        nacolcount = self._test_dc.nacolcount()
        self.assertEqual(nacolcount.loc['na_col', 'Napercentage'], 1.0)
        self.assertEqual(nacolcount.loc['many_missing_70', 'Napercentage'], 0.7)

    @clock
    def test_nacolcount_is_type_dataframe(self):
        self.assertIsInstance(self._test_dc.nacolcount(),
                              pd.core.frame.DataFrame)

    @clock
    def test_narowcount_capture_na(self):
        narowcount = self._test_dc.narowcount()
        self.assertEqual(sum(narowcount['Nanumber'] > 0), self._test_dc._nrow)
    #
    # @clock
    # def test_detect_other_na(self):
    #     other_na = self._test_dc.detect_other_na()
    #     self.assertIsInstance(narowcount, pd.core.frame.DataFrame)

    @clock
    def test_narowcount_is_type_dataframe(self):
        narowcount = self._test_dc.narowcount()
        self.assertIsInstance(narowcount, pd.core.frame.DataFrame)

    @clock
    def test_manymissing_capture(self):
        manymissing = self._test_dc.manymissing(0.7)
        self.assertIsInstance(manymissing, list)
        self.assertIn('many_missing_70', manymissing)
        self.assertIn('na_col', manymissing)

    @clock
    def test_nacols_full(self):
        nacols_full = self._test_dc.nacols_full
        self.assertIsInstance(nacols_full, list)
        self.assertIn('na_col', nacols_full)

    @clock
    def test_narows_full(self):
        test_df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)), columns=list('ABCD'))
        test_df.loc[99, :] = np.nan
        self.assertIn(99, DataExploration(test_df).narows_full)
        self.assertNotIn(1, test_df)

    @clock
    def test_constant_col_capture(self):
        constantcol = self._test_dc.constantcol()
        self.assertIsInstance(constantcol, list)
        self.assertIn('constant_col', constantcol)
        self.assertIn('constant_col_num', constantcol)
        self.assertIn('na_col', constantcol)

    @clock
    def test_count_unique_basic(self):
        # NOTE(review): renamed from test_count_unique -- the identically
        # named test further down shadowed this one, so it never executed.
        count_unique = self._test_dc.count_unique()
        self.assertIsInstance(count_unique, pd.Series)
        self.assertEqual(count_unique.id, 1000)
        self.assertEqual(count_unique.constant_col, 1)
        self.assertEqual(count_unique.character_factor, 7)

    @clock
    def test_dfchar_check_col(self):
        dfchar = self._test_dc._dfchar
        self.assertIsInstance(dfchar, list)
        self.assertNotIn('num_variable', dfchar)
        self.assertIn('character_factor', dfchar)
        self.assertIn('character_variable', dfchar)
        self.assertNotIn('many_missing_70', dfchar)

    @clock
    def test_dfnum_check_col(self):
        dfnum = self._test_dc._dfnum
        self.assertIsInstance(dfnum, list)
        self.assertIn('num_variable', dfnum)
        self.assertNotIn('character_factor', dfnum)
        self.assertNotIn('character_variable', dfnum)
        self.assertIn('many_missing_70', dfnum)

    @clock
    def test_factors_check_col(self):
        factors = self._test_dc.factors()
        self.assertIsInstance(factors, list)
        self.assertNotIn('num_factor', factors)
        self.assertNotIn('character_variable', factors)
        self.assertIn('character_factor', factors)

    @clock
    def test_detectkey_check_col(self):
        detectkey = self._test_dc.detectkey()
        self.assertIsInstance(detectkey, list)
        self.assertIn('id', detectkey)
        self.assertIn('member_id', detectkey)

    @clock
    def test_detectkey_check_col_dropna(self):
        detectkeyna = self._test_dc.detectkey(dropna=True)
        self.assertIn('id_na', detectkeyna)
        self.assertIn('id', detectkeyna)
        self.assertIn('member_id', detectkeyna)

    @clock
    def test_findupcol_check(self):
        findupcol = self._test_dc.findupcol()
        self.assertIn(['id', 'duplicated_column'], findupcol)
        self.assertNotIn('member_id', flatten_list(findupcol))

    @clock
    def test_count_unique(self):
        count_unique = self._test_dc.count_unique()
        self.assertIsInstance(count_unique, pd.Series)
        self.assertEqual(count_unique.id, len(self._test_dc.data.id))
        self.assertEqual(count_unique.constant_col, 1)
        self.assertEqual(count_unique.num_factor, len(
            pd.unique(self._test_dc.data.num_factor)))

    @clock
    def test_structure(self):
        structure = self._test_dc.structure()
        self.assertIsInstance(structure, pd.DataFrame)
        self.assertEqual(len(self._test_dc.data),
                         structure.loc['na_col', 'nb_missing'])
        self.assertEqual(len(self._test_dc.data), structure.loc[
            'id', 'nb_unique_values'])
        self.assertTrue(structure.loc['id', 'is_key'])

    @clock
    def test_nearzerovar(self):
        nearzerovar = self._test_dc.nearzerovar(save_metrics=True)
        self.assertIsInstance(nearzerovar, pd.DataFrame)
        self.assertIn('nearzerovar_variable', cserie(nearzerovar.nzv))
        self.assertIn('constant_col', cserie(nearzerovar.nzv))
        self.assertIn('na_col', cserie(nearzerovar.nzv))
class TestNaImputer(unittest.TestCase):
    """Tests for autoc.naimputer.NaImputer.

    NOTE(review): `test_fillna_serie` was defined twice; the second copy
    shadowed the first, which therefore never ran. The first copy is
    renamed `test_fillna_serie_with_series` so both execute now --
    confirm the Series-based call is still a supported API.
    """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls._test_na = NaImputer(data=create_test_df())

    @clock
    def test_fillna_serie_with_series(self):
        # NOTE(review): renamed from test_fillna_serie (was shadowed below).
        test_serie = pd.Series([1, 3, np.nan, 5])
        self.assertIsInstance(
            self._test_na.fillna_serie(test_serie), pd.Series)
        self.assertEqual(self._test_na.fillna_serie(test_serie)[2], 3.0)

    @clock
    def test_fillna_serie(self):
        test_char_variable = self._test_na.fillna_serie('character_variable_fillna')
        test_num_variable = self._test_na.fillna_serie('numeric_variable_fillna')
        self.assertTrue(test_char_variable.notnull().any())
        self.assertTrue(test_num_variable.notnull().any())
        self.assertTrue((pd.Series(
            ['A'] * 300 + ['B'] * 200 + ['C'] * 200 + ['A'] * 300) == test_char_variable).all())
        self.assertTrue(
            (pd.Series([1] * 400 + [3] * 400 + [2] * 200) == test_num_variable).all())

    @clock
    def test_fill_low_na(self):
        df_fill_low_na = self._test_na.basic_naimputation(columns_to_process=['character_variable_fillna',
                                                                              'numeric_variable_fillna'])
        df_fill_low_na_threshold = self._test_na.basic_naimputation(threshold=0.4)
        self.assertIsInstance(df_fill_low_na, pd.DataFrame)
        self.assertIsInstance(df_fill_low_na_threshold, pd.DataFrame)
        self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
            'A'] * 300) == df_fill_low_na.character_variable_fillna).all())
        self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
                         == df_fill_low_na.numeric_variable_fillna).all())
        self.assertTrue((pd.Series(['A'] * 300 + ['B'] * 200 + ['C'] * 200 + [
            'A'] * 300) == df_fill_low_na_threshold.character_variable_fillna).all())
        self.assertTrue((pd.Series([1] * 400 + [3] * 400 + [2] * 200)
                         == df_fill_low_na_threshold.numeric_variable_fillna).all())
        self.assertTrue(
            sum(pd.isnull(df_fill_low_na_threshold.many_missing_70)) == 700)
class TestOutliersDetection(unittest.TestCase):
    """ Tests for 1d outlier detection with the strong cutoff """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls.data = create_test_df()
        cls.outlier_d = OutliersDetection(cls.data)

    def _check_strong_cutoff(self, colname):
        """ shared assertions: 1 and 100 are flagged, 2 and 10 are not """
        df_outliers = self.outlier_d.outlier_detection_serie_1d(
            colname, self.outlier_d.strong_cutoff)
        flagged = cserie(df_outliers.loc[:, 'is_outlier'] == 1)
        self.assertIn(1, flagged)
        self.assertNotIn(10, flagged)
        self.assertIn(100, flagged)
        self.assertNotIn(2, flagged)

    @clock
    def test_outlier_detection_serie_1d(self):
        self._check_strong_cutoff('outlier')

    @clock
    def test_outlier_detection_serie_1d_with_na(self):
        self._check_strong_cutoff('outlier_na')
class TestHelper(unittest.TestCase):
    """ Tests for the small sampling helpers (random_pmf, simu) """

    @classmethod
    def setUpClass(cls):
        """ creating test data set for the test module """
        cls.data = create_test_df()

    @clock
    def test_random_pmf(self):
        """ random_pmf returns a distribution of the requested size summing to 1 """
        self.assertAlmostEqual(len(random_pmf(10)), 10)
        self.assertAlmostEqual(random_pmf(10).sum(), 1)

    @clock
    def test_simu(self):
        """ with a degenerate pmf (all mass on 'B') every sample must be 'B' """
        # removed the unused local ``pmf = random_pmf(4)`` (dead code)
        samples_unique = simu((np.array(['A', 'B']), np.array([0, 1])), 10)
        self.assertTrue((samples_unique == 'B').all())
# class TestGetData(unittest.TestCase):
#
# @clock
# def test_getdata_titanic(self):
# """ Test if downloading titanic data is working """
# titanic = get_dataset('titanic')
# self.assertIsInstance(titanic, pd.DataFrame)
# self.assertEqual(titanic.shape[0], 891)
# self.assertEqual(titanic.shape[1], 15)
# Adding new tests sets
# def suite():
# suite = unittest.TestSuite()
# suite.addTest(TestPandasPatch('test_default_size'))
# return suite
# Other solution than calling main
#suite = unittest.TestLoader().loadTestsFromTestCase(TestPandasPatch)
#unittest.TextTestRunner(verbosity = 1 ).run(suite)
# Run the whole test suite; exit=False avoids calling sys.exit so the module
# can also be run from an interactive session.
if __name__ == "__main__":
    unittest.main(exit=False)
|
Python
| 364
| 37.843407
| 106
|
/test.py
| 0.6173
| 0.600255
|
ericfourrier/auto-clean
|
refs/heads/develop
|
from autoc.explorer import DataExploration, pd
from autoc.utils.helpers import cserie
import seaborn as sns
import matplotlib.pyplot as plt
#from autoc.utils.helpers import cached_property
from autoc.utils.corrplot import plot_corrmatrix
import numpy as np
from scipy.stats import ttest_ind
from scipy.stats.mstats import ks_2samp
def missing_map(df, nmax=100, verbose=True, yticklabels=False, figsize=(15, 11), *args, **kwargs):
    """ Returns a missing-values map plot (like the missmap of the Amelia II
    package in R): a heatmap of df.isnull().

    Parameters
    ----------
    df : pandas.DataFrame
        the dataframe whose missing cells are plotted
    nmax : int
        maximum number of rows to plot; larger frames are sampled down
    yticklabels : bool
        forwarded to seaborn.heatmap
    figsize : tuple
        size of the created matplotlib figure

    Extra *args/**kwargs are forwarded to seaborn.heatmap.
    """
    f, ax = plt.subplots(figsize=figsize)
    if nmax < df.shape[0]:
        df_s = df.sample(n=nmax)  # sample rows if dataframe too big
    else:
        # bug fix: df_s used to be undefined on this path (NameError when
        # the frame has <= nmax rows)
        df_s = df
    return sns.heatmap(df_s.isnull(), yticklabels=yticklabels, vmax=1, *args, **kwargs)
# class ColumnNaInfo
class NaImputer(DataExploration):
    """ Exploration and basic imputation of missing values.

    Builds on DataExploration; the constructor precomputes ``self.data_isna``,
    a 0/1 dataframe flagging the missing cells of every column that has at
    least one missing value.
    """

    def __init__(self, *args, **kwargs):
        super(NaImputer, self).__init__(*args, **kwargs)
        # precompute self.data_isna once so the na-oriented helpers can use it
        self.get_data_isna()

    @property
    def nacols(self):
        """ Returns a list of columns with at least one missing value """
        return cserie(self.nacolcount().Nanumber > 0)

    @property
    def nacols_i(self):
        """ Returns the index of columns with at least one missing value """
        return cserie(self.nacolcount().Nanumber > 0)

    def get_overlapping_matrix(self, normalize=True):
        """ Look at missing values overlapping: a (nacols x nacols) matrix of
        co-occurring missing cells, row-normalized if normalize=True """
        arr = self.data_isna.astype('float').values
        arr = np.dot(arr.T, arr)
        if normalize:
            arr = arr / (arr.max(axis=1)[:, None])
        index = self.nacols
        res = pd.DataFrame(index=index, data=arr, columns=index)
        return res

    def infos_na(self, na_low=0.05, na_high=0.90):
        """ Returns a dict with various infos about missing values """
        infos = {}
        infos['nacolcount'] = self.nacolcount()
        infos['narowcount'] = self.narowcount()
        infos['nb_total_na'] = self.total_missing
        infos['many_na_col'] = self.manymissing(pct=na_high)
        infos['low_na_col'] = cserie(self.nacolcount().Napercentage < na_low)
        infos['total_pct_na'] = self.nacolcount().Napercentage.mean()
        return infos

    def get_isna(self, col):
        """ Returns a dummy variable indicating if an observation of a specific
        col is na or not: 0 -> not na, 1 -> na """
        return self.data.loc[:, col].isnull().astype(int)

    @property
    def data_isna_m(self):
        """ Returns merged dataframe (data, data_isna) """
        return pd.concat((self.data, self.data_isna), axis=1)

    def get_data_isna(self, prefix="is_na_", filter_nna=True):
        """ Returns dataset with is_na columns from the a dataframe with missing values
        Parameters
        ----------
        prefix : str
            the name of the prefix that will be append to the column name.
        filter_nna: bool
            True if you want remove column without missing values.
        """
        if not filter_nna:
            cols_to_keep = self.data.columns
        else:
            cols_to_keep = self.nacols
        data_isna = self.data.loc[:, cols_to_keep].isnull().astype(int)
        data_isna.columns = ["{}{}".format(prefix, c) for c in cols_to_keep]
        self.data_isna = data_isna
        return self.data_isna

    def get_corrna(self, *args, **kwargs):
        """ Get matrix of correlation of na """
        return self.data_isna.corr(*args, **kwargs)

    def corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna (deprecated alias of
        plot_corrplot_na) """
        print("This function is deprecated")
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_corrplot_na(self, *args, **kwargs):
        """ Returns a corrplot of data_isna """
        plot_corrmatrix(self.data_isna, *args, **kwargs)

    def plot_density_m(self, colname, subset=None, prefix="is_na_", size=6, *args, **kwargs):
        """ Plot conditional density plots from all columns or subset based on
        is_na_colname 0 or 1 """
        colname_na = prefix + colname
        density_columns = self.data.columns if subset is None else subset
        # filter only numeric values and different values from is_na_col
        density_columns = [c for c in density_columns if (
            c in self._dfnum and c != colname)]
        print(density_columns)
        for col in density_columns:
            g = sns.FacetGrid(data=self.data_isna_m, col=colname_na, hue=colname_na,
                              size=size, *args, **kwargs)
            g.map(sns.distplot, col)

    def get_isna_mean(self, colname, prefix="is_na_"):
        """ Returns empirical conditional expectation, std, and sem of the other
        variables for a certain colname grouped by 0:not_a_na 1:na """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        measure_var = self.data.columns.tolist()
        measure_var = [c for c in measure_var if c != colname]
        functions = ['mean', 'std', 'sem']
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname)[measure_var].agg(functions).transpose()

    def get_isna_ttest_s(self, colname_na, colname, type_test="ks"):
        """ Returns a two-sample test (ks or ttest) of colname, split on the
        missingness of colname_na """
        index_na = self.data.loc[:, colname_na].isnull()
        measure_var = self.data.loc[:, colname].dropna()  # drop na vars
        if type_test == "ttest":
            return ttest_ind(measure_var[index_na], measure_var[~index_na])
        elif type_test == "ks":
            return ks_2samp(measure_var[index_na], measure_var[~index_na])

    def get_isna_ttest(self, colname_na, type_test="ks"):
        """ Runs get_isna_ttest_s of colname_na against every other numeric
        column; returns a DataFrame with pvalue/statistic/type_test per column """
        res = pd.DataFrame()
        col_to_compare = [c for c in self._dfnum if c !=
                          colname_na]  # remove colname_na
        for col in col_to_compare:
            ttest = self.get_isna_ttest_s(colname_na, col, type_test=type_test)
            res.loc[col, 'pvalue'] = ttest[1]
            res.loc[col, 'statistic'] = ttest[0]
            res.loc[col, 'type_test'] = type_test
        return res

    def isna_summary(self, colname, prefix="is_na_"):
        """ Returns summary from one col with describe """
        na_colname = "{}{}".format(prefix, colname)
        cols_to_keep = list(self.data.columns) + [na_colname]
        return self.data_isna_m.loc[:, cols_to_keep].groupby(na_colname).describe().transpose()

    def delete_narows(self, pct, index=False, verbose=False):
        """ Delete rows with more na percentage than > pct in data
        Return the index
        Arguments
        ---------
        pct : float
            percentage of missing values, rows with more na percentage
            than > pct are deleted
        index : bool, default False
            True if you want an index and not a Dataframe
        verbose : bool, default False
            True if you want to see percentage of data discarded
        Returns
        --------
        - a pandas Dataframe with rows deleted if index=False, index of
        columns to delete either
        """
        # bug fix: the original called manymissing(axis=0, index=False) --
        # axis=0 targets columns and ``index`` is not a manymissing
        # parameter; flagging rows needs axis=1
        index_missing = self.manymissing(pct=pct, axis=1)
        # bug fix: len(boolean_index) is always the row count; the share of
        # flagged rows is its sum over the row count
        pct_missing = float(index_missing.sum()) / len(self.data.index)
        # bug fix: ``verbose`` used to be an undefined name (NameError); it
        # is now a real keyword argument
        if verbose:
            print("There is {0:.2%} rows matching conditions".format(
                pct_missing))
        if not index:
            return self.data.loc[~index_missing, :]
        else:
            return index_missing

    def fillna_serie(self, colname, threshold_factor=0.1, special_value=None, date_method='ffill'):
        """ fill values in a serie default with the mean for numeric or the most common
        factor for categorical variable """
        if special_value is not None:
            # "Missing for example"
            return self.data.loc[:, colname].fillna(special_value)
        elif self.data.loc[:, colname].dtype == float:
            # fill with median
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].median())
        elif self.is_int_factor(colname, threshold_factor):
            # low-cardinality integer column: fill with the mode
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].mode()[0])
        elif self.data.loc[:, colname].dtype == '<M8[ns]':
            # fillna for datetime with the method provided by pandas
            return self.data.loc[:, colname].fillna(method=date_method)
        else:
            # Fill with most common value
            return self.data.loc[:, colname].fillna(self.data.loc[:, colname].value_counts().index[0])

    def basic_naimputation(self, columns_to_process=None, threshold=None):
        """ this function will return a dataframe with na value replaced int
        the columns selected by the mean or the most common value
        Arguments
        ---------
        - columns_to_process : list of columns name with na values you wish to fill
        with the fillna_serie function
        - threshold : float, also process every column whose na percentage is
        strictly below this threshold
        Returns
        --------
        - a pandas DataFrame with the columns_to_process filled with the fillena_serie
        """
        # bug fix: avoid the shared mutable default argument ``[]``
        columns_to_process = [] if columns_to_process is None else list(columns_to_process)
        if threshold:
            columns_to_process = columns_to_process + cserie(self.nacolcount().Napercentage < threshold)
        self.data.loc[:, columns_to_process] = self.data.loc[
            :, columns_to_process].apply(lambda x: self.fillna_serie(colname=x.name))
        return self.data

    def split_tt_na(self, colname, index=False):
        """ Split the dataset on the missingness of colname, returning the
        (test, train) frames, or their boolean indexes if index=True """
        index_na = self.data.loc[:, colname].isnull()
        index_test = (index_na == True)
        index_train = (index_na == False)
        if index:
            return index_test, index_train
        else:
            return self.data.loc[index_test, :], self.data.loc[index_train, :]
|
Python
| 227
| 41.766521
| 112
|
/autoc/naimputer.py
| 0.607025
| 0.603008
|
ericfourrier/auto-clean
|
refs/heads/develop
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : File with all custom exceptions
"""
class NotNumericColumn(Exception):
    """Raised when a column expected to be numeric is not."""
class NumericError(Exception):
    """Raised when a column expected to be non-numeric is numeric."""
# class NotFactor
|
Python
| 17
| 17.17647
| 45
|
/autoc/exceptions.py
| 0.656958
| 0.653722
|
ericfourrier/auto-clean
|
refs/heads/develop
|
__all__ = ["explorer", "naimputer"]
from .explorer import DataExploration
from .naimputer import NaImputer
from .preprocess import PreProcessor
from .utils.getdata import get_dataset
# from .preprocess import PreProcessor
|
Python
| 6
| 36
| 38
|
/autoc/__init__.py
| 0.797297
| 0.797297
|
ericfourrier/auto-clean
|
refs/heads/develop
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : This is a framework for Modeling with pandas, numpy and skicit-learn.
The Goal of this module is to rely on a dataframe structure for modelling g
"""
#########################################################
# Import modules and global helpers
#########################################################
import pandas as pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from pprint import pprint
from .exceptions import NotNumericColumn
class DataExploration(object):
"""
This class is designed to provide infos about the dataset such as
number of missing values, number of unique values, constant columns,
long strings ...
For the most useful methods it will store the result into a attributes
When you used a method the output will be stored in a instance attribute so you
don't have to compute the result again.
"""
    def __init__(self, data, copy=False):
        """
        Parameters
        ----------
        data : pandas.DataFrame
            the data you want explore
        copy: bool
            True if you want make a copy of DataFrame, default False
        Examples
        --------
        explorer = DataExploration(data = your_DataFrame)
        explorer.structure() : global structure of your DataFrame
        explorer.psummary() to get the a global snapchot of the different stuff detected
        data_cleaned = explorer.basic_cleaning() to clean your data.
        """
        assert isinstance(data, pd.DataFrame)
        self.is_data_copy = copy
        self.data = data if not self.is_data_copy else data.copy()
        # if not self.label:
        #     print("""the label column is empty the data will be considered
        #     as a dataset of predictors""")
        # cached frame dimensions
        self._nrow = len(self.data.index)
        self._ncol = len(self.data.columns)
        # boolean mask / list of the numeric (int or float) columns
        self._dfnumi = (self.data.dtypes == float) | (
            self.data.dtypes == int)
        self._dfnum = cserie(self._dfnumi)
        # boolean mask / list of the object (character) columns
        self._dfchari = (self.data.dtypes == object)
        self._dfchar = cserie(self._dfchari)
        # caches for the expensive computations (filled lazily by the
        # corresponding methods: nacolcount, narowcount, count_unique, ...)
        self._nacolcount = pd.DataFrame()
        self._narowcount = pd.DataFrame()
        self._count_unique = pd.DataFrame()
        self._constantcol = []
        self._dupcol = []
        self._nearzerovar = pd.DataFrame()
        self._corrcolumns = []
        self._dict_info = {}
        self._structure = pd.DataFrame()
        self._string_info = ""
        # lowercase tokens treated as encoder-specific missing values by
        # detect_other_na
        self._list_other_na = {'unknown', 'na',
                               'missing', 'n/a', 'not available'}
# def get_label(self):
# """ return the Serie of label you want predict """
# if not self.label:
# print("""the label column is empty the data will be considered
# as a dataset of predictors""")
# return self.data[self.label]
def is_numeric(self, colname):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
Notes
------
df._get_numeric_data() is a primitive from pandas
to get only numeric data
"""
dtype_col = self.data.loc[:, colname].dtype
return (dtype_col == int) or (dtype_col == float)
def is_int_factor(self, colname, threshold=0.1):
"""
Returns True if a the type of column is numeric else False
Parameters
----------
colname : str
the name of the column of the self.data
threshold : float
colname is an 'int_factor' if the number of
unique values < threshold * nrows
"""
dtype_col = self.data.loc[:, colname].dtype
if dtype_col == int and self.data.loc[:, colname].nunique() <= (threshold * self.data.shape[0]):
return True
else:
return False
def to_lowercase(self):
""" Returns a copy of dataset with data to lower """
return self.data.applymap(lambda x: x.lower() if type(x) == str else x)
def where_numeric(self):
""" Returns a Boolean Dataframe with True for numeric values False for other """
return self.data.applymap(lambda x: isinstance(x, (int, float)))
def count_unique(self):
""" Return a serie with the number of unique value per columns """
if len(self._count_unique):
return self._count_unique
self._count_unique = self.data.apply(lambda x: x.nunique(), axis=0)
return self._count_unique
def sample_df(self, pct=0.05, nr=10, threshold=None):
""" sample a number of rows of a dataframe = min(max(0.05*nrow(self,nr),threshold)"""
a = max(int(pct * float(len(self.data.index))), nr)
if threshold:
a = min(a, threshold)
return self.data.loc[permutation(self.data.index)[:a],:]
def sign_summary(self, subset=None):
"""
Returns the number and percentage of positive and negative values in
a column, a subset of columns or all numeric columns of the dataframe.
Parameters
----------
subset : label or list
Column name or list of column names to check.
Returns
-------
summary : pandas.Series or pandas.DataFrame
Summary of the signs present in the subset
"""
if subset:
subs = subs if isinstance(subs, list) else [subs]
if sum(col not in self._dfnum for col in subs) > 0:
raise NotNumericColumn('At least one of the columns you passed ' \
'as argument are not numeric.')
else:
subs = self._dfnum
summary = pd.DataFrame(columns=['NumOfNegative', 'PctOfNegative',
'NumOfPositive', 'PctOfPositive'])
summary['NumOfPositive'] = self.data[subs].apply(lambda x: (x >= 0).sum(), axis=0)
summary['NumOfNegative'] = self.data[subs].apply(lambda x: (x <= 0).sum(), axis=0)
summary['PctOfPositive'] = summary['NumOfPositive'] / len(self.data)
summary['PctOfNegative'] = summary['NumOfNegative'] / len(self.data)
return summary
@property
def total_missing(self):
""" Count the total number of missing values """
# return np.count_nonzero(self.data.isnull().values) # optimized for
# speed
return self.nacolcount().Nanumber.sum()
def nacolcount(self):
""" count the number of missing values per columns """
if len(self._nacolcount):
return self._nacolcount
self._nacolcount = self.data.isnull().sum(axis=0)
self._nacolcount = pd.DataFrame(self._nacolcount, columns=['Nanumber'])
self._nacolcount['Napercentage'] = self._nacolcount[
'Nanumber'] / (self._nrow)
return self._nacolcount
def narowcount(self):
""" count the number of missing values per columns """
if len(self._narowcount):
return self._narowcount
self._narowcount = self.data.isnull().sum(axis=1)
self._narowcount = pd.DataFrame(
self._narowcount, columns=['Nanumber'])
self._narowcount['Napercentage'] = self._narowcount[
'Nanumber'] / (self._ncol)
return self._narowcount
def detect_other_na(self, verbose=True, auto_replace=False):
""" Detect missing values encoded by the creator of the dataset
like 'Missing', 'N/A' ...
Parameters
----------
verbose : bool
True if you want to print some infos
auto_replace: bool
True if you want replace this value by np.nan, default False
Returns
-------
an DataFrame of boolean if not auto_replace else cleaned DataFrame with
self._list_other_na replaced by np.nan
Notes
------
* You can use na_values parameter in pandas.read_csv to specify the missing
values to convert to nan a priori
* Speed can be improved
"""
res = self.to_lowercase().applymap(lambda x: x in self._list_other_na)
print("We detected {} other type of missing values".format(res.sum().sum()))
if auto_replace:
return self.data.where((res == False), np.nan)
else:
return res
@property
def nacols_full(self):
""" Returns a list of columns with only missing values """
return cserie(self.nacolcount().Nanumber == self._nrow)
@property
def narows_full(self):
""" Returns an index of rows with only missing values """
return self.narowcount().Nanumber == self._ncol
# def manymissing2(self, pct=0.9, axis=0, index=False):
# """ identify columns of a dataframe with many missing values ( >= a), if
# row = True row either.
# - the output is a index """
# if axis == 1:
# self.manymissing = self.narowcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# elif axis == 0:
# self.manymissing = self.nacolcount()
# self.manymissing = self.manymissing['Napercentage'] >= pct
# else:
# raise ValueError
# if index:
# return manymissing
# else:
# return cserie(manymissing)
def manymissing(self, pct=0.9, axis=0):
""" identify columns of a dataframe with many missing values ( >= pct), if
row = True row either.
- the output is a list """
if axis == 1:
self._manymissingrow = self.narowcount()
self._manymissingrow = self._manymissingrow['Napercentage'] >= pct
return self._manymissingrow
elif axis == 0:
self._manymissingcol = self.nacolcount()
self._manymissingcol = cserie(
self._manymissingcol['Napercentage'] >= pct)
return self._manymissingcol
else:
raise ValueError("Axis should be 1 for rows and o for columns")
def df_len_string(self, drop_num=False):
""" Return a Series with the max of the length of the string of string-type columns """
if drop_num:
return self.data.drop(self._dfnum, axis=1).apply(lambda x: np.max(x.str.len()), axis=0)
else:
return self.data.apply(lambda x: np.max(x.str.len()) if x.dtype.kind =='O' else np.nan , axis=0)
def detectkey(self, index_format=False, pct=0.15, dropna=False, **kwargs):
""" identify id or key columns as an index if index_format = True or
as a list if index_format = False """
if not dropna:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: len(x.unique()) == len(x), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: len(x.unique()) == len(x), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
else:
col_to_keep = self.sample_df(
pct=pct, **kwargs).apply(lambda x: x.nunique() == len(x.dropna()), axis=0)
if len(col_to_keep) == 0:
return []
is_key_index = col_to_keep
is_key_index[is_key_index] == self.data.loc[:, is_key_index].apply(
lambda x: x.nunique() == len(x.dropna()), axis=0)
if index_format:
return is_key_index
else:
return cserie(is_key_index)
def constantcol(self, **kwargs):
""" identify constant columns """
# sample to reduce computation time
if len(self._constantcol):
return self._constantcol
col_to_keep = self.sample_df(
**kwargs).apply(lambda x: len(x.unique()) == 1, axis=0)
if len(cserie(col_to_keep)) == 0:
return []
self._constantcol = cserie(self.data.loc[:, col_to_keep].apply(
lambda x: len(x.unique()) == 1, axis=0))
return self._constantcol
def constantcol2(self, **kwargs):
""" identify constant columns """
return cserie((self.data == self.data.ix[0]).all())
    def factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """ return a list of the detected factor variable, detection is based on
        ther percentage of unicity perc_unique = 0.05 by default.
        We follow here the definition of R factors variable considering that a
        factor variable is a character variable that take value in a list a levels
        this is a bad implementation

        Arguments
        ----------
        nb_max_levels: the mac nb of levels you fix for a categorical variable
        threshold_value : the nb of of unique value in percentage of the dataframe length
        index : if you want the result as an index or a list
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels
        # numeric columns are never factors; for the rest, scan values until
        # the running set of distinct values reaches max_levels
        def helper_factor(x, num_var=self._dfnum):
            unique_value = set()
            if x.name in num_var:
                return False
            else:
                for e in x.values:
                    # NOTE(review): the check happens *before* adding, so a
                    # column with exactly max_levels distinct values is
                    # rejected as soon as one more element follows the point
                    # where the set filled up — seems stricter than the
                    # docstring implies; confirm this is intended
                    if len(unique_value) >= max_levels:
                        return False
                    else:
                        unique_value.add(e)
                return True
        if index:
            return self.data.apply(lambda x: helper_factor(x))
        else:
            return cserie(self.data.apply(lambda x: helper_factor(x)))
@staticmethod
def serie_quantiles(array, nb_quantiles=10):
binq = 1.0 / nb_quantiles
if type(array) == pd.Series:
return array.quantile([binq * i for i in xrange(nb_quantiles + 1)])
elif type(array) == np.ndarray:
return np.percentile(array, [binq * i for i in xrange(nb_quantiles + 1)])
else:
raise("the type of your array is not supported")
def dfquantiles(self, nb_quantiles=10, only_numeric=True):
""" this function gives you a all the quantiles
of the numeric variables of the dataframe
only_numeric will calculate it only for numeric variables,
for only_numeric = False you will get NaN value for non numeric
variables """
binq = 1.0 / nb_quantiles
if only_numeric:
return self.data.loc[:, self._dfnumi].quantile([binq * i for i in xrange(nb_quantiles + 1)])
else:
return self.data.quantile([binq * i for i in xrange(nb_quantiles + 1)])
    def numeric_summary(self):
        """ Provide a more complete summary than describe; it is using only
        numeric columns. Returns one row per column with Count, Min,
        quartiles, Mean, Std, Mad, Skewness, Kurtosis and Max. """
        df = self.data.loc[:, self._dfnumi]
        # NOTE(review): DataFrame.mad was removed in pandas 2.0 — confirm the
        # pinned pandas version still provides it
        func_list = [df.count(), df.min(), df.quantile(0.25),
                     df.quantile(0.5), df.mean(),
                     df.std(), df.mad(), df.skew(),
                     df.kurt(), df.quantile(0.75), df.max()]
        results = [f for f in func_list]
        return pd.DataFrame(results, index=['Count', 'Min', 'FirstQuartile',
                                            'Median', 'Mean', 'Std', 'Mad', 'Skewness',
                                            'Kurtosis', 'Thirdquartile', 'Max']).T
def infer_types(self):
""" this function will try to infer the type of the columns of data"""
return self.data.apply(lambda x: pd.lib.infer_dtype(x.values))
    def structure(self, threshold_factor=10):
        """ this function return a summary of the structure of the pandas DataFrame
        data looking at the type of variables, the number of missing values, the
        number of unique values (cached after the first call) """
        if len(self._structure):
            return self._structure
        dtypes = self.data.dtypes
        nacolcount = self.nacolcount()
        nb_missing = nacolcount.Nanumber
        perc_missing = nacolcount.Napercentage
        nb_unique_values = self.count_unique()
        dtype_infer = self.infer_types()
        # R-style dtype label: numeric / factor / character
        dtypes_r = self.data.apply(lambda x: "character")
        dtypes_r[self._dfnumi] = "numeric"
        # low-cardinality character columns are relabelled as factors
        dtypes_r[(dtypes_r == 'character') & (
            nb_unique_values <= threshold_factor)] = 'factor'
        constant_columns = (nb_unique_values == 1)
        na_columns = (perc_missing == 1)
        # a key column has as many unique values as the frame has rows
        is_key = nb_unique_values == self._nrow
        string_length = self.df_len_string(drop_num=False)
        # is_key_na = ((nb_unique_values + nb_missing) == self.nrow()) & (~na_columns)
        dict_str = {'dtypes_r': dtypes_r, 'perc_missing': perc_missing,
                    'nb_missing': nb_missing, 'is_key': is_key,
                    'nb_unique_values': nb_unique_values, 'dtypes_p': dtypes,
                    'constant_columns': constant_columns, 'na_columns': na_columns,
                    'dtype_infer': dtype_infer, 'string_length': string_length}
        self._structure = pd.concat(dict_str, axis=1)
        # fix the column order of the summary frame
        self._structure = self._structure.loc[:, ['dtypes_p', 'dtypes_r', 'nb_missing', 'perc_missing',
                                                  'nb_unique_values', 'constant_columns',
                                                  'na_columns', 'is_key', 'dtype_infer', 'string_length']]
        return self._structure
def findupcol(self, threshold=100, **kwargs):
""" find duplicated columns and return the result as a list of list """
df_s = self.sample_df(threshold=100, **kwargs).T
dup_index_s = (df_s.duplicated()) | (
df_s.duplicated(keep='last'))
if len(cserie(dup_index_s)) == 0:
return []
df_t = (self.data.loc[:, dup_index_s]).T
dup_index = df_t.duplicated()
dup_index_complet = cserie(
(dup_index) | (df_t.duplicated(keep='last')))
l = []
for col in cserie(dup_index):
index_temp = self.data[dup_index_complet].apply(
lambda x: (x == self.data[col])).sum() == self._nrow
temp = list(self.data[dup_index_complet].columns[index_temp])
l.append(temp)
self._dupcol = l
return self._dupcol
def finduprow(self, subset=[]):
""" find duplicated rows and return the result a sorted dataframe of all the
duplicates
subset is a list of columns to look for duplicates from this specific subset .
"""
if sum(self.data.duplicated()) == 0:
print("there is no duplicated rows")
else:
if subset:
dup_index = (self.data.duplicated(subset=subset)) | (
self.data.duplicated(subset=subset, keep='last'))
else:
dup_index = (self.data.duplicated()) | (
self.data.duplicated(keep='last'))
if subset:
return self.data[dup_index].sort(subset)
else:
return self.data[dup_index].sort(self.data.columns[0])
def nearzerovar(self, freq_cut=95 / 5, unique_cut=10, save_metrics=False):
""" identify predictors with near-zero variance.
freq_cut: cutoff ratio of frequency of most common value to second
most common value.
unique_cut: cutoff percentage of unique value over total number of
samples.
save_metrics: if False, print dataframe and return NON near-zero var
col indexes, if True, returns the whole dataframe.
"""
nb_unique_values = self.count_unique()
percent_unique = 100 * nb_unique_values / self._nrow
def helper_freq(x):
if nb_unique_values[x.name] == 0:
return 0.0
elif nb_unique_values[x.name] == 1:
return 1.0
else:
return float(x.value_counts().iloc[0]) / x.value_counts().iloc[1]
freq_ratio = self.data.apply(helper_freq)
zerovar = (nb_unique_values == 0) | (nb_unique_values == 1)
nzv = ((freq_ratio >= freq_cut) & (
percent_unique <= unique_cut)) | (zerovar)
if save_metrics:
return pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio, 'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns)
else:
print(pd.DataFrame({'percent_unique': percent_unique, 'freq_ratio': freq_ratio,
'zero_var': zerovar, 'nzv': nzv}, index=self.data.columns))
return nzv[nzv == True].index
def findcorr(self, cutoff=.90, method='pearson', data_frame=False, print_mode=False):
"""
implementation of the Recursive Pairwise Elimination.
The function finds the highest correlated pair and removes the most
highly correlated feature of the pair, then repeats the process
until the threshold 'cutoff' is reached.
will return a dataframe is 'data_frame' is set to True, and the list
of predictors to remove oth
Adaptation of 'findCorrelation' function in the caret package in R.
"""
res = []
df = self.data.copy(0)
cor = df.corr(method=method)
for col in cor.columns:
cor[col][col] = 0
max_cor = cor.max()
if print_mode:
print(max_cor.max())
while max_cor.max() > cutoff:
A = max_cor.idxmax()
B = cor[A].idxmax()
if cor[A].mean() > cor[B].mean():
cor.drop(A, 1, inplace=True)
cor.drop(A, 0, inplace=True)
res += [A]
else:
cor.drop(B, 1, inplace=True)
cor.drop(B, 0, inplace=True)
res += [B]
max_cor = cor.max()
if print_mode:
print(max_cor.max())
if data_frame:
return df.drop(res, 1)
else:
return res
self._corrcolumns = res
    def get_infos_consistency(self):
        """ Update self._dict_info and returns infos about duplicates rows and cols,
        constant col, narows and cols.

        Each entry maps a check name to a dict with 'value' (the offending
        rows/columns), 'level' (ERROR/WARNING) and a suggested 'action'. """
        infos = {'duplicated_rows': {'value': cserie(self.data.duplicated(), index=True), 'level': 'ERROR',
                                     'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
                 'dup_columns': {'value': self.findupcol(), 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.findupcol())},
                 'constant_columns': {'value': self.constantcol(), 'level': 'WARNING',
                                      'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.constantcol())},
                 'narows_full': {'value': cserie(self.narows_full), 'level': 'ERROR',
                                 'action': 'delete','comment': 'You should delete this rows with df.drop_duplicates()'},
                 'nacols_full': {'value': self.nacols_full, 'level': 'ERROR',
                                 'action': 'delete', 'comment': 'You should delete one of the column with df.drop({}, axis=1)'.format(self.nacols_full)}
                 }
        # merge into the instance-wide info cache
        self._dict_info.update(infos)
        return infos
    def get_infos_na(self, manymissing_ph=0.9, manymissing_pl=0.05):
        """ Update self._dict_info and returns infos about missing values:
        totals plus the columns above the high threshold (candidates for
        deletion/imputation) and the ones below the low threshold
        (candidates for simple imputation). """
        nacolcount_p = self.nacolcount().Napercentage
        infos = {'nb_total_missing': {'value': self.total_missing, 'level': 'INFO', 'action': None},
                 'pct_total_missing': {'value': float(self.total_missing) / self._nrow, 'level': 'INFO', 'action': None},
                 'many_na_columns': {'value': cserie((nacolcount_p > manymissing_ph)), 'level': 'ERROR', 'action': 'delete or impute'},
                 'low_na_columns': {'value': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)), 'level': 'WARNING', 'action': 'impute'},
                 }
        # merge into the instance-wide info cache
        self._dict_info.update(infos)
        return infos
def print_infos(self, infos="consistency", print_empty=False):
""" pprint of get_infos
Parameters
----------
print_empty: bool:
False if you don't want print the empty infos (
no missing colum for example)"""
if infos == "consistency":
dict_infos = self.get_infos_consistency()
if not print_empty:
dict_infos = {k: v for k, v in dict_infos.items() if len(v['value']) > 0}
pprint(dict_infos)
def psummary(self, manymissing_ph=0.70, manymissing_pl=0.05, nzv_freq_cut=95 / 5, nzv_unique_cut=10,
             threshold=100, string_threshold=40, dynamic=False):
    """
    This function will print you a summary of the dataset, based on function
    designed is this package
    - Output : python print
    It will store the string output and the dictionnary of results in private variables

    Parameters
    ----------
    manymissing_ph : float
        columns above this NA ratio are reported as "many missing".
    manymissing_pl : float
        columns with an NA ratio in (0, manymissing_pl] are reported as easy to fill.
    nzv_freq_cut, nzv_unique_cut : numeric
        thresholds forwarded to self.nearzerovar.
    threshold : int
        NOTE(review): unused — findupcol is called with a hard-coded threshold=100.
    string_threshold : int
        columns whose string length exceeds this are reported as "big strings".
    dynamic : bool
        True prints each metric as it is computed; False stores everything in
        self._dict_info / self._string_info and prints once at the end.
    """
    # proportion of missing values per column
    nacolcount_p = self.nacolcount().Napercentage
    if dynamic:
        print('there are {0} duplicated rows\n'.format(
            self.data.duplicated().sum()))
        print('the columns with more than {0:.2%} manymissing values:\n{1} \n'.format(manymissing_ph,
                                                                                      cserie((nacolcount_p > manymissing_ph))))
        print('the columns with less than {0:.2%} manymissing values are :\n{1} \n you should fill them with median or most common value \n'.format(
            manymissing_pl, cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl))))
        print('the detected keys of the dataset are:\n{0} \n'.format(
            self.detectkey()))
        print('the duplicated columns of the dataset are:\n{0}\n'.format(
            self.findupcol(threshold=100)))
        print('the constant columns of the dataset are:\n{0}\n'.format(
            self.constantcol()))
        print('the columns with nearzerovariance are:\n{0}\n'.format(
            list(cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv))))
        print('the columns highly correlated to others to remove are:\n{0}\n'.format(
            self.findcorr(data_frame=False)))
        print('these columns contains big strings :\n{0}\n'.format(
            cserie(self.df_len_string() > string_threshold)))
    else:
        # compute every metric once, keep the raw values for later inspection
        self._dict_info = {'nb_duplicated_rows': np.sum(self.data.duplicated()),
                           'many_missing_percentage': manymissing_ph,
                           'manymissing_columns': cserie((nacolcount_p > manymissing_ph)),
                           'low_missing_percentage': manymissing_pl,
                           'lowmissing_columns': cserie((nacolcount_p > 0) & (nacolcount_p <= manymissing_pl)),
                           'keys_detected': self.detectkey(),
                           'dup_columns': self.findupcol(threshold=100),
                           'constant_columns': self.constantcol(),
                           'nearzerovar_columns': cserie(self.nearzerovar(nzv_freq_cut, nzv_unique_cut, save_metrics=True).nzv),
                           'high_correlated_col': self.findcorr(data_frame=False),
                           'big_strings_col': cserie(self.df_len_string() > string_threshold)
                           }
        # render the stored metrics into one printable report
        self._string_info = u"""
there are {nb_duplicated_rows} duplicated rows\n
the columns with more than {many_missing_percentage:.2%} manymissing values:\n{manymissing_columns} \n
the columns with less than {low_missing_percentage:.2%}% manymissing values are :\n{lowmissing_columns} \n
you should fill them with median or most common value\n
the detected keys of the dataset are:\n{keys_detected} \n
the duplicated columns of the dataset are:\n{dup_columns}\n
the constant columns of the dataset are:\n{constant_columns}\n
the columns with nearzerovariance are:\n{nearzerovar_columns}\n
the columns highly correlated to others to remove are:\n{high_correlated_col}\n
these columns contains big strings :\n{big_strings_col}\n
""".format(**self._dict_info)
        print(self._string_info)
def metadata(self):
    """Return a dict/json full of infos about the dataset."""
    info = {
        'mem_size': self.data.memory_usage(index=True).sum(),  # in bytes
        'columns_name': self.data.columns.tolist(),
        'columns_name_n': [name.lower() for name in self.data.columns],
        'nb_rows': self.data.shape[0],
        'nb_columns': self.data.shape[1],
    }
    # drop dtype_p for mongodb compatibility
    info['structure'] = self.structure().drop(
        labels='dtypes_p', axis=1).to_dict('index')
    info['numeric_summary'] = self.numeric_summary().to_dict('index')
    return info
|
Python
| 678
| 42.346607
| 152
|
/autoc/explorer.py
| 0.567423
| 0.561673
|
ericfourrier/auto-clean
|
refs/heads/develop
|
"""
@author: efourrier
Purpose : This is a simple experimental class to detect outliers. This class
can be used to detect missing values encoded as outlier (-999, -1, ...)
"""
from autoc.explorer import DataExploration, pd
import numpy as np
#from autoc.utils.helpers import cserie
from exceptions import NotNumericColumn
def iqr(ndarray, dropna=True):
    """Interquartile range (Q3 - Q1) of a 1-d numeric array."""
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    q3 = np.percentile(values, 75)
    q1 = np.percentile(values, 25)
    return q3 - q1
def z_score(ndarray, dropna=True):
    """Standard score (x - mean) / std of a 1-d numeric array."""
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    return (values - np.mean(values)) / np.std(values)
def iqr_score(ndarray, dropna=True):
    """Robust score (x - median) / IQR of a 1-d numeric array.

    The IQR is always computed on the NaN-free values (the helper it
    replaces dropped NaNs internally), so dropna only controls whether
    the returned scores keep NaN slots.
    """
    valid = ndarray[~np.isnan(ndarray)]
    values = valid if dropna else ndarray
    spread = np.percentile(valid, 75) - np.percentile(valid, 25)
    return (values - np.median(values)) / spread
def mad_score(ndarray, dropna=True):
    """Robust score based on the median absolute deviation (MAD).

    Scores are (x - median) / (MAD / 0.6745); 0.6745 makes the MAD a
    consistent estimator of the standard deviation for normal data.
    """
    values = ndarray[~np.isnan(ndarray)] if dropna else ndarray
    center = np.median(values)
    mad = np.median(np.absolute(values - center))
    return (values - center) / (mad / 0.6745)
class OutliersDetection(DataExploration):
    """
    This class focuses on identifying outliers in numeric columns.

    Parameters
    ----------
    data : DataFrame

    Examples
    --------
    * od = OutliersDetection(data = your_DataFrame)
    * od.structure() : global structure of your DataFrame
    """

    def __init__(self, *args, **kwargs):
        super(OutliersDetection, self).__init__(*args, **kwargs)
        # ready-made cutoff dictionaries usable as `cutoff_params`
        self.strong_cutoff = {'cutoff_z': 6,
                              'cutoff_iqr': 6, 'cutoff_mad': 6}
        self.basic_cutoff = {'cutoff_z': 3,
                             'cutoff_iqr': 2, 'cutoff_mad': 2}

    def check_negative_value_serie(self, colname):
        """Return the number of strictly negative values in column `colname`.

        Raises
        ------
        NotNumericColumn
            If the column is not numeric.

        Bugfix notes: this was a duplicated `check_negative_value` definition
        (shadowed by the aggregate method below, which calls this name); its
        body referenced an undefined `serie` and built the exception without
        raising it.
        """
        if not self.is_numeric(colname):
            raise NotNumericColumn("The serie should be numeric values")
        return sum(self.data.loc[:, colname] < 0)

    def outlier_detection_serie_1d(self, colname, cutoff_params, scores=[z_score, iqr_score, mad_score]):
        """Compute the distance scores of one column and flag outliers.

        Returns a DataFrame with one column per score function plus an
        `is_outlier` 0/1 column set when any |score| reaches the matching
        cutoff in `cutoff_params` ('cutoff_z', 'cutoff_iqr', 'cutoff_mad').
        The shared default `scores` list is never mutated.
        """
        if not self.is_numeric(colname):
            # bugfix: the original did raise("...") which raises a TypeError
            # (a str is not an exception)
            raise NotNumericColumn(
                "auto-clean doesn't support outliers detection for Non numeric variable")
        keys = [str(func.__name__) for func in scores]
        df = pd.DataFrame(dict((key, func(self.data.loc[:, colname]))
                               for key, func in zip(keys, scores)))
        df['is_outlier'] = 0
        for s in keys:
            # 'z_score' -> 'cutoff_z', 'iqr_score' -> 'cutoff_iqr', ...
            cutoff_colname = "cutoff_{}".format(s.split('_')[0])
            index_outliers = np.absolute(df[s]) >= cutoff_params[cutoff_colname]
            df.loc[index_outliers, 'is_outlier'] = 1
        return df

    def check_negative_value(self):
        """ this will return a the ratio negative/positve for each numeric
        variable of the DataFrame
        """
        return self.data[self._dfnum].apply(lambda x: self.check_negative_value_serie(x.name))

    def outlier_detection_1d(self, cutoff_params, subset=None,
                             scores=[z_score, iqr_score, mad_score]):
        """Run outlier_detection_serie_1d on every numeric column.

        Returns one DataFrame with each column's score frames concatenated
        side by side, columns prefixed with the variable name.
        Bugfix: the original returned only the last column's frame — the
        concatenation it clearly intended was commented out.
        """
        df = self.data.copy()
        numeric_var = self._dfnum
        if subset:
            df = df.drop(subset, axis=1)
            # keep only the numeric columns that survived the drop
            numeric_var = [c for c in numeric_var if c in df.columns]
        df = df.loc[:, numeric_var]  # take only numeric variable
        df_outlier = pd.DataFrame()
        for col in df:
            df_temp = self.outlier_detection_serie_1d(col, cutoff_params, scores)
            df_temp.columns = [col + '_' +
                               col_name for col_name in df_temp.columns]
            df_outlier = pd.concat([df_outlier, df_temp], axis=1)
        return df_outlier
|
Python
| 106
| 35.339622
| 107
|
/autoc/outliersdetection.py
| 0.606438
| 0.59891
|
ericfourrier/auto-clean
|
refs/heads/develop
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : The purpose of this class is too automaticely transfrom a DataFrame
into a numpy ndarray in order to use an aglorithm
"""
#########################################################
# Import modules and global helpers
#########################################################
from autoc.explorer import DataExploration, pd
import numpy as np
from numpy.random import permutation
from autoc.utils.helpers import cserie
from autoc.exceptions import NumericError
class PreProcessor(DataExploration):
    """Prepare a DataFrame for scikit-learn: basic cleaning, subtype
    inference, categorical detection/conversion and level capping."""

    subtypes = ['text_raw', 'text_categorical', 'ordinal', 'binary', 'other']

    def __init__(self, *args, **kwargs):
        super(PreProcessor, self).__init__(*args, **kwargs)
        # heuristics used by the subtype inference below
        self.long_str_cutoff = 80
        self.short_str_cutoff = 30
        self.perc_unique_cutoff = 0.2
        self.nb_max_levels = 20

    def basic_cleaning(self, filter_nacols=True, drop_col=None,
                       filter_constantcol=True, filer_narows=True,
                       verbose=True, filter_rows_duplicates=True, inplace=False):
        """
        Basic cleaning of the data by deleting manymissing columns,
        constantcol, full missing rows, and drop_col specified by the user.

        Returns the cleaned DataFrame (self.data itself when inplace=True).
        """
        col_to_remove = []
        index_to_remove = []
        if filter_nacols:
            col_to_remove += self.nacols_full
        if filter_constantcol:
            col_to_remove += list(self.constantcol())
        if filer_narows:
            index_to_remove += cserie(self.narows_full)
        if filter_rows_duplicates:
            index_to_remove += cserie(self.data.duplicated())
        if isinstance(drop_col, list):
            col_to_remove += drop_col
        elif isinstance(drop_col, str):
            col_to_remove += [drop_col]
        col_to_remove = list(set(col_to_remove))
        index_to_remove = list(set(index_to_remove))
        if verbose:
            print("We are removing the folowing columns : {}".format(col_to_remove))
            print("We are removing the folowing rows : {}".format(index_to_remove))
        if inplace:
            # bugfix: the original "inplace" branch returned a new frame and
            # never modified self.data
            self.data = self.data.drop(index_to_remove).drop(col_to_remove, axis=1)
            return self.data
        return self.data.copy().drop(index_to_remove).drop(col_to_remove, axis=1)

    def _infer_subtype_col(self, colname):
        """ This fonction tries to infer subtypes in order to preprocess them
        better for skicit learn. You can find the different subtypes in the class
        variable subtypes

        To be completed ....
        Returns None when no rule matches (e.g. long low-cardinality text).
        """
        serie_col = self.data.loc[:, colname]
        if serie_col.nunique() == 2:
            return 'binary'
        elif serie_col.dtype.kind == 'O':
            if serie_col.str.len().mean() > self.long_str_cutoff and serie_col.nunique() / len(serie_col) > self.perc_unique_cutoff:
                return "text_long"
            elif serie_col.str.len().mean() <= self.short_str_cutoff and serie_col.nunique() <= self.nb_max_levels:
                return 'text_categorical'
        elif self.is_numeric(colname):
            if serie_col.dtype == int and serie_col.nunique() <= self.nb_max_levels:
                return "ordinal"
            else:
                return "other"

    def infer_subtypes(self):
        """ Apply _infer_subtype_col to the whole DataFrame as a dictionnary """
        return {col: {'dtype': self.data.loc[:, col].dtype, 'subtype': self._infer_subtype_col(col)} for col in self.data.columns}

    def infer_categorical_str(self, colname, nb_max_levels=10, threshold_value=0.01):
        """ Returns True if we detect in the serie a factor variable
        A string factor is based on the following caracteristics :
        ther percentage of unicity perc_unique = 0.05 by default.
        We follow here the definition of R factors variable considering that a
        factor variable is a character variable that take value in a list a levels

        Arguments
        ----------
        nb_max_levels: int
            the max nb of levels you fix for a categorical variable
        threshold_value : float
            the nb of of unique value in percentage of the dataframe length
        """
        if threshold_value:
            max_levels = max(nb_max_levels, threshold_value * self._nrow)
        else:
            max_levels = nb_max_levels
        # False for numeric columns
        if self.is_numeric(colname):
            return False
        # False for categorical columns
        if self.data.loc[:, colname].dtype == "category":
            return False
        unique_value = set()
        # bugfix: the original had `self.data.loc[:, colname], iteritems()`
        # (comma typo -> NameError); iterate the Series items instead
        for i, v in self.data.loc[:, colname].items():
            if len(unique_value) >= max_levels:
                return False
            unique_value.add(v)
        return True

    def get_factors(self, nb_max_levels=10, threshold_value=None, index=False):
        """ Return a list of the detected factor variable, detection is based on
        ther percentage of unicity perc_unique = 0.05 by default.
        We follow here the definition of R factors variable considering that a
        factor variable is a character variable that take value in a list a levels
        this is a bad implementation

        Arguments
        ----------
        nb_max_levels: int
            the max nb of levels you fix for a categorical variable.
        threshold_value : float
            the nb of of unique value in percentage of the dataframe length.
        index: bool
            False, returns a list, True if you want an index.
        """
        # bugfix: infer_categorical_str expects a column *name*; the original
        # passed the whole Series and dropped the thresholds
        res = self.data.apply(lambda x: self.infer_categorical_str(
            x.name, nb_max_levels=nb_max_levels, threshold_value=threshold_value))
        if index:
            return res
        return cserie(res)

    def factors_to_categorical(self, inplace=True, verbose=True, *args, **kwargs):
        """Convert every detected factor column to the pandas 'category' dtype."""
        factors_col = self.get_factors(*args, **kwargs)
        if verbose:
            print("We are converting following columns to categorical :{}".format(
                factors_col))
        # bugfix: the original used self.df (undefined attribute) and the bare
        # name `category` (NameError) instead of the 'category' dtype string
        if inplace:
            self.data.loc[:, factors_col] = self.data.loc[:, factors_col].astype('category')
        else:
            return self.data.loc[:, factors_col].astype('category')

    def remove_category(self, colname, nb_max_levels, replace_value='other', verbose=True):
        """ Replace a variable with too many categories by grouping minor categories to one """
        if self.data.loc[:, colname].nunique() < nb_max_levels:
            if verbose:
                print("{} has not been processed because levels < {}".format(
                    colname, nb_max_levels))
        else:
            if self.is_numeric(colname):
                # bugfix: .format() was missing its argument
                raise NumericError(
                    '{} is a numeric columns you cannot use this function'.format(colname))
            # bugfix: value_counts is a method; the original sliced the bound
            # method object (TypeError)
            top_levels = self.data.loc[
                :, colname].value_counts().iloc[0:nb_max_levels].index
            self.data.loc[~self.data.loc[:, colname].isin(
                top_levels), colname] = replace_value
|
Python
| 177
| 38.785309
| 131
|
/autoc/preprocess.py
| 0.590741
| 0.587049
|
ericfourrier/auto-clean
|
refs/heads/develop
|
# -*- coding: utf-8 -*-
"""
@author: efourrier
Purpose : Create toolbox functions to use for the different pieces of code ot the package
"""
from numpy.random import normal
from numpy.random import choice
import time
import pandas as pd
import numpy as np
import functools
def print_section(section_name, width=120):
    """Print a centered, '='-padded section header for DataExploration reports.

    Bugfix: the nested width field was '{ }' (a field named by a space),
    which raises KeyError; the auto-numbered nested field must be '{}'.
    """
    title = ' ' + section_name + ' '
    print('{:=^{}}'.format(title, width))
# def get_dataset(name, *args, **kwargs):
# """Get a dataset from the online repo
# https://github.com/ericfourrier/autoc-datasets (requires internet).
#
# Parameters
# ----------
# name : str
# Name of the dataset 'name.csv'
# """
# path = "https://raw.githubusercontent.com/ericfourrier/autoc-datasets/master/{0}.csv".format(name)
# return pd.read_csv(path, *args, **kwargs)
def flatten_list(x):
    """Recursively flatten arbitrarily nested lists; a non-list becomes [x]."""
    if not isinstance(x, list):
        return [x]
    flat = []
    for item in x:
        flat.extend(flatten_list(item))
    return flat
def cserie(serie, index=False):
    """Labels where a boolean Series is True, as an Index or a plain list."""
    true_labels = serie[serie].index
    return true_labels if index else true_labels.tolist()
def removena_numpy(array):
    """Return `array` with NaN entries removed."""
    keep_mask = np.logical_not(np.isnan(array))
    return array[keep_mask]
def common_cols(df1, df2):
    """ Return the intersection of commun columns name """
    shared = set(df1.columns).intersection(set(df2.columns))
    return list(shared)
def bootstrap_ci(x, n=300, ci=0.95):
    """
    this is a function depending on numpy to compute bootstrap percentile
    confidence intervalfor the mean of a numpy array

    Arguments
    ---------
    x : a numpy ndarray
    n : the number of boostrap samples
    ci : the percentage confidence (float) interval in ]0,1[

    Return
    -------
    a tuple (ci_inf,ci_up)
    """
    alpha_low = 100 * (1 - ci) / 2
    alpha_high = 100 * ci + alpha_low
    clean = x[~np.isnan(x)]  # drop NaNs before resampling
    if not len(clean):
        return (np.nan, np.nan)
    sample_means = choice(a=clean, size=(
        len(clean), n), replace=True).mean(axis=0)
    return np.percentile(sample_means, [alpha_low, alpha_high])
def clock(func):
    """ decorator to measure the duration of each test of the unittest suite,
    this is extensible for any kind of functions it will just add a print """
    def clocked(*args):
        start = time.time()
        value = func(*args)
        duration_ms = (time.time() - start) * 1000
        print('elapsed : [{0:0.3f}ms]'.format(duration_ms))
        return value
    return clocked
def cached_property(fun):
    """A memoize decorator for class properties.

    The computed value is stored in a per-instance `_cache` dict keyed by the
    wrapped function, so each property is evaluated at most once per instance.
    """
    @functools.wraps(fun)
    def get(self):
        cache = getattr(self, '_cache', None)
        if cache is None:
            cache = self._cache = {}
        if fun not in cache:
            cache[fun] = fun(self)
        return cache[fun]
    return property(get)
def create_test_df():
    """ Creating a test pandas DataFrame for the unittest suite.

    1000 rows; each column is built to exercise one detector of the package:
    full-NA, constant, duplicated, near-zero-variance columns, string/int
    factors, injected outliers, several textual NA encodings, etc.
    """
    # two key-like integer columns (id and a multiple of it)
    test_df = pd.DataFrame({'id': [i for i in range(1, 1001)], 'member_id': [
        10 * i for i in range(1, 1001)]})
    test_df['na_col'] = np.nan  # 100% missing column
    test_df['id_na'] = test_df.id
    test_df.loc[1:3, 'id_na'] = np.nan  # key column with a few NAs
    test_df['constant_col'] = 'constant'
    test_df['constant_col_num'] = 0
    # random categorical columns (unseeded RNG — contents differ per call)
    test_df['character_factor'] = [
        choice(list('ABCDEFG')) for _ in range(1000)]
    test_df['num_factor'] = [choice([1, 2, 3, 4]) for _ in range(1000)]
    # near-zero variance: a single row deviates from the common value
    test_df['nearzerovar_variable'] = 'most_common_value'
    test_df.loc[0, 'nearzerovar_variable'] = 'one_value'
    test_df['binary_variable'] = [choice([0, 1]) for _ in range(1000)]
    test_df['character_variable'] = [str(i) for i in range(1000)]
    test_df['duplicated_column'] = test_df.id
    test_df['many_missing_70'] = [1] * 300 + [np.nan] * 700  # 70% missing
    test_df['character_variable_fillna'] = ['A'] * \
        300 + ['B'] * 200 + ['C'] * 200 + [np.nan] * 300
    test_df['numeric_variable_fillna'] = [1] * 400 + [3] * 400 + [np.nan] * 200
    test_df['num_variable'] = 100.0
    test_df['int_factor_10'] = [choice(range(10)) for _ in range(1000)]
    # normal noise with extreme values injected at fixed positions
    test_df['outlier'] = normal(size=1000)
    test_df.loc[[1, 10, 100], 'outlier'] = [999, 3, 999]
    test_df['outlier_na'] = test_df['outlier']
    test_df.loc[[300, 500], 'outlier_na'] = np.nan
    test_df['datetime'] = pd.date_range('1/1/2015', periods=1000, freq='H')
    # None vs np.nan missing-value encodings
    test_df['None_100'] = [1] * 900 + [None] * 100
    test_df['None_na_200'] = [1] * 800 + [None] * 100 + [np.nan] * 100
    # two near-duplicate character columns differing only in the last level
    test_df['character_variable_up1'] = ['A'] * 500 + ['B'] * 200 + ['C'] * 300
    test_df['character_variable_up2'] = ['A'] * 500 + ['B'] * 200 + ['D'] * 300
    # common textual NA spellings; 'do_not_touch' must survive any recoding
    test_df['other_na'] = ['Missing'] * 100 + ['missing'] * 100 + ['N/a'] * 100 + \
        ['NA'] * 100 + ['na'] * 100 + ['n/a'] * 100 + ['Not Available'] * 100 + \
        ['Unknown'] * 100 + ['do_not_touch'] * 200
    return test_df
def simu(pmf, size):
    """ Draw one sample from of a discrete distribution, pmf is supposed to
    be in ascending order

    Parameters
    ----------
    pmf : tuple(ndarray, ndarray)
        a tuple with (labels,probs) labels are supposed to be in ascending order
    size: int
        the number of sampel you want generate

    Returns
    ------
    int (depends of the type of labels)
        draw a random sample from the pmf
    """
    labels, probs = pmf
    draws = np.random.rand(size)
    cdf = probs.cumsum()
    # index of the first CDF bucket each uniform draw falls into
    positions = (draws >= cdf[:, None]).argmin(axis=0)
    return labels[positions]
def shuffle_df(df, reindex=False):
    """Return `df` with rows in random order; reset the index when `reindex`."""
    shuffled = df.sample(frac=1)
    if reindex:
        shuffled = shuffled.reset_index()
    return shuffled
def random_pmf(nb_labels):
    """ Return a random probability mass function of nb_labels"""
    weights = np.random.random(nb_labels)
    return weights / weights.sum()
def random_histogram(nb_labels, nb_observations):
    """ Return a random probability mass function of nb_labels"""
    counts = np.random.choice(np.arange(0, nb_observations), nb_labels)
    return counts / np.sum(counts)
def keep_category(df, colname, pct=0.05, n=5):
    """ Keep a pct or number of every levels of a categorical variable

    Parameters
    ----------
    pct : float
        Keep at least pct of the nb of observations having a specific category
    n : int
        Keep at least n of the variables having a specific category

    Returns
    --------
    Returns an index of rows to keep
    """
    kept_labels = []
    per_group = df.groupby(colname).apply(lambda g: g.sample(
        max(1, min(g.shape[0], n, int(g.shape[0] * pct)))).index)
    for group_index in per_group:
        kept_labels.extend(group_index.tolist())
    return pd.Index(kept_labels)
# for k, i in df.groupby(colname).groups:
# to_keep += np.random.choice(i, max(1, min(g.shape[0], n, int(g.shape[0] * pct))), replace=False)
# return to_keep
#
def simulate_na_col(df, colname, n=None, pct=None, weights=None,
                    safety=True, *args, **kwargs):
    """ Simulate missing values in a column of categorical variables.

    Mutates `df` in place: sets `n` entries (or pct * len(df) when only `pct`
    is given) of `colname` to NaN, sampling rows with a random — or supplied —
    per-level weighting. When `colname` is a list/Index, recurses per column.

    Notes
    -----
    Fix issue with category variable"""
    # if df.loc[:,colname].dtype == 'float' or df.loc[:,colname].dtype == 'int':
    #     raise ValueError('This function only support categorical variables')
    if (n is None) and (pct is not None):
        # be careful here especially if cols has a lot of missing values
        n = int(pct * df.shape[0])
    # NOTE(review): pd.core.index.Index is a deprecated internal alias
    # (pd.Index in modern pandas) — confirm the pinned pandas version.
    if isinstance(colname, pd.core.index.Index) or isinstance(colname, list):
        for c in colname:
            simulate_na_col(df, colname=c, n=n, pct=pct, weights=weights)
    else:
        if safety:
            # protect a small sample of every level from being blanked out
            tokeep = keep_category(df, colname, *args, **kwargs)
        # we are not smapling from tokeep
        # NOTE(review): `tokeep` is unbound when safety=False — the next line
        # would raise NameError in that case.
        col = df.loc[:, colname].drop(tokeep)
        col = col.dropna()
        print(colname)
        col_distribution = col.value_counts(normalize=True, sort=False)
        labels = col_distribution.index  # characters
        # generate random pmf
        pmf_na = weights if weights else random_pmf(len(labels))
        na_distribution = pd.Series(data=pmf_na, index=labels)
        # draw samples from this pmf
        weights_na = col.apply(lambda x: na_distribution[x])
        weights_na /= weights_na.sum()
        index_to_replace = col.sample(
            n=n, weights=weights_na, replace=False).index
        df.loc[index_to_replace, colname] = np.nan
def get_test_df_complete():
    """ get the full test dataset from Lending Club open source database,
    the purpose of this fuction is to be used in a demo ipython notebook.

    Downloads a zipped CSV over HTTP, parses it, then appends the synthetic
    columns (na_col, constant_col, duplicated_column, many_missing_70, bad)
    that the demos expect. Requires network access.
    """
    import requests
    import io
    from zipfile import ZipFile
    zip_to_download = "https://resources.lendingclub.com/LoanStats3b.csv.zip"
    r = requests.get(zip_to_download)
    # bugfix: the original used the Python-2-only StringIO module on binary
    # content; io.BytesIO is the Python-3 compatible in-memory buffer.
    zipfile = ZipFile(io.BytesIO(r.content))
    file_csv = zipfile.namelist()[0]
    # we are using the c parser for speed
    df = pd.read_csv(zipfile.open(file_csv), skiprows=[0], na_values=['n/a', 'N/A', ''],
                     parse_dates=['issue_d', 'last_pymnt_d', 'next_pymnt_d', 'last_credit_pull_d'])
    zipfile.close()
    df = df[:-2]  # drop the two trailing summary lines of the export
    nb_row = float(len(df.index))
    df['na_col'] = np.nan
    df['constant_col'] = 'constant'
    df['duplicated_column'] = df.id
    # ~30% filled / 70% missing column
    df['many_missing_70'] = np.nan
    df.loc[1:int(0.3 * nb_row), 'many_missing_70'] = 1
    # binary target: 0 for loans in a "good" status, 1 otherwise
    df['bad'] = 1
    index_good = df['loan_status'].isin(
        ['Fully Paid', 'Current', 'In Grace Period'])
    df.loc[index_good, 'bad'] = 0
    return df
def kl(p, q):
    """
    Kullback-Leibler divergence for discrete distributions

    Parameters
    ----------
    p: ndarray
        probability mass function
    q: ndarray
        probability mass function

    Returns
    --------
    float : D(P || Q) = sum(p(i) * log(p(i)/q(i))
        Discrete probability distributions.
    """
    pointwise = np.where(p != 0, p * np.log(p / q), 0)
    return np.sum(pointwise)
def kl_series(serie1, serie2, dropna=True):
    """KL divergence between the level distributions of two pandas Series."""
    if dropna:
        serie1 = serie1.dropna()
        serie2 = serie2.dropna()
    p = serie1.value_counts(normalize=True).values
    q = serie2.value_counts(normalize=True).values
    # inlined discrete KL: sum over p != 0 of p * log(p / q)
    return np.sum(np.where(p != 0, p * np.log(p / q), 0))
def plot_hist_na(df, colname):
    """Histogram every numeric column of `df`, split by whether `colname`
    is missing (adds a temporary 0/1 indicator column to a copy)."""
    frame = df.copy()
    flag_col = "is_na_{}".format(colname)
    frame[flag_col] = frame[colname].isnull().astype(int)
    numeric_cols = cserie((df.dtypes == int) | (df.dtypes == float))
    frame.groupby(flag_col)[numeric_cols].hist()
def psi(bench, target, group, print_df=True):
    """ This function return the Population Stability Index, quantifying if the
    distribution is stable between two states.
    This statistic make sense and works is only working for numeric variables
    for bench and target.

    Params:
    - bench is a numpy array with the reference variable.
    - target is a numpy array of the new variable.
    - group is the number of group you want consider.
    """
    # quantile cut points of the benchmark distribution
    cut_points = np.percentile(
        bench, [(100.0 / group) * i for i in range(group + 1)], interpolation="nearest")
    # This is the right approach when you have not a lot of unique value
    bins = np.unique(cut_points)
    bench_pct = (pd.cut(bench, bins=bins,
                        include_lowest=True).value_counts()) / len(bench)
    target_pct = (pd.cut(target, bins=bins,
                         include_lowest=True).value_counts()) / len(target)
    bench_pct = bench_pct.sort_index()
    target_pct = target_pct.sort_index()
    statistic = sum((target_pct - bench_pct) * np.log(target_pct / bench_pct))
    if print_df:
        # side-by-side per-bin percentages for inspection
        detail = pd.DataFrame({'ben_pct': bench_pct.values,
                               'target_pct': target_pct.values},
                              index=bench_pct.index)
        return {'data': detail, 'statistic': statistic}
    return statistic
|
Python
| 338
| 33.760357
| 104
|
/autoc/utils/helpers.py
| 0.605498
| 0.582858
|
Csingh1s/TwitterProject
|
refs/heads/master
|
# -*- coding: utf-8 -*-
import requests
import json
import urllib
from datetime import datetime
from flask import Flask, render_template, request
from flask import session
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, BooleanField, PasswordField
from wtforms.validators import DataRequired, Length
from flask_bootstrap import Bootstrap
from flask_sqlalchemy import SQLAlchemy
from flask_sqlalchemy import *
#import sqlalchemy
import tweepy
import os
# Application, database and Twitter API wiring (runs at import time).
app = Flask(__name__)
#app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql://admin:admin@localhost/tweeter'
# Cloud SQL connection string. NOTE(review): DB credentials are hard-coded in
# source; they should come from environment variables / secret management.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://admin:admin@/tweeter?unix_socket=/cloudsql/tweeter-247304:us-central1:mysql'
# NOTE(review): weak hard-coded session secret — acceptable only for dev.
app.secret_key = 'dev'
bootstrap = Bootstrap(app)
db = SQLAlchemy(app)
# Twitter API credentials. NOTE(review): committed secrets — rotate and move
# them out of the repository.
consumerKey = "7FNmg12xwTmCeFIdbapPKh5ea"
consumerSecret = "fyP8qzUgZEyhG9rjII1AWecuC6KUG8OgEFoLDTOpaOIgj8Zymg"
accessToken = "1151510140362854403-BttX7aXPLQQxbRl2UcSFRcLDpVj1lK"
accessTokenKey = "3VtIebPaaQEWsXNl4NdckXFQKfGnNswSxpUTunYvqkOyt"
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenKey)
api = tweepy.API(auth)
class Message(db.Model):
    # Minimal model used only to exercise pagination: an auto-increment
    # integer primary key and nothing else.
    id = db.Column(db.Integer, primary_key=True)
class Query(db.Model):
    """Log row for a search: the query text, when it ran, and the top words
    of the first matching tweet."""
    id = db.Column(db.Integer, primary_key=True)
    query = db.Column(db.String(300), nullable=True)
    # Bugfix: pass the callable `datetime.now` so the timestamp is evaluated
    # per insert; `datetime.now()` froze the import-time value for every row.
    time = db.Column(db.DateTime, nullable=True, default=datetime.now)
    topwords = db.Column(db.String(300), nullable=True)
# Recreate the schema and seed 1000 empty Message rows at import time so the
# pagination demos have content. NOTE(review): this wipes existing data on
# every import of the module.
db.drop_all()
db.create_all()
for i in range(1000):
    m = Message()
    db.session.add(m)
db.session.commit()
class HelloForm(FlaskForm):
    """Demo login form: username (1-20 chars), password (8-150 chars),
    a remember-me checkbox and a submit button."""
    username = StringField('Username', validators=[DataRequired(), Length(1, 20)])
    password = PasswordField('Password', validators=[DataRequired(), Length(8, 150)])
    remember = BooleanField('Remember me')
    submit = SubmitField()
@app.route('/', methods=['GET', 'POST'])
def index():
    """Landing page."""
    page = render_template('index.html')
    return page
@app.route('/form', methods=['GET', 'POST'])
def test_form():
    """Render the demo WTForms form."""
    hello_form = HelloForm()
    return render_template('form.html', form=hello_form)
@app.route('/nav', methods=['GET', 'POST'])
def test_nav():
    """Render the navigation demo page."""
    page = render_template('nav.html')
    return page
@app.route('/pagination', methods=['GET', 'POST'])
def test_pagination():
    """Rebuild the Message table with 100 rows and show one page of 10."""
    db.drop_all()
    db.create_all()
    for _ in range(100):
        db.session.add(Message())
    db.session.commit()
    current_page = request.args.get('page', 1, type=int)
    pager = Message.query.paginate(current_page, per_page=10)
    return render_template('pagination.html', pagination=pager,
                           messages=pager.items)
@app.route('/utils', methods=['GET', 'POST'])
def test_utils():
    """Render the utils demo page."""
    page = render_template('utils.html')
    return page
@app.route('/search', methods=['GET', 'POST'])
def search():
    """Search tweets for a query (from the form, else the session), fetch up
    to 50 per page via the Twitter API, compute each tweet's top-10 words,
    log the query, and render the paginated results."""
    try:
        page = request.args.get('page', 1, type=int)
        pagination = Message.query.paginate(page, per_page=50)
        messages = []
        # Resolve the query: a fresh form submission wins, else fall back to
        # the one remembered in the session; with neither, render empty.
        if "query" in request.form:
            session["query"] = request.form["query"]
            query = request.form["query"]
        elif "query" not in request.form and session.get("query") != None:
            query = session.get("query")
        else:
            return render_template('pagination.html', pagination=pagination, messages=[])
        topWords = ""
        # ================== get tweets ================= #
        # max_id bound for this page: huge sentinel on first visit of page 1,
        # continue below the previous page's lowest id for later pages, and
        # reuse the stored bound when revisiting a page.
        if session.get(str(page)) == None and page == 1:
            maxId = 99999999999999999999
        elif session.get(str(page)) == None and page != 1:
            maxId = session.get(str(page - 1))["sinceId"] - 1
        else:
            maxId = session.get(str(page))["maxId"]
        tweets = []
        flag = False
        while (1):
            tweets_original = api.search(q=query, count=100, max_id=maxId, lang="en")
            if len(tweets_original) == 0:
                break
            for tweet in tweets_original:
                tweets.append(
                    {
                        "id": tweet.id,
                        "created_at": str(tweet.created_at),
                        "text": tweet.text
                    }
                )
                if len(tweets) == 50:
                    flag = True  # page is full
                    break
            if flag == True:
                break
            maxId = tweets_original.since_id - 1
        # ========= update session =========== #
        # Remember this page's id window so later pages can continue below it.
        if len(tweets) > 0:
            session[str(page)] = {
                "maxId": tweets[0]["id"],
                "sinceId": tweets[len(tweets) - 1]["id"],
            }
        # ================== count every word in every tweet ================= #
        for tweet in tweets:
            stweet = tweet["text"].split()
            tweet_words = {}  # word -> occurrence count within this tweet
            top_words = []    # first (up to) 10 distinct words seen
            for word in stweet:
                if word not in tweet_words:
                    tweet_words[word] = 1
                    if len(top_words) < 10: top_words.append(word)
                    continue
                tweet_words[word] += 1
            # ================== get top 10 words ================= #
            # NOTE(review): top_words is only appended while len < 10, so it
            # can never exceed 10 and this branch looks unreachable — confirm.
            if len(top_words) > 10:
                for word, cnt in tweet_words.items():
                    # NOTE(review): `word in tweet_words` is always true here,
                    # so the replacement logic below never runs — confirm intent
                    # (likely meant `word in top_words`).
                    if word in tweet_words: continue
                    last_word = top_words[0]
                    last_idx = 0
                    i = 0
                    # ============ get word of max_words which has minimal count ========= #
                    for mword in top_words:
                        if tweet_words[last_word] > tweet_words[mword]:
                            last_word = mword
                            last_idx = i
                        i += 1
                    # ============ update max_words with new word ======================== #
                    if tweet_words[word] > tweet_words[last_word]:
                        top_words[last_idx] = word
            # ========== sort max_words ============ #
            # bubble-sort top_words by descending count
            i = 0
            j = 0
            for i in range(0, len(top_words)):
                for j in range(i + 1, len(top_words)):
                    if tweet_words[top_words[i]] < tweet_words[top_words[j]]:
                        tmp = top_words[i]
                        top_words[i] = top_words[j]
                        top_words[j] = tmp
                j += 1
            i += 1
            # render the tweet's top words as a comma-separated string
            i = 0
            tweet["topWords"] = ""
            for i in range(0, len(top_words)):
                if i != len(top_words) - 1:
                    tweet["topWords"] += top_words[i] + ", "
                    continue
                tweet["topWords"] += top_words[i]
            # keep the first tweet's top words for the Query log
            if topWords == "": topWords = tweet["topWords"]
        for tweet in tweets:
            messages.append(tweet)
        # ------------ log query, top words of first tweet ----------- #
        q = Query()
        q.query = query
        q.topwords = topWords
        db.session.add(q)
        db.session.commit()
        return render_template('pagination.html', pagination=pagination, messages=messages)
    except Exception as e:
        # NOTE(review): swallows every error (API failures included) and
        # renders an empty page; `pagination` is unbound if paginate() itself
        # raised — consider logging `e` and narrowing this handler.
        return render_template('pagination.html', pagination=pagination, messages=[])
|
Python
| 215
| 31.920931
| 133
|
/examples/app.py
| 0.526702
| 0.513704
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
# Reducer: input lines "author\thour" sorted by author; for each author emit
# every hour tied for their highest posting frequency.
# Fixes: Python-2 print statements (SyntaxError under Python 3) converted to
# print() calls with identical output; the O(n^2) list.count scan replaced by
# a single Counter pass; redundant double assignment of oldAuthor removed.
import sys
from collections import Counter


def _most_frequent_hours(hours):
    """Return the set of hours tied for the highest frequency."""
    counts = Counter(hours)
    peak = max(counts.values())
    return {hour for hour, n in counts.items() if n == peak}


oldAuthor = None  # save the old author's id
hourList = []  # save the list of hours that an author makes posts

for line in sys.stdin:
    data = line.strip().split("\t")
    author, hour = data
    if oldAuthor and author != oldAuthor:
        # the author changed: flush the previous author's modal hours
        for i in _most_frequent_hours(hourList):
            print(oldAuthor, '\t', i)
        hourList = []
    oldAuthor = author
    hourList.append(hour)

if oldAuthor is not None:
    # for the last author, determine the hours of highest frequency
    for i in _most_frequent_hours(hourList):
        print(oldAuthor, "\t", i)
|
Python
| 28
| 34.607143
| 119
|
/student_times_reducer.py
| 0.668657
| 0.668657
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
# Mapper: for each forum post emit "id\t1\tbody_length" for questions and
# "abs_parent_id\t2\tbody_length" for answers. The secondary key ("1"/"2")
# guarantees an answer always sorts after its question in the shuffle.
# Fix: Python-2 print statements converted to print() calls (same output).
import sys
import csv

reader = csv.reader(sys.stdin, delimiter='\t')
for line in reader:
    post_id = line[0]
    post_type = line[5]
    abs_parent_id = line[7]
    post_length = len(line[4])
    if post_id == "id":  # skip the header row
        continue
    if post_type[0] == "q":  # i.e. if the post is a "question"
        print(post_id, "\t", "1", "\t", post_length)
    if post_type[0] == "a":  # i.e. if the post is an "answer"
        print(abs_parent_id, "\t", "2", "\t", post_length)
|
Python
| 22
| 29.727272
| 148
|
/average_length_mapper.py
| 0.591716
| 0.573964
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
# Reducer: input lines "question_id\tauthor_id" sorted by question; for each
# question emit the question node id and the list of participating student ids.
# Fix: Python-2 print statements converted to print() calls (same output).
import sys

oldQuestionNode = None  # save the old question's node id
Student_IDs = []  # the list of question/answers/comment id's for a forum thread

for line in sys.stdin:
    data = line.strip().split("\t")
    question_id, author_id = data
    if oldQuestionNode and oldQuestionNode != question_id:
        # new thread: flush the previous question's participants
        print(oldQuestionNode, "\t", Student_IDs)
        oldQuestionNode = question_id
        Student_IDs = [author_id]
    elif oldQuestionNode:
        Student_IDs.append(author_id)
    else:
        # very first input line
        oldQuestionNode = question_id
        Student_IDs.append(author_id)

if oldQuestionNode != None:
    # for the last question, print question node id, and student IDs
    print(oldQuestionNode, "\t", Student_IDs)
|
Python
| 29
| 28.620689
| 87
|
/study_groups_reducer.py
| 0.681024
| 0.681024
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
# Mapper: for each forum post emit "question_id\tauthor_id" — the post's own
# id for questions, its abs_parent_id for answers/comments — so the reducer
# can group all participants of a thread.
# Fix: Python-2 print statements converted to print() calls (same output).
import sys
import csv

reader = csv.reader(sys.stdin, delimiter='\t')
for line in reader:
    post_id = line[0]
    post_type = line[5]
    author_id = line[3]
    abs_parent_id = line[7]
    if post_id == "id":  # skip the header row
        continue
    if post_type[0] == "q":  # i.e. if the post is a "question"
        print(post_id, "\t", author_id)
    if post_type[0] != "q":  # i.e. if the post is an "answer" or "comment"
        print(abs_parent_id, "\t", author_id)
|
Python
| 21
| 21.523809
| 74
|
/study_groups_mapper.py
| 0.56962
| 0.556962
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
import sys
oldQuestionNode = None # save the old question's node id
oldQuestionLength = 0 # save the old question's length
AnsLengthList = [] # the list of the length of answers for a question
for line in sys.stdin:
data = line.strip().split("\t")
question_id, post_type, post_length = data
if oldQuestionNode and oldQuestionNode != question_id: # i.e. it's a new question
# print the old question's node id, question length, avg answer length
if AnsLengthList == []:
print oldQuestionNode,"\t",oldQuestionLength,"\t", 0
else:
print oldQuestionNode,"\t",oldQuestionLength,"\t", sum(AnsLengthList)/len(AnsLengthList)
oldQuestionNode = question_id # set question node ID to that of the new question
oldQuestionLength = float(post_length)
AnsLengthList = []
elif oldQuestionNode:
AnsLengthList.append(float(post_length))
else:
oldQuestionNode = question_id
oldQuestionLength =float(post_length)
if oldQuestionNode != None:
# for the last question, print id, question length, avg answer length
if AnsLengthList == []:
print oldQuestionNode,"\t",oldQuestionLength,"\t", 0
else:
print oldQuestionNode,"\t",oldQuesitionLength,"\t", sum(AnsLengthList)/len(AnsLengthList)
|
Python
| 37
| 34.945946
| 98
|
/average_length_reducer.py
| 0.679699
| 0.677444
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
# Mapper (Hadoop streaming, Python 2): emit "author_id \t hour" for every row
# carrying a timestamp; the reducer finds each author's most frequent hour.
import sys
import csv

reader = csv.reader(sys.stdin, delimiter='\t')
for line in reader:
    author_id = line[3]
    added_at = line[8]
    if len(added_at) > 11:
        # characters 11-12 of the timestamp string hold the two-digit hour
        hour = int(added_at[11] + added_at[12])
        print author_id,"\t", hour
|
Python
| 13
| 19.384615
| 46
|
/student_times_mapper.py
| 0.603774
| 0.573585
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
# Mapper (Hadoop streaming, Python 2): split the space-separated tag field
# (column 2) and emit one tag per output line for counting by the reducer.
import sys
import csv

reader = csv.reader(sys.stdin, delimiter='\t')
for line in reader:
    tag = line[2]
    tag_list = tag.strip().split(' ')
    for A_tag in tag_list:
        print A_tag
|
Python
| 13
| 15.846154
| 46
|
/popular_tags_mapper.py
| 0.598173
| 0.593607
|
anglipku/Udacity-course-final-project_Intro-to-Hadoop-and-MapReduce
|
refs/heads/master
|
#!/usr/bin/python
import sys
oldTag = None # save the oldTag
oldTagCount = 0 # save the oldTag's Count
Top10Tag = [] # the list of top 10 tags
Top10TagCount = [] # the list of top 1 tags' counts
for line in sys.stdin:
tag = line
if oldTag and oldTag != tag:
# check if the old tag's count beats the current 10th tag
# if so, replace the current 10th tag, and its count, with those of the old tag
if len(Top10TagCount) == 10:
if oldTagCount > min(Top10TagCount) :
Top10Tag[Top10TagCount.index(min(Top10TagCount))]=oldTag
Top10TagCount[Top10TagCount.index(min(Top10TagCount))]=oldTagCount
else:
Top10Tag.append(oldTag)
Top10TagCount.append(oldTagCount)
oldTag = tag # set tag to the new one
oldTagCount = 0
oldTag = tag
oldTagCount = oldTagCount+1
if oldTag != None:
# for the last tag, print id, question length, avg answer length
# check if the old tag's count beats the current 10th tag
# if so, replace the current 10th tag, and its count, with those of the old tag
if oldTagCount > min(Top10TagCount) :
Top10Tag[Top10TagCount.index(min(Top10TagCount))]=oldTag
Top10TagCount[Top10TagCount.index(min(Top10TagCount))]=oldTagCount
# Sort the final top 10 list, and print out
for i in range(10):
print Top10Tag[Top10TagCount.index(max(Top10TagCount))], "\t", max(Top10TagCount)
del Top10Tag[Top10TagCount.index(max(Top10TagCount))]
del Top10TagCount[Top10TagCount.index(max(Top10TagCount))]
|
Python
| 46
| 32.95652
| 87
|
/popular_tags_reducer.py
| 0.682458
| 0.632522
|
DJAxel/ChargetripApiEtl
|
refs/heads/main
|
import os
import json

# Concatenate every per-page JSON dump in the data directory into a single
# stations-all.json file, reporting each file read and the final count.
path = r"/home/axel/Documents/electralign-data/"
stations = []
for filename in sorted(os.listdir(path)):
    filepath = os.path.join(path, filename)
    if not os.path.isfile(filepath):
        continue  # skip sub-directories
    print(filename)
    with open(filepath, 'r') as file:
        stations += json.load(file)
with open(path + 'stations-all.json', 'w') as file:
    json.dump(stations, file)
print("Saved " + str(len(stations)) + " stations")
|
Python
| 19
| 23.736841
| 50
|
/merge.py
| 0.631915
| 0.631915
|
DJAxel/ChargetripApiEtl
|
refs/heads/main
|
import sys
import traceback
from python_graphql_client import GraphqlClient
import json
API_KEY = '5e8c22366f9c5f23ab0eff39' # This is the public key, replace with your own to access all data
client = GraphqlClient(endpoint="https://staging-api.chargetrip.io/graphql")
client.headers = {
'x-client-id': API_KEY
}
query = """
query stationListAll ($page: Int!) {
stationList(size: 100, page: $page) {
id
external_id
country_code
party_id
name
address
city
postal_code
state
country
coordinates {
latitude
longitude
}
related_locations {
latitude
longitude
}
parking_type
evses {
uid
evse_id
status
status_schedule {
period_begin
period_end
status
}
capabilities
connectors {
id
standard
format
power_type
max_voltage
max_amperage
max_electric_power
power
tariff_ids
terms_and_conditions
last_updated
properties
}
floor_level
coordinates {
latitude
longitude
}
physical_reference
parking_restrictions
images {
url
thumbnail
category
type
width
height
}
last_updated
parking_cost
properties
}
directions {
language
text
}
operator {
id
external_id
name
website
logo {
url
thumbnail
category
type
width
height
}
country
contact {
phone
email
website
facebook
twitter
properties
}
}
suboperator {
id
name
}
owner {
id
name
}
facilities
time_zone
opening_times {
twentyfourseven
regular_hours {
weekday
period_begin
period_end
}
exceptional_openings {
period_begin
period_end
}
exceptional_closings {
period_begin
period_end
}
}
charging_when_closed
images {
url
thumbnail
category
type
width
height
}
last_updated
location {
type
coordinates
}
elevation
chargers {
standard
power
price
speed
status {
free
busy
unknown
error
}
total
}
physical_address {
continent
country
county
city
street
number
postalCode
what3Words
formattedAddress
}
amenities
properties
realtime
power
speed
status
review {
rating
count
}
}
}
"""
stations = []               # station records buffered since the last save
startPage = 0               # first page of the station list to request
endpage = 2000              # upper bound on pages to request
lastPageSaved = None        # last page number flushed to disk by saveResults()
lastPageFetched = None      # last page number fetched successfully
failedPages = []            # page numbers whose fetch raised after all retries
numberOfPagesFetched = 0    # count of successful fetches (drives periodic saves)
def attempt(variables, times=3):
    """Execute the GraphQL query with `variables`, retrying on failure.

    Tries up to `times` times; returns the first successful response and
    re-raises the last exception when every try fails.
    """
    to_raise = None
    for n in range(times):
        try:
            if n > 0:
                # Fix: the original tested `_ > 1`, so the first retry was
                # silent and the printed number was off; announce every
                # retry, numbering attempts from 1.
                print("Failed to load, starting attempt " + str(n + 1))
            return client.execute(query=query, variables=variables)
        except Exception as err:
            to_raise = err
    raise to_raise
def fetchPage(pageNumber):
    """Fetch one page of the station list.

    On success records the page in `lastPageFetched` and returns the raw
    response; on failure logs the error, appends the page number to
    `failedPages` and returns None.
    """
    global lastPageFetched
    query_vars = {"page": pageNumber}
    try:
        response = attempt(query_vars)
    except Exception as err:
        print("An error occured while fetching page " + str(pageNumber))
        print(err)
        traceback.print_exc(file=sys.stdout)
        failedPages.append(pageNumber)
        return None
    lastPageFetched = pageNumber
    return response
def saveResults(currentPage):
    """Flush the buffered stations to a page-range-named JSON file.

    Does nothing when `currentPage` was already saved; otherwise writes
    everything accumulated since the previous save, clears the buffer and
    records the new high-water mark in `lastPageSaved`.
    """
    global lastPageSaved, stations
    if lastPageSaved == currentPage:
        return
    first = lastPageSaved + 1 if lastPageSaved else startPage
    last = currentPage
    out_path = ('/home/axel/Documents/electralign-data/stations-page-'
                + str(first) + '-' + str(last) + '.json')
    with open(out_path, 'w') as f:
        json.dump(stations, f)
    stations = []
    print("Saved pages " + str(first) + " until " + str(last) + ".")
    lastPageSaved = last
# Main paging loop: fetch pages startPage..endpage, buffering records and
# flushing to disk every 100 successfully fetched pages.
for x in range(startPage, endpage+1):
    print("Fetching page "+str(x))
    data = fetchPage(x)
    if data is not None:
        stations = stations + data['data']['stationList']
        print(len(stations))
        # A short page (< the requested page size of 100) means the end
        # of the station list was reached.
        if( len(data['data']['stationList']) < 100 ):
            break;
        numberOfPagesFetched += 1
        if(numberOfPagesFetched % 100 == 0):
            saveResults(x)
# Flush whatever is still buffered, then report which pages failed.
saveResults(lastPageFetched)
print("The following pages failed to load:")
print(failedPages)
|
Python
| 246
| 17.861788
| 129
|
/index.py
| 0.562406
| 0.554645
|
DJAxel/ChargetripApiEtl
|
refs/heads/main
|
from python_graphql_client import GraphqlClient
API_KEY = '5f8fbc2aa23e93716e7c621b'
client = GraphqlClient(endpoint="https://staging-api.chargetrip.io/graphql")
client.headers = {
'x-client-id': API_KEY
}
query = """
query stationListAll ($page: Int!) {
stationList(size: 100, page: $page) {
id
external_id
country_code
party_id
name
address
city
postal_code
state
country
coordinates {
latitude
longitude
}
related_locations {
latitude
longitude
}
parking_type
evses {
uid
evse_id
status
status_schedule {
period_begin
period_end
status
}
capabilities
connectors {
id
standard
format
power_type
max_voltage
max_amperage
max_electric_power
power
tariff_ids
terms_and_conditions
last_updated
properties
}
floor_level
coordinates {
latitude
longitude
}
physical_reference
parking_restrictions
images {
url
thumbnail
category
type
width
height
}
last_updated
parking_cost
properties
}
directions {
language
text
}
operator {
id
external_id
name
website
logo {
url
thumbnail
category
type
width
height
}
country
contact {
phone
email
website
facebook
twitter
properties
}
}
suboperator {
id
name
}
owner {
id
name
}
facilities
time_zone
opening_times {
twentyfourseven
regular_hours {
weekday
period_begin
period_end
}
exceptional_openings {
period_begin
period_end
}
exceptional_closings {
period_begin
period_end
}
}
charging_when_closed
images {
url
thumbnail
category
type
width
height
}
last_updated
location {
type
coordinates
}
elevation
chargers {
standard
power
price
speed
status {
free
busy
unknown
error
}
total
}
physical_address {
continent
country
county
city
street
number
postalCode
what3Words
formattedAddress
}
amenities
properties
realtime
power
speed
status
review {
rating
count
}
}
}
"""
variables = {"page": 1}  # smoke test: request only the first page
# NOTE(review): verify=False is forwarded by the client; presumably it
# disables TLS certificate verification (requests-style kwarg) — confirm.
result = client.execute(query=query, variables=variables, verify=False)
print(result)
|
Python
| 182
| 14.318682
| 76
|
/test.py
| 0.507356
| 0.505562
|
DJAxel/ChargetripApiEtl
|
refs/heads/main
|
# Wrap the merged station list in the {"data": {"stationList": [...]}}
# envelope expected by the downstream consumer.
import os
import json

filepath = r"/home/axel/Documents/electralign-data/stations-all.json"
newData = {"data": {"stationList": []}}
if os.path.isfile(filepath):
    with open(filepath, 'r') as file:
        print("File opened")
        data = json.load(file)
        print("Data loaded")
        newData["data"]["stationList"] = data
        print("new data set")
    # NOTE(review): indentation was lost in extraction; the write below is
    # assumed to belong inside the `if` (only rewrap when the input exists).
    filepath = r"/home/axel/Documents/electralign-data/stations-all-fixed.json"
    with open(filepath, 'w') as file:
        print("New file opened")
        json.dump(newData, file)
        print("Done saving data")
|
Python
| 19
| 28.736841
| 75
|
/mutate.py
| 0.651327
| 0.651327
|
kairotavares/tutorials
|
refs/heads/master
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import google.protobuf.text_format
from p4 import p4runtime_pb2
from p4.config import p4info_pb2
class P4InfoBrowser(object):
    """Lookup helper over a P4Info protobuf text file.

    Resolves P4 entities (tables, actions, counters, ...) between their
    names/aliases and numeric ids, and builds p4runtime match/param
    messages from raw values.

    Fixes vs. the original:
      * regex patterns are raw strings (``\\w`` is a regex escape);
      * ``get_table_entry`` passed the arguments to ``get()`` swapped and
        discarded the entry it built.
    """

    def __init__(self, p4_info_filepath):
        p4info = p4info_pb2.P4Info()
        # Load the p4info file into a skeleton P4Info object
        with open(p4_info_filepath) as p4info_f:
            google.protobuf.text_format.Merge(p4info_f.read(), p4info)
        self.p4info = p4info

    def get(self, entity_type, name=None, id=None):
        """Return the `entity_type` entity matching `name` (or alias) or `id`.

        Exactly one of `name`/`id` may be given; raises AttributeError
        when nothing matches.
        """
        if name is not None and id is not None:
            raise AssertionError("name or id must be None")
        for o in getattr(self.p4info, entity_type):
            pre = o.preamble
            if name:
                if pre.name == name or pre.alias == name:
                    return o
            else:
                if pre.id == id:
                    return o
        if name:
            raise AttributeError("Could not find %r of type %s" % (name, entity_type))
        else:
            raise AttributeError("Could not find id %r of type %s" % (id, entity_type))

    def get_id(self, entity_type, name):
        return self.get(entity_type, name=name).preamble.id

    def get_name(self, entity_type, id):
        return self.get(entity_type, id=id).preamble.name

    def get_alias(self, entity_type, id):
        return self.get(entity_type, id=id).preamble.alias

    def __getattr__(self, attr):
        # Synthesize convenience functions for name to id lookups for
        # top-level entities, e.g. get_tables_id() or get_actions_id()
        m = re.search(r"^get_(\w+)_id$", attr)
        if m:
            primitive = m.group(1)
            return lambda name: self.get_id(primitive, name)
        # Synthesize convenience functions for id to name lookups
        m = re.search(r"^get_(\w+)_name$", attr)
        if m:
            primitive = m.group(1)
            return lambda id: self.get_name(primitive, id)
        raise AttributeError("%r object has no attribute %r" % (self.__class__, attr))

    # TODO remove
    def get_table_entry(self, table_name):
        """Return a TableEntry pre-filled with the table's id."""
        t = self.get("tables", name=table_name)
        entry = p4runtime_pb2.TableEntry()
        entry.table_id = t.preamble.id
        return entry

    def get_match_field(self, table_name, match_field_name):
        """Return the match-field descriptor, or None when not found."""
        for t in self.p4info.tables:
            pre = t.preamble
            if pre.name == table_name:
                for mf in t.match_fields:
                    if mf.name == match_field_name:
                        return mf

    def get_match_field_id(self, table_name, match_field_name):
        return self.get_match_field(table_name, match_field_name).id

    def get_match_field_pb(self, table_name, match_field_name, value):
        """Build a p4runtime FieldMatch for `value` (EXACT and LPM only)."""
        p4info_match = self.get_match_field(table_name, match_field_name)
        bw = p4info_match.bitwidth
        p4runtime_match = p4runtime_pb2.FieldMatch()
        p4runtime_match.field_id = p4info_match.id
        # TODO switch on match type and map the value into the appropriate message type
        match_type = p4info_pb2._MATCHFIELD_MATCHTYPE.values_by_number[
            p4info_match.match_type].name
        if match_type == 'EXACT':
            exact = p4runtime_match.exact
            exact.value = value
        elif match_type == 'LPM':
            # value is expected to be an (address, prefix_len) pair
            lpm = p4runtime_match.lpm
            lpm.value = value[0]
            lpm.prefix_len = value[1]
        # TODO finish cases and validate types and bitwidth
        # VALID = 1; EXACT = 2; LPM = 3; TERNARY = 4; RANGE = 5;
        # and raise exception
        return p4runtime_match

    def get_action_param(self, action_name, param_name):
        """Return the action-parameter descriptor; AttributeError if absent."""
        for a in self.p4info.actions:
            pre = a.preamble
            if pre.name == action_name:
                for p in a.params:
                    if p.name == param_name:
                        return p
        raise AttributeError("%r has no attribute %r" % (action_name, param_name))

    def get_action_param_id(self, action_name, param_name):
        return self.get_action_param(action_name, param_name).id

    def get_action_param_pb(self, action_name, param_name, value):
        """Build a p4runtime Action.Param carrying `value`."""
        p4info_param = self.get_action_param(action_name, param_name)
        # bw = p4info_param.bitwidth
        p4runtime_param = p4runtime_pb2.Action.Param()
        p4runtime_param.param_id = p4info_param.id
        p4runtime_param.value = value  # TODO make sure it's the correct bitwidth
        return p4runtime_param
|
Python
| 135
| 36.740742
| 88
|
/P4D2_2017_Fall/exercises/p4runtime/p4info/p4browser.py
| 0.607185
| 0.594621
|
kairotavares/tutorials
|
refs/heads/master
|
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import abstractmethod
import grpc
from p4 import p4runtime_pb2
from p4.tmp import p4config_pb2
from p4info import p4browser
def buildSetPipelineRequest(p4info, device_config, device_id):
    """Build a SetForwardingPipelineConfig request for one device.

    Attaches the given p4info, serializes `device_config`, and selects
    VERIFY_AND_COMMIT so the switch validates and applies in one step.
    """
    request = p4runtime_pb2.SetForwardingPipelineConfigRequest()
    config = request.configs.add()
    config.device_id = device_id
    config.p4info.CopyFrom(p4info)
    config.p4_device_config = device_config.SerializeToString()
    request.action = p4runtime_pb2.SetForwardingPipelineConfigRequest.VERIFY_AND_COMMIT
    return request
def buildTableEntry(p4info_browser,
                    table_name,
                    match_fields=None,
                    action_name=None,
                    action_params=None):
    """Build a p4runtime TableEntry for `table_name`.

    `match_fields` maps field name -> value; `action_params` maps param
    name -> value. Both default to None (treated as empty) instead of the
    original mutable ``{}`` defaults, which are a Python anti-pattern;
    callers passing ``{}`` behave identically.
    """
    table_entry = p4runtime_pb2.TableEntry()
    table_entry.table_id = p4info_browser.get_tables_id(table_name)
    if match_fields:
        table_entry.match.extend([
            p4info_browser.get_match_field_pb(table_name, match_field_name, value)
            for match_field_name, value in match_fields.iteritems()
        ])
    if action_name:
        action = table_entry.action.action
        action.action_id = p4info_browser.get_actions_id(action_name)
        if action_params:
            action.params.extend([
                p4info_browser.get_action_param_pb(action_name, field_name, value)
                for field_name, value in action_params.iteritems()
            ])
    return table_entry
class SwitchConnection(object):
    """gRPC client wrapper for a single P4Runtime switch (Python 2 module)."""

    def __init__(self, name, address='127.0.0.1:50051', device_id=0):
        self.name = name
        self.address = address
        self.device_id = device_id
        self.p4info = None
        self.channel = grpc.insecure_channel(self.address)
        # TODO Do want to do a better job managing stub?
        self.client_stub = p4runtime_pb2.P4RuntimeStub(self.channel)

    @abstractmethod
    def buildDeviceConfig(self, **kwargs):
        # Subclasses supply the target-specific device config.
        return p4config_pb2.P4DeviceConfig()

    def SetForwardingPipelineConfig(self, p4info_file_path, dry_run=False, **kwargs):
        """Push the forwarding pipeline config (or just print it when dry_run)."""
        # NOTE(review): "broswer" is a typo for "browser"; kept because the
        # attribute name is used consistently throughout this class.
        p4info_broswer = p4browser.P4InfoBrowser(p4info_file_path)
        device_config = self.buildDeviceConfig(**kwargs)
        request = buildSetPipelineRequest(p4info_broswer.p4info, device_config, self.device_id)
        if dry_run:
            print "P4 Runtime SetForwardingPipelineConfig:", request
        else:
            self.client_stub.SetForwardingPipelineConfig(request)
        # Update the local P4 Info reference
        self.p4info_broswer = p4info_broswer

    def buildTableEntry(self,
                        table_name,
                        match_fields={},
                        action_name=None,
                        action_params={}):
        # Delegate to the module-level helper using this switch's P4Info.
        return buildTableEntry(self.p4info_broswer, table_name, match_fields, action_name, action_params)

    def WriteTableEntry(self, table_entry, dry_run=False):
        """INSERT one table entry (or print the request when dry_run)."""
        request = p4runtime_pb2.WriteRequest()
        request.device_id = self.device_id
        update = request.updates.add()
        update.type = p4runtime_pb2.Update.INSERT
        update.entity.table_entry.CopyFrom(table_entry)
        if dry_run:
            print "P4 Runtime Write:", request
        else:
            print self.client_stub.Write(request)

    def ReadTableEntries(self, table_name, dry_run=False):
        """Yield read responses for all entries of `table_name`."""
        request = p4runtime_pb2.ReadRequest()
        request.device_id = self.device_id
        entity = request.entities.add()
        table_entry = entity.table_entry
        table_entry.table_id = self.p4info_broswer.get_tables_id(table_name)
        if dry_run:
            print "P4 Runtime Read:", request
        else:
            for response in self.client_stub.Read(request):
                yield response

    def ReadDirectCounters(self, table_name=None, counter_name=None, table_entry=None, dry_run=False):
        """Read and print direct-counter values, optionally scoped to one table."""
        request = p4runtime_pb2.ReadRequest()
        request.device_id = self.device_id
        entity = request.entities.add()
        counter_entry = entity.direct_counter_entry
        if counter_name:
            counter_entry.counter_id = self.p4info_broswer.get_direct_counters_id(counter_name)
        else:
            counter_entry.counter_id = 0  # 0 acts as "no specific counter id"
        # TODO we may not need this table entry
        if table_name:
            table_entry.table_id = self.p4info_broswer.get_tables_id(table_name)
            counter_entry.table_entry.CopyFrom(table_entry)
        counter_entry.data.packet_count = 0
        if dry_run:
            print "P4 Runtime Read:", request
        else:
            for response in self.client_stub.Read(request):
                print response
|
Python
| 130
| 39.061539
| 105
|
/P4D2_2017_Fall/exercises/p4runtime/switches/switch.py
| 0.651882
| 0.636905
|
tony2037/K-means-Machine-Learning
|
refs/heads/master
|
import tensorflow as tf
import numpy as np
import time
#help us to graph
import matplotlib
import matplotlib.pyplot as plt
#import datasets we need by scikit-learn
from sklearn.datasets.samples_generator import make_blobs
from sklearn.datasets.samples_generator import make_circles
#fuck Here I install scipy a matherical package
#set up data type , here i choose blobs to make it simpler
DATA_TYPE = "blobs"
#Set up Number of clusters in train data , if we choose circle,2 is enough
K = 4
if(DATA_TYPE == "circle"):
K = 2
else:
K = 4
#Set up max of iterations , if condition is not met , here I choose 1000
MAX_ITERS = 1000
#To caculate the time we use , record the begining time
start = time.time()
#Since we have chosen four clusters , We have to give four center points for training data
centers = [(-2, -2), (-2, 1.5), (1.5, -2), (2, 1.5)]
#set up the training set
#for blobs:
#n_samples:number of data,which means we have 200 points
#centers = centers
#n_features = dimmension , here we choose plane so = 2
#cluster_std = std
#shuffle:if we mix up samples,here I choose false
#random_state:random seed
#for circles:
#noise: random noise data set up to the sample set
#factor: the ratio factor between circle data set
if(DATA_TYPE == "circle"):
data, features = make_circles(n_samples=200,shuffle=True,noise=None,factor=0.4)
else:
data, features = make_blobs(n_samples=200,centers=centers,n_features=2,cluster_std=0.8,shuffle=False,random_state=42)
#Draw the four centers
#.transpose[0]: x .transpose[1]: y
fig, ax = plt.subplots()
ax.scatter(np.asarray(centers).transpose()[0], np.asarray(centers).transpose()[1], marker = 'o', s = 250)
plt.show()
#Draw the training data
fig, ax = plt.subplots()
if(DATA_TYPE == "blobs"):
ax.scatter(np.asarray(centers).transpose()[0], np.asarray(centers).transpose()[1], marker = 'o', s = 250)
ax.scatter(data.transpose()[0],data.transpose()[1], marker = 'o', s = 100 , c = features, cmap =plt.cm.coolwarm)
plt.plot()
plt.show()
#Set up tf.Variable
#points = data
#cluster_assignments = each points 's cluster
#for example:
#cluster_assignments[13]=2 means 13th point belong cluster 2
N = len(data)
points = tf.Variable(data)
cluster_assignments = tf.Variable(tf.zeros([N], dtype=tf.int64))
#centroids: each groups 's centroids
#tf.slice() really fuck up
#random pick 4 point after all
centroids = tf.Variable(tf.slice(points.initialized_value(), [0,0], [K,2]))
sess = tf.Session()
sess.run(tf.initialize_all_variables())
sess.run(centroids)
# Lost function and rep loop
#centroids = [[x1,y1],[x2,y2],[x3,y3],[x4,y4]] shape=[4,2]
#tf.tile(centroids, [N, 1]) = [N*[x1,y1], N*[x2,y2], N*[x3,y3], N*[x4,y4]] shape=[4N,2]
#rep_centroids = tf.reshape(tf.tile(centroids, [N,1]), [N,K,2]) = [ [N*[x1,y1]] , [N*[x2,y2]] , [N*[x3,y3]] , [N*[x4,y4]] ]
#The condition of stopping process is : "Centroids stop changing" :: did_assignments_change
rep_centroids = tf.reshape(tf.tile(centroids, [N,1]), [N,K,2])
rep_points = tf.reshape(tf.tile(points, [1, K]),[N, K, 2])
sum_squares = tf.reduce_sum(tf.square(rep_points - rep_centroids), reduction_indices=2)
best_centroids = tf.argmin(sum_squares, 1)
did_assignments_change = tf.reduce_any(tf.not_equal(best_centroids, cluster_assignments))
#total=[[all sum of points of group 1], [all sum of points of group 2], [all sum of points of group 3], [all sum of points of group 4]] shape=[4,2]
#count=[How many points of each group] shape = [4,1]
#total/count = [new centroids] shape = [4,1]
def bucket_mean(data, bucket_ids, num_buckets):
    """Per-bucket mean of `data` rows grouped by `bucket_ids`.

    total[k] sums the rows assigned to bucket k; count[k] sums ones in the
    same grouping (i.e. the member count per bucket), so total/count is the
    centroid of each bucket.
    """
    total = tf.unsorted_segment_sum(data, bucket_ids, num_buckets)
    count = tf.unsorted_segment_sum(tf.ones_like(data), bucket_ids, num_buckets)
    return total/count
means = bucket_mean(points, best_centroids, K)
#Do update
with tf.control_dependencies([did_assignments_change]):
do_updates = tf.group(centroids.assign(means), cluster_assignments.assign(best_centroids))
changed = True
iters = 0
fig, ax = plt.subplots()
if(DATA_TYPE == "blobs"):
colourindexes = [2,1,4,3]
else:
colourindexes = [2,1]
while changed and iters < MAX_ITERS:
fig, ax = plt.subplots()
iters +=1
[changed, _] = sess.run([did_assignments_change, do_updates])
[centers, assignments] = sess.run([centroids, cluster_assignments])
ax.scatter(sess.run(points).transpose()[0], sess.run(points).transpose()[1], marker = 'o', s = 200, c = assignments, cmap=plt.cm.coolwarm)
ax.scatter(centers[:,0], centers[:,1], marker = '^', s = 550, c=colourindexes, cmap=plt.cm.plasma)
ax.set_title("Iteration " + str(iters))
plt.savefig("kmeans" + str(iters) + ".png")
ax.scatter(sess.run(points).transpose()[0], sess.run(points).transpose()[1], marker='o', s=200, c=assignments, cmap=plt.cm.coolwarm)
plt.show()
end = time.time()
print("Found in %.2f seconds" %(end-start), iters, "iterations")
print("Centroids: ")
print(centers)
print("Cluster assignment", assignments)
|
Python
| 130
| 37.153847
| 147
|
/k-means/k-means.py
| 0.69268
| 0.664448
|
tony2037/K-means-Machine-Learning
|
refs/heads/master
|
# k-nearest-neighbours classification demo on the iris data set (scikit-learn).
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

iris = datasets.load_iris()
iris_X = iris.data    # feature matrix
iris_y = iris.target  # class labels
print("=====data=====")
print(iris_X)
print("===============")
print("data length : " + str(len(iris_X)))
print("====target====")
print(iris_y)
print("===============")
print("target length : " + str(len(iris_y)))
print("===============")
# Hold out 30% of the samples for testing (the split is randomised).
X_train, X_test, y_train, y_test = train_test_split(iris_X, iris_y, test_size=0.3)
print(y_train)
knn = KNeighborsClassifier()
knn.fit(X_train, y_train)
# Compare predictions against the true test labels.
print(knn.predict(X_test))
print(y_test)
|
Python
| 27
| 22.703703
| 82
|
/KNN/sk4_learning_pattern.py
| 0.643192
| 0.640063
|
biswasalex410/Python
|
refs/heads/master
|
# A tuple of (name, age, gpa) records; tuples are immutable.
students = (
    ("Alex Biswas", 21, 3.46),
    ("Sabuj Chandra Das", 22, 3.69),
    ("Ahad Islam Moeen", 22, 3.46),
)
# A slice from index 0 to the end is the whole tuple.
print(students[0:])
|
Python
| 8
| 12.625
| 23
|
/Tuples.py
| 0.453704
| 0.305556
|
biswasalex410/Python
|
refs/heads/master
|
def divisible(numl):
    """Print the list, then every element that is a multiple of 5."""
    print("Given List is ", numl)
    print("Divisible of 5 in a list ")
    for value in (n for n in numl if n % 5 == 0):
        print(value)


numl = [10, 15, 12, 17, 20]
divisible(numl)
|
Python
| 9
| 22.333334
| 38
|
/Basic Exercise for Beginners6.py
| 0.545455
| 0.483254
|
biswasalex410/Python
|
refs/heads/master
|
#Parents class , Super class, Base class
class Phone:
    def call(self):
        print("You can Call")

    def message(self):
        print("You can Message")


#Child class, Sub class, Derived class
class Samsung(Phone):
    def photo(self):
        print("You can Take Photo")


s = Samsung()
s.call()      # inherited from Phone
s.message()   # inherited from Phone
s.photo()     # defined on Samsung
# NOTE(review): issubclass(Phone, Samsung) asks whether Phone derives from
# Samsung, which prints False; the intended check was probably
# issubclass(Samsung, Phone) — confirm before changing the output.
print(issubclass(Phone,Samsung))
|
Python
| 19
| 17.947369
| 40
|
/OOP Inheritance.py
| 0.64624
| 0.64624
|
biswasalex410/Python
|
refs/heads/master
|
import random

# Five rounds of a guessing game: the player types a number 1-5 and it is
# compared against a freshly drawn random number each round.
for _ in range(1, 6):
    guess = int(input("Enter your guess between 1 to 5 : "))
    secret = random.randint(1, 5)
    if guess == secret:
        print("You have won")
    else:
        print("You have loss", secret)
|
Python
| 10
| 25.4
| 66
|
/Guessing Game.py
| 0.638783
| 0.61597
|
biswasalex410/Python
|
refs/heads/master
|
def char(text):
    """Print each character of `text` alongside its index.

    The parameter was renamed from ``str``, which shadowed the builtin type.
    enumerate() replaces the manual range(0, len(...), 1) index loop.
    """
    for i, ch in enumerate(text):
        print("index[", i, "]", ch)
str = input("Enter any name: ")
print("Print Single Charecter: ")
char(str)
"""
def printEveIndexChar(str):
for i in range(0, len(str)-1, 2):
print("index[",i,"]", str[i] )
inputStr = "pynative"
print("Orginal String is ", inputStr)
print("Printing only even index chars")
printEveIndexChar(inputStr)
"""
|
Python
| 21
| 18.333334
| 39
|
/Basic Exercise for Beginners3.py
| 0.625616
| 0.613301
|
biswasalex410/Python
|
refs/heads/master
|
# Print the digits of `number` from least to most significant on one line.
number = 7536
print("Given number", number)
while number:
    number, digit = divmod(number, 10)  # strip and capture the last digit
    print(digit, end=" ")
|
Python
| 6
| 22.666666
| 29
|
/Basic Exercise for Beginners11.py
| 0.595745
| 0.531915
|
biswasalex410/Python
|
refs/heads/master
|
def removeChars(text, n):
    """Return `text` with its first `n` characters removed.

    The parameter was renamed from ``str``, which shadowed the builtin type.
    An `n` larger than the string simply yields the empty string.
    """
    return text[n:]
print("pynative")
# Ask how many leading characters to drop, then show the result.
n = int(input("Enter the removing number: "))
print(removeChars("pynative", n))
|
Python
| 6
| 22.5
| 45
|
/Basic Exercise for Beginners4.py
| 0.687943
| 0.687943
|
biswasalex410/Python
|
refs/heads/master
|
"""
def calculate(a,b):
return a*a + 2*a*b + b*b
lambda parameter : a*a + 2*a*b + b*b
print(calculate(2,3))
"""
a = (lambda a,b : a*a + 2*a*b + b*b) (2,3)
print(a)
#another
def cube(x):
return x*x*x
a = (lambda x : x*x*x) (3)
print(a)
|
Python
| 13
| 17.615385
| 42
|
/Lambda Functions.py
| 0.543568
| 0.510373
|
biswasalex410/Python
|
refs/heads/master
|
"""
num2 = int(input("Enter a number: "))
result = 20 / num2
print(result)
print("Done")
"""
"""
text = "Alex"
print(text)
print("Done")
"""
"""
try:
list = [20,0,32]
result = list[0] / list[3]
print(result)
print("Done")
except ZeroDivisionError:
print("Dividing by zero is not possible ")
except IndexError:
print("Index Error")
finally:
print("Thanks!!!!!!!!!!")
"""
#Multiple exception hangle
"""
try:
num1 = int(input("Enter First Number: "))
num2 = int(input("Enter the Second Number: "))
result = num1/num2
print(result)
except (ValueError,ZeroDivisionError):
print("You have entered incorrect input.")
finally:
print("Thanks!!!!!!!")
"""
def voter(age):
    """Return a confirmation for ages 18+; raise ValueError otherwise."""
    if age < 18:
        raise ValueError("Invalid Voter")
    return "You are Allowed to vote"


# Demonstrate catching the custom validation error.
try:
    print(voter(17))
except ValueError as err:
    print(err)
|
Python
| 44
| 18.863636
| 50
|
/Exception Handling.py
| 0.617411
| 0.595647
|
biswasalex410/Python
|
refs/heads/master
|
# Set operations demo: union, intersection and difference.
num1 = {1, 2, 3, 4, 5}
num2 = set([4, 5, 6])
num2.add(7)     # {4, 5, 6, 7}
num2.remove(4)  # {5, 6, 7}
print(num1 | num2)  # union
print(num1 & num2)  # intersection
print(num1 - num2)  # difference
|
Python
| 7
| 16.571428
| 19
|
/Set.py
| 0.631148
| 0.467213
|
biswasalex410/Python
|
refs/heads/master
|
# Star triangle: row i prints 2*i-1 copies of " *"
# (row 0 multiplies by -1, which yields an empty line).
n = 3
for row in range(n + 1):
    print(" *" * (2 * row - 1))
|
Python
| 3
| 18.666666
| 29
|
/Pattern.py
| 0.383333
| 0.316667
|
biswasalex410/Python
|
refs/heads/master
|
file = open("Hello.html","w")
file.write("<h1> This is a text</h1>")
file.close()
|
Python
| 5
| 15.8
| 38
|
/Writing file.py
| 0.614458
| 0.590361
|
biswasalex410/Python
|
refs/heads/master
|
def multiplication_or_sum(num1, num2):
    """Return num1*num2 when the product is at most 1000, else num1+num2."""
    product = num1 * num2
    return product if product <= 1000 else num1 + num2
# Read two integers, then apply the product-or-sum rule above.
num1 = int(input("Enter 1st integer number: "))
num2 = int(input("Enter 2nd integer number: "))
print("\n")
result = multiplication_or_sum(num1, num2)
print("The result is ", result)
|
Python
| 12
| 26.666666
| 47
|
/Basic Exercise for Beginners 1.py
| 0.650602
| 0.60241
|
biswasalex410/Python
|
refs/heads/master
|
#Multi level inheritance (kept as a disabled example)
"""
class A:
    def display1(self):
        print("I am inside A class")
class B(A):
    def display2(self):
        print("I am inside B class")
class C(B):
    def display3(self):
        super().display1()
        super().display2()
        print("I am inside C class")
ob1 = C()
ob1.display3()
"""
#Multiple inheritance: C derives from both B and A.
class A:
    def display(self):
        print("I am inside A class")


class B:
    def display(self):
        print("I am inside B class")


class C(B, A):
    # MRO is C -> B -> A, so display() resolves to B's version.
    pass


ob1 = C()
ob1.display()
|
Python
| 36
| 14.555555
| 36
|
/OOP Types Of Inheritance.py
| 0.557143
| 0.539286
|
biswasalex410/Python
|
refs/heads/master
|
# Materialize range(10) into a list: [0, 1, ..., 9].
# Fix: the original ended with a stray bare `j`, which raised NameError,
# and rebuilt the identical list a second time for no effect.
num = list(range(10))
print(num)
|
Python
| 5
| 10.6
| 21
|
/Range.py
| 0.587302
| 0.52381
|
biswasalex410/Python
|
refs/heads/master
|
#Map Function: apply a function to every element.
def square(a):
    """Return a squared."""
    return a * a


num = [1, 2, 3, 4, 5]
result = list(map(square, num))
print(result)

# Filter function: keep only the elements matching the predicate.
num = [1, 2, 3, 4, 5]
result = list(filter(lambda value: value % 2 == 0, num))
print(result)
|
Python
| 15
| 12.733334
| 43
|
/map and filter function.py
| 0.640777
| 0.582524
|
biswasalex410/Python
|
refs/heads/master
|
def add(a, b):
    """Return the sum of a and b."""
    total = a + b  # renamed from `sum`, which shadowed the builtin
    return total


result = add(20, 30)
print("Result = ", result)
|
Python
| 5
| 16.799999
| 25
|
/Returning Value from function.py
| 0.590909
| 0.545455
|
biswasalex410/Python
|
refs/heads/master
|
# Text-to-speech demo using pyttsx3 (third-party offline TTS engine).
import pyttsx3

friend = pyttsx3.init()        # create a TTS engine instance
friend.say('I can speak now')  # queue the phrase
friend.runAndWait()            # block until the queued speech finishes
|
Python
| 4
| 21.25
| 29
|
/Audio Book.py
| 0.761364
| 0.738636
|
biswasalex410/Python
|
refs/heads/master
|
# Types the same chat message 100 times into the focused window via pyautogui.
# NOTE(review): time.sleep(0) adds no delay between messages — presumably a
# placeholder; confirm the intended pause before relying on it.
import pyautogui
import time

message = 100  # number of messages left to send
while message > 0:
    time.sleep(0)
    pyautogui.typewrite('Hi BC!!!')  # type the message text
    pyautogui.press('enter')         # send it
    message = message - 1
|
Python
| 8
| 20.375
| 35
|
/Send Message.py
| 0.670588
| 0.635294
|
biswasalex410/Python
|
refs/heads/master
|
class Student:
    """Bare record type; attributes are assigned after construction."""
    # class-level defaults, overwritten per instance below
    roll = " "
    gpa = " "

def _describe(record):
    # print one student's data in the demo's fixed format
    print(f"Roll: {record.roll}, GPA: {record.gpa}")

rahim = Student()
print(isinstance(rahim, Student))
rahim.roll = 101
rahim.gpa = 3.95
_describe(rahim)

karim = Student()
print(isinstance(karim, Student))
karim.roll = 102
karim.gpa = 4.85
_describe(karim)
|
Python
| 15
| 19.666666
| 46
|
/OOP Class and Object.py
| 0.656958
| 0.618123
|
biswasalex410/Python
|
refs/heads/master
|
# Variable-argument demos.  The three triple-quoted blocks below are
# earlier versions kept disabled as inert string literals.
"""
def student(id,name):
    print(id,name)
student(191,"Alex Biswas")
"""
"""
def student(*details):
    print(details)
student(191,"Alex",3.46)
student(192,"Alex",3.46)
"""
"""
def add(*numbers):
    sum = 0
    for num in numbers:
        sum = sum + num
    print(sum)
add(10,15)
add(10,15,20)
add(10,15,20,25)
"""
# **kwargs: collects the keyword arguments into a dict
# (the original "#xxagrs" label was a typo).
def student(**details):
    # details is a dict, e.g. {'id': 191, 'name': 'Alex'}
    print(details)
student(id=191,name="Alex")
|
Python
| 29
| 12.827586
| 27
|
/xargs and xxargs.py
| 0.6
| 0.5075
|
biswasalex410/Python
|
refs/heads/master
|
# Print the contents of student.txt line by line.
# Rewritten to use a context manager: the original called file.close()
# manually with no try/finally, so the handle leaked if iteration
# raised.  The exploratory commented-out read()/readlines() experiments
# were removed.  Output is unchanged.
with open("student.txt", "r") as file:
    for line in file:
        print(line)
|
Python
| 11
| 17.545454
| 30
|
/Reading file.py
| 0.660098
| 0.660098
|
biswasalex410/Python
|
refs/heads/master
|
# Map roll numbers to student names, then look one up safely:
# dict.get returns the fallback instead of raising on a missing key.
studentid = {
    464: "Alex Biswas",
    525: "Sabuj Chandra Das",
    957: "Sonia Akter",
    770: "Tasni Tasnim Nilima",
}
looked_up = studentid.get(525, "Not a valid key")
print(looked_up)
|
Python
| 8
| 16.625
| 43
|
/Dictionary.py
| 0.489362
| 0.382979
|
biswasalex410/Python
|
refs/heads/master
|
# zip() pairs elements positionally; a third iterable widens each tuple.
roll = [101, 102, 103, 104, 105, 106]
name = ["Alex Biswas", "Sabuj Chandra Das", "Ahad Islam Moeen", "Sonia Akter", "Mariam Akter", "Sajib Das"]
pairs = list(zip(roll, name))
print(pairs)
# one extra character per tuple; zip stops at the shortest iterable
print(list(zip(roll, name, "ABCDEF")))
|
Python
| 5
| 32.400002
| 67
|
/Zip function.py
| 0.578313
| 0.46988
|
biswasalex410/Python
|
refs/heads/master
|
num = [1, 2, 3, 4, 5]
# keep only the even values -- same result as filter(lambda x: x % 2 == 0, num)
result = [item for item in num if item % 2 == 0]
print(result)
|
Python
| 6
| 15.833333
| 35
|
/List Comprehensions.py
| 0.623762
| 0.554455
|
biswasalex410/Python
|
refs/heads/master
|
# Using string Function
"""
sampleStr = "Emma is good developer. Emma is a writer"
cnt = sampleStr.count("Emma")
print("Emma appeared",cnt,"times")
"""
# Without using the built-in str.count()
def count_emma(text):
    """Return how many times 'Emma' occurs in *text* (overlapping scan).

    The parameter was renamed from `str`, which shadowed the builtin of
    the same name.  Scanning every start index is safe: slices near the
    end are shorter than 4 characters and can never equal 'Emma'.
    """
    print("Given String : ", text)
    count = 0
    for i in range(len(text)):
        # explicit comparison replaces the original bool-arithmetic
        # (count += slice == 'Emma'), which relied on True == 1
        if text[i: i + 4] == 'Emma':
            count += 1
    return count

count = count_emma("Emma is good devveloper. Emma is a writer")
print("Emma appeared ", count, "times")
|
Python
| 17
| 25.117647
| 63
|
/Basic Exercise for Beginners7.py
| 0.654628
| 0.647856
|
biswasalex410/Python
|
refs/heads/master
|
num = [10, 20, 30, 40, 50]
print(num)
"""
index = 0
n = len(num)
while index<n:
    print(num[index])
    index = index+1
"""
# Accumulate the list total.  The variable was renamed from `sum`,
# which shadowed the builtin of the same name.
total = 0
for x in num:
    total = total + x
print(total)
|
Python
| 13
| 12.153846
| 22
|
/for loop.py
| 0.527174
| 0.456522
|
biswasalex410/Python
|
refs/heads/master
|
class Student:
    """Student record initialised from a roll number and a GPA."""

    # class-level defaults (kept from the original; instances always
    # overwrite them in __init__)
    roll = ""
    gpa = ""

    def __init__(self, roll, gpa):
        self.roll, self.gpa = roll, gpa

    def display(self):
        # fixed demo output format
        print(f"Roll: {self.roll}, GPA: {self.gpa}")

rahim = Student(464, 4.50)
rahim.display()
karim = Student(525, 4.98)
karim.display()
|
Python
| 14
| 19.357143
| 52
|
/OOP Constructor.py
| 0.566901
| 0.524648
|
biswasalex410/Python
|
refs/heads/master
|
def isFirstLastsame(numl):
    """Return True when the first and last items of numl are equal.

    Prints the list first (kept from the original demo).  Still raises
    IndexError on an empty list, as before.
    """
    print("Given List is ", numl)
    # direct comparison replaces the if/else that returned True/False
    return numl[0] == numl[-1]

numl = [10, 15, 12, 17, 19]
print("Result is ", isFirstLastsame(numl))
|
Python
| 10
| 25.9
| 41
|
/Basic Exercise for Beginners5.py
| 0.63806
| 0.593284
|
biswasalex410/Python
|
refs/heads/master
|
def mergeList(list1, list2):
    """Return the odd numbers of list1 followed by the evens of list2."""
    print("First List ", list1)
    print("Second List ", list2)
    odds = [value for value in list1 if value % 2 != 0]
    evens = [value for value in list2 if value % 2 == 0]
    return odds + evens

list1 = [10, 20, 35, 11, 27]
list2 = [13, 43, 33, 12, 24]
print("Result List is ", mergeList(list1, list2))
|
Python
| 15
| 26.266666
| 49
|
/Basic Exercise for Beginners10.py
| 0.556373
| 0.473039
|
biswasalex410/Python
|
refs/heads/master
|
"""
import re
pattern = r"colour"
text = r"My favourite colour is Red"
match = re.search(pattern,text)
if match:
print(match.start())
print(match.end())
print(match.span())
"""
#Search And Replace
"""
import re
pattern = r"colour"
text = r"My favourite colour is Red. I love blue colour as well"
text1 = re.sub(pattern,"color",text,count=1)
print(text1)
"""
#Metacharecter
import re
pattern = r"[A-Z] [a-z] [0-9]"
if re.match(pattern,"Ag0"):
print("Matched")
|
Python
| 29
| 15.586206
| 64
|
/Regular expression.py
| 0.622047
| 0.610236
|
biswasalex410/Python
|
refs/heads/master
|
# list(range(...)) demos: default start, explicit bounds, and a step.
num = list(range(10))
print(num)
print(num[2])  # third element (index 2)

num = list(range(2, 5))
print(num)

# even numbers from 2 through 100 inclusive
num = list(range(2, 101, 2))
print(num)
|
Python
| 9
| 12.444445
| 26
|
/programme.py
| 0.609375
| 0.53125
|
biswasalex410/Python
|
refs/heads/master
|
class Trinangle:
    """Triangle described by base and height (class name kept as-is)."""

    def __init__(self, base, height):
        self.base, self.height = base, height

    def calculate_area(self):
        # half of base times height; expression kept in the original
        # evaluation order to preserve float rounding exactly
        area = 0.5 * self.base * self.height
        print(f"Base: {self.base}, Height: {self.height}", "Area = ", area)

t1 = Trinangle(10, 20)
t1.calculate_area()
t2 = Trinangle(20, 30)
t2.calculate_area()
|
Python
| 12
| 27.416666
| 73
|
/OOP Exercise1.py
| 0.614706
| 0.573529
|
biswasalex410/Python
|
refs/heads/master
|
# Sum the squares of even numbers: 2^2 + 4^2 + ... read the limit from stdin.
# NOTE: `sum` shadows the builtin of the same name (harmless in this script).
n = int(input("Enter the last number: "))
sum = 0
for x in range(2,n+2,2):
    # x takes even values 2, 4, ... up to the largest even number < n+2
    # (so for odd n the series includes n+1)
    sum = sum+x*x
print(sum)
|
Python
| 5
| 19.799999
| 41
|
/Series.py
| 0.558559
| 0.522523
|
biswasalex410/Python
|
refs/heads/master
|
# Demo of a user-defined module: `area` is a sibling module (not shown
# here) expected to provide rectangle_area and triangle_area.
from area import *
# presumably these compute/print the areas -- implementation lives in
# area.py; TODO confirm
rectangle_area(25,6)
triangle_area(10,15)
|
Python
| 4
| 14.5
| 20
|
/OOP Creating youe own Module.py
| 0.754098
| 0.639344
|
biswasalex410/Python
|
refs/heads/master
|
# Two kinds of functions: library (built-in) and user-defined.
"""
Library -> print(), input()
userdefine -> make your own need
"""
def add(a, b):
    """Print the sum of a and b (returns None)."""
    print(a + b)

def sub(x, y):
    """Print x minus y (returns None)."""
    print(x - y)

add(10, 15)
sub(15, 7)

def message():
    """Parameter-less function demo."""
    print("No parameter")

message()
|
Python
| 17
| 13.823529
| 32
|
/Function.py
| 0.587302
| 0.555556
|
biswasalex410/Python
|
refs/heads/master
|
#Regular method
# The triple-quoted block below is the classic three-step swap using a
# temporary variable, kept disabled as an inert string literal.
"""
a = 20
b = 15
print("a = ",a)
print("b = ",b)
temp = a #temp = 20
a = b #a = 15
b = temp # b = 15
print("After Swapping")
print("a = ",a)
print("b = ",b)
"""
#Python Special Method
a = 20
b = 15
print("a = ",a)
print("b = ",b)
# tuple packing/unpacking swaps both names in one statement, no temp needed
a, b = b, a
print("After Swapping")
print("a = ",a)
print("b = ",b)
|
Python
| 23
| 12.73913
| 23
|
/Swapping.py
| 0.520635
| 0.47619
|
biswasalex410/Python
|
refs/heads/master
|
# Stack demo kept disabled as an inert string literal.
"""
books = []
books.append("Learn C")
books.append("Learn C++")
books.append("Learn Java")
print(books)
books.pop()
print("Now the top book is :",books[-1])
print(books)
books.pop()
print("Now the top book is :",books[-1])
print(books)
books.pop()
if not books:
    print("No books left")
"""
# Queue demo: deque gives O(1) pops from the left (front of the queue).
from collections import deque

bank = deque(["Alex", "Sabuj", "Sonia", "Moeen"])
print(bank)
bank.popleft()  # first customer served
print(bank)
# serve the remaining three customers
for _ in range(3):
    bank.popleft()
if not bank:
    print("no person left")
|
Python
| 29
| 16.620689
| 46
|
/Stack and Queue.py
| 0.672549
| 0.668627
|
igoryuha/wct
|
refs/heads/master
|
import torch
from models import NormalisedVGG, Decoder
from utils import load_image, preprocess, deprocess, extract_image_names
from ops import style_decorator, wct
import argparse
import os
# Command-line interface for WCT (whitening-and-coloring) style transfer.
# Either a single image or a whole folder may be given for content/style.
parser = argparse.ArgumentParser(description='WCT')
parser.add_argument('--content-path', type=str, help='path to the content image')
parser.add_argument('--style-path', type=str, help='path to the style image')
parser.add_argument('--content-dir', type=str, help='path to the content image folder')
parser.add_argument('--style-dir', type=str, help='path to the style image folder')
parser.add_argument('--style-decorator', type=int, default=1)
parser.add_argument('--kernel-size', type=int, default=12)
parser.add_argument('--stride', type=int, default=1)
parser.add_argument('--alpha', type=float, default=0.8)
parser.add_argument('--ss-alpha', type=float, default=0.6)
parser.add_argument('--synthesis', type=int, default=0, help='0-transfer, 1-synthesis')
parser.add_argument('--encoder-path', type=str, default='encoder/vgg_normalised_conv5_1.pth')
parser.add_argument('--decoders-dir', type=str, default='decoders')
parser.add_argument('--save-dir', type=str, default='./results')
parser.add_argument('--save-name', type=str, default='result', help='save name for single output image')
parser.add_argument('--save-ext', type=str, default='jpg', help='The extension name of the output image')
parser.add_argument('--content-size', type=int, default=768, help='New (minimum) size for the content image')
parser.add_argument('--style-size', type=int, default=768, help='New (minimum) size for the style image')
parser.add_argument('--gpu', type=int, default=0, help='ID of the GPU to use; for CPU mode set --gpu = -1')
args = parser.parse_args()
# at least one content source and one style source must be supplied
assert args.content_path is not None or args.content_dir is not None, \
    'Either --content-path or --content-dir should be given.'
assert args.style_path is not None or args.style_dir is not None, \
    'Either --style-path or --style-dir should be given.'
device = torch.device('cuda:%s' % args.gpu if torch.cuda.is_available() and args.gpu != -1 else 'cpu')
# one shared VGG encoder plus one pretrained decoder per relu level
encoder = NormalisedVGG(pretrained_path=args.encoder_path).to(device)
d5 = Decoder('relu5_1', pretrained_path=os.path.join(args.decoders_dir, 'd5.pth')).to(device)
d4 = Decoder('relu4_1', pretrained_path=os.path.join(args.decoders_dir, 'd4.pth')).to(device)
d3 = Decoder('relu3_1', pretrained_path=os.path.join(args.decoders_dir, 'd3.pth')).to(device)
d2 = Decoder('relu2_1', pretrained_path=os.path.join(args.decoders_dir, 'd2.pth')).to(device)
d1 = Decoder('relu1_1', pretrained_path=os.path.join(args.decoders_dir, 'd1.pth')).to(device)
def style_transfer(content, style):
    """Coarse-to-fine WCT style transfer.

    Stylises at the deepest level (relu5_1) first -- optionally with the
    patch-based style decorator instead of plain WCT -- then repeatedly
    re-encodes the reconstruction at the next-shallower relu level,
    applies WCT against the style features, and decodes, down to
    relu1_1.  Reads args/encoder/d1..d5 from module globals.
    """
    if args.style_decorator:
        relu5_1_cf = encoder(content, 'relu5_1')
        relu5_1_sf = encoder(style, 'relu5_1')
        relu5_1_scf = style_decorator(relu5_1_cf, relu5_1_sf, args.kernel_size, args.stride, args.ss_alpha)
        relu5_1_recons = d5(relu5_1_scf)
    else:
        relu5_1_cf = encoder(content, 'relu5_1')
        relu5_1_sf = encoder(style, 'relu5_1')
        relu5_1_scf = wct(relu5_1_cf, relu5_1_sf, args.alpha)
        relu5_1_recons = d5(relu5_1_scf)
    # refine through relu4_1 -> relu1_1, blending with args.alpha each time
    relu4_1_cf = encoder(relu5_1_recons, 'relu4_1')
    relu4_1_sf = encoder(style, 'relu4_1')
    relu4_1_scf = wct(relu4_1_cf, relu4_1_sf, args.alpha)
    relu4_1_recons = d4(relu4_1_scf)
    relu3_1_cf = encoder(relu4_1_recons, 'relu3_1')
    relu3_1_sf = encoder(style, 'relu3_1')
    relu3_1_scf = wct(relu3_1_cf, relu3_1_sf, args.alpha)
    relu3_1_recons = d3(relu3_1_scf)
    relu2_1_cf = encoder(relu3_1_recons, 'relu2_1')
    relu2_1_sf = encoder(style, 'relu2_1')
    relu2_1_scf = wct(relu2_1_cf, relu2_1_sf, args.alpha)
    relu2_1_recons = d2(relu2_1_scf)
    relu1_1_cf = encoder(relu2_1_recons, 'relu1_1')
    relu1_1_sf = encoder(style, 'relu1_1')
    relu1_1_scf = wct(relu1_1_cf, relu1_1_sf, args.alpha)
    relu1_1_recons = d1(relu1_1_scf)
    return relu1_1_recons
if not os.path.exists(args.save_dir):
    print('Creating save folder at', args.save_dir)
    os.mkdir(args.save_dir)
# collect the work lists: either a folder scan or the single given path
content_paths = []
style_paths = []
if args.content_dir:
    # use a batch of content images
    content_paths = extract_image_names(args.content_dir)
else:
    # use a single content image
    content_paths.append(args.content_path)
if args.style_dir:
    # use a batch of style images
    style_paths = extract_image_names(args.style_dir)
else:
    # use a single style image
    style_paths.append(args.style_path)
print('Number content images:', len(content_paths))
print('Number style images:', len(style_paths))
# inference only: pair every content image with every style image
with torch.no_grad():
    for i in range(len(content_paths)):
        content = load_image(content_paths[i])
        content = preprocess(content, args.content_size)
        content = content.to(device)
        for j in range(len(style_paths)):
            style = load_image(style_paths[j])
            style = preprocess(style, args.style_size)
            style = style.to(device)
            if args.synthesis == 0:
                output = style_transfer(content, style)
                output = deprocess(output)
                if len(content_paths) == 1 and len(style_paths) == 1:
                    # used a single content and style image
                    save_path = '%s/%s.%s' % (args.save_dir, args.save_name, args.save_ext)
                else:
                    # used a batch of content and style images
                    save_path = '%s/%s_%s.%s' % (args.save_dir, i, j, args.save_ext)
                print('Output image saved at:', save_path)
                output.save(save_path)
            else:
                # synthesis mode: start from uniform random noise and
                # feed the result back in for three iterations
                content = torch.rand(*content.shape).uniform_(0, 1).to(device)
                for iteration in range(3):
                    output = style_transfer(content, style)
                    content = output
                    output = deprocess(output)
                    if len(content_paths) == 1 and len(style_paths) == 1:
                        # used a single content and style image
                        save_path = '%s/%s_%s.%s' % (args.save_dir, args.save_name, iteration, args.save_ext)
                    else:
                        # used a batch of content and style images
                        save_path = '%s/%s_%s_%s.%s' % (args.save_dir, i, j, iteration, args.save_ext)
                    print('Output image saved at:', save_path)
                    output.save(save_path)
|
Python
| 151
| 41.788078
| 109
|
/eval.py
| 0.63597
| 0.609039
|
igoryuha/wct
|
refs/heads/master
|
import torch
import torch.nn as nn
import copy
# VGG-19 encoder up to relu5_1, with reflection padding instead of the
# usual zero padding.  The slice indices used by NormalisedVGG.forward
# refer to positions in this Sequential (relu1_1 ends at index 3,
# relu2_1 at 10, relu3_1 at 17, relu4_1 at 30).
normalised_vgg_relu5_1 = nn.Sequential(
    # 1x1 conv: input normalisation layer of the "normalised" VGG weights
    nn.Conv2d(3, 3, 1),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(3, 64, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, 3),
    nn.ReLU(),
    nn.MaxPool2d(2, ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 128, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, 3),
    nn.ReLU(),
    nn.MaxPool2d(2, ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 256, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, 3),
    nn.ReLU(),
    nn.MaxPool2d(2, ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 512, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.MaxPool2d(2, ceil_mode=True),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU()
)
class NormalisedVGG(nn.Module):
    """VGG encoder that can stop at any relu*_1 layer.

    Wraps the module-level normalised_vgg_relu5_1 Sequential; the slice
    bounds in forward() are the indices just past each relu*_1 layer.
    """
    def __init__(self, pretrained_path=None):
        super().__init__()
        self.net = normalised_vgg_relu5_1
        if pretrained_path is not None:
            # map_location keeps loading CPU-safe regardless of where
            # the checkpoint was saved
            self.net.load_state_dict(torch.load(pretrained_path, map_location=lambda storage, loc: storage))
    def forward(self, x, target):
        # run the prefix of the network ending at the requested layer;
        # an unrecognised target falls through and returns None
        if target == 'relu1_1':
            return self.net[:4](x)
        elif target == 'relu2_1':
            return self.net[:11](x)
        elif target == 'relu3_1':
            return self.net[:18](x)
        elif target == 'relu4_1':
            return self.net[:31](x)
        elif target == 'relu5_1':
            return self.net(x)
# Mirror of the encoder: upsamples from relu5_1 features back to a
# 3-channel image.  Decoder.__init__ slices suffixes of this Sequential
# to build decoders for shallower layers.
vgg_decoder_relu5_1 = nn.Sequential(
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.Upsample(scale_factor=2),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 512, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(512, 256, 3),
    nn.ReLU(),
    nn.Upsample(scale_factor=2),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 256, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(256, 128, 3),
    nn.ReLU(),
    nn.Upsample(scale_factor=2),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 128, 3),
    nn.ReLU(),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(128, 64, 3),
    nn.ReLU(),
    nn.Upsample(scale_factor=2),
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 64, 3),
    nn.ReLU(),
    # final projection back to RGB; deliberately no ReLU here
    nn.ReflectionPad2d((1, 1, 1, 1)),
    nn.Conv2d(64, 3, 3)
)
class Decoder(nn.Module):
    """Decoder reconstructing an image from a given relu*_1 feature map.

    Each negative slice selects the tail of vgg_decoder_relu5_1 needed
    for that depth; deepcopy gives every Decoder its own weights.
    """
    def __init__(self, target, pretrained_path=None):
        super().__init__()
        if target == 'relu1_1':
            self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-5:])) # current -2
        elif target == 'relu2_1':
            self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-9:]))
        elif target == 'relu3_1':
            self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-16:]))
        elif target == 'relu4_1':
            self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())[-29:]))
        elif target == 'relu5_1':
            self.net = nn.Sequential(*copy.deepcopy(list(vgg_decoder_relu5_1.children())))
        if pretrained_path is not None:
            # map_location keeps loading CPU-safe regardless of where
            # the checkpoint was saved
            self.net.load_state_dict(torch.load(pretrained_path, map_location=lambda storage, loc: storage))
    def forward(self, x):
        return self.net(x)
|
Python
| 139
| 28.978416
| 111
|
/models.py
| 0.555796
| 0.461723
|
igoryuha/wct
|
refs/heads/master
|
import torch
import torch.nn.functional as F
def extract_image_patches_(image, kernel_size, strides):
    """Slide a (kh, kw) window over the spatial dims of an NCHW tensor.

    Returns all windows stacked as (num_patches, C, kh, kw), in
    row-major order over the window grid.
    """
    kh, kw = kernel_size
    sh, sw = strides
    windows = image.unfold(2, kh, sh).unfold(3, kw, sw)
    # bring the window-grid axes next to the batch axis, then flatten
    # batch and grid into a single leading patch dimension
    windows = windows.permute(0, 2, 3, 1, 4, 5)
    return windows.reshape(-1, *windows.shape[-3:])
def style_swap(c_features, s_features, kernel_size, stride=1):
    """Replace each content activation patch with its most-correlated
    style activation patch (the "style swap" operation).

    Both inputs are NCHW feature maps; the result has the content's
    spatial layout but is assembled entirely from style patches.
    """
    s_patches = extract_image_patches_(s_features, [kernel_size, kernel_size], [stride, stride])
    # L2-normalise each style patch so conv2d below computes
    # normalised cross-correlation (1e-8 guards against division by zero)
    s_patches_matrix = s_patches.reshape(s_patches.shape[0], -1)
    s_patch_wise_norm = torch.norm(s_patches_matrix, dim=1)
    s_patch_wise_norm = s_patch_wise_norm.reshape(-1, 1, 1, 1)
    s_patches_normalized = s_patches / (s_patch_wise_norm + 1e-8)
    # Computes the normalized cross-correlations.
    # At each spatial location, "K" is a vector of cross-correlations
    # between a content activation patch and all style activation patches.
    K = F.conv2d(c_features, s_patches_normalized, stride=stride)
    # Replace each vector "K" by a one-hot vector corresponding
    # to the best matching style activation patch.
    best_matching_idx = K.argmax(1, keepdim=True)
    one_hot = torch.zeros_like(K)
    one_hot.scatter_(1, best_matching_idx, 1)
    # At each spatial location, only the best matching style
    # activation patch is in the output, as the other patches
    # are multiplied by zero.
    F_ss = F.conv_transpose2d(one_hot, s_patches, stride=stride)
    # average where overlapping windows wrote to the same pixel
    overlap = F.conv_transpose2d(one_hot, torch.ones_like(s_patches), stride=stride)
    F_ss = F_ss / overlap
    return F_ss
def relu_x_1_transform(c, s, encoder, decoder, relu_target, alpha=1):
    """Encode content and style at relu_target, blend via WCT, decode."""
    blended = wct(encoder(c, relu_target), encoder(s, relu_target), alpha)
    return decoder(blended)
def relu_x_1_style_decorator_transform(c, s, encoder, decoder, relu_target, kernel_size, stride=1, alpha=1):
    """Encode both images at relu_target, apply the patch-based style
    decorator, and decode the result back to image space."""
    decorated = style_decorator(
        encoder(c, relu_target),
        encoder(s, relu_target),
        kernel_size, stride, alpha,
    )
    return decoder(decorated)
def style_decorator(cf, sf, kernel_size, stride=1, alpha=1):
    """Style-decorator transform: whiten both feature maps, patch-match
    content against style in the whitened domain, recolor with the
    style statistics, and blend the result with cf by alpha.
    """
    cf_shape = cf.shape
    sf_shape = sf.shape
    # flatten each NCHW map to (channels, pixels) for the whitening math
    b, c, h, w = cf_shape
    cf_vectorized = cf.reshape(c, h * w)
    b, c, h, w = sf.shape
    sf_vectorized = sf.reshape(c, h * w)
    # map features to normalized domain
    cf_whiten = whitening(cf_vectorized)
    sf_whiten = whitening(sf_vectorized)
    # in this normalized domain, we want to align
    # any element in cf with the nearest element in sf
    reassembling_f = style_swap(
        cf_whiten.reshape(cf_shape),
        sf_whiten.reshape(sf_shape),
        kernel_size, stride
    )
    b, c, h, w = reassembling_f.shape
    reassembling_vectorized = reassembling_f.reshape(c, h*w)
    # reconstruct reassembling features into the
    # domain of the style features
    result = coloring(reassembling_vectorized, sf_vectorized)
    result = result.reshape(cf_shape)
    # alpha=1 gives full stylisation; smaller values keep more content
    bland = alpha * result + (1 - alpha) * cf
    return bland
def wct(cf, sf, alpha=1):
    """Whitening-and-coloring transform.

    Whitens the content features cf, recolors them with the second-order
    statistics of the style features sf, and blends the result with the
    original cf by alpha (alpha=1 means fully stylised).
    """
    original_shape = cf.shape
    _, channels, height, width = original_shape
    content_flat = cf.reshape(channels, height * width)
    _, s_channels, s_height, s_width = sf.shape
    style_flat = sf.reshape(s_channels, s_height * s_width)
    recolored = coloring(whitening(content_flat), style_flat)
    recolored = recolored.reshape(original_shape)
    return alpha * recolored + (1 - alpha) * cf
def feature_decomposition(x):
    """Eigendecompose the covariance of x (a (channels, pixels) matrix).

    Returns (e, d, x_center, x_mean): eigenvectors e restricted to the
    strictly positive eigenvalues d, plus the centered data and the
    per-channel mean (needed by whitening/coloring).
    """
    # NOTE(review): `keepdims` is the numpy-style alias; torch's canonical
    # kwarg is `keepdim` -- confirm the pinned torch version accepts it
    x_mean = x.mean(1, keepdims=True)
    x_center = x - x_mean
    # sample covariance across pixels
    x_cov = x_center.mm(x_center.t()) / (x_center.size(1) - 1)
    # NOTE(review): torch.svd is a legacy API; torch.linalg.svd is the
    # modern replacement
    e, d, _ = torch.svd(x_cov)
    # drop non-positive singular values so d**-0.5 in whitening is finite
    d = d[d > 0]
    e = e[:, :d.size(0)]
    return e, d, x_center, x_mean
def whitening(x):
    """Project x (channels x pixels) to zero mean and unit covariance."""
    e, d, centered, _ = feature_decomposition(x)
    # W = E * D^-1/2 * E^T, applied to the centered features
    whitener = e.mm(torch.diag(d ** -0.5)).mm(e.t())
    return whitener.mm(centered)
def coloring(x, y):
    """Give whitened features x the covariance and mean of y."""
    e, d, _, y_mean = feature_decomposition(y)
    # C = E * D^1/2 * E^T, then shift by y's per-channel mean
    colorer = e.mm(torch.diag(d ** 0.5)).mm(e.t())
    return colorer.mm(x) + y_mean
|
Python
| 127
| 31.692913
| 108
|
/ops.py
| 0.65342
| 0.642582
|
igoryuha/wct
|
refs/heads/master
|
import torch
from torchvision import transforms
from ops import relu_x_1_style_decorator_transform, relu_x_1_transform
from PIL import Image
import os
def eval_transform(size):
    """Return the inference-time torchvision pipeline: resize so the
    smaller edge is *size*, then convert to a float tensor in [0, 1]."""
    return transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor()
    ])
def load_image(path):
    """Open an image file with PIL and force 3-channel RGB."""
    return Image.open(path).convert('RGB')
def preprocess(img, size):
    """Resize *img*, convert to a tensor, and add a batch dimension."""
    transform = eval_transform(size)
    return transform(img).unsqueeze(0)
def deprocess(tensor):
    """Inverse of preprocess: (1, C, H, W) tensor on any device -> PIL image."""
    tensor = tensor.cpu()
    tensor = tensor.squeeze(0)
    tensor = torch.clamp(tensor, 0, 1)  # clip to the valid image range
    return transforms.ToPILImage()(tensor)
def extract_image_names(path):
    """Return full paths of the .jpg/.png entries directly inside *path*.

    Order follows os.listdir (platform-dependent, not sorted).
    """
    valid_ext = ['.jpg', '.png']
    found = []
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.splitext(full_path)[1] in valid_ext:
            found.append(full_path)
    return found
|
Python
| 46
| 19.413044
| 70
|
/utils.py
| 0.643237
| 0.636848
|
ajbonkoski/zcomm
|
refs/heads/master
|
#!/usr/bin/env python
import itertools
import sys
import time
from zcomm import zcomm
HZ = 1  # publish rate, messages per second
def main(argv):
    """Publish an incrementing counter on the FROB_DATA channel forever."""
    z = zcomm()
    msg_counter = itertools.count()
    while True:
        # NOTE: .next() is Python 2 iterator syntax (next(msg_counter) in Py3)
        msg = str(msg_counter.next())
        z.publish('FROB_DATA', msg);
        time.sleep(1/float(HZ))  # throttle to HZ messages per second
if __name__ == "__main__":
    try:
        main(sys.argv)
    except KeyboardInterrupt:
        pass  # silent exit on Ctrl-C
|
Python
| 22
| 16.727272
| 37
|
/py/pub.py
| 0.587179
| 0.582051
|
ajbonkoski/zcomm
|
refs/heads/master
|
#!/usr/bin/env python
import sys
import time
from zcomm import zcomm
def handle_msg(channel, data):
    """Print every received message (Python 2 print statement)."""
    print ' channel:%s, data:%s' % (channel, data)
def main(argv):
    """Subscribe to all channels ('' matches everything) and loop forever."""
    z = zcomm()
    z.subscribe('', handle_msg)
    z.run()
if __name__ == "__main__":
    try:
        main(sys.argv)
    except KeyboardInterrupt:
        pass  # silent exit on Ctrl-C
|
Python
| 18
| 17.555555
| 52
|
/py/spy.py
| 0.58982
| 0.58982
|
ajbonkoski/zcomm
|
refs/heads/master
|
import zmq
# IPC endpoints of the central forwarder (see crossbar.py in this repo)
PUB_ADDR = 'ipc:///tmp/pub';
SUB_ADDR = 'ipc:///tmp/sub';
class zcomm:
    """Small pub/sub wrapper around a pair of ZeroMQ sockets.

    Publishes on PUB_ADDR and subscribes on SUB_ADDR; received messages
    are dispatched to per-channel callbacks.
    """
    def __init__(self):
        self.ctx = zmq.Context()
        self.pub = self.ctx.socket(zmq.PUB)
        self.pub.connect(PUB_ADDR)
        self.sub = self.ctx.socket(zmq.SUB)
        self.sub.connect(SUB_ADDR)
        self.callbacks = {} # maps channels -> callbacks
    def subscribe(self, channel, callback):
        # channel '' subscribes to everything (ZeroMQ prefix matching)
        self.sub.setsockopt(zmq.SUBSCRIBE, channel)
        self.callbacks[channel] = callback
    def publish(self, channel, data):
        # two-frame message: channel name, then payload
        self.pub.send_multipart([channel, data])
    def handle(self):
        """Receive one message and dispatch it to the matching callback;
        the '' callback acts as a catch-all fallback."""
        channel, msg = self.sub.recv_multipart()
        if channel in self.callbacks:
            self.callbacks[channel](channel, msg)
        elif '' in self.callbacks:
            self.callbacks[''](channel, msg)
    def run(self):
        """Blocking receive-dispatch loop; never returns."""
        while True:
            self.handle()
|
Python
| 32
| 26.84375
| 56
|
/py/zcomm.py
| 0.590348
| 0.590348
|
ajbonkoski/zcomm
|
refs/heads/master
|
#!/usr/bin/env python
import zmq
# IPC endpoints: clients publish to PUB_ADDR, subscribe on SUB_ADDR
SUB_ADDR = 'ipc:///tmp/sub'
PUB_ADDR = 'ipc:///tmp/pub'
def main():
    """Run a ZeroMQ FORWARDER device bridging PUB_ADDR -> SUB_ADDR.

    Python 2 code (note the `except Exception, e` syntax below).
    """
    try:
        context = zmq.Context(1)
        userpub = context.socket(zmq.SUB)
        userpub.bind(PUB_ADDR)
        userpub.setsockopt(zmq.SUBSCRIBE, "")  # forward every channel
        usersub = context.socket(zmq.PUB)
        usersub.bind(SUB_ADDR)
        # blocks forever shuttling messages between the two sockets
        zmq.device(zmq.FORWARDER, userpub, usersub)
    except Exception, e:
        print e
        print "bringing down zmq device"
    except KeyboardInterrupt:
        pass
    finally:
        pass
        # NOTE(review): if socket setup failed above, these names are
        # undefined here and the cleanup itself raises NameError -- confirm
        userpub.close()
        usersub.close()
        context.term()
if __name__ == "__main__":
    main()
|
Python
| 32
| 19.4375
| 51
|
/crossbar.py
| 0.567278
| 0.565749
|
Ignorance-of-Dong/Algorithm
|
refs/heads/master
|
# AC:
# from others' solution
#
class Solution(object):
    """LeetCode 1316: count distinct substrings that are some string
    written twice in a row (echo substrings)."""

    def distinctEchoSubstrings(self, S):
        """Return the number of distinct substrings of S of the form a+a.

        Uses a polynomial rolling hash over prefix sums: two adjacent
        equal-length windows match when their position-adjusted hashes
        agree.  The modulus is a large prime, so collisions are
        vanishingly unlikely (not impossible).
        """
        n = len(S)
        base, mod = 37, 344555666677777  # mod is prime
        base_inv = pow(base, mod - 2, mod)  # modular inverse via Fermat
        # prefix[k] = hash of S[:k], where S[j] contributes ord(S[j]) * base**j
        prefix = [0]
        acc, power = 0, 1
        for ch in S:
            acc = (acc + power * ord(ch)) % mod
            power = power * base % mod
            prefix.append(acc)
        seen = set()
        power = 1  # maintained as base**half inside the loop below
        for half in range(1, n // 2 + 1):
            power = power * base % mod
            for start in range(n - 2 * half + 1):
                mid = start + half
                # shift the left window by base**half so both windows
                # carry the same positional factor, then compare
                left = (prefix[mid] - prefix[start]) * power % mod
                right = (prefix[mid + half] - prefix[mid]) % mod
                if left == right:
                    # divide out base**start to get a position-independent
                    # canonical hash of the repeated half
                    seen.add(left * pow(base_inv, start, mod) % mod)
        return len(seen)
|
Python
| 28
| 33.82143
| 109
|
/contest/leetcode_biweek_17/[refered]leetcode_5146_Distinct_Echo_Substrings.py
| 0.456879
| 0.427105
|
RoelVanderPaal/javacpp-cuda-math
|
refs/heads/master
|
#!/usr/bin/python
from bs4 import BeautifulSoup
from string import Template
# Root of the locally installed CUDA math API HTML documentation.
docDir = '/Developer/NVIDIA/CUDA-7.0/doc/html/cuda-math-api/'
# CUDA kernel template for unary math functions: result[i] = f(x[i]).
# $t is the element type (float/double), $f the function name.
one_template = """extern "C"
__global__ void math_${f}(size_t n, $t *result, $t *x) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
{
result[id] = ${f}(x[id]);
}
}
"""
# CUDA kernel template for binary math functions: result[i] = f(x[i], y[i]).
two_template = """extern "C"
__global__ void math_${f}(size_t n, $t *result, $t *x, $t *y) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id < n)
{
result[id] = ${f}(x[id],y[id]);
}
}
"""
# Java enum template listing every generated kernel name for one type.
enum_template = """package com.mosco.javacpp_cuda_math;
public enum Functions$t {
$enums
}
"""
# Template for the per-type Java wrapper class (CudaMathFloat/Double).
main_java_template = """package com.mosco.javacpp_cuda_math;
import org.bytedeco.javacpp.IntPointer;
import org.bytedeco.javacpp.LongPointer;
import org.bytedeco.javacpp.Pointer;
import java.io.IOException;
public class CudaMath$tc extends AbstractCudaMath {
public CudaMath${tc}() throws IOException {
super("$t");
}
$body
private void call(Functions$tc f, int n, Pointer... pointers) {
Pointer[] all = new Pointer[pointers.length + 1];
all[0] = new IntPointer(new int[]{n});
for (int i = 0; i < pointers.length; i++) {
all[i + 1] = pointers[i];
}
super.call(f.name(), n, all);
}
}
"""
# Template for the matching JUnit test class; allocates device buffers
# once in setUp and runs each generated kernel against them.
test_java_template = """package com.mosco.javacpp_cuda_math;
import static org.bytedeco.javacpp.cuda.*;
import org.bytedeco.javacpp.${tc}Pointer;
import org.bytedeco.javacpp.LongPointer;
import org.junit.BeforeClass;
import org.junit.Test;
import java.util.logging.*;
import static com.mosco.javacpp_cuda_math.AbstractCudaMath.checkResult;
public class CudaMath${tc}Test {
private static final int N = 2000;
private static CudaMath${tc} cudaMath${tc};
private static LongPointer x, y;
private static LongPointer result;
$body
@BeforeClass
public static void setUp() throws Exception {
Logger logger = Logger.getLogger(AbstractCudaMath.class.getName());
ConsoleHandler handler = new ConsoleHandler();
logger.addHandler(handler);
handler.setLevel(Level.FINE);
logger.setLevel(Level.FINE);
checkResult(cuInit(0));
CUctx_st context = new CUctx_st();
checkResult(cuCtxCreate(context, 0, 0));
cudaMath${tc} = new CudaMath${tc}();
${t}[] xArray = new ${t}[N];
for (int i = 0; i < N; i++) {
xArray[i] = i;
}
x = new LongPointer(1);
checkResult(cuMemAlloc(x, N * ${tc}.BYTES));
checkResult(cuMemcpyHtoD(x.get(), new ${tc}Pointer(xArray), N * ${tc}.BYTES));
${t}[] yArray = new ${t}[N];
for (int i = 0; i < N; i++) {
yArray[i] = i;
}
y = new LongPointer(1);
checkResult(cuMemAlloc(y, N * ${tc}.BYTES));
checkResult(cuMemcpyHtoD(y.get(), new ${tc}Pointer(yArray), N * ${tc}.BYTES));
result = new LongPointer(1);
checkResult(cuMemAlloc(result, N * ${tc}.BYTES));
}
}
"""
def parseDocumentation(filename, n_type):
    """Scrape one CUDA math-API HTML page and bucket function names.

    Returns a dict with keys:
      'one'   -- functions with signature n_type(n_type)
      'two'   -- functions with signature n_type(n_type, n_type)
      'rest'  -- functions with any other signature
      'error' -- <dt> nodes that could not be parsed
    """
    soup = BeautifulSoup(open(filename))
    result = {'one': [], 'two': [], 'rest': [], 'error': []}
    for dt in soup.body.dl.find_all("dt"):
        contents = dt.span.contents
        if len(contents) >= 3:
            # the return-type text carries zero-width spaces; strip them
            r_type = contents[3].strip().strip(u'\u200B').strip()
            contents2 = dt.contents[1].contents
            # parameter types sit at every third child, wrapped in " (,"
            params = [contents2[i].strip(' (,') for i in range(1, len(contents2) - 1, 3)]
            mName = dt.contents[1].a.string
            if r_type == n_type and params == [n_type]:
                result['one'].append(mName)
            elif r_type == n_type and params == [n_type, n_type]:
                result['two'].append(mName)
            else:
                result['rest'].append(mName)
        else:
            result['error'].append(dt)
            # print("hier probleem " + contents[0])
    return result
def updateCuFile(results, nType):
    """Regenerate src/main/resources/cuda_math_<nType>.cu with one
    kernel per unary ('one') and binary ('two') function."""
    with open('src/main/resources/cuda_math_' + nType + '.cu', 'w+') as cuFile:
        cuFile.seek(0)
        t_one = Template(one_template)
        for fName in results['one']:
            cuFile.write(t_one.substitute(f=fName, t=nType))
        t_two = Template(two_template)
        for fName in results['two']:
            cuFile.write(t_two.substitute(f=fName, t=nType))
        # drop any leftover bytes from a longer previous version
        cuFile.truncate()
def updateEnum(results, nType):
    """Regenerate the Functions<Type>.java enum listing every generated
    kernel name (unary first, then binary)."""
    with open('src/main/java/com/mosco/javacpp_cuda_math/Functions' + nType.capitalize() + '.java', 'w+') as file:
        file.seek(0)
        file.write(Template(enum_template).substitute(enums=','.join(results['one']) + ',' + ','.join(results['two']),
                                                      t=nType.capitalize()))
        # drop any leftover bytes from a longer previous version
        file.truncate()
def updateMainJavaFile(results, nType):
    """Regenerate CudaMath<Type>.java with one wrapper method per kernel."""
    with open('src/main/java/com/mosco/javacpp_cuda_math/CudaMath' + nType.capitalize() + '.java', 'w+') as file:
        file.seek(0)
        body = ''
        one_t = Template(""" public void ${fNameM}(int n, LongPointer x, LongPointer result) {
call(Functions${tc}.$fName, n, result, x);
}
""")
        for fName in results['one']:
            # float CUDA functions end in 'f' (e.g. sinf); strip it for
            # the Java method name while keeping the kernel/enum name
            body += one_t.substitute(fName=fName, fNameM=fName[:-1] if nType == 'float' else fName,
                                     tc=nType.capitalize())
        two_t = Template(""" public void ${fNameM}(int n, LongPointer x, LongPointer y, LongPointer result) {
call(Functions${tc}.$fName, n, result, x, y);
}
""")
        for fName in results['two']:
            body += two_t.substitute(fName=fName, fNameM=fName[:-1] if nType == 'float' else fName,
                                     tc=nType.capitalize())
        file.write(Template(main_java_template).substitute(f='', t=nType, tc=nType.capitalize(), body=body))
        # drop any leftover bytes from a longer previous version
        file.truncate()
def updateTestJavaFile(results, nType):
    """Regenerate CudaMath<Type>Test.java with one @Test per kernel."""
    with open('src/test/java/com/mosco/javacpp_cuda_math/CudaMath' + nType.capitalize() + 'Test.java', 'w+') as file:
        file.seek(0)
        body = ''
        one_t = Template(""" @Test
public void test${fName}() {
cudaMath${tc}.${fNameM}(N, x, result);
}
""")
        for fName in results['one']:
            # same float-suffix stripping as in updateMainJavaFile
            body += one_t.substitute(fName=fName, fNameM=fName[:-1] if nType == 'float' else fName, t=nType,
                                     tc=nType.capitalize())
        two_t = Template(""" @Test
public void test${fName}() {
cudaMath${tc}.${fNameM}(N, x, y, result);
}
""")
        for fName in results['two']:
            body += two_t.substitute(fName=fName, fNameM=fName[:-1] if nType == 'float' else fName, t=nType,
                                     tc=nType.capitalize())
        file.write(Template(test_java_template).substitute(f='', t=nType, tc=nType.capitalize(), body=body))
        # drop any leftover bytes from a longer previous version
        file.truncate()
# Driver: scrape the single- and double-precision doc pages, then
# regenerate the .cu kernels, Java enum, wrapper class and test class
# for each element type.
for aType in [{'t': 'float', 'd': 'SINGLE'}, {'t': 'double', 'd': 'DOUBLE'}]:
    nType = aType['t']
    results = parseDocumentation(docDir + 'group__CUDA__MATH__' + aType['d'] + '.html', nType)
    updateCuFile(results, nType)
    updateEnum(results, nType)
    updateMainJavaFile(results, nType)
    updateTestJavaFile(results, nType)
# for key, value in resultFloat.iteritems():
#     print("%s: %i" % (key, len(value)))
# resultDouble = parseDocumentation(docDir + 'group__CUDA__MATH__DOUBLE.html', 'double')
# for key, value in resultDouble.iteritems():
#     print("%s: %i" % (key, len(value)))
|
Python
| 231
| 31.290043
| 118
|
/generate.py
| 0.57434
| 0.568843
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.