blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
f41783fbde57530fba633e505bb251e72466711c | Python | GiovaneNardari/Crimes-in-Boston | /Crimes-in-Boston.py | UTF-8 | 6,884 | 2.984375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
crimes = pd.read_csv('/Users/username/Downloads/archive/crime.csv', delimiter=',', encoding="ISO-8859-1")
#ASSASSINATOS POR HORA
# Murders (Boston offence code 111): count of incidents per hour of day.
crime111 = crimes['OFFENSE_CODE'] == 111
# Boolean-mask filter down to murder rows only.
crime_assassinato = crimes[crime111]
sns.countplot(data=crime_assassinato, x='HOUR')
plt.xlabel('Hora do dia')
plt.ylabel('Número de Assassinatos')
# One tick per unit up to 20 (the hourly counts stay small for this crime).
plt.yticks(np.arange(0, 20, step=1))
plt.title('Quantidade de Assassinatos por Hora do Dia')
plt.show()
#ASSASSINATOS POR DIA DA SEMANA
# Murders (offence code 111): count of incidents per day of the week.
crime111 = crimes['OFFENSE_CODE'] == 111
crime_assassinato = crimes[crime111]
# FIX: the original `order` list was scrambled (Monday, Thursday, Wednesday,
# Tuesday, ...); the x-axis now follows calendar order Monday..Sunday.
sns.countplot(data=crime_assassinato, x='DAY_OF_WEEK', order=['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'])
plt.xlabel('Dia da Semana')
plt.ylabel('Número de Assassinatos')
plt.yticks(np.arange(0, 36, step=2))
plt.title('Quantidade de Assassinatos por Dia da Semana')
plt.show()
#LAT,LONG - ASSASSINATOS
# Scatter the murder locations, coloured by police district.
crime111 = crimes['OFFENSE_CODE'] == 111
crime_assassinato = crimes[crime111]
# The two queries appear to drop rows with placeholder coordinates
# (missing locations recorded as -1/0 in this dataset) — TODO confirm.
crime_assassinato_lat = crime_assassinato.query('Lat>5')
crime_assassinato_long = crime_assassinato_lat.query('Long<-5')
sns.scatterplot(data=crime_assassinato_long, x='Long', y='Lat', hue='DISTRICT')
plt.title('Local dos Assassinatos')
plt.show()
#ASSASSINATOS POR ANO
# Yearly murder totals; the numbers are hard-coded, presumably copied from
# an interactive value_counts() run — verify against the CSV if it changes.
Anos = ['2015', '2016', '2017', '2018']
NAssi = [27, 47, 54, 33]
plt.plot(Anos, NAssi, label='Assassinatos', marker='o')
plt.title('Série Temporal do crime de Assassinato')
plt.grid()
plt.legend()
plt.show()
#ASSASSINATOS POR ANO PARA CADA DISTRITO
# Yearly murder counts for the three districts with the most murders.
crime111 = crimes['OFFENSE_CODE'] == 111
crime_assassinato = crimes[crime111]
# NOTE(review): the value_counts() results below are discarded — they look
# like interactive probes whose numbers were copied into AB2/AC11/AB3.
dfassas = crime_assassinato['DISTRICT']=='B2'
dfassasb2 = crime_assassinato[dfassas]
dfassasb2['YEAR'].value_counts()
dfassas2 = crime_assassinato['DISTRICT']=='B3'
dfassasB3 = crime_assassinato[dfassas2]
dfassasB3['YEAR'].value_counts()
dfassas1 = crime_assassinato['DISTRICT']=='C11'
dfassasC11 = crime_assassinato[dfassas1]
dfassasC11['YEAR'].value_counts()
# Hard-coded per-district yearly murder counts.
Anos = [2015, 2016, 2017, 2018]
AB2 = [8, 15, 14, 11]
AC11 = [2, 10, 16, 4]
AB3 = [4, 10, 10, 7]
plt.plot(Anos, AB2, marker='o', label='B2')
plt.plot(Anos, AC11, marker='o', label='C11')
plt.plot(Anos, AB3, marker='o', label='B3')
plt.title('Série Temporal do crime de Assassinato para os distritos mais violentos')
plt.xlabel('Anos')
plt.ylabel('Número de Assassinatos')
plt.grid()
plt.legend()
plt.show()
#FURTOS POR HORA
# Larceny incidents per hour of day (filtered by offence-code group).
crime619 = crimes['OFFENSE_CODE_GROUP'] == 'Larceny'
crime_furto = crimes[crime619]
sns.countplot(data=crime_furto, x='HOUR')
plt.xlabel('Hora do dia')
plt.ylabel('Número de Furtos')
plt.yticks(np.arange(0, 2100, step=100))
plt.title('Quantidade de Furtos por Hora do Dia')
plt.show()
#FURTOS POR DIA DA SEMANA
# Larceny incidents per day of the week.
crime619 = crimes['OFFENSE_CODE_GROUP'] == 'Larceny'
crime_furto = crimes[crime619]
# FIX: the original `order` list was scrambled (Monday, Thursday, Wednesday,
# Tuesday, ...); the x-axis now follows calendar order Monday..Sunday.
sns.countplot(data=crime_furto, x= 'DAY_OF_WEEK', order= ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'])
plt.xlabel('Dia da Semana')
plt.ylabel('Número de Furtos')
plt.yticks(np.arange(0, 4200, step=200))
plt.title('Quantidade de Furtos por Dia da Semana')
plt.show()
#LAT,LONG - FURTO
# Scatter of larceny locations coloured by district.
# FIX: removed a redundant pd.read_csv() that re-downloaded the same
# dataset from a *different* user's hard-coded home directory
# ('/Users/giovanebrunonardari/...'); `crimes` is already loaded above.
crime619 = crimes['OFFENSE_CODE_GROUP'] == 'Larceny'
crime_furto = crimes[crime619]
# Drop rows with placeholder coordinates (Boston is Lat>0, Long<0;
# missing locations appear to be stored as -1 — TODO confirm).
crime_furto_lat = crime_furto.query('Lat>=0')
crime_furto_long = crime_furto_lat.query('Long<=0')
sns.scatterplot(data=crime_furto_long, x='Long', y='Lat', hue='DISTRICT')
plt.title('Local dos Furtos')
plt.show()
#FURTOS POR ANO
# Yearly larceny totals (hard-coded, presumably from an interactive
# value_counts() run — verify against the CSV if it changes).
Anos = ['2015', '2016', '2017', '2018']
NFurto = [5006, 7902, 7807, 5220]
plt.plot(Anos, NFurto, label='Furto', marker='o')
plt.title('Série Temporal do crime de Furto')
plt.grid()
plt.legend()
plt.show()
#FURTOS POR ANO PARA CADA DISTRITO
# Yearly larceny counts for the three busiest districts.
crime619 = crimes['OFFENSE_CODE_GROUP'] == 'Larceny'
crime_furto = crimes[crime619]
# NOTE(review): the value_counts() results below are discarded — they look
# like interactive probes whose numbers were copied into FB2/FD4/FA1.
dffurto1 = crime_furto['DISTRICT']=='B2'
dffurto2= crime_furto[dffurto1]
dffurto2['YEAR'].value_counts()
dffurtod4 = crime_furto['DISTRICT']=='D4'
dffurtod41= crime_furto[dffurtod4]
dffurtod41['YEAR'].value_counts()
dffurtoA1 = crime_furto['DISTRICT']=='A1'
dffurtoA2= crime_furto[dffurtoA1]
dffurtoA2['YEAR'].value_counts()
Anos = [2015, 2016, 2017, 2018]
FB2 = [608, 890, 891, 482]
FD4 = [1373, 2268, 2157, 1515]
FA1 = [914, 1403, 1402, 985]
plt.plot(Anos, FB2, label='B2', marker='o')
plt.plot(Anos, FD4, label='D4', marker='o')
plt.plot(Anos, FA1, label='A1', marker='o')
plt.title('Série Temporal do crime de Furto para os distritos mais violentos')
plt.xlabel('Anos')
# FIX: this is the larceny (furto) chart, but the y-axis was labelled
# 'Número de Assassinatos' (murders) — copy-paste from the murder section.
plt.ylabel('Número de Furtos')
plt.grid()
plt.legend()
plt.show()
#DROGAS POR HORA
# Drug-violation incidents per hour of day.
crime1843 = crimes['OFFENSE_CODE_GROUP'] == 'Drug Violation'
crime_drug = crimes[crime1843]
sns.countplot(data=crime_drug, x='HOUR')
plt.title('Apreensão de Drogas por Hora do Dia')
plt.xlabel('Hora do Dia')
plt.ylabel('Quantidade de Apreensões')
plt.yticks(np.arange(0, 2600, step=100))
plt.show()
#DROGAS POR DIA DA SEMANA
# Drug-violation incidents per day of the week.
crime1843 = crimes['OFFENSE_CODE_GROUP'] == 'Drug Violation'
crime_drug = crimes[crime1843]
# FIX: the original `order` list was scrambled (Monday, Thursday, Wednesday,
# Tuesday, ...); the x-axis now follows calendar order Monday..Sunday.
sns.countplot(data=crime_drug, x= 'DAY_OF_WEEK', order= ['Monday','Tuesday','Wednesday','Thursday','Friday','Saturday','Sunday'])
plt.title('Apreensão de Drogas por Dia da Semana')
plt.xlabel('Dia da Semana')
plt.ylabel('Quantidade de Apreensões de Drogas')
plt.yticks(np.arange(0, 3150, step=150))
plt.show()
#LAT,LONG - DROGAS
# Scatter of drug-violation locations, coloured by district.
crime1843 = crimes['OFFENSE_CODE_GROUP'] == 'Drug Violation'
crime_drug = crimes[crime1843]
# Drop rows with placeholder coordinates (valid Boston points are Lat>0,
# Long<0; missing locations appear to be stored as -1 — TODO confirm).
crime_drug_lat = crime_drug.query('Lat>=0')
crime_drug_long = crime_drug_lat.query('Long<=0')
sns.scatterplot(data=crime_drug_long, x='Long', y='Lat', hue='DISTRICT')
plt.title('Local das Apreensões de Drogas')
plt.show()
#DROGAS POR ANO
# Yearly drug-violation totals (hard-coded).
Anos = ['2015', '2016', '2017', '2018']
NDroga = [3300, 5284, 4759, 3205]
plt.plot(Anos, NDroga, label='Drogas', marker='o')
plt.yticks(np.arange(0, 7000, step=1000))
plt.title('Série Temporal do crime de Drogas')
plt.grid()
plt.legend()
plt.show()
#DROGAS POR ANO PARA CADA DISTRITO
crime1843 = crimes['OFFENSE_CODE_GROUP'] == 'Drug Violation'
crime_drug = crimes[crime1843]
# NOTE(review): the value_counts() results below are discarded — they look
# like interactive probes whose numbers were copied into NB2/NC11/NA1.
dfdrogab2 = crime_drug['DISTRICT']=='B2'
dfdrogab2 = crime_drug[dfdrogab2]
dfdrogab2['YEAR'].value_counts()
dfdrogac11 = crime_drug['DISTRICT']=='C11'
dfdrogac11 = crime_drug[dfdrogac11]
dfdrogac11['YEAR'].value_counts()
dfdroga1 = crime_drug['DISTRICT']=='A1'
dfdroga1 = crime_drug[dfdroga1]
dfdroga1['YEAR'].value_counts()
Anos = [2015, 2016, 2017, 2018]
NB2 = [452, 685, 628, 504]
NC11 = [527, 784, 593, 305]
NA1 = [423, 719, 577, 357]
plt.plot(Anos, NB2, label='B2', marker='o')
plt.plot(Anos, NC11, label='C11', marker='o')
plt.plot(Anos, NA1, label='A1', marker='o')
plt.title('Série Temporal do crime de Drogas para os distritos mais "violentos"')
plt.xlabel('Anos')
plt.ylabel('Número de Apreensões')
plt.grid()
plt.legend()
plt.show()
| true |
f7ef8a44f33ee7ebbd587d5e1f4db2b171df3ac5 | Python | MahaLakshmi0411/Circle | /area.py | UTF-8 | 316 | 3.640625 | 4 | [] | no_license | pi=3.14
# Part 1: area of a circle from a user-supplied radius.
# NOTE(review): `pi` is defined just above as 3.14; math.pi would be more
# accurate — left as-is to preserve printed output.
r=float(input("Enter the radius of a circle:"))
area=pi*r*r
print("The area of the circle is =%.2f"%area)
# Part 2 (unrelated mini-program): print a filename's extension.
i = input("Input the Filename: ")
extns =i.split(".")
# repr() function is used to returns a printable representation of a object(optional)
print ("The extension of the file is : " + repr(extns[-1]))
| true |
bad1aa085d154f0b46c7a640613f25964a73f1f7 | Python | SunnyKhade/Basic-Python-Programs- | /Greatest Common Divisor of two numbers using recursion .py | UTF-8 | 246 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm).

    Iterative form of the original tail recursion: when b is 0 the answer
    is a; otherwise reduce (a, b) to (b, a % b) until b reaches 0.
    """
    while b != 0:
        a, b = b, a % b
    return a
# Read two integers from stdin and print their GCD.
print('Enter two numbers : ')
a = int(input())
b = int(input())
# NOTE(review): this rebinds the name `gcd` from the function to its
# result, so the function cannot be called again afterwards.
gcd=gcd(a,b)
print('GCD is', gcd)
# In[ ]:
| true |
d2d51886b98e1e2ca08d80eeedeaed58cbadd39a | Python | wasp-codes/regression-analysis | /opendata-humbug.py | UTF-8 | 799 | 2.96875 | 3 | [] | no_license | import requests
import pandas as pd
# import for graph
import matplotlib.pyplot as plt
# Queensland open-data tide gauge feed for Humbug Wharf.
url = 'http://opendata.tmr.qld.gov.au/Humbug_Wharf.txt'
response = requests.get(url)
if response.status_code != 200:
    print('Failed to get data:', response.status_code)
else:
    print(response.text[:28])
# NOTE(review): the file is downloaded twice — `response` above is only
# used for the status check/preview, then read_csv fetches the URL again.
df = pd.read_csv('http://opendata.tmr.qld.gov.au/Humbug_Wharf.txt', sep=" ", header=None, skiprows=5, engine='python')
# timeview
# .loc slicing is label-based and inclusive of both endpoints.
df = df.loc[10500:11520,[0,2]]
df = df.dropna()
df.columns = ['Time/Date','Water Level in m LAT']
# Timestamps arrive as DDMMYYYYHHMM strings.
df['Time/Date'] = pd.to_datetime(df['Time/Date'], format='%d%m%Y%H%M')
print(df)
# graph
# Two views of the same series: a short window and the full slice.
df.loc[11500:11520].plot(x = 'Time/Date', y = 'Water Level in m LAT', kind = 'line')
df.loc[10500:11520].plot(x = 'Time/Date', y = 'Water Level in m LAT', kind = 'line')
plt.show()
| true |
98588da426ecbc9d7a6d87e39ad1e0b0ecf4f247 | Python | stoday/Coagle | /ModuleTest.py | UTF-8 | 150 | 2.765625 | 3 | [] | no_license |
import re
text_str = '2016-10-10 00:00:00'
datetime_part = re.search('\d\d\d\d-\d\d-\d\d \d\d:\d\d:\d\d', text_str)
print datetime_part.group()
| true |
6d64f2d9a6ce0be64effd143f165dc659dd60ac6 | Python | PythonExplorer/LatentView_TNT | /matchwise_stats.py | UTF-8 | 14,977 | 3.203125 | 3 | [
"MIT"
] | permissive | #Libraries to parse xls docs
from xlrd import open_workbook,cellname
import operator
#Libraries to create xlx files for further use and data preparation
import xlsxwriter
def open(x):
    """Open workbook *x* with xlrd and return its first worksheet.

    NOTE(review): this shadows the builtin ``open`` for the whole module.
    Renaming it (e.g. ``open_sheet``) would be safer, but every function
    below calls it by this name, so it is documented rather than renamed.
    """
    #Open data sheet
    book = open_workbook(x)
    #Index data sheet
    sheet = book.sheet_by_index(0)
    return sheet
# Create Data Sheets
def create_new_sheet(sheet_name):
    """Create an xlsxwriter workbook named *sheet_name* and return
    ``(workbook, first_worksheet)``.  The caller must close the workbook."""
    workbook = xlsxwriter.Workbook(sheet_name)
    new_sheet = workbook.add_worksheet()
    return (workbook,new_sheet)
def winning_probabilities(sheet_name):
    """Build head-to-head win/loss/tie/no-result counts for every team pair
    and write them, with win/loss probabilities, to match_winners.xls.

    Layout assumption (from the column offsets used below): in each 'A'
    column block, col+1/col+2 hold the two team names and col+13 holds the
    winner ("Tie" / anything else = no result) — TODO confirm vs the sheet.
    """
    sheet = open(sheet_name)
    # teams[t1][t2] -> {"Win","Loss","Tie","No Result"} from t1's viewpoint.
    teams = {}
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'A':
                team1 = sheet.cell(row_index,col_index+1).value
                team2 = sheet.cell(row_index,col_index+2).value
                if team1 not in teams:
                    teams[team1] = {}
                if team2 not in teams:
                    teams[team2] = {}
                if team1 not in teams[team2]:
                    teams[team2][team1] = {"Win":0,"Loss":0,"Tie":0,"No Result":0}
                if team2 not in teams[team1]:
                    teams[team1][team2] = {"Win":0,"Loss":0,"Tie":0,"No Result":0}
                winning_team = sheet.cell(row_index,col_index+13).value
                # Record the result symmetrically for both teams.
                if team1 == winning_team:
                    teams[team1][team2]["Win"]+=1
                    teams[team2][team1]["Loss"]+=1
                elif team2 == winning_team:
                    teams[team1][team2]["Loss"]+=1
                    teams[team2][team1]["Win"]+=1
                elif winning_team == "Tie":
                    teams[team1][team2]["Tie"]+=1
                    teams[team2][team1]["Tie"]+=1
                else:
                    teams[team1][team2]["No Result"]+=1
                    teams[team2][team1]["No Result"]+=1
    #Create new sheet
    workbook,match_winner_sheet = create_new_sheet("match_winners.xls")
    #Initialize rows,columns
    row_count = 0
    match_winner_sheet.write(0,0,"Team Name")
    match_winner_sheet.write(0,1,"Opponent Name")
    match_winner_sheet.write(0,2,"Wins")
    match_winner_sheet.write(0,3,"Loss")
    match_winner_sheet.write(0,4,"Ties")
    match_winner_sheet.write(0,5,"No Results")
    match_winner_sheet.write(0,6,"P_Win")
    match_winner_sheet.write(0,7,"P_Loss")
    row_count+=1
    for x in teams:
        for y in teams[x]:
            # NOTE(review): the bare except below swallows the real error
            # (most plausibly ZeroDivisionError when a pair has 0 games)
            # and kills the process via exit(); a narrow except with a
            # proper message would be safer — flagged, not changed here.
            try:
                match_winner_sheet.write(row_count,0,x)
                match_winner_sheet.write(row_count,1,y)
                match_winner_sheet.write(row_count,2,teams[x][y]["Win"])
                match_winner_sheet.write(row_count,3,teams[x][y]["Loss"])
                match_winner_sheet.write(row_count,4,teams[x][y]["Tie"])
                match_winner_sheet.write(row_count,5,teams[x][y]["No Result"])
                match_winner_sheet.write(row_count,6,"%.2f"%(teams[x][y]["Win"]*1.0/(teams[x][y]["Win"] + teams[x][y]["Loss"]
                +teams[x][y]["Tie"]+teams[x][y]["No Result"])))
                match_winner_sheet.write(row_count,7,"%.2f"%(teams[x][y]["Loss"]*1.0/(teams[x][y]["Win"] + teams[x][y]["Loss"]
                +teams[x][y]["Tie"]+teams[x][y]["No Result"])))
            except:
                print(x,y)
                exit()
            row_count+=1
    workbook.close()
def team_avg_scores(sheet_name):
    """Compute per-team average first/second-innings scores and wickets and
    write them to team_scores.xls.

    Per-team stats list layout:
      [0] matches batting first   [1] sum of 1st-innings runs
      [2] sum of 1st-innings wkts [3] sum of 2nd-innings runs
      [4] sum of 2nd-innings wkts [5] matches batting second
    """
    teams={}
    sheet = open(sheet_name)
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'D':
                # col+2/col+3: teams batting first/second (per offsets used).
                bat1 = sheet.cell(row_index,col_index+2).value
                bat2 = sheet.cell(row_index,col_index+3).value
                if bat1 not in teams:
                    teams[bat1] = [0,0,0,0,0,0]
                if bat2 not in teams:
                    teams[bat2] = [0,0,0,0,0,0]
                score1 = float(sheet.cell(row_index,col_index+4).value)
                wik1 = float(sheet.cell(row_index,col_index+5).value)
                score2 = float(sheet.cell(row_index,col_index+7).value)
                wik2 = float(sheet.cell(row_index,col_index+8).value)
                result = sheet.cell(row_index,col_index+10).value
                # Abandoned games are excluded from the averages.
                if result != "No Result":
                    teams[bat1][0]+=1
                    teams[bat2][5]+=1
                    teams[bat1][1]+=score1
                    teams[bat2][3]+=score2
                    teams[bat1][2]+=wik1
                    teams[bat2][4]+=wik2
    #Create new sheet
    workbook,team_scores_sheet = create_new_sheet("team_scores.xls")
    #Initialize rows,columns
    row_count = 0
    team_scores_sheet.write(0,0,"Team Name")
    team_scores_sheet.write(0,1,"Total No of Matches")
    team_scores_sheet.write(0,2,"Avg score-1")
    team_scores_sheet.write(0,3,"Avg wkt-1")
    team_scores_sheet.write(0,4,"Avg score-2")
    team_scores_sheet.write(0,5,"Avg wkt-2")
    # NOTE(review): the line below duplicates the previous header write
    # (same cell, same text) — harmless, but likely a leftover.
    team_scores_sheet.write(0,5,"Avg wkt-2")
    row_count+=1
    for x in teams:
        team_scores_sheet.write(row_count,0,x)
        team_scores_sheet.write(row_count,1,teams[x][0]+teams[x][5])
        # Averages use floor division on floats, i.e. values are rounded
        # down to whole numbers (x.0).
        if teams[x][0] != 0:
            team_scores_sheet.write(row_count,2,teams[x][1]//teams[x][0])
            team_scores_sheet.write(row_count,3,teams[x][2]//teams[x][0])
        else:
            team_scores_sheet.write(row_count,2,0)
            team_scores_sheet.write(row_count,3,0)
        if teams[x][5] != 0:
            team_scores_sheet.write(row_count,4,teams[x][3]//teams[x][5])
            team_scores_sheet.write(row_count,5,teams[x][4]//teams[x][5])
        else:
            team_scores_sheet.write(row_count,4,0)
            team_scores_sheet.write(row_count,5,0)
        row_count+=1
    workbook.close()
def toss_stats(sheet_name):
    """Aggregate per-team toss statistics and write them to toss_stats.xls.

    Per-team stats list layout:
      [0] total matches      [1] toss wins
      [2] won toss & batted  [3] won toss & fielded
      [4] batted & won game  [5] fielded & won game
    """
    sheet = open(sheet_name)
    team_toss_stats = {}
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'B':
                curr_team = sheet.cell(row_index,col_index).value
                opp_team = sheet.cell(row_index,col_index+1).value
                toss_winner = sheet.cell(row_index,col_index+2).value
                winner_decision = sheet.cell(row_index,col_index+3).value
                match_winner = sheet.cell(row_index,col_index+12).value
                if curr_team not in team_toss_stats:
                    team_toss_stats[curr_team] = [0,0,0,0,0,0]
                if opp_team not in team_toss_stats:
                    team_toss_stats[opp_team] = [0,0,0,0,0,0]
                team_toss_stats[curr_team][0]+=1
                team_toss_stats[opp_team][0]+=1
                if toss_winner == curr_team:
                    team_toss_stats[curr_team][1]+=1
                    if winner_decision == "bat":
                        team_toss_stats[curr_team][2]+=1
                        if match_winner == curr_team:
                            team_toss_stats[curr_team][4]+=1
                    if winner_decision == "field":
                        team_toss_stats[curr_team][3]+=1
                        if match_winner == curr_team:
                            team_toss_stats[curr_team][5]+=1
                if toss_winner == opp_team:
                    team_toss_stats[opp_team][1]+=1
                    if winner_decision == "bat":
                        team_toss_stats[opp_team][2]+=1
                        if match_winner == opp_team:
                            team_toss_stats[opp_team][4]+=1
                    # FIX: this compared against the misspelling "feild",
                    # which never matches the data, so field-elect stats for
                    # the opponent team were silently dropped (contrast the
                    # curr_team branch above, which correctly uses "field").
                    if winner_decision == "field":
                        team_toss_stats[opp_team][3]+=1
                        if match_winner == opp_team:
                            team_toss_stats[opp_team][5]+=1
    #Create new sheet
    workbook,toss_stats_sheet = create_new_sheet("toss_stats.xls")
    #Initialize rows,columns
    row_count = 0
    toss_stats_sheet.write(0,0,"Team Name")
    toss_stats_sheet.write(0,1,"Total No of Matches")
    toss_stats_sheet.write(0,2,"Toss Wins")
    toss_stats_sheet.write(0,3,"Toss wins bat")
    toss_stats_sheet.write(0,4,"Toss wins bowl")
    toss_stats_sheet.write(0,5,"Toss wins bat win")
    toss_stats_sheet.write(0,6,"Toss wins bowl win")
    row_count+=1
    for x in team_toss_stats:
        toss_stats_sheet.write(row_count,0,x)
        for y in range(0,6):
            toss_stats_sheet.write(row_count,y+1,team_toss_stats[x][y])
        row_count+=1
    workbook.close()
def ducks_stats(sheet_name):
    """Tally ducks (dismissed for 0: runs == 0, not-out flag == "NO") per
    batsman and write the counts to ducks_stats.xls."""
    sheet = open(sheet_name)
    duck_tally = {}
    for r in range(1, sheet.nrows):
        for c in range(0, sheet.ncols):
            # Only 'A'-lettered columns start a batsman record block.
            if cellname(r, c)[0] != 'A':
                continue
            batsman = sheet.cell(r, c + 1).value
            runs = sheet.cell(r, c + 2).value
            not_out_flag = sheet.cell(r, c + 4).value
            # A duck requires an actual dismissal ("NO" = not not-out).
            if runs == 0 and not_out_flag == "NO":
                duck_tally[batsman] = duck_tally.get(batsman, 0) + 1
    # Dump the tally, one player per row, below a header row.
    workbook, ducks_stats_sheet = create_new_sheet("ducks_stats.xls")
    ducks_stats_sheet.write(0, 0, "Player Name")
    ducks_stats_sheet.write(0, 1, "Ducks Count")
    for row, (player, count) in enumerate(duck_tally.items(), start=1):
        ducks_stats_sheet.write(row, 0, player)
        ducks_stats_sheet.write(row, 1, count)
    workbook.close()
def largest_margin(sheet_name):
    """Print the match id of the game decided by the widest runs margin
    (column H block: innings totals at offsets 0 and 3, result at 6)."""
    sheet = open(sheet_name)
    widest_gap = -1
    widest_match = 0
    for r in range(1, sheet.nrows):
        for c in range(0, sheet.ncols):
            if cellname(r, c)[0] != 'H':
                continue
            current_id = sheet.cell(r, c - 7).value
            innings1 = sheet.cell(r, c).value
            innings2 = sheet.cell(r, c + 3).value
            outcome = sheet.cell(r, c + 6).value
            # Abandoned games carry no meaningful margin; ties on the
            # margin keep the earliest match (strict > comparison).
            if outcome != "No Result" and abs(innings1 - innings2) > widest_gap:
                widest_gap = abs(innings1 - innings2)
                widest_match = current_id
    print(widest_match)
def extreme_totals(sheet_name):
    """Print the match ids holding the highest and lowest innings totals
    (column H block: innings totals at offsets 0 and 3, result at 6).

    NOTE(review): min_total starts at 500, which assumes every real total
    is below 500 — confirm for the dataset in use.
    """
    # FIX: the original called open(sheet_name) twice back-to-back,
    # parsing the whole workbook a second time for nothing.
    sheet = open(sheet_name)
    max_matchid = 0
    min_matchid = 0
    max_total = -1
    min_total = 500
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'H':
                curr_matchid = sheet.cell(row_index,col_index-7).value
                score1 = sheet.cell(row_index,col_index).value
                score2 = sheet.cell(row_index,col_index+3).value
                result = sheet.cell(row_index,col_index+6).value
                # Skip abandoned games entirely.
                if result != "No Result":
                    if max_total < max(score2,score1):
                        max_total = max(score2,score1)
                        max_matchid = curr_matchid
                    if min_total > min(score2,score1):
                        min_total = min(score2,score1)
                        min_matchid = curr_matchid
    print(max_matchid,min_matchid)
def mom_count(sheet_name):
    """Count man-of-the-match awards (column O) per player and write the
    tallies to mom_count.xls."""
    sheet = open(sheet_name)
    awards = {}
    for r in range(1, sheet.nrows):
        for c in range(0, sheet.ncols):
            if cellname(r, c)[0] != 'O':
                continue
            player = sheet.cell(r, c).value
            # Blank cells mean no award was recorded for that match.
            if player != "":
                awards[player] = awards.get(player, 0) + 1
    workbook, mom_count_sheet = create_new_sheet("mom_count.xls")
    mom_count_sheet.write(0, 0, "Player Name")
    mom_count_sheet.write(0, 1, "MOM Count")
    for row, (player, count) in enumerate(awards.items(), start=1):
        mom_count_sheet.write(row, 0, player)
        mom_count_sheet.write(row, 1, count)
    workbook.close()
def total_venues(sheet_name):
    """Print the number of distinct venues found in column K.

    NOTE(review): this definition is shadowed by a second ``total_venues``
    later in the module, so only the later one exists at runtime — this
    version is effectively dead code and could be removed.
    """
    sheet = open(sheet_name)
    # venues maps venue name -> match count; count tracks distinct venues.
    venues = {}
    count = 0
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'K':
                curr_stadium = sheet.cell(row_index,col_index).value
                if curr_stadium not in venues:
                    venues[curr_stadium]=0
                    count+=1
                venues[curr_stadium]+=1
    print(count)
def total_runs_wkts_ties(sheet_name):
    """Print grand totals of runs and wickets across all matches plus the
    number of tied games (column H block: score1, wkts1 at +1, score2 at
    +3, wkts2 at +4, result at +6)."""
    sheet = open(sheet_name)
    total_runs = 0
    total_wkts = 0
    total_ties = 0
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'H':
                scr1 = sheet.cell(row_index,col_index).value
                w1 = sheet.cell(row_index,col_index+1).value
                scr2 = sheet.cell(row_index,col_index+3).value
                w2 = sheet.cell(row_index,col_index+4).value
                result = sheet.cell(row_index,col_index+6).value
                if result == "Tie":
                    total_ties+=1
                # Runs/wickets accumulate for EVERY row (incl. "No Result"
                # games) — NOTE(review): confirm that inclusion is intended.
                total_runs+=(scr1+scr2)
                total_wkts+=(w1+w2)
    print("Total Runs : ",total_runs)
    print("Total Wkts : ",total_wkts)
    print("Total Ties : ",total_ties)
def total_c_hc(sheet_name):
    """Print total balls faced plus century and half-century counts from
    the batsmen sheet (column C block: runs, balls faced at +1)."""
    sheet = open(sheet_name)
    total_c = 0
    total_hc = 0
    total_balls = 0
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'C':
                curr_score = sheet.cell(row_index,col_index).value
                curr_balls = sheet.cell(row_index,col_index+1).value
                # FIX: the original tested `curr_score in range(50,100)` /
                # `in range(100,220)`.  xlrd yields floats, for which range
                # membership is fragile, and the hard 220 ceiling would
                # silently drop any score of 220 or more.  Plain chained
                # comparisons are robust and uncapped; the two bands are
                # disjoint, so elif is equivalent to the original two ifs.
                if 50 <= curr_score < 100:
                    total_hc += 1
                elif curr_score >= 100:
                    total_c += 1
                total_balls+=curr_balls
    print("Total balls : ",total_balls)
    print("Total Centuries : ",total_c)
    print("Total Half Centuries : ",total_hc)
def fwkts(sheet_name):
    """Count and print the number of five-wicket hauls (column E >= 5)."""
    sheet = open(sheet_name)
    haul_count = 0
    for r in range(1, sheet.nrows):
        for c in range(0, sheet.ncols):
            # Only 'E'-lettered columns hold the wickets-taken figure.
            if cellname(r, c)[0] != 'E':
                continue
            if sheet.cell(r, c).value >= 5:
                haul_count += 1
    print("Total 5 Wkt Hauls : ",haul_count)
def total_boundaries(sheet_name):
    """Print how many sixes, fours and dot balls appear in column S
    (per-delivery runs)."""
    sheet = open(sheet_name)
    sixes = fours = dots = 0
    for r in range(1, sheet.nrows):
        for c in range(0, sheet.ncols):
            if cellname(r, c)[0] != 'S':
                continue
            runs = sheet.cell(r, c).value
            # The three buckets are mutually exclusive, so elif suffices.
            if runs == 6:
                sixes += 1
            elif runs == 4:
                fours += 1
            elif runs == 0:
                dots += 1
    print("Total 6's : ",sixes)
    print("Total 4's : ", fours)
    print("Total dots : ", dots)
def total_venues(sheet_name):
    """Print the number of distinct venues (column K) and then each venue
    with its match count.

    NOTE(review): this redefinition shadows the earlier, simpler
    ``total_venues`` above — only this version exists at runtime.
    """
    # venues maps venue name -> match count; res counts distinct venues
    # (same as len(venues), which is what actually gets printed).
    venues = {}
    res = 0
    sheet = open(sheet_name)
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'K':
                curr_venue = sheet.cell(row_index,col_index).value
                if curr_venue not in venues:
                    venues[curr_venue]=0
                    res+=1
                venues[curr_venue]+=1
    print(len(venues))
    for x in venues:
        print(x,venues[x])
def most_catches_stumps(sheet_name):
    """Print run-out totals and sorted per-fielder catch/stumping tallies
    from the dismissal columns (column Y block: kind, fielder at +1,
    victim at +2).

    NOTE(review): the stumping branch matches the literal "stumps"; many
    cricket datasets label this kind "stumped", in which case the stumps
    dict would always stay empty — confirm against the source sheet.
    """
    catches = {}
    stumps = {}
    sheet = open(sheet_name)
    run_outs = 0
    # rvic tallies run-out *victims* (who got run out, not who threw).
    rvic = {}
    for row_index in range(1,sheet.nrows):
        for col_index in range(0,sheet.ncols):
            if cellname(row_index,col_index)[0] == 'Y':
                wicket_kind = sheet.cell(row_index,col_index).value
                fielder = sheet.cell(row_index,col_index+1).value
                victim = sheet.cell(row_index,col_index+2).value
                if wicket_kind == "run out":
                    run_outs+=1
                    if victim not in rvic:
                        rvic[victim] = 0
                    rvic[victim]+=1
                if wicket_kind == "caught":
                    if fielder not in catches:
                        catches[fielder] = 0
                    catches[fielder]+=1
                if wicket_kind == "stumps":
                    if fielder not in stumps:
                        stumps[fielder] = 0
                    stumps[fielder]+=1
    print(run_outs)
    # Sorted ascending by count, so the most frequent names print last.
    s_rvic = sorted(rvic.items(), key=operator.itemgetter(1))
    s_catches = sorted(catches.items(), key=operator.itemgetter(1))
    s_stumps = sorted(stumps.items(), key=operator.itemgetter(1))
    print(s_rvic)
    print(s_catches)
    print(s_stumps)
def all_types_outs(sheet_name):
    """Print a histogram (dict) of every dismissal kind found in column Y."""
    sheet = open(sheet_name)
    kind_counts = {}
    for r in range(1, sheet.nrows):
        for c in range(0, sheet.ncols):
            if cellname(r, c)[0] == 'Y':
                kind = sheet.cell(r, c).value
                kind_counts[kind] = kind_counts.get(kind, 0) + 1
    print(kind_counts)
#all_types_outs("Cricket_Dataset.xls")
#most_catches_stumps("Cricket_Dataset.xls")
#total_boundaries("Cricket_Dataset.xls")
#total_c_hc("bat/batsmen_match_stats.xls")
#total_venues("Cricket_Dataset.xls")
#mom_count("match/complete_match_stats.xls")
#extreme_totals("match/complete_match_stats.xls")
#largest_margin("match/complete_match_stats.xls")
#toss_stats("match/complete_match_stats.xls")
#team_avg_scores("match/complete_match_stats.xls")
#winning_probabilities("match/complete_match_stats.xls")
#ducks_stats("bat/batsmen_match_stats.xls")
| true |
60ab49a8d13611648b6a5ea1c467c7c40262a684 | Python | jordisoler/DummyAlphaZero | /selfplay.py | UTF-8 | 1,868 | 3.0625 | 3 | [] | no_license | import numpy as np
from time import time
from games import GameState, GameOutcomes
from mcts import MCTS
def selfplay(nn, game: GameState, **game_args):
    """Play one self-play game with MCTS guided by *nn*, then train *nn*.

    Every visited state and its MCTS policy are recorded; when the game
    ends, an outcome vector ``z`` is built and passed to
    ``nn.fit_game_state(states, optimal_pis, z)``.
    """
    states = []          # states visited, in move order
    optimal_pis = []     # MCTS policy vector for each recorded state
    game_outcome = None
    state = game.init(**game_args)
    mcts = MCTS(game, nn)
    turn = -1
    times = [time()]     # wall-clock stamps; times[0] marks the game start
    while game_outcome is None:
        turn += 1
        # Print the board on odd turns only.
        if turn % 2:
            print("Turn {}".format(turn))
            print(str(state))
        optimal_pi = mcts.search()
        states.append(state)
        optimal_pis.append(optimal_pi)
        action = sample_action(state, optimal_pi)
        mcts.next_turn(action)
        state = state.take_action(action)
        # NOTE(review): game_outcome appears to be evaluated from the
        # perspective of the player now to move, so a decisive game is
        # expected to surface as LOSS (a WIN would raise below) — confirm
        # against GameState.game_outcome.
        game_outcome = state.game_outcome(last_move=action)
        t_i = time()
        print("Move time: {:.2f}s".format(t_i - times[-1]))
        times.append(t_i)
    print(f"Final turn: {turn}")
    print("Total time: {:.2f}s".format(times[-1] - times[0]))
    if game_outcome == GameOutcomes.DRAW:
        print("It's a draw!!")
    elif turn % 2 == 0:
        # Even final turn => the first player made the last (winning) move.
        print("First player wins!")
        print(str(state))
    else:
        print("Second player wins!")
        # Flip the board so it prints from the winner's orientation.
        state.inverse()
        print(str(state))
    if game_outcome == GameOutcomes.DRAW:
        z = [0]*len(states)
    elif game_outcome == GameOutcomes.LOSS:
        # Alternating +/-1 training targets, counted back from the end so
        # the sign tracks which side each recorded state belonged to.
        z = [(-1)**(i+1) for i in range(len(states), 0, -1)]
    else:
        raise Exception('Invalid game outcome: {}'.format(game_outcome))
    nn.fit_game_state(states, optimal_pis, z)
def sample_action(state, optimal_pi):
    """Draw one legal action at random, weighted by the MCTS policy
    *optimal_pi* restricted to the legal-move mask."""
    legal_probabilities = optimal_pi[state.possible_actions_mask()]
    return np.random.choice(state.possible_actions(), p=legal_probabilities)
if __name__ == "__main__":
    # Demo entry point: build a fresh network for Connect 4 and run a
    # single self-play training game.
    from games import Connect4Board
    from neural_network import new_model
    nn = new_model(Connect4Board)
    selfplay(nn, Connect4Board)
| true |
1569bb8f46dc099c536f122d47352d01d24c2dda | Python | Ramesh-kumar-S/Py_Scripts | /Aadhar Passwd Generator.py | UTF-8 | 724 | 3.5 | 4 | [] | no_license | import time
def counter(count):
    """Coerce the raw user-entered count to int and run the generator."""
    how_many = int(count)
    return Generator(how_many)
def Generator(count):
    # Build `count` passwords interactively: first 4 characters of the
    # name (upper-cased) + last 4 characters of the date of birth.
    # This mirrors the common Aadhaar-PDF password scheme — presumably the
    # intent here, given the script's name; confirm with the author.
    SPLITTER = []  # type: list  # collected passwords
    for i in range(count):
        NAME=input("Enter your Name :")
        DOB=input("Enter the Date of Birth :")
        NAMES_SPLITTED=NAME[:4].upper()
        DOB_SPLITTED=DOB[-4:]
        PASSWD=NAMES_SPLITTED+DOB_SPLITTED
        SPLITTER.append(PASSWD)
    # Cosmetic divider, then print the results (Printer returns None).
    decorator()
    return Printer(SPLITTER)
def decorator():
    """Print a cosmetic run of asterisks (0..9 stars, no separators or
    newline) as a console divider; returns None."""
    # FIX: the local was named `str`, shadowing the builtin type for the
    # rest of the function body; renamed to `star`.
    star = "*"
    for i in range(10):
        print(i * star, end="")
def Printer(SPLITTER):
    # Print each generated password on its own line, pausing one second
    # between lines (purely for presentation); returns None.
    for i in SPLITTER:
        print("\nYour Password is : {}".format(i))
        time.sleep(1)
# Entry point: print a divider, ask how many passwords to create, then
# generate and print them.
decorator()
COUNT=input("Enter the Number of Passwords to be Generated :")
counter(COUNT)
| true |
ebf671948335594adbfd1915b7c00ad8e8a5a5ff | Python | gauravjoshi1292/TestPrograms | /lab4_checkpoint.py | UTF-8 | 557 | 3.609375 | 4 | [] | no_license | # lab4_checkpoint.py
# CS 1 Lab Assignment #4 checkpoint by THC.
# Creates a dictionary of Vertex objects based on reading in a file.
# Writes out a string for each Vertex object to a file.
from load_graph import load_graph
from bfs import breadth_first_search
vertex_dict = load_graph("dartmouth_graph.txt")
out_file = open("vertices.txt", "w")
for vertex in vertex_dict:
out_file.write(str(vertex_dict[vertex]) + "\n")
out_file.close()
start = vertex_dict["Rocky"]
goal = vertex_dict["AXA"]
print breadth_first_search(start, goal) | true |
943a3d0de69a3bf8d979cd8d31f2361528851505 | Python | ianramzy/old-code | /IPO/ipo9.py | UTF-8 | 147 | 3.21875 | 3 | [
"MIT"
] | permissive | import math
# Compute the hypotenuse of a right triangle from its two leg lengths.
side1 = float(input('Side 1?'))
side2 = float(input('side 2?'))
# FIX: the original stored the *squared* hypotenuse in a variable named
# `hypotenuse` and only square-rooted it at print time; math.hypot does
# the same computation directly (and avoids overflow of the squares).
hypotenuse = math.hypot(side1, side2)
print(hypotenuse)
| true |
daa24390f7abaa8402e7b4c35aeb8e0c5273dece | Python | DamianHusted/CS235-Assignment3 | /appl/domainmodel/genre.py | UTF-8 | 2,274 | 3.46875 | 3 | [] | no_license | class Genre:
__genre_name: str
def __init__(self, genre_name: str):
if genre_name == "" or type(genre_name) is not str or genre_name == "\n":
self.__genre_name = None
else:
sanitized_genre_name = genre_name.strip()
self.genre_list = sanitized_genre_name.split(",")
self.__genre_name = self.genre_list[0]
if len(self.genre_list) > 1:
self.subgenres = self.genre_list[1:]
    @property
    def genre_name(self) -> str:
        # Read-only accessor for the primary (first) genre name; None for
        # genres built from empty/invalid input.
        return self.__genre_name
def __repr__(self):
return f"<Genre {self.__genre_name}>"
def __eq__(self, other):
return self.__genre_name == other.__genre_name
    # noinspection PyUnboundLocalVariable
    def __lt__(self, other):
        """Order genres by primary name, then by subgenre list.

        Sorts the pair and reports whether ``self`` landed first.
        NOTE(review): the ``hasattr(..., "genre_name")`` guards look
        always-true, since ``genre_name`` is a class-level property.  When
        both names are equal the comparison falls through to
        ``self.subgenres``, which only exists for genres built from a
        comma-separated string — comparing two equal single-name genres
        raises AttributeError; confirm callers never do that.
        """
        genre_list = [self, other]
        if hasattr(self, "genre_name") and self.genre_name is not None \
                and hasattr(other, "genre_name") and other.genre_name is not None:
            if self.__genre_name != other.__genre_name:
                genre_list.sort(key=lambda x: x.__genre_name)
            elif self.subgenres != other.subgenres:
                genre_list.sort(key=lambda x: x.subgenres)
        if self == genre_list[0]:
            return True
        else:
            return False
def __hash__(self):
if hasattr(self, "genre_name"):
has_subgenres = hasattr(self, "subgenres")
hash_string = f"{self.genre_name} - {has_subgenres}"
return hash(hash_string)
else:
return None
# noinspection PyUnusedLocal,PyUnusedLocal
class TestGenreMethods:
    """Inline pytest-style sanity checks for Genre construction and the
    comparison dunders."""
    def test_innit(self):
        # Construction from plain, empty and comma-separated strings.
        genre1 = Genre("Horror")
        genre2 = Genre("")
        genre3 = Genre("sci-fi")
        genre4 = Genre("Action,Adventure,Sci-Fi")
        genre5 = Genre("Adventure,Drama,Romance")
        assert repr(genre1) == "<Genre Horror>"
        assert repr(genre2) == "<Genre None>"
        assert repr(genre3) == "<Genre sci-fi>"
        # Only the first comma-separated entry becomes the primary name.
        assert repr(genre4) == "<Genre Action>"
        assert genre5.subgenres == ['Drama', 'Romance']
        assert genre1.__eq__(genre2) is False
        assert genre1.__eq__(genre1) is True
        assert genre1.__lt__(genre2) is True
        assert genre1.__lt__(genre3) is True
8a93578dba84dd31150b3fd76f9e693607ef848a | Python | akswart/phys416code | /hw7/advecth.py | UTF-8 | 8,211 | 2.90625 | 3 | [] | no_license | # advect - Program to solve the advection equation
# using the various hyperbolic PDE schemes - high resolution version
# for questions 1 and 2
# clear all help advecth # Clear memory and print header
from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def hires2(y,u,h,tau,limit):
    """One high-resolution advection step: upwind fluxes plus a limited
    second-order correction (see Eq. for flux-limiter methods).

    y     - current solution values on the grid
    u     - advection velocity at each grid point
    h,tau - grid spacing and time step
    limit - limiter name, forwarded to limiter2
    Returns the updated solution array (or y unchanged on size mismatch).

    NOTE(review): relies on module-level index arrays ``im``/``ip``
    (periodic left/right neighbour indices, presumably built later in the
    script) — confirm they exist before this is called.
    """
    # hi resolution function that uses a limiter
    N=len(y)
    n2=len(u)
    if N != n2:
        print(' lengths of y and u do not match')
        return y
    # yout(1:N) = y(1:N) - tau/h*u(1:N).*(y(1:N)-y(im));
    # Split velocity into its positive/negative parts for upwinding.
    uplus =np.maximum(u,0)
    uminus=np.minimum(u,0)
    # NOTE(review): this zeros array is dead — delta is overwritten on the
    # next assignment from limiter2.
    delta = np.zeros(N)
    yout = np.zeros(N)
    delta = limiter2(y,u,limit)
    for i in range(0,N):
        # Left and right interface fluxes: upwind part + limited correction.
        flux=uminus[i]*y[i]+uplus[i]*y[im[i]]+0.5*abs(u[i])*(1-abs(u[i]*tau/h))*delta[i]
        fluxp=uminus[i]*y[ip[i]]+uplus[i]*y[i]+0.5*abs(u[i])*(1-abs(u[i]*tau/h))*delta[ip[i]]
        yout[i] = y[i] -tau/h*(fluxp-flux)
    return yout
def limiter2(y,u,limit):
    """Compute the limited slope array delta = phi(theta) * (y[i]-y[i-1])
    for the high-resolution scheme, where theta is the ratio of the
    upwind-side difference to the local difference.

    NOTE(review): relies on module-level periodic index arrays ``im``/``ip``
    (not defined in this part of the file) — confirm before use.
    """
    N=len(y)
    n2=len(u)
    if N != n2:
        print(' lengths of y and u do not match')
        return y
    # Backward differences deltay[i] = y[i] - y[i-1] (periodic via im).
    deltay=y[0:N]-y[im]
    # I picks the upwind neighbour: i-1 where u>0, i+1 where u<=0.
    I=np.copy(ip)
    ipositive = np.where(u>0)
    I[ipositive]=im[ipositive]
    theta = np.zeros(N)
    for i in range(0,N):
        theta[i]=0.0
        if deltay[i] != 0:
            theta[i]=deltay[I[i]]/deltay[i]
    #limit = 'minmod'
    phi = np.zeros(N)
    if(limit=='upwind'):
        phi=np.zeros(N) # upwind
    elif(limit=='lw'):
        phi = np.ones(N) # lax-wendroff
    elif(limit=='bm'):
        phi=np.copy(theta) # beam warming
    elif(limit=='minmod'):
        phi[0:N]=minmod(np.ones(N),theta[0:N]) # minmod method
    elif(limit =='mc'):
        for i in range(0,N):
            phi[i] = np.max([0,np.min([(1+theta[i])/2,2,2*theta[i]])]) # MC limiter
    elif(limit =='vanleer'):
        phi[0:N] = (theta[0:N] + abs(theta[0:N]))/(1+abs(theta[0:N])) # van leer
    elif(limit =='superbee'):
        # NOTE(review): phi=0 here is identical to the upwind branch; the
        # real superbee limiter is phi = max(0, min(1,2*theta), min(2,theta)).
        # This looks like an unimplemented homework placeholder — flagged,
        # deliberately not changed.
        phi[:] = 0.0 # superbee limiter
    else:
        print('Unknown limiter method.')
    delta=phi*deltay
    return delta
def minmod(a,b):
    """Array-smart minmod: where a and b share a sign, return the one of
    smaller magnitude (a on ties); return 0 where the signs differ."""
    if len(a) != len(b):
        print(' minmod, sizes do not match')
    same_sign = (a * b) > 0
    smaller_magnitude = np.where(np.abs(a) <= np.abs(b), a, b)
    return same_sign * smaller_magnitude
#* Select numerical parameters (time step, grid spacing, etc.).
method = int(input('Choose a numerical method: 1- FTCS, 2-Lax, 3-Lax-Wendroff, 4-Upwind, 5-High res: '))
if method ==5: # select limiter
    choice = int(input('For the hires method, choose a limiter: 1-upwind,2-Lax-Wendroff, \
3-beam warming, 4-minmod, 5-MC, 6-van leer, 7-superbee: '))
    # convert to text-based limit to pass to the limiter
    if (choice==1):
        # BUG FIX: limiter2() recognises 'upwind', not 'uw'; the old value
        # fell through to "Unknown limiter method." on every time step.
        limit='upwind'
    elif (choice==2):
        limit='lw'
    elif (choice==3):
        limit='bm'
    elif (choice==4):
        limit='minmod'
    elif (choice==5):
        limit='mc'
    elif (choice==6):
        limit='vanleer'
    elif (choice==7):
        limit='superbee'
    else:
        # Fall back to upwind for unrecognised choices instead of leaving
        # `limit` undefined (which raised a NameError later).
        limit='upwind'
N = int(input('Enter number of grid points: '))
L = 1.     # System size
h = L/N    # Grid spacing
c = 1      # Wave speed
print('Time for wave to move one grid spacing is ',(h/c))
tau = float(input('Enter time step: '))
coeff = -c*tau/(2.*h)    # Coefficient used by all schemes
coefflw = 2*coeff**2     # Coefficient used by L-W scheme
print('Wave circles system in %d steps'%(L/(c*tau)))
nStep = int(input('Enter number of steps: '))
#* Set initial and boundary conditions.
sigma = 0.1    # Width of the Gaussian pulse
k_wave = np.pi/sigma    # Wave number of the cosine
x = (np.arange(0,N)+1/2)*h - L/2 # Cell-centered grid coordinates on [-L/2, L/2)
ic=int(input('Input initial condition:, 1-gaussian pulse, 2-square wave, 3-both: '))
# Build the initial profile a(x, t=0) according to the user's choice.
if(ic ==1):
    sigma = 0.1    # Width of the Gaussian pulse
    k_wave = np.pi/sigma    # Wave number of the cosine
    # Initial condition is a Gaussian-cosine pulse
    a = np.cos(k_wave*x) * np.exp(-x**2/(2*sigma**2))
elif(ic==2):
    # Square wave: 1 on [N/4, N/2), 0 elsewhere.
    a=np.zeros(N)
    for i in range(int(N/4),int(N/2)):
        a[i]= 1.
else:
    sigma = 0.025;    # Width of the Gaussian pulse
    k_wave = np.pi/sigma;    # Wave number of the cosine
    # Initial condition is a Gaussian-cosine pulse
    a = np.exp(-(x-L/4)**2/(2*sigma**2))
    # ...with a square wave superimposed on [N/4, N/2).
    for i in range(int(N/4),int(N/2)):
        a[i] = 1.0
# Use periodic boundary conditions: ip/im are the neighbour index arrays
# used by all the schemes below.
ip = np.arange(0,N)+1
ip[N-1] = 0 # ip = i+1 with periodic b.c.
im = np.arange(0,N)-1
im[0] = N-1 # im = i-1 with periodic b.c.
#* Initialize plotting variables.
iplot = 0          # Plot counter
aplot = np.copy(a) # Record the initial state
tplot = np.array([0]) # Record the initial time (t=0)
nplots = 50        # Desired number of plots
plotStep = max(1, np.floor(nStep/nplots)) # Number of steps between plots
#* Loop over desired number of steps.
# plt.ion() # this messes things up
for iStep in range(nStep+1):  ## MAIN LOOP ##
    #* Compute new values of wave amplitude using FTCS,
    #  Lax or Lax-Wendroff method. All updates use the periodic index
    #  arrays ip (i+1) and im (i-1) defined above.
    if( method == 1 ):      ### FTCS method ###
        a[0:N]= a[0:N] + coeff*(a[ip]-a[im])
    elif( method == 2 ):    ### Lax method ###
        a[0:N] = 0.5*(a[ip]+a[im]) + coeff*(a[ip]-a[im])
    elif( method==3):       ### Lax-Wendroff method ###
        a[0:N] = a[0:N] + coeff*(a[ip]-a[im]) + coefflw*(a[ip]+a[im]-2*a)
    elif( method==4):       ### Upwind method ###
        a[0:N] = a[0:N] + 2*coeff*(a[0:N]-a[im])
    elif( method==5):       ### Hi res method ###
        # Constant velocity field c everywhere; hires2 applies the limiter.
        u=c*np.ones(len(a))
        a = hires2(a,u,h,tau,limit)
    #* Periodically record a(t) for plotting.
    if( (iStep%plotStep) < 1):  # Every plot_iter steps record
        iplot = iplot+1
        aplot = np.vstack((aplot,a))   # Record a(i) for plotting
        tplot = np.append(tplot,tau*iStep)
        print('%d out of %d steps completed'%(iStep,nStep))
#* Plot the initial and final states.
# need plt.ion() for plot windows to update
animate=1
if(animate==1):
    # plots in a movie fashion - comment out if you want the program to be faster
    # plt.ion()
    for i in range(iplot+1):
        plt.figure(1) # Clear figure 1 window and bring forward
        plt.clf()
        plt.plot(x,aplot[0,:],'-',label='Initial')
        plt.plot(x,aplot[i,:],'--',label='current')
        plt.legend(['Initial ','Final'])
        plt.xlabel('x')
        plt.ylabel('a(x,t)')
        plt.grid(True)
        plt.axis([-0.5, 0.5, -0.5, 1.2])
        plt.title(' time ='+str(tplot[iplot]))
        if(method == 1):
            plt.title('FTCS method, time ='+str(tplot[i]))
        elif(method == 2):
            plt.title('Lax method, time =' +str(tplot[i]))
        elif(method == 3):
            plt.title('Lax-Wendroff method, time =' +str(tplot[i]))
        elif(method == 4):
            plt.title('Upwind method, time='+str(tplot[i]))
        elif(method==5):
            plt.title('High resolution method, time ='+str(tplot[i]))
        plt.legend()
        plt.draw()
        # if iStep == nStep-1:
        #    temp=input('Hit any key to stop')
        plt.pause(tau)
    # end plots in a movie fashion
    plt.show()
# plt.ioff()
# Figure 2: overlay of the initial and final profiles.
plt.figure(2)
plt.clf()  # Clear figure 2 window and bring forward
plt.plot(x,aplot[0,:],'-',x,a,'--')
plt.legend(['Initial ','Final'])
plt.xlabel('x')
plt.ylabel('a(x,t)')
plt.axis([-0.5, 0.5, -0.5, 1.2])
plt.grid(True)
if (method == 1):
    plt.title('FTCS method')
elif(method == 2):
    plt.title('Lax method')
elif(method == 3):
    plt.title('Lax-Wendroff method')
elif(method == 4):
    plt.title('Upwind method')
elif(method==5):
    plt.title('High resolution using the '+ limit + ' method')
plt.show()
# #* Plot the wave amplitude versus position and time
tt,xx = np.meshgrid(x,tplot)
fig = plt.figure(3);plt.clf() # Clear figure 3 window and bring forward
# BUG FIX: Figure.gca(projection='3d') was deprecated in Matplotlib 3.4 and
# removed in 3.6; add_subplot(projection='3d') is the supported equivalent.
ax = fig.add_subplot(projection='3d')
surf = ax.plot_surface(xx, tt, aplot, rstride=1, cstride=1, cmap=cm.jet,linewidth=0, antialiased=False)
ax.set_xlabel('Time')
ax.set_ylabel('Position')
ax.set_zlabel('Amplitude')  # fixed stray ')' in the label text
plt.show()
| true |
acfb887896399ee6ad6840800501a0b722a93dc9 | Python | apmoore1/tdsa_augmentation | /tdsa_augmentation/statistics/number_additional_targets.py | UTF-8 | 2,597 | 3.015625 | 3 | [
"Apache-2.0"
] | permissive | import argparse
from collections import Counter
import json
from pathlib import Path
from typing import Dict
from target_extraction.data_types import TargetText
def parse_path(path_string: str) -> Path:
    """Convert a command-line path string into an absolute, resolved Path."""
    return Path(path_string).resolve()
if __name__=='__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("augmented_training_dataset", type=parse_path,
                        help='File path to the augmented training dataset')
    parser.add_argument("expanded_targets_fp", type=parse_path,
                        help='File path to the expanded targets json file')
    args = parser.parse_args()

    # Load the mapping from lower-cased target text to its equivalents.
    with args.expanded_targets_fp.open('r') as expanded_targets_file:
        targets_equivalents: Dict[str, str] = json.load(expanded_targets_file)
        assert len(targets_equivalents) > 1

    # expanded_target_counts: how many alternative targets each expandable
    # target received (key = number of alternatives, value = frequency).
    expanded_target_counts = Counter()
    number_training_samples = 0
    number_targets_expanded = 0
    with args.augmented_training_dataset.open('r') as training_file:
        # The dataset is stored one TargetText JSON object per line.
        for line in training_file:
            training_sample = TargetText.from_json(line)
            number_targets = len(training_sample['targets'])
            number_training_samples += number_targets
            for target_index in range(number_targets):
                original_target = training_sample['targets'][target_index]
                # Only targets present in the equivalents mapping can be expanded.
                if original_target.lower() not in targets_equivalents:
                    continue
                number_targets_expanded += 1
                # Expanded alternatives are stored under 'target <index>' keys.
                expanded_target_key = f'target {target_index}'
                expanded_targets = training_sample[expanded_target_key]
                assert original_target in expanded_targets
                # The original target is included in the list, so subtract it.
                number_expanded_targets = len(expanded_targets) - 1
                assert len(expanded_targets) == len(set(expanded_targets))
                expanded_target_counts.update([number_expanded_targets])

    # Aggregate: total extra samples and how many targets gained >= 1 alternative.
    total_more_samples = 0
    number_targets_can_be_expanded = 0
    for number_expanded, count in expanded_target_counts.items():
        total_more_samples += (number_expanded * count)
        if number_expanded > 0:
            number_targets_can_be_expanded += count
    print(f'Number of training samples {number_training_samples}')
    print(f'Number of training samples that had targets that can be expanded {number_targets_expanded}')
    print(f'Number of samples that can be expanded {number_targets_can_be_expanded}')
    print(sorted(expanded_target_counts.items(), key=lambda x: x[0]))
    print(f'Total more training samples from augmentation {total_more_samples}')
87c05bd86821fb10fa4caa1d39ab793e725f75bc | Python | DevMine/devmine-prototype | /tools/data_gathering/get_users.py | UTF-8 | 5,260 | 2.75 | 3 | [
"BSD-3-Clause"
] | permissive | import json
import httplib2
import sys
import time
import socket
# User downloader
class UserGet(object):
    """Downloads GitHub users through the /users REST endpoint.

    Pages through users with id in (since, stop], dumping batches to JSON
    files in `outdir` and sleeping through API rate-limit windows.
    """

    def __init__(self, oauth_id, oauth_secret, since, stop, outdir):
        self.oauth_id = oauth_id
        self.oauth_secret = oauth_secret
        self.since = since
        # BUG FIX: `since` was previously stored here, losing the requested
        # upper bound of the download.
        self.stop = stop
        self.first_user = since
        self.url = "https://api.github.com/users"
        self.log_file = open(outdir + "/getter.log", "w")
        self.output_dir = outdir

    # Get one page of users with id > since
    def get_page(self, since):
        """Gets a page of users such that id > since.

        Returns the (headers, body) pair from httplib2."""
        h = httplib2.Http()  # (".cache")
        url = self.url + "?since=%d" % (since)
        if self.oauth_id and self.oauth_secret:
            url += "&client_id=%s&client_secret=%s" % (self.oauth_id,
                                                       self.oauth_secret)
        # self.log("Querying " + url)
        r, content = h.request(url, "GET")
        return r, content

    # Gets all pages
    def get_all(self):
        """Fetch users until self.stop is reached, dumping every ~5000."""
        self.log("Starting...")
        users = []
        last_user = self.since
        try:  # Catches KeyboardInterrupts to shutdown gracefully
            # BUG FIX: the loop bound is now the instance attribute instead
            # of an implicit module-level `stop` global.
            while last_user < self.stop:
                # Send request, repeat if network problem
                try:
                    r, content = self.get_page(self.since)
                except httplib2.HttpLib2Error as e:
                    self.log("Httplib2 error: %s" % str(e))
                    self.log("Trying again...")
                    continue
                except socket.error as e:
                    self.log("Socket error %d: %s" % (e.errno, e.strerror))
                    self.log("Trying again...")
                    continue

                # Check the response status code to see if the request was
                # successful
                if r['status'] == '200':
                    jcontent = json.loads(content)
                    # If we don't get new users, we stop
                    if len(jcontent) == 0 or self.since == jcontent[-1]['id']:
                        self.log("Last request didn't return new users. \
Stopping!")
                        self.dump(users)
                        return
                    else:
                        self.since = jcontent[-1]['id']
                        last_user = self.since
                        users.extend(jcontent)
                else:
                    # If the request was not succesful, print headers and wait
                    # a little bit
                    self.log("Received return status: %s" % r['status'])
                    self.log(str(r))
                    time.sleep(3)

                # Check the number remaining API calls
                remaining_calls = int(r['x-ratelimit-remaining'])
                if remaining_calls == 0:
                    waittime = int(r['x-ratelimit-reset']) - time.time()
                    self.log("Waiting %d minutes for more API calls"
                             % (waittime / 60))
                    time.sleep(waittime)

                # Dump users if we have more than 5000
                if len(users) > 5000:
                    self.log("Remaining API calls: %d \tLast user obtained: %d"
                             % (remaining_calls, self.since))
                    self.dump(users)
                    users = []
        except KeyboardInterrupt:
            # Ignore exception and jump to finally
            pass
        finally:
            # Close gracefully
            self.log("Writing files...")
            self.dump(users)
            if len(users) > 0:
                self.log("Last user written: %d" % users[-1]['id'])
            else:
                self.log("No user was fetched")
            self.log_file.flush()
            self.log_file.close()

    # Writes msg both to stdout and to the log file
    def log(self, msg):
        print(("[%d] UserGetter: " % (int(time.time()))) + msg)
        self.log_file.write("[" + str(time.time()) + "] " + msg + "\n")

    # Dumps the list of users to a file
    def dump(self, users):
        """Appends `users` as JSON to users_<first>_<last>.json in output_dir."""
        if len(users) > 0:
            out_name = self.output_dir + "/users"
            out_name += "_" + str(users[0]['id']).zfill(7)
            out_name += "_" + str(users[-1]['id']).zfill(7)
            out_name += ".json"
            output = open(out_name, "a")
            output.write(json.dumps(users))
            output.flush()
            output.close()
if __name__ == "__main__":
    # CLI: get_users.py <first user> <last user> <output dir> [id] [secret]
    if len(sys.argv) < 4:
        print("Usage: %s <first user> <last user> <output dir> "
              "[oauth id] [oauth secret]" % (sys.argv[0]))
        print("Gets all the users such that "
              "[first user < user id <= last user id ]")
        print("When it runs out of API calls it waits")
        sys.exit(-1)
    else:
        since = int(sys.argv[1])
        stop = int(sys.argv[2])
        outdir = sys.argv[3]
        # OAuth credentials are optional; without them GitHub applies the
        # much lower unauthenticated rate limit.
        if len(sys.argv) > 5:
            oauth_id = sys.argv[4]
            oauth_secret = sys.argv[5]
        else:
            oauth_id = None
            oauth_secret = None
        getter = UserGet(oauth_id, oauth_secret, since, stop, outdir)
        getter.get_all()
| true |
34cf725c73f6d80f6fbfdf5e0d0cc595ab291da4 | Python | robertecurtin/plutos-envy | /populate_game.py | UTF-8 | 1,289 | 2.765625 | 3 | [
"MIT"
] | permissive | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PlutosEnvy.settings')
import django
django.setup()
from game.models import Unit, City, Player
# This uses config_populate.txt to create a set of players, cities, and units while connecting all three.
# See config_populate.txt for instructions on formatting.
def populate():
    """Read config_populate.txt and create the players, cities and units it
    describes, linking each new city/unit to the most recent player.

    Line format: "<P|C|U> <name...>". Lines whose first token contains '#'
    are comments; lines with no name are skipped.
    """
    p = Player()
    u = Unit()
    c = City()
    # BUG FIX: open the config file with a context manager so the file
    # handle is closed even if a line fails to parse.
    with open("config_populate.txt") as config_file:
        for line in config_file:
            split = line.split(" ")
            instruction = split[0]
            if "#" in instruction:
                continue
            if len(split) == 1:
                continue
            # The name is everything after the instruction token.
            name = ' '.join(split[1:len(split)]).rstrip("\n")
            print(name)
            if instruction == "P":
                p = add_player(name)
            elif instruction == 'C':
                c = add_city(name, p)
            elif instruction == 'U':
                u = add_unit(name, p, c)
                p.add_unit(u)
def add_unit(name, player, city):
    """Fetch or create a Unit named `name`, owned by `player`, in `city`."""
    unit, _ = Unit.objects.get_or_create(name=name, owner=player, currentCity=city)
    return unit
def add_city(name, player):
    """Fetch or create a City named `name` owned by `player`."""
    city, _ = City.objects.get_or_create(name=name, owner=player)
    return city
def add_player(name):
    """Fetch or create a Player named `name`."""
    player, _ = Player.objects.get_or_create(name=name)
    return player
if __name__ == '__main__':
    # Entry point: seed the database from config_populate.txt.
    print("Populating...")
    populate()
| true |
4c993c72ee28edc8f45ab80aae36d323174caadd | Python | carlpoole/Python_Twitter_Example | /TwitterExample.py | UTF-8 | 2,052 | 2.859375 | 3 | [] | no_license | from twitter import *
import re
# --- oAuth Information -----------------------------------------------
OAUTH_TOKEN = ''
OAUTH_SECRET = ''
CONSUMER_KEY = ''
CONSUMER_SECRET = ''
# ---------------------------------------------------------------------
class Carltwitter:
    """Small Twitter client that prints colourised tweets to the terminal.

    NOTE(review): this file uses Python 2 print statements and raw_input;
    it will not run under Python 3 without porting.
    """
    def __init__(self,OAUTH_TOKEN,OAUTH_SECRET,CONSUMER_KEY,CONSUMER_SECRET):
        # Some ANSI color escape constants for terminal formatting
        self.BLUE = '\033[94m'
        self.GREEN = '\033[92m'
        self.RED = '\033[91m'
        self.MAGENTA = '\033[95m'
        self.ENDCOLOR = '\033[0m'
        # Some regex pattern compilations for coloring usernames and hashtags
        self.reUser = re.compile(r"(?<=^|(?<=[^a-zA-Z0-9-\.]))@([A-Za-z_]+[A-Za-z0-9_]+)")
        self.reHashtag = re.compile(r"(?<=^|(?<=[^a-zA-Z0-9-\.]))#([A-Za-z_]+[A-Za-z0-9_]+)")
        # Setup Twitter API
        self.t = Twitter(auth=OAuth(OAUTH_TOKEN, OAUTH_SECRET, CONSUMER_KEY, CONSUMER_SECRET))
    def printLastTweet(self, username):
        """Print the user's most recent tweet with coloured handle/hashtags."""
        try:
            timeline = self.t.statuses.user_timeline(screen_name=username,count=1)
            # Colour the author handle blue, @mentions red, #hashtags green.
            print '\n'.join({self.BLUE + '@' + tweet['user']['screen_name'] + self.ENDCOLOR + ": "
                             + re.sub(self.reUser, self.RED + r'@\1' + self.ENDCOLOR,
                                      re.sub(self.reHashtag, self.GREEN + r'#\1' + self.ENDCOLOR,tweet['text']))
                             for tweet in timeline})
        except:
            print 'There was a problem getting tweets for ' + username + '. Please try again!'
    def printUserSummary(self, username):
        # Placeholder - not implemented yet.
        try:
            print 'do something'
        except:
            print 'error text here'
    def printTendingTopics(self):
        # Placeholder - not implemented yet.
        try:
            print 'do something'
        except:
            print 'error text here'
if __name__ == "__main__":
    # Prompt for a handle and show that account's latest tweet.
    # (raw_input is Python 2 only.)
    username = raw_input("Enter a twitter @ username:")
    ct = Carltwitter(OAUTH_TOKEN,OAUTH_SECRET,CONSUMER_KEY,CONSUMER_SECRET)
    ct.printLastTweet(username)
| true |
a5bf3878cf8d1c37b05907f6d7825d9792f8daf1 | Python | LinganGu/cheetah-agile-api | /cheetahapi/main.py | UTF-8 | 2,049 | 2.734375 | 3 | [
"BSD-3-Clause"
] | permissive | from flask import Flask, request, abort, jsonify
import sys
import getopt
from core.config import Config
from dispacher import Dispacher
app = Flask(__name__)
def parse_arguments(args):
    """
    Parses command-line arguments passed to the main program
    :param args: List with command-line parameters passed to the main
    :return: Dictionary with general configuration
                {"-c": "..."}
    :raise GetoptError: if error when parsing arguments
    """
    parsed_options, _ = getopt.getopt(args, "c:")
    # Start from the default config path, then overlay any supplied options.
    parameters = {"-c": Config.DEFAULT_CONFIG_FILE}
    parameters.update(parsed_options)
    return parameters
def read_configuration(config_file_path):
    """
    Reads configuration and loads it into a Config object
    :param config_file_path: String with general configuration file path
    :return: Config object
    :raise Exception: if there is an error during the reading
    """
    configuration = Config()
    configuration.load_config_file(config_file_path)
    return configuration
"""
Usage:
main.py [-c <conf_file>]
:return 0: Application was executed and successfully stopped after a while
:return -1: Wrong input parameters
:return -2: Error when reading the configuration
"""
# parse command-line arguments
try:
params = parse_arguments(sys.argv[1:])
except getopt.GetoptError:
print("\nUsage:\n\tmain.py [-c <conf_file>]\n")
sys.exit(-1)
config = read_configuration(params["-c"])
def get_json_response(response):
    """Serialize a domain response object into a Flask JSON response."""
    payload = response.to_json()
    return jsonify(payload)
@app.route("/v1/authenticate", methods = ['POST'])
def authenticate():
if not request.is_json:
abort(400)
dispacher = Dispacher(config)
response = dispacher.authenticate(request.get_json())
print(request.get_json())
return get_json_response(response)
@app.route("/ping")
def ping():
return "cheetah-api version {0} up and running!".format(config.get_general()["version"])
if __name__ == "__main__":
app.run(host=config.get_general()["host"], port=int(config.get_general()["port"]))
| true |
398bce3e2cb80fff74cd6dba73bb4f61f6f5b4ca | Python | amarmulyak/Python-Core-for-TA | /hw06/amarm/task3_solution2.py | UTF-8 | 1,024 | 4.1875 | 4 | [] | no_license | import math
"""
Provide full program code of parse_number(num) function which returns the dict
with following structure: {odd: number of odd digits in input value,
even: number of even digits of input value} or false when wrong input value.
num - input number.
NOTE: Assume that the "zero" digit also belongs to even numbers
EXAMPLE OF Inputs/Ouputs when using this function:
print parse_number(34567)
{'odd': 3, 'even': 2}
print parse_number(100)
{'odd': 1, 'even': 2}
print parse_number("word")
False
"""
def get_digit(numbers):
    """Count the even and odd decimal digits of an integer.

    numbers : value to inspect.

    Returns {"evens": e, "odds": o} for an int (zero counts as even and a
    negative sign is ignored), or False for any non-int input - matching
    the module docstring's contract of returning False on wrong input.
    The previous version crashed on negative ints (log10 of a value <= 0)
    and silently produced nonsense for floats.
    """
    # bool is a subclass of int; treat it as invalid input too.
    if not isinstance(numbers, int) or isinstance(numbers, bool):
        return False
    evens = 0
    odds = 0
    remaining = abs(numbers)
    # Peel off one decimal digit per iteration; runs at least once so that
    # 0 is counted as a single even digit.
    while True:
        remaining, digit = divmod(remaining, 10)
        if digit % 2 == 0:
            evens += 1
        else:
            odds += 1
        if remaining == 0:
            break
    return {
        "evens": evens,
        "odds": odds
    }


print(get_digit(1245))
| true |
d313ae44c72db515ee786b8a5e3bf6f4fb2893c6 | Python | karthigabanu/python_programs | /swapcase.py | UTF-8 | 55 | 3.234375 | 3 | [] | no_license | n=str(input('enter the string: '))
print(n.swapcase())
| true |
fef37e2c38b74a8250d737bf113228f0c957a523 | Python | kristan-dev/crisputilities | /csv_parser.py | UTF-8 | 1,783 | 2.71875 | 3 | [] | no_license | import pandas as pd
import io
from itertools import islice
import logging
from s3_object_source import S3_Source
from config import cfg
import logger
class CSVParser:
    """Streams rows of large CSV files (stored in S3) as plain dicts."""

    @classmethod
    def parse_csv(cls, s3args):
        """Yield each row of the S3 object described by `s3args` as a dict."""
        logging.info("Reading S3 CSV into Dataframe")
        s3_obj = S3_Source(s3args=s3args).S3ObjectDataSource()

        header = None
        logging.info("Processing Dataframe as chunks")
        reader = pd.read_csv(io.BytesIO(s3_obj["Body"].read()),chunksize=1000000,delimiter=",",keep_default_na=False,)
        for chunk in reader:
            # Column names are captured from the first chunk only.
            if header is None:
                header = chunk.columns.to_list()
            for row in chunk.iterrows():
                yield cls.form_data(keys=header, value=row)

    @classmethod
    def parse_csv_from_file(cls, s3args, file_name):
        """Download `file_name` from S3, then yield each CSV row as a dict."""
        logging.info("Downloading file from s3 bucket")
        S3_Source(s3args=s3args).download_file_as_temp(file_abspath=file_name)

        header = None
        logging.info("Processing Dataframe as chunks")
        for chunk in pd.read_csv(file_name,chunksize=1000000,delimiter=",",keep_default_na=False,):
            # Column names are captured from the first chunk only.
            if header is None:
                header = chunk.columns.to_list()
            for row in chunk.iterrows():
                yield cls.form_data(keys=header, value=row)

    @staticmethod
    def form_data(keys, value):
        """Build {column: cell} for one (index, Series) pair from iterrows()."""
        row = value[1]
        return {key: row[key] for key in keys}
if __name__ == "__main__":
# csv_rows = CSVParser.parse_csv()
# batch_size = 10000
# while True:
# rows =list(islice(csv_rows, 0, batch_size))
# if len(rows) <= 0:
# break
pass
| true |
9a541da512cfa042df144e4644b5654f38613e76 | Python | lauradiosan/MIRPR-2019-2020 | /StudProjects/team06/project/server/text_preprocessor/wordcloud_utils.py | UTF-8 | 1,693 | 2.921875 | 3 | [] | no_license | """Utils for creating wordclouds."""
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import logging
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from text_preprocessor.text_preprocessor import TextPreprocessor
from text_preprocessor import read_utils
def create_wordcloud(data, filename):
    """Creates a wordcloud for the given data.

    Preprocesses each entry in `data`, builds a word cloud from the
    resulting lower-cased tokens and saves the image to `filename`.
    """
    logging.basicConfig(format='%(asctime)-15s %(message)s')
    logger = logging.getLogger('create_wordcloud')

    stopwords = read_utils.read_hotel_specific_stopwords(logger)

    tp = TextPreprocessor()
    preprocessed_entries = [tp.preprocess_text_for_wordcloud(entry)
                            for entry in data]
    # Flatten into individual lower-cased tokens. BUG FIX: the original
    # loop rebound its loop variable ("token = token.lower()"), which had
    # no effect on the list despite the comment claiming otherwise.
    tokens = [token.lower()
              for entry in preprocessed_entries
              for token in entry.split()]

    # WordCloud.generate expects one whitespace-separated string.
    comment_words = ' '.join(tokens)

    wordcloud = WordCloud(width=800,
                          height=800,
                          background_color='white',
                          stopwords=stopwords,
                          min_font_size=10).generate(comment_words)

    plt.figure(figsize=(8, 8), facecolor=None)
    plt.imshow(wordcloud)
    plt.axis("off")
    plt.tight_layout(pad=0)
    plt.savefig(filename)
    # plt.show()
if __name__ == '__main__':
    # Smoke test: build a word cloud from a tiny fixed corpus and write the
    # image next to this module.
    data = ['house', 'house mouse', 'house mouse']
    filename = os.path.join(os.path.dirname(__file__), 'test_wordcloud_image')
    create_wordcloud(data, filename)
| true |
3f2da4803a80447c9a9accc0a41fb1a029152e28 | Python | YosriGFX/holbertonschool-machine_learning | /pipeline/0x01-apis/0-passengers.py | UTF-8 | 742 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python3
'''0. Can I join?'''
import requests
def availableShips(passengerCount):
    '''returns the list of ships that can
    hold a given number of passengers'''
    matching = []
    url = 'https://swapi-api.hbtn.io/api/starships/'
    # Follow the paginated API until 'next' is null.
    while url is not None:
        page = requests.get(url).json()
        url = page['next']
        for ship in page['results']:
            # Capacities may contain thousands separators ("1,000") or
            # non-numeric values ("n/a"), which count as 0.
            raw_capacity = ship['passengers'].replace(',', '')
            try:
                capacity = int(raw_capacity)
            except ValueError:
                capacity = 0
            if capacity >= passengerCount:
                matching.append(ship['name'])
    return matching
| true |
abe88d935c0b579ac6570bf318d65fcdf099d208 | Python | bugsalexander/vmpair | /app.py | UTF-8 | 13,989 | 2.640625 | 3 | [] | no_license | from flask import Flask, Response, session, request, redirect
import json
import config
import mysql.connector
from datetime import datetime
app = Flask(__name__)
# NOTE(review): placeholder secret key committed to source - replace with a
# securely generated value before deployment (the string itself says so).
app.secret_key = 'This is not a secure secret. Remember to change me in the future!'
@app.route("/api/v1/test")
def hello_world():
with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
print("I connected to the database that was created after I ran 'docker-compose up -d'!")
mycursor = mydb.cursor()
session['email'] = config.EMAIL
mycursor.execute(
f"select * from meetings where (user_1_email = '{session['email']}' \
or user_2_email = '{session['email']}') and meeting_date > current_date;")
result = mycursor.fetchall()
print("the result of the query is", result)
print(type(result))
return json.dumps(str(result))
@app.route("/api/v1/login", methods=["POST", "OPTIONS"])
def login():
''' Update stored info about person currently logged in
email: string
password: string
'''
print(request.json['email'])
session['email'] = request.json['email']
return Response(200)
@app.route("/api/v1/logout", methods=["GET", "OPTIONS"])
def logout():
''' Log the user out
'''
try:
session.pop('email')
except:
pass
return Response(200)
@app.route("/api/v1/welcome", methods=['GET'])
def get_welcome():
''' Return information for welcome page
name: string
nextMeeting:
name: string
date: DateTime
partnerStatus: string
nextPairing: DateTime
willBeAttending: boolean
'''
with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
mycursor = mydb.cursor()
# Query name from the Users table using email
mycursor.execute(f"select full_name from users where email = '{session['email']}';")
name = mycursor.fetchone()
result = {
"name": name,
"nextPairing": 7 - datetime.now().weekday()
}
# Query next meeting info from Meetings table using email
mycursor = mydb.cursor()
mycursor.execute(f'''SELECT user_2_email AS partner_email, meeting_date, user_2_attending AS partner_status, user_1_attending as my_status
FROM meetings
WHERE user_1_email = '{session['email']}' AND meeting_date > CURDATE()
UNION
SELECT user_1_email AS partner_email, meeting_date, user_1_attending AS partner_status, user_2_attending as my_status
FROM meetings
WHERE user_2_email = '{session['email']}' AND meeting_date > CURDATE();''')
next_meeting_info = mycursor.fetchone()
if next_meeting_info != None:
partnerEmail, nextMeetingTime, partnerStatus, my_status = next_meeting_info
# Query partner's name from the Users table using their email
mycursor.execute(f"SELECT full_name FROM users WHERE email = '{partnerEmail}';")
partnerName = mycursor.fetchone()
print("the result of the third query is", partnerName)
result["nextMeeting"] = {
"partnerName": partnerName,
"time": nextMeetingTime.strftime("%m/%d/%Y"),
"partnerStatus": partnerStatus,
}
result["willBeAttending"] = my_status
return Response(
json.dumps(result),
status=200,
mimetype='application/json'
)
@app.route("/api/v1/welcome", methods=['POST'])
def set_welcome():
willBeAttending = request.json['willBeAttending']
print("field type is", type(willBeAttending))
# enter willBeAttending status from welcome page into Meetings table
with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
mycursor = mydb.cursor()
mycursor.execute(f'''UPDATE meetings
SET user_1_attending = {willBeAttending}
WHERE user_1_email = '{session['email']}' AND meeting_date > CURDATE();''')
mycursor.execute(f'''UPDATE meetings
SET user_2_attending = {willBeAttending}
WHERE user_2_email = '{session['email']}' AND meeting_date > CURDATE();''')
mydb.commit()
return Response(
json.dumps({"willBeAttending":willBeAttending}),
status=200,
mimetype='application/json'
)
@app.route("/api/v1/preferences", methods=['GET'])
def get_preferences():
''' Return existing preferences
name: string
preferredPronouns: string
email: string
doesWantMatching: boolean
daysFreeToMeet: string[]
availabilityByDay: weekDayAvail[]
Fields in weekDayAvail object
times: string[] e.g. Monday: [“12pm”, “1pm”]
canVirtual: boolean
canInPerson: boolean
maxMeetingsPerWeek: number
'''
with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
# Query name, preferredPronouns, doesWantMatching from Users table
# From availability table:
# Query daysFreeToMeet, then use that to get availabilityByDay
# Query maxMeetingsPerWeek
with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
mycursor = mydb.cursor()
mycursor.execute(f'''SELECT *
FROM users
INNER JOIN days_of_week_availability AS avail ON users.email = avail.email
WHERE users.email = '{session['email']}';''')
preferences_record = mycursor.fetchone()
print('preferences_record is', preferences_record)
fullName, preferredPronouns, email, role, team, dateStarted, doesWantMatching, sameEmail, maxMeetingsPerWeek, mondayTimesStr, mondayCanVirtual, mondayCanInPerson, tuesdayTimesStr, tuesdayCanVirtual, tuesdayCanInPerson, wednesdayTimesStr, wednesdayCanVirtual, wednesdayCanInPerson, thursdayTimesStr, thursdayCanVirtual, thursdayCanInPerson, fridayTimesStr, fridayCanVirtual, fridayCanInPerson = preferences_record
print(mondayTimesStr)
result = {
"name": fullName,
"preferredPronouns": preferredPronouns,
"email": email,
"doesWantMatching": doesWantMatching,
"availabilityByDay": [
{
"times": json.loads(mondayTimesStr),
"canVirtual": True if mondayCanVirtual == 1 else False,
"canInPerson": True if mondayCanInPerson == 1 else False
},
{
"times": json.loads(tuesdayTimesStr),
"canVirtual": True if tuesdayCanVirtual == 1 else False,
"canInPerson": True if tuesdayCanInPerson == 1 else False
},
{
"times": json.loads(wednesdayTimesStr),
"canVirtual": True if wednesdayCanVirtual == 1 else False,
"canInPerson": True if wednesdayCanInPerson == 1 else False
},
{
"times": json.loads(thursdayTimesStr),
"canVirtual": True if thursdayCanVirtual == 1 else False,
"canInPerson": True if thursdayCanInPerson == 1 else False
},
{
"times": json.loads(fridayTimesStr),
"canVirtual": True if fridayCanVirtual == 1 else False,
"canInPerson": True if fridayCanInPerson == 1 else False
}
],
"maxMeetingsPerWeek": maxMeetingsPerWeek
}
return Response(
json.dumps(result),
status=200,
mimetype='application/json'
)
@app.route("/api/v1/preferences", methods=['POST'])
def set_preferences():
''' Update existing preferences
name: string
preferredPronouns: string
email: string
doesWantMatching: boolean
availabilityByDay: weekDayAvail[]
Fields in weekDayAvail object
times: string[] e.g. Monday: [“12pm”, “1pm”]
canVirtual: boolean
canInPerson: boolean
maxMeetingsPerWeek: number
'''
# enter name, preferredPronouns, doesWantMatching into Users table
# enter availabilityByDay fields, maxMeetingsPerWeek into Availability table
with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
# see if the user already has preferences set up
mycursor = mydb.cursor()
mycursor.execute(f'''SELECT full_name
FROM users
INNER JOIN days_of_week_availability ON users.email = days_of_week_availability.email
WHERE users.email = '{session['email']}';''')
preferences_record = mycursor.fetchall()
if len(preferences_record) > 1:
return Response(json.dumps({'msg': 'you cannot have more than one user per email'}), status=404, mimetyple='application/json')
if len(preferences_record) == 1:
print("len is", len(preferences_record))
# update the user record
mycursor = mydb.cursor()
mycursor.execute(f'''UPDATE users
SET users.full_name = '{request.json['name']}',
users.preferred_pronouns = '{request.json['preferredPronouns']}',
users.does_want_matching = {request.json['doesWantMatching']}
WHERE users.email = '{session['email']}';''')
mydb.commit()
# update the days_of_week_availability record
mycursor = mydb.cursor()
mycursor.execute(f'''UPDATE days_of_week_availability
SET days_of_week_availability.max_weekly_meetings = '{request.json['maxMeetingsPerWeek']}',
days_of_week_availability.monday_times = '{json.dumps(request.json['availabilityByDay'][0]['times'])}',
days_of_week_availability.monday_can_virtual = {request.json['availabilityByDay'][0]['canVirtual']},
days_of_week_availability.monday_can_in_person = {request.json['availabilityByDay'][0]['canInPerson']},
days_of_week_availability.tuesday_times = '{json.dumps(request.json['availabilityByDay'][1]['times'])}',
days_of_week_availability.tuesday_can_virtual = {request.json['availabilityByDay'][1]['canVirtual']},
days_of_week_availability.tuesday_can_in_person = {request.json['availabilityByDay'][1]['canInPerson']},
days_of_week_availability.wednesday_times = '{json.dumps(request.json['availabilityByDay'][2]['times'])}',
days_of_week_availability.wednesday_can_virtual = {request.json['availabilityByDay'][2]['canVirtual']},
days_of_week_availability.wednesday_can_in_person = {request.json['availabilityByDay'][2]['canInPerson']},
days_of_week_availability.thursday_times = '{json.dumps(request.json['availabilityByDay'][3]['times'])}',
days_of_week_availability.thursday_can_virtual = {request.json['availabilityByDay'][3]['canVirtual']},
days_of_week_availability.thursday_can_in_person = {request.json['availabilityByDay'][3]['canInPerson']},
days_of_week_availability.friday_times = '{json.dumps(request.json['availabilityByDay'][4]['times'])}',
days_of_week_availability.friday_can_virtual = {request.json['availabilityByDay'][4]['canVirtual']},
days_of_week_availability.friday_can_in_person = {request.json['availabilityByDay'][4]['canInPerson']}
WHERE days_of_week_availability.email = '{session['email']}';''')
mydb.commit()
else:
# insert the record
# TODO
pass
return Response(
json.dumps({'msg': 'successfully updated preferences'}),
status=200,
mimetype='application/json'
)
@app.route("/api/v1/stats", methods=['GET'])
def get_stats():
    ''' Return information for stats page
        totalPeopleMet: number
        totalMeetings: number
        peopleMet: map<string<string>>
            name (string): date (DateTime)
    '''
    with mysql.connector.connect(host='localhost', user='root', port=3307, password='root', database='test_db') as mydb:
        # get all meetings that have happened for this person where both people said yes
        mycursor = mydb.cursor()
        # NOTE(review): the session email is interpolated straight into the SQL
        # text; prefer a parameterized query.  Also, the JOIN resolves only
        # user_2's name, so when the current user *is* user_2 the "acquaintance"
        # returned is the current user themself — confirm this is intended.
        mycursor.execute(f'''SELECT meetings.meeting_date AS meeting_dates, users.full_name AS acquaintance_names
                            FROM meetings
                            INNER JOIN users ON meetings.user_2_email = users.email
                            WHERE (meetings.user_1_email = '{session['email']}' OR meetings.user_2_email = '{session['email']}')
                            AND meetings.user_1_attending = TRUE
                            AND meetings.user_2_attending = TRUE
                            AND meetings.meeting_date < CURDATE();''')
        all_people_met = mycursor.fetchall()
        # create a map that maps an acquaintance's name to the last date they were met as a string
        # NOTE(review): "last" relies on row order, but the query has no
        # ORDER BY — later rows simply overwrite earlier ones.
        unique_people_met = {}
        for person in all_people_met:
            unique_people_met[person[1]] = str(person[0])
        result = {
            "totalPeopleMet": len(unique_people_met),
            "totalMeetings": len(all_people_met),
            "peopleMet": unique_people_met
        }
        return Response(
            json.dumps(result),
            status=200,
            mimetype='application/json'
        )
if __name__ == "__main__":
app.run(debug=True) | true |
5e6377485d107b1ceb0472d28a7f33a4b4d862fc | Python | coder-sys/Todo-list | /application1.py | UTF-8 | 3,253 | 2.546875 | 3 | [] | no_license | from flask import Flask, render_template, url_for, request, redirect
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime
from datetime import date
import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///buttonname.db'
db1 = SQLAlchemy(app)
button_name = 0
bname = "Show time"
class Buttonname(db1.Model):
    """One to-do task row: text, completion flag, deadline and a label/days-left field."""
    id = db1.Column(db1.Integer, primary_key=True)
    # NOTE(review): every column below is declared Integer, yet `name`,
    # `buttonname` and `bname` are assigned strings elsewhere in this file
    # (e.g. buttonname='False'); SQLite tolerates this, but the declared
    # types look wrong.
    name = db1.Column(db1.Integer, nullable=False)  # task text (from the form's 'text' field)
    buttonname = db1.Column(db1.Integer, nullable=False)  # set to 'True' once the task is completed
    deadline_year = db1.Column(db1.Integer, nullable=False)
    deadline_month = db1.Column(db1.Integer, nullable=False)
    deadline_day = db1.Column(db1.Integer, nullable=False)
    bname = db1.Column(db1.Integer, nullable=False)  # button label; replaced by days-remaining in showtime()
@app.route('/',methods=['GET','POST'])
def index():
    """GET: render the task list.  POST: create a task from the submitted form."""
    if request.method == 'POST':
        content = request.form['text']
        deadlineinfoforyear = request.form['deadline_year']
        deadlineinfoformonth = request.form['deadline_month']
        deadlineinfoforday = request.form['deadline_day']
        # New tasks start un-completed; bname is the module-level default label.
        info = Buttonname(name=content,buttonname='False',deadline_year=deadlineinfoforyear,deadline_month=deadlineinfoformonth,deadline_day=deadlineinfoforday,bname=bname)
        try:
            db1.session.add(info)
            db1.session.commit()
            return redirect('/')
        except:
            # NOTE(review): bare except hides the real failure; narrow it and log.
            return "There was an error in doing so"
    else:
        contents = Buttonname.query.order_by(Buttonname.id).all()
        return render_template("index1.html",contents=contents,bname=bname)
@app.route('/delete/<int:id>')
def delete_task(id):
    """Delete the task with the given primary key, then return to the list."""
    tasktobedeleted = Buttonname.query.get_or_404(id)
    try:
        db1.session.delete(tasktobedeleted)
        db1.session.commit()
        return redirect('/')
    except:
        # NOTE(review): bare except — consider catching SQLAlchemyError only.
        return "There was an error in doing so."
@app.route('/edit/<int:id>',methods=['GET','POST'])
def edit(id):
    """GET: show the edit form for a task.  POST: save the edited task text."""
    task = Buttonname.query.get_or_404(id)
    if request.method == 'POST':
        cont = request.form['text']
        task.name = cont
        try:
            db1.session.commit()
            return redirect('/')
        except:
            return "There was an error in updating the task"
    else:
        return render_template('update1.html',task=task)
@app.route("/completed/<int:id>",methods=['GET','POST'])
def completed(id):
    """POST: mark the task as completed (buttonname='True').

    NOTE(review): a plain GET falls through and returns None, which Flask
    turns into a 500 — either drop 'GET' from methods or add a GET branch.
    """
    button = Buttonname.query.get_or_404(id)
    if request.method == 'POST':
        button.buttonname = 'True'
        try:
            db1.session.commit()
            return redirect('/')
        except:
            return "There was an error in doing so"
@app.route("/showtimeremaining/<int:id>",methods=['GET','POST'])
def showtime(id):
    """POST: compute the days until the task's deadline and store it in row.bname.

    NOTE(review): like completed(), a GET request returns None (HTTP 500).
    """
    row = Buttonname.query.get_or_404(id)
    if request.method == 'POST':
        # timedelta between the stored deadline and today (negative when overdue)
        bname = date(row.deadline_year,row.deadline_month,row.deadline_day)-date(datetime.datetime.today().year,datetime.datetime.today().month,datetime.datetime.today().day)
        row.bname = bname.days
        try:
            db1.session.commit()
            return redirect('/')
        except:
            return "There was an error in doing so"
if __name__ == '__main__':
app.run(debug=True) | true |
35ed201f5cb6e20d94601956fc2fef6e70692c8f | Python | SokKanaTorajd/als-smt3 | /selection_sort.py | UTF-8 | 275 | 3.8125 | 4 | [] | no_license | # # Selection Sort
# Selection sort: repeatedly move the smallest remaining element to the front.
A = [64, 25, 12, 22, 11]
for pos in range(len(A)):
    # Index of the smallest element in the unsorted tail A[pos:]
    # (min picks the first occurrence, matching the strict-> comparison).
    smallest = min(range(pos, len(A)), key=A.__getitem__)
    A[pos], A[smallest] = A[smallest], A[pos]
    print("looping ke %s"%(pos), A)
print("\nsorted array = ", A) | true |
b4361c8f1a5c09cabdd781588e7ac90b3deb3bab | Python | Aasthaengg/IBMdataset | /Python_codes/p02688/s339044459.py | UTF-8 | 225 | 2.71875 | 3 | [] | no_license | N, K = [int(v) for v in input().split()]
# Mark which of the N participants received at least one snack, then count
# those who received none.  (N and K are parsed from the first stdin line
# just above this block.)
S = [0] * N
for _ in range(K):
    snack = int(input())  # count of recipients on the next line; consumed but otherwise unused
    snukes = [int(v) for v in input().split()]
    for snuke in snukes:
        S[snuke-1] = 1  # participant numbers are 1-based
print(sum(x == 0 for x in S))
| true |
c0fc25151bece6567df7937f3c0d63f0293fb01e | Python | fyangss/questions | /python/hr/algorithms/forming_a_magic_square_medium.py | UTF-8 | 936 | 3.234375 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the formingMagicSquare function below.
def formingMagicSquare(s):
    """Minimum total |delta| needed to turn the 3x3 grid ``s`` into a magic square.

    There are exactly eight 3x3 magic squares (rotations/reflections of one
    base square), so the answer is the cheapest distance to any of them.
    """
    magic_squares = (
        ((8, 1, 6), (3, 5, 7), (4, 9, 2)),
        ((4, 9, 2), (3, 5, 7), (8, 1, 6)),
        ((6, 1, 8), (7, 5, 3), (2, 9, 4)),
        ((2, 9, 4), (7, 5, 3), (6, 1, 8)),
        ((2, 7, 6), (9, 5, 1), (4, 3, 8)),
        ((4, 3, 8), (9, 5, 1), (2, 7, 6)),
        ((6, 7, 2), (1, 5, 9), (8, 3, 4)),
        ((8, 3, 4), (1, 5, 9), (6, 7, 2)),
    )

    def distance(square):
        # Sum of per-cell absolute differences between ``square`` and ``s``.
        return sum(abs(square[i][j] - s[i][j])
                   for i in range(len(s)) for j in range(len(s)))

    return min(distance(square) for square in magic_squares)
if __name__ == '__main__':
    # HackerRank harness: read the 3x3 grid (one row per stdin line) and
    # write the answer to the file named by the OUTPUT_PATH env variable.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    s = []
    for _ in range(3):
        s.append(list(map(int, input().rstrip().split())))
    result = formingMagicSquare(s)
    fptr.write(str(result) + '\n')
    fptr.close()
| true |
3b3271552e2a7cc2ee3938b0918e56a30003a044 | Python | dsapandora/genetic-unity | /crossover.py | UTF-8 | 3,879 | 3.984375 | 4 | [
"MIT"
] | permissive | # Valentin Macé
# valentin.mace@kedgebs.com
# Developed for fun
# Feel free to use this code as you wish as long as you quote me as author
"""
crossover.py
~~~~~~~~~~
A module to implement all crossover routines used in a genetic algorithm
"""
import copy
from random import randint
from game import *
def crossover(env, parent1, parent2, crossover_method):
    """
    Takes two neural nets and produce a child according to :param crossover_method
    Example of working (method = 'neuron'):
    1- Two networks are created (copies of each parent)
    2- Selects a random neuron in a random layer OR a random bias in a random layer
    3- Switches this neuron OR bias between the two networks
    4- Each network plays a game
    5- Best one is selected
    Principle is the same for weight or layer methods
    :param env:(UnityEnvironment) Environment where evaluation games will be played
    :param parent1:(NeuralNetwork) first parent
    :param parent2:(NeuralNetwork) second parent
    :param crossover_method:(str) to apply crossover over a single weight, a neuron or an entire layer
    :return:(NeuralNetwork) Child
    """
    net1 = copy.deepcopy(parent1) # making copies (children) otherwise we manipulate the actual parents
    net2 = copy.deepcopy(parent2)
    # Coin flip: 0 -> crossover on the weights (per crossover_method), 1 -> on a bias.
    # NOTE(review): an unrecognised crossover_method combined with the weights
    # branch leaves both children as plain copies of the parents (no swap).
    weights_or_biases = randint(0, 1)
    if weights_or_biases == 0:
        if crossover_method == 'weight':
            weight_crossover(net1, net2)
        elif crossover_method == 'neuron':
            neuron_crossover(net1, net2)
        elif crossover_method == 'layer':
            layer_crossover(net1, net2)
    else: # crossover over bias
        bias_crossover(net1, net2)
    # Evaluate each child with one game; the higher score survives (ties go to net2).
    game = Game(unity_env=env, time_scale=100.0, width=0, height=0, target_frame_rate=-1, quality_level=0)
    score1 = game.start([net1])
    score2 = game.start([net2])
    if score1 > score2:
        return net1
    else:
        return net2
def weight_crossover(net1, net2):
    """
    Exchanges one randomly selected weight between the two networks (in place)
    :param net1:(NeuralNetwork) First parent
    :param net2:(NeuralNetwork) Second parent
    """
    # Random coordinates (layer, neuron, weight) drawn from net1's topology;
    # both parents are assumed to share the same architecture.
    li = randint(0, len(net1.weights) - 1)
    ni = randint(0, len(net1.weights[li]) - 1)
    wi = randint(0, len(net1.weights[li][ni]) - 1)
    # Tuple assignment swaps the two scalars without an explicit temp variable.
    net1.weights[li][ni][wi], net2.weights[li][ni][wi] = \
        net2.weights[li][ni][wi], net1.weights[li][ni][wi]
def neuron_crossover(net1, net2):
    """
    Switches neuron between two NeuralNetwork
    :param net1:(NeuralNetwork) First parent
    :param net2:(NeuralNetwork) Second parent
    """
    layer = randint(0, len(net1.weights) - 1)          # random layer
    neuron = randint(0, len(net1.weights[layer]) - 1)  # random neuron
    # Deep-copy only the neuron being exchanged: the previous implementation
    # deep-copied the *entire* network just to read back this single row.
    kept = copy.deepcopy(net1.weights[layer][neuron])
    net1.weights[layer][neuron] = net2.weights[layer][neuron]
    net2.weights[layer][neuron] = kept
def layer_crossover(net1, net2):
    """
    Switches a whole layer between two NeuralNetwork
    :param net1:(NeuralNetwork) First parent
    :param net2:(NeuralNetwork) Second parent
    """
    layer = randint(0, len(net1.weights) - 1)  # random layer
    # Deep-copy only the exchanged layer rather than the entire network
    # (the previous implementation duplicated every layer of net1).
    kept = copy.deepcopy(net1.weights[layer])
    net1.weights[layer] = net2.weights[layer]
    net2.weights[layer] = kept
def bias_crossover(net1, net2):
    """
    Switches a single bias between two NeuralNetwork
    :param net1: (NeuralNetwork) First parent
    :param net2: (NeuralNetwork) Second parent
    """
    layer = randint(0, len(net1.biases) - 1)        # random layer
    bias = randint(0, len(net1.biases[layer]) - 1)  # random bias
    # Copy only the single bias entry being swapped instead of deep-copying
    # the whole network as the previous implementation did.
    kept = copy.deepcopy(net1.biases[layer][bias])
    net1.biases[layer][bias] = net2.biases[layer][bias]
    net2.biases[layer][bias] = kept
| true |
48740820387b2a59f6f76829dab335fc4a2d4ab0 | Python | jejimenez/tsp_psp_fundamental_exercises | /assignment2/assignment3_entrega/assignment1_code/assignment1.py | UTF-8 | 3,302 | 3.828125 | 4 | [] | no_license | """
.. module:: LinkedList
:platform: Unix, Windows
:synopsis: Load the file with the values every. The file must be in the same directory with the name
>>> values.txt.
.. moduleauthor:: Jaime Jimenez
"""
import math
class Node(object):
    """A single cell of a singly linked list."""
    def __init__(self, value=None, next=None):
        self.value = value  # payload held by this node
        self.next = next    # the following Node, or None at the tail
class LinkedList(object):
    """Singly linked list whose add() prepends at the head."""

    def __init__(self):
        self.head = None

    def add(self, value):
        """Prepend a new node holding *value*; it becomes the new head."""
        self.head = Node(value, self.head)

    def remove(self, value):
        """Unlink the first node whose value equals *value*.

        Raises ValueError when no node matches.
        """
        previous, node = None, self.head
        # Walk until the matching node, remembering the one before it so the
        # link can be re-stitched (or the head replaced).
        while node is not None and node.value != value:
            previous, node = node, node.next
        if node is None:
            raise ValueError('No se encontró el elemento')
        if previous is None:
            self.head = node.next
        else:
            previous.next = node.next

    def get_prior(self):
        """Return the head node (None when the list is empty)."""
        return self.head

    def get_next_by_value(self, value):
        """Return the node that follows the first node matching *value*.

        Raises ValueError when no node matches.
        """
        node = self.head
        while node is not None:
            if node.value == value:
                return node.next
            node = node.next
        raise ValueError('No se encontró el elemento')

    def __getitem__(self, index):
        """Index-based access; also drives the legacy for-loop protocol.

        Raises StopIteration when *index* runs past the tail, which ends
        iteration when the list is looped over.
        """
        node = self.head
        for _ in range(index):
            if node.next is None:
                raise StopIteration
            node = node.next
        return node
# Driver: load the numbers listed one-per-line in values.txt into a linked
# list, then report their mean and sample standard deviation.
print('App initiated...')
print('Loading file value.txt')
f = open('values.txt', 'r+')
print('File values.txt loaded')
n = 0        # count of values read
sum_val = 0  # running accumulator, reused for both the mean and SD passes
line_val = None
mean = None
dev = None
list_vals = LinkedList()
print('Loading values in LinkedList')
for line in f:
    # NOTE(review): only '\r' is stripped, so a line holding just '\n' still
    # counts as non-empty and float() below would raise for it.
    if str(line).rstrip('\r') != '':
        n+=1
        try:
            line_val = float(str(line))
            list_vals.add(line_val)
        except ValueError:
            print('Error al intentar convertir el valor '+line+'.')
            raise ValueError('Imposible convertir el valor '+line+'.')
print("--------------------------")
print('Calculating mean')
for nd in list_vals:
    sum_val += nd.value
mean = sum_val/n
#print('sum ='+str(sum_val))
print('MEAN = '+str(mean))
print('Calculating SD')
sum_val = 0
for nd in list_vals:
    x = (nd.value - mean) * (nd.value - mean)
    sum_val+=x
#print('Sumatoria (Xi-Xavg)^2 = '+str(sum_val))
dev = math.sqrt(sum_val/(n-1))  # sample standard deviation (n-1 denominator)
print('SD = '+str(dev))
8a9c13928db4e743a3eeb10d675e5e6fc89b55b3 | Python | shubhank-saxena/youtube-api-search | /backend/search_api/views.py | UTF-8 | 1,867 | 2.625 | 3 | [] | no_license | import logging
import os
from django.conf import settings
from django.core.paginator import Paginator
from django.http import HttpResponse, JsonResponse
from backend.search_api.models import Youtube
from backend.search_api.serializers import YoutubeSerializer
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(module)s [%(levelname)s] %(message)s')
def index(request):
    """
    Demo API for testing
    Returns a static hello-world JSON payload (useful as a liveness check).
    :param request:
    :return:
    """
    json_payload = {"message": "hello world!"}
    return JsonResponse(json_payload)
def get_videos(request):
    """
    A GET API which returns the stored video data in a paginated response sorted in descending order of published
    datetime.
    getvideos/?q=messi&page=1
    :param request:
    :return:
    """
    query_title = request.GET.get('q')
    # NOTE(review): query_desc is read but never used — the description filter
    # below reuses query_title instead; this looks like a bug.
    query_desc = request.GET.get('desc')
    # NOTE(review): a missing 'page' parameter makes int(None) raise outside
    # the try block, producing an unhandled 500.
    page_number = int(request.GET.get('page'))
    try:
        # search_results = Youtube.objects.raw(final_query, [query_title_string, query_desc_string])
        """
        Search the stored videos using their title and description
        """
        search_results = Youtube.objects.filter(title__icontains=query_title if query_title is not None else '', description__contains=query_title if query_title is not None else '').order_by(
            '-published_at'
        )
        '''
        Pagination
        '''
        paginator = Paginator(search_results, 25)
        page_obj = paginator.get_page(page_number)
        '''
        Serializing results using Django Rest Framework
        '''
        serialized_results = YoutubeSerializer(page_obj.object_list, many=True)
        return JsonResponse({"result": serialized_results.data, "total_page": paginator.num_pages})
    except Exception as e:
        logging.error(e)
        # NOTE(review): the raw exception object `e` is placed in the JSON
        # payload; it is presumably not JSON-serializable — use str(e).
        return JsonResponse({"success": "failed", "result": e})
| true |
3154ca1923b2c8b82e743e4205646c658a0c9953 | Python | ErlangZ/projecteuler | /30.py | UTF-8 | 114 | 3.015625 | 3 | [] | no_license | print [i**5 for i in xrange(10)]
# Project Euler #30 (fifth powers): sum every number in [2, 600000) that equals
# the sum of the 5th powers of its digits.  (Python 2: print statement, xrange.)
print sum([x for x in xrange(2, 600000) if sum([int(i)**5 for i in str(x)])==x ])
9bba2dbb9410ac6b62a526b0213289af7a5ca340 | Python | FlyingIsland/financial_fundamentals | /financial_fundamentals/edgar.py | UTF-8 | 4,024 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | '''
Created on Jan 26, 2013
@author: akittredge
'''
import requests
from BeautifulSoup import BeautifulSoup
import datetime
from urlparse import urljoin
import blist
import time
from requests.exceptions import ConnectionError
from financial_fundamentals.sec_filing import Filing
import re
def get_filings(symbol, filing_type):
    '''Get the last xbrl filed before date.
    Returns a Filing object, return None if there are no XBRL documents
    prior to the date.
    Step 1 Search for the ticker and filing type,
    generate the urls for the document pages that have interactive data/XBRL.
    Step 2 : Get the document pages, on each page find the url for the XBRL document.
    Return a blist sorted by filing date.
    '''
    # blist.sortedlist keeps the filings ordered by filing date as they are added.
    filings = blist.sortedlist(key=_filing_sort_key_func)
    document_page_urls = _get_document_page_urls(symbol, filing_type)
    for url in document_page_urls:
        filing = _get_filing_from_document_page(url)
        filings.add(filing)
    # Thread the filings into a chain so each knows its chronological successor.
    for i in range(len(filings) - 1):
        filings[i].next_filing = filings[i + 1]
    return filings
# EDGAR company-search URL template; count=100 caps the result set.
SEARCH_URL = ('http://www.sec.gov/cgi-bin/browse-edgar?action=getcompany&'
              'CIK={symbol}&type={filing_type}&dateb=&owner=exclude&count=100')
def _get_document_page_urls(symbol, filing_type):
    '''Get the edgar filing document pages for the CIK.

    Yields the absolute URL of the documents page for each search-result row
    that advertises "Interactive Data" (i.e. filings that include XBRL).
    '''
    search_url = SEARCH_URL.format(symbol=symbol, filing_type=filing_type)
    search_results_page = get_edgar_soup(url=search_url)
    xbrl_rows = [row for row in
                 search_results_page.findAll('tr') if
                 row.find(text=re.compile('Interactive Data'))]
    for xbrl_row in xbrl_rows:
        documents_page = xbrl_row.find('a', {'id' : 'documentsbutton'})['href']
        documents_url = 'http://sec.gov' + documents_page
        yield documents_url
def _get_filing_from_document_page(document_page_url):
    '''Find the XBRL link on a page like
    http://www.sec.gov/Archives/edgar/data/320193/000119312513300670/0001193125-13-300670-index.htm
    '''
    filing_page = get_edgar_soup(url=document_page_url)
    # The filing date is the .info div that follows the "Filing Date" label,
    # formatted YYYY-MM-DD.
    period_of_report_elem = filing_page.find('div', text='Filing Date')
    filing_date = period_of_report_elem.findNext('div', {'class' : 'info'}).text
    filing_date = datetime.date(*map(int, filing_date.split('-')))
    # Collect table rows whose type cell marks the XBRL instance document.
    type_tds = []
    text_to_find = ['EX-101.INS', ' XBRL INSTANCE DOCUMENT']
    for each_text in text_to_find:
        type_td_found = filing_page.findAll('td', text=each_text)
        if(type_td_found):
            tr_d = type_td_found[0].findPrevious('tr')
            if(tr_d):
                type_tds.append(tr_d)
    for type_td in list(set(type_tds)):
        try:
            # xbrl_link = type_td.findPrevious('a', text=re.compile('\.xml$')).parent['href']
            xbrl_link = type_td.find('a', text=re.compile('\.xml$')).parent['href']
        except AttributeError:
            continue
        else:
            # NOTE(review): re.match anchors at the *start* of the string, so
            # '\d\.xml$' only accepts links that are exactly "<digit>.xml";
            # re.search was probably intended.  Also, if no candidate passes,
            # xbrl_link may be unbound (NameError) or hold a rejected link
            # when urljoin below runs.
            if not re.match(pattern='\d\.xml$', string=xbrl_link):
                # we don't want files of the form 'jcp-20120504_def.xml'
                continue
            else:
                break
    xbrl_url = urljoin('http://www.sec.gov', xbrl_link)
    filing = Filing.from_xbrl_url(filing_date=filing_date, xbrl_url=xbrl_url)
    return filing
def _filing_sort_key_func(filing_or_date):
    """Normalize Filings, datetimes and dates to a comparable date for sorting."""
    if isinstance(filing_or_date, Filing):
        # A Filing exposes its filing date as an attribute.
        return filing_or_date.date
    if isinstance(filing_or_date, datetime.datetime):
        # Collapse a full datetime down to its calendar date.
        return filing_or_date.date()
    # Anything else (typically a datetime.date) is used as-is.
    return filing_or_date
def get_edgar_soup(url):
    '''Fetch *url* (via the retrying get() below) and parse it with BeautifulSoup.'''
    response = get(url)
    return BeautifulSoup(response)
def get(url):
    '''requests.get wrapped in a backoff retry.
    Retries up to 5 times on ConnectionError, sleeping 0,1,2,3,4 seconds.
    '''
    wait = 0
    while wait < 5:
        try:
            return requests.get(url).text
        except ConnectionError:
            print 'ConnectionError, trying again in ', wait
            time.sleep(wait)
            wait += 1
    # NOTE(review): this is a while/else — it runs once the loop exhausts
    # without break, and the bare `raise` has no active exception at that
    # point, so after 5 failures it raises a "no active exception" error
    # instead of re-raising the last ConnectionError.
    else:
        raise
| true |
c5f24b4b66e99df6182a5620527da66a6bc45406 | Python | Ang9876/scalapy | /bench/scripts/summary.py | UTF-8 | 1,813 | 2.8125 | 3 | [
"MIT"
] | permissive | from run import benchmarks, runs, configurations
import numpy as np
# Pre-compute "<benchmark>-<size>" labels for every configured benchmark/size
# pair.  NOTE(review): this list is never referenced again in this module.
bench_and_size = []
for (bench, sizes, _) in benchmarks:
    for size in sizes:
        bench_and_size.append(bench + "-" + str(size))
def config_data(bench, conf):
    """Load every measurement point for one benchmark under one configuration.

    Reads bench/results/<conf>/<bench>/<run> for each run, trims warm-up
    samples (keeps the last 2000 points, or the last 10 for short runs),
    drops the top 1% of points as outliers, and returns the rest as a numpy
    array.  Missing run files are silently skipped.
    """
    out = []
    for run in range(runs):
        try:
            points = []
            with open('bench/results/{}/{}/{}'.format(conf, bench, run)) as data:
                for line in data.readlines():
                    points.append(float(line))
            # take only last 2000 to account for startup
            if len(points) < 100:
                points = points[-10:]
            else:
                points = points[-2000:]
            # filter out 1% worst measurements as outliers
            pmax = np.percentile(points, 99)
            for point in points:
                if point <= pmax:
                    out.append(point)
        except IOError:
            pass
    return np.array(out)
def peak_performance():
    """Summarise each benchmark/size/configuration as its median latency (ns).

    Returns rows of [bench, size, median_conf1, ...] with values rendered as
    strings; a configuration with no data contributes "0".
    """
    out = []
    for bench, sizes, _ in benchmarks:
        for size in sizes:
            res = []
            for conf in configurations:
                try:
                    processed = config_data(bench + "-" + str(size), conf)
                    # NOTE(review): the label says "mean" but the value printed
                    # and stored is the 50th percentile (the median).
                    print("{} ({}) - {}: mean {} ns, stddev {} ns".format(bench, size, conf, np.percentile(processed, 50), np.std(processed)))
                    res.append(np.percentile(processed, 50))
                except IndexError:
                    res.append(0)
            out.append([bench, str(size)] + [str(x) for x in res])
    return out
if __name__ == '__main__':
    # Emit a CSV to stdout: a header row, then one row per benchmark/size.
    leading = ['name', "size"]
    for conf in configurations:
        leading.append(conf)
    zipped_means = peak_performance()
    print(','.join(leading))
    for res in zipped_means:
        print(','.join(res))
| true |
b136c9a20f5a9669ce0adbb236b92e805f7b5dda | Python | fxy1018/Leetcode | /LC_1410_MatrixWaterInjection.py | UTF-8 | 2,057 | 3.921875 | 4 | [] | no_license | '''
Given a two-dimensional matrix, the value of each grid represents the height of the terrain. The flow of water will only flow up, down, right and left, and it must flow from the high ground to the low ground. As the matrix is surrounded by water, it is now filled with water from (R,C) and asked if water can flow out of the matrix.
Example
Given
mat =
[
[10,18,13],
[9,8,7],
[1,2,3]
]
R = 1, C = 1, return "YES"。
Explanation:
(1,1) →(1,2)→Outflow.
Given
mat =
[
[10,18,13],
[9,7,8],
[1,11,3]
]
R = 1, C = 1, return "NO"。
Explanation:
Since (1,1) cannot flow to any other grid, it cannot flow out.
'''
class Solution:
    """
    @param matrix: the height matrix
    @param R: the row of (R,C)
    @param C: the columns of (R,C)
    @return: Whether the water can flow outside
    """
    def waterInjection(self, matrix, R, C):
        """Return "YES" if water poured at (R, C) can reach any edge of the grid.

        Water only moves to a 4-neighbour with strictly lower height; since
        the matrix is surrounded by water, reaching any border cell means it
        flows out.
        """
        if not matrix or not matrix[0]:
            return "NO"
        row = len(matrix)
        col = len(matrix[0])
        if R < 0 or C < 0 or R >= row or C >= col:
            return "NO"
        return self.helpFun(matrix, R, C, row, col, set())

    def helpFun(self, matrix, R, C, row, col, visit):
        """Depth-first search toward any border, moving only strictly downhill."""
        # Any border cell drains straight into the surrounding water.  (The
        # previous version only recognised the bottom/right edges, and a start
        # on the top/left edge then wrapped around via negative indexing.)
        if R == 0 or C == 0 or R == row - 1 or C == col - 1:
            return "YES"
        visit.add((R, C))
        curr = matrix[R][C]
        # Heights strictly decrease along a path, so recursion terminates;
        # the visit set just avoids re-exploring dead ends.
        for nr, nc in ((R - 1, C), (R + 1, C), (R, C - 1), (R, C + 1)):
            if (nr, nc) not in visit and matrix[nr][nc] < curr:
                if self.helpFun(matrix, nr, nc, row, col, visit) == "YES":
                    return "YES"
        return "NO"
| true |
ae18aa3d3efd2af4a7a78f378c67aca4064c8545 | Python | benred42/programming-language-classifier | /programming_language_classifier/tests/test_get_data.py | UTF-8 | 739 | 2.625 | 3 | [] | no_license | from programming_language_classifier import get_data as gd
def test_get_content():
    """get_content should return [language, file-body] pairs for the fixture files."""
    assert gd.get_content("tests/function_testfiles/") == [["C", "This is a C file\n"],
                                                           ["JavaScript", "This is a javascript file\n"],
                                                           ["Ruby", "This is a Ruby file\n"],
                                                           ["Python", "This is a Python file\n"]]
def test_make_dataframe():
    """make_dataframe should expose languages in column 0 and file bodies in column 1."""
    test_list = gd.get_content("tests/function_testfiles/")
    assert gd.make_dataframe(test_list)[0][0] == "C"
    assert gd.make_dataframe(test_list)[1][0] == "This is a C file\n"
    assert gd.make_dataframe(test_list)[1][2] == "This is a Ruby file\n"
| true |
a40b8ae777134fef81fcdba24dc56787e95cf205 | Python | tommydemarco/EmployeeManagement-Django | /apps/employees/models.py | UTF-8 | 2,072 | 2.703125 | 3 | [] | no_license | from django.db import models
#importing a model from another app
from apps.fields.models import Field
#importing the third-party app CKEDITOR
from ckeditor.fields import RichTextField
#Employee main model
class Employee(models.Model):
    """An employee record: contact details, base, field, skills and a rich-text CV."""
    #creating the base choices
    BASE_CHOICES = (
        ("AGP", "Malaga"),
        ("EDI", "Edinburgh"),
        ("TFS", "Tenerife South"),
        ("PVD", "Providence"),
        ("DUB", "Dublin"),
    )
    #the first attribute represents the name of the model that will appear in admin
    first_name = models.CharField('First name', max_length=50)
    last_name = models.CharField('Last Name', max_length=20)
    # NOTE(review): full_name is never populated automatically anywhere in
    # this model — confirm whether it should be derived from first/last name.
    full_name = models.CharField('Full name', max_length=120, blank=True)
    contact_phone = models.IntegerField('Contact Number')
    address = models.CharField('Adress', max_length=80)
    base = models.CharField('Base', max_length=3, choices=BASE_CHOICES)
    field = models.ForeignKey(Field, on_delete=models.CASCADE)
    #image = models.ImageField()
    skills = models.ManyToManyField('Skill')
    #adding this field that will be edited with the third-party app "CKEDITOR"
    #read the documentation for more information
    employee_cv = RichTextField()
    #changing the name of the model in the django admin interface and other customizations
    class Meta:
        # NOTE(review): verbose_name is the *singular* admin label, yet it is
        # set to "Employees list" here — probably meant "Employee".
        verbose_name = "Employees list"
        verbose_name_plural = "Employees lists"
        #ordering table rows per id
        ordering = ['id']
        #disallowing the possibility to put a field that has the same attributes as before
        unique_together = ('first_name', 'last_name')
    def __str__(self):
        return "Employee id: {}, Employee name: {}, {}. Base: {}. Contact number: {}".format(self.id, self.first_name, self.last_name, self.base, self.contact_phone)
#secondary model Skills
class Skill(models.Model):
    """A skill that can be attached to any number of employees (see Employee.skills)."""
    skill = models.CharField("Skill", max_length=60)
    class Meta:
        verbose_name = "Skill"
        verbose_name_plural = "Skills"
    def __str__(self):
        return "{}".format(self.skill)
| true |
46098bb478f5821f8c5a0bc3cb3cc7d309111a1f | Python | sergiodealencar/courses | /material/curso_em_video/ex047.py | UTF-8 | 135 | 3.609375 | 4 | [
"MIT"
] | permissive | print('Os números pares no intervalo entre 1 e 50 são os seguintes:')
# Print every even number from 2 through 50 on one line, then a closing "FIM".
# Unpacking the range into print() joins the values with single spaces and the
# end=' ' keeps the trailing space the per-number loop used to produce.
print(*range(2, 51, 2), end=' ')
print('\nFIM')
| true |
6739f5702e0f988e318f3d809e92fb54e93186fa | Python | qinghuan1314/python | /excel | UTF-8 | 469 | 3.0625 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'littley'
# NOTE(review): despite the python3 shebang above, the bare `print`
# statements below are Python 2 syntax.
import xlrd
data = xlrd.open_workbook('test.xlsx')
# table = data.sheets()[0] # fetch a sheet by positional index
# table = data.sheet_by_index(0) # fetch a sheet by positional index
table = data.sheet_by_name(u'Sheet1') # fetch the sheet by its name
nrows = table.nrows # total number of rows
ncols = table.ncols # total number of columns
print nrows,ncols
# print the first row
print table.row_values(0)
# print the "first" column (NOTE: index 1 is actually the second column, 0-based)
print table.col_values(1)
| true |
fb7f47fe447aea1ea608f48a8745627d44547c65 | Python | mountainlandbot/argonaut | /argonaut/model/comment.py | UTF-8 | 1,398 | 2.75 | 3 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | """The comment model"""
from sqlalchemy import Column, ForeignKey
from sqlalchemy.types import Integer, Unicode, UnicodeText, Date
from argonaut.model.meta import Base, Session
class Comment(Base):
    """A reader comment attached to a post (row in the 'comment' table)."""
    __tablename__ = 'comment'
    id = Column(Integer, primary_key=True)
    post_id = Column(Integer, ForeignKey('post.id'), nullable=False)
    body = Column(UnicodeText, nullable=False)
    posted = Column(Date, nullable=False)           # date the comment was made
    author = Column(Unicode(50), nullable=True)     # optional display name
    author_website = Column(Unicode(300), nullable=True)
    def __init__(self, id=None,post_id=None,body=None,posted=None,author=None,author_website=None):
        self.id = id
        self.post_id = post_id
        self.body = body
        self.posted = posted
        self.author = author
        self.author_website = author_website
    def __unicode__(self):
        # A comment renders as its body text.
        return self.body
    def __repr__(self):
        return "<Comment('%s','%s', '%s', '%s', '%s', '%s')>" % (self.id,self.post_id,self.body,self.posted,self.author,self.author_website)
    __str__ = __unicode__
def new():
    """Return a fresh, empty Comment instance."""
    return Comment()
def save(comment):
    """Add *comment* to the current SQLAlchemy session and commit immediately."""
    Session.add(comment)
    Session.commit()
def get_post_comments(post_id):
    """Return all Comment rows belonging to the post with the given id."""
    return Session.query(Comment).filter_by(post_id=post_id).all()
def get_post_comment_count(id):
    """Return how many comments the post with the given id has."""
    return Session.query(Comment).filter_by(post_id=id).count()
| true |
54f27b0783ff7a82a49f218b0af9ce3b2739571d | Python | heshington/amazon_price_checker | /main.py | UTF-8 | 1,693 | 2.828125 | 3 | [] | no_license | import requests
# Scrape the Amazon product page and email an alert when the current price is
# at or below `target_price`.
from pprint import pprint
from bs4 import BeautifulSoup
import smtplib
# target_price = input("Whats the price your willing to pay for this thingy?")
target_price = 100
URL = "https://www.amazon.com/TENDLIN-Compatible-Premium-Flexible-Silicone/dp/B07GZDTTXL/ref=sr_1_6?dchild=1&keywords=iphone%2Bxs%2Bmax%2Bleather%2Bcase&qid=1635283348&sr=8-6&th=1"
# Browser-like headers so Amazon serves the regular HTML page.
headers = {
    "content-type":"text",
    "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.71 Safari/537.36",
    "Accept-Language" : "en-US,en;q=0.9",
}
response = requests.get(URL, headers=headers)
website_html = response.text
soup = BeautifulSoup(website_html, 'html.parser')
# NOTE(review): soup.find returns None when these class names are absent
# (e.g. layout change or a captcha page), making .getText() below raise
# AttributeError — consider guarding.
item_title = soup.find(name="span", class_= "a-size-large product-title-word-break")
amazon_price = soup.find(name="span", class_="a-size-medium a-color-price priceBlockSalePriceString")
item_title = item_title.getText().strip()
amazon_price = amazon_price.getText()
amazon_price = amazon_price.strip("$")
print(amazon_price)
print(item_title)
if float(amazon_price) <= target_price:
    ##Send email
    # Sending Email with Python
    # NOTE(review): placeholders — supply real credentials/recipient via
    # configuration, not hard-coded strings.
    my_email = "FROM_EMAIL"
    password = "EMAIL_PASSWORD"
    with smtplib.SMTP("smtp.gmail.com") as connection:
        connection.starttls()
        connection.login(user=my_email, password=password)
        connection.sendmail(
            from_addr=my_email,
            to_addrs="TO_SEND_EMAIL",
            msg=f"Subject:Amazon Price Alert!\n\n"
                f"{item_title} has fallen below your target price of ${target_price}, it is now ${amazon_price}. \n"
                f"You can buy it now at \n "
                f"{URL}"
        )
| true |
bba83510e119268416178ff7e0042e8b3e75f242 | Python | barrysteyn/pelican_plugin-render_math | /test_math.py | UTF-8 | 3,365 | 2.75 | 3 | [] | no_license | import os
import unittest
from render_math import parse_tex_macros, _parse_macro, _filter_duplicates
class TestParseMacros(unittest.TestCase):
    """Unit tests for render_math's TeX \\newcommand parsing helpers.

    The expected 'definition' strings carry quadruple-escaped backslashes —
    presumably pre-escaped for downstream string substitution; confirm
    against _parse_macro's implementation.
    """
    def test_multiple_arguments(self):
        """Parse a definition with multiple arguments"""
        text = r'\newcommand{\pp}[2]{\frac{ #1}{ #2} \cdot 2}'
        line = {'filename': '/home/user/example.tex', 'line_num': 1, 'def':
                text}
        parsed = _parse_macro(line)
        expected = {'name':'pp',
                    'definition': '\\\\\\\\frac{ #1}{ #2} \\\\\\\\cdot 2',
                    'args': '2',
                    'line': 1,
                    'file': '/home/user/example.tex'}
        self.assertEqual(parsed, expected)
    def test_no_arguments(self):
        """Parse a definition without arguments"""
        text = r'\newcommand{\circ}{2 \pi R}'
        line = {'filename': '/home/user/example.tex', 'line_num': 1, 'def':
                text}
        parsed = _parse_macro(line)
        expected = {'name':'circ',
                    'definition': '2 \\\\\\\\pi R',
                    'line': 1,
                    'file': '/home/user/example.tex'
                    }
        self.assertEqual(parsed, expected)
    def test_repeated_definitions_same_file(self):
        """Last definition is used"""
        text1 = r'2 \\\\\\\\pi R'
        text2 = r'2 \\\\\\\\pi r'
        common_file = '/home/user/example.tex'
        def1 = {'name': 'circ', 'line': 1, 'definition': text1,
                'file': common_file}
        def2 = {'name': 'circ', 'line': 2, 'definition': text2,
                'file': common_file}
        expected = [{'name':'circ',
                    'definition': r'2 \\\\\\\\pi r',
                    'line': 2,
                    'file': '/home/user/example.tex'
                    }]
        parsed = _filter_duplicates(def1, def2)
        self.assertEqual(parsed, expected)
    def test_repeated_definitions_different_files(self):
        """Last definition is used"""
        text1 = r'2 \\\\\\\\pi R'
        text2 = r'2 \\\\\\\\pi r'
        file1 = '/home/user/example1.tex'
        file2 = '/home/user/example2.tex'
        def1 = {'name': 'circ', 'line': 1, 'definition': text1,
                'file': file1}
        def2 = {'name': 'circ', 'line': 1, 'definition': text2,
                'file': file2}
        expected = [{'name':'circ',
                    'definition': r'2 \\\\\\\\pi r',
                    'line': 1,
                    'file': '/home/user/example2.tex'
                    }]
        parsed = _filter_duplicates(def1, def2)
        self.assertEqual(parsed, expected)
    def test_load_file(self):
        """End-to-end parse of the latex-commands-example.tex fixture file."""
        cur_dir = os.path.split(os.path.realpath(__file__))[0]
        test_fname = os.path.join(cur_dir, "latex-commands-example.tex")
        parsed = parse_tex_macros([test_fname])
        expected = [{'name': 'pp',
                     'definition': '\\\\\\\\frac{\\\\\\\\partial #1}{'
                     '\\\\\\\\partial #2}',
                     'args': '2'},
                    {'name': 'bb',
                     'definition': '\\\\\\\\pi R',},
                    {'name': 'bc',
                     'definition': '\\\\\\\\pi r',
                     }]
        self.maxDiff = None
        self.assertEqual(parsed, expected)
if __name__ == '__main__':
unittest.main() | true |
35e6e81a3b08740ddc254794211e88c15691d172 | Python | ThapaKazii/Myproject | /mini calculator.py | UTF-8 | 7,689 | 4.625 | 5 | [] | no_license | """ A mini calculator project.. """
# --- input helpers -----------------------------------------------------------

def _read_number(prompt):
    """Prompt once and return the value as a float, or None for bad input.

    Replaces the old isdigit()/isalpha() checks, which were inverted in
    several branches (valid numbers were rejected), crashed on floats
    (float has no .isalpha()), and could not accept negatives or decimals.
    """
    raw = input(prompt)
    try:
        return float(raw)
    except ValueError:
        print("Sorry, you have entered string value..")
        return None


def _read_operands():
    """Read the two operands of a binary operation; None if either is bad."""
    first = _read_number("\tEnter the first one.. ")
    if first is None:
        return None
    second = _read_number("\tEnter the second one.. ")
    if second is None:
        return None
    return first, second


AUD_TO_NPR = 78.3910  # fixed AUD -> NPR conversion rate used by this script

print("What type of calculator do you wanna use?? ")
print("A. Basic calculator ")
print("B. Financial calculator ")
# print("C. Scientific calculator ")

## Input as user defined.
input1 = input("\tSelect what type of operation you wanna use.. \tA\tB\t")

if input1 == 'A':
    print("Please select the operation. ")
    print("\t1. Addition.")
    print("\t2. Subtraction.")
    print("\t3. Division.")
    print("\t4. Multiplication.")
    print("\t5. Calculation of powers.")
    print("\t6. Square roots.")
    print("\t7. Cube roots.")
    print("\t8. Modulus.\n")
    input2 = input("What type of operation do tou want to perform? \t1\t2\t3\t4\t5\t6\t7\t8\t\n")

    if input2 == '1':
        print("\tFor Addition operation: \n")
        operands = _read_operands()
        if operands is not None:
            first_num, second_num = operands
            print("\t", first_num, "+", second_num, "=", first_num + second_num)
    elif input2 == '2':
        print("\tFor Subtraction operation: \n")
        operands = _read_operands()
        if operands is not None:
            first_num, second_num = operands
            print("\t", first_num, "-", second_num, "=", first_num - second_num)
    elif input2 == '3':
        print("\tFor Division operation: \n")
        operands = _read_operands()
        if operands is not None:
            first_num, second_num = operands
            # Bug fix: the original divided the raw input *strings* (TypeError).
            if second_num == 0:
                print("\tCannot divide by zero..")
            else:
                print("\t", first_num, "/", second_num, "=", first_num / second_num)
    elif input2 == '4':
        print("\tFor Multiplication operation: \n")
        operands = _read_operands()
        if operands is not None:
            first_num, second_num = operands
            # Bug fix: the original multiplied the raw input *strings*.
            print("\t", first_num, "*", second_num, "=", first_num * second_num)
    elif input2 == '5':
        print("\tFor Calculation of power form: \n")
        operands = _read_operands()
        if operands is not None:
            first_num, second_num = operands
            # Bug fix: the original called str.__pow__, which is not defined.
            print("\t", first_num, "^", second_num, "=", first_num ** second_num)
    elif input2 == '6':
        print("\tFor Square root operation: \n")
        num = _read_number("\tEnter the number.. ")
        if num is not None:
            if num < 0:
                print("\tCannot take the square root of a negative number..")
            else:
                print("\tThe square root of %s is %s " % (num, num ** 0.5))
    elif input2 == '7':
        print("\tFor Cube root operation: \n")
        num = _read_number("\tEnter the number.. ")
        if num is not None:
            # Keep the cube root real for negative inputs: a plain
            # (-x) ** (1/3) would produce a complex number in Python 3.
            if num >= 0:
                cube_root = num ** (1.0 / 3.0)
            else:
                cube_root = -((-num) ** (1.0 / 3.0))
            print("\tThe cube root of %s is %s " % (num, cube_root))
    elif input2 == '8':
        print("\tFor Modulus operation: \n")
        operands = _read_operands()
        if operands is not None:
            first_num, second_num = operands
            # Bug fix: the original called str.__mod__ (string formatting!).
            if second_num == 0:
                print("\tCannot take a modulus with zero..")
            else:
                print("\t", first_num, "%", second_num, "=", first_num % second_num)
    else:
        print("\tYou have entered wrong format..Check again.. ")
elif input1 == 'B':
    print("Please select the operation. ")
    print("1. Simple Interest.")
    #print("2. Compound Interest.")
    print("2. Conversion.\n")
    input3 = input("What type of operation do you want to perform? \t1\t2\t\n ")
    if input3 == '1':
        print("\tFor Simple Interest: \n")
        principle = _read_number("\tEnter the principle.. ")
        if principle is not None:
            time = _read_number("\tEnter the time.. ")
            if time is not None:
                rate = _read_number("\tEnter the rate.. ")
                if rate is not None:
                    interest = (principle * time * rate) / 100
                    print("\tThe required simple interest is: ", interest)
    elif input3 == '2':
        print("\tFor Conversion: \n")
        print("Select the operation.. ")
        print("1. AUS and NPR")
        print("2. AED and NPR")
        print("3. USD and NPR")
        print("4. INR and NPR")
        print("5. EUS and NPR\n")
        input4 = input("What type of conversion do you want?\t1\t2\t3\t4\t5\t6\t\n")
        if input4 == '1':
            print("\tA.AUS to NPR : \n")
            print("\tN.NPR to AUS\n")
            input5 = input("Which one do you want?\tA\tN\t")
            if input5 == 'A':
                print("\t1.Conversion of AUS to NPR : \n")
                # Bug fix: the original rejected the input whenever
                # isdigit() was True, i.e. every valid whole number.
                aud = _read_number("\tEnter the Australian Dollar:\t")
                if aud is not None:
                    print("\tNepalese rupee:\t", aud * AUD_TO_NPR)
            elif input5 == 'N':
                print("\t1.Conversion of NPR to AUD : \n")
                npr = _read_number("\tEnter the Nepalese Rupee:\t")
                if npr is not None:
                    print("\tAustralian Dollar:\t", npr * (1 / AUD_TO_NPR))
            else:
                print("\tPlease enter your value properly.")
        else:
            # Only AUS <-> NPR is implemented; all other menu entries fall here.
            print("\tPlease enter your value properly.")
    else:
        print("\tPlease enter your value properly.")
else:
    print("\tPlease enter your value properly.")
| true |
f8c32e5bfedb697d40764d64854bd7fadcb8b14f | Python | moyersjm/rosebotics2 | /src/examples_sound.py | UTF-8 | 1,214 | 3.625 | 4 | [] | no_license | """ Examples of how to make sounds with the EV3. """
import ev3dev.ev3 as ev3 # You need this!
import time
def main():
    """Demonstrate the EV3 sound API: beep, speech, tones, and volume control.

    Each demo step prints a label first, then blocks (.wait()) until the
    sound finishes; the sleeps just pace the demo for a listener.
    """
    print("Beeping:")
    ev3.Sound.beep().wait()
    time.sleep(1)

    print("Speaking:")
    ev3.Sound.speak("How are you?").wait()  # Must be a SHORT phrase
    time.sleep(1)

    print("Playing a note:")
    ev3.Sound.tone(440, 1500)  # Frequency 440 Hz, for 1.5 seconds

    # time.sleep(1)
    # print("Playing several notes:")
    # ev3.Sound.tone([
    #     (440, 500, 500),  # 440 Hz for 0.5 seconds, then 0.5 seconds rest
    #     (880, 200, 0)  # 880 Hz for 0.2 seconds, no rest (straight to next note)
    #     (385, 1.75, 300)  # 385 Hz for 1.75 seconds, 0.3 seconds rest
    # ]).wait()
    time.sleep(1)

    print("Changing the volume:")
    ev3.Sound.set_volume(25)  # 25% volume
    ev3.Sound.speak("Say it a little quieter now...").wait()
    time.sleep(1)

    ev3.Sound.set_volume(100)  # Full volume
    ev3.Sound.speak("Say it a little LOUDER now").wait()
    ev3.Sound.speak("You know you make me wanna (Shout!)").wait()

    # time.sleep(3)
    # print("Playing a song:")
    # ev3.Sound.play("/home/robot/csse120/assets/sounds/awesome_pcm.wav").wait()


# Run the demo immediately when the module is executed.
main()
| true |
4bf26b52e04ed6ea70a02a57caf3580b03fb1462 | Python | Mikeladels/18-february-2021 | /nomor 4.py | UTF-8 | 277 | 3.171875 | 3 | [] | no_license | #michelle adelia suwarno / xi mia 1 / 25
# Count how often each vowel occurs in a sentence, using a dictionary.
vowels = "aiueo"
ip_str = "Halo nama saya mikel, saya sedang belajar python"
ip_str = ip_str.casefold()  # normalize case before counting

# One counter per vowel, all starting at zero (insertion order preserved).
count = {vowel: 0 for vowel in vowels}
for letter in ip_str:
    if letter in count:
        count[letter] = count[letter] + 1
print(count)
e36e31f0a19ec88139078ec6b2dcb3d533d02ade | Python | miguelhers/huahe | /Margrabe.py | UTF-8 | 2,729 | 2.9375 | 3 | [] | no_license | from __future__ import division
from math import log, sqrt, exp
from scipy.stats import norm
# Default values used for testing
s1 = 200; s2 = 250
mu1 = 0.10; sigma1 = 0.3
mu2 = 0.10; sigma2 = 0.2
rate = 0.10; rho = 0.75
t = 1


def sigma(sig1=sigma1, sig2=sigma2, corr=rho):
    """Volatility of the ratio S1/S2: sqrt(s1^2 + s2^2 - 2*rho*s1*s2)."""
    return sqrt(sig1 ** 2 + sig2 ** 2 - 2 * corr * sig1 * sig2)


def m_d1(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    """d1 term of the Margrabe exchange-option formula.

    Bug fix: the original lambdas always called sigma(), m_d1(), ... with
    *no* arguments, so any parameters a caller passed were silently ignored
    (and the vega/correlation lambdas used the global t instead of `years`).
    All parameters are now threaded through consistently; the defaults keep
    the old no-argument behaviour unchanged.
    """
    sig = sigma(sig1, sig2, corr)
    return (log(stock1 / stock2) + 0.5 * sig ** 2 * years) / (sig * sqrt(years))


def m_d2(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return m_d1(stock1, stock2, sig1, sig2, years, corr) - sigma(sig1, sig2, corr) * sqrt(years)


def m_delta1(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return norm.cdf(m_d1(stock1, stock2, sig1, sig2, years, corr))


def m_delta2(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return -norm.cdf(m_d2(stock1, stock2, sig1, sig2, years, corr))


def m_gamma11(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return norm.pdf(m_d1(stock1, stock2, sig1, sig2, years, corr)) / (stock1 * sigma(sig1, sig2, corr) * sqrt(years))


def m_gamma22(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return norm.pdf(m_d2(stock1, stock2, sig1, sig2, years, corr)) / (stock2 * sigma(sig1, sig2, corr) * sqrt(years))


def m_gamma12(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return -norm.pdf(m_d1(stock1, stock2, sig1, sig2, years, corr)) / (stock2 * sigma(sig1, sig2, corr) * sqrt(years))


def m_theta(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return -stock1 * sigma(sig1, sig2, corr) * norm.pdf(m_d1(stock1, stock2, sig1, sig2, years, corr)) / (2 * sqrt(years))


def m_vega1(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return stock1 * sqrt(years) * norm.pdf(m_d1(stock1, stock2, sig1, sig2, years, corr)) * ((sig1 - corr * sig2) / sigma(sig1, sig2, corr))


def m_vega2(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return stock1 * sqrt(years) * norm.pdf(m_d1(stock1, stock2, sig1, sig2, years, corr)) * ((sig2 - corr * sig1) / sigma(sig1, sig2, corr))


def m_correlation(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    return -stock1 * sqrt(years) * norm.pdf(m_d1(stock1, stock2, sig1, sig2, years, corr)) * ((sig1 * sig2) / sigma(sig1, sig2, corr))


def m_margrabe(stock1=s1, stock2=s2, sig1=sigma1, sig2=sigma2, years=t, corr=rho):
    """Margrabe exchange-option value: S1*N(d1) - S2*N(d2)."""
    d1 = m_d1(stock1, stock2, sig1, sig2, years, corr)
    d2 = m_d2(stock1, stock2, sig1, sig2, years, corr)
    return stock1 * norm.cdf(d1) - stock2 * norm.cdf(d2)
def main():
    """Print the Margrabe value and all Greeks for the default test inputs.

    NOTE: Python 2 print statements -- this file predates Python 3.
    """
    print "Margrabe = "+str(m_margrabe()) + "\n"
    print "THE GREEKS \n"
    print "Delta Asset 1 = "+str(m_delta1())
    print "Delta Stock 2 = "+str(m_delta2()) +"\n"
    print "Gamma Asset 11 = "+str(m_gamma11())
    print "Gamma Stock 12 = "+str(m_gamma12())
    print "Gamma Stock 22 = "+str(m_gamma22()) + "\n"
    print "Theta = "+str(m_theta()) +"\n"
    print "Vega sigma 1 = "+str(m_vega1())
    print "Vega sigma 2 = "+str(m_vega2()) + "\n"
    print "Correlation = "+str(m_correlation()) + "\n"
    # Debug output: raw sigma, inputs, d1/d2 and two hard-coded cdf probes.
    print "sigma"
    print sigma()
    print "sig1 : " + str(sigma1) + " sig2: " +str(sigma2)
    print "d1: " + str(m_d1())
    print m_d2()
    print str(norm.cdf(-0.647510235324)) + ' , ' + str(norm.cdf(-0.930352947799))


if __name__=='__main__':
    main()
9cb30d1578f08e8603b59a4a05b189e2eb8fe221 | Python | Jrk57j/Python-Learning-Python | /ch6.py | UTF-8 | 3,202 | 3.03125 | 3 | [] | no_license | #alien_O={'color':'red','points':5,'x-position':0,'y-position':25}
# alien_O={'color':'red','points':5}
# print(alien_O['color'])
# score = alien_O['points']+alien_O['points']
# print("Your score is "+str(score))
# alien_O['x_position'] = 0
# alien_O['y_position'] = 25
# print(str(alien_O['y_position'])+" position on the screen via y axis")
# print(alien_O)
# alien_P = {}
# alien_P['color'] = "purple"
# alien_P['points'] = 30
# alien_P['x_position'] = 50
# alien_P['y_position'] = 60
# print(alien_P)
# alien_P['color'] = "zebra"
# print(alien_P)
# alien_P['speed'] = "medium"
# print("original position for alien_P is "+str(alien_P['x_position']) + " " + str(alien_P['y_position'])+ " "+ "speed is "+ alien_P['speed'])
# if alien_P['speed'] == 'slow':
# x_increment = 1
# elif alien_P['speed'] == 'medium':
# x_increment = 2
# else:
# x_increment = 3
# alien_P['x_position'] = alien_P['x_position'] + x_increment
# print("new positin is "+str(alien_P['x_position']))
# del(alien_P['speed'])
# print(alien_P)
# # alien_P['speed'] = "fast"
# # print(alien_P)
# Maps each person's name to their favorite programming language; used by
# the greeting loops at the bottom of the file.
fave_prog = {
    'julian':'python',
    'chris':'php',
    'eddy':'c',
    'evan':'java',
    'richard':'nothing'
}
# #print(fave_prog)
# print("Julian's favorite language is "+
# fave_prog['julian'].title())
# chris = {
# 'name':'chris',
# 'address':'someplace utsa',
# 'city':'san antonio',
# 'number':'1800eatadick'
# }
#print(chris)
# print("a good friend of mine is "+
# chris['name']+" and he lives at "+
# chris['address']+
# " and he number is "+
# chris['number'])
# fave_numbers = {'julian':69,'life':42,'chris':108,'samantha':5}
# print("Julain's favorite number is : "+ str(fave_numbers['julian']))
# print("Lifes's favorite number is : "+ str(fave_numbers['life']))
# print("Chris's favorite number is : "+ str(fave_numbers['chris']))
# print("Samantha's favorite number is : "+ str(fave_numbers['samantha']))
#
# fave_num = {'name':'julian','num':43,'o_name':'chris','o_num':203}
# print(fave_num)
# print(fave_num['name']+" "+str(fave_num['num']))
# glossary = {
# 'elif':'elif: a weird way to say else if',
# 'dictonary':'dictonary: a dynamic storage in python',
# 'slice':'slice: a way to start at a position in a list',
# 'sort':'sort: a way to sort the data',
# 'pizza':'pizza: a delicious food'
# }
# print(glossary['elif']+"\n")
# print(glossary['dictonary']+"\n")
# print(glossary['slice']+"\n")
# print(glossary['sort']+"\n")
# print(glossary['pizza']+"\n")
# user_O = {
# 'username':'chillman711',
# 'fname':'julian',
# 'lname':'itwaru'
# }
# for key, value in user_O.items():
# print("\nKey: "+key)
# print("Value: "+value)
# for k,v in fave_prog.items():
# print("\nName " +k.title())
# print("Prog " + v.title())
# print("\n")
# for i in fave_prog.keys():
# print(i.title())
homies = ['eddy','chris', 'julian']

# Greet everyone in `homies`, walking all known names in alphabetical order.
for i in sorted(fave_prog.keys()):
    if i in homies:
        print("Hello "+ i.title()+
              " I see your favorite language is "+
              fave_prog[i].title()+"!")

print("\nThe languages mentioned are:")
for v in sorted(fave_prog.values()):
    print(v)

# left off at page 108  (bug fix: this was a bare text line -> SyntaxError)
| true |
bd345ca5f66ec3cd0e7c301c27de73286b6e0b5f | Python | Eavinn/AI | /机器学习/量化交易.py | UTF-8 | 2,179 | 3.328125 | 3 | [] | no_license | """
1. 因子处理:缺失值处理、去极值、标准化、PCA降维、中性化(用线性回归剔除因子间相关度高的部分)
2. 因子有效性分析:因子IC分析(确定因子和收益率之间的相关性)
IC(信息系数):某一期的IC指的是该期因子暴露值和股票下期的实际回报值在横截面上的相关系数
因子暴露度-处理(缺失值处理、去极值。标准化)后的因子值,股票下期的实际回报值-下期收益率,相关系数-斯皮尔曼相关系数
3. 因子收益率k:因子收益率 * 因子暴露度 + b = 下期收益率
4. 多因子相关性分析:还是使用斯皮尔曼秩相关系数,但是对象是两个因子的IC值序列分析
5. 多因子选股最常用的方法就是打分法和回归法
6. 收益指标:回测收益,回测年化收益,基准收益,基准年化收益
风险指标:最大回撤越小越好(30%以内), 夏普比率越大越好(1以上)
"""
import pandas as pd
import numpy as np
import scipy.stats as st
from alphalens import tears, performance, plotting, utils
df = pd.DataFrame([[1, 2], [4, 5]], columns=["A", "B"])
# 计算斯皮尔相关系数Rank IC,取值 [-1, 1]之间
print(st.spearmanr(df["A"], df["B"]))
"""使用alphalens更简易的做因子分析"""
# 输入因子表和收盘价表到返回到期收益率表,再将因子表和到期收益表整合返回综合因子数据表
factor_data = utils.get_clean_factor_and_forward_returns("factor", "price")
# 因子IC的计算
IC = performance.factor_information_coefficient(factor_data)
# 因子时间序列和移动平均图,看出一个因子在时间上的正负性、
plotting.plot_ic_ts(IC)
# 因子分布直方图,IC平均值,标准差
plotting.plot_ic_hist(IC)
# 热力图
mean_monthly_ic = performance.mean_information_coefficient(factor_data, by_time="1m")
plotting.plot_monthly_ic_heatmap(mean_monthly_ic)
# IC分析合集
tears.create_information_tear_sheet(factor_data)
# 收益率分析
tears.create_returns_tear_sheet(factor_data)
# 因子的每一期的收益(因子收益)
performance.factor_returns(factor_data).iloc[:, 0].mean()
| true |
dd60d129057b6c0c70b6f2743fccd033cdd2744c | Python | Nishinomiya0foa/djangotest | /test4.py | UTF-8 | 168 | 3.09375 | 3 | [] | no_license | import re
# Scratch check: a repeated one-digit group still yields the whole match via
# .group(). (An earlier attempt matched against the int `a` directly, which
# re.match rejects -- hence the str() conversion.)
a = 44421
b = str(a)
number_match = re.match(r'(\d)+', b)
print(number_match.group())
e50b6180d1bde01c6ecdb199e80db6954f84069a | Python | hevensun/sparked-deepwalk | /utils/plot.py | UTF-8 | 1,434 | 3.15625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import csv
import math
import sys
def readCSV(datafile, schema):
    """Read the columns named in `schema` from a CSV file.

    Returns a list of lists parallel to `schema`: data[i] holds the values
    of column schema[i], one string per row.
    """
    data = [[] for _ in schema]
    # newline='' is the documented way to open CSV files in Python 3; the
    # old open(..., 'rb') made csv.DictReader fail on bytes vs. str. The
    # `with` block also guarantees the file handle is closed.
    with open(datafile, 'r', newline='') as csvfile:
        reader = csv.DictReader(csvfile)
        for row in reader:
            for i, header in enumerate(schema):
                data[i].append(row[header])
    return data
def plot(title, xlabel, ylabel, x, y, marker, xticks=None, yticks=None):
    """Plot x vs. y with the given marker and labels, then show the figure.

    xticks/yticks: optional tick positions; when falsy, matplotlib keeps its
    default ticks. (Defaults changed from mutable []s to None; `if xticks:`
    treats both identically, so callers are unaffected.)
    """
    plt.plot(x, y, marker)
    # Bug fix: the original referenced the module-level globals xTicks/yTicks
    # here instead of the parameters, silently ignoring what callers passed.
    if xticks:
        plt.xticks(xticks)
    if yticks:
        plt.yticks(yticks)
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()
# --- Script: plot vertex-visitation frequency on log-log axes ---------------
datafile = sys.argv[1]  # CSV with 'numberOfVisits' and 'numberOfVertices'
dataset = sys.argv[2]   # dataset name, used in the plot title
data = readCSV(datafile, ['numberOfVisits', 'numberOfVertices'])
x = [int(a) for a in data[0][:]]
y = [int(a) for a in data[1][:]]
maxVisits = max(x)
maxVertices = max(y)
# Tick positions at the exponents of the powers of ten below each maximum
# (the data itself is log10-transformed below, so ticks are exponents).
xTicks = [i for i in range(15) if pow(10, i) < maxVisits]
yTicks = [i for i in range(15) if pow(10, i) < maxVertices]
xTicks.insert(0, -0.1)
yTicks.insert(0, -0.1)
logx = [math.log(v, 10) for v in x]
logy = [math.log(v, 10) for v in y]
# NOTE(review): plot() historically read the globals xTicks/yTicks rather
# than its tick parameters, so keep these global names in sync with the call.
plot(
    dataset + " - Frequency of Vertex Occurrence in Short Random Walks",
    "Vertex Visitation Count",
    "# of Vertices",
    logx,
    logy,
    'b+',
    xTicks,
    yTicks)
'''
data = readCSV("output/"+ dataset +"_vec.csv", ["dim1", "dim2"])
x = [float(a) for a in data[0][:]]
y = [float(a) for a in data[1][:]]
plot(
dataset + " vectors ",
"dimension 1",
"dimension 2",
x,
y,
'bo')
'''
| true |
9f9122970b89416d27021b4b3520d39c8c30b83c | Python | creators1303/VPL-GUI | /data/classes/wind2.py | UTF-8 | 1,166 | 2.65625 | 3 | [] | no_license | from data.classes.object import Object
from data.drawer import draw_sec_table
from data.workers.workerInteractions import update_interactions
class WindowObj(Object):
    """A window object that owns, updates, and draws a list of child objects."""

    def __init__(self, table, coords=None, size=None):
        Object.__init__(self, table, coords, size)
        self.children = []          # child objects rendered inside this window
        self.set_snc(coords, size)  # inherited coords/size setup helper
        self.image_name = 'window'
        self.image_state = 'stand'
        print("Window born.")

    @staticmethod
    def virtual_return_image():
        """Texture file name used to represent this object type."""
        return 'testWindow.hmtex'

    def children_update(self):
        """Update every child, resolve interactions, then redraw window + children."""
        for child in self.children:
            child.update()
            child.updateState(self.table.curFol)
        update_interactions(self.children, self)
        draw_sec_table(self.table.screen, [self])
        draw_sec_table(self.table.screen, self.children)

    def update(self):
        self.children_update()

    def state_action(self):
        # Windows have no per-state behaviour.
        pass

    def remove_child(self, table, child):
        print(table)
        self.children.remove(child)

    def remove_children(self, table):
        # Bug fix: iterate over a copy -- remove_child mutates self.children,
        # and removing from a list while iterating it skips every other child.
        for child in list(self.children):
            self.remove_child(table, child)
| true |
10393f44d5e3ae115febd3d3901f6e9bc29ed77a | Python | cells2numbers/unet4neutrophils | /utils/evaluation.py | UTF-8 | 3,420 | 2.90625 | 3 | [
"BSD-2-Clause"
] | permissive | import numpy as np
import pandas as pd
def intersection_over_union(ground_truth, prediction):
    """Pairwise IoU matrix between ground-truth and predicted objects.

    Rows index ground-truth labels, columns predicted labels; the background
    label (0) is excluded from the returned matrix.
    """
    n_true = len(np.unique(ground_truth))
    n_pred = len(np.unique(prediction))

    # Joint label histogram: entry (i, j) counts pixels that carry
    # ground-truth label i and predicted label j, i.e. the intersection.
    intersection = np.histogram2d(
        ground_truth.flatten(), prediction.flatten(), bins=(n_true, n_pred)
    )[0]

    # Per-object pixel areas, shaped as a column / row for broadcasting.
    area_true = np.histogram(ground_truth, bins=n_true)[0][:, None]
    area_pred = np.histogram(prediction, bins=n_pred)[0][None, :]
    union = area_true + area_pred - intersection

    # Drop the background row/column, then divide (guarding empty unions).
    intersection = intersection[1:, 1:]
    union = union[1:, 1:]
    union[union == 0] = 1e-9
    return intersection / union
def measures_at(threshold, IOU):
    """F1 score and TP/FP/FN counts at one IoU threshold.

    A ground-truth object (row) matched by exactly one prediction is a TP;
    a prediction (column) that matches nothing is an FP; an unmatched
    ground-truth object is an FN.
    """
    matches = IOU > threshold

    row_hits = np.sum(matches, axis=1)
    col_hits = np.sum(matches, axis=0)

    true_positives = row_hits == 1   # correct objects
    false_positives = col_hits == 0  # extra objects
    false_negatives = row_hits == 0  # missed objects

    assert np.all(np.less_equal(true_positives, 1))
    assert np.all(np.less_equal(false_positives, 1))
    assert np.all(np.less_equal(false_negatives, 1))

    TP = np.sum(true_positives)
    FP = np.sum(false_positives)
    FN = np.sum(false_negatives)
    # Epsilon keeps the score defined when there are no objects at all.
    f1 = 2 * TP / (2 * TP + FP + FN + 1e-9)
    return f1, TP, FP, FN
# Compute Average Precision for all IoU thresholds
def compute_af1_results(ground_truth, prediction, results, image_name):
    """Append F1 measurements at IoU thresholds 0.50..0.95 to `results`."""
    IOU = intersection_over_union(ground_truth, prediction)

    # Mean best-overlap per predicted object; 0 when no objects exist.
    jaccard = np.max(IOU, axis=0).mean() if IOU.shape[0] > 0 else 0.0

    # One row per threshold in the 0.50..0.95 sweep (step 0.05).
    for threshold in np.arange(0.5, 1.0, 0.05):
        f1, tp, fp, fn = measures_at(threshold, IOU)
        results.loc[len(results)] = {
            "Image": image_name, "Threshold": threshold, "F1": f1,
            "Jaccard": jaccard, "TP": tp, "FP": fp, "FN": fn,
        }
    return results
# Count number of False Negatives at 0.7 IoU
def get_false_negatives(ground_truth, prediction, results, image_name, threshold=0.7):
    """Append one (Area, False_Negative) row per ground-truth object to `results`."""
    IOU = intersection_over_union(ground_truth, prediction)

    true_objects = len(np.unique(ground_truth))
    if true_objects <= 1:
        # Background only: nothing to report for this image.
        return results

    # Pixel area per object, with the background bin dropped.
    area_true = np.histogram(ground_truth, bins=true_objects)[0][1:]
    true_objects -= 1

    # Ground-truth objects with no prediction above the IoU threshold.
    missed = np.sum(IOU > threshold, axis=1) == 0

    rows = np.asarray([
        area_true.copy(),
        np.array(missed, dtype=np.int32)
    ])
    return pd.concat(
        [results, pd.DataFrame(data=rows.T, columns=["Area", "False_Negative"])]
    )
# Count the number of splits and merges
def get_splits_and_merges(ground_truth, prediction, results, image_name):
    """Append the merge/split counts for one image to `results`."""
    matches = intersection_over_union(ground_truth, prediction) > 0.1

    merged = np.sum(matches, axis=0) > 1  # prediction covering several GT objects
    split = np.sum(matches, axis=1) > 1   # GT object covered by several predictions

    results.loc[len(results) + 1] = {
        "Image_Name": image_name,
        "Merges": np.sum(merged),
        "Splits": np.sum(split),
    }
    return results
| true |
0bb75a4f5798d58689ed010f15a82a4fb1c8d3d4 | Python | nurlissaipidinov/viceversa_project | /viceversa_project/views.py | UTF-8 | 622 | 2.8125 | 3 | [] | no_license | from django.http import HttpResponse
from django.shortcuts import render
def about(request):
    """Plain-text placeholder response for the about page."""
    message = "This is about page"
    return HttpResponse(message)
def home(request):
    """Render the landing page template."""
    return render(request, 'home.html')
def reverse(request):
    """Show the user's text reversed, together with its word count.

    Reads the `usertext` query parameter from the GET request.
    """
    user_text = request.GET['usertext']
    context = {
        'usertext': user_text,
        'reversedtext': user_text[::-1],
        'len_of_words': len(user_text.split()),
    }
    return render(request, 'reverse.html', context)
| true |
9e44c07b4c58da9ccb940f1dba33296bc853d844 | Python | leonardodalvi/estudos-python | /projetos/madlibs/madlibs.py | UTF-8 | 1,122 | 3.578125 | 4 | [] | no_license | """
Very Beginner Python Project by Kylie Ying
Madlibs using string concatenation
YouTube Kylie Ying: https://www.youtube.com/ycubed
Twitch KylieYing: https://www.twitch.tv/kylieying
Twitter @kylieyying: https://twitter.com/kylieyying
Instagram @kylieyying: https://www.instagram.com/kylieyying/
Website: https://www.kylieying.com
Github: https://www.github.com/kying18
Programmer Beast Mode Spotify playlist: https://open.spotify.com/playlist/4Akns5EUb3gzmlXIdsJkPs?si=qGc4ubKRRYmPHAJAIrCxVQ
"""
# # string concatenation (aka how to put strings together)
# # suppose we want to create a string that says "subscribe to _____ "
# youtuber = "Kylie Ying" # some string variable
# # a few ways to do this
# print("subscribe to " + youtuber)
# print("subscribe to {}".format(youtuber))
# print(f"subscribe to {youtuber}")
# Collect the player's words in order, then fill in the story template.
prompts = ("Adjetivo: ", "Verbo: ", "Verbo: ", "Pessoa Famosa: ")
adjetivo, verbo1, verbo2, pessoa_famosa = (input(p) for p in prompts)

madlib = f"Programar é muito {adjetivo}! Me deixa muito empolgado o tempo inteiro porque \
eu amo {verbo1}. Hidrate-se e {verbo2} como se você fosse {pessoa_famosa}!"
print(madlib)
1339c399d09863eb5c6b32d2a102694fecd489ae | Python | andrey1908/dataset_scripts | /converters/MOTS2coco.py | UTF-8 | 3,028 | 2.71875 | 3 | [] | no_license | import argparse
import json
from pycocotools.mask import toBbox
from PIL import Image
import os
def build_parser():
    """CLI definition: annotation file, image folder, class names, output path."""
    parser = argparse.ArgumentParser()
    # All four options are required strings; only --classes takes a list.
    for flags, extra in (
        (('-ann', '--annotation-file'), {}),
        (('-img-fld', '--images-folder'), {}),
        (('-cls', '--classes'), {'nargs': '+'}),
        (('-out', '--out-file'), {}),
    ):
        parser.add_argument(*flags, type=str, required=True, **extra)
    return parser
def get_categories(classes):
    """COCO `categories` list: ids assigned 1..N in input order."""
    return [
        {'name': name, 'id': category_id}
        for category_id, name in enumerate(classes, start=1)
    ]
def get_images(images_folder):
    """Build COCO `images` entries for every file in `images_folder`.

    Returns (images, time_frame_to_image_id): the list of COCO image dicts
    and a mapping from the integer frame number (taken from the file name)
    to the assigned COCO image id.
    """
    images = list()
    images_files = os.listdir(images_folder)
    # Every file must be named '<zero-padded frame>.<ext>' with one dot and
    # the same padding width; the first file fixes the expected width.
    assert len(images_files[0].split('.')) == 2
    pad = len(images_files[0].split('.')[0])
    for image_file in images_files:
        assert len(image_file.split('.')) == 2
        assert len(image_file.split('.')[0]) == pad
    image_id = 0
    time_frame_to_image_id = dict()
    # NOTE(review): os.listdir order is arbitrary, so image ids are not
    # guaranteed to follow frame order -- confirm callers don't rely on it.
    for image_file in images_files:
        im = Image.open(os.path.join(images_folder, image_file))
        width, height = im.size
        image = {'file_name': image_file, 'width': width, 'height': height, 'id': image_id}
        time_frame_to_image_id[int(image_file.split('.')[0])] = image_id
        images.append(image)
        image_id += 1
    return images, time_frame_to_image_id
def get_annotations(MOTS_lines, time_frame_to_image_id):
    """Convert MOTS RLE annotation lines into COCO annotation dicts.

    Each line is whitespace-separated: frame, object id (unused here),
    class id, mask height, mask width, RLE string. Only class ids 1 and 2
    are kept.
    """
    annotations = []
    ann_id = 1
    for raw_line in MOTS_lines:
        fields = raw_line.split()
        time_frame = int(fields[0])
        cat_id = int(fields[2])
        if cat_id not in (1, 2):
            continue

        # Decode the RLE mask's bounding box (x, y, w, h).
        rle = {'counts': fields[5], 'size': [int(fields[3]), int(fields[4])]}
        bbox = list(toBbox(rle))

        annotations.append({
            'id': ann_id,
            'iscrowd': 0,
            'image_id': time_frame_to_image_id[time_frame],
            'category_id': cat_id,
            'bbox': bbox,
            'area': bbox[2] * bbox[3],
        })
        ann_id += 1
    return annotations
def MOTS_txt2coco_dict(MOTS_lines, images_folder, classes):
    """Assemble the full COCO dictionary from MOTS lines and an image folder."""
    images, time_frame_to_image_id = get_images(images_folder)
    categories = get_categories(classes)
    annotations = get_annotations(MOTS_lines, time_frame_to_image_id)
    return {'images': images, 'annotations': annotations, 'categories': categories}
def MOTS2coco(annotation_file, images_folder, classes, out_file):
    """Read a MOTS txt annotation file and write the COCO json to `out_file`."""
    with open(annotation_file, 'r') as src:
        mots_lines = src.read().splitlines()
    coco = MOTS_txt2coco_dict(mots_lines, images_folder, classes)
    with open(out_file, 'w') as dst:
        json.dump(coco, dst, indent=2)
if __name__ == '__main__':
parser = build_parser()
args = parser.parse_args()
MOTS2coco(**vars(args))
| true |
38c6ece9389bc1623d6e154a0c1f2e514a130576 | Python | KirstieJane/bocpdms | /paper_pictures_ICML18_nllmseplot.py | UTF-8 | 3,623 | 2.515625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 25 17:05:16 2018
@author: jeremiasknoblauch
Description: Plots MSE + NLL for GP-models vs. SSBVAR + MS
"""
import numpy as np
import matplotlib.pyplot as plt
dir_ = ("//Users//jeremiasknoblauch//Documents//OxWaSP//BOCPDMS//Code//" +
        "SpatialBOCD//Paper//Presentation//MSE")

# Per-model metric values over the datasets Nile, Snow, Bee, 30PF.
# NSGP has no 30PF result, so its rows only have three entries.
MSE_vals = [
    [0.553, 0.750, 2.62, 29.95],   # ARGPCP
    [0.583, 0.689, 3.13, 30.17],   # GPTSCP
    [0.585, 0.618, 3.17],          # NSGP
    [0.55, 0.681, 1.74, 25.93]     # BBVAR
]

NLL_vals = [
    [1.15, -0.604, 4.07, 39.5495],  # ARGPCP
    [1.19, 1.17, 4.54, 39.44],      # GPTSCP
    [1.15, -1.98, 4.19],            # NSGP
    [1.13, 0.923, 3.57, 48.32]      # BBVAR
]

# 95% confidence half-widths matching the value tables above.
MSE_95 = [
    [0.0962, 0.0315, 0.195, 0.5],   # ARGPCP
    [0.0989, 0.0294, 0.241, 0.51],  # GPTSCP
    [0.0988, 0.0242, 0.230],        # NSGP
    [0.0948, 0.0245, 0.222, 0.906]  # BVAR
]

NLL_95 = [
    [0.0555, 0.0385, 0.150, 0.22],
    [0.0548, 0.0183, 0.188, 0.22],
    [0.0655, 0.0561, 0.0212],
    [0.0684, 0.0231, 0.166, 0.964]
]

# Per-dataset normalizer (data variance) used for both metrics.
baseline = np.array([1, 1, 3, 30])


def _plot_metric(values, errors, ylabel, out_name):
    """Plot each model's variance-normalized metric with 95% error bars.

    Refactor: the original script contained two near-identical copy-pasted
    plotting sections (MSE and NLL); both now go through this one helper.
    `values[i]` and `errors[i]` may have 3 or 4 entries -- shorter rows are
    plotted against the first len(row) datasets.
    """
    xlabsize, ylabsize, legendsize, ticksize = 15, 15, 13, 12
    linewidths = [3] * 5
    linestyles = ["-"] * 5
    linecolors = ["navy", "purple", "red", "orange"]

    # NOTE(review): plt.subplots returns (Figure, Axes); the original
    # unpacked them as `ax, fig`, so `fig` is really the Axes object.
    # The swapped names are kept for consistency with the original.
    ax, fig = plt.subplots(1, figsize=(6, 4))
    handles, labels = fig.get_legend_handles_labels()
    for i in range(4):
        n = len(values[i])  # 3 for NSGP, 4 for all other models
        dat = np.array(values[i]) / baseline[:n]
        err = np.array(errors[i]) / baseline[:n]
        handle = fig.errorbar(x=list(range(n)), y=dat, yerr=err,
                              linewidth=linewidths[i],
                              linestyle=linestyles[i],
                              color=linecolors[i],
                              marker='o',
                              ms=7,
                              capsize=5)
        handles.append(handle)

    plt.xlabel("Data Set", size=xlabsize)
    plt.ylabel(ylabel, size=ylabsize)
    labels = ['ARGPCP', 'GPTSCP', 'NSGP', 'SSBVAR']
    plt.legend(handles, labels, prop={'size': legendsize})
    plt.xticks([0, 1, 2, 3], ["Nile", "Snow", "Bee", "30PF"])
    plt.tick_params(labelsize=ticksize)
    plt.savefig(dir_ + "//" + out_name,
                format="pdf", dpi=800)


_plot_metric(MSE_vals, MSE_95, "MSE/Variance", "MSE.pdf")
_plot_metric(NLL_vals, NLL_95, "NLL/Variance", "NLL.pdf")
6ce6695ea0552a075ebcaef8fcdf620fb400b60f | Python | brunovianarezende/brite-risktypes-api | /data/brite/model/command_line/add_new_type.py | UTF-8 | 1,106 | 2.640625 | 3 | [] | no_license | import os
import argparse
import json
from sqlalchemy import create_engine
from brite.model import Base
from brite.model.service import DbService
def add_new_type_main():
    """Console entry point: parse the CLI arguments and add one risk type.

    Exits early (with a message) when the json file is missing; a missing
    db file is only reported, since the db is created on the fly.
    """
    parser = argparse.ArgumentParser(description="Add a new risk type to the db. The db is created if it doesn't exist")
    parser.add_argument('db_path', help='The path to sqlite db')
    parser.add_argument('json_path', help='The path to the json describing the risk type')
    args = parser.parse_args()
    if not os.path.exists(args.json_path):
        print("'%s' was not found" % args.json_path)
        return
    if not os.path.exists(args.db_path):
        print("there is no db at '%s', one will be created" % args.db_path)
    add_new_type(args.db_path, args.json_path)
def add_new_type(db_path, json_path):
    """Load the risk-type description from `json_path` and store it at `db_path`.

    Creates the sqlite schema first when the db file does not exist yet.
    """
    engine = create_engine('sqlite:///%s' % db_path, echo=False)
    if not os.path.exists(db_path):
        # Fresh database: create all tables declared on the model Base.
        Base.metadata.create_all(engine)
    service = DbService(engine)
    with open(json_path) as f:
        obj = json.load(f)
    service.add_type(obj)


if __name__ == '__main__':
    add_new_type_main()
| true |
a3445a1020a15664bf4bbcdd2d6b4f4255c68bf7 | Python | yamaton/codeeval | /moderate/remove_chars.py | UTF-8 | 864 | 4.15625 | 4 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
"""
remove_chars.py
Created by Yamato Matsuoka on 2012-07-16.
Description:
Write a program to remove specific characters from a string.
Input sample:
The first argument will be a text file containing an input string followed by a comma and then the characters that need to be scrubbed. e.g.
how are you, abc
hello world, def
Output sample:
Print to stdout, the scrubbed strings, one per line. Trim out any leading/trailing whitespaces if they occur.
e.g.
how re you
hllo worl
"""
import sys
def remove_chars(entry):
    """entry = (text, chars): return `text` with every character of `chars` removed."""
    text, chars = entry
    banned = set(chars)  # O(1) membership tests instead of scanning the string
    return "".join(character for character in text if character not in banned)
if __name__ == '__main__':
    # Each input line looks like "<text>, <chars>": split on the comma and
    # strip surrounding whitespace from both halves.
    with open(sys.argv[1], "r") as f:
        data = [[s.strip() for s in line.rstrip().split(",")] for line in f]
    out = (remove_chars(entry) for entry in data)
    # NOTE: Python 2 print statement -- this file predates Python 3.
    print "\n".join(out)
| true |
3efc2ca03a32ce160ee6929d35528519b8646df6 | Python | minmax/hashable | /hashable/helpers.py | UTF-8 | 1,112 | 2.65625 | 3 | [
"MIT"
] | permissive | from .equals_builder import EqualsBuilder
from .hash_code_builder import HashCodeBuilder
__all__ = [
'hashable',
'equalable',
]
def hashable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls = equalable(cls, attributes, methods)
cls.__hash__ = HashCodeBuilder.auto_generate(cls, attributes, methods)
return cls
return decorator if cls is None else decorator(cls)
def equalable(cls=None, attributes=None, methods=None):
_validate_attributes_and_methods(attributes, methods)
def decorator(cls):
cls.__eq__ = EqualsBuilder.auto_generate(cls, attributes, methods)
cls.__ne__ = EqualsBuilder.auto_ne_from_eq()
return cls
return decorator if cls is None else decorator(cls)
def _validate_attributes_and_methods(attributes, methods):
assert not isinstance(attributes, basestring), 'attributes must be list'
assert not isinstance(methods, basestring), 'methods must be list'
assert attributes or methods, 'attributes or methods must be NOT empty'
| true |
014c33f97761e59a08f0163a13b656e3e1dc3e42 | Python | nhatsmrt/AlgorithmPractice | /LeetCode/1751. Maximum Number of Events That Can Be Attended II/Solution2.py | UTF-8 | 2,361 | 2.984375 | 3 | [] | no_license | class Solution:
def maxValue(self, events: List[List[int]], k: int) -> int:
# Time Complexity: O(N (log W + log N))
# Space Complexity: O(N)
penalty_low = 0
penalty_high = max([event[-1] for event in events]) + 1
events.sort(key=lambda ev: (ev[0], ev[1]))
self.next = []
for i, event in enumerate(events):
low = i + 1
high = len(events)
while low < high:
mid = (low + high) // 2
if events[mid][0] <= events[i][1]:
low = mid + 1
else:
high = mid
self.next.append(low)
ret = 0
while penalty_low < penalty_high:
penalty = (penalty_low + penalty_high) // 2
self.dp = {}
max_sum1, num_choose1 = self.max_sum(events, 0, penalty, True)
self.dp = {}
max_sum2, num_choose2 = self.max_sum(events, 0, penalty, False)
if k < num_choose1: # penalty is too low:
penalty_low = penalty + 1
else:
num_choose = min(k, num_choose2)
ret = max(ret, max_sum1 + penalty * num_choose)
penalty_high = penalty - 1
return ret
def max_sum(self, events: List[List[int]], i: int, penalty: int, min_choose: bool):
if i == len(events):
return 0, 0
if i in self.dp:
return self.dp[i]
max_sum1 = events[i][-1] - penalty
max_sum_next, num_choose_next = self.max_sum(events, self.next[i], penalty, min_choose)
num_choose1 = 1 + num_choose_next
max_sum1 += max_sum_next
max_sum2, num_choose2 = self.max_sum(events, i + 1, penalty, min_choose)
if max_sum1 > max_sum2:
ret = max_sum1, num_choose1
elif max_sum2 > max_sum1:
ret = max_sum2, num_choose2
elif min_choose: # choose as few as possible
if num_choose1 < num_choose2:
ret = max_sum1, num_choose1
else:
ret = max_sum2, num_choose2
else: # choose as many as possible
if num_choose2 < num_choose1:
ret = max_sum1, num_choose1
else:
ret = max_sum2, num_choose2
self.dp[i] = ret
return ret
| true |
e881ebcee0cada39997459e1986f547419b885c4 | Python | Junlings/webfe | /core/imports/marc/backups/import_model.py | UTF-8 | 5,674 | 2.703125 | 3 | [] | no_license | ##### Import the keyword database
import keyword_marc as marcALLlist
import importfun_marc as marcfun
#import keyword_nastran as NastranALLlist
#import keyword_sap as sapALLlist
#import keyword_opensees as openseesALLlist
class import_file:
"""
base class of import file
### Function of this base class including
### 1### Define the data structure of FEM analysis
### 2### Define the
"""
def __init__(self):
""" """
pass
def keyworddetect(self,line,ALLlist,style):
"""the program switcher form the derived class"""
pass ### left blank so the derived class to override tis function
class importfile_marc_dat(import_file):
""" ## class to import marc *.dat file """
def __init__(self,inputfile,stylesetting='Free',keywordlist=None):
import_file.__init__(self) ### initiate the parent class
self.inf = open(inputfile,'r') ### define the input file
self.content = [] ### initialize the file content
self.contentdict = {}
self.ALLlist = marcALLlist.ALLlist ### initialized the keywords list
self.style = stylesetting ### define the style
self.marcfun = marcfun.importfun_marc(self.style)
self.leftkey = [] ### define leftkeys
#self.keywordlist=keywordlist ### optional keyword list for partial extraction
def scanf(self): ##### scan the file and create the key words driven input content
preline = [] # previous line, temporary storage
allline = []
templine = ''
while 1:
line = self.inf.readline() ### read the current line from file
if line[0:1] == '$': #### bypass the comment line
continue
elif len(line) == 0: #### jump out of loop if went to the end of the file
break
elif len(line) == 1: ### only one keyword
testline = line
else:
testline=line.split()[0] ## extract out the keywords
# line start with keywords
if testline in self.ALLlist['ALL']: #### if found the keyword listed in the table
allline.append(preline) ### add the previous collection to allline list
preline = [] ### empty preline
if (line[len(line)-2]=='c' or line[len(line)-2]=='C' ) and line[len(line)-3]==' ': ### detect if the current line is a continue line
templine=line[0:len(line)-2] ### if it is a continue line, put in templine
else: ## no continue line
preline.append(line[0:len(line)-1]) ### if not a continue line, put in preline stack
templine='' ### empty the templine
# line start without keyword, but with a continue line sign
elif (line[len(line)-2]=='c' or line[len(line)-2]=='C' ) and line[len(line)-3]==' ':
if len(templine)>0:
templine=templine+line[0:len(line)-5]
else:
templine=line[0:len(line)-5]
#
else:
if len(templine)>0: #### if templine not empty, add current line to it
templine=templine[0:len(templine)]+line[0:len(line)-1] ### get rid of 'c' and '\n'
else: ### set as templine #### if templine is empty, add current line to templine
templine=line[0:len(line)-1]
## judge the "updated" current line be or not continue line
if len(templine)>0 and (templine[len(templine)-1]=='c' or templine[len(templine)-1]=='C'):
templine=templine[len(templine)-1] # if the current line still a continue line
continue
else: # if not continue, add templine to preline stock and empty templine
preline.append(templine)
templine=''
allline.append(preline) #add the last preline as it will not be triggered in previous loop
self.content=allline
def display_content(self,tag=None):
""" display the key word driven content """
for i in range(1,len(self.content)+1):
if tag == None:
# all detected keywords
print self.content[i]
else:
# only specified keywords
if self.content[i][0].split()[0] == tag:
print self.content[i]
def processf(self):
"""
loop over all keyword driven lines
Scan the file and get content if have not done so far
"""
if self.content == []: # do the scan if have not done so
self.scanf()
for line in self.content:
if len(line) > 0:
keywords = line[0].split(' ')[0]
self.contentdict[keywords] = line
if __name__ == '__main__':
f1 = importfile_marc_dat('pullout_job1.dat',stylesetting='Extended')
f1.processf()
| true |
cb308d2e44dd4b58a8451bb56b71a814865053d2 | Python | fjacob21/automevent | /src/frontend/frontend.py | UTF-8 | 1,222 | 2.578125 | 3 | [
"Apache-2.0"
] | permissive | #! /usr/bin/python3
import asyncio
import os
import signal
import subprocess
import sys
def end_signal_handler(signal, frame):
global loop
loop.stop()
sys.exit()
class AutomeventFrontend(asyncio.Protocol):
def __init__(self, loop):
self.loop = loop
def connection_made(self, transport):
pass
def data_received(self, data):
print('Data received: {!r}'.format(data.decode()))
execute(data.decode())
def connection_lost(self, exc):
print('The server closed the connection')
print('Stop the event loop')
self.loop.stop()
def execute(cmd):
env = os.environ.copy()
# env['SHELL'] = '/usr/bin/fish'
# env['PWD'] = '/home/user'
subprocess.Popen(cmd.split(' '), env=env, start_new_session=True)
if __name__ == '__main__':
signal.signal(signal.SIGINT, end_signal_handler)
signal.signal(signal.SIGTSTP, end_signal_handler)
signal.signal(signal.SIGTERM, end_signal_handler)
loop = asyncio.get_event_loop()
coro = loop.create_connection(lambda: AutomeventFrontend(loop),
'127.0.0.1', 1234)
loop.run_until_complete(coro)
loop.run_forever()
loop.close()
| true |
65ca909dcaceaa3e816c360b28fdb1fb3b17afa8 | Python | Stratigraph/GDELT_Predict | /get_trn_test.py | UTF-8 | 2,348 | 2.984375 | 3 | [] | no_license | import pandas
import numpy as np
def get_train_test(df, train_start, train_years, test_years):
min_time = df['date'].min()
max_time = df['date'].max()
train_increment = train_years * 10000
train_end = train_start + train_increment #hoping we don't land on feb. 29th in a leap year
train_frame = df.iloc[np.logical_and((df['date'] >= train_start).ravel(), (df['date'] < train_end).ravel())]
test_start = train_end
test_increment = test_years * 10000
test_end = test_start + test_increment
test_frame = df.iloc[np.logical_and((df['date'] >= test_start).ravel(), (df['date'] < test_end).ravel())]
traindays = list(np.unique(train_frame["date"])) #returns a sorted array in ascending order
testdays = list(np.unique(test_frame["date"]))
unq1 = np.unique(train_frame.country)
unq2 = np.unique(test_frame.country)
persistent_countries = np.intersect1d(unq1,unq2)
train_frame = train_frame.query('country in @persistent_countries')
test_frame = test_frame.query('country in @persistent_countries')
numtraindays = len(traindays)
numtestdays = len(testdays)
numcountries = len(persistent_countries)
train_x = np.zeros((numtraindays-1, numcountries))
train_y = np.zeros((numtraindays-1, numcountries))
test_x = np.zeros((numtestdays-1, numcountries))
test_y = np.zeros((numtestdays-1, numcountries))
for i, c in enumerate(persistent_countries):
temp_df = train_frame.query('country == @c')
temp_dates = list(temp_df["date"])
for d in temp_dates:
train_x_idx = traindays.index(d)
train_y_idx = train_x_idx - 1
if not(train_x_idx == len(traindays)-1):
train_x[train_x_idx,i] = temp_df['weighted_mean_goldstein_x_tone'][temp_df['date']==d]
if train_y_idx >= 0:
train_y[train_y_idx,i] = temp_df['weighted_mean_goldstein_x_tone'][temp_df['date']==d]
temp_df = test_frame.query('country == @c')
temp_dates = list(temp_df["date"])
for d in temp_dates:
test_x_idx = testdays.index(d)
test_y_idx = test_x_idx - 1
if not(test_x_idx == len(testdays)-1):
test_x[test_x_idx,i] = temp_df['weighted_mean_goldstein_x_tone'][temp_df['date']==d]
if test_y_idx >= 0:
test_y[test_y_idx,i] = temp_df['weighted_mean_goldstein_x_tone'][temp_df['date']==d]
return (train_x, train_y), (test_x, test_y), persistent_countries | true |
6c985642766bacf0294959155fb6e15d449c0c8d | Python | tavuong/aidem | /Datacompression-KIT/lib/audio_process.py | UTF-8 | 1,002 | 2.890625 | 3 | [
"MIT"
] | permissive | # audio_process.py
# Object oriented - 1D Audio Processing
# Datum : 13.04.2021
# Authors: Dr.-Ing. The Anh Vuong
import matplotlib.pyplot as plt
import numpy as np
import wave
import sys
import simpleaudio as sa
# Audio file Play
# https://realpython.com/playing-and-recording-sound-python/
# Test from_wave_file
# https://www2.cs.uic.edu/~i101/SoundFiles/
def play(filename):
# filename = './Musik/swift.wav'
wave_obj = sa.WaveObject.from_wave_file(filename)
play_obj = wave_obj.play()
play_obj.wait_done() # Wait until sound has finished playing
return (True)
# Audio File plot
def plot(filename):
spf = wave.open(filename, "r")
p= spf.getparams()
print(p)
# Extract Raw Audio from Wav File
signal = spf.readframes(-1)
# Info
signal = np.fromstring(signal, "Int16")
# If Stereo
if spf.getnchannels() == 2:
print("Just mono files")
sys.exit(0)
plt.figure(1)
plt.title("Signal Wave..." + filename)
plt.plot(signal)
plt.show()
| true |
9cede4aaf2e64fc1609d5a4bec7b848dd90df6b5 | Python | minkyeongk/CodingTest_Algorithm | /5. DFS, BFS/5.8 DFS.py | UTF-8 | 500 | 3.453125 | 3 | [] | no_license | # 5.8 DFS
# 그래프 인접 리스트 방식으로 구현, 노드를 나타내는 건 인덱스
def dfs(i, g, v):
if v[i] == False:
v[i] = True
print(i, '번째 노드 방문')
for n in g[i]:
if v[n] == False:
DFS(n, g, v)
graph = [
[], # 그래프 상에 0번째 노드가 없기 때문
[2, 3, 8],
[1, 7],
[1, 4, 5],
[3, 5],
[3, 4],
[7],
[2, 6, 8],
[1, 7]
]
visit = [False] * 9
dfs(1, graph, visit) | true |
90a334d7c3994e1fb87cdc1d658fa0159ee023c6 | Python | jcoates2/Independ_RBPi_UMW_2017 | /fox_dash_main.py | UTF-8 | 3,102 | 3.15625 | 3 | [] | no_license | #main game
import pygame, time
from fox import Fox
from wolf import Wolf
import sys
def events(fox_avatar):
#responds to keypresses and mouse events
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
fox_avatar.moving_left = True
elif event.key == pygame.K_RIGHT:
fox_avatar.moving_right = True
elif event.key == pygame.K_UP:
fox_avatar.moving_up = True
elif event.key == pygame.K_DOWN:
fox_avatar.moving_down = True
elif event.type == pygame.KEYUP:
if event.key == pygame.K_LEFT:
fox_avatar.moving_left = False
elif event.key == pygame.K_RIGHT:
fox_avatar.moving_right = False
elif event.key == pygame.K_UP:
fox_avatar.moving_up = False
elif event.key == pygame.K_DOWN:
fox_avatar.moving_down = False
#detection
def check(fox_avatar, wolf_avatar):
#print(str(fox_avatar.rect.centerx)+','+str(fox_avatar.rect.centery)+' : '+str(wolf_avatar.rect.centerx)+','+str(wolf_avatar.rect.centery))
if fox_avatar.rect.centerx == 2-wolf_avatar.rect.centerx or fox_avatar.rect.centery == 4-wolf_avatar.rect.centery:
return True
elif fox_avatar.rect.centerx == 3+wolf_avatar.rect.centerx or fox_avatar.rect.centery == 1+wolf_avatar.rect.centery:
return True
else:
return False
def run():
pygame.init()
size = [700,700]
screen = pygame.display.set_mode(size)
simple = pygame.image.load('l_one.png')
simple_rect = simple.get_rect()
#create fox and wolf
fox_avatar = Fox(screen)
wolf_avatar = Wolf(screen)
#speed
clock = pygame.time.Clock()
#world shift
world_shift_hor = 0
world_shift_vert = 0
pygame.display.set_caption("Fox Dash:Avoid the Wolf for 30 seconds ")
#start and create timer
startTime = pygame.time.get_ticks()
seconds = 0
game_cont = True
while game_cont == True:
#show on screen
screen.blit(simple, simple_rect)
fox_avatar.blitme()
wolf_avatar.blitme()
#check movments
events(fox_avatar)
fox_avatar.update(screen)
wolf_avatar.update(screen,fox_avatar)
pygame.display.update()
#collision detection
if check(fox_avatar, wolf_avatar):
print('Game over')
game_cont = False
#timer 30 seconds
#restart timer
if seconds >= 30:
#startTime = pygame.time.get_ticks()
#seconds = 0
print("Round done, You are safe!")
game_cont = False
seconds=(pygame.time.get_ticks()-startTime)/1000
#check to see if there is a collision
ans = check(fox_avatar, wolf_avatar)
#speed
clock.tick(30)
pygame.display.flip()
run()
pygame.quit()
sys.exit()
| true |
3fc2931c90efac1f52ff337de820295024637191 | Python | bermec/python | /src/Examples/range().py | UTF-8 | 350 | 4.375 | 4 | [] | no_license | # Counter
# Demonstrates the range() function
s = 0
print("Counting:")
for i in range(10):
print(i, end=" ")
print("\n\nCounting by fours:")
for i in range(2, 100, 4):
s = s + 1
print(i, end=" ")
print(s)
print("\n\nCounting backwards:")
for i in range(10, 0, -1):
print(i, end=" ")
input("\n\nPress the enter key to exit.\n")
| true |
d34801c84ef9c1231607b48f63713a7e81423bfe | Python | kolodiytaras/Python_course | /lab_1.py | UTF-8 | 1,758 | 3.671875 | 4 | [] | no_license | import re
incorrect_input = '(({({[1, 3])})'
correct_output = '(({({[1, 3]})}))'
a = incorrect_input.count('(')
b = incorrect_input.count(')')
c = incorrect_input.count('{')
d = incorrect_input.count('}')
e = incorrect_input.count('[')
f = incorrect_input.count(']')
print ("number of '(' is: ", a)
print ("number of ')' is: ", b)
print ("number of '{' is: ", a)
print ("number of '}' is: ", b)
print ("number of '[' is: ", a)
print ("number of ']' is: ", b)
print ("")
if a==b and c==d and e==f:
print ("We are happy, because we don't write a code")
else:
print ("We aren't happy, because we must write a code")
print ("")
list_of_all_digits = re.findall('\d', incorrect_input)
first_dig, second_dig = list_of_all_digits
index_1 = int(incorrect_input.index(first_dig))
first_slicing = incorrect_input[0:index_1]
second_slicing = first_slicing[::-1]
second_slicing = second_slicing.replace('[', ']')
second_slicing = second_slicing.replace('{', '}')
second_slicing = second_slicing.replace('(', ')')
my_correct_output = first_slicing + first_dig + ', ' + second_dig + second_slicing
print ("my_correct_output is: ", my_correct_output)
print ("")
a2 = my_correct_output.count('(')
b2 = my_correct_output.count(')')
c2 = my_correct_output.count('{')
d2 = my_correct_output.count('}')
e2 = my_correct_output.count('[')
f2 = my_correct_output.count(']')
print ("number of '(' is: ", a2)
print ("number of ')' is: ", b2)
print ("number of '{' is: ", a2)
print ("number of '}' is: ", b2)
print ("number of '[' is: ", a2)
print ("number of ']' is: ", b2) | true |
427e5648ac23d46b8d8768039b60ab52f81a23d9 | Python | Offliners/ZeroJugde-writeup | /基礎題庫/Contents/c760/c760.py | UTF-8 | 127 | 3.234375 | 3 | [] | no_license | import sys
for names in sys.stdin:
names = names.strip().split()
for n in names:
print(n[0].upper() + n[1:])
| true |
449486a2056bb6ec9ee5a4da13e621b44d66c7e1 | Python | sharif-42/Advance_Topic_Exploring | /date_time_module_exploring/time_delta.py | UTF-8 | 374 | 3.71875 | 4 | [] | no_license | from datetime import timedelta
# Difference between two timedelta objects
t1 = timedelta(weeks = 2, days = 5, hours = 1, seconds = 33)
t2 = timedelta(days = 4, hours = 11, minutes = 4, seconds = 54)
t3 = t1 - t2
print("t3 =", t3)
# Time duration in seconds
t = timedelta(days = 5, hours = 1, seconds = 33, microseconds = 233423)
print("total seconds =", t.total_seconds()) | true |
bef85d48245983ea6991f86c3de609d0d5c5da23 | Python | HarroJongen/DUHI | /Functions/Visualization.py | UTF-8 | 9,876 | 2.90625 | 3 | [] | no_license | #Title: DUHI visualization
#Date: 04-09-2020
#Author: Harro Jongen
#Visualization functions for the DUHI project
def Boxplot(cat, dataframe, analysis_periodtype, analysis_date):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=2, ncols=2)
dataframe.boxplot(column='UHI_max', by=cat, ax=axes[0,0])
axes[0,0].set_ylabel('UHI_max')
axes[0,0].set_title('')
dataframe.boxplot(column='UHI_int', by=cat, ax=axes[0,1])
axes[0,1].set_ylabel('UHI_int')
axes[0,1].set_title('')
dataframe.boxplot(column='T_max_urban', by=cat, ax=axes[1,0])
axes[1,0].set_ylabel('T_max in city')
axes[1,0].set_title('')
dataframe.boxplot(column='DTR_urban', by=cat, ax=axes[1,1])
axes[1,1].set_ylabel('DTR in city')
axes[1,1].set_title('')
fig.suptitle('Boxplots by ' + cat + ' for ' + analysis_periodtype + ' ' + analysis_date)
plt.savefig('Figures/Boxplots_' + cat + '_' + analysis_periodtype + '_' + analysis_date)
plt.close()
def Scatter(cat, dataframe, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=1, ncols=2)
axes[0].scatter(dataframe['sm_cor'], dataframe[cat])
axes[0].set_ylabel(cat)
axes[0].set_xlabel('Soil moisture')
axes[1].scatter(dataframe['API0.85_rural'], dataframe[cat])
axes[1].set_xlabel('Antecedent precipitation index (k = 0.85)')
fig.suptitle('Scatter ' + cat + ' against moisture proxies')
plt.savefig('Figures/Scatter_' + cat + '_SM_' + analysis_name)
plt.close()
def ScatterCity(cat1, cat2, dataframe, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=2, ncols=2)
axes[0,0].scatter(dataframe[cat2], dataframe[cat1 ])
axes[0,0].set_ylabel(cat1)
axes[0,0].set_title('All cities')
axes[0,1].scatter(dataframe[dataframe['City'] == 'Amsterdam'][cat2], dataframe[dataframe['City'] == 'Amsterdam'][cat1])
axes[0,1].set_title('Amsterdam')
axes[1,0].scatter(dataframe[dataframe['City'] == 'Rotterdam'][cat2], dataframe[dataframe['City'] == 'Rotterdam'][cat1])
axes[1,0].set_ylabel(cat1)
axes[1,0].set_xlabel(cat2)
axes[1,0].set_title('Rotterdam')
axes[1,1].scatter(dataframe[dataframe['City'] == 'Gent'][cat2], dataframe[dataframe['City'] == 'Gent'][cat1])
axes[1,1].set_xlabel(cat2)
axes[1,1].set_title('Gent')
fig.suptitle('Scatter ' + cat1 + ' against ' + cat2 + ' per city')
plt.savefig('Figures/Scatter_' + cat1 + '_' + cat2[0] + '_' + analysis_name)
plt.close()
def ScatterCitySM(cat1, dataframe, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=2, ncols=2)
axes[0,0].scatter(dataframe['sm_cor'], dataframe[cat1 ])
axes[0,0].set_ylabel(cat1)
axes[0,0].set_title('All cities')
axes[0,1].scatter(dataframe[dataframe['City'] == 'Amsterdam']['sm'], dataframe[dataframe['City'] == 'Amsterdam'][cat1])
axes[0,1].set_title('Amsterdam')
axes[1,0].scatter(dataframe[dataframe['City'] == 'Rotterdam']['sm'], dataframe[dataframe['City'] == 'Rotterdam'][cat1])
axes[1,0].set_ylabel(cat1)
axes[1,0].set_xlabel('sm')
axes[1,0].set_title('Rotterdam')
axes[1,1].scatter(dataframe[dataframe['City'] == 'Gent']['sm'], dataframe[dataframe['City'] == 'Gent'][cat1])
axes[1,1].set_xlabel('sm')
axes[1,1].set_title('Gent')
fig.suptitle('Scatter ' + cat1 + ' against sm per city')
plt.savefig('Figures/Scatter_' + cat1 + '_sm_' + analysis_name)
plt.close()
def ScatterSelect(cat, cat_select, select, dataframe, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=1, ncols=2)
axes[0].scatter(dataframe[dataframe[cat_select] == select]['sm'], dataframe[dataframe[cat_select] == select][cat])
axes[0].set_ylabel(cat)
axes[0].set_xlabel('Soil moisture')
axes[1].scatter(dataframe[dataframe[cat_select] == select]['API0.85_rural'], dataframe[dataframe[cat_select] == select][cat])
axes[1].set_xlabel('Antecedent precipitation index (k = 0.85)')
fig.suptitle('Scatter ' + cat + ' against moisture proxies for ' + cat_select + ' is ' + select)
plt.savefig('Figures/Scatter_' + cat + '_' + select + '_SM_' + analysis_name)
plt.close()
#%% Subset plots
def ScatterSubset(cat, dataframe, dataframe_sub, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=1, ncols=2)
axes[0].scatter(dataframe['sm'], dataframe[cat])
axes[0].scatter(dataframe_sub['sm'], dataframe_sub[cat])
axes[0].set_ylabel(cat)
axes[0].set_xlabel('Soil moisture')
axes[1].scatter(dataframe['API0.85_rural'], dataframe[cat])
axes[1].scatter(dataframe_sub['API0.85_rural'], dataframe_sub[cat])
axes[1].set_xlabel('Antecedent precipitation index (k = 0.85)')
fig.suptitle('Scatter ' + cat + ' against moisture proxies')
plt.savefig('Figures/Scatter_' + cat + '_SM_' + analysis_name)
plt.close()
def ScatterSubsetSelect(cat, cat_select, select, dataframe, dataframe_sub, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=1, ncols=2)
axes[0].scatter(dataframe[dataframe[cat_select] == select]['sm'], dataframe[dataframe[cat_select] == select][cat])
axes[0].scatter(dataframe_sub[dataframe_sub[cat_select] == select]['sm'], dataframe_sub[dataframe_sub[cat_select] == select][cat])
axes[0].set_ylabel(cat)
axes[0].set_xlabel('Soil moisture')
axes[1].scatter(dataframe[dataframe[cat_select] == select]['API0.85_rural'], dataframe[dataframe[cat_select] == select][cat])
axes[1].scatter(dataframe_sub[dataframe_sub[cat_select] == select]['API0.85_rural'], dataframe_sub[dataframe_sub[cat_select] == select][cat])
axes[1].set_xlabel('Antecedent precipitation index (k = 0.85)')
fig.suptitle('Scatter ' + cat + ' against moisture proxies for ' + cat_select + ' is ' + select)
plt.savefig('Figures/Scatter_' + cat + '_' + select + '_SM_' + analysis_name)
plt.close()
def ScatterSubsetC(cat, dataframe, dataframe_sub, analysis_name, c = None):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=1, ncols=2)
axes[0].scatter(dataframe['sm'], dataframe[cat], c=dataframe[c])
axes[0].scatter(dataframe_sub['sm'], dataframe_sub[cat], c=dataframe_sub[c])
axes[0].set_ylabel(cat)
axes[0].set_xlabel('Soil moisture')
axes[1].scatter(dataframe['API0.85_rural'], dataframe[cat], c=dataframe[c])
axes[1].scatter(dataframe_sub['API0.85_rural'], dataframe_sub[cat], c=dataframe_sub[c])
axes[1].set_xlabel('Antecedent precipitation index (k = 0.85)')
fig.suptitle('Scatter ' + cat + ' against moisture proxies')
plt.savefig('Figures/Scatter_' + cat + '_SM_' + analysis_name)
plt.close()
def ScatterSubsetSelectC(cat, cat_select, select, dataframe, dataframe_sub, analysis_name, c = None):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=1, ncols=2)
axes[0].scatter(dataframe[dataframe[cat_select] == select]['sm'], dataframe[dataframe[cat_select] == select][cat], c=dataframe[dataframe[cat_select] == select][c])
axes[0].scatter(dataframe_sub[dataframe_sub[cat_select] == select]['sm'], dataframe_sub[dataframe_sub[cat_select] == select][cat], c=dataframe_sub[dataframe_sub[c]])
axes[0].set_ylabel(cat)
axes[0].set_xlabel('Soil moisture')
axes[1].scatter(dataframe[dataframe[cat_select] == select]['API0.85_rural'], dataframe[dataframe[cat_select] == select][cat], c=dataframe[dataframe[cat_select] == select][c])
axes[1].scatter(dataframe_sub[dataframe_sub[cat_select] == select]['API0.85_rural'], dataframe_sub[dataframe_sub[cat_select] == select][cat], c=dataframe_sub[dataframe_sub[c]])
axes[1].set_xlabel('Antecedent precipitation index (k = 0.85)')
fig.suptitle('Scatter ' + cat + ' against moisture proxies for ' + cat_select + ' is ' + select)
plt.savefig('Figures/Scatter_' + cat + '_' + select + '_SM_' + analysis_name)
plt.close()
def ScatterSubsetCity(cat1, cat2, dataframe, dataframe_sub, analysis_name):
import matplotlib.pyplot as plt
fig, axes = plt.subplots(figsize=(20,10), nrows=2, ncols=2)
axes[0,0].scatter(dataframe[cat2], dataframe[cat1 ])
axes[0,0].scatter(dataframe_sub[cat2], dataframe_sub[cat1 ])
axes[0,0].set_ylabel(cat1)
axes[0,0].set_title('All cities')
axes[0,1].scatter(dataframe[dataframe['City'] == 'Amsterdam'][cat2], dataframe[dataframe['City'] == 'Amsterdam'][cat1])
axes[0,1].scatter(dataframe_sub[dataframe_sub['City'] == 'Amsterdam'][cat2], dataframe_sub[dataframe_sub['City'] == 'Amsterdam'][cat1])
axes[0,1].set_title('Amsterdam')
axes[1,0].scatter(dataframe[dataframe['City'] == 'Rotterdam'][cat2], dataframe[dataframe['City'] == 'Rotterdam'][cat1])
axes[1,0].scatter(dataframe_sub[dataframe_sub['City'] == 'Rotterdam'][cat2], dataframe_sub[dataframe_sub['City'] == 'Rotterdam'][cat1])
axes[1,0].set_ylabel(cat1)
axes[1,0].set_xlabel(cat2)
axes[1,0].set_title('Rotterdam')
axes[1,1].scatter(dataframe[dataframe['City'] == 'Gent'][cat2], dataframe[dataframe['City'] == 'Gent'][cat1])
axes[1,1].scatter(dataframe_sub[dataframe_sub['City'] == 'Gent'][cat2], dataframe_sub[dataframe_sub['City'] == 'Gent'][cat1])
axes[1,1].set_xlabel(cat2)
axes[1,1].set_title('Gent')
fig.suptitle('Scatter ' + cat1 + ' against ' + cat2 + ' per city')
plt.savefig('Figures/Scatter_' + cat1 + '_' + cat2[0] + '_' + analysis_name)
plt.close() | true |
002040b79fcb141014d2badec6a2007e75373f99 | Python | alexander-colaneri/python | /studies/curso_em_video/ex038-comparando-numeros.py | UTF-8 | 573 | 4.65625 | 5 | [
"MIT"
] | permissive | # Escreva um programa que leia dois números inteiros e compare-os. mostrando na tela uma mensagem:
# - O primeiro valor é maior
# - O segundo valor é maior
# - Não existe valor maior, os dois são iguais
print()
print('\033[0;32m*\033[m' * 5, 'Comparador de números', '\033[0;32m*\033[m' * 5)
print()
n1 = int(input('Digite um número: '))
n2 = int(input('Digite o segundo número: '))
print()
if n1 > n2:
print(f'O número {n1} é MAIOR que {n2}!')
elif n2 > n1:
print(f'O número {n2} é MAIOR que {n1}!')
else:
print(f'O número {n1} é IGUAL a {n2}!')
| true |
8929e28f50d72b052990567038ae79332739dae9 | Python | youridv1/ProgrammingYouriDeVorHU | /venv/Les7/7_1.py | UTF-8 | 322 | 4.125 | 4 | [] | no_license | def convert(tempCelcius):
tempFahrenheit = tempCelcius * 1.8 + 32
return tempFahrenheit
def table():
print("{1:>3} {0:>5}".format("C", "F"))
for tempCelcius in range(-30, 41, 10):
tempFahrenheit = convert(tempCelcius)
print("{0:5.1f} {1:5.1f}".format(tempFahrenheit, tempCelcius))
table() | true |
502d369398a69dc2d21c8c7985bfeb49f2e44079 | Python | dexion/springbok | /MyGtk/Gtk_SearchBar.py | UTF-8 | 2,667 | 2.59375 | 3 | [] | no_license | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import pygtk
pygtk.require("2.0")
import re
import gtk
import Gtk_Main
from AnomalyDetection.DistributedDetection import DistributedDetection
class Gtk_SearchBar:
"""Gtk_SearchBar class.
Search bar added on the top of a tab to search result.
Parameters
----------
ref_object : The referenced object where to search
gtk_def : the gtk object to modify/add result
gtk_object : the gtk object to add search bar
"""
def __init__(self, ref_object, gtk_def, gtk_object):
self.ref_object = ref_object
self.gtk_def = gtk_def
self.gtk_object = gtk_object
self.hbox = gtk.HBox()
self.entry = gtk.Entry()
self.button = gtk.Button("Search")
self.button.connect("clicked", self.on_search)
self.hbox.pack_start(self.entry)
self.hbox.pack_start(self.button, False, False, 2)
self.vbox = gtk.VBox()
self.vbox.pack_start(self.hbox, False, False, 2)
self.vbox.pack_start(self.gtk_object)
def on_search(self, widget):
"""Event listener. Launch search"""
if isinstance(self.ref_object, DistributedDetection):
self.gtk_def.clear()
Gtk_Main.Gtk_Main().notebook._add_distributed_anomaly(self.ref_object.error_path,
self.gtk_def,
self.entry.get_text().lower())
elif isinstance(self.ref_object, gtk.TextView):
self._conf_highlight()
else:
self.gtk_def.search(self.entry.get_text().lower())
def _conf_highlight(self):
"""Search pattern in the firewall configuration file"""
textbuffer = self.ref_object.get_buffer()
tag_table = textbuffer.get_tag_table()
c_tag = tag_table.lookup("colored")
if not c_tag:
c_tag = textbuffer.create_tag("colored", foreground="#000000", background="#FFFF00")
text = textbuffer.get_text(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])
textbuffer.delete(textbuffer.get_bounds()[0], textbuffer.get_bounds()[1])
for line in re.split(r'\r\n|\r|\n', text):
for e in re.compile("(" + self.entry.get_text().lower() + ")", re.I).split(line):
if re.search(self.entry.get_text().lower(), e, re.I):
textbuffer.insert_with_tags(textbuffer.get_end_iter(), e, c_tag)
else:
textbuffer.insert_with_tags(textbuffer.get_end_iter(), e)
textbuffer.insert_with_tags(textbuffer.get_end_iter(), '\n') | true |
e82e9a0bc0959198142d609f38bdf6488be3aa58 | Python | 350740378/sklearn | /src/house/House.py | UTF-8 | 14,248 | 3.625 | 4 | [] | no_license | '''
Inspect and visualize the housing dataset, then build training and test
sets in several ways: manual random split, hash-based split, and the
scikit-learn helpers (including stratified sampling by income category).
'''
import numpy as np
import pandas as pd
# Load the California housing data (path is relative to the working directory).
housing = pd.read_csv('./datasets/housing/housing.csv')
#print(housing)
# Category frequencies -- equivalent to SQL: select count(field1) from ... group by
#print(housing['ocean_proximity'].value_counts())
import matplotlib.pyplot as plt
# Histogram of every numerical attribute, 50 bins each.
housing.hist(bins=50,figsize=(20,20))
#plt.show()
# Fix the NumPy RNG seed so the manual random split below is reproducible.
np.random.seed(315)
#print(housing.describe())
#print(housing)
def split_train_test(data, test_ratio):
    """Randomly partition *data* into a (train_set, test_set) pair.

    A fraction ``test_ratio`` of the rows (rounded down) is drawn at
    random for the test set; the remaining rows form the training set.
    Uses the global NumPy RNG, so the split depends on the current seed.
    """
    n_test = int(len(data) * test_ratio)
    shuffled = np.random.permutation(len(data))
    # The first n_test shuffled positions become the test set,
    # everything after them the training set.
    test_rows = shuffled[:n_test]
    train_rows = shuffled[n_test:]
    return data.iloc[train_rows], data.iloc[test_rows]
# Manual 80/20 random split; print the sizes as a quick sanity check.
train_set,test_set = split_train_test(housing,0.2)
print(len(train_set),"train")
print(len(test_set),"test")
#print(train_set)
# (Translation of the Chinese block note below.) If every run produces a
# different train/test split, the fixes are: 1. save the split, or
# 2. fix the random seed. Both break down when the dataset is updated,
# since the whole split gets reshuffled. Hash-based idea (crc32): an
# instance whose 32-bit checksum is below 2**32 * 20% goes to the test
# set, the rest to the training set. The key point is to find a fairly
# stable column to use as the identifier.
'''
如果每次产生的train和test不同,解决方案
1. 将结果保存起来
2. 设置固定的随机种子
产生了新的问题
如果更新数据集,train和test就会被打乱
crc32
>= 2 * 32 * 20%:训练集
< 2 * 32 * 20%:测试集
关键点:找到比较稳定的列作为索引列
'''
from zlib import crc32
def test_set_check1(identifier, test_ratio):
return crc32(np.int64(identifier)) & 0xffffffff < test_ratio * 2 ** 32
# 取hash编码的最后一个字节(0-255),256 * 0.2 = 51 if < 51 :test else train
import hashlib
def test_set_check2(identifier,test_ratio,hash=hashlib.md5):
return hash(np.int64(identifier)).digest()[-1] < 256 * test_ratio
# Deterministic spot checks of the CRC32 split for two adjacent ids.
print(test_set_check1(5439,0.2)) # test  (checksum below the 20% cutoff)
print(test_set_check1(5438,0.2)) # train (checksum above the cutoff)
def split_train_test_by_id(data, test_ratio, id_column):
    """Split *data* into (train, test) using a stable per-id hash so the
    same rows always land in the test set across runs and dataset growth.

    :param data: pandas DataFrame
    :param test_ratio: target fraction of rows for the test set
    :param id_column: name of a stable identifier column
    """
    id_series = data[id_column]
    is_test = id_series.apply(lambda value: test_set_check2(value, test_ratio))
    return data.loc[~is_test], data.loc[is_test]
housing_with_id = housing.reset_index() # 为housing添加一个index索引列
train_set,test_set = split_train_test_by_id(housing_with_id,0.2,'index')
print(train_set)
# 可以使用比较稳定的特征值作为id,如经纬度
housing_with_id["id"] = housing['longitude'] * 1000 + housing['latitude']
#print(housing_with_id)
train_set,test_set = split_train_test_by_id(housing_with_id,0.2,"id")
#print(train_set)
'''
用Scikit-Learn API产生训练集和测试集
'''
from sklearn.model_selection import train_test_split
train_set,test_set = train_test_split(housing,test_size=0.2,random_state=315)
housing['median_income'].hist()
#plt.show()
housing['income_cat'] = np.ceil(housing['median_income']/1.5)
housing['income_cat'].where(housing['income_cat']<5,5.0,inplace=True)
print(housing['income_cat'].value_counts())
housing['income_cat'].hist()
#plt.show()
'''
分层抽样
国家的男女比例: 男:51% 女:49%
抽样后,需要男:51% 女:49%
'''
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1,test_size=0.2,random_state=315)
for train_index,test_index in split.split(housing,housing['income_cat']):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
print(strat_test_set['income_cat'].value_counts()/len(strat_test_set))
print(strat_train_set['income_cat'].value_counts()/len(strat_train_set))
print(housing['income_cat'].value_counts()/len(housing))
train_set,test_set = train_test_split(housing,test_size=0.2,random_state=315)
def income_cat_proportions(data):
    """Return the fraction of rows falling in each ``income_cat`` category."""
    counts = data['income_cat'].value_counts()
    return counts / float(len(data))
compare_props = pd.DataFrame({
"完整数据集":income_cat_proportions(housing),
"分层抽样测试集":income_cat_proportions(strat_test_set),
"随机抽样测试集":income_cat_proportions(test_set),
}).sort_index()
print(compare_props)
# 通过可视化地理数据寻找模式
housing = strat_train_set.copy()
#housing.plot(kind='scatter', x = 'longitude',y='latitude', alpha=0.1)
'''
半径(s:表示每个地区的人口数量),颜色表示房价(c)【红色表示高房价】
'''
housing.plot(kind='scatter',x = 'longitude',y='latitude',alpha=0.4,
s=housing['population']/100,label='population',figsize=(10,7),
c='median_house_value',cmap=plt.get_cmap('jet'),colorbar=True)
#plt.show()
# 用两种方法检测属性之间的相关度
'''
1. 标准相关系数
corr函数获取标准相关系统(皮尔逊相关系数)
相关系数的取值范围:-1到1 越接近1,表示越强的正相关, 越接近-1,表示越强的负相关
0:表示两个属性没有任何关系
2. Pandas的scatter_matrix函数
进行相关度分析的目的:为了选取和房价相关度很强的属性来预测房价
'''
corr_matrix = housing.corr()
print('---------其他属性与median_house_value属性的相关度')
# 人均收入与平均房价相关度非常大
print(corr_matrix['median_house_value'].sort_values(ascending=False))
# 人数和房屋数有非常强的正相关,而房屋平均年龄与房屋数有非常强的负相关
print(corr_matrix['total_rooms'].sort_values(ascending=False))
# 2. scatter_matrix函数
from pandas.tools.plotting import scatter_matrix
attributes = ['median_house_value','median_income','total_rooms','housing_median_age']
# 清除可能有问题的数据
# housing = housing[housing['median_house_value'] < 490000]
scatter_matrix(housing[attributes],figsize=(12,8))
#plt.show()
# 实验不同属性的组合
# 每户的房间数
housing['rooms_per_household'] = housing['total_rooms'] / housing['households']
# 每间房的卧室数
housing['bedrooms_per_room'] = housing['total_bedrooms'] / housing['total_rooms']
# 每户的人数
housing['population_per_household'] = housing['population'] / housing['households']
corr_matrix = housing.corr()
print(corr_matrix['median_house_value'].sort_values(ascending=False))
housing.plot(kind='scatter',x='rooms_per_household', y = 'median_house_value',alpha = 0.1)
# 0,5:水平坐标 0,520000:纵向坐标
plt.axis([0,5,0,520000])
#plt.show()
# 数据清理-填补缺失值
from sklearn.impute import SimpleImputer
# 平均数(mean)、中位数(median)、出现比较频繁的值(most_frequent)、常量(constant)
imputer = SimpleImputer(strategy = 'median')
# 将ocean_proximity列从housing数据集删除
housing_num = housing.drop('ocean_proximity',axis=1)
'''
# 适配数据集
imputer.fit(housing_num)
# 输出每一列的中位数
print(imputer.statistics_)
print(housing_num.median().values)
X = imputer.transform(housing_num) # Numpy数组
print(X)
housing_tr = pd.DataFrame(X,columns=housing_num.columns)
print(housing_tr)
'''
X = imputer.fit_transform(housing_num)
print(X)
housing_tr = pd.DataFrame(X,columns=housing_num.columns)
print(housing_tr)
'''
处理文本和分类属性
'''
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
housing_ocean_proximity = housing['ocean_proximity']
print(housing_ocean_proximity)
# 将文本按枚举类型转换为数值(0到4)
housing_ocean_proximity_encoded = encoder.fit_transform(housing_ocean_proximity) # NumPy
print(housing_ocean_proximity_encoded)
# 获取所有的枚举值
print(encoder.classes_)
'''
带来的问题:单纯根据枚举值转换,会让算法认为相邻的值相似度高,这和实际情况有些不同
解决方案:
二进制:
10000
01000
00100
00010
00001
独热编码
'''
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(categories = 'auto')
housing_ocean_proximity_encoded1 = encoder.fit_transform(housing_ocean_proximity_encoded.reshape(-1,1))
#print(housing_ocean_proximity_encoded.reshape(-1,1))
# 稀疏矩阵(SciPy)
print(housing_ocean_proximity_encoded1.toarray())
# 通过label_binarize将前面的操作合二为一
from sklearn.preprocessing import label_binarize
housing_ocean_proximity_encoded2 = label_binarize(housing_ocean_proximity,['<1H OCEAN','INLAND','ISLAND','NEAR BAY','NEAR OCEAN'],sparse_output=True)
print(housing_ocean_proximity_encoded2.toarray())
'''
自定义转换器
BaseEstimator
TransformerMixin
鸭子类型(duck typing)
fit:返回转换器实例本身
transform:一般返回NumPy数组
'''
from sklearn.base import BaseEstimator,TransformerMixin
class CustomTransformer(BaseEstimator, TransformerMixin):
    """Append per-household ratio features to the housing feature matrix.

    Assumes the column layout of the housing dataset: index 3 =
    total_rooms, 4 = total_bedrooms, 5 = population, 6 = households
    — TODO confirm against the upstream column order.
    """

    def __init__(self, add_bedrooms_per_room=True):
        # Hyperparameter: whether to also emit the bedrooms/room ratio.
        self.add_bedrooms_per_room = add_bedrooms_per_room

    def fit(self, X, y=None):
        # Stateless transformer: nothing to learn from the data.
        return self

    def transform(self, X, y=None):
        # X is expected to be a NumPy array.
        rooms = X[:, 3]
        households = X[:, 6]
        rooms_per_household = rooms / households
        population_per_household = X[:, 5] / households
        if self.add_bedrooms_per_room:
            bedrooms_per_room = X[:, 4] / rooms
            return np.c_[X, rooms_per_household, population_per_household, bedrooms_per_room]
        return np.c_[X, rooms_per_household, population_per_household]
transformer = CustomTransformer(add_bedrooms_per_room=False)
#new_values = transformer.transform(housing.values)
new_values = transformer.fit_transform(housing.values)
print(new_values)
'''
数据转换管道(pipeline)
Pipeline
'''
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline([
('imputer',SimpleImputer(strategy='median')),
('custom',CustomTransformer()),
('std_scaler',StandardScaler())
])
housing_num_tr = num_pipeline.fit_transform(housing_num)
print('-------housing_num_tr----------')
print(housing_num_tr)
class DataFrameSelector(BaseEstimator, TransformerMixin):
    """Pipeline stage that picks named columns out of a DataFrame and
    hands them onward as a raw NumPy array."""

    def __init__(self, attribute_names):
        # Columns to extract, in output order.
        self.attribute_names = attribute_names

    def fit(self, X, y=None):
        # Nothing to fit; the selection is static.
        return self

    def transform(self, X):
        # .values strips the DataFrame down to its underlying ndarray.
        return X[self.attribute_names].values
num_attribs = list(housing_num)
print(num_attribs)
num_pipeline = Pipeline([
('selector', DataFrameSelector(num_attribs)),
('imputer',SimpleImputer(strategy='mean')),
('custom',CustomTransformer()),
('std_scaler', StandardScaler())
])
cat_attribs = ['ocean_proximity']
cat_pipeline = Pipeline([
('selector',DataFrameSelector(cat_attribs)),
('cat_encoder',OneHotEncoder(sparse=False))
])
from sklearn.pipeline import FeatureUnion
# 并行
full_pipeline = FeatureUnion(transformer_list = [
('num_pipeline',num_pipeline),
('cat_pipeline',cat_pipeline)
])
housing_prepared = full_pipeline.fit_transform(housing)
print(housing_prepared)
'''
选择和训练模型
'''
# 每户的房间数
strat_train_set['rooms_per_household'] = strat_train_set['total_rooms'] / strat_train_set['households']
# 每间房的卧室数
strat_train_set['bedrooms_per_room'] = strat_train_set['total_bedrooms'] / strat_train_set['total_rooms']
# 每户的人数
strat_train_set['population_per_household'] = strat_train_set['population'] / strat_train_set['households']
# 每户的房间数
strat_test_set['rooms_per_household'] = strat_test_set['total_rooms'] / strat_test_set['households']
# 每间房的卧室数
strat_test_set['bedrooms_per_room'] = strat_test_set['total_bedrooms'] / strat_test_set['total_rooms']
# 每户的人数
strat_test_set['population_per_household'] = strat_test_set['population'] / strat_test_set['households']
housing_train_prepared = full_pipeline.transform(strat_train_set)
housing_test_prepared = full_pipeline.transform(strat_test_set)
# 用于训练的特征标签
housing_train_labels = strat_train_set['median_house_value'].copy()
# 用于验证的特征标签
housing_test_labels = strat_test_set['median_house_value'].copy()
#####线性回归模型########
from sklearn.linear_model import LinearRegression
linearRegression = LinearRegression()
# 准备训练数据
linearRegression.fit(housing_train_prepared, housing_train_labels)
line_predictResult = linearRegression.predict(housing_test_prepared)
print('预测结果:',line_predictResult)
print('真实结果:',list(housing_test_labels))
train = np.c_[housing_train_prepared[:,:8],housing_train_prepared[:,9:]]
test = np.c_[housing_test_prepared[:,:8],housing_test_prepared[:,9:]]
linearRegression.fit(train,housing_train_labels)
line_predictResult = linearRegression.predict(test)
print('预测结果:',line_predictResult)
print('真实结果:',list(housing_test_labels))
#####决策树##########
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor(random_state=315)
tree_reg.fit(housing_train_prepared, housing_train_labels)
tree_predictResult = tree_reg.predict(housing_test_prepared)
print('预测结果:',tree_predictResult)
print('真实结果:',list(housing_test_labels))
tree_reg.fit(train, housing_train_labels)
tree_predictResult = tree_reg.predict(test)
print('预测结果:',tree_predictResult)
print('真实结果:',list(housing_test_labels))
'''
评估模型的性能
'''
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
line_mse = mean_squared_error(housing_test_labels,line_predictResult)
line_rmse = np.sqrt(line_mse)
print('line_rmse:', line_rmse)
line_mae = mean_absolute_error(housing_test_labels,line_predictResult)
print('line_mae:', line_mae)
tree_mse = mean_squared_error(housing_test_labels,tree_predictResult)
tree_rmse = np.sqrt(tree_mse)
print('tree_rmse:',tree_rmse)
tree_mae = mean_absolute_error(housing_test_labels,tree_predictResult)
print('tree_mae:',tree_mae)
'''
使用交叉验证评估和选择模型
'''
from sklearn.model_selection import cross_val_score
# 线性回归模型
train = np.c_[housing_prepared[:,:8],housing_prepared[:,9:]]
housing_labels = housing['median_house_value'].copy()
scores = cross_val_score(linearRegression,train,housing_labels,scoring='neg_mean_squared_error',cv=10)
print(scores)
line_rmse_scores = np.sqrt(-scores)
print(line_rmse_scores)
def display_scores(scores):
    """Print a cross-validation score array plus its mean and std-dev."""
    for label, value in (('scores:', scores),
                         ('mean:', scores.mean()),
                         ('Standard deviation:', scores.std())):
        print(label, value)
display_scores(line_rmse_scores)
# 决策树
scores = cross_val_score(tree_reg,train,housing_labels,scoring='neg_mean_squared_error',cv=10)
tree_rmse_scores = np.sqrt(-scores)
display_scores(tree_rmse_scores) | true |
38c49c7c43449742b846cc6dff222448fc26084c | Python | Shivvrat/Machine-Learning-Algorithms | /bayesian-networks/bayesian-networks-master/temp_mixture_of_trees_using_random_forest.py | UTF-8 | 3,667 | 2.703125 | 3 | [
"MIT"
] | permissive | import itertools
import math
import random
from operator import itemgetter
import import_data
import mixture_of_trees_using_EM
import numpy as np
import tree_bayesian_network
def creating_k_bags(train_dataset, k):
    """Draw *k* bootstrap bags from *train_dataset*.

    Each bag samples as many rows as the dataset has, with replacement.

    :param train_dataset: 2-D NumPy array of examples
    :param k: number of bags to create
    :return: dict mapping bag index -> resampled array
    """
    n = np.shape(train_dataset)[0]
    bags = {}
    for bag_index in range(k):
        rows = np.random.choice(n, n, replace=True)
        bags[bag_index] = train_dataset[rows]
    return bags
def run_model(train_dataset, test_dataset, valid_dataset, k, r, num_of_iterations):
    """Train a random-forest-style mixture of Chow-Liu trees and score it.

    For each of ``num_of_iterations`` runs: draw k bootstrap bags, fit one
    tree Bayesian network per bag (with r randomly chosen mutual-information
    entries zeroed to decorrelate the trees), and accumulate the
    test-set log-likelihood weighted by the mixture probabilities.

    Returns (mean, std) of the per-iteration averaged log-likelihoods.
    NOTE(review): valid_dataset is accepted but unused here; callers swap
    test/valid positionally to choose the evaluation set — confirm intent.
    """
    log_likelihood_for_each_iteration = np.zeros((num_of_iterations, 1))
    for each_iteration in range(num_of_iterations):
        # One bootstrap bag per mixture component.
        train_dataset_bags = creating_k_bags(train_dataset, k)
        mixture_probabilities = mixture_of_trees_using_EM.initialize_mixture_probabilities(k)
        for each_k in range(k):
            parameters = tree_bayesian_network.find_parameters(train_dataset_bags[each_k])
            mutual_information = tree_bayesian_network.compute_mutual_information(train_dataset_bags[each_k],
                                                                                 parameters)
            # Pick r random (row, col) pairs of the MI matrix to zero out,
            # injecting randomness into each tree's structure.
            zero_mutual_information_indices_feature_1 = np.reshape(np.random.choice(np.shape(mutual_information)[0], r),
                                                                   (r, 1))
            zero_mutual_information_indices_feature_2 = np.reshape(np.random.choice(np.shape(mutual_information)[0], r),
                                                                   (r, 1))
            zero_indices = np.concatenate(
                (zero_mutual_information_indices_feature_1, zero_mutual_information_indices_feature_2), axis=1)
            for each_row in zero_indices:
                mutual_information[each_row[0], each_row[1]] = 0
            # Chow-Liu step: maximum spanning tree over (masked) MI.
            mst = tree_bayesian_network.find_max_spanning_tree(mutual_information)
            edges_dict, edges = tree_bayesian_network.get_edges(mst)
            test_log_likelihood = tree_bayesian_network.test_log_likelihood(edges_dict, edges, test_dataset, parameters)
            # Accumulate log-likelihood plus the component's log-weight;
            # np.ma.log2 masks (rather than warns on) log2(0).
            log_likelihood_for_each_iteration[each_iteration] = log_likelihood_for_each_iteration[
                                                                    each_iteration] + test_log_likelihood + np.ma.log2(
                mixture_probabilities[each_k])
        # Average over the k components for this iteration.
        log_likelihood_for_each_iteration[each_iteration] = (log_likelihood_for_each_iteration[each_iteration]) / float(
            k)
    log_likelihood_mean = np.mean(log_likelihood_for_each_iteration)
    log_likelihood_standard_deviation = np.std(log_likelihood_for_each_iteration)
    return log_likelihood_mean, log_likelihood_standard_deviation
def validation_of_model(dataset_name, num_of_iterations):
    """Grid-search k (number of trees) and r (number of zeroed
    mutual-information entries) on the validation set, then evaluate the
    best pair on the test set.

    :param dataset_name: name passed to import_data to load the splits
    :param num_of_iterations: iterations per run_model evaluation
    :return: (mean log-likelihood, std, best_k, best_r)
    """
    train_dataset, test_dataset, valid_dataset = import_data.import_data(dataset_name)
    k = range(5, 21, 5)
    r = range(10, 1000, 100)
    best_k = 5
    best_r = 10
    best_log_likelihood = -math.inf
    for each in itertools.product(k, r):
        # Here I am testing on the validation dataset
        log_likelihood_mean, log_likelihood_standard_deviation = run_model(train_dataset, valid_dataset, test_dataset, each[0], each[1], num_of_iterations)
        if log_likelihood_mean > best_log_likelihood:
            # Bug fix: record the new best score. Previously this was never
            # updated, so every pair beat -inf and the loop simply kept the
            # *last* (k, r) tried instead of the best one.
            best_log_likelihood = log_likelihood_mean
            best_k = each[0]
            best_r = each[1]
    # Final evaluation of the winning hyperparameters on the test set.
    log_likelihood_mean_final, log_likelihood_standard_deviation_final = run_model(train_dataset, test_dataset, valid_dataset, best_k, best_r, num_of_iterations)
    return log_likelihood_mean_final, log_likelihood_standard_deviation_final, best_k, best_r
f5127da03212014d98a49226472726525da346ad | Python | jdynamite/mangotools | /rig/maya/control.py | UTF-8 | 19,523 | 2.609375 | 3 | [
"MIT"
] | permissive | try:
from maya import cmds, mel
except ImportError:
print("Must be in a maya environment!")
raise
# native
import os
import json
import pickle
from six import string_types
from rig.config import naming
from rig.utils import dataIO
from rig.maya import get_logger
from rig.maya.base import MayaBaseNode
from rig.maya.curve import create_from_points
log = get_logger(__name__)
class Control(MayaBaseNode):
"""
Convenience class for common control operations,
like assigning control shapes, colors, etc
:param str name:
:param tuple|list position:
:param str align_to:
:param str parent:
"""
NODETYPE = MayaBaseNode.CONFIG.CONTROL
COL_TO_INT = dict(red=13, yellow=17, blue=6, green=7)
INT_TO_COL = {v: k for k, v in COL_TO_INT.items()}
EXT = '.shapes'
def __init__(self,
name,
role=None,
descriptor=None,
region=None,
side=None):
super(Control, self).__init__(name,
node_type=self.NODETYPE,
role=role,
descriptor=descriptor,
region=region,
side=side)
# Try to populate naming properties
if not any([role, descriptor, region, side]):
self.decompose_name()
# Tag object as controller
if not cmds.controller(query=True, isController=True):
cmds.controller(name)
@classmethod
def create(cls,
name=None,
descriptor=None,
role=None,
region=None,
side=None,
position=(0,0,0),
space='world',
snap_to=None,
color='yellow',
shape='circle'):
"""
Create a new controller in maya and instance as class
"""
if not name:
name = cls.compose_name(node_type=cls.NODETYPE, descriptor=descriptor, role=role, region=region, side=side)
name = cmds.createNode("transform", name=name)
control = cls(name, descriptor=descriptor, role=role, region=region, side=side)
if snap_to:
control.snap_to(snap_to)
else:
control.set_position(position, space=space)
control.set_shape(shape)
control.color = color
return control
@classmethod
def list_shapes(cls):
"""
List available shapes in control library
"""
shapes = dataIO.load(cls.CONFIG.CONTROL_SHAPES_FILE)
for shape in shapes:
print(shape)
@classmethod
def get_default_path(cls, filename=None):
"""
Get default directory where control shapes for this scene can be saved
:param str filename: name for file, not including directory
:return str path: path to file
"""
scene_path = cmds.file(query=True, sceneName=True)
scene_dir, scene_name = os.path.dirname(scene_path)
if filename and isinstance(filename, string_types):
if not filename.endswith(cls.EXT):
filename += cls.EXT
return os.path.join(scene_dir, filename)
else:
return os.path.join(scene_dir, scene_name.split('.')[0] + cls.EXT)
@classmethod
def set_shapes(cls, objects, shape):
if type(objects) not in [list, tuple]:
return
objects = [o for o in objects if isinstance(o, cls)]
map(lambda o: o.set_shape(shape), objects)
@classmethod
def mirror_shape(cls):
"""
Mirror control shapes based on selection
"""
sel = cmds.ls(selection=True) or []
if len(sel) != 2:
err = "Please select two curves. Driver -> Driven"
raise RuntimeError(err)
driver = sel[0]
driven = sel[1]
driver_shapes = cmds.listRelatives(driver, shapes=True, noIntermediate=True) or []
driven_shapes = cmds.listRelatives(driven, shapes=True, noIntermediate=True) or []
if not len(driver_shapes) or not len(driven_shapes):
err = "Couldn't find any shapes attached to one or both objects."
raise RuntimeError(err)
# Format template for accessing cv's
cv = "{0}.cv[{1}]"
for driver_shape, driven_shape in zip(driver_shapes, driven_shapes):
cvs = cmds.getAttr("{0}.cp".format(driver_shape), s=1)
cvs_driven = cmds.getAttr("{0}.cp".format(driven_shape), s=1)
if cvs != cvs_driven:
raise RuntimeError()
for i in range(cvs):
driver_cv = cv.format(driver_shape, str(i))
driven_cv = cv.format(driven_shape, str(i))
driver_pos = cmds.xform(driver_cv, query=True, worldSpace=True, translation=True)
driven_pos = [driver_pos[0] * -1, driver_pos[1], driver_pos[2]]
cmds.xform(driven_cv, worldSpace=True, translation=driven_pos)
@classmethod
def get_controls(cls):
"""
Get all controls in scene as class instances
"""
return [cls(c) for c in cmds.controllers(allControllers=True)]
@classmethod
def save_shapes(cls):
"""
Save scene control shapes onto file relative to current scene
"""
shapes_data = {}
for ctrl in cls.get_controls():
if ctrl not in shapes_data:
shapes_data[ctrl] = {}
for shape in ctrl.shapes:
if shape not in shapes_data[ctrl]:
shapes_data[ctrl][shape] = {}
curve_info = cmds.createNode("curveInfo")
input_plug = "{0}.inputCurve".format(curve_info)
shape_plug = "{0}.worldSpace[0]".format(shape)
cmds.connectAttr(shape_plug, input_plug)
knots = "{0}.knots".format(curve_info)
deg = "{0}.degree".format(shape)
cvs = "{0}.cv[*]".format(shape)
degree = cmds.getAttr(deg)
period = cmds.getAttr("{0}.f".format(shape))
positions = cmds.getAttr(cvs)
# check empty positions
for pos in positions:
if all(p == 0 for p in pos):
cmds.select(shape)
mel.eval('doBakeNonDefHistory( 1, {"prePost"});')
cmds.select(clear=True)
positions = cmds.getAttr(cvs)
degree = cmds.getAttr(deg)
period = cmds.getAttr("{0}.f".format(shape))
break
knots = cmds.getAttr(knots)[0]
if period > 0:
for i in range(degree):
positions.append(positions[i])
knots = knots[:len(positions) + degree - 1]
shapes_data[ctrl][shape]['knots'] = knots
shapes_data[ctrl][shape]['period'] = period
shapes_data[ctrl][shape]['positions'] = positions
shapes_data[ctrl][shape]['degree'] = degree
cplug = "{0}.overrideEnabled"
shapes_data[ctrl][shape]['color'] = 'yellow'
for obj in [ctrl, shape]:
if cmds.getAttr(cplug.format(obj)):
color = "{0}.overrideColor".format(obj)
shapes_data[ctrl][shape]['color'] = cmds.getAttr(color)
cmds.delete(curve_info)
with open(cls.get_default_path(), 'rb') as control_file:
pickle.dump(shapes_data, control_file, pickle.HIGHEST_PROTOCOL)
@classmethod
def load_shapes(cls):
path = cmds.fileDialog(mode=0, directoryMask="*.shapes")
success = "Successfuly loaded shape {0} for {1}."
err = "{0} does not exist, skipping."
with open(path, 'rb') as ctrl_file:
shapes_data = pickle.load(ctrl_file)
for obj in shapes_data:
if not cmds.objExists(obj):
log.error(err.format(obj))
continue
# parent does exist
# delete shapes from obj
cmds.delete(cmds.listRelatives(obj, s=True, type="nurbsCurve"))
# initialize object as curve
con = cls.compose(descriptor=obj, side=cls.CONFIG.LEFT)
for shape in shapes_data[obj]:
shape = shapes_data[obj][shape]
pos = shape['positions']
dg = shape['degree']
knots = shape['knots']
color = shape['color']
period = shape['period']
p = True if period > 0 else False
con.color = color
curve = cmds.curve(degree=dg, point=pos, knot=knots, per=p)
con.get_shape_from(curve, destroy=True, replace=False)
log.info(success.format(shape, obj))
@property
def color(self):
numeric = cmds.getAttr('{}.overrideColor'.format(self.long_name))
return self.INT_TO_COL.get(numeric, numeric)
@color.setter
def color(self, val):
"""
Sets colors for shapes under this control
:param str|int val: color to set for this object's shapes
"""
err = "Must pass an int or string for colors"
assert isinstance(val, string_types) or isinstance(val, int), err
col = self.COL_TO_INT[val] if isinstance(val, string_types) else val
cmds.setAttr("{}.overrideEnabled".format(self.long_name), 1)
cmds.setAttr("{}.overrideColor".format(self.long_name), col)
    @property
    def null(self):
        """
        Get furthest ancestor that is a null to this object.
        Returns None when the direct parent is not a null group.
        """
        p = cmds.listRelatives(self.long_name, parent=True)
        # Only proceed if the immediate parent's name contains the NULL token.
        if p and self.CONFIG.NULL in p[0]:
            p = MayaBaseNode(p[0])
            old_p = p
        else:
            log.debug("Parent {} is not a null".format(p))
            return None
        # Walk up while each ancestor is still a null; old_p trails one
        # behind p so it ends on the topmost null when the loop exits.
        while p and self.CONFIG.NULL in p.short_name:
            old_p = p
            p = old_p.parent
        return old_p
@property
def parent(self):
return self.null
@parent.setter
def parent(self, new_parent):
if isinstance(new_parent, MayaBaseNode):
new_parent = new_parent.long_name
if not self.null:
try:
cmds.parent(self.long_name, new_parent)
log.debug("Parented {} under {}".format(self.nice_name, new_parent))
self._parent = new_parent
except RuntimeError:
msg = "Failed to parent {} under {}".format(self.short_name, new_parent)
log.warning(msg, exc_info=True)
elif new_parent != self.null.parent:
log.debug("Parenting null {} to parent: {}".format(self.null.nice_name, new_parent))
self.null.parent = new_parent
def set_shape(self, new_shape, replace=True):
"""
Sets a new shape under this object
:param str shape: shape to set for this object
:param bool replace:
"""
if new_shape.lower() == 'circle':
circle = cmds.circle(constructionHistory=False)[0]
self.get_shape_from(circle, destroy=True, replace=replace)
else:
# call from prebuilt control shapes saved out to a file
controlDict = dataIO.load(self.CONFIG.CONTROL_SHAPES_FILE)
for child_shape in controlDict[new_shape]["shapes"]:
positions = controlDict[new_shape]["shapes"][child_shape]["positions"]
degree = controlDict[new_shape]["shapes"][child_shape]["degree"]
curve = create_from_points(positions, degree, self.nice_name + "_temp")
self.get_shape_from(curve, destroy=True, replace=replace)
def set_position(self, position, space='world'):
"""
Overloaded method, sets position on parent null/offset group
if one exists
"""
if self.null:
world = space.lower() == 'world'
position = tuple(position)
cmds.xform(self.null, worldSpace=world, translation=position)
else:
super(Control, self).set_position(position, space)
def set_rotation(self, rotation, space='world'):
"""
Overloaded method, sets rotation on parent null/offset group
if one exists
"""
if self.null:
world = space.lower() == 'world'
cmds.xform(self.null, worldSpace=world, rotation=rotation)
else:
super(Control, self).set_rotation(rotation, space)
    def mirror(self):
        """
        Returns the mirrored control, aligned to opposite side
        if an object exists there.

        NOTE(review): this method mixes `self.aligned_to` and
        `self.align_to`, and calls the constructor with keywords
        (position/align_to/shape) that __init__ does not accept —
        likely stale against the current API; confirm before use.
        """
        sideLower = self.side.lower()
        otherSide = ""
        align_to = ""
        # Maps of lowercase side tokens to their mirrored counterparts.
        mirror_map_left = {"left": "right", "lf": "rt", "l": "r"}
        mirror_map_right = {"right": "left", "rt": "lf", "r": "l"}

        if sideLower in mirror_map_left.keys():
            otherSide = list(mirror_map_left[sideLower])
        elif sideLower in mirror_map_right.keys():
            otherSide = list(mirror_map_right[sideLower])

        # Re-apply the original side token's capitalization pattern.
        # NOTE(review): if no mapping matched, otherSide is still the empty
        # string here and item assignment would raise before the explicit
        # RuntimeError below is reached.
        for i, char in enumerate(self.side):
            if char.isupper():
                otherSide[i] = otherSide[i].upper()

        if not len(otherSide):
            raise RuntimeError("Could not find opposite side.")

        otherSide = "".join(otherSide)

        if cmds.objExists(self.aligned_to):
            align_to = self.align_to.replace(self.side, otherSide)
        else:
            align_to = "world"

        newName = self.name.replace(self.side, otherSide)
        return type(self)(name=newName, position=self.position, align_to=align_to, shape=self.shape)
def set_to_origin(self):
"""
Pops control/null to origin
"""
if cmds.objExists(self.null):
target = self.null
else:
target = self.long_name
cmds.xform(target, cp=True)
temp_grp = cmds.group(em=True, n='temp_grp_#')
cmds.delete(cmds.pointConstraint(temp_grp, target))
cmds.delete(temp_grp)
    def get_shape_from(self, obj, destroy=True, replace=True):
        """
        Copies the shape(s) from passed object, with the option
        to destroy that object or not, and the option to replace
        all existing shapes.

        :param str obj: transform whose curve shapes are adopted
        :param bool destroy: if False, a duplicate is consumed instead of obj
        :param bool replace: if True, delete this control's current shapes
        """
        if not destroy:
            # Work on a duplicate so the caller's object survives; the
            # duplicate's (emptied) transform is what gets deleted below.
            obj = cmds.duplicate(obj, rc=True, name="temp_shape_#")
        if replace:
            if self.shapes:
                log.info("Deleting shapes: {}".format(self.shapes))
                cmds.delete(self.shapes)
        # Parent the donor under this control and zero its local transform
        # so shapes land in this control's space before reparenting.
        cmds.parent(obj, self.long_name)
        cmds.xform(obj, objectSpace=True, translation=(0, 0, 0), ro=(0, 0, 0), scale=(1, 1, 1))
        obj_shapes = cmds.listRelatives(obj, shapes=True)
        for shape in obj_shapes:
            # shape=True moves the shape node itself under this transform.
            cmds.parent(shape, self.long_name, relative=True, shape=True)
            cmds.rename(shape, "%sShape#" % self.short_name)
        # Remove the now shape-less donor transform.
        cmds.delete(obj)
def offset(self, n=1):
"""
Creates null or offset groups above this control
:param int n: number of offsets to create above
"""
i = 0
while n > i:
self.insert_parent()
i += 1
def insert_parent(self):
"""
"""
# Record current parent
orig_par = self.parent # could be None
# Get naming flags
name_args = self.as_dict()
name_args.update(dict(node_type=self.CONFIG.NULL))
null_name = self.compose_name(**name_args)
log.debug("New null name is: {}".format(null_name))
dup = MayaBaseNode(cmds.duplicate(self.short_name, name=null_name)[0])
if dup.shapes:
cmds.delete(dup.shapes)
# Parent this control under duplicate
self.parent = dup
# Parent duplicate under my original parent
if orig_par:
dup.parent = orig_par
    def drive_constrained(self, obj, p=False, r=False, s=False, o=False):
        """
        Establish driving relationships between control and another object.
        p = position, r = rotation, s = scale, o = maintain offset.
        Silently does nothing if obj does not exist.
        """
        if not cmds.objExists(obj):
            return
        if s:
            cmds.scaleConstraint(self.name, obj, mo=o)
        # p and r together collapse into a single parentConstraint;
        # otherwise each gets its dedicated constraint type.
        if p and r:
            cmds.parentConstraint(self.name, obj, mo=o)
        elif p and not r:
            cmds.pointConstraint(self.name, obj, mo=o)
        elif r and not p:
            cmds.orientConstraint(self.name, obj, mo=o)
def drive_parented(self, obj):
"""
parent obj to control directly
"""
if isinstance(obj, string_types):
if cmds.objExists(obj):
cmds.parent(obj, self.name)
else:
err = "Couldn't find passed obj: {0}"
raise RuntimeError(err.format(obj))
def space_switch(self, spaces, aliases=None):
"""
Add space switches to this control object
:param list spaces: A list of spaces
"""
# Arg check
assert isinstance(spaces, list), "Pass spaces as a list"
err = "One or more passed spaces does not exist."
assert all(cmds.objExists(o) for o in spaces), err
spaces = [MayaBaseNode(n) for n in spaces]
parent_con = MayaBaseNode(cmds.parentConstraint(spaces, self.null, maintainOffset=True)[0])
# Figure out how the attribute of weights looks like
# add SPACES display attr in control
if not aliases:
prefixes = [n.nice_name.split(self.delimiter)[0] for n in spaces]
enum_names = [n.nice_name.lstrip(prefix) for prefix, n in zip(prefixes, spaces)]
else:
enum_names = aliases
# Attr names refers to the attributes on the parent constraint
attr_names = [s.nice_name + 'W' + str(i) for i,s in enumerate(spaces)]
# This dictionary maps space's short names to the parent constraint attributes
attr_dict = {k.short_name:v for k,v in zip(spaces, attr_names)}
# Add attributes in this control for spaces
self.add_attr('SPACE', at='enum', enumName='-' * 10, h=False, k=True)
self.add_attr('spaces', at='enum', enumName=':'.join(enum_names), h=False, k=True)
# Lock displayable space enum
cmds.setAttr(self.plug('SPACE'), lock=True)
spaces_set = set(spaces)
# Connect spaces through set driven keys
for enum_name, space in zip(enum_names, spaces):
cmds.setAttr(self.plug('spaces'), enum_names.index(enum_name))
for other_space in spaces_set.difference([space]):
parent_con.set_attr(attr_dict[other_space.short_name], 0)
attr = attr_dict[other_space.short_name]
cmds.setDrivenKeyframe(parent_con.plug(attr), cd=self.plug('spaces'))
attr = attr_dict[space.short_name]
parent_con.set_attr(attr_dict[space.short_name], 1)
cmds.setDrivenKeyframe(parent_con.plug(attr), cd=self.plug('spaces')) | true |
524c2a65a5c8d6f95b2509c73764fe17b281535d | Python | anniekovac/master_mussels | /topology.py | UTF-8 | 2,562 | 3.15625 | 3 | [] | no_license | from data_structures import aPad, aMussel
import random, numpy, os
import util
import time
class Topology(object):
    """
    This class will define number of aMussels and
    aPads on the surface, their coordinates, and it will
    be able to plot them.
    """
    def __init__(self):
        # all_agents: combined list of every agent (mussels + pads)
        self.all_agents = []
        # mussels: aMussel instances placed on the surface
        self.mussels = []
        # pads: aPad instances placed on the surface
        self.pads = []
        # deltat: simulation time step; populated externally
        self.deltat = None

    def plot_topology(self, save=False, annotate_energy=True, order_of_passing=False, title=None):
        """
        This function is used for 2D plotting
        topology. Mussels are marked with one colour,
        Pads are marked with another.
        :param save: boolean (if you want to save a figure - saving with name of this second so it's unique)
        :param annotate_energy: boolean (if you want to write energy levels of agents on plots)
        :param order_of_passing: boolean (draw visiting order as a connecting line + labels)
        :param title: optional plot title
        """
        # Imported lazily so headless use of Topology avoids matplotlib.
        import matplotlib.pyplot as plt
        # Sort mussels by visiting order so the connecting line is drawn in sequence.
        self.mussels.sort(key=lambda x: x.order_of_passing) #, reverse=True)
        mussels_x = [item.coordinates[0] for item in self.mussels]
        mussels_y = [item.coordinates[1] for item in self.mussels]
        area = numpy.pi * (15 * 1) ** 2 # 0 to 15 point radii
        fig, ax = plt.subplots()
        ax.scatter(mussels_x, mussels_y, s=area, alpha=0.5, c="r", label="Mussels")
        if order_of_passing:
            # Line through mussels in visiting order.
            ax.plot(mussels_x, mussels_y)
        pads_x = [item.coordinates[0] for item in self.pads]
        pads_y = [item.coordinates[1] for item in self.pads]
        area = numpy.pi * (15 * 1) ** 2
        ax.scatter(pads_x, pads_y, s=area, alpha=0.5, c="b", label="Pads")
        plt.legend()
        # Annotate each mussel with energy (upper-right) and/or order (lower-left).
        for (i, mussel) in enumerate(self.mussels):
            if annotate_energy:
                ax.annotate(str(mussel.energy), (mussels_x[i] + 0.05, mussels_y[i] + 0.05))
            if order_of_passing:
                ax.annotate(str(mussel.order_of_passing), (mussels_x[i] - 0.08, mussels_y[i] - 0.08))
        for (i, apad) in enumerate(self.pads):
            if annotate_energy:
                ax.annotate(str(apad.energy), (pads_x[i] + 0.05, pads_y[i] + 0.05))
        plt.grid()
        plt.xlabel("X coordinates of agents")
        plt.ylabel("Y coordinates of agents")
        if title:
            plt.title(title)
        if save:
            # Timestamp (dots stripped) keeps saved figure names unique.
            plt.savefig(str(time.time()).replace(".", "")+".jpg")
        else:
            plt.show()
if __name__ == '__main__':
topology = util.parser(filename=os.path.join(os.getcwd(), "init_files", "evolutionary_init.txt"))
#topology.plot_topology(order_of_passing=True)
| true |
65e49c34fd3f21eb8678e38b54731337be58928e | Python | bhushanwankhede/Svxlink-VOIP-implementation-using-raspberry-pi-3 | /sender.py | UTF-8 | 608 | 2.78125 | 3 | [] | no_license | import socket # Import socket module
s = socket.socket() # Create a socket object
host = '192.168.1.205' # Remote Server Address
port = 60000 # Reserve a port for your service.
filename = "02_Rozana_SongsMp3_Com_.wav"
addr=(host,port)
buf=1024
s.connect((host, port))
s.sendto(filename,addr)
status = s.recv(128)
print status
f=open(filename,"rb")
data = f.read(buf)
while (data):
if(s.sendto(data,addr)):
print "sending ..."
data = f.read(buf)
print('Successfully sent the file')
f.close()
s.close()
print('connection closed')
| true |
cfc7fa64d80b52878c52dcb27b8f0c1fd0b91b28 | Python | sajevk/Computational-Social-Network-Analysis | /Laboratory 2/laplacian2.py | UTF-8 | 18,523 | 3 | 3 | [] | no_license | import networkx as nx
from itertools import product
import matplotlib.pyplot as plt
from scipy.spatial import Delaunay
from scipy.cluster.vq import vq, kmeans
import numpy as np
import scipy as sp
import random
import platform
import community
import operator
facebook = "facebook_combined.txt"
amazon = "Amazon0301.txt"
# Reference code for networkx from https://networkx.readthedocs.io/en/latest/_modules/networkx/algorithms/community/quality.html
def modularity(G, communities, weight='weight'):
r"""Returns the modularity of the given partition of the graph.
Modularity is defined in [1]_ as
.. math::
Q = \frac{1}{2m} \sum_{ij} \left( A_{ij} - \frac{k_ik_j}{2m}\right)
\delta(c_i,c_j)
where *m* is the number of edges, *A* is the adjacency matrix of
`G`, :math:`k_i` is the degree of *i* and :math:`\delta(c_i, c_j)`
is 1 if *i* and *j* are in the same community and 0 otherwise.
Parameters
----------
G : NetworkX Graph
communities : list
List of sets of nodes of `G` representing a partition of the
nodes.
Returns
-------
Q : float
The modularity of the paritition.
Raises
------
NotAPartition
If `communities` is not a partition of the nodes of `G`.
Examples
--------
>>> G = nx.barbell_graph(3, 0)
>>> nx.algorithms.community.modularity(G, [{0, 1, 2}, {3, 4, 5}])
0.35714285714285704
References
----------
.. [1] M. E. J. Newman *Networks: An Introduction*, page 224.
Oxford University Press, 2011.
"""
# if not is_partition(G, communities):
# raise NotAPartition(G, communities)
multigraph = G.is_multigraph()
directed = G.is_directed()
m = G.size(weight=weight)
if directed:
out_degree = dict(G.out_degree(weight=weight))
in_degree = dict(G.in_degree(weight=weight))
norm = 1 / m
else:
out_degree = dict(G.degree(weight=weight))
in_degree = out_degree
norm = 1 / (2 * m)
def val(u, v):
try:
if multigraph:
w = sum(d.get(weight, 1) for k, d in G[u][v].items())
else:
w = G[u][v].get(weight, 1)
except KeyError:
w = 0
# Double count self-loops if the graph is undirected.
if u == v and not directed:
w *= 2
return w - in_degree[u] * out_degree[v] * norm
Q = sum(val(u, v) for c in communities for u, v in product(c, repeat=2))
return Q * norm
# Performance and associated helper functions taken from networkx source code...
def intra_community_edges(G, partition):
"""Returns the number of intra-community edges according to the given
partition of the nodes of `G`.
`G` must be a NetworkX graph.
`partition` must be a partition of the nodes of `G`.
The "intra-community edges" are those edges joining a pair of nodes
in the same block of the partition.
"""
return sum(G.subgraph(block).size() for block in partition)
def inter_community_edges(G, partition):
"""Returns the number of inter-community edges according to the given
partition of the nodes of `G`.
`G` must be a NetworkX graph.
`partition` must be a partition of the nodes of `G`.
The *inter-community edges* are those edges joining a pair of nodes
in different blocks of the partition.
Implementation note: this function creates an intermediate graph
that may require the same amount of memory as required to store
`G`.
"""
# Alternate implementation that does not require constructing a new
# graph object (but does require constructing an affiliation
# dictionary):
#
# aff = dict(chain.from_iterable(((v, block) for v in block)
# for block in partition))
# return sum(1 for u, v in G.edges() if aff[u] != aff[v])
#
return nx.quotient_graph(G, partition, create_using=nx.MultiGraph()).size()
def inter_community_non_edges(G, partition):
"""Returns the number of inter-community non-edges according to the
given partition of the nodes of `G`.
`G` must be a NetworkX graph.
`partition` must be a partition of the nodes of `G`.
A *non-edge* is a pair of nodes (undirected if `G` is undirected)
that are not adjacent in `G`. The *inter-community non-edges* are
those non-edges on a pair of nodes in different blocks of the
partition.
Implementation note: this function creates two intermediate graphs,
which may require up to twice the amount of memory as required to
store `G`.
"""
# Alternate implementation that does not require constructing two
# new graph objects (but does require constructing an affiliation
# dictionary):
#
# aff = dict(chain.from_iterable(((v, block) for v in block)
# for block in partition))
# return sum(1 for u, v in nx.non_edges(G) if aff[u] != aff[v])
#
return inter_community_edges(nx.complement(G), partition)
def performance(G, partition):
"""Returns the performance of a partition.
The *performance* of a partition is the ratio of the number of
intra-community edges plus inter-community non-edges with the total
number of potential edges.
Parameters
----------
G : NetworkX graph
A simple graph (directed or undirected).
partition : sequence
Partition of the nodes of `G`, represented as a sequence of
sets of nodes. Each block of the partition represents a
community.
Returns
-------
float
The performance of the partition, as defined above.
Raises
------
NetworkXError
If `partition` is not a valid partition of the nodes of `G`.
References
----------
.. [1] Santo Fortunato.
"Community Detection in Graphs".
*Physical Reports*, Volume 486, Issue 3--5 pp. 75--174
<http://arxiv.org/abs/0906.0612>
"""
# Compute the number of intra-community edges and inter-community
# edges.
intra_edges = intra_community_edges(G, partition)
inter_edges = inter_community_non_edges(G, partition)
# Compute the number of edges in the complete graph (directed or
# undirected, as it depends on `G`) on `n` nodes.
#
# (If `G` is an undirected graph, we divide by two since we have
# double-counted each potential edge. We use integer division since
# `total_pairs` is guaranteed to be even.)
n = len(G)
total_pairs = n * (n - 1)
if not G.is_directed():
total_pairs //= 2
return (intra_edges + inter_edges) / total_pairs
def readgraph(readedges):
num_nodes = 100
x = [random.random() for i in range(num_nodes)]
y = [random.random() for i in range(num_nodes)]
x = np.array(x)
y = np.array(y)
# Make a graph with num_nodes nodes and zero edges
# Plot the nodes using x,y as the node positions
graph = nx.Graph()
for i in range(num_nodes):
node_name = str(i)
graph.add_node(node_name)
# Now add some edges - use Delaunay tesselation
# to produce a planar graph. Delaunay tesselation covers the
# convex hull of a set of points with triangular simplices (in 2D)
points = np.column_stack((x, y))
dl = Delaunay(points)
tri = dl.simplices
if readedges:
edges = np.zeros((2, 6 * len(tri)), dtype=int)
data = np.ones(6 * len(points))
j = 0
for i in range(len(tri)):
edges[0][j] = tri[i][0]
edges[1][j] = tri[i][1]
j += 1
edges[0][j] = tri[i][1]
edges[1][j] = tri[i][0]
j += 1
edges[0][j] = tri[i][0]
edges[1][j] = tri[i][2]
j += 1
edges[0][j] = tri[i][2]
edges[1][j] = tri[i][0]
j += 1
edges[0][j] = tri[i][1]
edges[1][j] = tri[i][2]
j += 1
edges[0][j] = tri[i][2]
edges[1][j] = tri[i][1]
j += 1
data = np.ones(6 * len(tri))
adjacency_matrix = sp.sparse.csc_matrix((data, (edges[0, :], edges[1, :])))
for i in range(adjacency_matrix.nnz):
adjacency_matrix.data[i] = 1.0
graph = nx.to_networkx_graph(adjacency_matrix)
return graph
def readgraph(id, state=False):
if id == 'facebook':
print('Analysing Facebook community')
if state:
graph = nx.read_edgelist(facebook,create_using=nx.DiGraph())
else:
graph = nx.read_edgelist(facebook)
if id == 'amazon':
print('Analysing Amazon community')
if state:
graph = nx.read_edgelist(amazon, create_using=nx.DiGraph())
else:
graph = nx.read_edgelist(amazon)
return graph
def readpositions(graph_size):
x = [random.random() for i in range(graph_size)]
y = [random.random() for i in range(graph_size)]
x = np.array(x)
y = np.array(y)
pos = dict()
for i in range(graph_size):
pos[i] = x[i], y[i]
return pos
def read_default(graph):
num_nodes = graph.number_of_nodes()
A = nx.adjacency_matrix(graph)
x = [random.random() for i in range(num_nodes)]
y = [random.random() for i in range(num_nodes)]
x = np.array(x)
y = np.array(y)
# Now add some edges - use Delaunay tesselation
# to produce a planar graph. Delaunay tesselation covers the
# convex hull of a set of points with triangular simplices (in 2D)
points = np.column_stack((x, y))
dl = Delaunay(points)
tri = dl.simplices
edges = np.zeros((2, 6 * len(tri)), dtype=int)
# data = np.ones(6 * len(points))
j = 0
for i in range(len(tri)):
edges[0][j] = tri[i][0]
edges[1][j] = tri[i][1]
j = j + 1
edges[0][j] = tri[i][1]
edges[1][j] = tri[i][0]
j = j + 1
edges[0][j] = tri[i][0]
edges[1][j] = tri[i][2]
j = j + 1
edges[0][j] = tri[i][2]
edges[1][j] = tri[i][0]
j = j + 1
edges[0][j] = tri[i][1]
edges[1][j] = tri[i][2]
j = j + 1
edges[0][j] = tri[i][2]
edges[1][j] = tri[i][1]
j = j + 1
data = np.ones(6 * len(tri))
adjacency_matrix = sp.sparse.csc_matrix((data, (edges[0, :], edges[1, :])))
for i in range(adjacency_matrix.nnz):
adjacency_matrix.data[i] = 1.0
graph = nx.to_networkx_graph(adjacency_matrix)
return graph
def count_edge_cuts(graph, w0, w1, w2, method):
edge_cut_count = 0
edge_uncut_count = 0
for edge in graph.edges_iter():
# This may be inefficient but I'll just check if both nodes are in 0, 1, or two
if edge[0] in w0 and edge[1] in w0:
edge_uncut_count += 1
elif edge[0] in w1 and edge[1] in w1:
edge_uncut_count += 1
elif edge[0] in w2 and edge[1] in w2:
edge_uncut_count += 1
else:
edge_cut_count += 1
print('Community detection method is: ', method)
print('Edge cuts: ', edge_cut_count)
print('Contained edges: ', edge_uncut_count)
return edge_cut_count, edge_uncut_count
def newman(G):
if len(G.nodes()) == 1:
return [G.nodes()]
def find_best_edge(G0):
eb = nx.edge_betweenness_centrality(G0)
eb_il = eb.items()
# eb_il.sort(key=lambda x: x[1], reverse=True)
eb_il_sorted = sorted(eb_il, key=lambda x: x[1], reverse=True)
return eb_il_sorted[0][0]
components = list(nx.connected_component_subgraphs(G))
while len(components) == 1:
G.remove_edge(*find_best_edge(G))
components = list(nx.connected_component_subgraphs(G))
result = [c.nodes() for c in components]
looper = 0
for c in components:
looper += 1
result.extend(newman(c))
return result
def count_edge_cuts_from_list(graph, list_of_partitions, method):
edge_cut_count = 0
edge_uncut_count = 0
for edge in graph.edges_iter():
found = False
for lst in list_of_partitions:
# This may be inefficient but I'll just check if both nodes are in 0, 1, or two
if edge[0] in lst and edge[1] in lst and not found:
edge_uncut_count += 1
found = True
if not found:
edge_cut_count += 1
print('Community detection method is: ', method)
print('Edge cuts: ', edge_cut_count)
print('Contained edges: ', edge_uncut_count)
return edge_cut_count, edge_uncut_count
def modularity_eval(graph, list_of_partitions):
print("Calculating modularity")
mod = modularity(graph, list_of_partitions)
return mod
def analysepartition(graph):
partitions = community.best_partition(graph)
communities = [partitions.get(node) for node in graph.nodes()]
community_count = set(communities)
print("List of Partitions Detected: ", len(community_count))
for i in community_count:
print("Count community {} is {}.".format(i, communities.count(i)))
return communities
def cluster(graph, feat, pos, eigen_pos, cluster_type):
book, distortion = kmeans(feat, 3)
codes, distortion = vq(feat, book)
nodes = np.array(range(graph.number_of_nodes()))
w0 = nodes[codes == 0].tolist()
w1 = nodes[codes == 1].tolist()
w2 = nodes[codes == 2].tolist()
print("W0 ", w0)
print("W1 ", w1)
print("W2 ", w2)
count_edge_cuts(graph, w0, w1, w2, cluster_type)
communities = list()
communities.append(w0)
communities.append(w1)
communities.append(w2)
mod = modularity_eval(graph, communities)
print("Modularity: ", mod)
plt.figure(3)
nx.draw_networkx_nodes(graph, eigen_pos, node_size=40, hold=True, nodelist=w0, node_color='m')
nx.draw_networkx_nodes(graph, eigen_pos, node_size=40, hold=True, nodelist=w1, node_color='b')
plt.figure(2)
nx.draw_networkx_nodes(graph, pos, node_size=40, hold=True, nodelist=w0, node_color='m')
nx.draw_networkx_nodes(graph, pos, node_size=40, hold=True, nodelist=w1, node_color='b')
def GraphCheck(graph):
print(" Dimensions of the Graph:")
print(nx.info(graph))
max_degree = 0
min_degree = 999999
ave_degree = 0
counter = 0
for node in graph.nodes():
degree = graph.degree(node)
if degree > max_degree:
max_degree = degree
if min_degree > degree:
min_degree = degree
ave_degree += degree
counter += 1
ave_degree = ave_degree / counter
print("Maximum Degree Node ", max_degree)
print("Minimum Degree Node ", min_degree)
print("Average Degree Node ", ave_degree)
def newman_eval(G):
comp = newman(G)
print("Newman's list ", len(comp))
return comp
def plot_graph(graph, pos, fig_num):
label = dict()
label_pos = dict()
for i in range(graph.number_of_nodes()):
label[i] = i
label_pos[i] = pos[i][0]+0.02, pos[i][1]+0.02
fig = plt.figure(fig_num, figsize=(8, 8))
fig.clf()
nx.draw_networkx_nodes(graph, pos, node_size=40, hold=False)
nx.draw_networkx_edges(graph, pos, hold=True)
nx.draw_networkx_labels(graph, label_pos, label, font_size=10, hold=True)
fig.show()
def editsidenodes(graph, node, neighbours):
with suppress(Exception): # Needed if the edge was already removed.
first = True
for neighbour in neighbours:
if not first:
graph.remove_edge(node, neighbour)
first = False
return graph
def readbasic(graph):
bt = nx.betweenness_centrality(graph)
sorted_bt = sorted(bt.items(), key=operator.itemgetter(1))
sorted_bt.reverse()
sorted_list = list(sorted_bt)
node_index = 0
while nx.number_connected_components(graph) < 4:
top_node = sorted_list[node_index][0]
top_neighbours = nx.neighbors(graph, top_node)
graph = editsidenodes(graph, top_node, top_neighbours)
node_index += 1
components = sorted(nx.connected_components(graph), key = len, reverse=True)
return_components = list()
for i in range(nx.number_connected_components(graph)):
print(components[i])
return_components.append(components[i])
return return_components
def readcommunity(community_list, index):
return_list = list()
node = 0
for i in community_list:
if community_list[node] == index:
return_list.append(node)
node += 1
return return_list
def execute():
gr = readgraph("facebook")
pos = readpositions(gr.number_of_nodes())
am = nx.adjacency_matrix(gr)
gr = nx.Graph(am)
plot_graph(gr, pos, 1)
num_nodes = gr.number_of_nodes()
GraphCheck(gr)
plot_graph(gr, pos, 2)
# Networkx algorithm
partitions = analysepartition(gr)
partitions_count = set(partitions)
list_of_partitions = list()
length = len(partitions_count)
for i in range(length):
comm = readcommunity(partitions, i)
print(comm)
list_of_partitions.append(comm)
count_edge_cuts_from_list(gr, list_of_partitions, "Extended Community")
mod = modularity_eval(gr, list_of_partitions)
print("Modularity: ==============================> ", mod)
# Modified
gr = nx.Graph(am)
communities = readbasic(gr)
gr = nx.Graph(am)
count_edge_cuts_from_list(gr, communities, "Modified")
mod = modularity_eval(gr, communities)
print("Modularity: ==============================> ", mod)
eigen_pos = dict()
deg = am.sum(0)
diags = np.array([0])
D = sp.sparse.spdiags(deg, diags, am.shape[0], am.shape[1])
Dinv = sp.sparse.spdiags(1 / deg, diags, am.shape[0], am.shape[1])
# Normalised laplacian
L = Dinv * (D - am)
E, V = sp.sparse.linalg.eigs(L, 3, None, 100.0, 'SM')
V = V.real
for i in range(num_nodes):
eigen_pos[i] = V[i, 1].real, V[i, 2].real
plot_graph(gr, eigen_pos, 3)
# Now let's see if the eigenvectors are good for clustering
# Use kmeans to cluster the points in the vector V
features = np.column_stack((V[:, 1], V[:, 2]))
cluster(gr, features, pos, eigen_pos, "Eigen Values")
cluster(gr, am.todense(), pos, eigen_pos, "Adjacency")
gr = nx.Graph(am)
gncomps = newman_eval(gr)
count_edge_cuts_from_list(gr, gncomps, "Newman")
mod = modularity_eval(gr, gncomps)
print("Modularity ==============================> ", mod)
execute() | true |
08e0002dd2b152e32bc116d80252bfcb2036bee0 | Python | gamefang/LeetCodeExcercise | /2. 两数相加.py | UTF-8 | 726 | 3.40625 | 3 | [] | no_license | # 链表机制不明!
from typing import *
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
# 此段代码只能在leetcode下通过
class Solution:
def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
res_num = Solution.ln_to_int(l1) + Solution.ln_to_int(l2)
return [ int(num) for num in str(res_num)[::-1] ]
@staticmethod
def ln_to_int(ln):
cur_str = ''
cur_node = ln
while 1:
cur_str = str(cur_node.val) + cur_str
cur_node = cur_node.next
if cur_node is None:break
return int(cur_str)
if __name__ == '__main__':
s = Solution()
| true |
ca7c03d23b6df7ee1d1e96cfd4abe19da842ea19 | Python | usnistgov/core_explore_example_type_app | /core_explore_example_type_app/utils/mongo_query.py | UTF-8 | 1,539 | 2.671875 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive | """Util to build queries for mongo db
"""
from core_explore_example_app.utils import mongo_query as common_mongo_query
from core_explore_example_type_app.components.data_structure_type_element import api as \
data_structure_type_element_api
from core_main_app.commons import exceptions
def fields_to_query(form_values, template_id):
"""Takes values from the html tree and creates a query from them
Args:
form_values:
template_id:
Returns:
"""
# FIXME: Refactor mongo_query to avoid passing a function in parameter.
return common_mongo_query.fields_to_query_custom_dot_notation(form_values, template_id,
get_dot_notation_to_element,
use_wildcard=True)
def get_dot_notation_to_element(data_structure_element, namespaces):
"""Get the dot notation of the data_structure_element.
Args:
data_structure_element:
namespaces:
Returns:
"""
# get data structure element's xml xpath.
try:
data_structure_type_element = data_structure_type_element_api.get_by_data_structure_id(
str(data_structure_element.id))
# get dot_notation
path = data_structure_type_element.path
# replace '/' by '.' (Avoid first '/')
dot_notation = path[1:].replace("/", ".")
except (exceptions.DoesNotExist, exceptions.ModelError, Exception):
dot_notation = ""
return dot_notation
| true |
0ee4c76adbcca07c0ef0e54e0b7189f4a889d5bd | Python | berkeley-cocosci/word-order-phylogeny | /TreeBuilder/dendropy/seqmodel.py | UTF-8 | 9,500 | 3.109375 | 3 | [] | no_license | #! /usr/bin/env python
##############################################################################
## DendroPy Phylogenetic Computing Library.
##
## Copyright 2010 Jeet Sukumaran and Mark T. Holder.
## All rights reserved.
##
## See "LICENSE.txt" for terms and conditions of usage.
##
## If you use this work or any portion thereof in published work,
## please cite it as:
##
## Sukumaran, J. and M. T. Holder. 2010. DendroPy: a Python library
## for phylogenetic computing. Bioinformatics 26: 1569-1571.
##
##############################################################################
"""
Models of molecular character evolution.
"""
import math
import itertools
from dendropy.utility import GLOBAL_RNG
from dendropy.mathlib import probability
import dendropy
class SeqModel(object):
"Base class for discrete character substitution models."
def __init__(self, state_alphabet, rng=None):
"""
__init__ initializes the state_alphabet to define the character type on which
this model acts. The objects random number generator will be `rng` or `GLOBAL_RNG`
"""
self.state_alphabet = state_alphabet
if rng is None:
self.rng = GLOBAL_RNG
else:
self.rng = rng
def pmatrix(self, tlen, rate=1.0):
"""
Returns a matrix of nucleotide substitution
probabilities.
"""
raise NotImplementedError
def generate_descendant_states(self,
ancestral_states,
edge_length,
mutation_rate=1.0,
rng=None):
"""
Returns descendent sequence given ancestral sequence.
"""
if rng is None:
rng = self.rng
pmat = self.pmatrix(edge_length, mutation_rate)
multi = probability.sample_multinomial
desc_states = []
for state in ancestral_states:
anc_state_idx = self.state_alphabet.index(state)
desc_state_idx = multi(pmat[anc_state_idx], rng)
desc_states.append(self.state_alphabet[desc_state_idx])
return desc_states
class NucleotideSeqModel(SeqModel):
"General nucleotide substitution model."
def __init__(self, base_freqs=None, state_alphabet=None):
"__init__ calls SeqModel.__init__ and sets the base_freqs field"
if state_alphabet is None:
state_alphabet = dendropy.DNA_STATE_ALPHABET
SeqModel.__init__(self, state_alphabet)
if base_freqs is None:
self.base_freqs = [0.25, 0.25, 0.25, 0.25]
else:
self.base_freqs = base_freqs
def stationary_sample(self, seq_len, rng=None):
"""
Returns a NucleotideSequence() object with length `length`
representing a sample of characters drawn from this model's
stationary distribution.
"""
probs = self.base_freqs
char_state_indices = [probability.sample_multinomial(probs, rng) for i in range(seq_len)]
return [self.state_alphabet[idx] for idx in char_state_indices]
def is_purine(self, state_index):
"""
Returns True if state_index represents a purine (A or G) row or column
index: 0, 2
"""
return state_index % 2 == 0
def is_pyrimidine(self, state_index):
"""
Returns True if state_index represents a pyrimidine (C or T) row or column
index: 1, 3
"""
return state_index % 2 == 1
def is_transversion(self, state1_idx, state2_idx):
"""
Returns True if the change from state1 to state2, as
represented by the row or column indices, is a transversional
change.
"""
return (self.is_purine(state1_idx) and self.is_pyrimidine(state2_idx)) \
or (self.is_pyrimidine(state1_idx) and self.is_purine(state2_idx))
def is_purine_transition(self, state1_idx, state2_idx):
"""
Returns True if the change from state1 to state2, as
represented by the row or column indices, is a purine
transitional change.
"""
return self.is_purine(state1_idx) and self.is_purine(state2_idx)
def is_pyrimidine_transition(self, state1_idx, state2_idx):
"""
Returns True if the change from state1 to state2, as
represented by the row or column indices, is a pyrimidine
transitional change.
"""
return self.is_pyrimidine(state1_idx) \
and self.is_pyrimidine(state2_idx)
def is_transition(self, state1_idx, state2_idx):
"""
Returns True if the change from state1 to state2, as
represented by the row or column indices, is a
transitional change.
"""
return (self.is_purine(state1_idx) and self.is_purine(state2_idx)) \
or (self.is_pyrimidine(state1_idx) and self.is_pyrimidine(state2_idx))
class Hky85SeqModel(NucleotideSeqModel):
"""
Hasegawa et al. 1985 model. Implementation following Swofford et
al., 1996.
"""
def __init__(self, kappa=1.0, base_freqs=None):
"__init__: if no arguments given, defaults to JC69."
NucleotideSeqModel.__init__(self, base_freqs=base_freqs)
self.correct_rate = True
self.kappa = kappa
if base_freqs is None:
self.base_freqs = [0.25, 0.25, 0.25, 0.25]
else:
self.base_freqs = base_freqs
def __repr__(self):
rep = "kappa=%f bases=%s" % (self.kappa, str(self.base_freqs))
return rep
def corrected_substitution_rate(self, rate):
"""Returns the factor that we have to multiply to the branch length
to make branch lengths proportional to # of substitutions per site."""
if self.correct_rate:
pia = self.base_freqs[0]
pic = self.base_freqs[1]
pig = self.base_freqs[2]
pit = self.base_freqs[3]
f = self.kappa*(pia*pig + pic*pit)
f += (pia + pig)*(pic + pit)
return (rate * 0.5/f) # (rate * 0.5/f)
else:
return rate
def pij(self, state_i, state_j, tlen, rate=1.0):
"""
Returns probability, p_ij, of going from state i to state j
over time tlen at given rate. (tlen * rate = nu, expected
number of substitutions)
"""
nu = self.corrected_substitution_rate(rate) * tlen
if self.is_purine(state_j):
sumfreqs = self.base_freqs[0] + self.base_freqs[2]
else:
sumfreqs = self.base_freqs[1] + self.base_freqs[3]
factorA = 1 + (sumfreqs * (self.kappa - 1.0))
if state_i == state_j:
pij = self.base_freqs[state_j] \
+ self.base_freqs[state_j] \
* (1.0/sumfreqs - 1) * math.exp(-1.0 * nu) \
+ ((sumfreqs - self.base_freqs[state_j])/sumfreqs) \
* math.exp(-1.0 * nu * factorA)
elif self.is_transition(state_i, state_j):
pij = self.base_freqs[state_j] \
+ self.base_freqs[state_j] \
* (1.0/sumfreqs - 1) * math.exp(-1.0 * nu) \
- (self.base_freqs[state_j] / sumfreqs) \
* math.exp(-1.0 * nu * factorA)
else:
pij = self.base_freqs[state_j] * (1.0 - math.exp(-1.0 * nu))
return pij
def qmatrix(self, rate=1.0):
"Returns the instantaneous rate of change matrix."
rate = self.corrected_substitution_rate(rate)
qmatrix = []
for state_i in range(4):
qmatrix.append([])
for state_j in range(4):
if state_i == state_j:
# we cheat here and insert a placeholder till the
# other cells are calculated
qij = 0.0
else:
if self.is_transition(state_i, state_j):
qij = rate * self.kappa * self.base_freqs[state_j]
else:
qij = rate * self.base_freqs[state_j]
qmatrix[state_i].append(qij)
for state in range(4):
qmatrix[state][state] = -1.0 * sum(qmatrix[state])
return qmatrix
def pvector(self, state, tlen, rate=1.0):
"""
Returns a vector of transition probabilities for a given state
over time `tlen` at rate `rate` for `state`. (tlen * rate =
nu, expected number of substitutions)
"""
pvec = []
# in case later we want to allow characters passed in here
state_i = state
for state_j in range(4):
pvec.append(self.pij(state_i, state_j, tlen=tlen, rate=rate))
return pvec
def pmatrix(self, tlen, rate=1.0):
"""
Returns a matrix of nucleotide substitution
probabilities. Based on analytical solution by Swofford et
al., 1996. (tlen * rate = nu, expected number of
substitutions)
"""
pmatrix = []
for state_i in range(4):
pmatrix.append(self.pvector(state_i, tlen=tlen, rate=rate))
return pmatrix
class Jc69SeqModel(Hky85SeqModel):
"""
Jukes-Cantor 1969 model. Specializes HKY85 such that
kappa = 1.0, and base frequencies = [0.25, 0.25, 0.25, 0.25].
"""
def __init__(self):
"__init__: uses Hky85SeqModel.__init__"
Hky85SeqModel.__init__(self,
kappa=1.0,
base_freqs=[0.25, 0.25, 0.25, 0.25])
| true |
e1ac6bc953f3f4697c0f375b83786ab9b04abdce | Python | Konadu360/hw3 | /request.py | UTF-8 | 1,064 | 2.9375 | 3 | [] | no_license | # using the request module to get interface info of a cisco IOS-XE router
# import libraries, request, json and pprint
import requests, json, pprint, urllib3
# store router info, user access details and headers in a dict
router={
"IP": "10.10.20.48",
"PORT": 443,
"name": "developer",
"pass": "C1sco12345"
}
header={
"accept": "application/yang-data+json",
"Content-Type": "application/yang-data+json"
}
# define the url and insert router details
url='https://{}:{}/restconf/data/interfaces/interface=GigabitEthernet2'
url=url.format(router["IP"],router["PORT"])
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print(url)
# using the request module, get the interface GigabitEthernet1 info
# parse the json data into a python dict and print the info out
req=requests.get(url,auth=(router["name"],router["pass"]),headers=header,verify=False)
response=json.loads(req.text)
pprint.pprint(response['Cisco-IOS-XE-interfaces-oper:interface']['name'])
| true |
7550e0a46bc182d87c5115bd188e47bf89bdc46b | Python | diegoaspinwall/Unit5 | /middleWord.py | UTF-8 | 161 | 3.265625 | 3 | [] | no_license | #Diego Aspinwall
#10-13-17
#middleWord.py
words = input('Enter words: ').split(' ')
print(words[len(words)/2])
if len(words)%2 == 0:
print(words[(len(words)/2)-1])
| true |
c9db6741acca847d3be713ea06c4c540baba359c | Python | Damian1724/Hackerrank | /data-structure/Arrays/Dynamic-Array.py | UTF-8 | 776 | 2.921875 | 3 | [] | no_license | /*
Author: Damian Cruz
source: HackerRank(https://www.hackerrank.com)
problem name: Algorithms >Data Structures>arrays>Dynamic Array
problem url: https://www.hackerrank.com/challenges/dynamic-array/problem
*/
nq = input().split()
n = int(nq[0])
q = int(nq[1])
lista=[]
matrix=[[]]
lastanswer=0
for i in range(q):
lista=list(map(int, input().rstrip().split()))
print(i)
if lista[0]==1:
if len(matrix)<= (lista[1]^lastanswer)%n:
for j in range((len(matrix)-1),(lista[1]^lastanswer)%n):
matrix.append([])
matrix[(lista[1]^lastanswer)%n].append(lista[2])
else:
seq = (lista[1] ^ lastanswer) % n
element = lista[2] % len(matrix[seq])
lastanswer = matrix[seq][element]
print(lastanswer)
| true |
27cfda39809b19ad1e381e890d7a2465e5aae1c3 | Python | p-v-o-s/circuitpython-phant | /code.py | UTF-8 | 1,757 | 2.71875 | 3 | [] | no_license | import socket
import time
import machine
import onewire, ds18x20
BASE_URL = 'http://159.203.128.53/input/'
PUBLIC_KEY = 'bLzgdDwgq4CgqLZmwdrYHGK68908'
PRIVATE_KEY = 'GmPOGBYOW6spAV328wynUBEgzeGz'
def do_connect():
import network
sta_if = network.WLAN(network.STA_IF)
if not sta_if.isconnected():
print('connecting to network...')
sta_if.active(True)
sta_if.connect('InmanSquareOasis', 'portauprince')
while not sta_if.isconnected():
pass
print('network config:', sta_if.ifconfig())
def http_get(url):
_, _, host, path = url.split('/', 3)
addr = socket.getaddrinfo(host, 80)[0][-1]
s = socket.socket()
s.connect(addr)
s.send(bytes('GET /%s HTTP/1.0\r\nHost: %s\r\n\r\n' % (path, host), 'utf8'))
while True:
data = s.recv(100)
if data:
print(str(data, 'utf8'), end='')
else:
break
s.close()
def get_temps():
# the device is on GPIO12
dat = machine.Pin(12)
# create the onewire object
ds = ds18x20.DS18X20(onewire.OneWire(dat))
# scan for devices on the bus
roms = ds.scan()
temps=[]
for rom in roms:
#print(rom)
ds.convert_temp()
time.sleep(1)
temp=ds.read_temp(rom)
temps.append(temp)
print(temps)
return temps
def post_values():
do_connect()
temps = get_temps()
url=BASE_URL+PUBLIC_KEY+'?private_key='+PRIVATE_KEY+'&temp1='+str(temps[0])+'&temp2='+str(temps[1])+'&temp3='+str(temps[2])
http_get(url)
def blink():
led = machine.Pin(0, machine.Pin.OUT)
led.low()
time.sleep(1)
led.high()
time.sleep(1)
while True:
blink()
blink()
post_values()
blink()
time.sleep(20)
| true |
b1d41bf9da3740fc36920cbe9ef2b4c51557e75d | Python | vishwatejharer/warriorpy | /warriorpy/towers/intermediate/level_007.py | UTF-8 | 856 | 2.765625 | 3 | [
"MIT"
] | permissive | # -----
# | sC >|
# |@ s C|
# | s |
# -----
level.description("Another ticking sound, but some sludge is blocking the way.")
level.tip("Quickly kill the sludge and rescue the captive before the "
"bomb goes off. You can't simply go around them.")
level.clue("Determine the direction of the ticking captive and kill any "
"enemies blocking that path. You may need to bind surrounding "
"enemies first.")
level.time_bonus(70)
level.ace_score(134)
level.size(5, 3)
level.stairs(4, 0)
level.warrior(0, 1, 'east')
level.unit('sludge', 1, 0, 'south')
level.unit('sludge', 1, 2, 'north')
level.unit('sludge', 2, 1, 'west')
level.unit('captive', 2, 0, 'west')
def add_abilities(unit):
unit.add_abilities('explode_')
unit.abilities_attr['explode_'].time = 10
level.unit('captive', 4, 1, 'west', func=add_abilities)
| true |
a6ce7cda4a48883ed59feae365d4f6768f3c3aec | Python | junhan-kim/AI-Practice | /#4.py | UTF-8 | 866 | 3.40625 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
# slope & coefficient init
slope = 0
coef = 0
# x,y dataset
# correlation between study time and test scores
data = [[3, 35], [4, 50], [5, 45], [6, 64], [7, 66], [8, 70]]
x = [i[0] for i in data]
y = [i[1] for i in data]
# Least Squared Method
def estimate(x, y):
x = np.array(x)
y = np.array(y)
n = np.size(x)
x_m, y_m = np.mean(x), np.mean(y)
m = (np.sum(y*x) - n*x_m*y_m) / (np.sum(x*x) - n*x_m*x_m)
c = y_m - m*x_m
return (m, c)
# regression function
def predict(x):
return slope*x + coef
slope, coef = estimate(x, y)
# set y_pred list
y_pred = []
for i in range(len(x)):
y_pred.append(predict(x[i]))
print("study time=%.f, real score=%.f, prediction score=%.f" % (x[i], y[i], predict(x[i])))
# plotting graph
plt.scatter(x,y)
plt.plot(x,y_pred, color="red")
plt.show()
| true |
0cff94e4120ec065cb3dccf557f4d36151a03d54 | Python | CristianDeluxe/update-ip | /update_ip/configuration.py | UTF-8 | 3,299 | 2.765625 | 3 | [
"BSD-2-Clause"
] | permissive | import ConfigParser
import inspect
from update_ip.services import services_by_name
class InvalidConfigFile(Exception):
    """Signals a configuration file that is missing or malformed.

    Raised by Configuration.read_from_file when the expected
    "update_ip" section cannot be read from the given file.
    """
class Configuration(object):
    """Holds the update_ip settings and (de)serializes them to a config file.

    All known options are exposed as instance attributes (e.g.
    ``configuration.domains``); options that were not supplied are None.
    ``domains`` is normalized to a list of stripped domain names.
    """

    SECTION= "update_ip"
    OPTIONS= ('cache_file', 'domains', 'service_name')
    OPTIONS_DESCRIPTIONS= ('File where to cache last ip', 'Domains (comma-separated)', 'Name of the updater service')
    REQUIRED_OPTIONS= OPTIONS[:2]

    def __init__(self, **kwargs):
        """Accepts any of OPTIONS (plus service-specific extras) as keywords.

        ``domains`` may be either a comma-separated string or an
        already-split sequence of names; both are normalized to a list
        of stripped strings.
        """
        options = dict(kwargs)  # copy of the given options
        for missing in set(Configuration.OPTIONS).difference(kwargs):
            # options that were not given default to None
            options[missing] = None
        # expose options as instance attributes, i.e: configuration.domains
        self.__dict__ = options
        if self.domains:
            if hasattr(self.domains, "split"):
                # comma-separated string, as read from a config file
                self.domains = [d.strip() for d in self.domains.split(",")]
            else:
                # generalization: also accept a sequence of domain names
                self.domains = [str(d).strip() for d in self.domains]

    def write_to_file(self, filename):
        """Writes this configuration to the config file at `filename`."""
        config = ConfigParser.RawConfigParser()
        config.add_section(self.SECTION)
        for key, value in self.__dict__.items():
            if isinstance(value, list):
                value = ",".join(value)
            if value is not None:
                config.set(self.SECTION, key, value)
        # text mode: RawConfigParser.write emits str, not bytes
        with open(filename, 'w') as configfile:
            config.write(configfile)

    @staticmethod
    def read_from_file(filename):
        """Creates a Configuration from the config file at `filename`.

        Raises InvalidConfigFile when the "update_ip" section is absent
        (including when the file itself does not exist).
        """
        try:
            config = ConfigParser.RawConfigParser()
            config.read(filename)
            file_options = dict(config.items(Configuration.SECTION))
            return Configuration(**file_options)
        except ConfigParser.NoSectionError as e:
            raise InvalidConfigFile(
                "Failed to read configuration from '{0}': {1}".format(
                    filename, e))
def configurationWizard():
    """Interactively builds a config file: prompts for the common options,
    then for the chosen service's constructor arguments (discovered via
    introspection), and writes the result with Configuration.write_to_file.
    """
    def read_string( field_name, allow_empty= False ):
        # prompt until the user types something (unless empty is allowed)
        while True:
            print field_name, ("[Required]" if not allow_empty else "")+":"
            x= raw_input()
            if x or allow_empty:
                return x or None
    print "Generating a new configuration file"
    print "Available services:"+"\n "+"\n ".join(services_by_name.keys())
    filename= read_string("Configuration filename to write")
    options={}
    for k, desc in zip( Configuration.OPTIONS, Configuration.OPTIONS_DESCRIPTIONS):
        required= k in Configuration.REQUIRED_OPTIONS
        v= read_string( "{0} ({1})".format(desc,k), allow_empty= not required)
        options[k]= v
    svc_name = options['service_name']
    try:
        service= services_by_name[svc_name]
    except KeyError:
        print "Sorry, '%s' is not a valid service name" % (svc_name)
        exit(3)
    print "Service parameters:"
    # inspect the service __init__ signature to learn which extra
    # options this particular updater service needs
    args, varargs, keywords, defaults= inspect.getargspec(service.__init__)
    for a in args:
        if a!='self':
            v= read_string( a, allow_empty= False)
            options[a]= v
    print "Generating and writing configuration to file: ", filename
    cfg= Configuration( **options )
    cfg.write_to_file(filename)
    print '''Finished. Please remember to set restrictive permissions \
if the file contains sensitive data (like a service password)'''
| true |
2e79f1ab27972d63850bd19c9225ae22c675727b | Python | rafaelfolco/leetcode-python | /median-sorted-arrays.py | UTF-8 | 483 | 3.5625 | 4 | [] | no_license | # https://leetcode.com/problems/median-of-two-sorted-arrays
class Solution(object):
    """LeetCode 4 - Median of Two Sorted Arrays (merge-and-sort approach)."""

    def findMedianSortedArrays(self, nums1, nums2):
        """
        Return the median of the two sorted lists.

        :type nums1: List[int]
        :type nums2: List[int]
        :rtype: float

        Merges both inputs and sorts: O((m+n) log(m+n)), simple rather
        than the optimal O(log(m+n)) partition approach.
        """
        nums = sorted(nums1 + nums2)
        nlen = len(nums)
        # BUG FIX: use floor division. Under Python 3, `nlen / 2` yields a
        # float and indexing with it raises TypeError; `//` is correct on
        # both Python 2 and 3 (the abs() was redundant since nlen >= 0).
        half = nlen // 2
        if nlen % 2 == 0:
            # even count: average the two middle values (dividing by 2.0
            # keeps the result a float under Python 2 as well)
            median = (nums[half] + nums[half - 1]) / 2.0
        else:
            median = nums[half]
        return median
| true |
57aa917d5310d46539563da577171b78dd8f4caa | Python | zh-wang/leetcode | /solutions/0075_Sort_Colors/exchange_3_loops.py | UTF-8 | 555 | 3.359375 | 3 | [] | no_license | class Solution:
def sortColors(self, nums: List[int]) -> None:
"""
Do not return anything, modify nums in-place instead.
"""
self.exchange(nums, 1, 0)
self.exchange(nums, 2, 0)
self.exchange(nums, 2, 1)
def exchange(self, nums, x, y):
i, j = 0, len(nums) - 1
while i < j:
while i < j and nums[i] != x:
i += 1
while i < j and nums[j] != y:
j -= 1
nums[i], nums[j] = nums[j], nums[i]
i, j = i + 1, j - 1
| true |
34769d9fa4f401a0e91ad1fcf71a984e815e6a17 | Python | HamburgerMonsterSnake/SS_Bot | /new_world_bot_ss.py | UTF-8 | 447 | 2.8125 | 3 | [] | no_license | import pyautogui as pg
import random
import time
try:
while(1):
pg.click()
tmp = random.randint(1,5)
if tmp == 1:
pg.press('w')
elif tmp == 2:
pg.press('a')
elif tmp == 3:
pg.press('s')
elif tmp == 4:
pg.press('d')
else:
pg.press('space')
time.sleep(100)
except KeyboardInterrupt:
print("end \n") | true |
6ef768bd07a24103b7f22fdd3b8bbaebb62754c5 | Python | StudioPuzzle/Python3-Junior | /lesson 5/pr_3.py | UTF-8 | 59 | 3.34375 | 3 | [] | no_license | n = 923456
# floor-divide n by 10 six times, printing each intermediate value
# (i.e. repeatedly strip the last decimal digit)
for i in range(6):
    n = n //10
    print(n)
| true |
e2d6d6379e2ad118e348da6b190484c1b446a891 | Python | mstallone/mstallone.github.io | /Projects/CodeForces_Python/231A_Team.py | UTF-8 | 262 | 2.984375 | 3 | [] | no_license | numberOfTests = input()
# Codeforces 231A "Team": a problem gets implemented when at least two
# of the three members are sure about the solution (vote "1").
solved = 0
for _ in range(int(numberOfTests)):
    votes = input().split()
    if votes.count("1") >= 2:
        solved += 1
print(solved)
2b8951982e638df665408aa6f0e1ad5855e9fbf3 | Python | harinimali/Perceptron | /avg_per_classify.py | UTF-8 | 3,279 | 2.53125 | 3 | [] | no_license |
from __future__ import division
import re
import os
import sys
import json
import io
import math
import string
from collections import defaultdict
# CLI arg: directory of e-mail files to classify
indir= sys.argv[1]
finaloutput=open('avg_per_output2.txt','w+')  # per-file HAM/SPAM labels written here
# load the trained averaged-perceptron model: a bias plus per-word weights
with open('avg_per_model2.txt', 'r') as fp:
    data = json.load(fp)
bias=data['bias']
weights=data['weights']
def calculations(filecount):
    """Read back the labelled output and print precision/recall/F1 for the
    HAM and SPAM classes plus overall accuracy (Python 2 script).

    Counts per class: a = true positives, b = false positives,
    c = false negatives (suffix 1 = ham, 2 = spam). Each line holds the
    predicted label in d[0] and the file path in d[-1]; the path is
    presumably expected to contain the true label ('ham'/'spam') -- verify
    against the corpus layout.
    """
    hprecision = 0.0
    hrecall = 0.0
    hf1 = 0.0
    sprecision = 0.0
    srecall = 0.0
    sf1 = 0.0
    # NOTE(review): `indir2` is never defined anywhere in this script --
    # presumably this should read 'avg_per_output2.txt'; confirm.
    with open(indir2, 'r') as fp:
        data = fp.readlines()
    a1 = 0.0
    b1 = 0.0
    c1 = 0.0
    a2 = 0.0
    b2 = 0.0
    c2 = 0.0
    accuracy = 0.0
    for d in data:
        d = d.strip('\n').split(' ')
        #print d[-1]
        # ham confusion counts
        if (d[0] == 'HAM') and (re.search('ham', d[-1])):
            a1 += 1
        elif(d[0] == 'HAM') and (re.search('spam', d[-1])):
            b1 += 1
        elif (d[0] == 'SPAM') and (re.search('ham', d[-1])):
            c1 += 1
        # spam confusion counts (mirror of the above)
        if (d[0] == 'SPAM') and (re.search('spam', d[-1])):
            a2 += 1
        elif (d[0] == 'SPAM') and (re.search('ham', d[-1])):
            b2 += 1
        elif (d[0] == 'HAM') and (re.search('spam', d[-1])):
            c2 += 1
    # print a1
    # print a2
    if filecount != 0:
        accuracy = float(a1 + a2) / float(filecount)
    else:
        accuracy = 0
    # every ratio below is guarded against a zero denominator
    if (a1 + b1):
        hprecision = float(a1) / float(a1 + b1)
    else:
        hprecision = 0
    if (a1 + c1):
        hrecall = float(a1) / float(a1 + c1)
    else:
        hrecall = 0
    if (a2 + b2):
        sprecision = float(a2) / float(a2 + b2)
    else:
        sprecision = 0
    if (a2 + c2):
        srecall = float(a2) / float(a2 + c2)
    else:
        srecall = 0
    if (hprecision + hrecall):
        hf1 = float((2 * hprecision * hrecall) / float(hprecision + hrecall))
    else:
        hf1 = 0
    if (sprecision + srecall):
        sf1 = float(2 * sprecision * srecall) / (sprecision + srecall)
    else:
        sf1 = 0
    print ("Ham precision:", hprecision)
    print ("Ham recall:", hrecall)
    print ("Ham F1 Score: ", hf1)
    print ("Spam precision:", sprecision)
    print ("Spam recall:", srecall)
    print ("Spam F1:", sf1)
    print ("Accurcy:", accuracy)
#print bias
# classify every file under indir with the perceptron:
# score = bias + sum of the weights of the file's lowercased tokens;
# score > 0 -> spam, otherwise ham (Python 2 print statements below)
con=0
for root, dirs, files in os.walk(indir):
    for x in files:
        # skip housekeeping files
        if (x == '.DS_Store' or x == 'LICENSE' or x == 'README.md' or x == 'README.txt'):
            print (" ")
        else:
            filename = os.path.join(root, x)
            with io.open(filename, 'r',encoding='latin1') as f:
                con+=1
                contents = f.read()
                contents = contents.lower()
                contents = contents.split()
                a=0
                for w in contents:
                    if w in weights.keys():
                        a += weights[w]
                a = a + bias
                if ( a >0 ):
                    print "spam",filename
                    finaloutput.write("SPAM ")
                else:
                    print "ham" , filename
                    finaloutput.write("HAM ")
                finaloutput.write(filename + "\n")
calculations(con)
6a421a29fe195a4bea199acc5a909f4a4620a2dc | Python | xiajinchun/python-learning | /09_error_debug_test/02_debug.py | UTF-8 | 1,415 | 3.875 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# 第一种方法简单直接粗暴有效,就是用print把可能有问题的变量打印出来看看
def foo(s):
n = int(s)
print '>>> n = %d' % n
return 10 / n
def main():
foo('0')
#main()
# 断言 ———— 凡是用print来辅助查看的地方,都可以用断言(assert)来替代
def foo(s):
n = int(s)
# assert的意思是,表达式n != 0应该是True,否则,后面的代码就会出错
assert n != 0, 'n is zero'
return 10 / n
def main():
foo('0')
#main()
# logging ———— 把print替换为logging是第3种方式,和assert比,logging不会抛出错误,而且可以输出到文件
import logging
# logging允许你指定记录信息的级别,有debug,info,warning,error等几个级别
logging.basicConfig(level = logging.INFO)
s = '0'
n = int(s)
logging.info('n = %d' % n)
print 10 / n
# pdb ———— 启动Python的调试器pdb,让程序以单步方式运行,可以随时查看运行状态
s = '0'
n = int(s)
print 10 / n
# pdb.set_trace()这个方法也是用pdb,但是不需要单步执行
# 我们只需要import pdb,然后,在可能出错的地方放一个pdb.set_trace(),就可以设置一个断点
import pdb
s = '0'
n = int(s)
pdb.set_trace() # 运行到这里会自动暂停
print 10 / n
# 程序会自动在pdb.set_trace()暂停并进入pdb调试环境 | true |
3f7b6f18c147b90c0ea2c7b13e5d86a221d1e192 | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_199/3792.py | UTF-8 | 468 | 3.015625 | 3 | [] | no_license | t = int(raw_input())
# Greedy left-to-right flipping (GCJ-style; Python 2 prints).
# Per case: s is the +/- string, n is the flipper window size.
for i in range(t):
    s, n = raw_input().split()
    s = list(s)
    n = int(n)
    res = 0  # flips performed
    # whenever a window of n starts with '-', flip the whole window
    for x in range(len(s)-n+1):
        if "-" == s[x:x+n][0]:
            res += 1
            # invert every symbol inside the window
            for y in range(x, x+n):
                if s[y] == "+":
                    s[y] = "-"
                else:
                    s[y] = "+"
    print "Case #" + str(i+1) + ":",
    # after the greedy pass the prefix is forced to '+'; a single
    # remaining symbol therefore means all '+' (for n <= len(s)),
    # anything else means the tail cannot be fixed
    if len(set(s)) == 1:
        print res
    else:
        print "IMPOSSIBLE"
7ec0ad177530b81d0716fa460bb9dce734bcac00 | Python | vlad2626/project1v23 | /main.py | UTF-8 | 3,353 | 2.765625 | 3 | [] | no_license | import json
import requests
import nltk
from nltk import sent_tokenize
from nltk import word_tokenize
from nltk.probability import FreqDist
from nltk.corpus import stopwords
import main_functions
from pprint import pprint
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import streamlit as st
import numpy as np
import pandas as pd
# fetch the NLTK tokenizer and stopword corpora on every run
nltk.download("punkt")
nltk.download("stopwords")
# the NYT API key is kept in a local JSON file under the "my_key" field
api_key_dict = main_functions.read_from_file("JSON_Files/api_key.json")
api_key = api_key_dict["my_key"]
my_articles = " "
st.set_option('deprecation.showPyplotGlobalUse', False)
# NOTE(review): "Well come" looks like a typo, but this is a user-visible
# string so it is deliberately left untouched here.
st.title("Well come to New york times Articles\n Project 1 6159250")
# Top-stories section picker.
option = st.selectbox(
    "what section would you like to look into",
    # BUG FIX: a comma was missing after "nyregion", so Python fused the
    # two adjacent string literals into "nyregionobituaries", silently
    # dropping both real sections from the menu.
    # NOTE(review): " home" has a leading space; the section name is
    # interpolated into the API URL below, so this likely breaks that
    # choice too -- confirm against the Top Stories API section list.
    ["arts", "automobiles", "books", "business", "fashion", "food", "health", " home", "insider", "magazine", "movies", "nyregion",
    "obituaries", "opinion", "politics", "realestate" , "science", "sports","sundayreview", "technology", "theater", "t-magazine","travel" , "upshot", "us", "world"]
)
st.write("You have selected " + option)
# NOTE(review): `str` shadows the builtin, and slicing the full length
# makes these three lines effectively `sub = option`.
str = option
l= len(str)
sub = str[0:l]
url = "https://api.nytimes.com/svc/topstories/v2/"+sub+".json?api-key=" + api_key
response = requests.get(url).json()
# cache the raw response to disk, then read it back
main_functions.save_to_file(response, "JSON_Files/response.json")
my_articles = main_functions.read_from_file("JSON_Files/response.json")
# concatenate every article abstract into one big string
str1 = " "
for i in my_articles["results"]:
    str1 = str1 + i["abstract"]
words = word_tokenize(str1)
# keep alphabetic tokens only, lowercased
word_no_punc = []
for j in words:
    if j.isalpha():
        word_no_punc.append(j.lower())
# NOTE(review): this rebinding shadows the imported `stopwords` module
stopwords = stopwords.words("english")
clean_words = []
for k in word_no_punc:
    if k not in stopwords:
        clean_words.append(k)
fdist = FreqDist(clean_words)
str3 = fdist.most_common(10)  # list of (word, count) pairs
#st.write(" the most common 10 words used")
# chart_data = pd.DataFrame(
#     str3
# )
chart_data = pd.DataFrame(
    str3,
)
if st.checkbox("click here for the most frequent words"):
    st.line_chart(chart_data)
#st.line_chart(chart_data)
# word cloud built from the raw concatenated abstracts
wordcloud= WordCloud().generate(str1)
plt.figure(figsize=(12,12))
plt.imshow(wordcloud)
plt.axis("off")
if st.checkbox("click here to generate word cloud"):
    st.pyplot(figsize=(12,12))
# ---- Part B: most shared/emailed/viewed articles over 1/7/30 days ----
my_articles2= " "
st.title(" PART B - MOST POPULAR ARTICLES")
option2 =st.selectbox("what is you preffered set of articles",
                      ["shared", "emailed", "viewed"])
option3 = st.selectbox("how long you want to collect data for(days)",
                       ["1", "7", "30"])
url2 = "https://api.nytimes.com/svc/mostpopular/v2/" + option2 +"/" + option3+ ".json?api-key=" + api_key
response2 = requests.get(url2).json()
main_functions.save_to_file(response2, "JSON_Files/response2.json")
my_articles2 = main_functions.read_from_file("JSON_Files/response2.json")
# concatenate abstracts, as in part A
pop = " "
for m in my_articles2["results"]:
    pop = pop + m["abstract"]
words2 = word_tokenize(pop)
clean_words2 = []
word_no_punc2= []
for p in words2:
    if p.isalpha():
        word_no_punc2.append(p.lower())
# `stopwords` here is the English word list rebound in part A
for z in word_no_punc2:
    if z not in stopwords:
        clean_words2.append(z)
# NOTE(review): the frequency distribution uses the unfiltered
# word_no_punc2, so the stopword filtering into clean_words2 above is
# never used -- probably FreqDist(clean_words2) was intended.
fdist2 =FreqDist(word_no_punc2)
mostCom = fdist2.most_common(10)
#pprint(mostCom)
wordcloud2 = WordCloud().generate(pop)
plt.imshow(wordcloud2)
plt.axis("off")
if st.checkbox("click here for wordloud"):
    st.pyplot(figsize=(12, 12))
| true |
375e539c5b4eb744be0044ae7ebc4de9f78c4663 | Python | tonycao/CodeSnippets | /python/homework/Archive/A2/A2Answer_sean.py | UTF-8 | 5,029 | 3.75 | 4 | [] | no_license | ## Assignment 2 - Analyzing water
## Author: Sean Curtis
## Collaborators: None
## Time spent (hours): N/A
## In this assignment, we're going to visualize and analyze data to answer
## meaningful questions. Some of the framework you need is in place, you
## have to fill in the gaps.
import numpy as np
import pylab as plt
# read the data (Python 2 script: note the print statements further down)
# depth: a 276 by 2 array with depth of Jordan and Falls lakes
# for each month from Jan 1985 to Dec 2007, which is 23 years.
# Data that is not available is NaN.
depth = np.loadtxt('depth.txt')  # columns: [Jordan, Falls]
# rain: a 276x2 array with total rainfall in inches for each month
rain = np.loadtxt('rain.txt')
# hawgage: a 365x4 array of daily average river or lake height (ft) at
# Haw River, Bynum, and above & below the Jordan Lake Dam by Moncure.
# (These sites are listed upstream to downstream, but the gauges are
# not in that order.)
hawgage = np.loadtxt('hawgage.txt')
# hawrain: a 365x2 array of daily rainfall (in) measured at two
# rain gauges from 29 Aug 07 - 28 Aug 08.
hawrain = np.loadtxt('hawrain.txt')
## QUESTION 1
# 1. Plot a line graph of depths for both lakes.
plt.plot( depth )  # one line per column
# these show how to label the figure
plt.title('Depth of Jordan and Falls lakes') # the title of the figure
plt.ylabel('Depth (feet)') # label for the y-axis
plt.xlabel('Months starting with Jan 1985') # label for the x-axis
plt.savefig('Fig1.png') # the saved output figure
plt.close() # close this plot so it doesn't interfere later
## QUESTION 2
# 2. The targets for Jordan and Falls lakes are 216ft and 251.5ft, respectively.
# For how many months was each lake over its target?
jordanTgt = 216
fallsTgt = 251.5
targets = np.array([ jordanTgt, fallsTgt ] )
overTgt = depth > targets  # broadcasts: each column against its own target
overTgtCount = np.sum( overTgt, axis=0 )  # True counts as 1 when summed
print 'Months Jordan lake exceeded its target depth:', overTgtCount[0]
print 'Months Falls Lake exceeded its target depth:', overTgtCount[1]
## QUESTION 3
# 3. Plot the rain in August as a line graph over years for both lakes.
augRain = rain[ 7::12, : ]  # August = row 7, then every 12th row (one per year)
plt.plot( augRain )
plt.title('Rain in August for Jordan and Falls lakes')
plt.savefig('Fig2.png')
plt.close()
## QUESTION 4
# 4. Compute the average height that Falls Lake is above its target
# for each month over the 23 years from 1985-2007, and display as bar
# chart with a bar for each month. Plot the line for 2007 in red on
# top of this bar chart.
monthVsYear = np.reshape( depth[ :, 1 ], (-1, 12 ) )  # rows = years, cols = months
# NOTE(review): the header says missing data is NaN; np.mean propagates
# NaN, so any missing month would blank out its bar -- consider np.nanmean.
FallsByMonth = np.mean( monthVsYear, axis=0 )  # average over the 23 years
FallsByMonth -= fallsTgt  # express as height above the target
plt.bar( np.arange(1, 13), FallsByMonth, align='center')
year2007 = depth[-12:, 1] - fallsTgt  # last 12 rows = the 12 months of 2007
plt.plot( np.arange(1, 13), year2007, 'r')
plt.title('Average Falls lake depth 85-07, and line for 2007')
plt.ylabel('Height above target(ft)')
plt.xlabel('Month')
plt.savefig('Fig3.png')
plt.close()
## QUESTION 5
# 5. Determine how many days had more than 1 in of precipitation at
# the two sites in hawrain, and how many days had less than 1/4 in.
grtrOne = hawrain > 1
# inner sum counts qualifying sites per day; "> 0" keeps days where at
# least one site qualified
print 'Number of days either lake had more than one inch', np.sum( np.sum( grtrOne, axis=1 ) > 0 )
qrtr = hawrain < 0.25
print 'Number of days either lake had less than 1/4 inch:', np.sum( np.sum( qrtr, axis=1 ) > 0 )
## QUESTION 6
# 6. Plot line graphs showing the cumulative amount of rain over the
# past year at both sites. Which of the two locations (1 or 2)
# received the most rain?
cumRain = np.cumsum( hawrain, 0 )  # running total down the days axis
plt.plot( cumRain )
maxIndex = np.argmax(cumRain[ -1, : ])  # compare the year-end totals
plt.title('Cumulative Rainfall')
plt.xlabel('Days since 28Aug07')
plt.ylabel('Cumulative rainfall (in)')
plt.savefig('Fig4.png')
plt.close()
# !!! Determine which site had the most total rain -- the np.argmax function will help !!!
# !!! This print statement should print 1 or 2 (be careful there....) !!!
print 'The site with more total rain:', maxIndex + 1  # +1: argmax is 0-based
## QUESTION 7
# 7. Determine the lowest height for each gauge, and create an array
# of adjusted heights by subtracting the corresponding lowest heights.
# Plot these adjust heights as a line graph.
minHeight = hawgage.min( 0 )  # per-gauge (column) minimum
adjHeight = hawgage - minHeight
plt.plot( adjHeight )
plt.title('Adjusted gauge heights')
plt.xlabel('Days since 28Aug07')
plt.ylabel('Height above min (ft)')
plt.savefig('Fig5.png')
plt.close()
## QUESTION 8
# 8. Determine the maximum increase and maximum decrease in height
# from one day to the next for each of the four gauges in hawgage.
delta = np.diff( hawgage, axis=0 )  # day-to-day differences per gauge
minDelta = delta.min( 0 )  # most negative change = maximum decrease
maxDelta = delta.max( 0 )
print 'Maximum one-day change in height:', maxDelta
print 'Minimum one-day change in height:', minDelta
## YOUR THOUGHTS
## Type in some of your thoughts about this assignment below. Make sure that it all begins with
## pound signs (#) or your python script won't run at all.
77709e9774a80b7b2bbeafe714e0d34169449b00 | Python | StringDouble/Python-Projects | /story_generator.py | UTF-8 | 403 | 3.859375 | 4 | [] | no_license | #Copyright Yoana Stankova 2018
obj1 = input("What is the last thing you saw?")
obj2 = input("What is your favorite place?")
obj3 = input("Which is the scariest animal?")
print("Once upon a time there was a " + obj1 + ". One day it decided to go to "
+ obj2 + ". As " + obj1 + " was walking, " + obj1 + " saw a " + obj3 +
". " + obj3.capitalize() + " attacked " + obj1 + " and " + obj1 + " died." )
| true |