blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ba0bb0ef099dc45d7dae01408d58978d7266e11e | Python | nathanlo99/dmoj_archive | /done/mockccc15j1.py | UTF-8 | 171 | 2.9375 | 3 | [] | no_license | area, other = input().split()
# Classify a phone number already split (above) into `area` (area code) and
# `other` (7-digit local part).  Presumably 416/647/437 are the accepted area
# codes from the mock CCC problem statement — TODO confirm.
if area not in ["416", "647", "437"] or len(other) != 7: print("invalid")
# Only "416" numbers are deemed valuable; other accepted codes are valueless.
elif area == "416": print("valuable")
else: print("valueless")
| true |
5e252f03cc8114b5625e27b2ae959d758ae21b55 | Python | dmh43/counterfactual_fairness | /main.py | UTF-8 | 2,414 | 2.671875 | 3 | [] | no_license | import pydash as _
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from cff.policies import NaivePolicy, UnawarePolicy, FairPolicy, SimplePolicy
from cff.sim import simulate_exogenous_vars, simulate_endogenous_vars
from cff.ds_helpers import hist_ability_by_policy, kde_ability_by_protected, calc_utility
def main():
    """Simulate admissions data and compare the simple/naive/unaware/fair policies."""
    # Sizes of the simulated population and of the available seats.
    nb_obs = 100000
    nb_seats = 20000
    # Training draw: exogenous variables first, then endogenous ones
    # conditioned on them.
    R, S, A = simulate_exogenous_vars(nb_obs, R_pct=0.75, S_pct=0.6)
    G, L, F = simulate_endogenous_vars(A, R, S)
    # Instantiate every policy; train the ones that need fitting.
    simplePolicy = SimplePolicy()
    naivePolicy = NaivePolicy()
    naivePolicy.train(R, S, G, L, F)
    unawarePolicy = UnawarePolicy()
    unawarePolicy.train(G, L, F)
    fairPolicy = FairPolicy()
    fairPolicy.train(R, S, G, L)
    # Fresh evaluation draw, independent of the training data.
    R, S, A = simulate_exogenous_vars(nb_obs, R_pct=0.75, S_pct=0.6)
    G, L, F = simulate_endogenous_vars(A, R, S)
    # Evaluate each policy on the evaluation draw.
    P = {'simple': simplePolicy.evaluate(G, L, nb_seats),
         'naive': naivePolicy.evaluate(R, S, G, L, nb_seats),
         'unaware': unawarePolicy.evaluate(G, L, nb_seats),
         'fair': fairPolicy.evaluate(R, S, G, L, nb_seats)}
    policy_names = ['simple', 'naive', 'unaware', 'fair']
    # Diagnostic plots, one per policy, in the same order as before.
    for name in policy_names:
        hist_ability_by_policy(P[name], A, R, S, G, L, F)
    for name in policy_names:
        kde_ability_by_protected(P[name], A, R, S, G, L, F)
    # Utility under a ranking built from the (unfair) observed score F.
    unfair_ranking = np.argsort(-F.squeeze())
    print('Utility comparison on ranking from unfair policy:')
    for policy_name in policy_names:
        print(_.upper_first(policy_name), 'policy:', calc_utility(P[policy_name], unfair_ranking, nb_seats))
    # Utility under a ranking built from the true ability A.
    fair_ranking = np.argsort(-A.squeeze())
    print('Utility comparison on ranking from fair policy (based only on true ability):')
    for policy_name in policy_names:
        print(_.upper_first(policy_name), 'policy:', calc_utility(P[policy_name], fair_ranking, nb_seats))
if __name__ == "__main__": main()
| true |
8dceffc46954fb1f93786cd12e67e909ad4c693f | Python | czheluo/Python-Script | /consensus/cons.py | UTF-8 | 1,899 | 2.796875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Consensus and Profile
"""
__author__ = 'Meng Luo'
__Email__ = 'meng.luo@majorbio.com'
__copyright__ = 'Copyright (C) 2019 MARJOBIO'
__license__ = 'GPL'
__modified__= '20190815'
import os
import argparse
import sys
import re
# Command-line interface: -i/--fa input FASTA file, -o/--out output report file.
parser = argparse.ArgumentParser(description="input parameters")
parser.add_argument('-i', '--fa',required=True, help=' a fasta file ')
parser.add_argument('-o', '--out',required=True, help='ouput file name')
args = parser.parse_args()
# Usage example:
'''
python cons.py -i file.fa -o file.result
'''
def famat(seq):
    """Collect the sequence lines of a FASTA stream into one newline-joined string.

    Header lines (starting with '>') are skipped; every other line is
    right-stripped and kept in input order.
    """
    headers = []    # split header lines (collected but unused, as before)
    sequences = []
    for line in seq:
        if re.match(">", line):
            headers.append(line.split(">"))
        else:
            sequences.append(line.rstrip())
    return "\n".join(sequences)
def profile(matrix):
    """Build a per-position base-count profile from whitespace-separated sequences.

    Returns a dict mapping each base 'A'/'C'/'G'/'T' to a list of counts,
    one entry per column of the (equal-length) input sequences.
    """
    rows = matrix.split()
    width = len(rows[0])
    counts = {base: [0] * width for base in 'ACGT'}
    for row in rows:
        for position, base in enumerate(row):
            counts[base][position] += 1
    return counts
def consensus(profile):
    """Return the consensus string followed by the per-base count lines.

    `profile` maps each base to its list of per-position counts (as produced
    by profile()).  The first output line is the consensus sequence — the
    base with the highest count at each position, ties going to the base
    listed first — followed by one "BASE:count count ..." line per base.
    """
    result = []
    resu = []
    # dict.keys() is a non-indexable view on Python 3; materialise it as a
    # list so keys[0] below works (the original raised TypeError there).
    keys = list(profile.keys())
    for j in keys:
        res = j + ":" + " ".join(str(x) for x in profile[j])
        resu.append(res)
    for i in range(len(profile[keys[0]])):
        max_v = 0
        max_k = None
        for k in keys:
            v = profile[k][i]
            if v > max_v:
                max_v = v
                max_k = k
        result.append(max_k)
    resuls = ''.join(result) + '\n' + '\n'.join(resu)
    return resuls
# Run the pipeline: read the FASTA input, build the count profile, and write
# the consensus report.  Context managers guarantee both files are closed
# even if a processing step raises (the original leaked both handles).
with open(args.fa, "r") as dataset:
    ma = famat(dataset)
with open(args.out, "w") as out:
    pro = profile(ma)
    cp = consensus(pro)
    out.write(cp)
# show result
'''
ATGCAACT
A:5 1 0 0 5 5 0 0
C:0 0 1 4 2 0 6 1
T:1 5 0 0 0 1 1 6
G:1 1 6 3 0 1 0 0
''' | true |
d8b381ef27039209570fbf7a319ffe36cd3b0feb | Python | Tubbz-alt/mdcd-travelhack | /imgproc/rest/filtering.py | UTF-8 | 1,552 | 2.75 | 3 | [
"MIT"
] | permissive | from flask import Blueprint, request, make_response
class FilterNotRecognized(Exception):
    """Raised when a requested filter name is not supported.

    The message is forwarded to Exception so str(exc) shows it, and it is
    also kept on `.msg` for backward compatibility with existing callers.
    """
    def __init__(self, msg=''):
        # The original skipped Exception.__init__, so str(exc) was always ''.
        super().__init__(msg)
        self.msg = msg
class FilteringService:
    """Abstract interface for image-filtering backends.

    Concrete services override `apply`; FilterController depends only on
    this interface.
    """
    def apply(self, src_image_str, filter_name) -> bytes:
        """Apply the named filter to an in-memory image.

        :param src_image_str: the raw image content itself (a string/bytes),
            <b>not a filepath</b>
        :param filter_name: string naming the filter to apply
        :return: the processed image as bytes
        :raises NotImplementedError: always, on this abstract base; concrete
            implementations are expected to raise FilterNotRecognized for an
            unknown filter_name (see the controller's error handling below)
        """
        raise NotImplementedError()
class FilterController:
    """Flask blueprint exposing the image-filtering service over HTTP."""

    def __init__(self, service):
        self.blueprint = Blueprint('filtering', __name__)
        self.service = service
        self.routes()

    def routes(self):
        @self.blueprint.route('/apply-filter', methods=['POST'])
        def apply_filter():
            # Validate the request *before* touching request.files/form:
            # the original read both first, so a missing field raised an
            # unhandled KeyError (HTTP 500) instead of this 400 response.
            if 'filter_name' not in request.form or 'image' not in request.files:
                return {"error": "image or filter name or both are not provided"}, 400
            imageString = request.files['image'].read()
            filter_name = request.form['filter_name']
            try:
                repl = self.service.apply(imageString, filter_name)
            except FilterNotRecognized:
                return {"error": "cannot recognize filter"}, 400
            # Successful filtering: return the processed bytes as a JPEG
            # attachment.
            response = make_response(repl)
            response.headers.set('Content-Type', 'image/jpeg')
            response.headers.set('Content-Disposition', 'attachment', filename='r.jpg')
            return response
| true |
1adcd7733b77df39f04e1b2d1e613c5181a69820 | Python | fuk/projecteuler | /10.py | UTF-8 | 250 | 3.40625 | 3 | [] | no_license |
def sum_primes(size):
    """Return the sum of all primes strictly below `size` (Sieve of Eratosthenes)."""
    total, sieve = 0, [True] * size
    for x in range(2, size):
        if sieve[x]:
            total += x
            # Mark every further multiple of the prime x as composite.
            for i in range(2 * x, size, x):
                sieve[i] = False
    return total


# Project Euler 10: sum of the primes below two million.  The function was
# renamed from `sum` (which shadowed the builtin) and the call is guarded so
# importing this module no longer triggers the full computation.
if __name__ == "__main__":
    print(sum_primes(2000000))
| true |
0eecae06536a268456c97f29466a450589598a98 | Python | OhEvolve/LabHelper | /methods.py | UTF-8 | 5,703 | 2.859375 | 3 | [] | no_license |
"""
Common internal methods used throughout protocols
"""
# standard libraries
# nonstandard libraries
#import openpyxl
#from openpyxl.utils import get_column_letter
# homegrown libraries
#--------------------------------------#
# External Methods #
#--------------------------------------#
def load_reagent(name):
    # TODO: not implemented yet — intended to look a reagent up by name
    # (likely via the xlsx mechanism in request_xlsx below; confirm).
    pass

def load_sequence(name):
    # TODO: not implemented yet — intended to look a sequence up by name.
    pass

def load_enzyme(name):
    # TODO: not implemented yet — intended to look an enzyme up by name.
    pass
#--------------------------------------#
def convert_volume(unit, total):
    """Convert a volume to the unit in which its magnitude lies in [0.1, 100).

    unit: (value, unit_name) pair; unit_name is a key of _volume_units() or
        'X', in which case the volume is total/value (a fold dilution).
    total: (value, unit_name) reference volume, used only for 'X' units.
    Returns a (value, unit_name) pair expressed in the chosen unit.
    """
    units = _volume_units()
    if unit[1] == 'X':
        # 'X' means a fold-dilution of the total reaction volume.
        unit = (total[0]/float(unit[0]),total[1])
    else:
        assert unit[1] in units.keys(),'units not recognized ({})'.format(unit[1])
    # Express the volume in every known unit, then keep the one whose
    # magnitude falls in the readable range [0.1, 100).
    unit_converts = [(unit[0]*units[unit[1]]/v,k) for k,v in units.items()]
    unit_converts = [(k,v) for k,v in unit_converts if k >= 0.1 and k < 100]
    if len(unit_converts) != 1:
        # The original used a Python 2 print statement (a syntax error under
        # Python 3); this single-argument form prints the same text on both.
        print('Something is wrong: {0}'.format(unit_converts))
    return unit_converts[0]
#--------------------------------------#
def fill_reaction(total, *args):
    """Return the volume (in total's unit) needed to top the reaction up to `total`.

    Each extra argument is a (name, (value, unit)) pair; the reagent volumes
    are converted into total's unit and subtracted from total's value.
    """
    units = _volume_units()
    target_scale = units[total[1]]
    used = sum(vol[0] * units[vol[1]] / target_scale for _name, vol in args)
    return (total[0] - used, total[1])
#--------------------------------------#
def request_xlsx(name,fname,sheet=''):
    """ Attempt to pull one row from an xlsx database by its column-A label
    and return it as a dict keyed by the header row (normalized).

    NOTE(review): `import openpyxl` is commented out at the top of this
    module, so calling this currently raises NameError — confirm and restore.
    `name.decode('utf-8')` and the bare print statements below are Python 2
    only.
    """
    assert fname.endswith('.xlsx'), "fname extension not operable"
    assert isinstance(sheet,str), "sheet name not string"
    # open database file (first sheet unless one was named)
    wb = openpyxl.load_workbook(fname)
    if sheet == '':
        sheet = wb.get_sheet_names()[0]
    worksheet = wb.get_sheet_by_name(sheet)
    # extract enzyme information: row indices whose column-A label matches
    # `name` (data rows start at 2; row 1 is the header)
    value = [i for i in range(2,worksheet.max_row)
            if worksheet['{}{}'.format('A',i)].value.strip(' ') ==
            name.decode('utf-8')]
    # check for matched enzymes: exactly one matching row is required
    if len(value) == 0:
        print 'No recognized labels ({})'.format(name)
        database = {}
    elif len(value) == 1:
        # zip the header row with the matched row, then normalize keys/values
        database = dict([(k.value,v.value) for k,v
                in zip(tuple(worksheet.rows)[0],tuple(worksheet.rows)[value[0]-1])])
        database = _normalize_dict(database)
    else:
        print 'Multiple matching labels, skipping assignment...'
        database = {}
    # return extracted values
    return database
#--------------------------------------#
def dict_update(base_dict, add_dict):
    """Fill missing (None) entries of base_dict from add_dict, in place.

    Existing non-None values win.  Returns base_dict for convenience.
    """
    for key, value in base_dict.items():
        # `is None` instead of `== None`: equality can be hijacked by a
        # custom __eq__; identity cannot.
        if value is None and key in add_dict:
            base_dict[key] = add_dict[key]
    return base_dict
#--------------------------------------#
def obj_update(obj, modifications=None):
    """Copy key/value pairs onto obj as attributes.

    If `modifications` is a dict it is used directly; otherwise the object's
    own `settings` dict is applied.  Prints a note when neither is available.
    """
    if isinstance(modifications, dict):
        # Plain loops instead of throwaway list comprehensions built purely
        # for their side effects.
        for key, value in modifications.items():
            setattr(obj, key, value)
    elif hasattr(obj, 'settings'):
        for key, value in obj.settings.items():
            setattr(obj, key, value)
    else:
        # Single-argument print() behaves identically on Python 2 (where the
        # original used a print statement) and Python 3.
        print('No settings attribute detected, skipping update...')
#--------------------------------------#
def capitalize(my_str):
    """Upper-case the first letter of every sentence and paragraph.

    A boundary is either '\\n ' or '. '; each segment between boundaries
    gets its first character upper-cased.
    """
    for separator in ('\n ', '. '):
        segments = my_str.split(separator)
        capped = [segment[0].upper() + segment[1:] for segment in segments]
        my_str = separator.join(capped)
    return my_str
#--------------------------------------#
# TODO: consider faster alternatives (no indexing)
#--------------------------------------#
def comp(seq):
    """Return the base-wise complement of `seq` (same length, same order).

    Iterates the string directly instead of indexing with xrange: this drops
    the Python-2-only xrange call and removes per-character indexing, which
    is exactly what the TODO above asks for.
    """
    assert isinstance(seq, str), "submitted sequence is not str"
    seq_map = _seq_map()
    return ''.join(seq_map[base] for base in seq)
#--------------------------------------#
def rcomp(seq):
    """Return the reverse complement of `seq`.

    Uses reversed() instead of manual backwards indexing with the
    Python-2-only xrange, per the TODO above about avoiding indexing.
    """
    assert isinstance(seq, str), "submitted sequence is not str"
    seq_map = _seq_map()
    return ''.join(seq_map[base] for base in reversed(seq))
#--------------------------------------#
# Internal Methods #
#--------------------------------------#
def _seq_map():
# TODO: expand mapping to include degenerative codons
return {'A':'T','T':'A',
'C':'G','G':'C',
'N':'N',' ':' ',
'|':'|','-':'-'}
#--------------------------------------#
def _volume_units():
""" Volume units dictionary """
return {
'nL':1e-9,
'uL':1e-6,
'mL':1e-3,
'L': 1e0
}
#--------------------------------------#
def _normalize_dict(my_dict):
new_dict = {}
# iterate through dictionary items
for k,v in my_dict.items():
# check if float
try:
new_dict[str(k).lower()] = float(v)
continue
except ValueError:
pass
# check if list of strs
if v.strip(' ').startswith('[') and v.strip(' ').endswith(']'):
try:
new_dict[str(k).lower()] = [float(i) for i in v[1:-1].split(',')]
continue
except ValueError:
new_dict[str(k).lower()] = [str(i).strip(' ') for i in v[1:-1].split(',')]
continue
else:
new_dict[str(k).lower()] = str(v).strip(' ')
# returns new dictionary
return new_dict
#--------------------------------------#
# Testing #
#--------------------------------------#
if __name__ == "__main__":
seq = 'AAATTTCCCGGGATCG'
print seq
print comp(seq)
print rcomp(seq)
| true |
2ba68e125415ff2febc6c18cd4dffa1703ad25a4 | Python | ihokamura/python_machine_learning_notebook | /metric_utility.py | UTF-8 | 2,207 | 3.296875 | 3 | [] | no_license | """
utility to compute metrics
"""
import numpy as np
def roc_curve(y_true, y_score, pos_label):
    """
    compute ROC (receiver operating characteristic) curve

    # Parameters
    -----
    * y_true : array-like, shape = (n_samples, )
        target variable (binary: pos_label plus exactly one other label)
    * y_score : array-like, shape = (n_samples, )
        score (model prediction) corresponding to the target variable
    * pos_label : int
        label of target variable seen as positive

    # Returns
    -----
    * fpr : array-like, shape = (n_samples, )
        false positive rate at each threshold
    * tpr : array-like, shape = (n_samples, )
        true positive rate at each threshold
    * thresholds : array-like, shape = (n_samples, )
        thresholds used, in descending order

    # Notes
    -----
    * n_samples represents the number of samples.
    """
    # Determine the negative label: the first label differing from
    # pos_label.  (The previous code used y_true[1] whenever
    # y_true[0] == pos_label, which is wrong as soon as the first two
    # samples are both positive.)
    neg_label = next((y for y in y_true if y != pos_label), None)

    fpr, tpr = [], []
    thresholds = np.array(sorted(y_score, reverse=True))
    for threshold in thresholds:
        # count true positive, true negative, false positive and false negative
        N_tp = np.sum([(y == pos_label and score >= threshold) for y, score in zip(y_true, y_score)])
        N_tn = np.sum([(y == neg_label and score < threshold) for y, score in zip(y_true, y_score)])
        N_fp = np.sum([(y == neg_label and score >= threshold) for y, score in zip(y_true, y_score)])
        N_fn = np.sum([(y == pos_label and score < threshold) for y, score in zip(y_true, y_score)])
        fpr.append(N_fp/(N_tn + N_fp))
        tpr.append(N_tp/(N_tp + N_fn))
    fpr = np.array(fpr)
    tpr = np.array(tpr)
    return fpr, tpr, thresholds
def auc(fpr, tpr):
    """
    compute AUC (area under ROC curve)

    # Parameters
    -----
    * fpr : array-like, shape = (n_samples, )
        false positive rate at each threshold
    * tpr : array-like, shape = (n_samples, )
        true positive rate at each threshold

    # Returns
    -----
    * _ : float
        AUC for the ROC, via trapezoidal integration of tpr over fpr

    # Notes
    -----
    * n_samples represents the number of samples.
    """
    area = np.trapz(tpr, fpr)
    return area
| true |
ed1b2cb264518dd8d3cd538a3dbcb60a1f96764a | Python | ohshige15/AtCoder | /ABC131/D.py | UTF-8 | 319 | 2.9375 | 3 | [] | no_license | N = int(input())
# AtCoder ABC131 D: N jobs were read above; each line is a pair (a, b) —
# presumably a = processing time and b = deadline (TODO: confirm against the
# problem statement).
L = [list(map(int, input().split())) for _ in range(N)]
# Group job durations by their deadline.
X = {}
for a, b in L:
    if b not in X:
        X[b] = []
    X[b].append(a)
# Greedy feasibility check: process deadlines in increasing order while
# accumulating completion time; if any job finishes past its deadline, the
# whole schedule is infeasible.
now = 0
for b, Y in sorted(X.items(), key=lambda x: x[0]):
    for a in Y:
        now += a
        if b < now:
            print("No")
            exit()
print("Yes")
| true |
3e62b22a6f22cf928f4d385540b8242e4d917cba | Python | SoliDeoGloria31/study | /MySQL/mysql_day01/fun.py | UTF-8 | 993 | 3.75 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# fun.py
# 按照一定概率产生福
# 爱国福 30%,敬业福 10%,和谐福 30%,友善福 20%,富强福 10%
# 0 ~ 99 随机数
import random
# Running tallies for each of the five "fu" (blessing) card types.
f1, f2, f3, f4, f5 = 0, 0, 0, 0, 0


def gen_fu():
    """Draw one card and bump the matching global counter.

    Target distribution over a uniform draw in [0, 99]:
    patriotism 30%, dedication 10%, harmony 30%, friendliness 20%,
    prosperity 10%.
    """
    global f1, f2, f3, f4, f5
    draw = random.randint(0, 99)  # uniform over 100 outcomes
    if draw < 30:        # 30 outcomes -> 30%
        f1 += 1
    elif draw < 40:      # 10 outcomes -> 10%
        f2 += 1
    elif draw < 70:      # 30 outcomes -> 30%
        f3 += 1
    elif draw < 90:      # 20 outcomes -> 20%
        f4 += 1
    else:                # remaining 10 -> 10%
        f5 += 1
# Monte-Carlo check: draw ten million cards, then report each type's observed
# frequency.  The printed labels are the Chinese card names; they are runtime
# output and deliberately left untranslated.
for i in range(10000000):
    # print('%d' % i)
    gen_fu()
total = f1 + f2 + f3 + f4 + f5
# Expected frequencies: 0.30, 0.10, 0.30, 0.20, 0.10 (see gen_fu).
print('爱国福:%f' % (f1 / total))
print('敬业福:%f' % (f2 / total))
print('和谐福:%f' % (f3 / total))
print('友善福:%f' % (f4 / total))
print('富强福:%f' % (f5 / total))
| true |
83262a8ec77c6790ac586fc20abbf19c4adee1e4 | Python | isaac-aryan/Backup | /Python/Tuple_in_Python.py | UTF-8 | 1,075 | 4.65625 | 5 | [] | no_license | #Tuples are similar to lists but they are immutable
#Immutable means they cannot be changed, i.e. a single element of the tuple cannot be removed or updated
tup1=('physics', 'chemistry', 1997, 2000)
tup2=(1,2,3,4,5)
tup3=("a","b","c","d",)
#Accessing values in tuples is similar to the way we do it in lists
print(tup1[1])
print(tup2[1:3+1])
#We cannot update a tuple in place, but we can always create a new tuple and add elements to it
tup4=tup3+('e',)
print(tup4)
#We cannot delete individual tuple elements (tuples are immutable), but we can delete an entire tuple
del tup4
tup4=tup3+('f',)
print(tup4)
#BASIC EXPRESSIONS ON TUPLES
#1) Length
print(len(tup3))
#2) Addition (concatenation)
print(tup2+tup3)
#3) Repetition or Multiplication
print(('Hi!',)*4)
#4) For going over each element in the tuple (iteration)
for x in (tup2):
    print (x)
#INDEXING AND SLICING
print(tup1[0])
print(tup1[-1])
print(tup1[1:])
#The same functions used for lists are used for tuples. Some examples:
#1) cmp(tup1,tup2)  (Python 2 only; cmp() was removed in Python 3)
#2) max(tup1), min(tup1)
| true |
422bcca12d9e4aac19bfddb85595bc16d17cf25f | Python | suhridgit/6.033 | /new_shell.py | UTF-8 | 423 | 2.734375 | 3 | [] | no_license | import re
def read_file(filename):
    """Read `filename` and return its text with any self-invoking
    "execfile('new_shell.py')" lines removed.

    Fixes from the original: the handle was opened as `f` but read through
    an undefined `fd`; the result variable `read` was undefined (the text
    lives in `fr`); and the regex was missing the closing quote of
    'new_shell.py'.
    """
    with open(filename, 'r') as f:
        fr = f.read()
    return re.sub(r"execfile\('new_shell\.py'\)\n", '', fr)
def login(args):
    """Set the global `username` from args and append it to usernames.txt.

    args: list of command arguments; exactly one (the username) is expected.
    Raises CommandError (defined elsewhere in the shell) on a usage error or
    when a user is already logged in.
    """
    if len(args) != 1:
        raise CommandError("Usage: login username")
    # NOTE(review): `username` must already exist as a module-level variable
    # (presumably initialised to None/'' elsewhere in the file); if it does
    # not, this check raises NameError rather than CommandError — confirm.
    global username
    if username:
        raise CommandError("Already logged in.")
    username = args[0]
    # Persist every successful login, one name per line.
    with open("usernames.txt", 'a') as fa:
        fa.write(username + "\n")
dfeb12a09cabcf570f499d91f1d6b08639bffd31 | Python | yangrencong/pythonstudy | /3.0/topping.py | UTF-8 | 251 | 2.953125 | 3 | [] | no_license | request_toppings = []
# Announce pizza status based on the requested toppings (defined above).
if request_toppings:
    # Fixed: the loop previously iterated over the misspelled name
    # `request_topings`, which raised NameError as soon as the list was
    # non-empty.
    for request_topping in request_toppings:
        print("Sorry,we are out of green peppers right now")
    print("\nFinished making your pizza")
else:
    print("Are you sure you want a plain pizzas")
| true |
8c45eeef8a98af46bf30e3bbfee7965bed7e4f34 | Python | macrae/artoo | /test_tabular_data.py | UTF-8 | 964 | 2.640625 | 3 | [] | no_license | import numpy as np
from hypothesis import strategies
from hypothesis.strategies._internal.lazy import LazyStrategy
from typing import NamedTuple
from tabular_data import TabularData, tabular_parser
def test_tabular_parser():
    # Parse the bundled CSV fixture and check the TabularData shape:
    # numpy arrays for both header and body, and a 100-row "Amount" column.
    tabular_data = tabular_parser("./data/female_names_top100_2019.csv")
    assert isinstance(tabular_data, TabularData)
    assert isinstance(tabular_data.column_names, np.ndarray)
    assert isinstance(tabular_data.data, np.ndarray)
    assert len(tabular_data.get_column("Amount")) == 100

def test_create_column_strategy():
    # A single column should yield a (lazy) hypothesis strategy.
    tabular_data = tabular_parser("./data/female_names_top100_2019.csv")
    some_amount = tabular_data.create_column_strategy("Amount")
    assert isinstance(some_amount, LazyStrategy)

def test_create_record_strategy():
    # A record strategy should expose one LazyStrategy per column (here
    # checked through the Amount field of the generated record).
    tabular_data = tabular_parser("./data/female_names_top100_2019.csv")
    some_record = tabular_data.create_record_strategy()
    assert isinstance(some_record.Amount, LazyStrategy)
eeca05afd1a7c0298ebd40df5a70a6bd1928be5b | Python | marcinofulus/iCSEbook1 | /Warsztaty/source/MT_logistyczne/code02.py | UTF-8 | 108 | 2.765625 | 3 | [] | no_license | a=4.0
# Logistic map x -> a*x*(1-x) with a = 4.0 (set above; the fully chaotic
# regime): iterate two trajectories whose initial conditions differ by 1e-6
# and print them side by side to show sensitive dependence on initial
# conditions (Python 2 script).
x=0.123
y=0.123+0.000001
pkts = []  # NOTE(review): never filled — presumably meant to collect the points
for i in range(25):
    x = a*x*(1-x)
    y = a*y*(1-y)
    print x,y
| true |
1dc17c49f0f9735e740b1b067dd9b3bdb5a31581 | Python | polonkaipal/Szkriptnyelvek | /hazi feladatok/11.17/20120815k_alcatraz.py | UTF-8 | 580 | 3.1875 | 3 | [] | no_license | #!/usr/bin/env python3
MAX = 600


def main():
    """Alcatraz door puzzle: report which of the MAX cells end up open.

    Round 1 opens every door; round k then toggles every k-th door.  A door
    ends up open exactly when its number has an odd divisor count, i.e. when
    it is a perfect square.  The open door numbers are printed concatenated.
    """
    # Round 1 is already applied: every door starts open.
    doors = [True] * (MAX + 1)
    for step in range(2, MAX + 1):
        for cell in range(step, MAX + 1, step):
            doors[cell] = not doors[cell]
    # The perfect-square cell inmates go free.
    open_numbers = [str(number) for number, is_open in enumerate(doors[1:], start=1) if is_open]
    print("".join(open_numbers))
##############################################################################
if __name__ == "__main__":
main()
| true |
330567e931d52acec39cbb9e61a33e8bc5a4f847 | Python | bszuchmacher/Python-Coding | /L2Ex3.py | UTF-8 | 409 | 3.5 | 4 | [] | no_license | # list_square_even_power_odd(list_range) :
# a) Use list comprehension
# b) All even indices will get the square root of their index
# c) All odd indices will get the ^2 of their index
# d) Verify range is relevant
import math
# NOTE(review): both accumulators are never filled anywhere in this file.
out_long = []
out_compac = []
def list_square_even_power_odd():
    # NOTE(review): despite the exercise description above (sqrt for even
    # indices, square for odd ones, over a validated list_range), this
    # generator just yields 0..15 and is never called — presumably a stub.
    for i in range(16):
        yield i
# The actual exercise logic: sqrt for even indices, i**2 for odd, over 0..15.
print([(math.sqrt(i) if (i % 2) == 0 else i ** 2) for i in range(16)])
| true |
34d7f7400bc5142c2a7262e9b239857898cb9618 | Python | hsiangyi0614/X-Village-2018-Exercise | /Lesson09-API-Web-Crawler/exercise2.py | UTF-8 | 760 | 3.46875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
# Fetch ~300 days of GOOG daily prices as CSV from financialcontent.com.
url = 'http://markets.financialcontent.com/stocks/action/gethistoricaldata?Month=12&Symbol=GOOG&Range=300&Year=2017'
google_stock = pd.read_csv(url)
new_google_stock = google_stock.iloc[::-1]  # The feed arrives newest-first (12/29/17 back to 03/28/14), so reverse it into chronological order (3/28/14 to 12/29/17).
new_google_stock = new_google_stock[:30]  # Keep only the first 30 days so the high/low band stays clearly visible.
print(new_google_stock)
plt.figure(figsize=(10, 5))
x = range(0,new_google_stock.shape[0])
y = new_google_stock['Open']
yh = new_google_stock['High']
yl = new_google_stock['Low']
# Dotted blue line for the open price, yellow band between daily high and low.
plt.plot(x, y, color='blue', linewidth=2.0, linestyle=':')
plt.fill_between(x,yh,yl,color='yellow')
plt.show() | true |
93accd0ce4f910266dfe3fe604f16753767d92af | Python | dvp-git/Python-Crash-Course | /ifstatement_list-5.9.py | UTF-8 | 414 | 2.9375 | 3 | [] | no_license | # Exerecise 5.9 No Users
# Greet each logged-in user, or complain when the list is empty.
# Sample data for the populated case:
# user_list = ['server','client','admin','service','support']
user_list = []
if not user_list:
    print("We need to find some users")
else:
    for user in user_list:
        if user == 'admin':
            print("Hello admin, Would you like to see the status report?")
        else:
            print("Hello " + str(user) + ",Thank you for logging in again")
| true |
0fd2dca3430b22037985a8f21bf559cbe9d90c49 | Python | mennafateen/musicly | /Album.py | UTF-8 | 1,441 | 3.171875 | 3 | [] | no_license | from playsound import playsound
import sqlite3
class Album:
    """A music album backed by the 'album' table of musicly.db (Python 2 code)."""
    # NOTE(review): these class-level defaults are shadowed by the instance
    # attributes set in __init__; the mutable `songs = []` list would be
    # shared between instances if __init__ ever skipped assigning it.
    id = 0
    name = ""
    band = ""
    artist = ""
    num_of_songs = ""
    songs = []

    def __init__(self, id="", name="", band="", artist="", num_of_songs="", songs=[]):
        # NOTE(review): `songs=[]` is a mutable default argument — the same
        # list object is reused by every call that omits `songs`; confirm no
        # caller mutates it.
        self.id = id
        self.name = name
        self.band = band
        self.artist = artist
        self.num_of_songs = num_of_songs
        self.songs = songs

    def viewAlbum(self):
        # Print a human-readable summary; band/artist are either None or
        # objects exposing a .name attribute.
        print "Name: ", self.name
        print "Band: ",
        if self.band is not None:
            print self.band.name
        else:
            print self.band
        print "Artist: ",
        if self.artist is not None:
            print self.artist.name
        else:
            print self.artist
        for song in self.songs:
            print "* ", song.name, " ", song.length

    def playAlbumSongs(self):
        # Play every song in order; playsound blocks until each finishes.
        for song in self.songs:
            playsound(song.path)

    def deleteAlbum(self):
        # Remove this album's row by primary key (parameterized query).
        sql = sqlite3.connect('musicly.db')
        cur = sql.cursor()
        values = [self.id]
        cur.execute('DELETE FROM album \
            WHERE id = (?)', values)
        sql.commit()
        sql.close()

    def addAlbum(self):
        # Insert a new row containing only the album name.
        sql = sqlite3.connect('musicly.db')
        cur = sql.cursor()
        value = [self.name]
        cur.execute('INSERT INTO album(name) \
            VALUES (?)', value)
        sql.commit()
        sql.close()
| true |
b2653b79628c470d66d563bda98765645f6dfc99 | Python | Teaching-projects/SOE-ProgAlap1-HF-2020-Teo2002 | /007/main.py | UTF-8 | 520 | 3.015625 | 3 | [] | no_license | egyenleg = 0
# Twelve-month bank-account exercise (Hungarian identifiers: egyenleg =
# balance, szamldij = account fee, osszeg = total moved, penzmozgas = money
# movement).  The balance starts at 0 above.
szamldij = 2000
negativ = 1.1   # factor applied to a non-positive balance (appears to model penalty interest)
pozitiv = 1.05  # factor applied to a positive balance (appears to model earned interest)
osszeg = 0
# First month: a deposit/withdrawal is applied, but no account fee yet.
penzmozgas = int(input())
egyenleg += penzmozgas
osszeg += penzmozgas
if egyenleg > 0:
    egyenleg *= pozitiv
else:
    egyenleg *= negativ
egyenleg = int(egyenleg)
i = 0
# Remaining 11 months: charge the fee, apply the movement, apply interest,
# then truncate the balance to an integer.
while i < 11:
    penzmozgas = int(input())
    osszeg += penzmozgas
    egyenleg -= szamldij
    egyenleg +=penzmozgas
    if egyenleg > 0:
        egyenleg *= pozitiv
    else:
        egyenleg *= negativ
    egyenleg = int(egyenleg)
    i += 1
print(egyenleg)
print(osszeg)
| true |
56bd1dd303baf7f75c3ef7a20b23f2431f35af38 | Python | chrisxue815/lintcode_python | /problems/utils.py | UTF-8 | 240 | 2.671875 | 3 | [
"Unlicense"
] | permissive | import collections
import json
def json_object_hook(d):
    """Convert a decoded JSON object dict into an immutable 'Json' namedtuple."""
    Json = collections.namedtuple('Json', d.keys())
    return Json(*d.values())
def load_json_from_path(path):
    """Parse the JSON file at `path`, returning nested namedtuples instead of dicts."""
    with open(path) as f:
        return json.load(f, object_hook=json_object_hook)
| true |
7d6462f9e3d5eded7992998d9e3818fa94ca407f | Python | LBJ-Wade/SZ-Filtering | /ilc_tools/ilc.py | UTF-8 | 7,843 | 2.609375 | 3 | [] | no_license | import numpy as np
from astropy.io import fits
from scipy import ndimage
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid1 import make_axes_locatable
import healpy as hp
from ilc_tools import data_tools as dt
from ilc_tools import misc, sz
def project_maps(npix, input_file = None, RA = None, DEC = None, pxsize = None, same_units = False, same_res = False):
    '''Project HEALPIX maps using gnomonic view given coordinates.

    Parameters:
    -----------
    npix: int
        number of pixels per side of each projected map
    input_file: str, optional
        Name of the data file to be used containing RA, DEC and pixel size.
    RA: float array, optional
        Right ascension of objects, ICRS coordinates are required. Default: None
    DEC: float array, optional
        Declination of objects, ICRS coordinates are required. Default: None
    pxsize: float, optional
        pixel size in arcminutes. Recommended: 1.5
    same_units: bool, optional
        if changed to True all Planck maps will be scaled to common units
        using the Planck unit-conversion factors below. Default: False
        NOTE(review): the docstring of record said "units of K_CMB", but the
        factors applied are the K_CMB -> MJy/sr conversions, so the common
        unit appears to be MJy/sr — confirm.
    same_res: bool, optional
        if changed to True all Planck maps will be smoothed to the resolution
        of the lowest-frequency channel. Default: False

    Returns:
    --------
    output: array
        4D array of shape (nclusters, n_freq, npix, npix) containing the
        projected maps.
    '''
    # Coordinates come either from an ASCII table or directly as arrays.
    if input_file is not None:
        data_new = dt.ascii.read(input_file)
        ra = np.array(data_new[:]['RA'])
        dec = np.array(data_new[:]['DEC'])
        pixsize = np.array(data_new[:]['pixel_size'])
        nclusters = len(ra)
    else:
        ra = RA
        dec = DEC
        pixsize = pxsize
        nclusters = len(ra)
    freq = [100, 143, 217, 353, 545, 857] #0-353GHz are in K_cmb while 545 and 857GHz are in MJy/sr
    nf = len(freq)
    # FWHM -> Gaussian sigma conversion factor, 2*sqrt(2*ln 2).
    A = (2*np.sqrt(2*np.log(2)))
    output = np.zeros((nclusters, nf, npix, npix))
    for i in np.arange(nclusters):
        for f in np.arange(nf):
            # NOTE(review): assumes pre-extracted single-frequency layers
            # named 'HFI_<f>_layer.fits' exist in the working directory.
            all_sky = hp.read_map('HFI_{0}_layer.fits'.format(f))
            projected_map = hp.gnomview(all_sky, coord=('G','C'), rot=(ra[i],dec[i]), return_projected_map=True, xsize=npix, reso=pixsize[i], no_plot = True)
            if same_units is True: #from https://wiki.cosmos.esa.int/planckpla2015/index.php/UC_CC_Tables
                # Per-band unit-conversion factors for the K_CMB channels.
                if f == 0:
                    projected_map *= 244.0960
                if f == 1:
                    projected_map *= 371.7327
                if f == 2:
                    projected_map *= 483.6874
                if f == 3:
                    projected_map *= 287.4517
            if same_res is True and f != 0:
                # Smooth to the 100 GHz beam: convolve with the Gaussian whose
                # FWHM is the quadrature difference of the two beams.
                kernel = np.sqrt(sz.planck_beams(freq[0])**2 - sz.planck_beams(freq[f])**2)
                print(sz.planck_beams(freq[0]), sz.planck_beams(freq[f]), kernel/A/pixsize[i])
                projected_map = ndimage.gaussian_filter(projected_map, sigma= kernel/A/pixsize[i], order=0, mode = "reflect", truncate = 10)
            output[i,f,:,:] = projected_map
    print(output.shape)
    return(output)
def offset_correction(estimated_map, npix=400, bins=300, median=False, gaussian=False, fit=False, plot=False):
    '''Corrects the offset present in the ILC maps.

    Parameters:
    -----------
    estimated_map: float array
        ILC map obtained after computing weights
    npix: int
        number of pixels. Default is 400
    bins: int
        number of bins for the histogram fit
    median: bool, optional
        Subtracts the median from the estimated map if True.
    gaussian: bool, optional
        If True, fits a gaussian to the pixel histogram (via misc.fit_data)
        and subtracts the best-fit centre.
    fit, plot: bool, optional
        Unused here; kept so the existing call signature stays unchanged.

    Returns:
    --------
    estimated_map: float array
        offset corrected ILC map
    '''
    if median is True:
        estimated_map = estimated_map - np.median(estimated_map)
    if gaussian is True:
        popt = misc.fit_data(estimated_map, npix, bins, fit=True, plot=False)
        estimated_map = estimated_map - popt[0]
        # Fixed: the original printed the undefined name `p_opt`, which
        # raised NameError whenever gaussian=True was used.
        print('Best mean- fit', popt[0])
    return estimated_map
def ilc_run(data, F=None, e=None, constrain=None, offset=False, bins=None):
    '''Runs an internal linear combination on a given set of multi-frequency maps

    Parameters
    ----------
    data: float array
        a 3D array of dimensions n_freq x npix x npix.
    F: array
        spectral information of the desired map, either CMB or tSZ. The
        dimensions have to be n_components x n_freq
    e: array, optional
        If multiple spectral components are constrained, e gives the
        response of the ilc weights to the individual spectra
    constrain: array
        Spectral constraints for the ilc algorithm. If contaminants
        are constrained as well, the dimensions have to be
        n_components x n_freq
    offset: bool, optional
        Default: False. If True the map offset is removed by fitting a
        gaussian to the pixel histogram (see offset_correction).
    bins: int
        number of bins, not more than the number of pixels.

    Returns
    --------
    reconstructed_map: array
        Compton y map or a CMB map that has dimensions npix x npix
    '''
    nf = data.shape[0]
    npix = data.shape[1]
    # Empirical frequency-frequency covariance of the flattened maps.
    matrix = np.cov(data.reshape(nf, npix * npix))
    cov_inverse = np.linalg.inv(matrix)
    if e is None:  # standard ilc
        weights = (cov_inverse @ F) / (np.transpose(F) @ cov_inverse @ F)
        print(weights)
    else:  # constrained ilc
        X = np.array([F, constrain])
        weights = np.transpose(e) @ (np.linalg.inv(X @ cov_inverse @ np.transpose(X))) @ X @ cov_inverse
        print(weights)
    # Weighted sum over the frequency axis.  The original spelled out six
    # channels by hand (weights[0]*data[0] + ... + weights[5]*data[5]), which
    # silently assumed nf == 6; this form is identical for six channels and
    # works for any channel count.
    reconstructed_map = np.tensordot(weights, data, axes=([0], [0]))
    if offset is True:
        reconstructed_map = offset_correction(reconstructed_map, npix, bins, gaussian=True)
    return reconstructed_map
def spatially_decompose(data, RA, DEC, kernels, pxsize = 1.5, npix = 400, bins = None, F = None, e = None, constrained = False , offset = False, out_file = False):
    """Needlet-style ILC: band-pass the maps at several smoothing scales, run
    the ILC on each band, and sum the per-scale results into one y map.

    NOTE(review): several apparent inconsistencies to confirm:
      * project_maps documents a 4D (nclusters, n_freq, npix, npix) return,
        but `maps` is indexed below as 3D maps[f,:,:];
      * the npix/pxsize parameters are ignored — the project_maps call
        hard-codes npix=400 and pxsize=1.5;
      * ilc_run is called with constrain=True although it expects a spectral
        constraint array, and the `constrained`/`out_file` parameters are
        unused.
    """
    maps = project_maps(npix = 400, input_file = data, RA = RA, DEC = DEC, pxsize = 1.5, same_units = True, same_res = True)
    nkernels = len(kernels)
    nf = maps.shape[0]
    npix = maps.shape[1]
    # FWHM -> Gaussian sigma conversion factor, 2*sqrt(2*ln 2).
    fwhm = (2*np.sqrt(2*np.log(2)))
    estimated_map = np.zeros((nkernels,npix,npix))
    for i in np.arange(nkernels):
        print([i, nkernels])
        map_slice = np.zeros((nf, npix, npix))
        for f in np.arange(nf):
            if i < (nkernels -1):
                # Band-pass filter: the difference of two Gaussian smoothings
                # at consecutive kernel scales.
                kernel1 = ndimage.gaussian_filter(maps[f,:,:], sigma=kernels[i]*fwhm/pxsize, order=0, mode = "constant", truncate = 10)
                kernel2 = ndimage.gaussian_filter(maps[f,:,:], sigma=kernels[i+1]*fwhm/pxsize, order=0, mode = "constant", truncate = 10)
                map_slice[f,:,:] = (kernel1 - kernel2)
            else:
                # Last band: keep everything at/below the final scale.
                map_slice[f,:,:] = (ndimage.gaussian_filter(maps[f,:,:], sigma=kernels[i]*fwhm/pxsize, order=0, mode = "constant", truncate = 10))
        print(map_slice.shape)
        test_output = ilc_run(data = map_slice, F = F, e = e, constrain = True, offset = False, bins = bins)
        print(test_output.shape)
        estimated_map[i,:,:] = test_output
    # Recombine all spatial bands into the final y map.
    y_map = np.sum(estimated_map, axis = 0)
    return(y_map)
| true |
1c08a840e29210c03fdf7f48ad49281c22ebddc4 | Python | CianLR/judge-solutions | /leetcode/word_search.py | UTF-8 | 1,239 | 3.34375 | 3 | [] | no_license | from collections import defaultdict
class Node:
    """Grid-cell node for the word-search DFS: stores its character, the
    adjacency lists keyed by neighbour character, and a `seen` flag used to
    avoid revisiting a cell on the current path."""

    def __init__(self, c):
        self.c = c                    # character stored in this cell
        self.seen = False             # True while the node is on the current path
        self.adj = defaultdict(list)  # char -> neighbouring nodes carrying that char

    def search(self, word, s=0):
        """Return True if word[s:] can be spelled starting from this node
        without revisiting a node already on the current path."""
        if s == len(word):
            return True
        self.seen = True
        matched = any(
            neighbour.search(word, s + 1)
            for neighbour in self.adj[word[s]]
            if not neighbour.seen
        )
        if matched:
            # Keep `seen` set on success, exactly as the original early
            # return did; the caller stops searching immediately anyway.
            return True
        self.seen = False
        return False
class Solution(object):
    def exist(self, board, word):
        """
        :type board: List[List[str]]
        :type word: str
        :rtype: bool

        Builds a graph of Node objects (one per cell, linked to its grid
        neighbours and keyed by character) and DFS-searches the word from a
        virtual root connected to every cell.  range() replaces the
        Python-2-only xrange(); iteration behaviour is identical and the
        code now also runs under Python 3.
        """
        root = Node('')
        node_grid = [[None] * len(board[0]) for _ in range(len(board))]
        for r in range(len(board)):
            for c, val in enumerate(board[r]):
                u = Node(val)
                node_grid[r][c] = u
                root.adj[val].append(u)
                if r > 0:
                    # Link with the cell directly above.
                    v = node_grid[r - 1][c]
                    v.adj[val].append(u)
                    u.adj[v.c].append(v)
                if c > 0:
                    # Link with the cell directly to the left.
                    v = node_grid[r][c - 1]
                    v.adj[val].append(u)
                    u.adj[v.c].append(v)
        return root.search(word)
| true |
1caf94a4b768747218fe79eb50d5b5385dc8349e | Python | mehulthakral/logic_detector | /backend/dataset/countPrimes/sieve_6.py | UTF-8 | 386 | 3.125 | 3 | [] | no_license | class Solution:
def countPrimes(self, n: int) -> int:
    """Count the prime numbers strictly less than n (Sieve of Eratosthenes)."""
    is_prime = [True] * (n + 1)
    candidate = 2
    while candidate * candidate <= n:
        if is_prime[candidate]:
            # Cross off every multiple of the prime, starting at its square.
            for multiple in range(candidate * candidate, n + 1, candidate):
                is_prime[multiple] = False
        candidate += 1
    # Entries 0 and 1 are never consulted; only 2..n-1 are counted.
    return sum(1 for value in range(2, n) if is_prime[value])
| true |
964d2d2f895888bee41d45e8ec27472e4fcefd6d | Python | soby/html2pdf | /phantom.py | UTF-8 | 2,503 | 3.015625 | 3 | [] | no_license | # coding: utf-8
import os
import tempfile
import subprocess
def html_to_pdf(html, session=None, header=None):
    """Runs phantomjs in a subprocess to render html into a pdf

    Args:
        html: String of html to render
        session: optional session token, forwarded to url_to_pdf
        header: optional header text, forwarded to url_to_pdf

    Returns:
        The pdf data in a string. If phantomjs doesn't like the html,
        this string can end up empty.
        NOTE(review): the code actually returns whatever url_to_pdf returns
        (a file handle or None), not a string — confirm which is intended.

    Raises:
        OSError: An error occured in running the phantomjs subprocess
    """
    # TODO: Use stdin and stdout instead of tempfiles, as Heroku makes no
    # guarantees about tempfiles not being destroyed mid-request. This may
    # require use of phantomjs version 1.9, which (as of 2013-3-2) hasn't been
    # released
    # NOTE(review): NamedTemporaryFile removes the file when the handle is
    # closed/garbage-collected, so `html_tmp` must stay alive until phantomjs
    # has read it — here it survives only for the duration of this call.
    html_tmp = tempfile.NamedTemporaryFile(mode='w+b', dir="phantom-scripts", suffix='.html')
    html_tmp.write(html.encode('utf-8'))
    html_tmp.seek(0)
    return url_to_pdf(html_tmp.name, session, header)
def url_to_pdf(url, session=None, header=None):
    """Runs phantomjs in a subprocess to render a URL into a pdf

    Args:
        url: URL to render
    Returns:
        File handle to temp file of pdf, or None if rendering failed
    Raises:
        OSError: An error occured in running the phantomjs subprocess
    """
    # get a file name. This has a TOC/TOU problem but it shouldn't matter:
    # NamedTemporaryFile is deleted as soon as the object is collected and
    # only its unique name is kept as phantomjs's output path.
    pdf_tmp = tempfile.NamedTemporaryFile(mode='w+b', suffix='.pdf', delete=True, dir='generated_pdfs').name
    phantom_cmd = [ 'phantomjs',
                    '--ignore-ssl-errors=true',
                    'phantom-scripts/rasterize.js',
                    url,
                    pdf_tmp]
    if session:
        # forwarded to rasterize.js so the page renders with the user's session
        phantom_cmd.append("--session")
        phantom_cmd.append('%s' % session)
    if header:
        # base64-encode so arbitrary header text survives argv passing (py2 codec)
        phantom_cmd.append("--header")
        phantom_cmd.append('%s' % header.encode('base64').replace('\n',''))
    ret = subprocess.call(phantom_cmd)
    if ret:
        print('Call to phantomjs failed')
        return None
    try:
        os.stat(pdf_tmp)
    except OSError:
        print('File not created')
        return None
    try:
        size = os.path.getsize(pdf_tmp)
    except OSError:  # was a bare except; only a stat failure is expected here
        print('Could not get file size')
        return None
    else:
        if size:
            print('Returning file of %s size' % size)
            return open(pdf_tmp, 'rb')
        else:
            print('Empty file created')
            return None
| true |
6d22442861105a2d748f2f81a4c6515d0863ed37 | Python | webclinic017/STB | /data/lib/Wiki_Data.py | UTF-8 | 2,191 | 2.609375 | 3 | [] | no_license | #!/usr/bin/python3
"""
Wikipedia data colletion thread
Gets daily view counts of provided wikipedia sites and inserts into sqlite3 database
@params
logger: logging object
DB_file: file location for sqlite3 database
Tickers: list of company wikipedia articles
"""
from time import sleep
from mwviews.api import PageviewsClient
import sys
import logging
import sqlite3
import time
try:
from .Thread import Thread
except:
from Thread import Thread
class Wiki(Thread):
    """Worker that pulls daily Wikipedia page-view counts for a set of
    tickers and queues one document per day for downstream indexing."""

    def __init__(self, logger, index, q, tickers):
        super().__init__('Wiki', logger)
        self.q = q                    # queue consumed by the indexer (put_nowait)
        self.index = index            # target index name for queued documents
        self.Tickers = tickers        # {ticker symbol: wikipedia article title}
        self.Start_date = '20090101'  # earliest date to request views for

    def run(self):
        """Collect view counts for every configured ticker and queue them."""
        viewer = PageviewsClient(user_agent="<person@organization.org> Selfie, Cat, and Dog analysis")
        self.logger.info('[%s] Starting Wiki thread' % self.Name)
        try:
            for ticker, article in self.Tickers.items():
                End_date = time.strftime('%Y%m%d')
                data = viewer.article_views('en.wikipedia', article, granularity='daily', start=self.Start_date, end=End_date)
                for row in data:
                    if data[row][article]:
                        wikid = {}
                        wikid['date'] = row.strftime('%m/%d/%Y')
                        wikid['symbol'] = ticker
                        wikid['article'] = article
                        wikid['wiki_views'] = int(data[row][article])
                        # BUG FIX: queueDoc is a method; the bare call raised
                        # NameError before any document was ever queued.
                        self.queueDoc(wikid)
                self.logger.info('[%s] Collected Info on %s' % (self.Name, ticker))
        except Exception as e:
            self.logger.error('[%s] Error: %s' % (self.Name, e))
        # use self.Name consistently (self.name comes from threading and may differ)
        self.logger.info('[%s] Exiting' % self.Name)
        self.Fin = True

    def queueDoc(self, wikid):
        """Wrap one day's view count in an index envelope and enqueue it."""
        doc = {}
        doc['_index'] = self.index
        doc['_type'] = 'data_wiki'
        doc['_source'] = wikid
        self.q.put_nowait(doc)
        self.logger.debug('[%s] queued document for %s' % (self.Name, doc['_source']['symbol']))
        # self.RATELIMIT is assumed to be provided by the Thread base class — TODO confirm
        sleep(1/self.RATELIMIT)
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
fmt = logging.Formatter('%(asctime)s - %(threadName)-11s - %(levelname)s - %(message)s')
ch.setFormatter(fmt)
logger.addHandler(ch)
dic = {'AMD': 'Advanced_Micro_Devices', 'MSFT': 'Microsoft'}
import asyncio
q = asyncio.Queue()
s = Wiki(logger, 'stock', q, dic)
s.run('3')
| true |
7102750382d228d5a1e59c3235d17bff9f41bd9e | Python | shreyas725/WordChallenge | /WordChallengeOptimized.py | UTF-8 | 1,298 | 3.265625 | 3 | [] | no_license | #!/usr/bin/env python
import os
import sys
import time
# Load the word list once at import time (one whitespace-separated word per
# entry).  The original left the file handle open; a with-block closes it.
with open('words.txt') as f:
    listw = f.read().split()
def make_trie(listw):
    """Build a nested-dict trie; complete words are marked with an '_end_' key."""
    trie = {}
    for word in listw:
        node = trie
        for ch in word:
            node = node.setdefault(ch, {})
        node.setdefault('_end_', '_end_')
    return trie
def traverse(word, sometrie, endcounter):
    # True iff `word` can be split entirely into dictionary words, with at
    # least one split point (endcounter counts completed sub-words so far).
    # NOTE: the restart below deliberately uses the module-level `trie`
    # (the root), not the `sometrie` parameter.
    if len(word) == 0:
        # Accept only if at least one sub-word was completed earlier and the
        # final piece is itself a complete word at this node.
        if endcounter > 0 and '_end_' in sometrie:
            return True
        return False
    letter = word[0]
    if letter in sometrie:
        if '_end_' in sometrie[letter]:
            # A dictionary word ends here: try restarting the match for the
            # remaining suffix from the trie root.
            endcounter = endcounter + 1
            if traverse(word[1:], trie, endcounter):
                return True
            endcounter = endcounter - 1
        # Otherwise keep matching the current (longer) word in this subtree.
        return traverse(word[1:], sometrie[letter], endcounter)
    return False
# Count compound words (words formed by concatenating 2+ dictionary words)
# and remember the two longest ones.
trie = make_trie(listw)
count = 0
max1 = 0   # length of the longest compound word found so far
max2 = 0   # length of the runner-up
long1 = ""
long2 = ""  # BUG FIX: initialise so the final print cannot raise NameError
            # when no compound word exists at all
for word in listw:
    if traverse(word, trie, 0):
        if len(word) > max1:
            # new leader: the old leader becomes the runner-up
            long2 = long1
            max2 = max1
            long1 = word
            max1 = len(word)
        elif len(word) > max2:
            long2 = word
            max2 = len(word)
        count = count + 1
print(count)
print(long1)
print(long2)
53ed3cdd4a4dea99a01359fe721d7eeb7dc782b6 | Python | e-kondr01/finodays | /text_classification/bayes.py | UTF-8 | 1,208 | 3.046875 | 3 | [
"MIT"
] | permissive | import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
def bayes(X_train, y_train, X_test, y_test):
    """Train a CountVectorizer -> TF-IDF -> MultinomialNB pipeline on the
    training split and return its predictions for X_test.

    Note: y_test is accepted for signature compatibility but is unused
    here; scoring happens in score_bayes().
    """
    nb = Pipeline([('vect', CountVectorizer(lowercase=False)),
                   ('tfidf', TfidfTransformer()),
                   ('clf', MultinomialNB()),
                   ])
    nb.fit(X_train, y_train)
    # BUG FIX: predictions are an array of labels, not a Pipeline — the
    # original `y_pred: Pipeline` / `-> Pipeline` annotations were wrong.
    y_pred = nb.predict(X_test)
    return y_pred
def score_bayes(csv_filename: str) -> float:
    """Train and evaluate the Naive Bayes pipeline on a review CSV and
    return its accuracy on a held-out 30% split.

    The CSV must provide 'review' (text) and 'sentiment' (label) columns.
    """
    df = pd.read_csv(csv_filename)
    X = df.review
    y = df.sentiment
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.3, random_state=42)
    # bayes() returns predicted labels (an array), not a Pipeline; the
    # original annotation was wrong.
    y_pred = bayes(X_train, y_train, X_test, y_test)
    return accuracy_score(y_pred, y_test)
if __name__ == "__main__":
accuracy = score_bayes("preprocessed_rureviews.csv")
print(accuracy)
# 0.7069
| true |
44f7d2e850e15d7e6809db2efeae039427599f03 | Python | xctfmatch/CTF-misc-library | /20200513_一个头两个大_hash头像/HSL.py | UTF-8 | 6,824 | 3.1875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# https://blog.csdn.net/sunny_xsc1994/article/details/78541079
# save from https://github.com/ruozhichen/rgb2Lab-rgb2hsl
import numpy as np
import os
from PIL import Image
import math
def hsl2rgb(inputColor):
    """Convert an HSL triple to an RGB triple.

    Input:  (h, s, l) floats; h is a rotation around the chromatic circle
            (values outside 0...1 wrap), s and l must lie in 0...1.
    Output: (r, g, b) integers 0...255.

    >>> hsl2rgb((0.7, 0.7, 0.6))
    (110, 82, 224)
    """
    h = inputColor[0]
    s = inputColor[1]
    l = inputColor[2]

    if not (0 <= s <= 1):
        raise ValueError("s (saturation) parameter must be between 0 and 1.")
    if not (0 <= l <= 1):
        raise ValueError("l (lightness) parameter must be between 0 and 1.")

    def channel(v1, v2, hue):
        # One RGB channel from the two intermediates and a (wrapped) hue.
        while hue < 0.0:
            hue += 1.0
        while hue > 1.0:
            hue -= 1.0
        if 6 * hue < 1.0:
            return v1 + (v2 - v1) * 6.0 * hue
        if 2 * hue < 1.0:
            return v2
        if 3 * hue < 2.0:
            return v1 + (v2 - v1) * ((2.0 / 3.0) - hue) * 6.0
        return v1

    if s == 0.0:
        # Achromatic: every channel equals the lightness.
        r = g = b = l * 255
    else:
        v2 = l * (1.0 + s) if l < 0.5 else (l + s) - (s * l)
        v1 = 2.0 * l - v2
        r = 255 * channel(v1, v2, h + (1.0 / 3.0))
        g = 255 * channel(v1, v2, h)
        b = 255 * channel(v1, v2, h - (1.0 / 3.0))

    r = max(0.0, min(r, 255.0))
    g = max(0.0, min(g, 255.0))
    b = max(0.0, min(b, 255.0))
    return (int(round(r)), int(round(g)), int(round(b)))
def hue2rgb_matrix(v1, v2, vH):
    """Vectorised hue->channel helper; mirrors the scalar hue-to-RGB rules."""
    # Wrap the hue once into [0, 1].
    vH = np.where(vH < 0.0, vH + 1.0, vH)
    vH = np.where(vH > 1.0, vH - 1.0, vH)
    # The first matching band wins, exactly like the scalar if-chain.
    bands = [vH < 1.0 / 6, vH < 1.0 / 2, vH < 2.0 / 3, vH >= 2.0 / 3]
    values = [v1 + (v2 - v1) * 6.0 * vH,
              v2,
              v1 + (v2 - v1) * ((2.0 / 3.0) - vH) * 6.0,
              v1]
    return np.select(bands, values)
# colors size: n*3, when n is very large, it improves speed using matrix calculation
def hsl2rgb_matrix(colors):
    """Vectorised hsl2rgb: convert an n*3 array of HSL rows to n*3 int RGB.

    Matches the scalar hsl2rgb(), including its rounding behaviour.
    """
    colors = np.array(colors)
    hs = colors[:, 0]
    ss = colors[:, 1]
    ls = colors[:, 2]
    var_2 = np.where(ls < 0.5, ls * (1.0 + ss), (ls + ss) - (ss * ls))
    var_1 = 2.0 * ls - var_2
    Rs = 255 * hue2rgb_matrix(var_1, var_2, hs + (1.0 / 3.0))
    Gs = 255 * hue2rgb_matrix(var_1, var_2, hs)
    Bs = 255 * hue2rgb_matrix(var_1, var_2, hs - (1.0 / 3.0))
    # Achromatic rows (s == 0): all channels equal the lightness.
    Rs = np.where(ss == 0.0, ls * 255, Rs)
    Gs = np.where(ss == 0.0, ls * 255, Gs)
    Bs = np.where(ss == 0.0, ls * 255, Bs)
    Rs = np.maximum(np.minimum(Rs, 255), 0)
    Gs = np.maximum(np.minimum(Gs, 255), 0)
    Bs = np.maximum(np.minimum(Bs, 255), 0)
    RGBs = np.vstack((Rs, Gs, Bs)).transpose()  # 3*n -> n*3
    # BUG FIX: astype('int') truncated towards zero, so results disagreed
    # with scalar hsl2rgb()'s int(round(...)) by one (e.g. 81.6 -> 81
    # instead of 82).  np.rint rounds half-to-even, matching round().
    RGBs = np.rint(RGBs).astype(int)
    return RGBs
def rgb2hsl(inputColor):
    """Convert an RGB triple (ints 0...255) to an HSL triple (floats 0...1).

    >>> rgb2hsl((110, 82, 224))
    (0.69953051643192476, 0.69607843137254899, 0.59999999999999998)
    """
    r = inputColor[0]
    g = inputColor[1]
    b = inputColor[2]

    if not (0 <= r <= 255):
        raise ValueError("r (red) parameter must be between 0 and 255.")
    if not (0 <= g <= 255):
        raise ValueError("g (green) parameter must be between 0 and 255.")
    if not (0 <= b <= 255):
        raise ValueError("b (blue) parameter must be between 0 and 255.")

    rf, gf, bf = r / 255.0, g / 255.0, b / 255.0
    lo = min(rf, gf, bf)
    hi = max(rf, gf, bf)
    spread = hi - lo

    l = (hi + lo) / 2.0
    if spread == 0.0:
        # Achromatic: hue and saturation are both zero.
        return (0.0, 0.0, l)

    s = spread / (hi + lo) if l < 0.5 else spread / (2.0 - hi - lo)

    def offset(channel):
        # Per-channel contribution to the hue.
        return (((hi - channel) / 6.0) + (spread / 2.0)) / spread

    if rf == hi:
        h = offset(bf) - offset(gf)
    elif gf == hi:
        h = (1.0 / 3.0) + offset(rf) - offset(bf)
    else:
        h = (2.0 / 3.0) + offset(gf) - offset(rf)

    while h < 0.0:
        h += 1.0
    while h > 1.0:
        h -= 1.0
    return (h, s, l)
# colors size: n*3, when n is very large, it improves speed using matrix calculation
def rgb2hsl_matrix(colors):
    # Vectorised rgb2hsl: colors is an n*3 array of RGB rows (0..255);
    # returns an n*3 array of (h, s, l) rows in 0..1.
    colors=np.array(colors)
    var_Rs=colors[:,0]/255.0
    var_Gs=colors[:,1]/255.0
    var_Bs=colors[:,2]/255.0
    var_Min=np.amin(colors/255.0,axis=1)  # min(Ri,Gi,Bi) in each row
    var_Max=np.amax(colors/255.0,axis=1)
    del_Max=var_Max-var_Min
    ls=(var_Max+var_Min)/2.0
    hs=0.0
    ss=0.0
    # When del_Max=0,it may exist the situation that var_Max+var_Min or 2.0-var_Max-var_Min = 0
    # and the program would report error "invalid value encountered in divide"
    # However, at the end it will set zero when del_Max=0
    # so I ignore the error here
    # https://docs.scipy.org/doc/numpy/reference/generated/numpy.seterr.html
    np.seterr(divide='ignore', invalid='ignore')
    ss=np.where(ls<0.5,del_Max/(var_Max+var_Min),del_Max/(2.0-var_Max-var_Min))
    del_R=np.where(del_Max!=0.0,(((var_Max-var_Rs)/6.0)+(del_Max/2.0))/del_Max,0.0)
    del_G=np.where(del_Max!=0.0,(((var_Max-var_Gs)/6.0)+(del_Max/2.0))/del_Max,0.0)
    del_B=np.where(del_Max!=0.0,(((var_Max-var_Bs)/6.0)+(del_Max/2.0))/del_Max,0.0)
    # Hue selection is an overwrite chain: R-rule first, then G, then B —
    # the LAST matching channel wins on ties (the order matters here).
    hs=np.where(var_Rs==var_Max,del_B-del_G,hs)
    hs=np.where(var_Gs==var_Max,(1.0/3.0)+del_R-del_B,hs)
    hs=np.where(var_Bs==var_Max,(2.0/3.0)+del_G-del_R,hs)
    # Achromatic rows: force hue/saturation to 0, and wrap hue once into [0,1].
    hs=np.where(del_Max!=0,hs,0.0)
    hs=np.where(hs<0.0,hs+1.0,hs)
    hs=np.where(hs>1.0,hs-1.0,hs)
    ss=np.where(del_Max!=0,ss,0.0)
    hsl=np.vstack((hs,ss,ls)).transpose()
    return hsl
#img = Image.open('input/test1.jpg')
#im = np.array(img)
#print im.shape
#for i in range(len(im[:,1])):
# for j in range(len(im[1,:])):
# h,s,l = RGB_to_HSL(im[i][j][0],im[i][j][1],im[i][j][2])
# h = h+0.3
# if h > 1:
# h = h - 1
# r,g,b = HSL_to_RGB(h,s,l)
# im[i][j][0] = r
# im[i][j][1] = g
# im[i][j][2] = b
#scipy.misc.imsave('result.jpg', im)
| true |
c65356bf495b25b32c87a9312a8506ffcaf7004b | Python | GudniNathan/SC-T-201-GSKI | /recursion 2/reverse.py | UTF-8 | 184 | 3.421875 | 3 | [
"MIT"
] | permissive | def reverse(string):
    # Recursively reverse `string`: reverse the tail, then append the head.
    # O(n^2) characters copied in total; fine for short demo inputs.
    if len(string) == 0:
        return ""
    return reverse(string[1:]) + string[0]
print(reverse("wowzers"))
assert reverse("coolbeans") == "coolbeans"[::-1] | true |
b5431e94ad51557602b541ec0c3bd93350d01fa4 | Python | lucascbarbosa/SignalsAndSystems | /sum_signals.py | UTF-8 | 3,857 | 3.078125 | 3 | [
"MIT"
] | permissive | import numpy as np
from numpy.fft import fft,fftfreq,fftshift
from numpy import absolute,angle
import math
import matplotlib.pyplot as plt
def merge(list1, list2):
    """Pair elements index-by-index: [(list1[0], list2[0]), ...].

    Iterates over the length of list1, so list2 must be at least as long.
    """
    return [(left, list2[i]) for i, left in enumerate(list1)]
def demerge(tuples):
    """Inverse of merge: split a list of pairs back into per-position lists."""
    columns = zip(*tuples)
    return [list(column) for column in columns]
def round(x):
    # NOTE(review): shadows the built-in round(); kept for interface
    # compatibility.  Rounds to 2 decimal places (elementwise for arrays).
    return np.around(x,decimals = 2)
def amp(x):
    # Magnitude of a (possibly complex) spectrum sample, elementwise |x|.
    return absolute(x)
def phase(x):
    # Phase angle in radians of a (possibly complex) spectrum sample.
    return angle(x)
class PlotPulses():
    """Generate, plot and combine sampled pulse signals over a fixed window.

    `window` is the window length in seconds, `fs` the sampling rate, so
    lista_x holds window*fs + 1 sample positions centred on zero.
    """

    def __init__(self, window, fs):
        self.window = window
        self.fs = fs
        self.samples = window * fs + 1
        self.lista_x = np.linspace((-1) * self.window / 2, self.window / 2, self.samples)

    def pulseStep(self, delta, alfa):
        """Rectangular pulse of half-width `delta` (height delta) centred at -alfa."""
        lista_y = []
        for x in self.lista_x:
            x = np.around(x + alfa, decimals=1)
            if x >= -delta and x <= delta:
                y = delta
            else:
                y = 0
            lista_y.append(y)
        tuples = merge(self.lista_x, lista_y)
        plt.plot(self.lista_x, lista_y)
        plt.suptitle('Pulso Retangular')
        plt.savefig('plots/pulso_ret.png')
        plt.show()
        return tuples

    def pulseTriang(self, delta, alfa):
        """Triangular pulse of half-width `delta` (peak pi*delta/2) centred at -alfa."""
        lista_y = []
        offset = float(math.pi * delta / 2)
        slope = float(offset / delta)  # NOTE: delta == 0 would divide by zero
        for x in self.lista_x:
            x = np.around(x + alfa, decimals=1)
            if x < -delta or x > delta:
                y = 0
            elif x < 0:
                y = offset + slope * x
            elif x == 0:
                y = offset
            elif x > 0:
                y = offset - slope * x
            lista_y.append(y)
        tuples = merge(self.lista_x, lista_y)
        plt.plot(self.lista_x, lista_y)
        plt.savefig('plots/pulso_triang.png')
        plt.suptitle('Pulso Triangular')
        plt.show()
        return tuples

    def pulseSemiCirc(self, delta, alfa):
        """Semicircular pulse of radius `delta` centred at -alfa."""
        lista_y = []
        for x in self.lista_x:
            x = np.around(x + alfa, decimals=1)
            if x < -delta or x > delta:
                y = 0
            elif x >= -delta and x <= delta:
                y = math.sqrt(delta**2 - math.pow(x, 2))
            lista_y.append(y)  # x²+y²=r², r = delta
        tuples = merge(self.lista_x, lista_y)
        plt.plot(self.lista_x, lista_y)
        plt.suptitle('Pulso Semicircular')
        plt.savefig('plots/pulso_semicirc.png')
        plt.show()
        return tuples

    def sumSignals(self, signalsWeights):
        """Weighted pointwise sum of (weight, signal) pairs, where each
        signal is a list of (x, y) tuples as returned by the pulse methods."""
        Y = np.zeros(self.samples)
        for weight, signal in signalsWeights:
            x, y = demerge(signal)
            x = np.around(x, decimals=1)
            for tup in signal:
                x, y = tup
                # accumulate at the sample position matching this x
                idx = np.where(self.lista_x == x)
                Y[idx] += weight * y
        plt.plot(self.lista_x, Y)
        plt.suptitle('Soma dos sinais')
        plt.savefig('plots/soma_sinais.png')
        plt.show()
        return Y

    def fft(self, signal, ts=None):
        """Plot amplitude and phase of the FFT of `signal`; return (freq, spectrum).

        `ts` is the sampling interval and defaults to 1/fs from the
        constructor.  BUG FIX: the original signature lacked `self` (so
        calling it as a method shifted the arguments and crashed) and
        contained a stray, broken `plt.plot(self.lista_x,)` call, which has
        been removed.
        """
        if ts is None:
            ts = 1.0 / self.fs
        signal = np.array(signal)
        freqs = signal.size
        freq = fftshift(fftfreq(freqs, d=ts))
        result = fftshift(fft(signal))
        amps = list(map(amp, result))
        phases = list(map(phase, result))
        fig, axs = plt.subplots(2)
        plt.suptitle("Resultado da FFT")
        axs[0].plot(freq, amps)
        axs[0].set_title('Amplitude')
        axs[1].plot(freq, phases)
        axs[1].set_title('Fase')
        plt.savefig('plots/fft.png')
        plt.show()
        print(amps)
        print(phases)
        return freq, result
# Demo: build three pulses over a 10 s window sampled at 10 Hz, sum them
# with unit weights, and show the FFT of the combined signal.
plot = PlotPulses(10,10)
signal1 = plot.pulseTriang(2,-2)
signal2 = plot.pulseSemiCirc(2,2)
signal3 = plot.pulseStep(3,0)
listas = [(1,signal1),(1,signal2),(1,signal3)]
signal_result = plot.sumSignals(listas)
plot.fft(signal_result)
c1990fa1a5e2a607201c308eb2302edcbbba77fa | Python | zhuqixing007/simulation | /task_generator.py | UTF-8 | 570 | 3.09375 | 3 | [] | no_license | """
计算任务生成模块
"""
import random
MAX_DATA_SIZE = 300  # maximum task data size, in bits
MAX_COM_INTENSITY = 300  # maximum computational intensity, in cycles/bit
MAX_DELAY = 200  # maximum tolerated task delay, in ms
MAX_PRIORITY = 1  # maximum task importance (priority is uniform in [0, 1])
def task_generator():
    """Create one random task as [size, intensity, delay, priority]."""
    return [
        random.randint(100, MAX_DATA_SIZE),     # data size (bits)
        random.randint(10, MAX_COM_INTENSITY),  # compute intensity (cycles/bit)
        random.randint(10, MAX_DELAY),          # delay budget (ms)
        random.uniform(0, MAX_PRIORITY),        # importance in [0, MAX_PRIORITY]
    ]
return t
# for i in range(10):
# print(task_generator())
| true |
1e11c380fea21df215979048c4724bd1e8ed00f7 | Python | lpawlak1/WDI | /Cwiczenia 7/cw10.py | UTF-8 | 2,363 | 3.6875 | 4 | [] | no_license | null = None
class lista:
    """Singly linked list; `first` points at the head Node (None when empty)."""

    def __init__(self, first=None):
        self.first = first

    def __str__(self):
        node = self.first
        if node is None:
            return "List is empty!"
        parts = []
        while node is not None:
            parts.append(str(node))
            node = node.next
        return "".join(parts)

    def is_empty(self):
        return self.first is None
class Node:
    """One element of the singly linked list: a value plus a `next` pointer."""

    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

    def __str__(self):
        return f"{self.val}-->"
def tabToLista(tab: list):
    """Build a `lista` whose nodes hold tab's values in the same order."""
    result = lista()
    for value in reversed(tab):
        result.first = Node(value, result.first)
    return result
# 10. Liczby naturalne reprezentowane jak poprzednim zadaniu. Proszę napisać
# funkcję dodającą dwie takie liczby. W wyniku dodawania dwóch liczb powinna
# powstać nowa lista.
def usun_nadliczby(first):
    """Normalise a digit list (head = most significant digit) so every node
    holds a single digit 0-9, propagating carries towards the head.

    May prepend a new head node; returns the (possibly new) head.
    """
    if first.next is None:
        # Single digit left: split off a carry node if needed.
        if first.val < 10:
            return first
        a = Node(first.val // 10, first)
        first.val %= 10
        return a
    # Walk to the last node (cp2) and its predecessor (cp).
    cp = first
    cp2 = first.next
    while cp2.next is not None:
        cp, cp2 = cp2, cp2.next
    cp.next = None
    # BUG FIX: the carry comes from the detached last digit (cp2), not from
    # cp itself (the original added `cp.val // 10` here).
    cp.val += cp2.val // 10
    ret = usun_nadliczby(first)
    cp2.val = cp2.val % 10
    cp.next = cp2
    return ret
def add_two(f1, f2):
    """Add two natural numbers stored as digit lists (head = most
    significant digit) and return the head Node of a brand-new list.

    BUG FIX: the original recursive version paired digits from the heads
    (most significant end), mis-aligning numbers of different lengths
    (123 + 22 produced 325), and also miscomputed the result digit when a
    carry occurred (15 + 26 produced 31).  This version aligns the least
    significant digits, as ordinary addition requires.
    """
    def to_digits(node):
        # Collect the digits head-to-tail (most significant first).
        digits = []
        while node is not None:
            digits.append(node.val)
            node = node.next
        return digits

    a = to_digits(f1)
    b = to_digits(f2)
    ia, ib = len(a) - 1, len(b) - 1
    carry = 0
    head = None
    # Walk both numbers from their least significant ends, prepending each
    # result digit so the final list is most-significant-first again.
    while ia >= 0 or ib >= 0 or carry:
        total = carry
        if ia >= 0:
            total += a[ia]
            ia -= 1
        if ib >= 0:
            total += b[ib]
            ib -= 1
        head = Node(total % 10, head)
        carry = total // 10
    return head
# Demo: print two digit-list numbers (head = most significant digit) and
# their sum.
f1 = tabToLista([1, 2, 3])
f2 = tabToLista([2, 2])
print(f1)
print(f2)
print(lista(add_two(f1.first, f2.first)))
| true |
16b52ad1f04167241c583f83d2fe2e81702ce5ac | Python | zach-bray/juniper-SNMPv3-crypt | /juniperSNMPv3crypt/crypt9.py | UTF-8 | 2,732 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
A python port of https://metacpan.org/pod/Crypt::Juniper
Original Author: Kevin Brintnall
Ported by: Zach Bray
"""
import re
import random
MAGIC = "$9$"
EXTRA = {}
ENCODING = [
[1, 4, 32],
[1, 16, 32],
[1, 8, 32],
[1, 64],
[1, 32],
[1, 4, 16, 128],
[1, 32, 64]
]
# letter families to encrypt with
FAMILY = ["QzF3n6/9CAtpu0O", "B1IREhcSyrleKvMW8LXx", "7N-dVbwsY2g4oaJZGUDj", "iHkq.mPf5T"]
EXTRA = {char: 3-i for i,fam in enumerate(FAMILY) for char in fam}
# builds regex to match valid encrypted string
letters = MAGIC + "([" + ''.join(FAMILY) + "]{4,})"
letters = re.sub(r"([-|/|$])", r"\\\1", letters)
VALID = r"^" + letters + "$"
# forward and reverse dicts
NUM_ALPHA = [char for char in ''.join(FAMILY)]
ALPHA_NUM = {NUM_ALPHA[i]: i for i,c in enumerate(NUM_ALPHA)}
def decrypt(crypt):
    """Decrypt a Juniper $9$ obfuscated secret back to plaintext."""
    m = re.match(VALID, crypt)
    if not m:
        # NOTE(review): calling exit() from a library function is harsh —
        # callers cannot recover; raising ValueError would be friendlier.
        print('invalid crypt string')
        exit(1)
    chars = m.group(1)
    # first char is the salt; EXTRA[salt] further chars are random filler
    chars, first = _nibble(chars, 1)
    chars,_ = _nibble(chars, EXTRA[first])
    prev = first
    decrypt = ""
    while(chars):
        # each plaintext char consumed len(decode) cipher chars when encrypted
        decode = ENCODING[len(decrypt) % len(ENCODING)]
        chars, nibble = _nibble(chars, len(decode))
        gaps = []
        for nib in nibble:
            # gap = alphabet distance from the previously seen character
            dist = (ALPHA_NUM[nib] - ALPHA_NUM[prev]) % len(NUM_ALPHA) - 1
            gaps.append(dist)
            prev = nib
        decrypt += _gap_decode(gaps, decode)
    return decrypt
def _nibble(chars, length):
nib = chars[:length]
chars = chars[length:]
return chars, nib
def _gap_decode(gaps, decode):
num = 0
for i in range(len(gaps)):
num += gaps[i] * decode[i]
return chr(num % 256)
# encrypts <secret> for junipers $9$ format
# allows use of seed for idempotent secrets
def encrypt(secret, seed=False):
    """Obfuscate `secret` in Juniper's $9$ format.

    `seed` doubles as a flag and a value: any truthy value seeds the RNG so
    the output is deterministic.  NOTE(review): a seed of 0 is falsy and is
    therefore silently ignored.
    """
    if seed:
        random.seed(seed)
    salt = _random_salt(1)
    # EXTRA[salt] random filler chars pad the header, as decrypt() expects
    rand = _random_salt(EXTRA[salt])
    pos = 0
    prev = salt
    crypt = MAGIC + salt + rand
    for char in secret:
        # cycle through the gap encodings, one per plaintext character
        encode = ENCODING[pos % len(ENCODING)]
        crypt += _gap_encode(char, prev, encode)
        prev = crypt[-1]
        pos += 1
    return crypt
# returns `length` characters drawn uniformly from the $9$ alphabet
def _random_salt(length):
    """Return `length` random characters from NUM_ALPHA."""
    return "".join(NUM_ALPHA[random.randrange(len(NUM_ALPHA))] for _ in range(length))
# encode one plaintext character as a series of alphabet gaps
def _gap_encode(char, prev, encode):
    """Return the cipher characters encoding `char` relative to `prev`."""
    # Decompose ord(char) into mixed-radix digits, most significant first.
    val = ord(char)
    gaps = []
    for weight in reversed(encode):
        gaps.insert(0, val // weight)
        val %= weight
    # Each gap is taken relative to the previously emitted character.
    out = []
    for gap in gaps:
        step = gap + ALPHA_NUM[prev] + 1
        prev = NUM_ALPHA[step % len(NUM_ALPHA)]
        out.append(prev)
    return "".join(out)
| true |
7afbd6a0d1a759c6ca914720c84355d50ede63fb | Python | laxmankusuma/practice_notebook | /ageron_aurelien_numpy (1).py | UTF-8 | 12,195 | 3.25 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# #REFERENCE
#
# https://github.com/ageron/handson-ml2/blob/master/tools_numpy.ipynb
# In[1]:
import numpy as np
# In[2]:
np.zeros(5)
# In[3]:
np.zeros((3,4))
# In[4]:
a = np.zeros((3,4))
a
# In[5]:
a.shape
# In[6]:
a.shape[0]
# In[7]:
a.ndim # equal to len(a.shape)
# In[8]:
a.size
# In[9]:
np.zeros((2,3,4))
# In[10]:
type(np.zeros((3,4)))
# In[11]:
np.ones((3,4))
# In[12]:
np.full((3,4), np.pi)
# In[13]:
np.empty((2,3))
# In[14]:
np.array([[1,2,3,4], [10, 20, 30, 40]])
# In[15]:
np.arange(1, 5)
# In[16]:
np.arange(1.0, 5.0)
# In[17]:
np.arange(1, 5, 0.5)
# In[18]:
print(np.arange(0, 5/3, 1/3)) # depending on floating point errors, the max value is 4/3 or 5/3.
print(np.arange(0, 5/3, 0.333333333))
print(np.arange(0, 5/3, 0.333333334))
# In[19]:
print(np.linspace(0, 5/3, 6))
# In[20]:
np.random.rand(3,4)
# In[21]:
np.random.randn(3,4)
# In[22]:
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
# In[23]:
plt.hist(np.random.rand(100000), density=True, bins=100, histtype="step", color="blue", label="rand")
plt.hist(np.random.randn(100000), density=True, bins=100, histtype="step", color="red", label="randn")
plt.axis([-2.5, 2.5, 0, 1.1])
plt.legend(loc = "upper left")
plt.title("Random distributions")
plt.xlabel("Value")
plt.ylabel("Density")
plt.show()
# In[24]:
def my_function(z, y, x):
    """Cell value for np.fromfunction: product of the last two indices plus the first."""
    return z + y * x
np.fromfunction(my_function, (3, 2, 10))
# In[25]:
c = np.arange(1, 5)
print(c.dtype, c)
# In[26]:
c = np.arange(1.0, 5.0)
print(c.dtype, c)
# In[27]:
d = np.arange(1, 5, dtype=np.complex64)
print(d.dtype, d)
# In[28]:
e = np.arange(1, 5, dtype=np.complex64)
e.itemsize
# In[29]:
f = np.array([[1,2],[1000, 2000]], dtype=np.int32)
f.data
# In[30]:
if (hasattr(f.data, "tobytes")):
data_bytes = f.data.tobytes() # python 3
else:
data_bytes = memoryview(f.data).tobytes() # python 2
data_bytes
# In[31]:
g = np.arange(24)
print(g)
print("Rank:", g.ndim)
# In[32]:
g.shape = (6, 4)
print(g)
print("Rank:", g.ndim)
# In[33]:
g.shape = (2, 3, 4)
print(g)
print("Rank:", g.ndim)
# In[34]:
g2 = g.reshape(4,6)
print(g2)
print("Rank:", g2.ndim)
# In[35]:
g2[1, 2] = 999 # [row,column]
g2
# In[36]:
g
# In[37]:
g.ravel()
# In[38]:
a = np.array([14, 23, 32, 41])
b = np.array([5, 4, 3, 2])
print("a + b =", a + b)
print("a - b =", a - b)
print("a * b =", a * b)
print("a / b =", a / b)
print("a // b =", a // b)
print("a % b =", a % b)
print("a ** b =", a ** b)
# Broadcasting
#
# In general, when NumPy expects arrays of the same shape but finds that this is not the case, it applies the so-called broadcasting rules:
# In[39]:
h = np.arange(5).reshape(1, 1, 5)
h
# In[40]:
h + [10, 20, 30, 40, 50] # same as: h + [[[10, 20, 30, 40, 50]]]
# In[41]:
k = np.arange(6).reshape(2, 3)
k
# In[42]:
k + [[100], [200]] # same as: k + [[100, 100, 100], [200, 200, 200]]
# In[43]:
k + [100, 200, 300] # after rule 1: [[100, 200, 300]], and after rule 2: [[100, 200, 300], [100, 200, 300]]
# In[44]:
k + 1000 # same as: k + [[1000, 1000, 1000], [1000, 1000, 1000]]
# In[45]:
try:
k + [33, 44]
except ValueError as e:
print(e)
# In[46]:
k1 = np.arange(0, 5, dtype=np.uint8)
print(k1.dtype, k1)
# In[47]:
k2 = k1 + np.array([5, 6, 7, 8, 9], dtype=np.int8)
print(k2.dtype, k2)
# In[48]:
k3 = k1 + 1.5
print(k3.dtype, k3)
# In[49]:
m = np.array([20, -5, 30, 40])
m < [15, 16, 35, 36]
# In[50]:
m < 25 # equivalent to m < [25, 25, 25, 25]
# In[51]:
m[m < 25]
# In[52]:
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
print(a)
print("mean =", a.mean())
# In[53]:
for func in (a.min, a.max, a.sum, a.prod, a.std, a.var):
print(func.__name__, "=", func())
# In[54]:
c=np.arange(24).reshape(2,3,4)
c
# In[55]:
c.sum(axis=0) # sum across matrices
# In[56]:
c.sum(axis=1) # sum across rows
#[0+4+8,1+5+9,2+6+10,3+7+11],[12+16+28,14+22+30,16+24+32,18+26+34]
# In[57]:
[0+4+8,1+5+9,2+6+10,3+7+11],[12+16+20,13+17+21,14+18+22,15+19+23]
# In[58]:
c.sum(axis=(0,2)) # sum across matrices and columns
# In[59]:
0+1+2+3 + 12+13+14+15, 4+5+6+7 + 16+17+18+19, 8+9+10+11 + 20+21+22+23
# In[60]:
a = np.array([[-2.5, 3.1, 7], [10, 11, 12]])
np.square(a)
# In[61]:
print("Original ndarray")
print(a)
for func in (np.abs, np.sqrt, np.exp, np.log, np.sign, np.ceil, np.modf, np.isnan, np.cos):
print("\n", func.__name__)
print(func(a))
# In[62]:
a = np.array([1, -2, 3, 4])
b = np.array([2, 8, -1, 7])
np.add(a, b) # equivalent to a + b
# In[63]:
np.greater(a, b) # equivalent to a > b
# In[64]:
np.maximum(a, b)
# In[65]:
np.copysign(a, b)
# In[66]:
a = np.array([1, 5, 3, 19, 13, 7, 3])
a[3]
# In[67]:
a[2:5]
# In[68]:
a[2:-1]
# In[69]:
a[:2]
# In[70]:
a[2::1]#skip 1-1=0 from indesx 2
# In[71]:
a[2::2]#skip 2-1=1 from indesx 2
# In[72]:
a[2::3]#skip 3-1=2 from indesx 2
# In[73]:
a[2::-1]#skip 1-1=0 from indesx 2(reverse order)
# In[74]:
a[4::-2]#skip 2-1=1 from indesx 4(reverse order)
# In[75]:
a
# In[76]:
a[3]=999
a
# In[77]:
a[2:5] = [997, 998, 999]
a
# In[78]:
a[2:5] = -1
a
# In[79]:
try:
a[2:5] = [1,2,3,4,5,6] # too long
except ValueError as e:
print(e)
# In[80]:
try:
del a[2:5]
except ValueError as e:
print(e)
# In[81]:
a_slice = a[2:6]
a_slice[1] = 1000
a # the original array was modified!
# In[82]:
a[3] = 2000
a_slice # similarly, modifying the original array modifies the slice!
# In[83]:
another_slice = a[2:6].copy()
another_slice[1] = 3000
a # the original array is untouched
# In[84]:
a[3] = 4000
another_slice # similary, modifying the original array does not affect the slice copy
# In[85]:
a
# In[86]:
b = np.arange(48).reshape(4, 12)
b
# In[87]:
b[1, 2] # row 1, col 2
# In[88]:
b[1, :] # row 1, all columns
# In[89]:
b[:, 1] # all rows, column 1
# In[90]:
b[1, :]
# In[91]:
b[1:2, :] #The first expression returns row 1 as a 1D array of shape (12,),
#while the second returns that same row as a 2D array of shape (1, 12).
# In[92]:
b
# In[93]:
b[(0,2), 2:5] # rows 0 and 2, columns 2 to 4 (5-1)
# In[94]:
b[:, (-1, 2, -1)] # all rows, columns -1 (last), 2 and -1 (again, and in this order)
# In[95]:
b[(-1, 2, -1, 2), (5, 9, 1, 9)] # returns a 1D array with b[-1, 5], b[2, 9], b[-1, 1] and b[2, 9] (again)
# In[96]:
c = b.reshape(4,2,6)
c
# In[97]:
c[2, 1, 4] # matrix 2, row 1, col 4
# In[98]:
c[2, :, 3] # matrix 2, all rows, col 3
# In[99]:
c[2, 1] # Return matrix 2, row 1, all columns. This is equivalent to c[2, 1, :]
# In[100]:
c[2, ...] # matrix 2, all rows, all columns. This is equivalent to c[2, :, :]
# In[101]:
c[2, 1, ...] # matrix 2, row 1, all columns. This is equivalent to c[2, 1, :]
# In[102]:
c[2, ..., 3] # matrix 2, all rows, column 3. This is equivalent to c[2, :, 3]
# In[103]:
c[..., 3] # all matrices, all rows, column 3. This is equivalent to c[:, :, 3]
# In[104]:
b = np.arange(48).reshape(4, 12)
b
# In[105]:
rows_on = np.array([True, False, True, False])
b[rows_on, :] # Rows 0 and 2, all columns. Equivalent to b[(0, 2), :]
# In[106]:
cols_on = np.array([False, True, False] * 4)
b[:, cols_on] # All rows, columns 1, 4, 7 and 10
# In[107]:
[False, True, False] * 4
# In[108]:
b[np.ix_(rows_on, cols_on)]
# In[109]:
np.ix_(rows_on, cols_on)
# In[110]:
b
# In[111]:
b[b % 3 == 1]
# In[112]:
c = np.arange(24).reshape(2, 3, 4) # A 3D array (composed of two 3x4 matrices)
c
# In[113]:
for m in c:
print("Item:")
print(m)
# In[114]:
for i in range(len(c)): # Note that len(c) == c.shape[0]
print("Item:")
print(c[i])
# In[115]:
for i in c.flat:
print("Item:", i)
# In[116]:
q1 = np.full((3,4), 1.0)
q1
# In[117]:
q2 = np.full((4,4), 2.0)
q2
# In[118]:
q3 = np.full((3,4), 3.0)
q3
# In[119]:
q4 = np.vstack((q1, q2, q3))
q4
# In[120]:
q4.shape
# In[121]:
q5 = np.hstack((q1, q3))
q5
# In[122]:
q5.shape
# In[123]:
try:
q5 = np.hstack((q1, q2, q3))
except ValueError as e:
print(e)
# In[124]:
q7 = np.concatenate((q1, q2, q3), axis=0) # Equivalent to vstack
q7
# In[125]:
q7.shape
# In[126]:
q8 = np.stack((q1, q3))
q8
# In[127]:
q8.shape
# In[128]:
r = np.arange(24).reshape(6,4)
r
# In[129]:
r1, r2, r3 = np.vsplit(r, 3)
r1
# In[130]:
r2
# In[131]:
r3
# In[132]:
r4, r5 = np.hsplit(r, 2)
r4
# In[133]:
r5
# In[134]:
t = np.arange(24).reshape(4,2,3)
t
# In[135]:
t.shape
# In[136]:
t1 = t.transpose((1,2,0))
t1
# In[137]:
t1.shape
# In[138]:
t2 = t.transpose() # equivalent to t.transpose((2, 1, 0))
t2
# In[139]:
t2.shape
# In[140]:
t3 = t.swapaxes(0,1) # equivalent to t.transpose((1, 0, 2))
t3
# In[141]:
t3.shape
# In[142]:
m1 = np.arange(10).reshape(2,5)
m1
# In[143]:
m1.T
# In[144]:
m2 = np.arange(5)
m2
# In[145]:
m2.T
# In[146]:
m2r = m2.reshape(1,5)
m2r
# In[147]:
m2r.T
# In[148]:
n1 = np.arange(10).reshape(2, 5)
n1
# In[149]:
n2 = np.arange(15).reshape(5,3)
n2
# In[150]:
n1.dot(n2)
# In[151]:
import numpy.linalg as linalg
m3 = np.array([[1,2,3],[5,7,11],[21,29,31]])
m3
# In[152]:
linalg.inv(m3)
# In[153]:
linalg.pinv(m3) # both are same inv or pinv
# In[154]:
m3.dot(linalg.inv(m3))
# In[155]:
np.eye(3)
# In[156]:
m3
# In[157]:
q, r = linalg.qr(m3)
q
# In[158]:
r
# In[159]:
q.dot(r) # q.r equals m3
# In[160]:
linalg.det(m3) # Computes the matrix determinant
# In[161]:
m3
# In[162]:
eigenvalues, eigenvectors = linalg.eig(m3)
eigenvalues # λ
# In[163]:
eigenvectors # v
# In[164]:
m3.dot(eigenvectors) - eigenvalues * eigenvectors # m3.v - λ*v = 0
# In[165]:
m4 = np.array([[1,0,0,0,2], [0,0,3,0,0], [0,0,0,0,0], [0,2,0,0,0]])
m4
# In[166]:
U, S_diag, V = linalg.svd(m4)
U
# In[167]:
S_diag
# In[168]:
S = np.zeros((4, 5))
S[np.diag_indices(4)] = S_diag
S # Σ
# In[169]:
V
# In[170]:
U.dot(S).dot(V) # U.Σ.V == m4
# In[171]:
np.diag(m3) # the values in the diagonal of m3 (top left to bottom right)
# In[172]:
np.trace(m3) # equivalent to np.diag(m3).sum()
# In[173]:
coeffs = np.array([[2, 6], [5, 3]])
depvars = np.array([6, -9])
solution = linalg.solve(coeffs, depvars)
solution
# In[174]:
coeffs.dot(solution), depvars # yep, it's the same
# In[175]:
np.allclose(coeffs.dot(solution), depvars)
# In[176]:
import math
data = np.empty((768, 1024))
for y in range(768):
for x in range(1024):
data[y, x] = math.sin(x*y/40.5) # BAD! Very inefficient.
# In[177]:
x_coords = np.arange(0, 1024) # [0, 1, 2, ..., 1023]
y_coords = np.arange(0, 768) # [0, 1, 2, ..., 767]
X, Y = np.meshgrid(x_coords, y_coords)
X
# In[178]:
Y
# In[179]:
data = np.sin(X*Y/40.5)
# In[180]:
data
# In[181]:
import matplotlib.pyplot as plt
import matplotlib.cm as cm
fig = plt.figure(1, figsize=(7, 6))
plt.imshow(data, cmap=cm.hot, interpolation="bicubic")
plt.show()
# In[182]:
a = np.random.rand(2,3)
a
# In[183]:
np.save("my_array", a)
# In[184]:
with open("my_array.npy", "rb") as f:
content = f.read()
content
# In[185]:
a_loaded = np.load("my_array.npy")
a_loaded
# In[186]:
np.savetxt("my_array.csv", a)
# In[187]:
with open("my_array.csv", "rt") as f:
print(f.read())
# In[188]:
np.savetxt("my_array.csv", a, delimiter=",")
# In[189]:
a_loaded = np.loadtxt("my_array.csv", delimiter=",")
a_loaded
# In[190]:
b = np.arange(24, dtype=np.uint8).reshape(2, 3, 4)
b
# In[191]:
np.savez("my_arrays", my_a=a, my_b=b)
# In[192]:
with open("my_arrays.npz", "rb") as f:
content = f.read()
repr(content)[:180] + "[...]"
# In[193]:
my_arrays = np.load("my_arrays.npz")
my_arrays
# In[194]:
my_arrays.keys()
# In[195]:
my_arrays["my_a"]
# # refer below link for more
# # https://numpy.org/doc/stable/reference/index.html
# In[ ]:
| true |
a2b10995e0bf3ae666646154099cb36602005b70 | Python | shaduk/algorithms-datastructures | /hackerrank/lonelyinteger.py | UTF-8 | 286 | 3.078125 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/lonely-integer
#!/usr/bin/py
def lonelyinteger(a):
answer = 0
for i in a:
answer = answer^i
return answer
if __name__ == '__main__':
a = input()
b = map(int, raw_input().strip().split(" "))
print lonelyinteger(b) | true |
cf7b4297ec6781baf3c76c5de4b95ab0dc948149 | Python | huiminren/RobustVAE | /Tensorflow/main_code/RobustVariationalAutoencoder.py | UTF-8 | 8,345 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 13 23:40:28 2019
@author: Huimin Ren
"""
import numpy as np
import numpy.linalg as nplin
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
from BasicAutoencoder import DeepVAE as VAE
from shrink import l1shrink as SHR
import time
import sys
import os
def corrupt(X_in,corNum=10):
    """Return a copy of *X_in* with salt-and-pepper noise applied.

    For every row, *corNum* column indices are drawn uniformly at random
    (with replacement) and the chosen entries are flipped: values above
    0.5 become 0, everything else becomes 1.  The input is not modified.
    """
    noisy = X_in.copy()
    n_rows, n_cols = noisy.shape[0], noisy.shape[1]
    for row in range(n_rows):
        cols = np.random.randint(0, n_cols, size=corNum)
        for col in cols:
            noisy[row, col] = 0 if noisy[row, col] > 0.5 else 1
    return noisy
class RVDAE(object):
    """Robust (Variational) Deep Autoencoder.

    @Original author: Chong Zhou
    Decomposes the data as X = L + S, where L is a non-linearly low rank
    matrix (modelled by the VAE) and S is a sparse outlier matrix:

        argmin ||L - Decoder(Encoder(L))|| + lambda * ||S||_1

    trained by alternating projection between the VAE fit (for L) and an
    elementwise L1 shrinkage (for S).
    """
    def __init__(self, sess, input_dim,learning_rate = 1e-3, n_z = 5,
                 lambda_=1.0, error = 1.0e-7):
        """
        sess: a Tensorflow tf.Session object
        input_dim: input dimension
        learning_rate: learning rate for optimizing VAE
        n_z: dimension of latent layer(coder)
        lambda_: tuning the weight of l1 penalty of S
        error: converge criterior for jump out training iteration
        """
        self.errors = []
        self.lambda_ = lambda_
        self.error = error
        self.vae = VAE.VariantionalAutoencoder(sess = sess, input_dim = input_dim,
                                               learning_rate = learning_rate, n_z = n_z)
    def fit(self, X, path = "", num_gen=10, iteration=20, num_epoch = 100,
            batch_size=64, verbose=False):
        """Alternating-projection training loop.

        X: input data
        path: path of saving loss and generation
        num_gen: number of generated images
        iteration: number of outer iterations
        num_epoch: number of epochs for each VAE fit (inner iteration)
        batch_size: batch size of VAE
        Returns (L, S, errors) where errors holds criterion c1 per iteration.
        """
        ## initialize L, S, mu(shrinkage operator)
        self.L = np.zeros(X.shape)
        self.S = np.zeros(X.shape)
        # mu balances the shrinkage threshold against the data magnitude.
        mu = (X.size) / (4.0 * nplin.norm(X,1))
        print ("shrink parameter:", self.lambda_ / mu)
        LS0 = self.L + self.S
        XFnorm = nplin.norm(X,'fro')
        if verbose:
            print ("X shape: ", X.shape)
            print ("L shape: ", self.L.shape)
            print ("S shape: ", self.S.shape)
            print ("mu: ", mu)
            print ("XFnorm: ", XFnorm)
        for it in range(iteration):
            if verbose:
                print ("Out iteration: " , it)
            ## alternating project, first project to L
            self.L = X - self.S
            ## Using L to train the auto-encoder
            self.vae.fit(X = self.L, path = path, file_name = "vae_loss"+str(it),
                         num_epoch = num_epoch, batch_size = batch_size)
            ## get optmized L
            self.L = self.vae.reconstructor(self.L)
            ## alternating project, now project to S
            self.S = SHR.shrink(self.lambda_/mu, (X - self.L).reshape(X.size)).reshape(X.shape)
            ## break criterion 1: the L and S are close enough to X
            c1 = nplin.norm(X - self.L - self.S, 'fro') / XFnorm
            ## break criterion 2: there is no changes for L and S
            c2 = np.min([mu,np.sqrt(mu)]) * nplin.norm(LS0 - self.L - self.S) / XFnorm
            self.errors.append(c1)
            # generate sample images for visual check and FID computation
            if it == iteration-1:
                print("generation images:")
                self.vae.gen_plot(FLAG_gen = True, x="", num_gen=num_gen,
                                  path=path, fig_name="generator_"+str(it)+".png")
                print("generate fid images")
                self.vae.generation_fid(path=path)
            if verbose:
                print ("c1: ", c1)
                print ("c2: ", c2)
            if c1 < self.error and c2 < self.error :
                print ("early break")
                break
            ## save L + S for c2 check in the next iteration
            LS0 = self.L + self.S
        return self.L , self.S, np.array(self.errors)
    # x --> z : encode the de-noised part of X into the latent space
    def transform(self, X):
        L = X - self.S
        return self.vae.transformer(L)
    # x -> x_hat : reconstruct the low-rank component learned during fit()
    # NOTE(review): the argument X is ignored; self.L from the last fit is used.
    def getRecon(self, X):
        # L = X - self.S
        return self.vae.reconstructor(self.L)
    # z -> x : decode latent codes into data space
    def generator(self, z):
        return self.vae.generator(z)
def main(data_source, noise_method, noise_factors,lambdas, debug = True):
    """Run the RVDAE experiment grid and save results under ``output/``.

    data_source: data set of training. Either 'MNIST' or 'FASHION'
    noise_method: method of adding noise. Either 'sp' (represents salt-and-pepper)
                or 'gs' (represents Gaussian)
    noise_factors: noise factors
    lambdas: multiple values of lambda
    debug: True runs a small, fast configuration
    """
    if data_source == 'MNIST':
        data_dir = 'input_data/MNIST_data/'
        mnist = input_data.read_data_sets(data_dir, one_hot=True)
    if data_source == 'FASHION':
        data_dir = 'input_data/Fashion_data/'
        mnist = input_data.read_data_sets(data_dir,
                source_url='http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/')
    x_train = mnist.train.images
    # Full-size hyper-parameters; overridden below in debug mode.
    n_z = 49
    batch_size = 200
    iteration = 30
    num_epoch = 20
    num_gen = 100
    if debug:
        x_train = mnist.train.images[:1000]
        batch_size = 64
        iteration = 2
        num_epoch = 2
        num_gen = 10
    output = "output/"
    if not os.path.exists(output):
        os.mkdir(output)
    for lambda_ in lambdas:
        print("lambda:",lambda_)
        # Directory layout: output/RVAE_<data>_<noise>/lambda_<l>/noise_<f>/
        path = output+"RVAE_"+data_source+"_"+noise_method+"/"
        if not os.path.exists(path):
            os.mkdir(path)
        path = path+"lambda_"+str(lambda_)+"/"
        if not os.path.exists(path):
            os.mkdir(path)
        for noise_factor in noise_factors:
            print("noise factor: ",noise_factor)
            path = output+"RVAE_"+data_source+"_"+noise_method+"/"+"lambda_"+str(lambda_)+"/"
            path = path+"noise_"+str(noise_factor)+"/"
            if not os.path.exists(path):
                os.mkdir(path)
            # Fixed seed so every (lambda, factor) cell sees the same noise.
            np.random.seed(595)
            if noise_method == 'sp':
                x_train_noisy = corrupt(x_train, corNum = int(noise_factor*784))
            if noise_method == 'gs':
                x_train_noisy = x_train + noise_factor * np.random.normal(
                        loc=0.0, scale=1.0, size=x_train.shape)
                x_train_noisy = np.clip(x_train_noisy, 0., 1.)
            start_time = time.time() # record running time for training
            tf.reset_default_graph()
            sess = tf.Session()
            rvae = RVDAE(sess = sess, input_dim = x_train_noisy.shape[1],
                         learning_rate = 1e-3, n_z = n_z, lambda_=lambda_, error = 1.0e-7)
            L, S, errors = rvae.fit(X = x_train_noisy, path = path,
                                    num_gen = num_gen,
                                    iteration=iteration, num_epoch = num_epoch,
                                    batch_size=batch_size, verbose=True)
            sess.close()
            np.save(path+'running_time.npy',np.array(time.time()-start_time))
if __name__ == "__main__":
    # CLI mode: <data_source> <noise_method> <noise_factor> <lambda>
    if len(sys.argv)>4:
        data_source = sys.argv[1]
        noise_method = sys.argv[2]
        noise_factors = [float(sys.argv[3])]
        lambdas = [int(sys.argv[4])]
        main(data_source, noise_method, noise_factors,lambdas, debug = True)
    else:
        # Sweep mode: run every dataset / noise-method combination.
        lambdas = [1,5,10,15,20,25,50,70,100,250]
        data_sources = ['MNIST','FASHION']
        noise_methods = ['sp','gs']
        for data_source in data_sources:
            for noise_method in noise_methods:
                # BUG FIX: noise_factors was previously computed *before*
                # noise_method was defined in this branch, raising a
                # NameError; choose the grid matching the current method here.
                if noise_method == 'sp':
                    noise_factors = [round(i*0.01,2) for i in range(1,52,2)]
                else:
                    noise_factors = [round(i*0.1,1) for i in range(1,10)]
main(data_source, noise_method, noise_factors,lambdas, debug = True) | true |
db34f62939120de024f98a9ff791cc00d4eb3c36 | Python | leticiamsfernandes/Curso-Basico-Python | /ContaTempos.py | UTF-8 | 2,907 | 3.484375 | 3 | [] | no_license | import Ordenador
import random
import time
import Buscador
class ContaTempos:
def lista_aleatoria(self, n):
lista = [random.randrange(1000) for x in range(n)]
return lista
def lista_quase_ordenada(self, n):
lista = [x for x in range(n)] #lista ordenada
lista[n//10] = -500
return lista
def lista_ordenada(self, n):
lista = [x for x in range(n)] #lista ordenada
return lista
def compara_ordenadores(self, n):
lista1 = self.lista_aleatoria(n)
lista2 = lista1[:] #clonando uma lista
lista3 = lista2[:]
o = Ordenador.Ordenador()
print("Comparando com listas aleatórias")
antes = time.time()
o.bolha_curta(lista1)
depois = time.time()
print("O algoritmo da bolha curta demorou: ", depois-antes)
antes = time.time()
o.selecao_direta(lista2)
depois = time.time()
print("O algoritmo da selação direta demorou: ", depois-antes)
antes = time.time()
o.insercao_direta(lista3)
depois = time.time()
print("O algoritmo da inserção direta demorou: ", depois-antes)
print("\nComparando com listas quase ordenadas")
lista1 = self.lista_quase_ordenada(n)
lista2 = lista1[:] #clonando uma lista
lista3 = lista2[:]
antes = time.time()
o.bolha_curta(lista1)
depois = time.time()
print("O algoritmo da bolha curta demorou: ", depois-antes)
antes = time.time()
o.selecao_direta(lista2)
depois = time.time()
print("O algoritmo da selação direta demorou: ", depois-antes)
antes = time.time()
o.insercao_direta(lista3)
depois = time.time()
print("O algoritmo da inserção direta demorou: ", depois-antes)
def compara_buscadores(self, n):
b = Buscador.Buscador()
print("Comparando com listas ordenadas")
lista1 = self.lista_ordenada(n)
lista2 = lista1[:] #clonando uma lista
antes = time.time()
b.busca_sequencial(lista1, x)
depois = time.time()
print("O algoritmo da busca sequencial demorou: ", depois-antes)
antes = time.time()
b.busca_binaria(lista2, x)
depois = time.time()
print("O algoritmo da busca binária demorou: ", depois-antes)
print("\nComparando com listas aleatórias")
lista1 = self.lista_aleatoria(n)
lista2 = lista1[:] #clonando uma lista
antes = time.time()
b.busca_sequencial(lista1, x)
depois = time.time()
print("O algoritmo da busca sequencial demorou: ", depois-antes)
antes = time.time()
b.busca_binaria(lista2, x)
depois = time.time()
print("O algoritmo da busca binária demorou: ", depois-antes)
| true |
9ffdfae99f8143578163fd0e11888cd1d22f13a4 | Python | jacquesCedric/WiiUScripts | /wwp/PlazaImages.py | UTF-8 | 5,258 | 2.6875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Jacob Gold"
__copyright__ = "Copyright 2007, Jacob Gold"
__credits__ = ["Jacob Gold"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Jacob Gold"
__status__ = "Prototype"
"""
Translating images to and from Nintendos Base64, zlib-compressed format.
"""
import sys, io, os
import argparse
import zlib
import base64
from PIL import Image
parser = argparse.ArgumentParser(description="Convert an image to a base64, zlib-compressed string - for use in Wara Wara Plaza.")
parser.add_argument("file", metavar="File", type=str,
help="the image or file to be processed", nargs="?", const="")
parser.add_argument("-d", "--decode", action="store_true",
help="decode a string from Wara Wara Plaza into a .png image")
parser.add_argument("-i", "--icon", action="store_true",
help="Set desired output dimensions to that of an icon(128x128), default dimensions are of painting size(320x120)")
def main():
args = parser.parse_args()
if (args.file):
if args.decode:
print("Attempting decoding of image")
decode(args.file)
# mv2tga(args.file)
print("Through the gauntlet")
else:
print("Attempting encode of image")
encode(args.file, args.icon)
# tga2mv(args.file)
print("Through the gauntlet")
else:
with open('titleIDs', 'r') as f:
for line in f:
t = line[:16]
try:
tga2mv("input/" + t + ".tga")
print("converted: " + t)
except:
print("failed conversion for: " + t)
# Take data string, base64decode, decompress result, convert to png
# Output is [FileName].[TGA/PNG]
def decode(stringToDecode):
with open(stringToDecode) as file:
decode = base64.b64decode(file.read())
image = zlib.decompress(decode)
newName = removeExt(stringToDecode) + ".tga"
with open(newName, "wb") as newImage:
newImage.write(image)
newImage.close()
convertTGAtoPNG(newName)
# Convert image to tga, zlib compress, then base64encode
# Output is [Filename].data
# returns base64 encoded, compressed iamge, and size of the compressed image
def encode(imageToEncode, isIcon = False):
if imageToEncode[-4:] != ".tga":
if imageToEncode[-4:] != ".png":
convertIMGtoPNG(imageToEncode)
imageToEncode = removeExt(imageToEncode) + ".png"
convertIMGtoTGA(imageToEncode, isIcon)
imageToEncode = removeExt(imageToEncode) + ".tga"
with open(imageToEncode, "rb") as image:
byteImage = bytearray(image.read())
compress = zlib.compress(byteImage, 6)
sizeOfImage = sys.getsizeof(compress)
encode = base64.b64encode(compress)
newName = removeExt(imageToEncode) + ".data"
with open(newName, "wb") as newFile:
newFile.write(encode)
newFile.close()
return encode, sizeOfImage
# We may want to resize an image if this script is fed something that's the wrong size
def resize(imageToResize, isIcon):
sizeToFit = (128,128) if isIcon else (320,120)
# Find ratios
widthRatio = float(sizeToFit[0]) / float(imageToResize.size[0])
heightRatio = float(sizeToFit[1]) / float(imageToResize.size[1])
# Find potential new values
potentialWidth = float(imageToResize.size[0]) * heightRatio
potentialHeight = float(imageToResize.size[1]) * widthRatio
if (potentialWidth < sizeToFit[0]):
newSize = sizeToFit[0], int(potentialHeight)
else:
newSize = int(potentialWidth), sizeToFit[1]
imageToResize = imageToResize.resize(newSize, Image.ANTIALIAS)
# crop
imageToResize = imageToResize.crop((0, 0, sizeToFit[0], sizeToFit[1]))
#return image
return imageToResize
# Converting to PNG
def convertTGAtoPNG(imageToConvert):
old = Image.open(imageToConvert)
old.convert("I")
new = old.save(removeExt(imageToConvert) + ".png")
def convertIMGtoPNG(imageToConvert):
conv = Image.open(imageToConvert)
conv = conv.convert('1')
conv = conv.convert('RGBA')
return conv.save(removeExt(imageToConvert) + ".png")
# Converting to TGA
def convertIMGtoTGA(imageToConvert, isIcon):
old = Image.open(imageToConvert)
resized = resize(old, isIcon)
# if resized.mode == "RGB":
# alph = Image.new('L', resized.size, 255)
# resized.putalpha(alph)
resized.convert("I")
new = resized.save(removeExt(imageToConvert) + ".tga")
# These are more concise and written by the talented, CaramelKat
# def mv2tga(filename):
# with open(filename) as f:
# data=f.read()
# with open(os.path.splitext(filename)[0]+'.tga','wb') as f:
# f.write(zlib.decompress(base64.b64decode(data)))
# def tga2mv(filename):
# with open(filename, 'rb') as f:
# data=f.read()
# with open(os.path.splitext(filename)[0], 'wb') as f:
# f.write(base64.b64encode(zlib.compress(data, 6)))
# Clean up any trailing extensions
def removeExt(stringToEdit):
return stringToEdit.split(".")[0]
if __name__ == "__main__":
main()
| true |
fa8b8a3159322e6e7520a8f508538d2ba38ff699 | Python | FarcasiuRazvan/AI-Projects | /AILab2ACO/src/Problem.py | UTF-8 | 1,150 | 3.25 | 3 | [] | no_license | '''
Created on Apr 15, 2019
@author: Wolf
'''
class problem(object):
    """Crossword-style word grid loaded from a text file.

    The file holds one row-word per line; column words are obtained by
    reading the grid vertically.  ``trace`` is an n x n 0/1 matrix with
    ones on and above the diagonal (n = number of row + column words).
    """
    def __init__(self, file_name):
        """Load the problem from *file_name* and echo the word list."""
        self.filename = file_name
        self.words = []
        self.trace = []
        self.loadProblem()
        print("words: ", self.words)
    def loadProblem(self):
        """Read the grid, append its column words and build ``trace``."""
        with open(self.filename, "r") as handle:
            self.words = [line.strip(" ").strip("\n") for line in handle]
        self.words += self.get_column_words(self.words)
        self.n = len(self.words)
        # Upper-triangular matrix of ones (1 where column >= row).
        for row in range(self.n):
            self.trace.append([1 if col >= row else 0 for col in range(self.n)])
    def get_column_words(self, words):
        """Return the words read column-wise from the row-word grid."""
        n_rows = len(words)
        n_cols = len(words[0])
        return ["".join(words[r][c] for r in range(n_rows)) for c in range(n_cols)]
| true |
3989888dcb8c24a74e27a5d56ba5d2b43e39fc29 | Python | Kawser-nerd/CLCDSA | /Source Codes/CodeJamData/12/32/17.py | UTF-8 | 3,039 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
import math
def process_file(file):
    """Read *file* and return its contents split on newlines."""
    with open(file) as handle:
        contents = handle.read()
    return contents.split('\n')
def process_lines(lines):
    """Parse the Code Jam input into a list of case dicts.

    Each case dict carries: D (finish distance), N (number of waypoints),
    A (number of accelerations), tx (list of (time, position) waypoints)
    and a (list of accelerations).  The first input line (the case count)
    is skipped; an empty line terminates parsing early.
    """
    ans = []
    first = True          # True until the case-count line has been consumed
    N = -1                # -1 means "expecting a new case header line"
    n = 0                 # waypoints read so far for the current case (1-based)
    tx = {}               # placeholder; replaced by a list per case
    for line in lines:
        if first == True:
            first = False
        elif N == -1:
            if line != '':
                # Header line: "D N A".
                case = {}
                D = -1
                N = -1
                A = -1
                tx = []
                a = []
                for num in line.split(' '):
                    if D == -1:
                        D = float(num)
                    elif N == -1:
                        N = int(num)
                    elif A == -1:
                        A = int(num)
                case['D'] = D
                case['N'] = N
                case['A'] = A
                n = 1
            else:
                return ans
        elif len(tx) < N:
            # Waypoint line: "t x".
            split = line.split(' ')
            tx.append((float(split[0]), float(split[1])))
            n += 1
            if n > N:
                case['tx'] = tx
        else:
            # Acceleration line closes the case.
            for num in line.split(' '):
                a.append(float(num))
            case['a'] = a
            ans.append(case)
            N = -1
    return ans
def process_case(line):
    """Solve one parsed case dict and return the formatted answer lines.

    With fewer than two waypoints the other car is ignored and each
    acceleration yields the free-acceleration time to distance D; otherwise
    the last waypoint segment is clipped to D and solve() is run per
    acceleration.
    """
    D = line['D']
    N = line['N']
    A = line['A']
    a = line['a']
    tx = line['tx']
    result = []
    if len(tx) < 2:
        for acc in a:
            # Solve 0.5*acc*t^2 = D for t.
            # NOTE(review): this rebinds the name `a` (the acceleration list)
            # inside its own loop; iteration continues over the already
            # captured iterator, but the shadowing is fragile.
            a = 0.5 * acc
            b = 0
            c = -D
            t = solveQuadratic(a, b, c)
            result.append(str(t))
        return '\n' + '\n'.join(result)
    if tx[-1][1] > D:
        # Clip the final segment so the last waypoint sits exactly at D.
        txNormalize = normalize(tx[-2], tx[-1], D)
        tx[-1] = txNormalize
    #print 'tx'
    #print tx
    for acc in a:
        result.append(solve(D, N, A, acc, tx))
    return '\n' + '\n'.join(result)
def normalize(tx0, tx1, D):
    """Clip the waypoint segment tx0 -> tx1 at the finish distance *D*.

    Assuming constant speed between the two (time, position) waypoints,
    returns the (time, D) pair at which position D is crossed.
    """
    speed = (tx1[1] - tx0[1]) / (tx1[0] - tx0[0])
    crossing_time = tx0[0] + (D - tx0[1]) / speed
    return (crossing_time, D)
def solve(D, N, A, acc, tx):
    """Return (as a string) the time for our car to reach distance D.

    Starting from the state tuple tpv = (time, position, velocity), the car
    is advanced segment by segment behind the other car's waypoints until
    either distance D is reached or the waypoints run out.
    """
    tpv = (0.0, 0.0, 0.0)
    while tpv[1] < D and len(tx) > 1:
        #print tpv
        tpv = catchUp(D, N, A, acc, tx, tpv)
        tx = tx[1:]  # consume the segment just processed
    return str(tpv[0])
def catchUp(D, N, A, acc, tx, tpv):
    """Advance our car across one waypoint segment of the other car.

    tpv is our (time, position, velocity) state; tx[0] -> tx[1] is the
    other car's current segment, traversed at constant speed vx.  Solves
    for the catch-up time; if we would not catch up within this segment we
    accelerate freely to the segment's end position, otherwise we ride at
    the other car's speed to the end of the segment.
    Returns the new (time, position, velocity) state.
    """
    t0 = tpv[0]
    v0 = tpv[2]
    p0 = tpv[1]
    txCur = tx[0]
    txNext = tx[1]
    # Other car's constant speed over this segment, and its position now.
    vx = (txNext[1] - txCur[1]) / (txNext[0] - txCur[0])
    px = getPosition(t0, tx, vx)
    '''
    print 'tpv'
    print tpv
    print 'txCur'
    print txCur
    print 'txNext'
    print txNext
    print 'vx'
    print vx
    print 'px'
    print px
    print 'acc'
    print acc
    '''
    # Time until our accelerating car meets the other car:
    # 0.5*acc*t^2 + (v0 - vx)*t + (p0 - px) = 0.
    a = 0.5 * acc
    b = v0 - vx
    c = p0 - px
    t = solveQuadratic(a, b, c)
    '''
    print 't'
    print t0
    print t
    '''
    if t > txNext[0]:
        #print '>'
        # No catch-up within this segment: accelerate to its end position.
        tv = getTimeVel(txNext[1], acc, tpv)
        return (tpv[0] + tv[0], txNext[1], tv[1])
    else:
        #print '<'
        # Caught up: follow the other car to the end of the segment.
        return (txNext[0], txNext[1], vx)
def getTimeVel(p, acc, tpv):
    """Return (dt, velocity) after accelerating from state *tpv* to position *p*.

    Solves 0.5*acc*dt^2 + v0*dt + (p0 - p) = 0 for the elapsed time dt,
    then evaluates the velocity reached at that moment.
    """
    start_pos, start_vel = tpv[1], tpv[2]
    dt = solveQuadratic(0.5 * acc, start_vel, start_pos - p)
    return (dt, start_vel + dt * acc)
def solveQuadratic(a, b, c):
    """Return the larger real root of a*x^2 + b*x + c = 0.

    Assumes a non-negative discriminant and a != 0.
    """
    discriminant = b * b - 4 * a * c
    return (-b + math.sqrt(discriminant)) / (2 * a)
def getPosition(t0, tx, vx):
    """Position of the other car at time *t0*, moving at speed *vx* from waypoint tx[0]."""
    start_time, start_pos = tx[0]
    return start_pos + (t0 - start_time) * vx
if __name__ == "__main__":
    # NOTE: Python 2 entry point (a print *statement* follows below).
    import sys
    filename = sys.argv[1]
    lines = process_file(filename)
    lines = process_lines(lines)
    c = 0  # 1-based case counter for the "Case #i:" output format
    for line in lines:
        c += 1
print "Case #%d: %s" % (c, process_case(line)) | true |
6f838f79190b62d04c90b23cf032770c11dddb39 | Python | calpe20/PYTHONIZANDO | /practica_1.py | UTF-8 | 235 | 3.234375 | 3 | [] | no_license | #!usr/bin/dev/ python
#--*-- conding: utf-8-*-
# ESTRUCTURA CONDICIONA IF
import random
''' hola a todos '''
y = 5
x = random.randint(1,10)
print x
if x == y:
print str(y) + ' es igual al numero generado'
else:
print 'Fallando' | true |
0d408326e80a70503d7c5aa8d90e4bde3075b5e4 | Python | BhagyashriNBhamare/NITK-IT-DEPARTMENT-LAB-2022 | /da lab/lab 1/edit.py | UTF-8 | 112 | 2.578125 | 3 | [] | no_license | from openpyxl import load_workbook
workbook = load_workbook(filename="4.csv")
# BUG FIX: `sheet` was never defined before being used on the next line
# (NameError); bind the workbook's active worksheet here.
# NOTE(review): load_workbook expects an .xlsx file — confirm that "4.csv"
# is actually an Excel workbook despite its extension.
sheet = workbook.active
print(sheet.cell(row=2, column=2)) | true |
4d660ee983f428838f7b1d87b8c5591f0dd0efab | Python | jhill1440/anonDemuxer | /getbarcode.py | UTF-8 | 588 | 2.546875 | 3 | [] | no_license | import os
import shutil
inputFolder = "data1"
path1 = "%s/" %inputFolder
fList1 = os.listdir(path1)
os.chdir(path1)
folderName2 = 'data2'
path2 = "%s/test1.txt" %folderName2
#read in first line, use wildcard to open only file in folder
fList2 = open(path2).readline()
#look for barcode
#split firstline on ":" and use last item in list
groups = fList2.split(":")
barcode = groups[-1]
#os.chdir("/Users/joshuahill/Dropbox (TxGen)/Share/Lab/joshtest/out1")
#os.mkdir(barcode)
#shutil.copytree(path3/folderName2, barcode)
print groups
print barcode
print os.getcwd()
| true |
3c1d93ff6a10e0400c90c8e0c0f4add90dd9c91f | Python | srihariprasad-r/workable-code | /hackerearth/Algorithms/Graphs/Bishu_and_his_girlfriend.py | UTF-8 | 1,401 | 3.375 | 3 | [] | no_license | '''
# Sample code to perform I/O:
name = input() # Reading input from STDIN
print('Hi, %s.' % name) # Writing output to STDOUT
# Warning: Printing unwanted or ill-formatted data to output will cause the test cases to fail
'''
# Write your code here
# First line: node count n and (optionally) edge count m.
arr = [int(x) for x in input().split()]
n = arr[0]
m = arr[1] if len(arr) > 1 else 0
min_city = 999999        # best (smallest) distance found so far
out = -1                 # city id of the current best answer
visited = [0] * (n + 1)  # 1-based node ids
distance = [0] * (n + 1) # distance of each node from its DFS root
adj_dict = {}            # adjacency list: node -> list of neighbours
girl_city_list = []      # query cities to evaluate
def dfs(node, dist):
    """Depth-first traversal recording each node's distance from the root.

    On a tree this equals the unique path length; mutates the module-level
    ``visited`` and ``distance`` arrays.
    """
    visited[node] = 1
    distance[node] = dist
    if node in adj_dict:
        for k in adj_dict[node]:
            if not visited[k]:
                dfs(k, distance[node] + 1)
    return
if m == 0:
m = n - 1
for i in range(m):
input_list = list(map(int,input().split()))
adj_dict.setdefault(int(input_list[0]), []).append(int(input_list[1]))
adj_dict.setdefault(int(input_list[1]), []).append(int(input_list[0]))
q = int(input())
for i in range(1, q+1):
girl_city_list.append(int(input()))
for i in range(1, n):
if not visited[i]:
dfs(i, 0)
for i in range(len(girl_city_list)):
if distance[girl_city_list[i]] < min_city:
min_city = distance[girl_city_list[i]]
out = girl_city_list[i]
elif distance[girl_city_list[i]] == min_city and out < min_city:
min_city = girl_city_list[i]
out = girl_city_list[i]
print(out) | true |
91a01c2eb981ab90cf36a690330deaa4c2ba31b5 | Python | wangjf1993/others | /encode_decode.py | UTF-8 | 433 | 3 | 3 | [] | no_license | import binascii
import struct
s = b'hello'
h = binascii.b2a_hex(s)
print(h)
print(binascii.a2b_hex(h))
data = b'\x00\x124V\x00x\x90\xab\x00\xcd\xef\x01\x00#\x004'
x1 = int.from_bytes(data, 'little')
x2 = int.from_bytes(data, 'big')
print(len(data), x1, x2)
print(x1.to_bytes(16, 'little'), x2.to_bytes(16, 'big'))
ss = struct.pack('<idd', 1, 3, 4)
print(ss)
print(struct.unpack('<idd', ss))
x = 10
a = lambda y:x +y
print(a(10)) | true |
59601ca6e1b13a4bca14925065ff77ba6e226f60 | Python | negibokken/sandbox | /leetcode/1512_number_of_good_pairs/main.py | UTF-8 | 572 | 3.09375 | 3 | [] | no_license | #!/usr/bin/python3
from typing import List
import json
import math
def combinations(n, r):
    """Return n choose r, computed from factorials (requires n >= r >= 0)."""
    numerator = math.factorial(n)
    denominator = math.factorial(n - r) * math.factorial(r)
    return numerator // denominator
class Solution:
    """LeetCode 1512 — count "good pairs" (i, j) with i < j and nums[i] == nums[j]."""
    def numIdenticalPairs(self, nums: List[int]) -> int:
        """Return the number of good pairs in *nums*.

        A value occurring v times contributes C(v, 2) = v*(v-1)/2 pairs;
        the binomial is inlined from the module-level helper.
        """
        counts = {}
        for value in nums:
            counts[value] = counts.get(value, 0) + 1
        return sum(v * (v - 1) // 2 for v in counts.values() if v > 1)
# Driver: read a JSON array from stdin, e.g. [1,2,3,1,1,3], and print the count.
arr = json.loads(input())
sol = Solution()
print(sol.numIdenticalPairs(arr))
| true |
429ebfab06ec73197b36b6fc02ab522b82f80574 | Python | vavpavlov/GB_HW | /Rec_sys/src/utils.py | UTF-8 | 2,121 | 2.84375 | 3 | [] | no_license | def prefilter_items(data_train):
# Оставим только 5000 самых популярных товаров
popularity = data_train.groupby('item_id')['quantity'].sum().reset_index()
popularity.rename(columns={'quantity': 'n_sold'}, inplace=True)
top_5000 = popularity.sort_values('n_sold', ascending=False).head(5000).item_id.tolist()
#добавим, чтобы не потерять юзеров
data_train.loc[~data_train['item_id'].isin(top_5000), 'item_id'] = 999999
# Уберем самые популярные
# Уберем самые непопулряные
# Уберем товары, которые не продавались за последние 12 месяцев
# Уберем не интересные для рекоммендаций категории (department)
# Уберем слишком дешевые товары (на них не заработаем). 1 покупка из рассылок стоит 60 руб.
# Уберем слишком дорогие товарыs
# ...
return data_train
def postfilter_items(data):
    """Post-process recommendation results (placeholder, not implemented yet)."""
    pass
def get_similar_items_recommendation(user, model, item, N=5):
    '''Recommend items similar to the given item via the ALS model.

    NOTE(review): relies on module/notebook-level mappings ``id_to_itemid``
    and ``itemid_to_id`` that are not defined in this file — confirm they
    are injected by the caller.  The *user* argument is currently unused.
    '''
    res = [id_to_itemid[rec[0]] for rec in model.similar_items(itemid_to_id[item], N=N)]
    return res
"""
user_rec = """
def get_similar_users_recommendation(user, model, sparse_user_item, item, N=5):
'''Рекомендуем топ-N товаров'''
res = [id_to_itemid[rec[0]] for rec in
model.recommend(userid=userid_to_id[user],
user_items=sparse_user_item, # на вход user-item matrix
N=N,
filter_already_liked_items=False,
filter_items=[itemid_to_id[item]],
recalculate_user=True)]
return res
| true |
1b6698f6253ed56ba61de6d8f85335a672ce2a25 | Python | zhutcho/csc100-koala-kode | /app/RegisterPage.py | UTF-8 | 3,609 | 3.09375 | 3 | [] | no_license | import tkinter as tk
import keyring as kr
import re
# the the id in which keyring assosciates the passwords/usernames.
service_id = 'Wildlife Hospital'
username = ""
class RegisterPage(tk.Frame):
    """Tkinter frame for registering the admin user's keyring credentials."""
    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent)
        # Controller references the parent class(App.App())
        self.controller = controller
        # Register_Frame
        register_frame = tk.Frame(self, width=480, height=250)
        register_frame.pack()
        # New Username Entry Box (fixed to "admin" and made read-only)
        self.new_username = tk.Entry(register_frame, width=35)
        self.new_username.place(
            in_=register_frame, relx=0.5, rely=0.25, anchor=tk.CENTER)
        self.new_username.insert(0, "admin")
        self.new_username.config(state="readonly")
        # New Username Label
        register_label = tk.Label(register_frame,
                                  text="New Username: ")
        register_label.place(in_=self.new_username, relx=.2,
                             rely=-0.75, anchor=tk.CENTER)
        # New Password Entry Box (masked input)
        self.new_password = tk.Entry(register_frame, width=35)
        self.new_password.place(
            in_=register_frame, relx=0.5, rely=0.45, anchor=tk.CENTER)
        self.new_password.config(show='*')
        # New Password Label
        password_label = tk.Label(register_frame,
                                  text="New Password: ")
        password_label.place(in_=self.new_password,
                             relx=0.19, rely=-0.75, anchor=tk.CENTER)
        # Create Button
        create_button = tk.Button(register_frame, text="Create", command=lambda:
                                  self.register_entry())
        create_button.place(in_=self.new_password, relx=0.1,
                            rely=2, anchor=tk.CENTER)
        # Warning Display Label (red text for validation feedback)
        self.register_warning_label = tk.Label(register_frame,
                                               text="", fg='#C03A3A')
        self.register_warning_label.place(
            in_=register_frame, relx=0.28, rely=0.7, anchor=tk.CENTER)
    def register_entry(self):
        """Validate the entered credentials and store them via keyring.

        Checks, in order: user does not already exist; username length
        (>7 chars unless it is "admin"); no whitespace in the username;
        password contains a digit and a special character.  Updates the
        warning label with the outcome and, on success, saves the password
        under ``service_id`` and switches to the login page.
        Returns None (the label text carries the result).
        """
        register_page = self.controller.get_page("RegisterPage")
        # Requirements to be met to create user
        if kr.get_password(service_id, register_page.new_username.get()) != None:
            register_page.register_warning_label["text"] = "User Already Exists"
        elif len(register_page.new_username.get()) <= 7 and register_page.new_username.get() != "admin":
            register_page.register_warning_label["text"] = "Username must be greater than 7 digits long"
        elif bool(re.search(r"\s", register_page.new_username.get())) == True:
            register_page.register_warning_label["text"] = "Username must not contain spaces"
        elif bool(re.search(r"\d", register_page.new_password.get())) == False\
                or bool(re.search(r"[!@#$%^&*()<>,./\{}?]", register_page.new_password.get())) == False:
            register_page.register_warning_label["text"] = "Password must contain a digit and a special character"
        else:
            kr.set_password(service_id, register_page.new_username.get(
            ), register_page.new_password.get())
            register_page.register_warning_label["text"] = "Successfully Created User"
            self.controller.show_frame("LoginPage")
| true |
fb8360d26293bae9e65910bc7f2254abc1d080f8 | Python | K123AsJ0k1/StoryApp | /text.py | UTF-8 | 1,192 | 3.078125 | 3 | [] | no_license | from app import app
def rows(text):
    """Return the number of CRLF-separated lines in *text*."""
    return text.count("\r\n") + 1
def check_title_requirements(title):
    """A title is valid when it is non-empty and at most 50 characters long."""
    return 0 < len(title) <= 50
def check_text_requirements(text):
    """Validate story text: non-empty and no line longer than 100 characters.

    Texts of up to 100 characters are accepted outright.  Longer texts
    must contain at least one CRLF; lines are then measured one character
    at a time, resetting on "\r".  Note that "\n" characters are skipped
    and therefore do not count toward a line's length.
    """
    text_lenght = len(text)
    if text_lenght == 0:
        return False
    if text_lenght <= 100:
        return True
    # A long text without any line break cannot satisfy the line limit.
    if text.find("\r\n") == -1:
        return False
    line_size = 0
    for letter in text:
        if letter == "\r":
            if line_size > 100:
                return False
            line_size = 0
            continue
        if letter == "\n":
            continue
        line_size = line_size + 1
    # The final line has no trailing "\r", so check it after the loop.
    if line_size > 100:
        return False
    return True
def get_source_text(text):
    """Encode CRLF-separated text as a single '|'-delimited string for storage."""
    return "|".join(text.split("\r\n"))
def get_original_text(source_text):
    """Decode a '|'-delimited stored string back into CRLF-separated text."""
    return "\r\n".join(source_text.split("|"))
def get_source_text_array(source_text):
    """Return the stored string as a list of its '|'-delimited segments."""
    segments = source_text.split("|")
    return segments
| true |
942155741695918ebe5c024f4c4122a33f51fe6b | Python | onlyphantom/elangdev | /elang/word2vec/utils/cleansing.py | UTF-8 | 2,607 | 3.515625 | 4 | [
"CC0-1.0"
] | permissive | import os
import re
realpath = os.path.dirname(os.path.realpath(__file__))
def _open_file(filename):
    """Read a newline-separated word list from the bundled ``negative`` folder.

    :param filename: file name inside the package's ``negative`` directory
    :return: list of words, one per (non-terminal) line
    """
    filepath = realpath + "/negative/" + filename
    # BUG FIX: the file handle was previously opened without ever being
    # closed; use a context manager so it is released deterministically.
    with open(filepath) as handle:
        wordlist = handle.read().splitlines()  # splitlines already returns a list
    return wordlist
def _remove_words(sentence, words2remove):
for word in words2remove:
sentence = re.sub(r"\b" + word + r"\b", "", sentence.lower())
sentence = re.sub(r"\s+", " ", sentence).strip()
return sentence
# main cleansing function
def remove_stopwords_id(sentence):
    """remove_stopwords_id Removes Bahasa Indonesia Stopwords

    Stopwords are generally the most common "function" words in a language, and they're routinely eliminated in natural language processing tasks.
    Bahasa Indonesia stopwords (eg. "ya", "kan", "dong", "loh") are removed from the input string.

    :param sentence: An input string
    :type sentence: str
    :return: A string where common Bahasa Indonesia stopwords are filtered out
    :rtype: str
    """
    # Word list is re-read from disk on every call.
    stopwords = _open_file("stopwords-id.txt")
    sentence = _remove_words(sentence, stopwords)
    return sentence
def remove_region_id(sentence):
    """remove_region_id Removes name of places in Indonesia

    Regions are name of places (provinces and cities) in Indonesia (eg. "Jakarta", "Bali", "Sukabumi")

    :param sentence: An input string
    :type sentence: str
    :return: A string where common name of places in Indonesia are filtered out
    :rtype: str
    """
    # Word list is re-read from disk on every call.
    regions = _open_file("indonesian-region.txt")
    sentence = _remove_words(sentence, regions)
    return sentence
def remove_vulgarity_id(sentence):
    """remove_vulgarity_id Removes uncivilised words in Bahasa Indonesia

    Prevent words such as "anjir", "babi" etc to be included in natual language generation tasks

    :param sentence: An input string
    :type sentence: str
    :return: A string where common swear words in Indonesia are filtered out
    :rtype: str
    """
    # Word list is re-read from disk on every call.
    swears = _open_file("swear-words.txt")
    sentence = _remove_words(sentence, swears)
    return sentence
def remove_calendar_id(sentence):
    """remove_calendar_id Removes common "calendar words" in Bahasa Indonesia

    Calendar words include day of weeks (eg. senin, selasa, ...), months (`maret`, 'juni'), and their abbreviated forms (`okt`, 'jul')

    :param sentence: An input string
    :type sentence: str
    :return: A string where calendar words in Indonesia are filtered out
    :rtype: str
    """
    calendar_words = _open_file("calendar-words.txt")
    return _remove_words(sentence, calendar_words)
| true |
f021593216d9a607d03ae2ec8a5b137868c6804d | Python | michaelhsuke/learn-fron-end | /2019-10-29/python/01.py | UTF-8 | 1,933 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#python模拟键盘操作
#https://www.jianshu.com/p/7058c735bc96
#https://blog.csdn.net/u012474716/article/details/79072497
#https://blog.csdn.net/lshdp/article/details/83788812
# https://blog.csdn.net/Xylon_/article/details/100176606
# Windows automation script: for every URL line in ../js/04.txt, launch the
# WeChat devtools executable, focus the address bar (Ctrl+L / Ctrl+A), type
# the line and press Enter, then wait for a keypress before continuing.
TARGET_LINE_NO = 4
from pykeyboard import *
from pymouse import *
from time import sleep
import win32api
m = PyMouse()
k = PyKeyboard()
file = open('../js/04.txt', 'r', encoding="UTF-8")
# file = open('./01.txt', 'rb')
count = 0
# --- Earlier single-URL variant, kept for reference ---
# targetUrl = ''
# targetLineNo = TARGET_LINE_NO
# for line in file:
#     count = count + 1
#     if count == TARGET_LINE_NO:
#         targetUrl = line
#         break
# win32api.ShellExecute(0, 'open', r'D:\Program Files (x86)\Tencent\微信web开发者工具\微信开发者工具.exe', '','',1)
# sleep(3)
# k.press_keys([k.control_l_key, 'l'])
# k.press_keys([k.control_l_key, 'a'])
# k.type_string(targetUrl)
# sleep(1)
# k.tap_key(k.enter_key)
# # k.tap_key(k.enter_key)
# print(TARGET_LINE_NO)
for line in file:
    count = count + 1
    # win32api.ShellExecute(0, 'open', r'D:\Program Files (x86)\Tencent\微信web开发者工具\微信开发者工具.exe', '','',1)
    win32api.ShellExecute(0, 'open', r'D:\微信web开发者工具\微信web开发者工具.exe', '','',1)
    sleep(1)
    k.press_keys([k.control_l_key, 'l'])
    k.press_keys([k.control_l_key, 'a'])
    k.tap_key(k.backspace_key)
    k.tap_key(k.escape_key)
    k.type_string(line)
    sleep(1)
    k.tap_key(k.enter_key)
    # k.tap_key(k.enter_key)
    print(str(count) + ':' + line)
    # sleep(5)
    a = input('任意键继续')
# --- PyKeyboard cheat sheet ---
# k.tap_key('L')
# k.type_string('abcdefg')  -- type a string
# k.press_key('a')          -- press the 'a' key down
# k.release_key('a')        -- release the 'a' key
# k.tap_key('a')            -- tap the 'a' key once
# k.tap_key('a', n=2, interval=5)  -- tap 'a' twice, 5 seconds apart
# k.tap_key(k.function_keys[5])    -- function key F5
# m.click(1145, 700)
# k.type_string('123456')
# k.tap_key(k.enter_key)
| true |
ea7db2ebd9961af21e93e947425ead6fda9712f1 | Python | cjlee112/logtree | /fig.py | UTF-8 | 4,871 | 2.75 | 3 | [] | no_license | import test
import mut
from math import log, exp
from matplotlib import pyplot
def linear_fig(sizes, times, xlabel='number of sequences',
               ylabel='CPU time (sec)'):
    """Plot *times* against *sizes* on linear axes with circle markers."""
    pyplot.plot(sizes, times, marker='o')
    pyplot.xlabel(xlabel)
    pyplot.ylabel(ylabel)
def loglog_fig(sizes, times, xlabel='n log(n)',
               ylabel='CPU time (sec)'):
    """Log-log plot of *times* vs n*log(n), with an O(n log n) guide line.

    The dashed guide line passes through the geometric means of the data,
    so points lying along it indicate n log n scaling.
    """
    xdata = [x * log(x) for x in sizes]
    pyplot.loglog(xdata, times, 'bo')
    # Geometric means anchor the reference line through the data cloud.
    xavg = exp(sum([log(x) for x in xdata]) / len(xdata))
    yavg = exp(sum([log(x) for x in times]) / len(times))
    line = [yavg * x / xavg for x in xdata]
    pyplot.loglog(xdata, line, 'k--')
    pyplot.xlabel(xlabel)
    pyplot.ylabel(ylabel)
def plot_cubic_time(sizes, times):
xdata = [x * log(x) for x in sizes]
t0 = times[0] / (sizes[0] * sizes[0] * sizes[0])
cubic = [t0 * x * x * x for x in sizes]
pyplot.loglog(xdata, cubic, color='r')
def plot_total_pairs(sizes):
xdata = [x * log(x) for x in sizes]
pairs = [x * (x-1) / 2 for x in sizes]
pyplot.loglog(xdata, pairs, color='r')
def time_data(r=range(4, 14), maxP=.01, **kwargs):
return mut.test_range(r, maxP=maxP, **kwargs)
def time_fig(sizes, times, distances, nseqs):
pyplot.subplot(311)
linear_fig(sizes, times)
pyplot.subplot(312)
loglog_fig(sizes, times)
plot_cubic_time(sizes, times)
pyplot.subplot(313)
loglog_fig(sizes, distances, ylabel='number of distances')
plot_total_pairs(sizes)
def error_fig(x=None, y=None, xmin=1e-8, xlabel='p-value',
              ylabel='FDR', plotargs={},
              **kwargs):
    """Log-log plot of FDR (y) versus p-value (x).

    If x/y are not supplied, a test.Monitor is constructed from **kwargs
    and its analyze() output is plotted instead.
    """
    if x is None:
        # Bug fix: `Monitor` was referenced unqualified, raising NameError;
        # this module only imports `test` (cf. error_data, which uses
        # test.Monitor).
        monitor = test.Monitor(**kwargs)
        x, y = monitor.analyze()
    pyplot.loglog(x, y, **plotargs)
    pyplot.xlim(xmin=xmin)
    pyplot.xlabel(xlabel)
    pyplot.ylabel(ylabel)
def error_data(mapFunc=map, **kwargs):
monitor = test.Monitor(scoreFunc=mut.quartet_p_value_gmean,
nsample=None, mapFunc=mapFunc, **kwargs)
monitor2 = test.Monitor(mapFunc=mapFunc, maxP=0.0002, **kwargs)
monitor3 = test.Monitor(scoreFunc=mut.quartet_p_value2_mean,
mapFunc=mapFunc, **kwargs)
return monitor, monitor2, monitor3
def roc_figure(monitor, monitor2, monitor3, xlabel='FPR',
ylabel='TPR'):
fpr, tpr, aoc = monitor2.roc()
pyplot.plot(fpr, tpr)
fpr, tpr, aoc = monitor.roc()
pyplot.plot(fpr, tpr, color='r', linestyle=':')
fpr, tpr, aoc = monitor3.roc()
pyplot.plot(fpr, tpr, color='g', linestyle='-.')
pyplot.plot((0.,1.),(0.,1.), color='k', linestyle='--')
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
def roc_all_fig(**kwargs):
    """ROC plot using MonitorAll data."""
    # Bug fix: this *is* module fig, and `fig` is never imported here, so
    # `fig.error_data` raised NameError; call the module-level function.
    monitorA, monitor2A, monitor3A = error_data(monitorClass=test.MonitorAll, **kwargs)
    roc_figure(monitorA, monitor2A, monitor3A)
def error_fig2(monitor, monitor2, monitor3):
pyplot.subplot(211)
x, y = monitor.analyze()
pyplot.loglog(x, y, color='r', linestyle=':')
x, y = monitor3.analyze()
pyplot.loglog(x, y, color='g', linestyle='-.')
x, y = monitor2.analyze()
error_fig(x, y, plotargs=dict(color='b'))
pyplot.subplot(212)
roc_figure(monitor, monitor2, monitor3)
def neighbor_data(r=range(200,1001, 100), **kwargs):
l = []
for length in r:
naybs, degrees = test.analyze_neighbors(length=length, **kwargs)
l.append(sum(naybs) / float(len(naybs)))
return l
def histogram_data(keys, naybs):
    """For each key, return the fraction of entries equal to that key in
    every list of *naybs* (a list of lists)."""
    return [
        [sum(1 for item in vals if item == key) / float(len(vals))
         for vals in naybs]
        for key in keys
    ]
def calc_mean_dist(naybs):
    """Return the arithmetic mean of each list in *naybs*."""
    means = []
    for vals in naybs:
        means.append(sum(vals) / float(len(vals)))
    return means
def neighb_fig1(x, dists1, dists2, xlabel='length',
ylabel='mean neighbor distance', xmax=1000, xmin=None):
if xmin is None:
xmin = x[0]
pyplot.plot(x, dists1, marker='+', color='r', linestyle='--')
pyplot.plot(x, dists2, marker='o', color='b')
pyplot.xlim(xmin=xmin, xmax=xmax)
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
def neighb_fig2(x, histNaive, histDR, histNDR1, histNDR2, xlabel='length',
ylabel='Fraction of Neighbors Matched', xmax=1000, xmin=None):
if xmin is None:
xmin = x[0]
pyplot.plot(x, histNaive, marker='+', color='r', linestyle='--')
pyplot.plot(x, histDR, marker='o', color='b')
pyplot.plot(x, histNDR1, marker='^', color='g', linestyle=':')
pyplot.plot(x, histNDR2, marker='s', color='k', linestyle='-.')
pyplot.xlim(xmin=xmin, xmax=xmax)
pyplot.xlabel(xlabel)
pyplot.ylabel(ylabel)
def neighb_composite(x, dists1, dists2, histNaive, histDR, histNDR1, histNDR2):
pyplot.subplot(211)
neighb_fig1(x, dists1, dists2)
pyplot.subplot(212)
neighb_fig2(x, histNaive, histDR, histNDR1, histNDR2)
| true |
e29a1c82ebd4fc44526076a1f783ce6409c3d37b | Python | Image-Py/sciwx | /sciwx/demo/canvas6_frame_toolbar.py | UTF-8 | 1,240 | 2.5625 | 3 | [
"BSD-3-Clause"
] | permissive | import sys, wx
sys.path.append('../../')
from skimage.draw import line
from sciwx.canvas import CanvasFrame
from sciapp.action import Tool, ImageTool
class Pencil(ImageTool):
    """Freehand drawing tool: dragging the mouse paints a red line."""
    title = 'Pencil'

    def __init__(self):
        # status: True while the mouse button is held down (drawing).
        self.status = False
        # oldp: previous pen position as (row, col).
        self.oldp = (0, 0)

    def mouse_down(self, ips, x, y, btn, **key):
        self.status = True
        self.oldp = (y, x)

    def mouse_up(self, ips, x, y, btn, **key):
        self.status = False

    def mouse_move(self, ips, x, y, btn, **key):
        if not self.status: return
        # Rasterize the segment from the previous position to (y, x).
        se = self.oldp + (y, x)
        rs, cs = line(*[int(i) for i in se])
        # Bug fix: clamp rows by shape[0] and columns by shape[1] (they
        # were swapped), and use shape-1 since np.clip bounds are
        # inclusive -- index == shape raised IndexError at the border.
        # Assumes ips.shape is (rows, cols) -- standard image convention.
        rs.clip(0, ips.shape[0] - 1, out=rs)
        cs.clip(0, ips.shape[1] - 1, out=cs)
        ips.img[rs, cs] = (255, 0, 0)
        self.oldp = (y, x)
        key['canvas'].update()

    def mouse_wheel(self, ips, x, y, d, **key):
        pass
if __name__=='__main__':
from skimage.data import camera, astronaut
from skimage.io import imread
app = wx.App()
cf = CanvasFrame(None, autofit=False)
cf.set_imgs([astronaut(), 255-astronaut()])
cf.set_cn((0,1,2))
bar = cf.add_toolbar()
bar.add_tool('M', ImageTool)
bar.add_tool('P', Pencil)
cf.Show()
app.MainLoop()
| true |
a5dfa4becf30708f4411fa31a91b7c10f5ca56bf | Python | niwanowa/AtCoder | /ABC017/a.py | UTF-8 | 147 | 3.015625 | 3 | [] | no_license | s1, e1 = map(int, input().split())
s2, e2 = map(int, input().split())
s3, e3 = map(int, input().split())
print(int(s1*0.1*e1+s2*0.1*e2+s3*0.1*e3)) | true |
3c52594498b1769c072bcae03fae1714ece83db8 | Python | Ryan-F-McAlister/CFB2020 | /CFBmaster.py | UTF-8 | 3,016 | 3.125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 19:58:30 2020
@author: rynmc
"""
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow import keras
#%%
n_embedding=20
n_hidden1=35
n_hidden2=12
n_epochs=15
p_val=0.1
#%%
def cleanTeamNames(teamNameArray):
    """Strip ranking prefixes like "(3) Ohio State" -> "Ohio State".

    The two Miami schools are rewritten first so their parenthesised
    state suffix is not mistaken for a ranking prefix.
    """
    def _clean(raw):
        raw = raw.replace("Miami (FL)", "Miami-FL").replace("Miami (OH)", "Miami-OH")
        pieces = raw.split(")")
        return pieces[-1].strip() if len(pieces) > 1 else pieces[0]

    return [_clean(team) for team in teamNameArray]
def buildTeamDictionary(winners, losers):
    """Map every team name appearing in either Series to a unique index.

    Indices are assigned in order of first appearance (winners first).
    Returns (mapping, number_of_distinct_teams).
    """
    dictionary = {}
    for team in np.append(winners.values, losers.values):
        # setdefault leaves existing entries untouched.
        dictionary.setdefault(team, len(dictionary))
    return dictionary, len(dictionary)
def buildWindows(df, dictionary):
tensorListX=[]
tensorListY=[]
for index, row in df.iterrows():
x1=dictionary[row['Winner']]
x2=dictionary[row['Loser']]
x_win=tf.constant([x1,x2], shape=(1,2))
y_win=tf.constant([int(row['Pts'])], shape=(1,1))
x_lose=tf.constant([x2,x1],shape=(1,2))
y_lose=tf.constant([int(row['Pts.1'])], shape=(1,1))
tensorListX.append(x_win)
tensorListX.append(x_lose)
tensorListY.append(y_win)
tensorListY.append(y_lose)
x_train=tf.stack(tensorListX)
y_train=tf.stack(tensorListY)
return x_train, y_train
#%%
resultDf = pd.read_html("https://www.sports-reference.com/cfb/years/2020-schedule.html")[0]
#remove subheader rows
resultDf=resultDf[resultDf['Rk']!='Rk']
#remove unplayed games
resultDf=resultDf[[not x for x in pd.isna(resultDf['Pts'])]]
#remove rank from team name
resultDf['Winner']=cleanTeamNames(resultDf['Winner'])
resultDf['Loser']=cleanTeamNames(resultDf['Loser'])
#build dictionary of teams
dictionary, n_teams =buildTeamDictionary(resultDf['Winner'], resultDf['Loser'])
#build windows of labels
x_train, y_train = buildWindows(resultDf, dictionary)
#build model
model=keras.Sequential()
model.add(keras.layers.Input((1,2)))
model.add(keras.layers.Embedding(n_teams, n_embedding))
model.add(keras.layers.Reshape(target_shape=(1,-1)))
model.add(keras.layers.Dense(n_hidden1))
model.add(keras.layers.Dense(n_hidden2))
model.add(keras.layers.Dense(1))
model.compile(loss="MeanSquaredError", optimizer='adam')
model.fit(x=x_train, y=y_train, epochs=n_epochs, validation_split=p_val)
#%% predict games
def predictGame(team1, team2, model,dictionary):
x1=dictionary[team1]
x2=dictionary[team2]
points1=model(tf.constant([x1,x2], shape=(1,2)))
points2=model(tf.constant([x2,x1], shape=(1,2)))
print(team1 + ":" + str(points1.numpy()[0][0][0]) + " " + team2 + ":" + str(points2.numpy()[0][0][0]))
predictGame("Ohio State", "Michigan State", model, dictionary)
| true |
2bd3bc74a0c65be29c054701196793e9f1083417 | Python | zhangwei725/Py1802Adv | /apps/session01/views.py | UTF-8 | 4,366 | 2.6875 | 3 | [] | no_license | from django.contrib.sessions.backends.cache import SessionStore
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
# cookie 存储数据的格式 key:value
"""
如何获取cookie信息 -----> 通过request对象获取
如何设置 HttpResponse-----> 设置cookie信息
说明一下
当用户第一次访问网站的时候 获取不到任何cookie信息
拿响应的时候可以设置一些cookie信息
下次来的时候浏览器会自动把cookie信息传入服务器,拿服务器就可以通过
request.COOKIE.get(key)拿到相关的数据
"""
"""
set_cookie
参数说明
key 键
value 值
max_age cookie信息过期的时间 多久过期
expires = 过期时间 具体的某个时间点 主要不要用 max_age同时使用
path='/' cookie生效的路径 默认所有的连接都能获取cookie信息
domain cooke生效的域名
httponly=False 默认为True 如果是False只能http协议传输 js无法获取cookie
"""
# session
def test_cookie(request):
resp = HttpResponse('测试cookie')
msg = request.COOKIES.get('msg')
print(msg)
if not msg:
resp.set_cookie('msg', 'hello', path='/sc/cookie/')
return resp
def test_signed_cookie(request):
    """Demonstrate signed cookies: read 'num' and set it when missing."""
    resp = HttpResponse('测试cookie')
    # default=None avoids the KeyError that get_signed_cookie raises when
    # the cookie is absent (e.g. on the first visit).
    msg = request.get_signed_cookie('num', default=None, salt='afsfsdfs')
    print(msg)
    if not msg:
        # resp.set_cookie('msg', 'hello', path='/sc/cookie/')
        # Bug fix: the keyword was misspelled `alt=`, which raises
        # TypeError; the salt must match the one used when reading.
        resp.set_signed_cookie('num', 1, salt='afsfsdfs')
    return resp
def test02(request):
resp = HttpResponse('测试cookie')
msg = request.COOKIES.get('msg')
print(msg)
return resp
def session01(request):
    """Write demo values into the session store."""
    # request.session behaves like a SessionStore() instance.
    session_store = request.session
    # Set if absent, overwrite if present.
    session_store['s1'] = 123
    # setdefault: only set when the key does not already exist.
    session_store.setdefault('s2', '111111')
    return HttpResponse('session, test01')
def session02(request):
# SessionStore()
session_store = request.session
s1 = session_store['s1'] # 当key不存在就出异常
# 获取值
s1 = session_store.get('s2')
print(s1)
# 获取当前session所有的键
keys = session_store.keys()
# 获取所有的值
values = session_store.values()
for value in values:
print(value)
# 获取所有的键值对
items = session_store.items()
for key, value in items:
print(key)
print(value)
# session_id
# session_id = session_store.session_key
# # 005ethyda0ltv46nbxa4fzkuq652cy2v
# c = request.COOKIES.get("sessionid")
# print(session_id)
# 删除
# session_store.pop()
return HttpResponse('session, 获取session')
def del_session(request):
session_store = request.session
# 判断key是否存在,存在就是True
if session_store.exists('s1'):
del session_store['s1']
print('删除成功')
return HttpResponse('session,删除session')
def set_exp(request):
session_store = request.session
"""
设置session过期时间
参数说明
value
1> 如果value是一个正整数 session会在设置的秒数后失效
2> datetime session 会在指定的时间失效
3> 0 用于关闭浏览器失效
4> None 默认 依赖全局
"""
session_store.set_expiry(0)
session_store.setdefault('expiry', 'test')
return HttpResponse('session,局部设置过期时间')
# 测试重定向技术
def test_redirect(request):
return redirect('/session/5/')
def test(request):
session_store = request.session
expiry = session_store.get('expiry')
return HttpResponse('session,删除session')
# 7天免登陆
def login(request):
    """Render the login form on GET; authenticate on POST.

    On a successful login a user dict is stored in the session and the
    browser is redirected to the index page.
    """
    if request.method == 'POST':
        username = request.POST.get('username')
        password = request.POST.get('password')
        if username == 'xiaoming' and password == '123':
            # Bug fix: the stored username was misspelled 'xiaming'.
            request.session['user'] = {'username': 'xiaoming', 'uid': 1}
            return redirect('/session/index/')
    # Bug fix: a failed POST previously fell through and returned None,
    # which makes Django raise; re-render the login form instead.
    return render(request, 'login.html')
# cookie 4k 只能存asc||
def index(request):
user = request.session.get('user')
return render(request, 'index.html', {'user': user})
def register(request):
return None
def loginout(request):
    """Log the current user out and return to the index page."""
    # Bug fix: bare `del request.session['user']` raised KeyError when no
    # user was logged in; pop() tolerates anonymous visitors.
    request.session.pop('user', None)
    # Trailing slash matches the URL used elsewhere (see login()).
    return redirect('/session/index/')
| true |
674d17b972f7d4868970350fbf205bafff7180b2 | Python | monsher/opengl-samples | /rasterization/main.py | UTF-8 | 8,915 | 2.828125 | 3 | [] | no_license | import glfw
import itertools as itertools
from OpenGL.GL import *
import numpy as np
def key_callback(window, key, scancode, action, mods):
global clear_all, clear_buffers, mode, complete
if key == glfw.KEY_1 and action == glfw.PRESS and mode != 1:
mode = 1
clear_all = True
elif key == glfw.KEY_1 and action == glfw.PRESS:
mode = 1
elif key == glfw.KEY_2 and action == glfw.PRESS:
mode = 2
clear_buffers = True
elif key == glfw.KEY_3 and action == glfw.PRESS:
mode = 3
clear_buffers = True
elif key == glfw.KEY_C and action == glfw.PRESS:
clear_all = True
elif key == glfw.KEY_ENTER and action == glfw.PRESS:
complete = True
def mouse_button_callback(window, button, action, mods):
global x, y, clicked
if button == glfw.MOUSE_BUTTON_1 and action == glfw.PRESS:
x, y = glfw.get_cursor_pos(window)
clicked = True
def resize_callback(window, width, height):
global clear_all, window_height
window_height = height
glViewport(0, 0, width, height)
clear_all = True
def is_doubling_need(x, y, edges):
    """Decide whether the vertex (x, y) must be counted twice by the
    scan-line fill.

    A crossing at a vertex is counted twice when the vertex is a local
    extremum, i.e. both incident edges continue to the same side of y.
    Vertices with other than two incident edges are always doubled.
    """
    incident = [edge for edge in edges if (x, y) in edge]
    if len(incident) != 2:
        return True
    # For each incident edge, take the y of the endpoint that is not (x, y).
    neighbor_ys = []
    for edge in incident:
        other = next(p for p in edge if p[0] != x or p[1] != y)
        neighbor_ys.append(other[1])
    y1, y2 = neighbor_ys
    # Double only when the vertex does not lie between its neighbours.
    return (y1 > y and y2 > y) or (y1 < y and y2 < y)
def merge_dicts(dicts, edges):
keys = set().union(*dicts)
dicts = {k: [i.get(k, []) for i in dicts] for k in keys}
dicts = {k: sorted(list(itertools.chain(*v))) for k, v in dicts.items()}
# устранения дублирующихся значений х там, где это не нужно
for y, xs in dicts.items():
new_xs = []
for x in xs:
x_count = len(list(filter(lambda i: i == x, xs)))
if x_count > 1 and ((x not in new_xs) or (x in new_xs and is_doubling_need(x, y, edges))):
new_xs.append(x)
elif x_count == 1:
new_xs.append(x)
dicts[y] = new_xs
return dicts
def compute_intersections(x1, y1, x2, y2):
global window_height
if y1 == y2:
return {}
intersections = {}
for y in range(window_height):
x = (y - y1) * (x2 - x1) / (y2 - y1) + x1
if (min(y1, y2) <= y <= max(y1, y2)) and (min(x1, x2) <= x <= max(x1, x2)):
intersections.setdefault(y, []).append(round(x))
return intersections
def compute_pixels_smoothing(x1, y1, x2, y2, color, matrix):
if y2 - y1 == 0:
# случай с горизонтальным ребром не рассматривается
return {}
# используется целочисленный алгоритм Брезенхэма
dx = abs(x2 - x1)
dy = abs(y2 - y1)
slope = dy > dx
if slope:
dx, dy = dy, dx
if slope and ((x2 < x1 and y2 > y1) or (x1 < x2 and y2 < y1)):
y2, y1 = y1, y2
x1, x2 = x2, x1
elif not slope and ((y1 < y2 and x1 < x2) or (y2 < y1 and x2 < x1)):
y2, y1 = y1, y2
x1, x2 = x2, x1
# определяется направление движения
y_step = 1 if y1 < y2 else -1
x_step = 1 if x1 < x2 else -1
y = y1
x = x1
bright_max = 8 # максимальный коэффициент яркости умножается на 10, чтобы арифметика с ним была целочисленной
bright_min = 0.2
# все константы домножены на 2 * dx
e = bright_max * dx
m = 2 * dy * bright_max
w = 2 * e
e_max = w - m
matrix[y][x] = tuple(map(lambda x: x * (bright_min + bright_max / 20), color))
for i in range(dx):
if e >= e_max:
if slope:
x += x_step
else:
y += y_step
e -= w
if slope:
y += y_step
else:
x += x_step
e += m
bright = bright_min + e / (2 * dx * 10)
matrix[y][x] = tuple(map(lambda x: x * bright, color))
return matrix
def create_edges(vertexes):
    """Return the closed polygon's edge list: each vertex paired with its
    predecessor, with the first vertex preceded by the last (wrap-around)."""
    edges = []
    previous = vertexes[-1] if vertexes else None
    for vertex in vertexes:
        edges.append((previous, vertex))
        previous = vertex
    return edges
def get_sorted_intersections(vertexes):
    """Compute scan-line intersections for the polygon given by *vertexes*.

    Returns a dict keyed by y (ascending) whose values are the ascending
    lists of x crossings on that scan line.
    """
    edges = create_edges(vertexes)
    intersections = [compute_intersections(x1, y1, x2, y2) for (x1, y1), (x2, y2) in edges]
    # merge_dicts fuses the per-edge {y: [x, ...]} dicts and resolves
    # duplicated x values at shared vertices.
    intersections = merge_dicts(intersections, edges)
    # Removed a leftover debug print(edges) that spammed stdout per call.
    intersections = {k: sorted(intersections[k]) for k in sorted(intersections.keys())}
    return intersections
def create_matrix(width, height, intersections, color):
    """Rasterize a polygon into an RGB float32 buffer.

    Uses even-odd filling: consecutive pairs in each sorted crossing list
    bound one filled horizontal span (inclusive of both endpoints).
    """
    matrix = np.zeros((width, height), dtype='3float32')
    for y, xs in intersections.items():
        # Pair up the crossings: [x0, x1, x2, x3] -> spans x0..x1, x2..x3.
        for left, right in zip(xs[::2], xs[1::2]):
            matrix[y, left:right + 1] = color
    return matrix
def add_lines_to_matrix(vertexes, color, matrix):
edges = create_edges(vertexes)
for (x1, y1), (x2, y2) in edges:
compute_pixels_smoothing(x1, y1, x2, y2, color, matrix)
return matrix
def prepare_projection(width, height):
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, width, 0.0, height, 1.0, -1.0)
glMatrixMode(GL_MODELVIEW)
def draw_points(coordinates, color):
glPointSize(2.0)
glBegin(GL_POINTS)
glColor3f(*color)
for x, y in coordinates:
glVertex2f(x, y)
glEnd()
def draw_lines(coordinates, color, is_complete):
if len(coordinates) > 1:
if is_complete:
glBegin(GL_LINE_LOOP)
else:
glBegin(GL_LINE_STRIP)
glColor3f(*color)
for x, y in coordinates:
glVertex2f(x, y)
glEnd()
def create_figure_view(width, height, figure, color):
intersections = get_sorted_intersections(figure)
matrix = create_matrix(width, height, intersections, color)
return matrix
# 1. произвольное задание многоугольника и отрисовка через GL_LINE
# 2. заливка многоугольника пикселями
# 3. применение сглаживания
def main():
global clear_all, clear_buffers, x, y, clicked, mode, complete, window_height
mode = 1
clicked, complete = False, False
clear_all, clear_buffers = True, True
window_height = 640
if not glfw.init():
print("GLFW not initialized")
return
window = glfw.create_window(640, window_height, "Rasterization", None, None)
if not window:
print("Window not created")
glfw.terminate()
return
glfw.make_context_current(window)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LESS)
glfw.set_key_callback(window, key_callback)
glfw.set_framebuffer_size_callback(window, resize_callback)
glfw.set_mouse_button_callback(window, mouse_button_callback)
figure = []
color = (1.0, 1.0, 1.0)
while not glfw.window_should_close(window):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
width, height = glfw.get_window_size(window)
prepare_projection(width, height)
if clear_buffers:
matrix = None
clear_buffers = False
if clear_all:
figure = []
matrix = None
clear_all, complete = False, False
if mode == 1:
if clicked:
figure.append((int(x), int(height - y)))
draw_points(figure, color)
draw_lines(figure, color, complete)
clicked = False
elif mode == 2:
if matrix is None:
matrix = create_figure_view(width, height, figure, color)
glDrawPixels(width, height, GL_RGB, GL_FLOAT, matrix)
elif mode == 3:
# фильтрация работает только при задании вершин против часовой стрелки!
# отрисовка сглаженных границ
if matrix is None:
matrix = create_figure_view(width, height, figure, color)
matrix = add_lines_to_matrix(figure, color, matrix)
glDrawPixels(width, height, GL_RGB, GL_FLOAT, matrix)
glfw.swap_buffers(window)
glfw.poll_events()
print("Terminate...")
glfw.terminate()
if __name__ == "__main__":
main()
| true |
cf930e5fe7884b9170fba4427f131059d1532e4e | Python | Defalt1/ip_parse-concept | /main.py | UTF-8 | 737 | 3.265625 | 3 | [] | no_license |
ip = []
ip_hosts = [192, 255, 255, 1]
ip_hosts_config = '.'.join(str(i) for i in ip_hosts)
def address():
    """Prompt for the four octets of an IPv4 address, one per prompt.

    Appends each validated octet (as a string) to the module-level ``ip``
    list and returns that list once it holds four octets.
    """
    while len(ip) != 4:
        ip_input = input("Input IP address: ")
        # An octet must be decimal digits in the range 0-255; the previous
        # length-only check accepted values like "abc" or "999".
        if not ip_input.isdigit() or not 0 <= int(ip_input) <= 255:
            print("Please input a valid IP address")
            continue
        ip.append(ip_input)
    return ip
ips = '.'.join(str(i) for i in address())
# Bug fix: the previous test compared the first *character* of the joined
# strings (e.g. '192...' and '172...' both start with '1' and compared
# equal); compare the first octet instead.
if ip[0] == str(ip_hosts[0]):
    print("*Shared Hosting!*")
    print("Home Network " + '.'.join(ip) + " : " + "Shared Network " + (ip_hosts_config))
else:
    print("*Not a shared hosting*")
    print("Home Network " + '.'.join(ip) + " : " + "Other Network " + (ip_hosts_config))
| true |
a6c46340ea6a4293a7c7851db21fe57476333cbe | Python | abhi542136/CS384_1801ee02 | /Assignment2/tutorial02.py | UTF-8 | 7,644 | 3.734375 | 4 | [] | no_license | # All decimal 3 places
# Function to compute mean
def mean(first_list):
    """Arithmetic mean of a numeric list.

    Returns 0 for an empty list or when any element is not an int/float
    (matching the error convention used throughout this module).
    """
    if not first_list:
        return 0
    total = 0
    for value in first_list:
        if not isinstance(value, (int, float)):
            return 0
        total += value
    return total / len(first_list)
# Function to compute median. You cant use Python functions
def median(first_list):
    """Median of a numeric list.

    Returns 0 for an empty list or when any element is not an int/float.

    Improvement: uses the builtin sorted() (O(n log n)) instead of the
    module's hand-rolled O(n^2) selection-sort helper; the input validation
    above guarantees identical results for all accepted inputs.
    """
    if not first_list:
        return 0
    for value in first_list:
        if not isinstance(value, (int, float)):
            return 0
    ordered = sorted(first_list)
    mid = len(ordered) // 2
    if len(ordered) % 2 == 0:
        # Even count: mean of the two central elements.
        return (ordered[mid - 1] + ordered[mid]) / 2
    return ordered[mid]
# Function to compute Standard deviation. You cant use Python functions
def standard_deviation(first_list):
    """Population standard deviation of a numeric list.

    Returns 0 for an empty list or when any element is not an int/float.
    """
    if not first_list:
        return 0
    for value in first_list:
        if not isinstance(value, (int, float)):
            return 0
    n = len(first_list)
    mu = sum(first_list) / n
    squared_dev = sum((value - mu) ** 2 for value in first_list)
    return (squared_dev / n) ** 0.5
# Function to compute variance. You cant use Python functions
def variance(first_list):
    """Population variance of a numeric list.

    Returns 0 for an empty list or when any element is not an int/float.

    Improvement: previously this squared the result of
    standard_deviation(), i.e. computed sqrt(v)**2 -- a needless round
    trip that loses a few bits of precision; the mean of squared
    deviations is now computed directly.
    """
    if not first_list:
        return 0
    for value in first_list:
        if not isinstance(value, (int, float)):
            return 0
    n = len(first_list)
    mu = sum(first_list) / n
    return sum((value - mu) ** 2 for value in first_list) / n
# Function to compute RMSE. You cant use Python functions
def rmse(first_list, second_list):
    """Root-mean-square error between two equal-length numeric lists.

    Returns 0 when the lists differ in length, are empty, or contain a
    non-numeric element.

    Improvement: previously this validated both lists and then delegated
    to mse(), which re-ran the exact same validation passes; the
    computation is now self-contained.
    """
    n = len(first_list)
    if n == 0 or n != len(second_list):
        return 0
    for value in first_list + second_list:
        if not isinstance(value, (int, float)):
            return 0
    squared_error = sum((a - b) ** 2 for a, b in zip(first_list, second_list))
    return (squared_error / n) ** 0.5
# Function to compute mse. You cant use Python functions
def mse(first_list, second_list):
    """Mean squared error between two equal-length numeric lists.

    Returns 0 when the lists differ in length, are empty, or contain a
    non-numeric element.
    """
    n = len(first_list)
    if n == 0 or n != len(second_list):
        return 0
    for value in first_list + second_list:
        if not isinstance(value, (int, float)):
            return 0
    total = 0
    for a, b in zip(first_list, second_list):
        total += (a - b) ** 2
    return total / n
# Function to compute mae. You cant use Python functions
def mae(first_list, second_list):
    """Mean absolute error between two equal-length numeric lists.

    Returns 0 when the lists differ in length, are empty, or contain a
    non-numeric element.
    """
    n = len(first_list)
    if n == 0 or n != len(second_list):
        return 0
    for value in first_list + second_list:
        if not isinstance(value, (int, float)):
            return 0
    return sum(abs(a - b) for a, b in zip(first_list, second_list)) / n
# Function to compute NSE. You cant use Python functions
def nse(first_list, second_list):
    """Nash-Sutcliffe efficiency of *second_list* (model) against
    *first_list* (observations).

        NSE = 1 - sum((o_i - m_i)^2) / sum((o_i - mean(o))^2)

    Returns 0 on length mismatch, empty input, non-numeric elements, or
    when the observations are constant -- the zero denominator previously
    raised ZeroDivisionError.
    """
    n = len(first_list)
    if n == 0 or n != len(second_list):
        return 0
    for value in first_list + second_list:
        if not isinstance(value, (int, float)):
            return 0
    mu = sum(first_list) / n
    model_error = sum((o - m) ** 2 for o, m in zip(first_list, second_list))
    obs_spread = sum((o - mu) ** 2 for o in first_list)
    if obs_spread == 0:
        # Constant observations: NSE is undefined; fall back to the
        # module-wide error convention of returning 0.
        return 0
    return 1 - model_error / obs_spread
# Function to compute Pearson correlation coefficient. You cant use Python functions
def pcc(first_list, second_list):
pcc_value = 0
n = len(first_list)
m = len(second_list)
if(n != m):
return 0
if(n == 0):
return 0
for i in first_list:
if(isinstance(i, int) == 0 and isinstance(i, float) == 0):
return 0
for i in second_list:
if(isinstance(i, int) == 0 and isinstance(i, float) == 0):
return 0
ans = 0
mn1 = mean(first_list)
mn2 = mean(second_list)
sm1 = 0
sm2 = 0
sm3 = 0
ans1 = 0
for x, y in zip(first_list, second_list):
i = x - mn1
j = y - mn2
k = i ** 2
l = j**2
t = i*j
sm1 += t
sm2 += k
sm3 += l
sm2 = sm2 ** 0.5
sm3 = sm3 ** 0.5
ans1 = sm2*sm3
ans = sm1/ans1
pcc_value = ans
# nse Logic
return pcc_value
# Function to compute Skewness. You cant use Python functions
def skewness(first_list):
skewness_value = 0
temp_list = first_list.copy()
n = len(temp_list)
if(n == 0):
return 0
for i in temp_list:
if(isinstance(i, int) == 0 and isinstance(i, float) == 0):
return 0
mn = mean(temp_list)
sd = standard_deviation(temp_list)
ans = 0
for i in temp_list:
x = (i-mn)/sd
y = x ** 3
ans += y
ans /= n
skewness_value = ans
# Skewness Logic
return skewness_value
def sorting(first_list):
sorted_list = []
temp_list = first_list.copy()
new_list = []
n = len(first_list)
if(n == 0):
return new_list
i = 0
while i < n:
min = temp_list[0]
if(isinstance(min, int) == 0 and isinstance(min, float) == 0):
return new_list
for x in temp_list:
if x < min:
min = x
sorted_list.append(min)
i += 1
temp_list.remove(min)
# Sorting Logic
temp_list = first_list
return sorted_list
# Function to compute Kurtosis. You cant use Python functions
def kurtosis(first_list):
kurtosis_value = 0
temp_list = first_list.copy()
n = len(temp_list)
if(n == 0):
return 0
for i in temp_list:
if(isinstance(i, int) == 0 and isinstance(i, float) == 0):
return 0
mn = mean(temp_list)
sd = standard_deviation(temp_list)
ans = 0
for i in temp_list:
x = (i-mn)/sd
y = x ** 4
ans += y
ans /= n
kurtosis_value = ans
# Kurtosis Logic
return kurtosis_value
# Function to compute sum. You cant use Python functions
def summation(first_list):
    """Sum of a numeric list, rounded to 3 decimal places.

    Returns 0 for an empty list or when any element is not an int/float.
    """
    if not first_list:
        return 0
    total = 0
    for value in first_list:
        if not isinstance(value, (int, float)):
            return 0
        total += value
    return round(total, 3)
| true |
c2dd15e9377967a39cc6a89047e79d5597037cea | Python | yousef-ctrl/Lib-System | /library_systems.py | UTF-8 | 4,350 | 3.46875 | 3 | [] | no_license | import time
'''
The module communicates between the main file and "library_database.txt".
'''
class Book:
    """Represents a book as a "writer - name" database record."""

    def __init__(self, writer, name):
        """Store the writer (first argument) and name (second argument)."""
        self.name = name
        self.writer = writer
        # Kept from the original implementation: the return value is
        # discarded, this only mirrors the original call order.
        self.edited()

    def edited(self):
        """Return the lower-cased database line "writer - name\\n"."""
        return "{0} - {1}\n".format(self.writer.lower(), self.name.lower())
def decorator(func):
    """Timing decorator: the wrapper returns [func's result, elapsed seconds].

    Bug fix: the wrapped function used to be invoked twice -- once for
    timing and once more for its return value -- doubling any side effects
    and timing a different call than the one whose result was returned.
    """
    def wrapper(obj):
        start = time.time()
        value = func(obj)
        elapsed = time.time() - start
        return [value, elapsed]
    return wrapper
def install(bookobj):
'''
Its parameter must be a book object. Book object is adding with this method in "library database.txt".
'''
with open("library_database.txt", "a", encoding="utf-8") as file:
file.write(bookobj.edited())
sort()
@decorator
def search(obj):
    """Search for an exact "writer - name" entry in library_database.txt.

    Because of @decorator the caller receives [found (bool), seconds].

    Bug fix: the loop used to `return False` inside its else branch after
    inspecting only the first line, so any book not stored on line 1 was
    reported missing. It also returned None for an empty file; False is
    returned now in both cases.
    """
    target = obj.lower().strip() + "\n"
    with open("library_database.txt", "r", encoding="utf-8") as file:
        for line in file:
            if line == target:
                return True
    return False
def removelines(obj):
'''
Its parameter must be a string. It remove the value in "database_library.txt".
'''
with open("library_database.txt", "r", encoding="utf-8") as file:
liste = file.readlines()
obj = obj+"\n"
liste.remove(obj)
with open("library_database.txt", "w+", encoding="utf-8") as file:
for i in liste:
file.write(i)
def sort():
'''
The method alphabetic sort the books.
'''
with open("library_database.txt", "r", encoding="utf-8") as file:
liste = file.readlines()
liste.sort()
with open("library_database.txt", "w+", encoding="utf-8") as file:
file.writelines(liste)
@decorator
def searchName(obj):
'''
This method search its parameter that must be a string in "library_database.txt". Result of the searching is a list data.
First index of the list is False or a list and second index of the list is float because the second index is time of the searching.
Result of searching is unsuccessful if first index is False. If first index is a list Your result of searching is successful and the list is your result.
You can use for search your book with just its name.
'''
with open("library_database.txt", "r", encoding="utf-8") as file:
liste = file.readlines()
results = []
for i in liste:
i = i.split(" - ")
if ( i[1] == (obj.lower().strip() + "\n") ):
results.append(" - ".join(i))
if results==[]:
return False
else:
return results
@decorator
def searchWriter(obj):
'''
This method search its parameter that must be a string in "library_database.txt". Result of the searching is a list data.
First index of the list is False or a list and second index of the list is float because the second index is time of the searching.
Result of searching is unsuccessful if first index is False. If first index is a list Your result of searching is successful and the list is your result.
You can use for search your book with just its writer.
'''
with open("library_database.txt", "r", encoding="utf-8") as file:
liste = file.readlines()
results = []
for i in liste:
i = i.split(" - ")
if ( i[0] == (obj.lower().strip()) ):
results.append(" - ".join(i))
if results==[]:
return False
else:
return results
| true |
365e5e5652a521afe88531c336071b4f8ee3f44d | Python | mcprentiss/password-generator | /password.py | UTF-8 | 727 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python3
"""Generate random passwords."""
# -*- coding: utf-8 -*-
import random
# import q
# tail -f /tmp/q
# @q
# q(fff)
def main():
    """Build a random password from a dictionary word, punctuation and digits.

    Returns the shuffled password string, or None when the system word
    list cannot be read (previously a failed open() fell through and
    crashed with NameError on the unset file handle, and the handle was
    never closed on success).
    """
    try:
        with open('/usr/share/dict/words') as fff:
            sss = [x.strip() for x in fff]
    except IOError:
        # Message path corrected ('word' -> 'words').
        print('/usr/share/dict/words will not open')
        return None
    # Removed a stray debug print of an unrelated random word.
    pword = random.choice(sss).capitalize()
    # First 8 chars of the word + special characters + an 8-digit number,
    # shuffled together.
    lll = list(pword[0:8] + '!@#$%^&*' + str(random.randint(10000000, 99999999)))
    random.shuffle(lll)
    return ''.join(lll)
if __name__ == '__main__':
print(main())
| true |
58d32f0a949900dcf43031ae83ee91db27dc2132 | Python | soloEric/soloCADServer | /Template Tools/PV_Tool/excel_handler.py | UTF-8 | 788 | 3.34375 | 3 | [] | no_license |
from pyxlsb import open_workbook
import csv
# import_sheet finds the specified sheet in the excel_path workbook the sheet is
# iterated through and interpreted as a dataframe (array/table)
# sparse=True skips blank cells (Excel does not define a cell as blank if there is a formula in that cell,
# even if there is no output)
def import_sheet(excel_path, sheet_name, df):
with open_workbook(excel_path) as wb:
with wb.get_sheet(sheet_name) as sheet:
for row in sheet.rows(sparse=True):
df.append([item.v for item in row])
return df
def write_to_csv(csv_file_path, list_to_write):
    """Write *list_to_write* (an iterable of rows) to *csv_file_path*,
    overwriting any existing file. The context manager closes the file."""
    with open(csv_file_path, 'w', newline='') as out_file:
        csv.writer(out_file).writerows(list_to_write)
| true |
4adb0baafe60884bbec219043c855bc6b6fa9037 | Python | pawpiotro/HierarchicalClassifier | /lemma_tokenizer.py | UTF-8 | 1,418 | 2.90625 | 3 | [] | no_license | from nltk import word_tokenize, pos_tag
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords, wordnet
# Module responsible for providing the lemmatization mechanism
def get_wordnet_pos(word):
    """Map the NLTK POS tag of *word* to the matching WordNet POS constant.

    Only the first letter of the Penn Treebank tag is inspected
    (J/N/V/R); any other tag defaults to NOUN.
    """
    tag = pos_tag([word])[0][1][0].upper()
    tag_dict = {"J": wordnet.ADJ,
                "N": wordnet.NOUN,
                "V": wordnet.VERB,
                "R": wordnet.ADV}
    return tag_dict.get(tag, wordnet.NOUN)
class LemmaTokenizer(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, doc):
return [self.wnl.lemmatize(t)
for t in word_tokenize(doc)]
# Full (POS-aware) lemmatization
class LemmaTokenizer2(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, doc):
return [self.wnl.lemmatize(t, get_wordnet_pos(t))
for t in word_tokenize(doc)]
# No need to supply stopwords separately (they are filtered out here)
class LemmaTokenizer3(object):
def __init__(self):
self.wnl = WordNetLemmatizer()
def __call__(self, doc):
return [self.wnl.lemmatize(t, get_wordnet_pos(t))
for t in word_tokenize(doc)
if t not in stopwords.words('english')]
# Stopword lists lemmatized once at import time:
# plain lemmatization, and POS-aware lemmatization (calls pos_tag per word).
lemma_stopwords = [WordNetLemmatizer().lemmatize(t) for t in stopwords.words('english')]
lemma_stopwords2 = [WordNetLemmatizer().lemmatize(t, get_wordnet_pos(t)) for t in stopwords.words('english')]
| true |
8a277fa67d0817d90afb4c921f14e85199e065ce | Python | improvbutterfly/python-challenge | /PyPoll/main.py | UTF-8 | 2,616 | 3.578125 | 4 | [] | no_license | # Program to read CSV with "voter ID," "county" and "candidate" data, analyse results, and output to screen and text file
# Authored by Dominica Corless
# import the os module to join the file paths across operating systems
import os
# import Module for reading CSV files
import csv
# Input CSV path (expected columns: Voter ID, County, Candidate).
election_path = os.path.join('.', 'Resources', 'election_data.csv')
# Path for output text file
election_results = os.path.join('.', 'analysis', 'election_results.txt')
# Create a dictionary to store the votes for each candidate
candidates = {}
# Open CSV file
with open(election_path) as election_file:
    # read the CSV file
    election_reader = csv.reader(election_file, delimiter=',')
    # Store header info (also advances past the header row)
    csv_header = next(election_reader)
    # Initialize variables
    total_votes = 0
    for row in election_reader:
        # store data in row
        voter_ID = row[0]
        county = row[1]
        candidate = row[2]
        total_votes = total_votes + 1
        # Check if candidate already exists in dictionary
        if candidate in candidates:
            # Add a new vote
            votes = candidates[candidate] + 1
            # Update key
            update_candidate = {candidate: votes}
            candidates.update(update_candidate)
        # If candidate does not exist in dictionary, add them
        else:
            candidates.update({candidate: 1})
# Open file to write results
# NOTE(review): consider a with-block here — if any later write raises,
# this handle is never closed.
write_file = open(election_results, 'w')
# Print election results to screen and text file
print("Election Results")
print("----------------------------")
write_file.write(f"Election Results\n")
write_file.write(f"----------------------------\n")
# Print total votes
print(f"Total Votes: {total_votes}")
print("----------------------------")
write_file.write(f"Total Votes: {total_votes}\n")
write_file.write(f"----------------------------\n")
# Initialize comparison vote counter
most_votes = 0
# Print data from candidate dictionary
# NOTE(review): ties keep the first candidate seen; `winner` is undefined
# (NameError below) if the CSV contains no data rows.
for candidate, votes in candidates.items():
    # Calculate percentage of vote
    vote_percentage = "{:.3f}".format(votes / total_votes * 100)
    # Determine if candidate has more votes than previous candidate
    if votes > most_votes:
        most_votes = votes
        winner = candidate
    # Print candidate data to screen
    print(f"{candidate}: {vote_percentage}% ({votes})")
    # Print candidate data to text file
    write_file.write(f"{candidate}: {vote_percentage}% ({votes})\n")
# Print winner to screen
print("----------------------------")
print(f"Winner: {winner}")
print("----------------------------")
# Print winner to text file
write_file.write(f"----------------------------\n")
write_file.write(f"Winner: {winner}\n")
write_file.write(f"----------------------------\n")
# Close text file
write_file.close()
| true |
f0a1f4db7da95aa7fa560b95c29732e916c0a69a | Python | mlnrv/test_webim | /tests/test_connect_as_operator.py | UTF-8 | 1,730 | 2.59375 | 3 | [] | no_license | import unittest
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from test_open_chat import load_data
class TestConnectAsOperator(unittest.TestCase):
    """Selenium UI test: log in to the Webim demo as an operator and reply
    in the chat opened by the visitor created in test_open_chat (load_data).

    NOTE(review): fixed sleeps are brittle — consider WebDriverWait with
    expected conditions instead.
    """
    def setUp(self):
        # Demo operator panel URL and hard-coded demo credentials.
        self.link_operator = "http://demo.webim.ru/webim/"
        self.operator = {
            "email": "o@webim.ru",
            "pw": "password"
        }
        self.driver = webdriver.Firefox()
        self.driver.get(self.link_operator)
        # fill login fields
        self.driver.find_element_by_id("login_or_email").send_keys(self.operator["email"])
        self.driver.find_element_by_id("password").send_keys(self.operator["pw"])
        self.driver.find_element_by_id("is_remember").click()
        self.driver.find_element_by_css_selector("button.btn-primary").click()
        self.driver.find_element_by_css_selector(
            ".nav > li:nth-child(1) > a:nth-child(1)").click() # go to the operator panel
    def test_06_operator_replies(self):
        # Reply in the chat whose visitor name matches the shared fixture.
        time.sleep(2) # while page with clients will be updated
        user_name = load_data["name"]
        chats = self.driver.find_elements_by_class_name("visitor-name") # SE objects of different chats
        # NOTE(review): clicks every chat whose name matches, not only the first.
        for chat_name in chats:
            if chat_name.text == user_name:
                chat_name.click() # go to the chat with our user
        time.sleep(1.5) # wait while loading chat
        self.driver.find_element_by_css_selector(".chat_message_textarea").send_keys("молодец")
        self.driver.find_element_by_css_selector(".chat_message_textarea").send_keys(Keys.ENTER)
        time.sleep(2)
    def tearDown(self):
        # Close the browser window opened in setUp.
        self.driver.close()
if __name__ == "__main__":
    unittest.main()
| true |
7a76ae9f0bedf7709dd2fef8aa27d6d8969d43d6 | Python | wakabame/kyopro_educational_90 | /pysol/013.py | UTF-8 | 2,870 | 3.359375 | 3 | [] | no_license | """
AGC 王国には N 個の交差点があり、それぞれ 1, 2, 3, ..., N と番号付けられています。
また M 本の道路があり、道路 i は交差点 A[i] と交差点 B[i] を双方向に結び、交差点間の移動にかかる時間は C[i] 秒です。
今日から数えて i (1≦i≦N) 日目には交差点 i でイベントが開催されるため、移動の際には交差点 i を経由しなければなりません、
i = 1, 2, 3, ..., N それぞれについて、i 日目に交差点 1 から交差点 N まで移動するのにかかる時間の最小値を求めてください。
【制約】
・2 ≦ N ≦ 100000 ← こっちが正しいです!
・1 ≦ M ≦ 100000
・1 ≦ A[i] < B[i] ≦ N
・(A[i], B[i]) ≠ (A[j], B[j]) [i ≠ j]
・1 ≦ C[i] ≦ 10000
・いくつかの道路を通って、都市 1 から都市 N までたどり着ける
・入力はすべて整数
"""
# For each i, the answer is dist(1 -> i) + dist(i -> N).
# Floyd-Warshall on all pairs would be O(N^3) and too slow, so instead run
# Dijkstra twice: once from vertex 1 and once from vertex N.
# Fix: the two hand-duplicated Dijkstra passes are factored into one helper.
from heapq import heappush, heappop

N, M = map(int, input().split())
adj = [[] for _ in range(N)]  # adj[u] = list of (v, cost) edges leaving u
for _ in range(M):
    a, b, c = map(int, input().split())
    adj[a - 1].append((b - 1, c))
    adj[b - 1].append((a - 1, c))


def dijkstra(source):
    """Return the list of shortest distances from `source` to every vertex."""
    dist = [float("inf")] * N
    dist[source] = 0
    confirmed = [False] * N  # whether a vertex's distance is final
    hq = [(0, source)]  # heap of (distance-so-far, vertex); distance first for ordering
    while hq:
        d, v = heappop(hq)
        if confirmed[v]:
            continue
        confirmed[v] = True
        for w, c in adj[v]:
            if not confirmed[w] and d + c < dist[w]:
                dist[w] = d + c
                heappush(hq, (dist[w], w))
    return dist


distance_from_start = dijkstra(0)          # shortest distances from vertex 1
distance_from_destination = dijkstra(N - 1)  # shortest distances from vertex N

for i in range(N):
    print(distance_from_start[i] + distance_from_destination[i])
| true |
9ddcbb438c4f1ec603589601571e0af8c4231398 | Python | madhavkhoslaa/skclone | /skclone/Regression/Linear.py | UTF-8 | 967 | 3.71875 | 4 | [] | no_license |
class Linear():
    """Least-squares (simple linear) regression fitted onto this instance."""

    def __init__(self):
        # Slope and intercept of the fitted line.
        self.coef = 0
        self.bias = 0

    def fit(self, X, y):
        """Fit slope/intercept by ordinary least squares on X and y."""
        assert len(X) == len(
            y), "X and y shapes are not same , x:shape={} and y:shape={}".format(len(X), len(y))
        self.X = X
        self.y = y
        mean_x = sum(X) / len(X)
        mean_y = sum(y) / len(y)
        # covariance-like numerator and variance-like denominator
        numerator = sum((xi - mean_x) * (yi - mean_y) for xi, yi in zip(self.X, self.y))
        denominator = sum((xi - mean_x) ** 2 for xi in self.X)
        self.coef = numerator / denominator
        self.bias = mean_y - self.coef * mean_x

    def predict(self, data):
        """Accepts a list or a tuple of data and sends the corresponding y values"""
        return map(lambda x: self.coef * x + self.bias, data)
| true |
b93eecaebf76ed8781b17ad6a1513544e6460020 | Python | qiding321/my_strategy1_ofpn | /test/decision_tree_regressor.py | UTF-8 | 2,114 | 3.03125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on 2016/9/28 10:19
@author: qiding
"""
import numpy as np
import pandas as pd
import statsmodels.api as sm
def generate_random_normal(size, mu, std):
    """Draw normal samples of shape `size` (mean mu, std dev std) and wrap
    them in a pandas DataFrame."""
    draws = np.random.normal(loc=mu, scale=std, size=size)
    return pd.DataFrame(draws)
def generate_sample(func, length):
    """Simulate a sample of `length` rows: two standard-normal features plus
    small normal noise, transformed by `func`. Returns (y, x)."""
    feature_mu, feature_std = 0, 1
    noise_mu, noise_std = 0, 0.1
    x = generate_random_normal([length, 2], mu=feature_mu, std=feature_std)
    err = generate_random_normal(length, mu=noise_mu, std=noise_std)[0]
    return func(x, err), x
def cal_r_squared(y_raw, y_predict, y_training):
    """R-squared of `y_predict` against `y_raw`, using the training-set mean
    as the baseline: 1 - MSE(residuals) / MSE(y_raw vs training mean)."""
    deviation = y_raw - y_training.mean()
    baseline_mse = (deviation * deviation).mean()
    residual = y_raw - y_predict
    model_mse = (residual * residual).mean()
    return 1 - model_mse / baseline_mse
def main():
    """Generate a synthetic binary-classification sample and fit a logit model.

    The large commented-out section is an earlier decision-tree / AdaBoost
    experiment kept for reference.
    NOTE(review): `predict_y` is computed but never used or printed.
    """
    # transform function
    # func = lambda x, err: x[0] * x[0] + x[1] * x[1] + err
    func = lambda x, err: np.where((x[0] + x[1] + err) > 0, [1] * len(x[0]), [0] * len(x[0]))
    # data generation
    training_length = 10000
    training_y, training_x = generate_sample(func=func, length=training_length)
    testing_length = 1000
    testing_y, testing_x = generate_sample(func=func, length=testing_length)
    # training and fitting
    # rng = np.random.RandomState(1)
    # max_depth = 10
    # # training_model = DecisionTreeRegressor(max_depth=max_depth)
    # training_model = AdaBoostRegressor(DecisionTreeRegressor(max_depth=max_depth), n_estimators=300, random_state=rng)
    # training_model.fit(training_x, training_y)
    # y_predict = training_model.predict(training_x)
    # y_predict_oos = training_model.predict(testing_x)
    #
    # # r-squared
    # r_sq_in_sample = cal_r_squared(y_raw=training_y, y_predict=y_predict, y_training=training_y)
    # r_sq_out_of_sample = cal_r_squared(y_raw=testing_y, y_predict=y_predict_oos, y_training=training_y)
    # print('rsq_in_sample: {}\nrsq_out_of_sample: {}'.format(r_sq_in_sample, r_sq_out_of_sample))
    model = sm.Logit(endog=training_y, exog=training_x)
    params = model.fit()
    predict_y = model.predict(params=params.params, exog=training_x)

if __name__ == '__main__':
    main()
| true |
36e84abb46672578b90d69930c4b969953113ab2 | Python | StoveJunJun/RaspiCar | /moto.py | UTF-8 | 376 | 2.765625 | 3 | [] | no_license | from time import sleep
import RPi.GPIO as GPIO
# GPIO library setup: BCM (Broadcom) pin numbering, channel-reuse warnings off.
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Output pins (BCM numbers) — presumably pan and tilt servos ("titl" looks
# like a typo for "tilt"); confirm against the wiring.
pan=17
titl=4
GPIO.setup(pan,GPIO.OUT)
GPIO.setup(titl,GPIO.OUT)
def setServoAngle(servo,angle):
    """Drive the servo on GPIO pin `servo` to `angle` degrees (0-150).

    Starts a 50 Hz PWM signal, applies the duty cycle for the requested
    angle, holds it for 0.3 s, then stops the signal.
    NOTE(review): `assert` is stripped under `python -O`; raise ValueError
    for real input validation.
    """
    assert angle >=0 and angle<=150
    # 50 Hz PWM; a fresh PWM object is created on every call.
    pwm=GPIO.PWM(servo,50)
    pwm.start(8)
    # Map degrees to a duty-cycle percentage: angle/18 + 3.
    dutyCycle=angle/18.+3.
    pwm.ChangeDutyCycle(dutyCycle)
    sleep(0.3)
    pwm.stop()
| true |
0613e9dcc34e92d0762cc4a7eaec2a36f368334e | Python | takecian/ProgrammingStudyLog | /LeetCode/400/410.py | UTF-8 | 642 | 2.953125 | 3 | [] | no_license | class Solution:
def splitArray(self, nums: List[int], m: int) -> int:
l = max(nums)
r = sum(nums)
target = m
ans = r
while l <= r:
mid = (l + r) // 2
count = 1
total = 0
for num in nums:
if total + num > mid:
count += 1
total = num
else:
total += num
# print(l, r, mid, count, target)
if count <= target:
ans = min(ans, mid)
r = mid - 1
else:
l = mid + 1
return ans | true |
43157abe0a09226c347959ad55a74e9c4344e704 | Python | jhallen978/dijkstras | /shortestpath.py | UTF-8 | 2,711 | 3.828125 | 4 | [] | no_license | '''
Implementation of Dijkstra's shortest path algorithm
@author: Jonathan Allen
'''
import sys
'''
Generates the shortest path from startingVertex to all other
vertices specified in the graph in inputFile.
'''
def main(inputFile, startingVertex):
    '''
    input file containing directed-graph with positive weights
    file contents is
    [begin vertex]  [end vertex]  [cost]
    '''
    graph = open(inputFile)
    '''
    an initially empty dictionary containing mapping
    [vertex]:[adjacency list]
    '''
    adjacency = { }
    '''
    The following reads in the input file
    and constructs an adjacency list of
    the graph.
    '''
    for line in graph:
        entry = line.split()
        if entry[0] not in adjacency:
            adjacency[entry[0]] = []
        # construct an edge for the adjacency list
        edge = (entry[1], int(entry[2]))
        adjacency[entry[0]].append(edge)
    '''
    output the adjacency list
    '''
    for v in adjacency:
        print(v, adjacency[v])
    '''
    YOUR LOGIC WILL GO HERE
    '''
    # distance: vertex -> best-known distance; previous: vertex -> predecessor.
    distance = {}
    previous = {}
    unvisited = []
    current = startingVertex
    for v in adjacency:
        distance[v] = float("inf")
        unvisited.append(v)
    # Also register sink-only vertices (those that appear only as edge targets).
    for v in unvisited:
        if v in adjacency:
            for x in adjacency[v]:
                if x[0] not in unvisited:
                    unvisited.append(x[0])
                    distance[x[0]] = float("inf")
    distance[startingVertex] = 0
    # NOTE(review): this relaxation destructively pops edges out of `adjacency`,
    # and `min` shadows the builtin. The selection loop below resets
    # nextVertexNotFound to True after each pass, which looks like it can spin
    # forever once every remaining unvisited vertex is in `adjacency` — verify
    # against a sample graph before relying on this.
    while(unvisited):
        unvisited.remove(current)
        while(adjacency[current]):
            temp = adjacency[current].pop()
            if distance[temp[0]] > (temp[1] + distance[current]):
                distance[temp[0]] = temp[1] + distance[current]
                previous[temp[0]] = current
        min = float("inf")
        nextVertexNotFound = True
        while(nextVertexNotFound and unvisited):
            #print((min > distance[v]) and (v in unvisited))
            for v in distance:
                if ((min > distance[v]) and (v in unvisited)):
                    if v in adjacency:
                        min = distance[v]
                        current = v
                        nextVertexNotFound = False
                    else:
                        unvisited.remove(v)
            nextVertexNotFound = True
            min = float("inf")
    print(distance)
    print(previous)

if __name__ == '__main__':
    if len(sys.argv) != 3:
        print('Usage python shortestpath.py [input file] [starting vertex]')
        quit()
    main(sys.argv[1], sys.argv[2])
| true |
a13686c8ef2c1fef8a5cbddbe1c249fc9cd38ecb | Python | hackergong/Python-TrainingCourseLearning | /day004/1-装饰器.py | UTF-8 | 1,615 | 4.375 | 4 | [] | no_license | '''
装饰器
'''
#1
'''
def func1():
print("tracy is a good man")
def outer():
print("1")
func1()
outer()
'''
#2
'''
def func1():
print("tracy is a good man")
def outer(func):
print("2")
func()
func5 = outer(func1)
outer(func1)
'''
#3
'''
#将func1赋给outer的形参,然后函数体内调用func(){即调用func1函数}
def func1():
print("tracy is a good man")
def outer(func):
def inner():
print("3")
func()
return inner
#f是函数func1的加强版本,用inner对func1进行装饰。
f = outer(func1)
f()
# 将inner赋给f,则f具有了该函数的功能,可输出print("3"),以及func()
'''
#4
'''
# 复杂装饰器
# 当say函数不能够修改时,则需要使用装饰器
def outer(func):
def inner(age):
if age < 0:
age = 0
func(age)
return inner
# @的使用将装饰器应用到函数
# @在python2.4支持使用@符号
@outer #say = outer(say)
def say(age):
print("Tracy is a %d years old" % (age))
# say_dec = outer(say)
# 直接使用封闭的函数名,而不需要使用装饰器的函数名
say(-10)
'''
#5
##通用装饰器
'''
def outer(func):
def inner(*args,**kwargs):#可以传任意参数
#添加修饰的功能
print("&&&&&&&")
func(*args,**kwargs)
return inner
@outer #say = outer(say)
def say(name,age):
print("my name is %s,I'm %d years old" % (name,age))
say("tracy",18)
'''
#函数的参数理论上是无限制的,但实际上不要超过6-7个
#say函数不允许修改,所以使用outer作为装饰器来添加一个print
| true |
cc1cd74db02c264c2574f613be31187c0d042984 | Python | RiksEddy/HackMIT-EduCast-4Idiots | /EduBox/flaskApp/upload.py | UTF-8 | 2,809 | 2.75 | 3 | [] | no_license | from flask import Flask, render_template, request, redirect, url_for, abort #imports
from werkzeug.utils import secure_filename
import os
app = Flask(__name__) #create instance of server app
app.config['UPLOAD_PATH'] = '/home/pi/EduBox/Storage'
### URL to Upload One File ###
@app.route('/basic/') #default GET request url route
def index(): #display index.html for above url
    return render_template('index.html')
@app.route('/basic/', methods=['POST']) #POST request with same url route
def upload_file(): #store uploaded_file, save file, redirect back to index.html
    uploaded_file = request.files['file'] #'file' is name of input tag in html
    if uploaded_file.filename != '':
        uploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], uploaded_file.filename))
    return redirect(url_for('index'))
### URL to Upload File with Responsive HTML Design and single function for all request methods###
@app.route('/responsive/', methods=['GET','POST']) #Requests for responsive url
def responsive(): #store uploaded_file, redirect back to index_responsive.html
    if request.method == 'POST':
        uploaded_file = request.files['file']
        if uploaded_file.filename != '':
            uploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], uploaded_file.filename))
        return redirect(url_for('responsive'))
    return render_template('responsive_index.html')
### URL to Upload Multiple Image and Video Files Only ###
@app.route('/multiple/', methods=['GET','POST']) #Requests and url route
def multiple(): #store multiple files, redirect to accept_multiple.html
    # NOTE(review): filenames are used unsanitized here — unlike secure() below,
    # which passes them through secure_filename().
    if request.method == 'POST':
        for uploaded_file in request.files.getlist('images_and_videos'):
            if uploaded_file.filename != '':
                uploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], uploaded_file.filename))
        return redirect(url_for('multiple'))
    return render_template('accept_multiple.html')
### URL to Securely Upload Image and Video Files - USE THIS ONE FOR UPLOAD###
@app.route('/', methods=['GET','POST']) #Requests and url route
def secure(): #store multiple files, redirect to multiple_css.html
    if request.method == 'POST':
        for uploaded_file in request.files.getlist('images_and_videos'):
            filename = secure_filename(uploaded_file.filename)
            if filename != '':
                file_ext = os.path.splitext(filename)
                # NOTE(review): this only renames .MOV -> .mp4; the file is
                # NOT transcoded, so the contents stay QuickTime data.
                if file_ext[1] == ".MOV":
                    filename = file_ext[0] + ".mp4"
                uploaded_file.save(os.path.join(app.config['UPLOAD_PATH'], filename))
        return redirect(url_for('secure'))
    return render_template('multiple_css.html') #same as multiple but w/ style
if __name__ == '__main__':
    # NOTE(review): debug mode and 0.0.0.0 expose the debugger to the network;
    # disable for production.
    app.debug = True
    app.run(host = '0.0.0.0',port=5000)
| true |
4fe704f3b297bcb6589ea126f4a250f09d49b310 | Python | topcnm/python-selenium-learn | /demo/open_baidu_unittest_search.py | UTF-8 | 1,328 | 3.03125 | 3 | [] | no_license | # coding:utf-8
"""
编程语言 python --------------------------
| |
| 测试工具selenium
| |
| web驱动webdriver + 浏览器驱动Chrome
| |
| 测试案例 本例
|
断言 unittest
"""
from selenium import webdriver
from selenium.webdriver.common.by import By
import unittest
import time
class OpenBaidu(unittest.TestCase):
    """Selenium UI tests: open baidu.com and run a search, asserting on the
    page title in each case."""
    def setUp(self):
        # Fresh Chrome session per test, pointed at the Baidu home page.
        self.driver = webdriver.Chrome()
        self.driver.implicitly_wait(10)
        self.driver.maximize_window()
        self.driver.get("http://www.baidu.com")
    def tearDown(self):
        self.driver.quit()
    # Locators for the search box and search button (shared by the tests).
    search_input_loc = (By.ID, 'kw')
    search_button_loc = (By.ID, 'su')
    def test_open_baidu(self):
        # Title check: "Baidu it, and you'll know" (msg: "can't open it!!").
        self.assertEqual(self.driver.title, u'百度一下,你就知道', msg='打不开啊!!')
    def test_search_baidu(self, keywords='selenium'):
        # NOTE(review): unittest never passes `keywords`; only the default
        # 'selenium' is ever used.
        self.driver.find_element(*self.search_input_loc).send_keys(keywords)
        self.driver.find_element(*self.search_button_loc).click()
        time.sleep(2)
        self.assertEqual(
            self.driver.title,
            u"{}_百度搜索".format(keywords),
            msg=u'搜索失败')
if __name__ == '__main__':
    unittest.main()
| true |
e8db3598255bd43dc85de1e2a639c304a8524bce | Python | elrossco2/LearnPython | /CH3/HW/TheSumN.py | UTF-8 | 960 | 4 | 4 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Thu Feb 4 13:15:32 2016
@author: kiaph
This small script will sum numbers from 0 to your desired value n.
"""
def main():
    """Prompt for n and print the sum 0 + 1 + ... + n.

    Fix: the original fell through to the summing loop for negative input,
    and `while i != n` with i counting upward never terminates when n < 0
    (infinite loop). Negative input now returns after the error message.
    """
    print(" This program will sum numbers starting from 0 ending at n. ")
    # NOTE(review): eval() on raw user input can execute arbitrary code;
    # prefer int(input(...)) if only integers are expected.
    n = eval(input("Please enter a value for n: "))
    if n < 0:
        print("Please try again, without use of a negative number.")
        return
    if n == 0:
        print("The sum of numbers between 0 and 0 is 0")
        print("Do you really need help with such a thing?")
        print("Please try harder next time.")
        return
    total = 0
    for i in range(1, n + 1):
        total = total + i
    print(total, "is the sum of the numbers between 0 and ", n)

main()
e5376df17ba2ad4bc31c917d5eee6e5740e2030a | Python | aaronshawcs/Natural-Language-Processing | /assn1 - Tokenizer/q1/Q1P2.py | UTF-8 | 7,062 | 3.21875 | 3 | [] | no_license | def tokenizer(tokens, text):
word = []
state = "not in word"
carriagereturn = "false"
for letter in text:
if (letter == '\r') or (letter == '\n'):
if carriagereturn == "false":
carriagereturn = "true"
addtoken(tokens, ''.join(word))
if state == "period":
addtoken(tokens, ".")
state = "not in word"
else:
carriagereturn = "false"
if letter != '"':
if (state == "in word") or (state == "period"):
if letter == " ":
addtoken(tokens, ''.join(word))
if state == "period":
addtoken(tokens, ".")
state = "not in word"
elif (letter == ",") or (letter == "?") or (letter == "!") or (letter == ";"):
addtoken(tokens, ''.join(word))
addtoken(tokens, letter)
state = "not in word"
elif letter == ".":
if state == "period":
skipnextletter = "true"
addtoken(tokens, ''.join(word))
addtoken(tokens, "...")
state = "not in word"
elif len(word) > 1:
if (word[0] == "m") and ((word[1] == "r") or (word[1] == "s")):
word.append(letter.lower())
addtoken(tokens, ''.join(word))
state = "not in word"
else:
state = "period"
else:
state = "period"
else:
if state == "period":
word.append(".")
state = "in word"
word.append(letter.lower())
else:
if letter is not " ":
word = []
state = "in word"
word.append(letter.lower())
return tokens
def addtoken(inlist, intoken):
    """Append one token to the token list (modifies `inlist` in place)."""
    inlist.append(intoken)
def sentencecount(inlist):
    """Count sentence terminators ('.', '!', '?') among the tokens."""
    return sum(1 for token in inlist if token in (".", "!", "?"))
def wordtypes(inlist):
    """Build a frequency table of the tokens.

    Returns a list of [token, count] pairs sorted by count, descending;
    ties keep first-occurrence order (dict insertion order + stable sort),
    matching the original behaviour.
    Fix: replaces the original O(n^2) vocabulary scan (list membership test
    plus inner search per token) with a single O(n) dict pass.
    """
    counts = {}
    for word in inlist:
        counts[word] = counts.get(word, 0) + 1
    tuples = [[word, count] for word, count in counts.items()]
    return sorted(tuples, key=lambda pair: pair[1], reverse=True)
def stopwordtypes(inlist, stoplist, punctuationlist):
    """Frequency table restricted to stopword and punctuation tokens.

    Returns [token, count] pairs sorted by count, descending; ties keep
    first-occurrence order.
    Fix: the original tested membership against the module-level global
    `punctuation` instead of the `punctuationlist` parameter, and used the
    same O(n^2) scan as wordtypes; both are corrected here.
    """
    counts = {}
    for word in inlist:
        if word in stoplist or word in punctuationlist:
            counts[word] = counts.get(word, 0) + 1
    tuples = [[word, count] for word, count in counts.items()]
    return sorted(tuples, key=lambda pair: pair[1], reverse=True)
def mostcommon(num, inlist, wordcount):
    """Print the top `num` entries of a [token, count] table, each with its
    rank, count, and percentage of `wordcount` (rounded to 2 decimals)."""
    for rank in range(num):
        token = inlist[rank][0]
        count = inlist[rank][1]
        share = round((100.0 * count) / wordcount, 2)
        print("\t{0}:\t{1}\t{2}\t({3}%)".format(rank + 1, token, count, share))
    return
def removestops(inlist, instops):
    """Return the tokens from `inlist` that do not appear in `instops`."""
    return [token for token in inlist if token not in instops]
def depunctuate(inlist, inpunctuation):
    """Return the tokens from `inlist` that are not punctuation tokens."""
    return [token for token in inlist if token not in inpunctuation]
def singletons(inlist):
    """Return the [token, count] entries whose count is exactly 1."""
    return [entry for entry in inlist if entry[1] == 1]
def containsdigits(inword):
    """True when at least one character of the token is a digit."""
    return any(ch.isdigit() for ch in inword)
def tokenswithdigits(inlist):
    """Return the tokens that contain at least one digit character."""
    return [token for token in inlist if any(ch.isdigit() for ch in token)]
def containsalphas(inword):
    """True when at least one character of the token is alphabetic."""
    return any(ch.isalpha() for ch in inword)
def alphanums(inlist):
    """Return the tokens that contain at least one alphabetic character."""
    return [token for token in inlist if any(ch.isalpha() for ch in token)]
def tokenswithpunc(inlist):
    """Return the tokens that are not purely alphanumeric."""
    return [token for token in inlist if not token.isalnum()]
def pairs(inlist):
    """Return the consecutive (token, next_token) pairs of the list.

    Fix: the original indexed inlist[index + 1] before checking the bound,
    raising IndexError for a single-element list. zip(inlist, inlist[1:])
    handles empty and single-element inputs naturally and returns the same
    list of tuples for longer inputs.
    """
    return list(zip(inlist, inlist[1:]))
# Driver script: tokenize the corpus, filter it, and print the Q1 statistics.
# Reads sample.txt and stopwords.txt from the working directory.
print("generating token list...")
tokenlist = tokenizer([], open('sample.txt', 'r').read())
print("generating stop words...")
stopwords = tokenizer([], open('stopwords.txt', 'r').read())
punctuation = ['.',',','?','!',':',';']
print("filtering tokens...")
filteredlist = depunctuate((removestops(tokenlist, stopwords)), punctuation)
print("generating token pairs...")
tokenpairs = pairs(filteredlist)
print("generating vocabulary...")
vocabulary = wordtypes(filteredlist)
print("generating fullvocab...")
# Full vocabulary = content words plus stopword/punctuation counts, re-sorted.
fullvocab = vocabulary + stopwordtypes(tokenlist, stopwords, punctuation)
fullvocab = sorted(fullvocab, key=lambda tup: tup[1], reverse=True)
print("a. Number of Sentences: {0}".format(sentencecount(tokenlist)))
print("b. Number of word tokens: {0}".format(len(tokenlist)))
print("c. Number of word types: {0}".format(len(vocabulary)))
print("d. Word count and percentage of the 100 most frequent tokens in the vocabulary (including stopwords and punctuations):")
mostcommon(100, fullvocab, len(tokenlist))
print("e. Word count and percentage of the 100 most frequent tokens in the vocabulary (excluding stopwords and punctuations):")
mostcommon(100, vocabulary, len(filteredlist))
print("f. Number of singletons in the corpus: {0}".format(len(singletons(vocabulary))))
print("g. Number of tokens in the vocabulary containing digits [0-9]: {0}".format(len(tokenswithdigits(filteredlist))))
print("h. Number of tokens in the vocabulary containing punctuation: {0}".format(len(tokenswithpunc(tokenlist))))
print("i. Number of tokens in the vocabulary containing both alpha [A-Za-z] and numerics [0-9]: {0}".format(len(alphanums(tokenswithdigits(filteredlist)))))
print("j. Frequency and percentage of the 100 most frequent word pairs in the corpus (excluding stopwords and punctuations):")
mostcommon(100, wordtypes(tokenpairs), len(tokenpairs))
| true |
69d066521d2efef8a316f1c2ab1944940c73f6cb | Python | Avigdor-Kolonimus/ScanProcess | /2/main.py | UTF-8 | 15,625 | 2.71875 | 3 | [
"MIT"
] | permissive | #-------------------------------------------------------------------------------------------------------------------------------------------------------------
#The main for Assignment 2 of Laboratory protection
#@Author: Alexey Titov
#@Version: 4.0
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#libraries
import os
import re
import time
from datetime import datetime
from sys import platform
from subprocess import Popen, PIPE, check_output
from classes.process import Process
if platform == "linux" or platform == "linux2":
import psutil
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#First laboratory
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
# Prompt until the user supplies a positive integer; return it.
def getNumeric(message):
    while True:
        answer = input(message)
        try:
            value = int(answer)
        except ValueError:
            print("Please enter a number")
            continue
        if value > 0:
            return value
        print("Please enter a number >0")
#the function gives the running processes as a list of Process objects
def get_processes_running():
    """
    Snapshot the running processes. On Linux, iterate psutil; on other
    platforms, parse the output table of the Windows `tasklist` command.
    Returns a list of Process objects.
    """
    p = [] #array of processes
    if platform == "linux" or platform == "linux2":
        for proc in psutil.process_iter():
            try:
                # session name/number and memory are not collected on Linux (0, 0)
                tmp=Process(proc.name(),int(proc.pid),proc.username(),int(0),int(0))
                p.append(tmp)
            except:
                # process may have exited between iteration and attribute access
                continue
        return (p)
    # Windows path: tasklist output decoded as cp866 and split into rows.
    tasks = check_output(['tasklist']).decode('cp866', 'ignore').split("\r\n")
    for task in tasks:
        # columns: image name, PID, session name, session number, memory usage
        m = re.match(b'(.*?)\\s+(\\d+)\\s+(\\w+)\\s+(\\w+)\\s+(.*?)\\s.*', task.encode())
        if m is not None:
            tmp=Process(m.group(1).decode(),int(m.group(2).decode()),m.group(3).decode(),int(m.group(4).decode()),int(m.group(5).decode('ascii', 'ignore')))
            p.append(tmp)
            #m.group(1) image name, m.group(2) process id, m.group(3) session_name,
            #m.group(4) session_num, m.group(5) memory usage
    return(p)
#the function creates the Scanner folder if needed and archives old log files
def RemCreFiles():
    """Ensure the Scanner folder exists, then rename any previous
    processList.txt / Status_Log.txt with a timestamp prefix so a fresh
    pair of logs can be started."""
    old_file1 = os.path.join("Scanner","processList.txt")
    old_file2 = os.path.join("Scanner","Status_Log.txt")
    prefix=datetime.strftime(datetime.now(), "%Y_%m_%d %H_%M_%S")
    new_file1 = os.path.join("Scanner",prefix+"_processList.txt")
    new_file2 = os.path.join("Scanner",prefix+"_Status_Log.txt")
    folder="Scanner"
    #if folder does not exist yet, create it
    try:
        os.stat(folder)
    except:
        os.mkdir(folder)
    #rename old files (silently give up if they do not exist)
    try:
        os.rename(old_file1,new_file1)
        os.rename(old_file2,new_file2)
    except:
        return
#the function appends the given process list to Scanner//processList.txt
def WriteToProcessList(lst):
    """Append a timestamped snapshot of the Process objects in `lst` to
    the processList.txt log. On Ctrl-C, close the file and exit."""
    try:
        fileProcessList="Scanner//processList.txt"
        my_file = open(fileProcessList, "a")
        my_file.write("------------------------------------------------------------------------------------------------------\n")
        my_file.write("Time: %r \n" %datetime.strftime(datetime.now(),"%Y.%m.%d %H:%M:%S"))
        my_file.write("------------------------------------------------------------------------------------------------------\n")
        for p in lst:
            my_file.write("%r\n" %p.toString())
        my_file.close()
        return
    except KeyboardInterrupt:
        print('\n\nKeyboard exception received. Exiting.')
        my_file.close()
        exit()
# Snapshot the processes running now, log them, and return the list.
def GetSetList():
    processes = get_processes_running()
    WriteToProcessList(processes)
    return processes
#the function monitors old and new processes and logs the differences
def SetStatus(old,new,run,flag):
    """Append a status entry (timestamp `run`) to Status_Log.txt.

    `flag` reports file-tampering detected by HackerModFile (1 = Status_Log,
    2 = processList, 3 = both). Processes in `old` but not `new` are logged
    as killed; processes in `new` but not `old` as new. Returns `new`.
    NOTE(review): the parameter `flag` is reused below as a loop-local
    found-marker once the tamper checks are done.
    """
    try:
        fileStatus_Log="Scanner//Status_Log.txt"
        my_file = open(fileStatus_Log, "a")
        my_file.write("------------------------------------------------------------------------------------------------------\n")
        my_file.write("Time: %r \n" %run)
        my_file.write("------------------------------------------------------------------------------------------------------\n")
        #someone changed Status_Log.txt file
        if (flag==1):
            my_file.write("DANGER! someone could change or delete Status_Log.txt\n")
            print("----------------------------\nDANGER! someone could change or delete Status_Log.txt\n-------------------------------\n")
        #someone changed processList.txt file
        if (flag==2):
            my_file.write("DANGER! someone could change or delete processList.txt\n")
            print("----------------------------\nDANGER! someone could change or delete processList.txt\n-------------------------------\n")
        #someone changed processList.txt and Status_Log.txt files
        if (flag==3):
            my_file.write("DANGER! someone could change or delete processList.txt\n")
            my_file.write("DANGER! someone could change or delete Status_Log.txt\n")
            print("----------------------------\nDANGER! someone could change or delete Status_Log.txt\n-------------------------------\n")
            print("----------------------------\nDANGER! someone could change or delete processList.txt\n-------------------------------\n")
        #killed processes: in `old` but with no match in `new`
        for p1 in old:
            flag=0
            for p2 in new:
                if (p1.compareProc(p2)):
                    flag=1
                    break
            if (flag==0):
                my_file.write("Killed process: %r\n" %p1.toString())
                print("Killed process: %r\n" %p1.toString())
        #new processes: in `new` but with no match in `old`
        for p1 in new:
            flag=0
            for p2 in old:
                if (p1.compareProc(p2)):
                    flag=1
                    break
            if (flag==0):
                my_file.write("New process: %r\n" %p1.toString())
                print("New process: %r\n" %p1.toString())
        my_file.close()
        return(new)
    except KeyboardInterrupt:
        print('\n\nKeyboard exception received. Exiting.')
        my_file.close()
        exit()
#the function checks if someone has changed files
def HackerModFile(last_modifications):
    """Compare the stored modification times against the current ones.

    `last_modifications` is [processList mtime, Status_Log mtime] with -1
    meaning "unknown/deleted". Returns a flag: 0 = unchanged, 1 = Status_Log
    changed, 2 = processList changed, 3 = both changed.
    """
    flag=0;
    try:
        Lmod0=os.path.getmtime("Scanner//processList.txt")
    except: #file is deleted
        Lmod0=-1
    try:
        Lmod1=os.path.getmtime("Scanner//Status_Log.txt")
    except: #file is deleted
        Lmod1=-1
    #someone changed Status_Log.txt file
    if (last_modifications[1]!=-1 and last_modifications[1]!=Lmod1):
        #someone changed processList.txt file
        if (last_modifications[0]!=-1 and last_modifications[0]!=Lmod0):
            flag=3
        else:
            flag=1
    elif (last_modifications[0]!=-1 and last_modifications[0]!=Lmod0):
        flag=2
    return flag
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#Second laboratory
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#the function prompts until a valid datetime is entered and returns it
def getDateTime(message):
    """Keep prompting until the user enters a 'YYYY-MM-DD HH:MM:SS' string;
    return it parsed as a datetime.

    Fix: raw_input() only exists on Python 2; the rest of this file (e.g.
    getNumeric) already uses the Python 3 input(), which the original had
    left commented out here.
    """
    while True:
        response = input(message)
        try:
            return datetime.strptime(response, "%Y-%m-%d %H:%M:%S")
        except ValueError:
            print("Please enter a datetime")
# Compare fields 1..3 of two already-split log records.
def CompareStrings(a, b):
    try:
        # Field 0 (e.g. the leading label) is deliberately ignored.
        return a[1] == b[1] and a[2] == b[2] and a[3] == b[3]
    except:
        # Malformed record (missing fields): report and abort the program.
        print("The data in the processList.txt file is not correct")
        os._exit(0)
#the function demonstrates the difference between two process-list samples
def Different(first,second):
    """Print which process records disappeared between `first` and `second`
    ("Killed process") and which appeared ("New process"). Records are
    colon-separated lines compared on fields 1..3 via CompareStrings;
    quotes and newlines are stripped before printing."""
    #old: records in `first` with no match in `second`
    for item_i in first:
        flag=0
        for item_j in second:
            if (CompareStrings(item_i.split(":"),item_j.split(":"))):
                flag=1
        if (flag==0):
            item_i= re.sub("['\n]",'',item_i)
            print("Killed process: %r" %item_i)
    #new: records in `second` with no match in `first`
    for item_i in second:
        flag=0
        for item_j in first:
            if(CompareStrings(item_i.split(":"),item_j.split(":"))):
                flag=1
        if (flag==0):
            item_i= re.sub("['\n]",'',item_i)
            print("New process: %r" %item_i)
#the function checks which processList.txt file exists and returns its path
def CheckExist():
    """Return the path of processList.txt, preferring the Scanner folder
    over the current directory; abort the program if neither exists."""
    file_1_path = "Scanner//processList.txt"
    file_2_path = "processList.txt"
    if os.access(file_1_path, os.F_OK) == True:
        return file_1_path
    elif os.access(file_2_path, os.F_OK) == True:
        return file_2_path
    else:
        print("The processList.txt file does not exist or is not in the correct directory")
        os._exit(0)
#the function read file
def ReadFile(first_time,second_time):
    """Find the two scan records in processList.txt closest to the given
    datetimes and print the process diff between them.

    first_time, second_time: datetimes matched against the "Time:" stamps.
    Exits via os._exit on malformed data; prints a notice when the file
    holds fewer than two records.
    """
    try:
        boarder="------------------------------------------------------------------------------------------------------\n"
        num_processes=0 #number of scan records seen so far
        firsttest=first_time;
        secondtest=second_time;
        list_arr=[]   # processes of the record currently being parsed
        first_arr=[]  # processes of the record closest to first_time
        second_arr=[] # processes of the record closest to second_time
        first_st=float('inf') #second_d of the record currently closest to first_time
        min_first=float('inf') #variable for storing the value of the minimum difference between tmp_time and first_time
        min_second=float('inf') #variable for storing the value of the minimum difference between tmp_time and second_time
        fileName=CheckExist()
        with open(fileName,"r") as file_handler:
            # Manual readline loop: the body consumes extra lines itself,
            # which would break `for line in file_handler` iteration.
            while True:
                line = file_handler.readline()
                if not line:
                    break
                if (line.startswith("Time:")):
                    num_processes+=1
                    tmp=line.split("'")
                    tmp_time=datetime.strptime(tmp[1], "%Y.%m.%d %H:%M:%S")
                    #absolute difference in seconds from the first input
                    if(tmp_time>=first_time):
                        first_d=(tmp_time-first_time).seconds
                    else:
                        first_d=(first_time-tmp_time).seconds
                    #absolute difference in seconds from the second input
                    if(tmp_time>=second_time):
                        second_d=(tmp_time-second_time).seconds
                    else:
                        second_d=(second_time-tmp_time).seconds
                    line=file_handler.readline()
                    line=file_handler.readline()
                    #reading the processes that were running in this record
                    while(line.startswith("'image:")):
                        list_arr.append(line)
                        if (len(line.split(":"))!=6):
                            print("The data in the processList.txt file is not correct:")
                            print(line)
                            os._exit(0)
                        line=file_handler.readline()
                    if(line!=boarder and line!=''):
                        print("The data in the processList.txt file is not correct:")
                        print(line)
                        os._exit(0)
                    #this record is the closest so far to the first input
                    if (min_first>first_d):
                        # The previous closest-to-first record may now be the
                        # best candidate for the second input.
                        if(min_second>first_st):
                            secondtest=firsttest
                            second_arr.clear()
                            second_arr=list(first_arr)
                            min_second=first_st
                        firsttest=tmp_time
                        first_arr.clear()
                        first_arr=list(list_arr)
                        min_first=first_d
                        first_st=second_d
                    #otherwise it may be the closest to the second input
                    elif(min_second>second_d):
                        secondtest=tmp_time
                        second_arr.clear()
                        second_arr=list(list_arr)
                        min_second=second_d
                    list_arr.clear()
                elif(line!=boarder and line!=''):
                    print("The data in the processList.txt file is not correct:")
                    print(line)
                    os._exit(0)
        if (num_processes<=1):
            print("There is only one record or file is empty")
        else:
            # Pass the older snapshot first so the killed/new labels are right.
            if (firsttest<secondtest):
                Different(first_arr,second_arr)
            else:
                Different(second_arr,first_arr)
    except IOError:
        print("An IOError has occurred!")
    finally:
        file_handler.close()
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#main
#-------------------------------------------------------------------------------------------------------------------------------------------------------------
#assignment 1
def proc1():
    """Assignment 1: poll the process list every X seconds and log changes.

    Loops forever until Ctrl-C: sleeps X seconds, checks whether anyone
    tampered with the output files, takes a new process snapshot and logs
    the status, then remembers the files' modification times for the next
    tamper check.
    """
    try:
        run=1
        old=[]
        new=[]
        RemCreFiles()
        X=getNumeric("Hello user, enter X time: ")
        old=GetSetList()
        # [-1, -1] means "no previous modification times recorded yet".
        last_modifications=[-1,-1]
        while True:
            time.sleep(X)
            flag=HackerModFile(last_modifications)
            new=GetSetList()
            old=SetStatus(old,new,run,flag)
            run+=1
            last_modifications[0]=os.path.getmtime("Scanner//processList.txt")
            last_modifications[1]=os.path.getmtime("Scanner//Status_Log.txt")
    except KeyboardInterrupt:
        print('\n\nKeyboard exception received. Exiting.')
        exit()
#assignment 2
def proc2():
    """Assignment 2: diff the process records closest to two user datetimes.

    Asks for two datetimes and hands them to ReadFile() in chronological
    order; identical inputs are rejected with a message.
    """
    first_time=getDateTime("Please enter first datetime according to the format %Y-%m-%d %H:%M:%S :\n")
    second_time=getDateTime("Please enter second datetime according to the format %Y-%m-%d %H:%M:%S :\n")
    if (first_time!=second_time):
        # ReadFile expects its arguments in ascending order.
        if (first_time<second_time):
            ReadFile(first_time,second_time)
        else:
            ReadFile(second_time,first_time)
    else:
        print("You entered the same time, so the answer is empty")
if __name__ == '__main__':
    # Entry point: dispatch to one of the two assignments by menu choice.
    try:
        X=getNumeric("User select:\n1-Assignment 1\n2-Assignment 2\nany other-EXIT\n")
        if (X==1):
            proc1()
        elif (X==2):
            proc2()
        else:
            print("Good bye!")
    except KeyboardInterrupt:
        print('\n\nKeyboard exception received. Exiting.')
        exit()
| true |
7fbc49858f55f348ecbc19df26d245b9c88af801 | Python | nantha42/DeepForex | /src/transformerxl.py | UTF-8 | 902 | 2.53125 | 3 | [
"MIT"
] | permissive | from typing import *
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
# Run on the first CUDA GPU when available, otherwise fall back to the CPU.
device = torch.device("cpu") if not torch.cuda.is_available() else torch.device("cuda:0")
class PositionalEmbedding(nn.Module):
    """Sinusoidal positional embedding as used in Transformer-XL.

    Maps a (seq,) tensor of positions to a (seq, 1, d) embedding whose
    first d/2 channels are sines and last d/2 channels are cosines.
    """
    def __init__(self, d):
        super().__init__()
        self.d = d  # embedding dimension; arange(0, d, 2) implies d is even
        inv_freq = 1 / (10000 ** (torch.arange(0.0, d, 2.0) / d))
        # register_buffer tells pytorch that this tensor is part of the model:
        # it is saved in the state_dict and moved to the GPU along with the
        # model, without being a trainable parameter.
        self.register_buffer("inv_freq", inv_freq)
    def forward(self, positions: torch.LongTensor, # (seq, )
                ):
        # Outer product: (seq,) x (d/2,) -> (seq, d/2) angle matrix.
        sinusoid_inp = torch.einsum("i,j->ij", positions.float(), self.inv_freq)
        pos_emb = torch.cat([sinusoid_inp.sin(), sinusoid_inp.cos()], dim=-1)
return pos_emb[:,None,:] | true |
f2a8cf714df12fde071a3de9cfe1e8a28627fa13 | Python | AkshdeepSharma/Classroom | /Kattis/differentdistances.py | UTF-8 | 366 | 2.90625 | 3 | [] | no_license | def calculatePNormDistance(x1, y1, x2, y2, p):
    # Minkowski (p-norm) distance between (x1, y1) and (x2, y2).
    return (abs(x1 - x2) ** p + abs(y1 - y2) ** p) ** (1 / p)
def main():
    """Read 'x1 y1 x2 y2 p' lines from stdin and print each p-norm distance.

    A line containing the single value 0 terminates the loop (Kattis format).
    """
    inp = list(map(float, input().split()))
    while inp != [0]:
        x1, y1, x2, y2, p = inp
        print(calculatePNormDistance(x1, y1, x2, y2, p))
        inp = list(map(float, input().split()))
if __name__ == "__main__":
    main()
| true |
9d0ce32ec7e5c40241fc8b43560dc16a0fdb9b5f | Python | Redwoods87/CS167 | /search.py | UTF-8 | 1,173 | 4.03125 | 4 | [] | no_license | def linear_search(keys, target):
"""Find the index of target in a list of keys.
Parameters:
keys: a list of key values
target: a value for which to search
Return value:
the index of an occurrence of target in keys
"""
for i in range(len(keys)):
if keys[i] == target:
return i
else:
return -1
def binary_search(keys, target):
    """Find the index of target in a sorted list of keys.

    Parameters:
        keys: a list of key values, sorted in ascending order
        target: a value for which to search
    Return value:
        the index of the first (leftmost) occurrence of target in keys,
        or None if the target does not occur in keys
    """
    left = 0
    right = len(keys) - 1
    while left <= right:
        mid = (left + right) // 2
        if target < keys[mid]:
            right = mid - 1
        elif target > keys[mid]:
            left = mid + 1
        else:
            # Walk left to the first duplicate; the mid > 0 guard stops
            # keys[mid - 1] from wrapping around to keys[-1] at index 0.
            while mid > 0 and keys[mid] == keys[mid - 1]:
                mid = mid - 1
            return mid
    # Not found: return None as the docstring promises (the original leaked
    # a debugging print() and returned the internal (right, left) pair).
    return None
# Ad-hoc smoke test: 'N' first occurs at index 2 in this sorted data.
data = ['B', 'E', 'N', 'N', 'N', 'N' , 'U', 'U']
print(binary_search(data, 'N'))
| true |
2d641fe7d8cc1861ac45fe0ccc6e897db340bd2e | Python | geomapping/raspi-python | /random_omg.py | UTF-8 | 439 | 3.015625 | 3 | [
"MIT"
] | permissive | from sense_hat import SenseHat
import time
import random
# Spell out "OMG!" on the Sense HAT LED matrix, one letter per second,
# each of the first three letters with a fresh random colour intensity.
sense = SenseHat()
r = random.randint(0,255)
sense.show_letter("O",text_colour=[r, 0, 0])  # random red
time.sleep(1)
r = random.randint(0,255)
sense.show_letter("M",text_colour=[0, 0, r])  # random blue
time.sleep(1)
r = random.randint(0,255)
sense.show_letter("G",text_colour=[0, r, 0])  # random green
time.sleep(1)
# Black "!" on a white background.
sense.show_letter("!", text_colour=[0, 0, 0], back_colour=[255, 255, 255])
time.sleep(1)
sense.clear()
| true |
bfbc21b51fac88aeb77f4ad846c185875302b60f | Python | jerrol06/python | /corona virus tracker 2019 2nd edition/Corona_Virus_2019_Tracker.py | UTF-8 | 10,440 | 2.9375 | 3 | [] | no_license | from tkinter import *
from tkinter import Tk
from covid import Covid
from tkinter.ttk import Combobox
from time import strftime
from tkinter import messagebox
# --- Main window setup ---
windows_form = Tk()
windows_form.title('Corona Virus 2019 Tracker')
windows_form.geometry('1200x620') # fixed window size
windows_form.configure(bg='skyblue4') # window background colour
windows_form.iconbitmap('corona.ico') # window icon loaded from the given path
windows_form.resizable(False, False) # make the window non-resizable
# --- World-wide statistics fetched once from the worldometers API ---
list_country = [] # container for the country names shown in the combobox
corona_virus = Covid(source="worldometers") # covid API client
get_data = corona_virus.get_status_by_country_name('World')
get_new_cases = get_data['new_cases']
get_critical = get_data['critical']
get_new_deaths = get_data['new_deaths']
all_list_country = corona_virus.list_countries() # every country known to the API
confirmed = corona_virus.get_total_confirmed_cases() # world-wide confirmed total
deaths = corona_virus.get_total_deaths() # world-wide deaths total
recovered = corona_virus.get_total_recovered() # world-wide recovered total
active = corona_virus.get_total_active_cases() # world-wide active total
# collect and alphabetise the country names for the dropdown
for country in all_list_country:
    list_country.append(country.capitalize())
list_country.sort() # arrange all country names in order
# photo image, size and location of the corona picture
corona_pic = PhotoImage(file="newcoronapic.png")
img = corona_pic.subsample(3) # shrink the image to a third of its size
photo_icon = Label(windows_form, image=img, bg='skyblue4')
photo_icon.configure(textvariable=img)
photo_icon.place(x=990, y=65)
# create definition for getting confirmed,deaths,recovered,active from selected country in combobox
def data_for_selected_country(event):
    """Combobox callback: refresh the per-country counters for the selection.

    event: the <<ComboboxSelected>> event object (unused; required by bind).
    NOTE(review): the local_* counter widgets are created inside all_label();
    confirm they are reachable from here at runtime.
    """
    try:
        country = country_combo.get()
        if country == '':
            # Nothing selected: warn the user and blank every field.
            messagebox.showinfo('Message', 'Please Select a Country.')
            local_confirmed_counter.configure(text='')
            local_deaths_counter.configure(text='')
            local_recovered_counter.configure(text='')
            local_active_counter.configure(text='')
            local_new_cases_counter.configure(text='')
            local_critical_counter.configure(text='')
            local_new_deaths_counter.configure(text='')
            local_total_tests_counter.configure(text='')
        else:
            # Fetch the latest numbers for the chosen country and display them.
            get_country = corona_virus.get_status_by_country_name(country)
            local_confirmed_counter.configure(text=get_country['confirmed'])
            local_deaths_counter.configure(text=get_country['deaths'])
            local_recovered_counter.configure(text=get_country['recovered'])
            local_active_counter.configure(text=get_country['active'])
            local_new_cases_counter.configure(text=get_country['new_cases'])
            local_critical_counter.configure(text=get_country['critical'])
            local_new_deaths_counter.configure(text=get_country['new_deaths'])
            local_total_tests_counter.configure(text=get_country['total_tests'])
            selected_country.configure(text=f'{country.capitalize()}')
    except Exception:
        # Bug fix: the original bare `except: print('hello')` was leftover
        # debug output that silently swallowed every failure (including
        # KeyboardInterrupt/SystemExit).  Surface the problem to the user.
        messagebox.showerror('Error', 'Could not fetch data for the selected country.')
# Country dropdown; selecting an entry fires data_for_selected_country().
country_combo = Combobox(windows_form, values=list_country)
country_combo.place(x=10, y=380)
country_combo.bind("<<ComboboxSelected>>", data_for_selected_country)
# function that keeps the date/time label up to date
def orasan():
    """Refresh the clock label, then re-schedule itself in 200 ms."""
    oras = strftime("%x - %I:%M:%S %p")
    orasan_label.configure(text=oras)
    orasan_label.after(200, orasan)
# funtion for label
def all_label():
    """Build every static label and counter widget of the window.

    Bug fix: the counter/label widgets that the callbacks
    (data_for_selected_country, orasan) update were plain locals of this
    function, so every callback died with NameError at runtime.  The shared
    widgets are now declared global so the rest of the module can reach them.
    """
    global local_confirmed_counter, local_deaths_counter, \
        local_recovered_counter, local_active_counter, \
        local_new_cases_counter, local_critical_counter, \
        local_new_deaths_counter, local_total_tests_counter, \
        selected_country, orasan_label
    # global and local design
    global_Design_line = Label(bg='azure3', width=1150, height=2)
    global_Design_line.grid()
    # global text design
    global_text_label = Label(text='World Wide Case', font=('arial', 16, 'bold'), bg='azure3', fg='Black')
    global_text_label.place(x=510, y=4)
    # global confirmed text
    global_confirmed = Label(text='Confirmed', bg='skyblue4', font=('arial', 16, 'bold'))
    global_confirmed.place(x=10, y=55)
    # global confirmed counter
    global_confirmed_counter = Label(text=' ', bg='skyblue4', fg='sea green')
    global_confirmed_counter.configure(text=f'{confirmed}', font=('arial', 16, 'bold'), fg='white')
    global_confirmed_counter.place(x=10, y=120)
    # global deaths text
    global_deaths = Label(text='Deaths', bg='skyblue4', font=('arial', 16, 'bold'))
    global_deaths.place(x=330, y=55)
    # global deaths counter
    global_deaths_counter = Label(text=' ', bg='skyblue4', fg='sea green')
    global_deaths_counter.configure(text=f'{deaths}', font=('arial', 16, 'bold'), fg='white')
    global_deaths_counter.place(x=330, y=120)
    # global recovered text
    global_recovered = Label(text='Recovered', bg='skyblue4', font=('arial', 16, 'bold'))
    global_recovered.place(x=560, y=55)
    # global recovered counter
    global_recovered_counter = Label(text=' ', bg='skyblue4')
    global_recovered_counter.configure(text=f'{recovered}', font=('arial', 16, 'bold'), fg='white')
    global_recovered_counter.place(x=560, y=120)
    # global active text
    global_active = Label(text='Active', bg='skyblue4', font=('arial', 16, 'bold'))
    global_active.place(x=810, y=55)
    # global active counter
    global_active_counter = Label(text=' ', bg='skyblue4', fg='sea green')
    global_active_counter.configure(text=f'{active}', font=('arial', 16, 'bold'), fg='white')
    global_active_counter.place(x=810, y=120)
    # global new cases
    global_new_cases = Label(text='New Cases', bg='skyblue4', font=('arial', 16, 'bold'))
    global_new_cases.place(x=10, y=180)
    # global new cases counter
    global_new_cases_counter = Label(text=f'{get_new_cases}', font=('arial', 16, 'bold'), bg='skyblue4', fg='white')
    global_new_cases_counter.place(x=10, y=240)
    # global critical cases
    global_critical = Label(text='Critical', bg='skyblue4', font=('arial', 16, 'bold'))
    global_critical.place(x=330, y=180)
    # global critical counter
    global_critical_counter = Label(text=f'{get_critical}', font=('arial', 16, 'bold'), bg='skyblue4', fg='white')
    global_critical_counter.place(x=330, y=240)
    # global new deaths text
    global_new_deaths = Label(text='New Deaths', bg='skyblue4', font=('arial', 16, 'bold'))
    global_new_deaths.place(x=560, y=180)
    # global new deaths counter
    global_new_deaths_counter = Label(text=f'{get_new_deaths}', font=('arial', 16, 'bold'), bg='skyblue4', fg='white')
    global_new_deaths_counter.place(x=560, y=240)
    # local confirm text
    local_confirmed_case = Label(text='Confirmed', bg='skyblue4', font=('arial', 16, 'bold'))
    local_confirmed_case.place(x=330, y=330)
    # local deaths text
    local_deaths_case = Label(text='Deaths', bg='skyblue4', font=('arial', 16, 'bold'))
    local_deaths_case.place(x=550, y=330)
    # local recovered text
    local_recovered = Label(text='Recovered', bg='skyblue4', font=('arial', 16, 'bold'))
    local_recovered.place(x=780, y=330)
    # local active text
    local_active = Label(text='Active', bg='skyblue4', font=('arial', 16, 'bold'))
    local_active.place(x=1040, y=330)
    # local new cases
    local_new_case = Label(text='New Cases', bg='skyblue4', font=('arial', 16, 'bold'))
    local_new_case.place(x=330, y=470)
    # local critical
    local_Critical = Label(text='Critical', bg='skyblue4', font=('arial', 16, 'bold'))
    local_Critical.place(x=550, y=470)
    # local new deaths text
    local_new_deaths = Label(text='New Deaths', bg='skyblue4', font=('arial', 16, 'bold'))
    local_new_deaths.place(x=780, y=470)
    # local total tests text
    local_total_tests = Label(text='Total Tests', bg='skyblue4', font=('arial', 16, 'bold'))
    local_total_tests.place(x=1040, y=470)
    # corona words
    corona_words_label = Label(text='Stay at Home, Stop Covid19', bg='skyblue4', font=('arial', 8, 'bold'))
    corona_words_label.place(x=10, y=590)
    # country text
    country_label = Label(text='Select Country', bg='skyblue4', font=('arial', 15, 'bold'))
    country_label.place(x=10, y=330)
    # design line label with selected country label
    local_Design_line = Label(bg='azure3', width=1150, height=1)
    local_Design_line.place(x=0, y=290)
    # local counter label for confirmed
    local_confirmed_counter = Label(text=' ', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_confirmed_counter.place(x=330, y=400)
    # local counter label for deaths
    local_deaths_counter = Label(text=' ', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_deaths_counter.place(x=550, y=400)
    # local counter label for recovered
    local_recovered_counter = Label(text=' ', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_recovered_counter.place(x=780, y=400)
    # local counter label for active
    local_active_counter = Label(text=' ', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_active_counter.place(x=1040, y=400)
    # local new case counter
    local_new_cases_counter = Label(text='', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_new_cases_counter.place(x=330, y=530)
    # local critical counter
    local_critical_counter = Label(text='', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_critical_counter.place(x=550, y=530)
    # local new deaths
    local_new_deaths_counter = Label(text='', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_new_deaths_counter.place(x=780, y=530)
    # local total tests counter
    local_total_tests_counter = Label(text='', font=('arial', 16, 'bold'), bg='skyblue4', fg='cyan')
    local_total_tests_counter.place(x=1040, y=530)
    # display selected country
    selected_country = Label(text='', font=('arial', 19, 'bold'), bg='skyblue4', fg='Limegreen')
    selected_country.place(x=10, y=430)
    # label to display date and time
    orasan_label = Label(text='', bg='skyblue4', font=('arial', 8, 'bold'))
    orasan_label.place(x=1060, y=590)
all_label() # build every static label and counter widget
orasan() # start the self-rescheduling clock
windows_form.mainloop()
| true |
1ca4ceb998271223148acee4769acc32deb6716e | Python | ibelievem/Python_Reptile | /6.12、selenium动作链/3.拖拽动作.py | UTF-8 | 1,460 | 3.109375 | 3 | [] | no_license | from selenium import webdriver
from time import sleep
from selenium.webdriver.common.action_chains import ActionChains
url='http://sahitest.com/demo/dragDropMooTools.htm'
driver = webdriver.Firefox()
driver.get(url)
# driver.maximize_window()
# sleep(5)
# driver.save_screenshot("./dragMe.png")
# Locate the elements involved in the drag-and-drop demo.
dragMe = driver.find_element_by_xpath("//div[@class='drag']")
item1 = driver.find_element_by_xpath("//div[contains(.,'Item 1')]")
item2= driver.find_element_by_xpath("//div[contains(.,'Item 2')]")
item3 = driver.find_element_by_xpath("//div[contains(.,'Item 3')]")
item4 = driver.find_element_by_xpath("//div[contains(.,'Item 4')]")
# # drag_and_drop: drag one element onto another element
ActionChains(driver).drag_and_drop(dragMe,item1).perform()
sleep(2)
# # drag_and_drop_by_offset: drag an element by an (x, y) pixel offset from the current mouse position
ActionChains(driver).drag_and_drop_by_offset(dragMe,190,250).perform()
sleep(2)
# # click_and_hold: press and hold the left button on an element; move_to_element: move onto an element
# # release: release the mouse button
ActionChains(driver).click_and_hold(dragMe).move_to_element(item3).release().perform()
sleep(2)
# # click_and_hold: press and hold the left mouse button on an element
# # move_to_element_with_offset: move onto an element, then offset by the given pixels
# # release: release the mouse button
ActionChains(driver).click_and_hold(dragMe).move_to_element_with_offset(item3,150,10).release().perform()
sleep(2)
driver.quit()
| true |
9f2ce8fbcb6d45744cfcdc5de884db00b7747e23 | Python | priyankagaru/Python-Practice | / wikitable filmography johny depp.py | UTF-8 | 850 | 3.28125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup as bs
import pandas as pd
# Scrape the first "wikitable" (Johnny Depp's filmography) into a DataFrame.
URL='https://en.wikipedia.org/wiki/Johnny_Depp_filmography'
response=requests.get(URL)
soup=bs(response.text,'html.parser')
# Column accumulators; every table row appends one value to each list.
filmography={
    "year":[],
    'title':[],
    'roles':[],
    'notes':[],
    'ref':[],
}
# All <tr> rows of the first table carrying the "wikitable" class.
table_body=soup.find('table',attrs={'class':'wikitable'}).find('tbody').find_all('tr')
#table_data=table_body.find('tr')[1]
#print(table_data.text)
# Skip the header row; the year lives in a <th>, the rest in <td> cells.
for row in table_body[1:]:
    cells1=row.find_all('th')
    cells=row.find_all('td')
    filmography['year'].append(cells1[0].text.strip())
    filmography['title'].append(cells[0].text.strip())
    filmography['roles'].append(cells[1].text.strip())
    filmography['notes'].append(cells[2].text.strip())
    filmography['ref'].append(cells[3].text.strip())
df=pd.DataFrame(filmography)
print(df.to_string())
| true |
3f0c57b2d36267ffa142bb393dd229b6b28c8e77 | Python | jiaxionglee/Python | /base/operator/operator.py | UTF-8 | 440 | 3.84375 | 4 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time : 2019-01-02 22:52
# @Author: jiaxiong
# @File : operator.py
# Arithmetic operators
# 3 to the power of 2
print(3 ** 2)
# Floor division (quotient)
print(10 // 3)
# Modulo: returns the remainder
print(10 % 3)
# Comparison operators
print(10 == 11)
print('a'!='a')
print('a'<'b')
# Assignment operators: =, +=, -=, *=, /=
# Logical operators
print('x' and 'y')
print('x' or 'y')
print(not 'x')
# Membership operators: in, not in
| true |
36568d80664f8045171fcd1af309dae8d4172192 | Python | AmbujChoudha/PyCharm_Projects-Jetbrains- | /Tic-Tac-Toe/Tic-Tac-Toe/What's up on the field?/Stage3.py | UTF-8 | 2,651 | 3.953125 | 4 | [] | no_license | class TicTocToe:
    def __init__(self, cells):
        """Store the board and reset the bookkeeping used by win_check().

        cells: list of 9 characters ('X', 'O' or '_'), row-major order.
        """
        self.x_list = []        # board indices occupied by X
        self.o_list = []        # board indices occupied by O
        self.x_win = 0          # 1 once X has a completed line
        self.o_win = 0          # 1 once O has a completed line
        self.cells = cells
        self.impossible = None  # set by impossible_state()
        self.result = None      # set by win_check(): 0 none, 1 X, 2 O, 3 both
self.win = None
def print_box(self): # this part is for printing the box
print("---------")
for i in range(0, 9, 3):
print('|', *self.cells[i:i + 3], '|')
print('---------')
def win_check(self): # this part checks whether X wins or O or both
self.result = 0
self.win = [[0, 1, 2], [3, 4, 5], [6, 7, 8], [0, 3, 6],
[1, 4, 7], [2, 5, 8], [0, 4, 8], [2, 4, 6]]
for i in range(9): # makes a list of positions of X
if self.cells[i] == 'X':
self.x_list.append(i)
elif self.cells[i] == 'O': # makes a list of positions of O
self.o_list.append(i)
for state in self.win: # Checks if X wins
if all(x in self.x_list for x in state):
self.x_win += 1
break
for state in self.win: # Checks if Y wins
if all(x in self.o_list for x in state):
self.o_win += 1
break
if self.x_win == 1 and self.o_win == 0:
self.result = 1
elif self.x_win == 0 and self.o_win == 1:
self.result = 2
elif self.x_win == 1 and self.o_win == 1:
self.result = 3
return self.result # returns the result of the evaluation
def impossible_state(self): # checks whether impossible condition or not
self.impossible = False
if abs(self.cells.count('X') - self.cells.count('O')) > 1:
self.impossible = True
elif self.result == 3:
self.impossible = True
return self.impossible
def print_result(self): # prints the result as passed by 'win_check' method
if self.impossible:
print('Impossible')
elif self.result == 1:
print('X wins')
elif self.result == 2:
print('O wins')
else:
if self.cells.count('_') == 0:
print('Draw')
else:
print('Game not finished')
def tic_tac(self): # uses all the previous methods to provide the soultion
self.print_box() # can be skipped by calling the methods individually
self.win_check()
self.impossible_state()
self.print_result()
# Read the 9 board cells as one string (e.g. XX_XO_O_O) and evaluate it.
user_input = list(input('Enter cells: '))
my_game = TicTocToe(user_input)
my_game.tic_tac()
| true |
24d5c0f11cb61c449d886a8a1a0e90b478e9f4cb | Python | kennethhhh/data-structures | /Project1/perm_lex.py | UTF-8 | 415 | 3.0625 | 3 | [] | no_license | def perm_gen_lex(a):
    # Base cases: the empty string yields no permutations; a single
    # character permutes to itself.
    if len(a)==0:
        return([])
    if len(a)==1:
        return ([a])
    new_L=[]
    # Fix each character in front and recursively permute the rest; when
    # `a` is sorted this yields the permutations in lexicographic order.
    for idx in range(len(a)):
        # recurse on the simpler (length n-1) sub-problem
        perms= perm_gen_lex(a[:idx]+a[idx+1:])
        for letters in perms:
            # prepend the removed character to each sub-permutation
            new_L.append(a[idx]+letters)
    return (new_L)
| true |
e32bc96296e5f41569b39717199b12b260202028 | Python | joonholee95/Sibu-taikai | /sibu/sample_programs/python/파이썬/C072003/pythonII_ーュタヌシメスコ/lib_1.py | UTF-8 | 492 | 3.140625 | 3 | [] | no_license | # pickle모듈: 객체 형태를 그대로 유지해서 파일에 저장 시키고, 불러올 수 있게 하는 모듈이다.
# 바이너리 형태로 저장한다.
# Pickle이용해서 파일에 저장 및 조회할 때는 꼭 바이너리 처리를 해야한다. b를 입력해서
# 바이너리라는 것을 표시해야 한다.
# 저장시 pickle.dump(objet, file), 불러올때는 pickle.load(file)
# pickle.dumps(object) ---->string
# pickle.loads(String) ---->object
| true |
3ce5f42a787557c9a8b538431425734b33bf813c | Python | wxlovolxw/GIWON-S-STUDY | /Machine_Leanring/Classification/Dbscan/dbscan.py | UTF-8 | 3,804 | 2.953125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import make_circles, make_moons
from sklearn.cluster import DBSCAN
n_samples = 1000
np.random.seed(2)
X1, y1 = make_circles(n_samples=n_samples, factor=.5, noise=.09)
# n_samples - number of points
# factor - value between 0 and 1 (default .8): scale factor between the inner and outer circle
# noise - double or None; Gaussian noise added to the data
# The returned X holds the generated samples: an [n_samples, 2] array of coordinates.
# y says whether each point belongs to the inner circle (class 1) or the outer circle (class 0).
# For example, X[y==0, 0] is the x-coordinate of every point on the outer circle.
X2, y2 = make_moons(n_samples=n_samples, noise=.1)
def plot_DBSCAN(title, X, eps, xlim, ylim):
    """Fit DBSCAN on X, scatter-plot the clusters, and return the labels."""
    # xlim and ylim are the value ranges shown on the plot.
    # min_samples keeps its default of 5; it is not overridden in this example.
    # The key parameters are eps and min_samples: a point becomes a core
    # sample when at least min_samples points lie within distance eps.
    model = DBSCAN(eps=eps)
    y_pred = model.fit_predict(X)
    # Create the estimator and fit the model.
    # Like agglomerative clustering, DBSCAN has no predict() method;
    # fit_predict() builds the clusters and returns their labels.
    idx_outlier = model.labels_ == -1
    # labels_ holds the cluster id of each point; outliers get -1.
    plt.scatter(X[idx_outlier, 0], X[idx_outlier, 1], marker='x', lw=1, s=20)
    plt.scatter(X[model.labels_ == 0, 0], X[model.labels_ == 0, 1], marker='o', facecolor='g', s=5)
    plt.scatter(X[model.labels_ == 1, 0], X[model.labels_ == 1, 1], marker='s', facecolor='y', s=5)
    X_core = X[model.core_sample_indices_, :]
    # Each point is drawn with a marker by label: outliers 'x', cluster 0 'o', cluster 1 's'.
    # core_sample_indices_ lists the core points; points not in it are border points.
    idx_core_0 = np.array(list(set(np.where(model.labels_ == 0)[0]).intersection(model.core_sample_indices_)))
    idx_core_1 = np.array(list(set(np.where(model.labels_ == 1)[0]).intersection(model.core_sample_indices_)))
    # np.where returns the indices matching the condition;
    # .intersection() keeps only those that are also core samples.
    plt.scatter(X[idx_core_0, 0], X[idx_core_0, 1], marker='o', facecolor='g', s=80, alpha=0.3)
    plt.scatter(X[idx_core_1, 0], X[idx_core_1, 1], marker='s', facecolor='y', s=80, alpha=0.3)
    plt.grid(False)
    plt.xlim(*xlim)
    plt.ylim(*ylim)
    plt.xticks(())
    plt.yticks(())
    plt.title(title)
    return y_pred
# Run the demo on both synthetic data sets, side by side.
plt.figure(figsize=(10, 5))
plt.subplot(121)
y_pred1 = plot_DBSCAN("Concentric-circles Clustering", X1, 0.1, (-1.2, 1.2), (-1.2, 1.2))
plt.subplot(122)
y_pred2 = plot_DBSCAN("Crecsent Clustering", X2, 0.1, (-1.5, 2.5), (-0.8, 1.2))
plt.tight_layout()
plt.show()
# In this example eps is 0.1: a point with at least 5 neighbours within 0.1 becomes a core sample.
# The number of clusters is not set explicitly; eps controls it implicitly.
# It is best to choose eps after scaling the data.
# DBSCAN exposes three attributes:
# core_sample_indices_ - the indices of the core samples.
# components_ - a copy of the core samples seen during training.
# labels_는 데이터셋의 각 지점들에 대한 클러스터의 라벨들이다. | true |
3591f671050f302612cfa6b005150b6c03c2604c | Python | polysquare/polysquare-cmake-linter | /polysquarecmakelinter/check_access.py | UTF-8 | 3,122 | 2.6875 | 3 | [
"MIT"
] | permissive | # /polysquarecmakelinter/check_access.py
#
# Linter checks for access rights
#
# See /LICENCE.md for Copyright information
"""Linter checks for access rights."""
from polysquarecmakelinter import find_all
from polysquarecmakelinter import find_variables_in_scopes
from polysquarecmakelinter.types import LinterFailure
def only_use_own_privates(abstract_syntax_tree):
    """Check that all private definitions used are defined here.

    Returns a list of LinterFailure objects, one per line on which an
    externally-defined private definition is used.
    """
    calls, defs = find_all.private_calls_and_definitions(abstract_syntax_tree)
    errors = []
    for call, info in calls.items():
        if call in defs:
            continue
        message = "Used external private definition {0}".format(call)
        for line in info:
            errors.append(LinterFailure(message, line))
    return errors
def _find_violating_priv_uses(variable, current_scope):
    """Generate (name, line) pairs for private variables used but never set.

    variable: a used-variable record whose node is inspected for references.
    current_scope: the scope in which the use occurs; parent scopes are
    searched as well.
    """
    for use in find_all.variables_used_in_expr(variable.node):
        if use.startswith("_"):
            # Used a private variable: walk this scope and its ancestors to
            # see whether it was set anywhere in the module.
            private_var_was_set = False
            traverse_scope = current_scope
            while traverse_scope is not None:
                for set_var in traverse_scope.set_vars:
                    # NOTE(review): the (line, col) tuple is compared against
                    # `variable` itself, presumably to skip the very use site
                    # being examined - confirm the intended comparison.
                    if (set_var.node.contents == use and
                        (set_var.node.line,
                         set_var.node.col) != variable):
                        private_var_was_set = True
                        break
                traverse_scope = traverse_scope.parent
            if not private_var_was_set:
                yield (use, variable.node.line)
def only_use_own_priv_vars(ast):
    """Check that all private variables used are defined here.

    Walks the set-variables tree and the used-variables tree in lockstep
    and reports every private variable that is used without being set in
    this module.  Returns a list of LinterFailure objects.
    """
    used_privs = []
    global_set_vars = find_variables_in_scopes.set_in_tree(ast)
    global_used_vars = find_variables_in_scopes.used_in_tree(ast)
    _, global_definitions = find_all.private_calls_and_definitions(ast)
    # The big assumption here is that the "scopes" structure in both
    # trees are the same
    def _scope_visitor(set_vars_scope, used_vars_scope):
        """Visit scope's set vars and used vars.

        If a var was private and used, but not set in this scope or any
        parents, then report an error.
        """
        assert len(set_vars_scope.scopes) == len(used_vars_scope.scopes)
        # Recurse into child scopes first, then inspect this scope's uses.
        for index in range(0, len(set_vars_scope.scopes)):
            _scope_visitor(set_vars_scope.scopes[index],
                           used_vars_scope.scopes[index])
        for variable in used_vars_scope.used_vars:
            used_privs.extend(list(_find_violating_priv_uses(variable,
                                                             set_vars_scope)))
    _scope_visitor(global_set_vars, global_used_vars)
    # Filter out definitions of private functions of the same name
    # as functions can be used as variables.
    used_privs = [up for up in used_privs if up[0] not in global_definitions]
    err_msg = "Referenced external private variable {0}"
    return [LinterFailure(err_msg.format(u[0]), u[1]) for u in used_privs]
| true |
315c01c11dd77f7e5973122a5e94955f47e7df4a | Python | vishal-chillal/assignments | /new_tasks_23_june/46_solutions/01_max_of_two.py | UTF-8 | 556 | 4.25 | 4 | [] | no_license | # Define a function max() that takes two numbers as arguments and returns the largest of them. Use the if-then-else construct available in Python
def max(num1,num2):
    """Return the larger of the two numbers (ties favour the second one).

    NOTE: deliberately shadows the builtin max(), as the exercise requires.
    """
    return num1 if num1 > num2 else num2
if __name__ == "__main__":
inputs = raw_input("Enter space saperated 2 numbers: ").split()
if len(inputs) != 2:
inputs.append(input())
try:
print max(float(inputs[0]),float(inputs[1]))
except ValueError:
print "Invalid numbers"
| true |
01eca57f0c3d2f067096ac74920c5f4fd6ed535a | Python | dr-dos-ok/Code_Jam_Webscraper | /solutions_python/Problem_208/88.py | UTF-8 | 2,499 | 2.8125 | 3 | [] | no_license | import sys
# Input/output files are derived from the first CLI argument: <name>.in -> <name>.out.
filename = sys.argv[1]
f = open('%s.in' % filename)
g = open('%s.out' % filename, 'w')
# Any second CLI argument enables debug logging via dlog().
DEBUG = sys.argv[2] if len(sys.argv) >= 3 else False
def dlog(s, *n):
    """Print a debug message when the global DEBUG flag is truthy.

    s: printf-style format string; n: optional %-format arguments.
    """
    if DEBUG:
        if n:
            print(s % tuple(n))
        else:
            print(s)
def solve(horses, roads, start, end, visited, cost=0, best=-1, curr_horse=None):
    """Brute-force DFS: minimal travel time from city `start` to `end`.

    horses: per-city (endurance, velocity) tuples.
    roads: distance matrix; -1 means no direct road.
    visited: set of city indices on the current path (mutated and restored).
    cost: time accumulated so far; best: best total found (-1 = none yet).
    curr_horse: mutable [remaining_endurance, velocity] of the ridden horse.
    Returns the best total time, or -1 when the end is unreachable.
    """
    dlog('%d %d %f %f %r %r', start, end, cost, best, curr_horse, visited)
    if start == end:
        if best == -1:
            return cost
        return min(cost, best)
    # Prune: this partial path is already worse than the best complete one.
    if cost > best and best > 0:
        return best
    for next_city in range(len(roads)):
        if next_city == start or next_city in visited:
            dlog("in %d, visited %d", start, next_city)
            continue
        dist = roads[start][next_city]
        if dist == -1:
            continue
        dlog('going to %d from %d on %d', next_city, start, dist)
        visited.add(next_city)
        # Option 1: keep riding the current horse if it can cover the road.
        # NOTE(review): curr_horse is mutated in place and restored after the
        # recursive call - the statement order here is load-bearing.
        if curr_horse and curr_horse[0] >= dist and (curr_horse[0] >= horses[start][0] or curr_horse[1] >= horses[start][1]):
            curr_horse[0] -= dist
            time = 1.0 * dist / curr_horse[1]
            best = solve(horses, roads, next_city, end, visited, cost + time, best, curr_horse)
            curr_horse[0] += dist
        # Option 2: swap to the fresh horse stationed at this city.
        if curr_horse is None or (horses[start][0] >= curr_horse[0] or horses[start][1] >= curr_horse[1]):
            curr_horse = list(horses[start])
            if curr_horse[0] >= dist:
                curr_horse[0] -= dist
                time = 1.0 * dist / curr_horse[1]
                best = solve(horses, roads, next_city, end, visited, cost + time, best, curr_horse)
                curr_horse[0] += dist
        visited.remove(next_city)
    return best
# Parse each test case: N cities, Q queries, horse stats, distance matrix,
# query pairs; then solve every query and write the answer line.
T = int(f.readline())
for t in range(T):
    dlog("Case %d", t)
    line = f.readline().strip().split()
    N = int(line[0])
    Q = int(line[1])
    horses = [0 for _ in range(N)]
    for n in range(N):
        line = f.readline().strip().split()
        endur = int(line[0])
        vel = int(line[1])
        horses[n] = (endur, vel)
    roads = []
    for n in range(N):
        line = f.readline().strip().split()
        roads.append([int(i) for i in line])
    pairs = []
    for q in range(Q):
        line = f.readline().strip().split()
        pairs.append([int(i) for i in line])
    # Cities are 1-based in the input, 0-based internally.
    ans = ' '.join(['%f' % solve(horses, roads, pair[0] - 1, pair[1] - 1, set()) for pair in pairs])
    g.write('Case #%d: %s' % (t + 1, ans))
    g.write('\n')
| true |
0903802f82f50e7a530a88b093a68e505295b40a | Python | ppy2790/douban | /douban/spiders/DoubanSpider.py | UTF-8 | 1,716 | 2.796875 | 3 | [] | no_license | #coding=utf-8
from scrapy.spiders import CrawlSpider
from scrapy.selector import Selector
from scrapy.http import Request
from douban.items import DoubanItem
import sys
# Python 2 hack: re-expose setdefaultencoding so str/unicode mixing uses UTF-8.
reload(sys)
sys.setdefaultencoding('utf-8')
class DoubanSpider(CrawlSpider):
    """Scrapy spider for the Douban Books Top 250 list (Python 2 code)."""
    name = "douban"
    start_urls=['https://book.douban.com/top250']
    def parse(self, response):
        """Yield one DoubanItem per book row, then schedule the next pages."""
        selector = Selector(response)
        infos = selector.xpath('//tr[@class="item"]')
        # NOTE(review): a single item instance is reused for every row;
        # confirm scrapy snapshots it per yield, or move this into the loop.
        item = DoubanItem()
        for info in infos:
            bookname = info.xpath('td/div/a/@title').extract()[0]
            url = info.xpath('td/div/a/@href').extract()[0]
            # The author line is "author / publisher / date / price".
            author_info = info.xpath('td/p/text()').extract()[0]
            author_info = str(author_info)
            author_infos = author_info.split('/')
            price = str(author_infos[len(author_infos)-1])
            rating = info.xpath('td/div/span[2]/text()').extract()[0]
            comment_nums = info.xpath('td/div/span[3]/text()').extract()[0]
            quote = info.xpath('td/p/span/text()').extract()
            if len(quote)>0 :
                quote = quote[0]
            else:
                quote = ''
            item['bookname']= bookname
            item['author']=author_infos[0]
            item['rating_nums']=rating
            item['quote']=quote
            # Keep only the digits of e.g. "(12345人评价)" (Python 2 filter
            # returns a string here; on Python 3 it would be an iterator).
            item['comment_nums'] = filter(str.isdigit, (str(comment_nums)))
            item['pubday']=author_infos[len(author_infos)-2]
            item['price'] = price
            item['url']=url
            yield item
        # Queue the remaining pages of the top-250 list (25 books per page).
        for i in range(25,250,25):
            url = 'https://book.douban.com/top250?start=%s'%i
            yield Request(url,callback=self.parse)
| true |
5b34396c592da507a5728f26bbba4d4f50c015d0 | Python | natnaelmesfun/OOAD | /project 2/python-code/Cat.py | UTF-8 | 597 | 3.75 | 4 | [] | no_license | import random
from Feline import Feline
from Animals import *
class Cat(Feline):
    """Concrete Feline with a name, a hiss sound, and two noise behaviours."""

    def __init__(self, name):
        """Store the cat's name and attach its sound object."""
        self.name = name
        # hissNoise is presumably provided by the star import from Animals
        # -- TODO confirm against Animals.py
        self.sound = hissNoise()

    def makeNoise1(self):
        """Print the runtime class name, this cat's name, and a meow."""
        print("My type is", self.__class__.__name__, " ", self.name, "Me_yao")

    def makeNoise2(self):
        """Print the runtime class name, this cat's name, and a greeting."""
        print("My type is", self.__class__.__name__, " ", self.name, "Hello")
| true |
fc7c90b9e37227fc287d4dc6af78aaa720714bfd | Python | yyht/BERT | /BERT-pytorch/BERT-pytorch/bert_pytorch/__main__.py | UTF-8 | 3,864 | 2.703125 | 3 | [
"Apache-2.0"
] | permissive | import argparse
from torch.utils.data import DataLoader
from .model import BERT
from .trainer import BERTTrainer
from .dataset import BERTDataset, WordVocab
def train():
    """CLI entry point: parse options, build vocab/datasets, pretrain BERT."""
    cli = argparse.ArgumentParser()

    # Required paths.
    cli.add_argument("-c", "--train_dataset", required=True, type=str, help="train dataset for train bert")
    cli.add_argument("-t", "--test_dataset", type=str, default=None, help="test set for evaluate train set")
    cli.add_argument("-v", "--vocab_path", required=True, type=str, help="built vocab model path with bert-vocab")
    cli.add_argument("-o", "--output_path", required=True, type=str, help="ex)output/bert.model")

    # Model architecture.
    cli.add_argument("-hs", "--hidden", type=int, default=256, help="hidden size of transformer model")
    cli.add_argument("-l", "--layers", type=int, default=8, help="number of layers")
    cli.add_argument("-a", "--attn_heads", type=int, default=8, help="number of attention heads")
    cli.add_argument("-s", "--seq_len", type=int, default=20, help="maximum sequence len")

    # Training schedule and hardware.
    cli.add_argument("-b", "--batch_size", type=int, default=64, help="number of batch_size")
    cli.add_argument("-e", "--epochs", type=int, default=10, help="number of epochs")
    cli.add_argument("-w", "--num_workers", type=int, default=5, help="dataloader worker size")
    cli.add_argument("--with_cuda", type=bool, default=True, help="training with CUDA: true, or false")
    cli.add_argument("--log_freq", type=int, default=10, help="printing loss every n iter: setting n")
    cli.add_argument("--corpus_lines", type=int, default=None, help="total number of lines in corpus")
    cli.add_argument("--cuda_devices", type=int, nargs='+', default=None, help="CUDA device ids")
    cli.add_argument("--on_memory", type=bool, default=True, help="Loading on memory: true or false")

    # Optimizer hyper-parameters.
    cli.add_argument("--lr", type=float, default=1e-3, help="learning rate of adam")
    cli.add_argument("--adam_weight_decay", type=float, default=0.01, help="weight_decay of adam")
    cli.add_argument("--adam_beta1", type=float, default=0.9, help="adam first beta value")
    cli.add_argument("--adam_beta2", type=float, default=0.999, help="adam first beta value")

    opts = cli.parse_args()

    print("Loading Vocab", opts.vocab_path)
    vocab = WordVocab.load_vocab(opts.vocab_path)
    print("Vocab Size: ", len(vocab))

    print("Loading Train Dataset", opts.train_dataset)
    train_set = BERTDataset(opts.train_dataset, vocab, seq_len=opts.seq_len,
                            corpus_lines=opts.corpus_lines, on_memory=opts.on_memory)

    print("Loading Test Dataset", opts.test_dataset)
    if opts.test_dataset is not None:
        test_set = BERTDataset(opts.test_dataset, vocab, seq_len=opts.seq_len,
                               on_memory=opts.on_memory)
    else:
        test_set = None

    print("Creating Dataloader")
    train_loader = DataLoader(train_set, batch_size=opts.batch_size, num_workers=opts.num_workers)
    if test_set is not None:
        test_loader = DataLoader(test_set, batch_size=opts.batch_size, num_workers=opts.num_workers)
    else:
        test_loader = None

    print("Building BERT model")
    bert = BERT(len(vocab), hidden=opts.hidden, n_layers=opts.layers, attn_heads=opts.attn_heads)

    print("Creating BERT Trainer")
    trainer = BERTTrainer(bert, len(vocab), train_dataloader=train_loader, test_dataloader=test_loader,
                          lr=opts.lr, betas=(opts.adam_beta1, opts.adam_beta2), weight_decay=opts.adam_weight_decay,
                          with_cuda=opts.with_cuda, cuda_devices=opts.cuda_devices, log_freq=opts.log_freq)

    print("Training Start")
    for epoch in range(opts.epochs):
        trainer.train(epoch)
        trainer.save(epoch, opts.output_path)

        # Evaluate on the held-out set only when one was supplied.
        if test_loader is not None:
            trainer.test(epoch)
e2dcb21578e1c3c22b512fdbe71ca383d64133ef | Python | dboudreau4/independent-projects | /tetris_main.py | UTF-8 | 8,540 | 3.046875 | 3 | [] | no_license | import pygame
import random
pygame.font.init()
# GLOBAL VARIABLES ----------------------------------------------------------
WINDOW_WIDTH = 900
WINDOW_HEIGHT = 800
PLAYAREA_WIDTH = 350    # 10 columns * SHAPE_SIZE
PLAYAREA_HEIGHT = 700   # 20 rows * SHAPE_SIZE
SHAPE_SIZE = 35         # pixel size of one grid cell
UPPER_LEFT_X = (WINDOW_WIDTH - PLAYAREA_WIDTH)//2   # play field centred horizontally
UPPER_LEFT_Y = WINDOW_HEIGHT - PLAYAREA_HEIGHT      # play field flush with bottom edge

# COLORS-------------------
RED = (255, 0, 0)
GREEN = (0, 255, 0)
BLUE = (0, 0, 255)
YELLOW = (255, 255, 0)
ORANGE = (153, 0, 153)  # NOTE: this RGB value is a magenta tone, not orange
PINK = (255, 51, 153)
PURPLE = (102, 0, 204)

# SHAPES-------------------
# Each tetromino is a list of rotation states; every state is a 5x5 character
# grid where 'X' marks a filled cell and '*' is empty padding.
# (Fix: rZ's first rotation had rows of 6 characters; normalized to the 5x5
# convention used by every other template.  The 'X' positions are unchanged.)
rL = [['*****',
       '***X*',
       '*XXX*',
       '*****',
       '*****'],
      ['*****',
       '**X**',
       '**X**',
       '**XX*',
       '*****'],
      ['*****',
       '*****',
       '*XXX*',
       '*X***',
       '*****'],
      ['*****',
       '*XX**',
       '**X**',
       '**X**',
       '*****']]

lL = [['*****',
       '*X***',
       '*XXX*',
       '*****',
       '*****'],
      ['*****',
       '**XX*',
       '**X**',
       '**X**',
       '*****'],
      ['*****',
       '*****',
       '*XXX*',
       '***X*',
       '*****'],
      ['*****',
       '**X**',
       '**X**',
       '*XX**',
       '*****']]

T = [['*****',
      '**X**',
      '*XXX*',
      '*****',
      '*****'],
     ['*****',
      '**X**',
      '**XX*',
      '**X**',
      '*****'],
     ['*****',
      '*****',
      '*XXX*',
      '**X**',
      '*****'],
     ['*****',
      '**X**',
      '*XX**',
      '**X**',
      '*****']]

square = [['*****',
           '*****',
           '*XX**',
           '*XX**',
           '*****']]

rZ = [['*****',
       '*****',
       '**XX*',
       '*XX**',
       '*****'],
      ['*****',
       '**X**',
       '**XX*',
       '***X*',
       '*****']]

lZ = [['*****',
       '*****',
       '*XX**',
       '**XX*',
       '*****'],
      ['*****',
       '**X**',
       '*XX**',
       '*X***',
       '*****']]

bar = [['**X**',
        '**X**',
        '**X**',
        '**X**',
        '*****'],
       ['*****',
        'XXXX*',
        '*****',
        '*****',
        '*****']]

# Parallel lists: colors[i] is the draw color for shapes[i].
shapes = [rL, lL, T, square, rZ, lZ, bar]
colors = [RED, GREEN, BLUE, YELLOW, ORANGE, PINK, PURPLE]
class Shape(object):
    """A falling tetromino: board position, rotation template, and color."""

    def __init__(self, x, y, shape):
        # Board coordinates of the template's reference corner.
        self.x, self.y = x, y
        self.shape = shape
        self.rot = 0  # index into the template's rotation list
        # Color is tied to the template by position in the parallel globals.
        self.color = colors[shapes.index(shape)]
def grid_lines(surface, grid):
    """Draw the blue cell borders over the play field."""
    left, top = UPPER_LEFT_X, UPPER_LEFT_Y
    for row in range(len(grid)):
        y = top + row * SHAPE_SIZE
        pygame.draw.line(surface, BLUE, (left, y), (left + PLAYAREA_WIDTH, y))
        for col in range(len(grid[row])):
            x = left + col * SHAPE_SIZE
            pygame.draw.line(surface, BLUE, (x, top), (x, top + PLAYAREA_HEIGHT))
def set_window(surface, grid):
    """Repaint the frame: title, filled cells, red border, and grid lines."""
    surface.fill((0, 0, 0))
    pygame.font.init()
    font = pygame.font.SysFont('comicsansms', 50)
    label = font.render('Tetris', 1, YELLOW)
    title_x = UPPER_LEFT_X + PLAYAREA_WIDTH/2 - (label.get_width()/2)
    surface.blit(label, (title_x, 40))
    # Fill every cell with its stored color (black cells are empty).
    for row, cells in enumerate(grid):
        for col, color in enumerate(cells):
            cell_rect = (UPPER_LEFT_X + col * SHAPE_SIZE,
                         UPPER_LEFT_Y + row * SHAPE_SIZE,
                         SHAPE_SIZE, SHAPE_SIZE)
            pygame.draw.rect(surface, color, cell_rect, 0)
    pygame.draw.rect(surface, RED, (UPPER_LEFT_X, UPPER_LEFT_Y, PLAYAREA_WIDTH, PLAYAREA_HEIGHT), 5)
    grid_lines(surface, grid)
def create_grid(locked_pos=None):
    """Build the 20x10 board as rows of RGB tuples.

    Fix: the original used a mutable default argument (``{}``), a classic
    Python pitfall where the dict is shared across calls.

    locked_pos maps (col, row) -> color for settled blocks; entries outside
    the 20x10 board (e.g. pieces locked above the top) are ignored, matching
    the original behavior.
    """
    if locked_pos is None:
        locked_pos = {}
    grid = [[(0, 0, 0) for _ in range(10)] for _ in range(20)]
    for (x, y), color in locked_pos.items():
        if 0 <= y < 20 and 0 <= x < 10:
            grid[y][x] = color
    return grid
def get_block():
    """Spawn a random piece at the top-middle spawn column."""
    template = random.choice(shapes)
    return Shape(5, 0, template)
def shape_format(shape):
    """Translate a piece's current rotation template into board cells.

    Returns (col, row) pairs for every 'X' in the active rotation state,
    shifted left by 2 and up by 4 to compensate for the template padding.
    """
    template = shape.shape[shape.rot % len(shape.shape)]
    cells = [
        (shape.x + col, shape.y + row)
        for row, line in enumerate(template)
        for col, ch in enumerate(line)
        if ch == 'X'
    ]
    return [(cx - 2, cy - 4) for cx, cy in cells]
def valid_space(shape, grid):
    """Return True if every visible cell of *shape* sits on an empty grid cell.

    Cells above the board (row <= -1) are always allowed so a fresh piece can
    enter from the top.  Improvement: membership testing now uses a set
    (O(1) per cell) instead of a flattened list (O(rows*cols) per cell).
    """
    empty_cells = {
        (j, i)
        for i in range(20)
        for j in range(10)
        if grid[i][j] == (0, 0, 0)
    }
    for pos in shape_format(shape):
        if pos not in empty_cells and pos[1] > -1:
            return False
    return True
def check_lost(positions):
    """A block locked above the visible board (row < 1) means game over."""
    return any(y < 1 for _, y in positions)
def next_shape(shape, surface):
    """Preview the upcoming piece to the right of the play field."""
    font = pygame.font.SysFont('comicsans', 30)
    label = font.render('Next Shape', 1, YELLOW)

    sx = UPPER_LEFT_X + PLAYAREA_WIDTH + 50
    sy = UPPER_LEFT_Y + PLAYAREA_HEIGHT/2 - 100

    template = shape.shape[shape.rot % len(shape.shape)]
    for row, line in enumerate(template):
        for col, ch in enumerate(line):
            if ch == 'X':
                cell = (sx + col*SHAPE_SIZE, sy + row*SHAPE_SIZE,
                        SHAPE_SIZE, SHAPE_SIZE)
                pygame.draw.rect(surface, shape.color, cell, 0)

    surface.blit(label, (sx + 10, sy - 30))
def clear_rows(grid, locked):
    """Remove every completed row, drop the surviving blocks, return the count.

    Fix: the original shifted only keys above the *topmost* cleared row, all
    by the total cleared count, so blocks sitting between two non-contiguous
    cleared rows were never moved down.  Each surviving block now drops by
    the number of cleared rows strictly below it.

    *grid* is the rendered 20x10 board; *locked* ((col, row) -> color) is
    mutated in place, matching the original contract.
    """
    full_rows = [i for i in range(len(grid)) if (0, 0, 0) not in grid[i]]
    if not full_rows:
        return 0

    cleared = set(full_rows)
    survivors = {}
    for (x, y), color in locked.items():
        if y in cleared:
            continue  # this block vanishes with its row
        drop = sum(1 for r in cleared if r > y)
        survivors[(x, y + drop)] = color

    locked.clear()
    locked.update(survivors)
    return len(full_rows)
def main(win):
    """Run the game loop on window *win* until the player quits or tops out."""
    # (col, row) -> color for every settled block.
    locked_pos = {}
    grid = create_grid(locked_pos)
    change_piece = False
    game_running = True
    curr_piece = get_block()
    next_piece = get_block()
    timer = pygame.time.Clock()
    fall_time = 0
    fall_speed = 0.27  # seconds per one-row gravity step
    while game_running:
        # Rebuild the board from the locked blocks every frame.
        grid = create_grid(locked_pos)
        fall_time += timer.get_rawtime()
        timer.tick()
        # Gravity: drop the piece one row each fall_speed interval.
        if fall_time/1000 > fall_speed:
            fall_time = 0
            curr_piece.y += 1
            # Hit the stack or the floor: undo the move and lock next pass.
            if not(valid_space(curr_piece, grid)) and curr_piece.y > 0:
                curr_piece.y -= 1
                change_piece = True
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                game_running = False
                pygame.display.quit()
            # Each move/rotation is tried and rolled back if it collides.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    curr_piece.x -= 1
                    if not valid_space(curr_piece, grid):
                        curr_piece.x += 1
                if event.key == pygame.K_RIGHT:
                    curr_piece.x += 1
                    if not valid_space(curr_piece, grid):
                        curr_piece.x -= 1
                if event.key == pygame.K_UP:
                    curr_piece.rot += 1
                    if not valid_space(curr_piece, grid):
                        curr_piece.rot -= 1
                if event.key == pygame.K_DOWN:
                    curr_piece.y += 1
                    if not valid_space(curr_piece, grid):
                        curr_piece.y -= 1
        shape_pos = shape_format(curr_piece)
        # Paint the falling piece onto the frame's grid (visible cells only).
        for i in range(len(shape_pos)):
            x, y = shape_pos[i]
            if y > -1:
                grid[y][x] = curr_piece.color
        if change_piece:
            # Lock the piece, promote the preview piece, and clear full rows.
            for pos in shape_pos:
                p = (pos[0], pos[1])
                locked_pos[p] = curr_piece.color
            curr_piece = next_piece
            next_piece = get_block()
            change_piece = False
            clear_rows(grid, locked_pos)
        set_window(win, grid)
        next_shape(next_piece, win)
        pygame.display.update()
        # A block locked above the visible board ends the game.
        if check_lost(locked_pos):
            game_running = False
            #pygame.display.quit()
# Module entry: create the window and start the game immediately on run.
window = pygame.display.set_mode((WINDOW_WIDTH, WINDOW_HEIGHT))
pygame.display.set_caption('Tetris')
main(window)
| true |
43521e51bd9e1a019d03e1a7e3a3629e207ed76c | Python | GKE20/210CTGregEvans | /week 7 question 13 pseudocode.py | UTF-8 | 981 | 3.46875 | 3 | [] | no_license | //Pseudocode
class node
def initialise(node.no):
name <- node
        id <- no //individual node identification
    def getID(): //The ID will be used for the position of the nodes
return ID //in the adjacency list
def getName():
return name
class graph
def initialise():
nodes <- []
        nodeNumber <- 0 //nodeNumber will create an individual number for each node that is added
adjlist = []
//Adding a new node function
def addingNode(node):
newNode <- new Node(node,nodenumber)
nodenumber <- nodenumber +1 //adding the nodenumber to the nodes for identification
nodes.add(newNode)
//Adding an edge function
def addingEdge(nodenumber1,nodenumber2):
adjlist[nodenumber1.getID]
add(node2)
        adjlist[nodenumber2.getID]
add(node1) //finding the ID of the nodes
def getadjlist()
return adjlist
| true |
c96f4bd595a839c27cc0d9be5eea784b67f063f2 | Python | morelandjs/elora | /tests/test_elora.py | UTF-8 | 6,062 | 3.265625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
import numpy as np
from elora import Elora
def assert_almost_equal(x, y, TINY=1e-4):
    """
    Fail unless x and y agree to within an absolute tolerance of TINY.
    """
    difference = abs(x - y)
    assert difference < TINY
def test_class_init():
    """
    Constructor and fit() bookkeeping: examples are stored sorted by time.
    """
    model = Elora(0)

    # Fit a single comparison first.
    single_time = np.datetime64('now')
    outcome = np.random.uniform(-10, 10)
    model.fit(single_time, 'alpha', 'beta', outcome)

    # Then refit with a batch of 100 comparisons at shuffled times.
    batch_times = np.arange(100).astype('datetime64[s]')
    winners = np.repeat('alpha', 100)
    losers = np.repeat('beta', 100)
    outcomes = np.random.normal(0, 10, size=100)
    np.random.shuffle(batch_times)
    model.fit(batch_times, winners, losers, outcomes)

    stored = model.examples

    # Batch replaces the history; length matches the batch size.
    assert stored.shape == (100,)

    # Comparisons must come back sorted by time.
    assert np.array_equal(
        np.sort(stored.time), stored.time)

    # First/last update timestamps match the batch extremes.
    assert model.first_update_time == batch_times.min()
    assert model.last_update_time == batch_times.max()
def test_equilibrium_rating():
    """
    Check that ratings regress to equilibrium rating.

    Fix: ``np.float`` was a deprecated alias for the builtin (NumPy 1.20)
    and is removed in NumPy >= 1.24; use ``float()`` on a scalar draw.
    """
    init_rating = float(np.random.uniform(-10, 10))
    equilibrium_rating = float(np.random.uniform(-10, 10))

    class EloraTest(Elora):
        def initial_state(self, time, label):
            return {'time': time, 'rating': init_rating}

        @property
        def equilibrium_rating(self):
            return equilibrium_rating

        def regression_coeff(self, elapsed_time):
            return 1e-3

    samples = 10**4
    times = np.arange(samples).astype('datetime64[s]')
    labels1 = np.repeat('alpha', samples)
    labels2 = np.repeat('beta', samples)
    values = np.random.random(samples)

    for commutes in [True, False]:
        k = 1e-2
        elora = EloraTest(k, commutes=commutes)
        elora.fit(times, labels1, labels2, values)

        # check equilibrium rating for label1
        rating_alpha = elora.get_rating(times[-1], 'alpha')
        assert_almost_equal(rating_alpha, elora.equilibrium_rating, TINY=k)

        # check equilibrium rating for label2
        rating_beta = elora.get_rating(times[-1], 'beta')
        assert_almost_equal(rating_beta, elora.equilibrium_rating, TINY=k)
def test_regression_coeff():
    """
    Ratings halve once more than one second passes without an update.
    """
    sec = np.timedelta64(1, 's')
    step = np.random.randint(0, 100)

    class EloraTest(Elora):
        @property
        def equilibrium_rating(self):
            return 0

        def regression_coeff(self, elapsed_time):
            return 0.5 if elapsed_time > sec else 1

    for commutes in [True, False]:
        model = EloraTest(np.random.rand(1), commutes=commutes)

        times = np.linspace(0, 1000, 100).astype('datetime64[s]')
        winners = np.repeat('alpha', 100)
        losers = np.repeat('beta', 100)
        outcomes = np.random.uniform(-10, 10, 100)
        model.fit(times, winners, losers, outcomes)

        # Query one second after an update, then a second later: the
        # regression coefficient of 0.5 must have been applied once.
        probe = times[step] + sec
        for label in ('alpha', 'beta'):
            before = model.get_rating(probe, label)
            after = model.get_rating(probe + sec, label)
            assert after == 0.5 * before
def test_rating_conservation():
    """
    With commutes=False the label ratings must sum to N * equilibrium.
    """
    start_rating = np.random.uniform(low=-10, high=10)
    eq_rating = np.random.uniform(low=-10, high=10)
    k_factor = np.random.uniform(low=0.01, high=1)

    class EloraTest(Elora):
        @property
        def equilibrium_rating(self):
            return eq_rating

        def initial_rating(self, time, label):
            return start_rating

    model = EloraTest(k_factor, commutes=False)

    times = np.linspace(0, 1000, 100).astype('datetime64[s]')
    winners = np.repeat('alpha', 100)
    losers = np.repeat('beta', 100)
    outcomes = np.random.uniform(-30, 30, size=100)
    model.fit(times, winners, losers, outcomes)

    # Probe conservation at ten random query times.
    probes = np.random.uniform(0, 1000, size=10).astype('datetime64[s]')
    for when in probes:
        total = sum(model.get_rating(when, label) for label in model.labels)
        assert_almost_equal(total, eq_rating * model.labels.size)
def test_get_rating():
    """
    get_rating interpolates the stored record at the query time.
    """
    model = Elora(0)

    # Seed the model with one comparison so the label exists.
    now = np.datetime64('now')
    outcome = np.random.uniform(-10, 10)
    model.fit(now, 'alpha', 'beta', outcome)

    # Overwrite alpha's history with three known checkpoints an hour apart.
    one_hour = np.timedelta64(1, 'h')
    model.record['alpha'] = np.rec.array(
        [(now - one_hour, 1), (now, 2), (now + one_hour, 3)],
        dtype=[('time', 'datetime64[s]'), ('rating', 'float')]
    )

    # Querying at a checkpoint returns the rating in force just before it.
    assert_almost_equal(model.get_rating(now, 'alpha'), 1)
    assert_almost_equal(model.get_rating(now + one_hour, 'alpha'), 2)
def test_rate():
    """
    One observed outcome moves the two ratings by +k and -k.
    """
    k = np.random.rand()
    model = Elora(k)

    # alpha beats beta at t0, then loses at t1.
    times = np.arange(2).astype('datetime64[s]')
    winners = np.repeat('alpha', 2)
    losers = np.repeat('beta', 2)
    model.fit(times, winners, losers, [1, -1])

    # First update: rating change = k * (observed - prior) = k * (1 - 0).
    history = model.record
    assert history['alpha'].rating[0] == k
    assert history['beta'].rating[0] == -k
| true |
38fba1d5b9d7bc383ba8ad7d13a6d1e1ccbf3abd | Python | pseudonode/pynet_free_course | /week5/ex2.py | UTF-8 | 439 | 2.90625 | 3 | [] | no_license | #! /usr/bin/env python
from __future__ import print_function, unicode_literals
from random import randint
def netgen(network='10.10.10.', mask='/24'):
    """Print a pseudo-random host address on *network* with CIDR *mask*.

    The host octet is drawn uniformly from 1-254, skipping the network (0)
    and broadcast (255) addresses.  (Removed a dead commented-out print that
    referenced an undefined ``octet`` variable.)
    """
    ip = network + str(randint(1, 254)) + mask
    print('The assigned IP address is: {}'.format(ip))
# Demo: print a 50-character divider, then a generated address, for three
# networks.  Fix: the original wrote ``print('-') * 50`` which multiplies
# print()'s None return value (TypeError with the imported print_function);
# the repetition belongs inside the call.
print('-' * 50)
netgen()
print('-' * 50)
netgen('25.25.25.')
print('-' * 50)
netgen(network='20.20.20.')
| true |