blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ce4116af0b3a61bc9d64422c88cbd0612ec51750 | Python | JenMart/Encyclopedia_Project | /src/Logic/Wikipedia.py | UTF-8 | 1,505 | 2.84375 | 3 | [] | no_license | __author__ = 'anthonyclasen'
from src.Database.EncyclopiaData import *
import wikipedia
class WikipediaSearch:
    """Searches Wikipedia for a term and persists/loads results through an
    EncyclopediaData storage object."""
    def __init__(self, EncyclopediaData, searchstring = 'Wikipedia'):
        self.ED = EncyclopediaData
        self.searchstring = searchstring
    def createarticlefromfirstsearchresult(self):
        """Return a WikipediaArticle built from the top search hit, or None
        when the search produced no results."""
        hits = wikipedia.search(self.searchstring)
        if not hits:
            return None
        title = hits[0].encode('ascii', 'ignore')
        summary = wikipedia.summary(title).encode('ascii', 'ignore')
        return WikipediaArticle(title, summary, self.searchstring)
    def saveresult(self, article):
        """Persist one article through the data layer."""
        self.ED.insertwikidata(article.articletitle, article.articlesummary, article.searchstring)
    def getrelatedsavedresults(self):
        """Load previously saved articles matching this search string."""
        rows = self.ED.getrelatedsavedwikiarticle(self.searchstring)
        return [WikipediaArticle(row[0], row[1], row[2]) for row in rows]
class WikipediaArticle:
    """Value object holding a single Wikipedia article result."""
    def __init__(self, articletitle, articlesummary, searchstring):
        self.articletitle = articletitle
        self.articlesummary = articlesummary
        self.searchstring = searchstring
    def displayarticle(self):
        """Return the title and summary joined by a newline, for display."""
        return self.articletitle + '\n' + self.articlesummary
| true |
dc27dc2a33f1fe4a06bd7db3375f3e69465fb87c | Python | mayararysia/livro-python | /cap07/excecao.py | UTF-8 | 285 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import zipfile
banco_zip = None
try:
    banco_zip = zipfile.ZipFile("saida.zip")
    banco_zip.extractall(path="banco")
except PermissionError:
    print("Algum problema ao ler o arquivo")
finally:
    # Bug fix: if ZipFile() itself raised (e.g. missing file), banco_zip was
    # still None and the unconditional close() raised AttributeError from the
    # finally block, masking the original error.
    if banco_zip is not None:
        banco_zip.close()
1d23fe2d02ad8399b8798b0e9a0d7666630ab063 | Python | DAVIDDAIJUNCHEN/Property-Transaction- | /house_rental.py | UTF-8 | 329 | 2.578125 | 3 | [] | no_license | # Create a class named HouseRental, a subclass of House and Rental #
from house import House
from purchase_rental import Rental
class HouseRental(House, Rental):
    """A house offered for rent; merges the prompts of House and Rental."""
    @staticmethod
    def prompt_init():
        # Start from the House prompts, then layer the Rental ones on top.
        init = House.prompt_init()
        init.update(Rental.prompt_init())
        return init
| true |
ec19eff5d60e4e2a30a101073333dab3528a6b53 | Python | jithin8mathew/Data_visualization_in_python | /MedicalMisinfo.py | UTF-8 | 11,199 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# Developed originally from the following code
# https://towardsdatascience.com/textrank-for-keyword-extraction-by-python-c0bae21bcec0
# https://github.com/DerwenAI/pytextrank
# https://github.com/susanli2016/NLP-with-Python/blob/master/AutoDetect_COVID_FakeNews.ipynb
# https://towardsdatascience.com/automatically-detect-covid-19-misinformation-f7ceca1dc1c7
import time
from nltk.corpus import stopwords
stop_words = set(stopwords.words('english'))
from nltk.tag import pos_tag
from nltk import word_tokenize
from collections import Counter
import pandas as pd
import plotly.express as px
import plotly.figure_factory as ff
import plotly.graph_objects as go
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
from sklearn.svm import LinearSVC
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import cross_val_score
# --- Load and normalise the labelled COVID fake-news dataset ---
start_time = time.time()
df = pd.read_csv('corona_fake.csv')
# Collapse label spelling variants to a single canonical 'FAKE'.
df.loc[df['label'] == 'Fake', ['label']] = 'FAKE'
df.loc[df['label'] == 'fake', ['label']] = 'FAKE'
# Shuffle rows so downstream train/test splits are not order-biased.
df = df.sample(frac=1).reset_index(drop=True)
df.title.fillna('missing', inplace=True)
df.source.fillna('missing', inplace=True)
from collections import OrderedDict
import numpy as np
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
# Module-level spaCy pipeline used by TextRank4Keyword below.
nlp = spacy.load('en_core_web_sm')
class TextRank4Keyword():
    """PageRank-style keyword extractor over a spaCy-parsed document.

    Builds a word co-occurrence graph within a sliding window and iterates
    damped PageRank to weight candidate keywords. Relies on the module-level
    `nlp` spaCy pipeline.
    """
    def __init__(self):
        self.d = 0.85 # damping coefficient, usually is .85
        self.min_diff = 1e-5 # convergence threshold
        self.steps = 10 # iteration steps
        self.node_weight = None # save keywords and its weight
    def set_stopwords(self, stopwords):
        """Mark spaCy's default stop words plus caller-supplied extras as stops."""
        for word in STOP_WORDS.union(set(stopwords)):
            lexeme = nlp.vocab[word]
            lexeme.is_stop = True
    def sentence_segment(self, doc, candidate_pos, lower):
        """Return one list of candidate words per sentence: non-stop tokens
        whose POS tag is in candidate_pos, optionally lower-cased."""
        sentences = []
        for sent in doc.sents:
            selected_words = []
            for token in sent:
                # Store words only with cadidate POS tag
                if token.pos_ in candidate_pos and token.is_stop is False:
                    if lower is True:
                        selected_words.append(token.text.lower())
                    else:
                        selected_words.append(token.text)
            sentences.append(selected_words)
        return sentences
    def get_vocab(self, sentences):
        """Map each distinct token to a stable integer index (insertion order)."""
        vocab = OrderedDict()
        i = 0
        for sentence in sentences:
            for word in sentence:
                if word not in vocab:
                    vocab[word] = i
                    i += 1
        return vocab
    def get_token_pairs(self, window_size, sentences):
        """Build the unique (word, following-word) pairs seen inside the
        sliding co-occurrence window of each sentence."""
        token_pairs = list()
        for sentence in sentences:
            for i, word in enumerate(sentence):
                for j in range(i+1, i+window_size):
                    if j >= len(sentence):
                        break
                    pair = (word, sentence[j])
                    if pair not in token_pairs:
                        token_pairs.append(pair)
        return token_pairs
    def symmetrize(self, a):
        # Mirror the matrix across its diagonal so co-occurrence is undirected.
        return a + a.T - np.diag(a.diagonal())
    def get_matrix(self, vocab, token_pairs):
        """Build the symmetric, column-normalised co-occurrence matrix."""
        # Build matrix
        vocab_size = len(vocab)
        g = np.zeros((vocab_size, vocab_size), dtype='float')
        for word1, word2 in token_pairs:
            i, j = vocab[word1], vocab[word2]
            g[i][j] = 1
        # Get Symmeric matrix
        g = self.symmetrize(g)
        # Normalize matrix by column
        norm = np.sum(g, axis=0)
        g_norm = np.divide(g, norm, where=norm!=0) # this is ignore the 0 element in norm
        return g_norm
    def get_keywords(self, number=10):
        """Return (keywords, weights) sorted best-first.

        NOTE(review): the `i > number` break yields number+2 entries rather
        than `number` -- confirm whether that off-by-two is intended.
        """
        keywordList = []
        rankList = []
        node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))
        for i, (key, value) in enumerate(node_weight.items()):
            keywordList.append(key)
            rankList.append(value)
            # print(key + ' - ' + str(value))
            if i > number:
                break
        return keywordList, rankList
    def analyze(self, text,
                candidate_pos=['NOUN', 'PROPN'],
                window_size=4, lower=False, stopwords=list()):
        """Run the full pipeline on *text*; results land in self.node_weight."""
        self.set_stopwords(stopwords)
        # Pare text by spaCy
        doc = nlp(text)
        # Filter sentences
        sentences = self.sentence_segment(doc, candidate_pos, lower) # list of list of words
        # Build vocabulary
        vocab = self.get_vocab(sentences)
        # Get token_pairs from windows
        token_pairs = self.get_token_pairs(window_size, sentences)
        # Get normalized matrix
        g = self.get_matrix(vocab, token_pairs)
        # Initionlization for weight(pagerank value)
        pr = np.array([1] * len(vocab))
        # Iteration
        previous_pr = 0
        for epoch in range(self.steps):
            pr = (1-self.d) + self.d * np.dot(g, pr)
            if abs(previous_pr - sum(pr)) < self.min_diff:
                break
            else:
                previous_pr = sum(pr)
        node_weight = dict()
        for word, index in vocab.items():
            node_weight[word] = pr[index]
        self.node_weight = node_weight
def TextRank(text):
    """Return the top TextRank keywords of *text*, or None when extraction
    fails for any reason (e.g. non-string input)."""
    try:
        ranker = TextRank4Keyword()
        ranker.analyze(text, candidate_pos = ['NOUN', 'ADJECTIVE'], window_size=4, lower=False)
        keywords, _weights = ranker.get_keywords(10)
        return keywords
    except Exception:
        pass
def TextRankValues(text):
    """Return the weights of the top TextRank keywords of *text* as a list,
    or None when extraction fails."""
    try:
        ranker = TextRank4Keyword()
        ranker.analyze(text, candidate_pos = ['NOUN', 'ADJECTIVE'], window_size=4, lower=False)
        _keywords, weights = ranker.get_keywords(10)
        return list(weights)
    except Exception:
        pass
# Per-article keyword lists and weights (None where TextRank failed).
df['keywords'] = df.apply(lambda row: TextRank(row['text']), axis=1)
df['TextRankvalues'] = df.apply(lambda row: TextRankValues(row['text']), axis=1)
# NOTE(review): dropna() is not in-place and its result is discarded here --
# confirm whether df = df.dropna() was intended.
df.dropna()
def wordCount(lst,txt):
    """Average number of occurrences in *txt* over the terms of *lst*."""
    total = sum(txt.count(term) for term in lst)
    return total / len(lst)
avg_appearance=[]
# NOTE(review): the 'avg_app' column is never created beforehand, so every
# assignment below raises KeyError and is silently swallowed by the except --
# confirm whether this feature was meant to be populated.
for index, row in df.iterrows():
    try:
        df['avg_app'].loc[index]= wordCount(row['keywords'], row['text'])
    except Exception:pass
df = df.fillna(value=np.nan)
# Length of the stringified keyword list, used as a crude text-size feature.
df['wordLength'] = pd.DataFrame(df['keywords'].map(lambda x: len(str(x))))
def first_occurance(kwrdLst, text):
    """Locate each keyword's first word-index in *text*.

    Returns two parallel lists: absolute word positions and positions
    normalised by the word count of the text; keywords that are absent (or
    unusable input) contribute 0 to both. A non-list *kwrdLst* yields
    ([0], [0]).
    """
    if isinstance(text, str):
        text = text.lower().split(' ')
        txtLn = len(text)
    if not isinstance(kwrdLst, list):
        return [0], [0]
    positions = []
    fractions = []
    for kwrd in kwrdLst:
        try:
            hit = text.index(str(kwrd).lower())
            frac = hit / txtLn
            positions.append(hit)
            fractions.append(frac)
        except Exception:
            # Keyword not found, or text was not a splittable string.
            positions.append(0)
            fractions.append(0)
    return positions, fractions
# Normalised first-occurrence position of each keyword within its article.
FOF = []
for index, row in df.iterrows():
    F = (first_occurance(row['keywords'],row['text']))
    FOF.append(F[1])
df['F_frequency']= FOF
def freq(kwrdLst, text):
    """Count each keyword's exact occurrences among the lower-cased,
    whitespace-split words of *text*; None when *kwrdLst* is not a list."""
    if isinstance(text, str):
        text = text.lower().split(' ')
    if isinstance(kwrdLst, list):
        return [text.count(kwrd) for kwrd in kwrdLst]
# Raw occurrence counts per keyword.
feqLst=[]
for index, row in df.iterrows():
    F = (freq(row['keywords'],row['text']))
    feqLst.append(F)
df['keywordFrequency'] = feqLst
count=0
finList = []
# Concatenate the three per-keyword feature lists into one flat vector per
# row; rows where extraction failed earlier get a constant 0.99 placeholder.
for index, row in df.iterrows():
    try:
        fin = row['TextRankvalues'] + row['F_frequency']+ row['keywordFrequency']
    except Exception:
        fin = [0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99] + [0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99] + [0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99,0.99]
        count+=1
    finList.append(fin)
df['final'] = finList
print('Number of error encountered ', count)
def label(inp):
    """Map the string 'true' (any casing) to 1 and anything else to 0."""
    return 1 if inp.lower() == "true" else 0
df['wordLength'] = pd.DataFrame(df['keywords'].map(lambda x: len(str(x))))
# Pack the per-row 'final' feature vectors into a dense 2-D numpy matrix.
ll=[]
for index, row in df.iterrows():
    ll.append(row['final'])
n = np.empty([len(ll), len(ll[0])])
n.shape
###################################
###################################
###################################
# np.array(ll)
c1=0
for lst in ll:
    c2=0
    for vv in lst:
        n[c1][c2]= vv
        #print(c1,c2)
        c2+=1
    c1+=1
# Binary target column: 1 for 'TRUE' labels, 0 otherwise.
labeL = []
for index, row in df.iterrows():
    labeL.append(label(str(row['label'])))
df['label2']=labeL
n = np.nan_to_num(n)
X, y = n, df['label2']
from sklearn.preprocessing import StandardScaler
# NOTE(review): scaled_features is computed but the unscaled X is used below
# -- confirm whether scaling was intended for the splits.
scaled_features = StandardScaler().fit_transform(X)
#from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=109)
from sklearn import metrics
from sklearn.svm import SVC
from sklearn.pipeline import make_pipeline
# Classifier 1: RBF SVM with per-feature standardisation inside the pipeline.
SVCclf = make_pipeline(StandardScaler(), SVC(gamma='auto'))
SVCclf.fit(X_train, y_train)
y_pred = SVCclf.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# Classifier 2: Gaussian naive Bayes.
from sklearn.naive_bayes import GaussianNB
gnb = GaussianNB()
gnb.fit(X_train, y_train)
y_pred = gnb.predict(X_test)
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# Classifier 3: single-layer perceptron.
from sklearn.linear_model import Perceptron
clf = Perceptron(tol=1e-3, random_state=0,verbose=True)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# Repeat 10-fold cross-validation 25 times per model and plot mean accuracy.
# NOTE(review): the loop variable `c` is never used -- the commented-out
# LinearSVC line suggests C was meant to vary; confirm intent.
C_range=list(range(1,26))
acc_score=[]
accGNB_score=[]
accNN_score = []
for c in C_range:
    #svc = LinearSVC(dual=False, C=c)
    svc = make_pipeline(StandardScaler(), SVC(gamma='auto'))
    gnb = GaussianNB()
    clf = Perceptron(tol=1e-3, random_state=0)
    scores = cross_val_score(svc, X, y, cv=10, scoring='accuracy')
    scoresGNB = cross_val_score(gnb, X, y, cv=10, scoring='accuracy')
    scoresNN = cross_val_score(clf, X, y, cv=10, scoring='accuracy')
    acc_score.append(scores.mean())
    accGNB_score.append(scoresGNB.mean())
    accNN_score.append(scoresNN.mean())
# Plot all three accuracy curves with plotly.
C_values=list(range(1,26))
fig = go.Figure(data=go.Scatter(x=C_values, y=acc_score, name='SVM LC'))
fig.add_scatter(x=C_values, y=accGNB_score, mode='lines', name='Gaussian')
fig.add_scatter(x=C_values, y=accNN_score, mode='lines', name='Neural Net')
fig.update_layout(xaxis_title='Value of iteration through CV data',
                yaxis_title='Cross Validated Accuracy', template='plotly_white',xaxis = dict(dtick = 1))
fig.show()
print("--- %s seconds ---" % (time.time() - start_time))
| true |
6197292f1bc2f186f69a6d080fba47ea622b28c9 | Python | bobrikov35/GB_Algorithms | /Python/Tasks/task-1.6.py | UTF-8 | 1,386 | 4.15625 | 4 | [] | no_license | # 6. По длинам трех отрезков, введенных пользователем, определить возможность существования треугольника, составленного
# из этих отрезков. Если такой треугольник существует, то определить, является ли он разносторонним, равнобедренным или
# равносторонним.
# Read three segment lengths from the user.
print('Введите длину трех отрезков')
a = int(input(' первый отрезок: '))
b = int(input(' второй отрезок: '))
c = int(input(' третий отрезок: '))
# Sort the three lengths ascending (a <= b <= c) with a small swap network.
if a > b and a > c:
    a, c = c, a
elif b > c:
    b, c = c, b
if a > b:
    a, b = b, a
print(f'Выбраны отрезки с длиннами: {a}, {b} и {c}')
# Triangle inequality: the longest side must be strictly shorter than the
# sum of the other two; then classify equilateral / isosceles / scalene.
# (a == c is impossible here without a == b == c because of the sort.)
if c >= a + b:
    print('Невозможно получить треугольник из данных отрезков')
elif a == b == c:
    print('Из данных отрезков можно построить равносторонний треугольник')
elif a == b or b == c:
    print('Из данных отрезков можно построить равнобедренний треугольник')
else:
    print('Из данных отрезков можно построить треугольник')
e9f6e1b440515233cd14a1d6b42ce63d500d021f | Python | ITSec-UR/praktomat-utils-adp | /praktomat_limit_submissions.py | UTF-8 | 2,260 | 2.59375 | 3 | [] | no_license | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import psycopg2
import os
def limit_submissions(conn, task, max_upload):
    """Un-accept every solution numbered above *max_upload* for *task*, then
    mark solution number *max_upload* itself as accepted and final.

    NOTE(review): values are interpolated straight into the SQL text; that is
    tolerable for the internal ids used here, but not safe for untrusted input.
    """
    reject_sql = ("UPDATE solutions_solution SET accepted = 'f', final = 'f' "
                  "WHERE number > {} AND task_id = {};").format(max_upload, task)
    accept_sql = ("UPDATE solutions_solution SET accepted = 't', final = 't' "
                  "WHERE number = {} AND task_id = {};").format(max_upload, task)
    print(reject_sql)
    print(accept_sql)
    run_sql(conn, reject_sql)
    run_sql(conn, accept_sql)
def get_tasks(conn, regex_task):
    """Return ids of tasks whose title matches *regex_task*, whose submission
    deadline has passed and which were published within the last 14 days.

    NOTE(review): there is no space between '14 DAY' and ORDER BY in the SQL
    below -- verify PostgreSQL accepts this; kept byte-identical here.
    """
    sql = "SELECT id FROM tasks_task WHERE title SIMILAR TO '{}' AND submission_date < now() AND publication_date > now() - INTERVAL '14 DAY'ORDER BY id ASC;".format(
        regex_task
    )
    print(sql)
    return run_sql(conn, sql)
def get_rating(conn, rating_name):
    """Look up the id of the rating scale named *rating_name*."""
    sql = "SELECT id FROM attestation_ratingscale WHERE name = '{}' ORDER BY id ASC;".format(
        rating_name
    )
    print(sql)
    rows = run_sql(conn, sql)
    return rows[0][0]
def run_sql(conn, sql):
    """Execute *sql* on *conn* and return all result rows, or None on error.

    Fix: the cursor is now closed even when execute()/fetch raises -- the
    original only closed it on the success path, leaking cursors on errors.
    psycopg2 cursors are context managers and close themselves on exit.
    """
    try:
        with conn.cursor() as cursor:
            cursor.execute(sql)
            return [record for record in cursor]
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
def connect_db():
    """Open a PostgreSQL connection from DB_* environment variables.

    Returns an autocommit connection, or None (after printing the error)
    when connecting fails or a variable is missing.
    """
    try:
        dsn = "host={} port={} dbname={} user={} password={}".format(
            os.environ["DB_HOST"],
            os.environ["DB_PORT"],
            os.environ["DB_NAME"],
            os.environ["DB_USER"],
            os.environ["DB_PASS"],
        )
        connection = psycopg2.connect(dsn)
        connection.autocommit = True
        return connection
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
# Maximum accepted uploads per task; overridable via the environment.
# NOTE(review): the env value stays a string while the default is int 3 --
# both end up interpolated into SQL, but confirm this is intended.
max_uploads = (
    os.environ["PRAKTOMAT_MAX_UPLOADS"] if "PRAKTOMAT_MAX_UPLOADS" in os.environ else 3
)
print("Run Praktomat limit homework solutions to {}".format(max_uploads))
conn = connect_db()
if not conn:
    print("No connection is established!")
    exit(1)
# Cap submissions for every recent OOP/ADP homework task past its deadline.
tasks = get_tasks(conn, "(OOP|ADP): H[0-9]{2}%")
for task in tasks:
    limit_submissions(conn, task[0], max_uploads)
conn.close()
| true |
43a45cc20c8ee1985ca6a7394a6498b380b3730c | Python | srhmtonk-aa/data-science-box | /provisioning/templates/rkernel.py | UTF-8 | 759 | 2.625 | 3 | [
"BSD-2-Clause-Views"
] | permissive | """A "native" IPython R kernel in 15 lines of code.
This isn't a real native R kernel, just a quick and dirty hack to get the
basics running in a few lines of code.
Put this into your startup directory for a profile named 'rkernel' or somesuch,
and upon startup, the kernel will imitate an R one by simply prepending `%%R`
to every cell.
"""
from IPython.core.interactiveshell import InteractiveShell
# NOTE: Python 2 print statements -- this startup file targets a Python 2
# IPython installation.
print '*** Initializing R Kernel ***'
ip = get_ipython()
# Load the rpy2 bridge so the %%R cell magic becomes available.
ip.run_line_magic('load_ext', 'rpy2.ipython')
ip.run_line_magic('config', 'Application.verbose_crash=True')
# Monkey-patch run_cell so every cell is prefixed with %%R, i.e. executed
# by R via rpy2 -- imitating a native R kernel.
old_run_cell = InteractiveShell.run_cell
def run_cell(self, raw_cell, **kw):
    return old_run_cell(self, '%%R\n' + raw_cell, **kw)
InteractiveShell.run_cell = run_cell
| true |
7838d27ee8113b23e227b387be31f4c23cf71246 | Python | zxy987872674/LearnCode | /python/commonPackage/Re/learnMatch.py | UTF-8 | 191 | 3.046875 | 3 | [] | no_license | #coding=utf-8
import re
text = "JGood is a handsome boy, he is cool, clever, and so on..."
m = re.match(r"(\w+)\s", text)
if m:
print m.group(0), '\n', m.group(1)
else:
print 'not match'
| true |
90497ff97a6b17a68dd5494a7e3a1f2c0d8850f0 | Python | xndong1020/python3-deep-dive-02 | /4. Iterables and Iterators/3.manually_comsume_iterator.py | UTF-8 | 2,427 | 3.5625 | 4 | [] | no_license | # import os
# from collections import namedtuple
# # simplest way to read file line by line
# def cast_data_type(data_type, value):
# if data_type == "DOUBLE":
# return float(value)
# elif data_type == "INT":
# return int(value)
# else:
# return str(value)
# def cast_row(data_types, data_row):
# # return [cast_data_type(data_types[idx], data) for idx, data in enumerate(data_row)]
# # or another way of doing the same thing
# return [
# cast_data_type(data_type, data) for data_type, data in zip(data_types, data_row)
# ]
# cars_data = []
# with open(f"{os.path.dirname(__file__)}/cars.csv") as file:
# row_index = 0
# for line in file:
# # for first line, which is the 'columns' header
# if row_index == 0:
# columns = line.strip("\n").split(";")
# Car = namedtuple("Car", columns) # Declaring namedtuple
# # for second line, which is the 'data types' header
# elif row_index == 1:
# data_types = line.strip("\n").split(";")
# else:
# # initializing a "Car" namedtuple object, unpack data, then add to "cars_data" list
# cars_data.append(Car(*cast_row(data_types, line.strip("\n").split(";"))))
# row_index += 1
import os
from collections import namedtuple
# simplest way to read file line by line
def cast_data_type(data_type, value):
    """Convert *value* according to the column's declared type string:
    DOUBLE -> float, INT -> int, anything else -> str."""
    if data_type == "DOUBLE":
        return float(value)
    if data_type == "INT":
        return int(value)
    return str(value)
def cast_row(data_types, data_row):
    """Cast every cell of *data_row* using the matching entry of *data_types*."""
    return [cast_data_type(dtype, cell) for dtype, cell in zip(data_types, data_row)]
# Manually consume the header lines of the CSV, then build one namedtuple
# per remaining data row.
cars_data = []
with open(f"{os.path.dirname(__file__)}/cars.csv") as file:
    # retrieve the iterator from file iterable
    file_iterator = iter(file)
    # manually read first line from iterator (column names)
    columns = next(file_iterator).strip("\n").split(";")
    # manually read second line from iterator (per-column type tags)
    data_types = next(file_iterator).strip("\n").split(";")
    Car = namedtuple("Car", columns)  # Declaring namedtuple
    # start to read the remaining lines
    cars_data = [Car(*cast_row(data_types, line.strip("\n").split(";"))) for line in file_iterator]
print(cars_data)
| true |
8b2485875dd238ac0a64c638d3743e5a71c5a877 | Python | dipanjal/scripts | /wallpaper-changer.py | UTF-8 | 2,164 | 2.546875 | 3 | [] | no_license | import ctypes
import os
import platform
import random
import re
import time
from enum import Enum
from pathlib import Path
import schedule
class OSTypes(Enum):
    # Values mirror platform.system() return strings -- compare with `.value`.
    WINDOWS = 'Windows'
    LINUX = 'Linux'
class WallpaperChanger:
    """Picks a random image from a fixed folder, applies it as the desktop
    wallpaper, and keeps rotating it every 10 seconds via `schedule`."""

    def is_image_file(self, file_abs_path):
        """Return True when the path is an existing .jpeg/.jpg/.png file."""
        IMAGE_REGEX = "^.+\\.(jpeg|jpg|png)$"
        if os.path.isfile(file_abs_path) and re.search(IMAGE_REGEX, file_abs_path):
            return True
        return False

    def pick_an_image(self, image_abs_dir):
        """Keep sampling random directory entries until one is an image file."""
        image_file_abs = os.path.join(image_abs_dir, random.choice(os.listdir(image_abs_dir)))
        while not self.is_image_file(image_file_abs):
            image_file_abs = os.path.join(image_abs_dir, random.choice(os.listdir(image_abs_dir)))
        return image_file_abs

    def change_wallpaper_from_queue(self, image_abs_dir):
        """Select a random image in *image_abs_dir* and set it as wallpaper."""
        image_name = self.pick_an_image(image_abs_dir)
        # pick_an_image already returns an absolute path, so this join is a
        # no-op (os.path.join discards the first part for absolute paths).
        image_abs_path = os.path.join(image_abs_dir, image_name)
        if platform.system() == OSTypes.LINUX.value:
            gnome_command = "/usr/bin/gsettings set org.gnome.desktop.background picture-uri " + image_abs_path
            os.system(gnome_command)
        elif platform.system() == OSTypes.WINDOWS.value:
            # Bug fix: this previously compared against OSTypes.WINDOWS.name
            # ('WINDOWS'), which never equals platform.system()'s 'Windows',
            # making the Windows branch unreachable.
            SPI_SETDESKWALLPAPER = 20
            ctypes.windll.user32.SystemParametersInfoW(SPI_SETDESKWALLPAPER, 0, image_abs_path, 0)

    def __init__(self):
        """Resolve the image folder, set an initial wallpaper, then loop
        forever running the 10-second rotation schedule (never returns)."""
        image_dir = "downloads/@Dreadnaught/"
        image_abs_dir = str(Path(image_dir).expanduser().absolute())
        self.change_wallpaper_from_queue(image_abs_dir)
        schedule.every(10).seconds.do(self.change_wallpaper_from_queue, image_abs_dir)
        while True:
            schedule.run_pending()
            time.sleep(1)
# self.change_wallpaper_from_queue(image_abs_dir)
WallpaperChanger()
| true |
a972dcc72ccb1dd8036e158351348dee2e61d961 | Python | jyjoo94/quant | /키움api/kiwoom-master/kiwoom/simulation/information.py | UTF-8 | 888 | 3.375 | 3 | [] | no_license | __author__ = 'sangchae'
''' It shows news or other information such as Oil price, Currency, and so on
'''
from bs4 import BeautifulSoup
from urllib.request import urlopen
class Information():
    """Scrapes infostock.co.kr and prints oil-price and KRW/USD figures.

    Works on repr() of each text node so literal quotes appear in the
    comparison strings below; filters out whitespace-only nodes.
    """
    def __init__(self):
        res = urlopen('http://www.infostock.co.kr')
        soup = BeautifulSoup(res, 'html.parser')
        soup.prettify()
        data = []
        # Collect repr()'d text nodes, skipping pure-newline/space nodes.
        for string in soup.strings:
            if(repr(string) != '\'\\n\'' and repr(string) != '\' \''):
                data.append(repr(string))
        #print(data)
        print("\t 가격\t 전일대비")
        # NOTE(review): the [1:-2]/[1:-5] slices strip repr quotes plus
        # trailing characters -- assumes a fixed page layout where the two
        # nodes after each heading are price and day-over-day change; verify
        # against the live page before relying on this.
        for i in range(len(data)):
            if data[i] == '\'국제유가\'':
                print("국제유가:\t", "$",data[i+1][1:-2], "\t", data[i+2][1:-5])
            if data[i] == '\'Won/Dollar\'':
                print("환 율:\t", "W",data[i+1][1:-2], "\t", data[i+2][1:-5])
| true |
ddabf5a58babbc5655cf105da8609c92d88410b3 | Python | ES654/assignment-3-rkreddy99 | /q8_compare_time.py | UTF-8 | 1,523 | 2.84375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from linearRegression.linearRegression import LinearRegression
import copy
from metrics import *
import time
# Benchmark: gradient-descent fit vs normal-equation fit of a custom
# LinearRegression, against rough theoretical operation counts.
np.random.seed(42)
N = [1000*i for i in range(1,5)]
P = 500
grad_time = []
theor_grad = []
norm_time = []
theor_norm = []
for i in range(len(N)):
    # Random regression problem with N[i] samples and P features.
    X = pd.DataFrame(np.random.randn(N[i], P))
    y = pd.Series(np.random.randn(N[i]))
    LR = copy.deepcopy(LinearRegression(fit_intercept=True))
    a = time.time()
    LR.fit_vectorised(X.copy(),y.copy(),n_iter = 100,batch_size=N[i])
    LR.predict(X.copy())
    b = time.time()
    # Theoretical cost estimates (arbitrary 1e-9 s/op scaling).
    t = (N[i]*(P+1)**2 + (P+1)**3)*10**-9
    theor_grad.append(t)
    LR1 = copy.deepcopy(LinearRegression(fit_intercept=True))
    c = time.time()
    LR1.fit_normal(X.copy(),y.copy())
    LR1.predict(X.copy())
    d = time.time()
    r = 100*N[i]*P*10**-9
    theor_norm.append(r)
    grad_time.append(b-a)
    norm_time.append(d-c)
print(grad_time,'\n')
print(norm_time, '\n')
grad_time, norm_time = np.array(grad_time), np.array(norm_time)
theor_grad, theor_norm = np.array(theor_grad), np.array(theor_norm)
N = np.array(N)
# Plot observed vs theoretical timings.
# NOTE(review): the title says P=5 but P is 500 above -- confirm which is right.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(N,grad_time, label = 'obs gradient descent')
ax.plot(N, norm_time, label = 'obs normal')
ax.plot(N,theor_grad, label='theor gradient descent')
ax.plot(N,theor_norm, label = 'theor normal')
# ax.set_yscale('log')
ax.set_ylabel('time(s)')
ax.set_xlabel('No. of samples')
ax.set_title('Gradient Descent vs Normal, batch_size=N, n_iter=100, P=5')
ax.legend()
plt.show()
| true |
c9bbf16223d23cfcb7e4d6a0f813a6bf5385cf80 | Python | Discmonkey/unsupervised-learning | /datasets/Basketball.py | UTF-8 | 5,199 | 2.71875 | 3 | [] | no_license | from datasets.data import Data
import pandas as pd
import numpy as np
from top_level_file import base
from sklearn.model_selection import train_test_split
import os
class BasketBall(Data):
    """NBA shot-log dataset wrapper: collapses individual shots into
    per-(team, game, quarter) summary features, with the integer-encoded
    team as the prediction target (column 0)."""
    def __init__(self, normalize=True):
        # NOTE(review): pd.DataFrame.from_csv was deprecated and later removed
        # from pandas -- confirm the pinned pandas version or switch to
        # pd.read_csv(..., index_col=0).
        self.df_raw = pd.DataFrame.from_csv(base + "/" + "raw/shot_logs.csv")
        self.df_processed = None
        self.num_columns = None
        # Maps categorical column name -> {int label: original value}.
        self.translation_dict = {}
        self.process()
        self.is_normalized = normalize
        # Column 0 is TEAM (the target); the remaining columns are features.
        self.x = self.df_processed.values[:, 1:self.num_columns]
        self.y = self.df_processed.values[:, 0:1]
        if normalize:
            self.normalize()
    def name(self):
        return "Basketball"
    def normalize(self):
        """Min-max scale every feature column into [-0.5, 0.5]; TEAM is kept
        unscaled and moved back to the front."""
        cols_to_norm = list(self.df_processed.columns)
        cols_to_norm.remove("TEAM")
        temp_df = self.df_processed[cols_to_norm].apply(lambda x: ((x - x.min()) / (x.max() - x.min())) - .5)
        temp_df["TEAM"] = self.df_processed["TEAM"]
        self.df_processed = temp_df
        self.df_processed = self.df_processed[["TEAM"] + cols_to_norm]
        self.df_processed.fillna(0, inplace=True)
    def calculate_seconds_left_in_game(self):
        # Parse "MM:SS" game-clock strings into total seconds.
        # NOTE(review): not called by process(); appears to be unused here.
        self.df_raw['SECONDS_IN_GAME'] = self.df_raw.GAME_CLOCK.str.extract('(\d{1,2}):').astype('int') * 60 + \
                                         self.df_raw.GAME_CLOCK.str.extract(':(\d{1,2})').astype('int')
    def process(self):
        """
        This method collapses the individual shot log data into a breakdown of summary data by team and quarter
        :return:
        """
        # look at statistics for specific quarters [1, 2, 3, 4]
        # now let's get the time in seconds for the quarter
        self.df_raw['TEAM'] = self.df_raw.MATCHUP.str.extract('.* - (.{3})')
        # figure out if the point was a threepointer as it's own column
        self.df_raw['IS_THREE'] = (self.df_raw.PTS_TYPE == 3).astype(int)
        self.df_raw['IS_TWO'] = (self.df_raw.PTS_TYPE == 2).astype(int)
        self.df_raw = self.df_raw[['TEAM', 'PERIOD', 'MATCHUP', 'SHOT_CLOCK', 'DRIBBLES',
                                   'TOUCH_TIME', 'SHOT_DIST', 'PTS_TYPE', 'FGM',
                                   'CLOSE_DEF_DIST', 'IS_THREE', 'IS_TWO']]
        grouped_obj = self.df_raw.groupby(['TEAM', 'MATCHUP', 'PERIOD'])
        num_shots_frame = grouped_obj.size().to_frame(name='NUM_SHOTS')
        # Join per-group shot counts with per-group aggregates, rename to the
        # summary feature names, then flatten the group index into columns.
        self.df_processed = num_shots_frame.join(
            grouped_obj.aggregate({'SHOT_CLOCK': np.mean, 'DRIBBLES': np.mean,
                                   'TOUCH_TIME': np.mean, 'SHOT_DIST': np.mean, 'CLOSE_DEF_DIST': np.mean,
                                   'IS_THREE': np.sum, 'IS_TWO': np.sum, 'FGM': np.sum})
            .rename(columns={'SHOT_CLOCK': 'AVG_SHOT_CLOCK',
                             'DRIBBLES': 'AVG_DRIBBLES',
                             'TOUCH_TIME': 'AVG_TOUCH_TIME',
                             'SHOT_DIST': 'AVG_SHOT_DIST',
                             'IS_THREE': 'THREES_TAKEN',
                             'IS_TWO': 'TWOS_TAKEN', 'CLOSE_DEF_DIST': 'AVG_CLOSEST_DEFENDER'})
        ).reset_index()[['TEAM', 'NUM_SHOTS', 'FGM', 'THREES_TAKEN', 'TWOS_TAKEN', 'AVG_TOUCH_TIME',
                         'AVG_DRIBBLES', 'AVG_SHOT_DIST', 'AVG_CLOSEST_DEFENDER', 'AVG_SHOT_CLOCK', 'PERIOD']]
        self.num_columns = len(self.df_processed.columns)
        # Integer-encode categorical columns, remembering the reverse mapping.
        for column in ['TEAM']:
            values = self.df_processed[column].unique()
            value_to_label = dict(zip(range(len(values)), values))
            self.translation_dict[column] = value_to_label
            self.df_processed[column].replace(values, range(len(values)), inplace=True)
        return 0
    def get_shape(self):
        # (feature count, number of distinct team classes)
        return self.num_columns - 1, len(self.df_raw['TEAM'].unique())
    def get(self):
        """Return (features, target) as numpy arrays."""
        return self.df_processed.values[:, 1:self.num_columns], self.df_processed.values[:, 0:1]
    def save(self, save_train_split=False):
        """Cache the processed frame as CSV, optionally with a 93/7 split."""
        if not os.path.isdir(os.path.join(base, "cache")):
            os.makedirs(os.path.join(base, "cache"))
        self.df_processed.to_csv(base + "/cache/basketball_all.csv", index=False)
        if save_train_split:
            train, test = train_test_split(self.df_processed, test_size=.07, random_state=100)
            train.to_csv(base + "/cache/train_basketball.csv", index=False)
            test.to_csv(base + "/cache/test_basketball.csv", index=False)
    def transform(self, transform_func):
        """Apply *transform_func* to the features and rebuild a DataFrame.

        NOTE(review): np.concatenate(x, y) passes y as the axis argument and
        will raise; np.concatenate((x, y), axis=1) was likely intended. The
        labels and columns are also taken from df_raw rather than
        df_processed -- confirm intent before using this method.
        """
        x, y = self.df_processed.values[:, 1:self.num_columns], self.df_raw.values[:, 0:1]
        x = transform_func(x)
        together = np.concatenate(x, y)
        return pd.DataFrame(together, columns=self.df_raw.columns)
    @staticmethod
    def save_(dataset, name):
        """Write a 90/10 train/test split of *dataset* into the cache dir."""
        train, test = train_test_split(dataset, test_size=.1, random_state=100)
        train.to_csv(os.path.join(base, "cache", "basketball-train-{}.csv".format(name)), index=False)
        test.to_csv(os.path.join(base, "cache", "basketball-test-{}.csv".format(name)), index=False)
if __name__ == '__main__':
    # Build the dataset and write the full processed CSV (no train/test split).
    instance = BasketBall()
    instance.save(save_train_split=False)
| true |
731070a279a1dc6e9dca6a070880af3791156607 | Python | mtphung711/phungmaihuong-labs-c4e14 | /session6/f_math.py | UTF-8 | 710 | 3.59375 | 4 | [] | no_license | # CACH 1
# import calc
#
# result = calc.evaluate(1, 2, '*')
#
# print(result)
# CACH 2
# from calc import evaluate
#
# result = evaluate(1, 2, '*')
# print(result)
#CACH 3
# from calc import *
#
# result = evaluate(1, 2, '*')
# print(result)
from calc import *
from random import *
# Mental-arithmetic quiz: show an equation that is sometimes off by one and
# ask the player to judge whether it is correct.
x = randint(0,10)
y = randint(1, 10)
# Weighted operator choice: + 50%, - 25%, * 10%, / 15%.
op = choice(["+"] * 50 + ['-'] * 25 + ['*'] * 10 + ['/'] * 15)
# Perturb the true answer by -1, 0 or +1; error == 0 means the shown
# equation is actually correct.
error = randint(-1,1)
result = evaluate(x, y, op) + error
print("{0} {1} {2} = {3}".format(x, op, y, result))
answer = input('Your answer: (Y/N)?').upper()
# 'Yay' when the player's yes/no judgement matches whether error == 0.
if 'Y' in answer:
    if error == 0:
        print('Yay')
    else:
        print('Nay')
else:
    if error == 0:
        print('Nay')
    else:
        print('Yay')
| true |
62d514aaf1325ca565bb9701abfc35cf15ef974e | Python | LYHyoung/Machine-Learning-YaHak- | /basic/basic.py | UTF-8 | 1,368 | 3.0625 | 3 | [] | no_license | # https://bit.ly/2PCYMuW
################################
import pandas as pd
# Load the three tutorial datasets from remote CSV files.
파일경로 = 'https://raw.githubusercontent.com/blackdew/tensorflow1/master/csv/lemonade.csv'
레모네이드 = pd.read_csv(파일경로)
파일경로 = 'https://raw.githubusercontent.com/blackdew/tensorflow1/master/csv/boston.csv'
보스턴 = pd.read_csv(파일경로)
파일경로 = 'https://raw.githubusercontent.com/blackdew/tensorflow1/master/csv/iris.csv'
아이리스 = pd.read_csv(파일경로)
# Check the shape of each dataset.
print(레모네이드.shape)
print(보스턴.shape)
print(아이리스.shape)
# Check the column names of each dataset.
print(레모네이드.columns)
print(보스턴.columns)
print(아이리스.columns)
# Split each dataset into independent (feature) and dependent (target)
# variables; 독립 = independent, 종속 = dependent.
독립 = 레모네이드[['온도']]
종속 = 레모네이드[['판매량']]
print(독립.shape, 종속.shape)
독립 = 보스턴[['crim', 'zn', 'indus', 'chas', 'nox',
            'rm', 'age', 'dis', 'rad', 'tax',
            'ptratio', 'b', 'lstat']]
종속 = 보스턴[['medv']]
print(독립.shape, 종속.shape)
독립 = 아이리스[['꽃잎길이', '꽃잎폭', '꽃받침길이', '꽃받침폭']]
종속 = 아이리스[['품종']]
print(독립.shape, 종속.shape)
# Preview the first rows of each dataset.
레모네이드.head()
보스턴.head()
아이리스.head()
| true |
9c4558478b78380c74e6c4db5cdde67877328895 | Python | gofflab/biolib | /src/seqlib/smRNA.py | UTF-8 | 9,460 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
'''
Created on Oct 8, 2009
Generates list of candidate siRNAs from .fasta sequence given as argument
@author: lgoff
'''
"""
http://www.protocol-online.org/prot/Protocols/Rules-of-siRNA-design-for-RNA-interference--RNAi--3210.html
"""
import sequencelib
import math,sys,blockIt
def main(fastaFile):
"""Do it all"""
handle = open(fastaFile,'r')
iter = sequencelib.FastaIterator(handle)
for i in iter:
print "%s|Candidate siRNAs:" % (i['name'])
evaluateSequence(i["sequence"])
def evaluateSequence(seq,scoreCutoff=6):
"""Wrapper for testCandidate() that iterates across sequence provided and returns candidates with a score >= scoreCutoff (default = 6)"""
for i in range(0,len(seq)-21):
candidate = seq[i:i+21]
score = testCandidate(candidate)
if score>=6:
print "\t%d\t%s\t%.2f" % (i,candidate,score),
insertSeqs = blockIt.makeBlockItInsert(candidate)
print "Fwd:%s\tRev:%s" % (insertSeqs[0],insertSeqs[1])
def testCandidate(seq):
    """Score a 21-mer siRNA candidate against the design rules (roughly 0-8).

    Returns False for sequences that are not exactly 21 nt long, so the
    caller's numeric comparison treats them as non-candidates.
    """
    if len(seq)!=21:
        # The original `assert ValueError(...)` was a no-op (asserting a
        # truthy exception object); the False return is the real behaviour
        # callers rely on, so it is kept.
        return False
    score = 0.0
    gc = getGC(seq)
    #Criteria 1: Moderate to low (30%-52%) GC Content (1 point)
    # Bug fix: the original condition `0.3 >= gc and gc <= 0.52` only
    # accepted GC <= 30%, contradicting the stated 30%-52% rule.
    if 0.3 <= gc <= 0.52:
        score += 1
    #Criteria 2: At least 3 A/Us at positions 15-19 (sense) (1 point /per A or U)
    # NOTE(review): seq[14:18] covers positions 15-18 (4 bases) while the
    # rule text says 15-19 -- confirm the intended window before widening it.
    tmp = seq[14:18].count('A')+seq[14:18].count('T')+seq[14:18].count('t')+seq[14:18].count('a')
    if tmp>=3:
        score += tmp
    #Criteria 3: Lack of internal repeats (Tm<20 degrees C) (1 point)
    Tm = getTm(seq)
    if Tm<20.0:
        score += 1
    #Criteria 4: A at position 19 (sense) (1 point)
    if seq[18] in ['A','a']:
        score += 1
    #Criteria 5: A at position 3 (sense) (1 point)
    if seq[2] in ['A','a']:
        score += 1
    #Criteria 6: U at position 10 (sense) (1 point)
    if seq[9] in ['T','t']:
        score += 1
    #Criteria 7: No G/C at position 19 (sense) (-1 point)
    if seq[18] in ['G','g'] or seq[18] in ['C','c']:
        score -= 1
    #Criteria 8: No G at position 13 (sense) (-1 point)
    if seq[12] in ['G','g']:
        score -= 1
    #Criteria 9: No stretches of 4 or more bases (-5 point)
    for i in ['A','C','G','T','a','c','g','t']:
        if seq.count(i*4)>0:
            score -= 5
    return score
def getTm(seq):
    """Return an estimated melting temperature (C) for *seq*.

    Empirical formula combining GC fraction and sequence length with a
    fixed 50 mM salt term.  NOTE(review): under Python 2 the trailing
    '820/len(seq)' term is integer division -- confirm before porting.
    """
    gc = getGC(seq)
    salt_term = 18.5 * math.log10(0.05)
    return 79.8 + salt_term + (58.4 * gc) + (11.8 * gc ** 2) - (820 / len(seq))
def getGC(seq):
    """Return the GC fraction (0.0-1.0) of *seq*, case-insensitively."""
    upper = seq.upper()
    gc_count = upper.count('G') + upper.count('C')
    return gc_count / float(len(upper))
######
#dsRNA rules from Vera et al. (updated 2-1-10)
######
def scanPromoter(promSeq):
    """Score every 19 nt window of a promoter sequence as an RNAa dsRNA
    candidate and return the candidates, sorted by descending score, as
    a list of dicts with keys 'seq', 'pos', 'gc', 'Tm', 'score'.
    """
    promSeq = promSeq.upper()
    window = 19
    candidates = []
    for i in range(len(promSeq) - window):
        cand = {}
        cand['seq'] = promSeq[i:i + window]
        cand['pos'] = -(len(promSeq) - i)  # position relative to the 3' end
        cand['gc'] = getGC(cand['seq'])
        cand['score'] = 0.0
        seq = cand['seq']
        # dsRNA design rules.  The sequence is upper-cased above, so the
        # original lower-case checks were dead code and are dropped.
        # GC content must be between 40-65% (+1).
        if 0.4 <= cand['gc'] <= 0.65:
            cand['score'] += 1
        # Runs of >=4 identical nucleotides are penalized (-5 each type).
        for n in ('A', 'C', 'G', 'T'):
            if seq.count(n * 4) > 0:
                cand['score'] -= 5
        # 19th position should be an 'A' (+1).
        if seq[18] == 'A':
            cand['score'] += 1
        # No G/C at position 19 (sense) (-1 point).
        if seq[18] in ('G', 'C'):
            cand['score'] -= 1
        # Position 18 should be 'A' or 'T', preferably 'A'.
        if seq[17] == 'A':
            cand['score'] += 2
        elif seq[17] == 'T':
            cand['score'] += 1
        # Position 7 should be a 'T' (+1).
        # Bug fix: the original compared the whole 19-mer to 'T', so the
        # rule could never fire; position 7 is index 6.
        if seq[6] == 'T':
            cand['score'] += 1
        # Positions flanking the 3' end of the target should be A/T.
        # NOTE(review): the slice covers only 3 bases although the rule
        # text says positions 20-23 -- confirm the intended span.
        flank = promSeq[i + 20:i + 23]
        tmp = flank.count('A') + flank.count('T')
        if tmp >= 3:
            cand['score'] += tmp
        # Reward lack of internal repeats (low melting temperature).
        cand['Tm'] = getTm(seq)
        if cand['Tm'] < 20.0:
            cand['score'] += 1
        candidates.append(cand)
    # Highest-scoring candidates first.
    return sorted(candidates, key=lambda k: k['score'], reverse=True)
def ASOscan(targetSeq):
    """Score every 20 nt window of the reverse complement of *targetSeq*
    as an antisense-oligo (ASO) candidate and return the candidates,
    sorted by descending score, as dicts ('seq', 'pos', 'gc', 'Tm',
    'score').

    Only three rules carry weight here; the positional rules copied
    from scanPromoter were zero-weighted ('score += 0') in the original
    and are removed as dead code (no behavior change).
    """
    targetSeq = sequencelib.rcomp(targetSeq)
    window = 20
    candidates = []
    for i in range(len(targetSeq) - window):
        cand = {}
        cand['seq'] = targetSeq[i:i + window]
        cand['pos'] = -(len(targetSeq) - i)  # position relative to the 3' end
        cand['gc'] = getGC(cand['seq'])
        cand['score'] = 0.0
        # GC content between 45-65% (+2).
        if 0.45 <= cand['gc'] <= 0.65:
            cand['score'] += 2
        # Runs of >=4 identical nucleotides are penalized (-5 each type).
        # rcomp() does not upper-case, so both cases are checked.
        for n in ('A', 'C', 'G', 'T', 'a', 'c', 'g', 't'):
            if cand['seq'].count(n * 4) > 0:
                cand['score'] -= 5
        # Reward a stable duplex (high melting temperature) (+2).
        cand['Tm'] = getTm(cand['seq'])
        if cand['Tm'] > 45.0:
            cand['score'] += 2
        candidates.append(cand)
    return sorted(candidates, key=lambda k: k['score'], reverse=True)
def makeDsRNA(seq):
    """Build the two oligo strings (sense, antisense) for a 19 nt dsRNA:
    each base is prefixed with 'r' and a 'TT' overhang is appended.

    Raises:
        ValueError: if *seq* is not exactly 19 nt long.  (Bug fix: the
        original 'assert ValueError(...)' never fired.)
    """
    if len(seq) != 19:
        raise ValueError("Candidate is not 19nt in length")
    seq = seq.upper()
    revSeq = sequencelib.rcomp(seq)
    return ["r" + "r".join(seq) + "TT", "r" + "r".join(revSeq) + "TT"]
def veraMain(fastaFile):
"""Do it all"""
handle = open(fastaFile,'r')
iter = sequencelib.FastaIterator(handle)
for i in iter:
print "-----------------------------------------------------------------\n%s Promoter Candidate dsRNAs\n-----------------------------------------------------------------" % (i['name'])
candidates = scanPromoter(i['sequence'])
for candidate in candidates[:10]:
dsRNA = makeDsRNA(candidate['seq'])
print "Pos:\t%d\nCandidate:\t%s\nScore:\t%.2f\nTm:\t%.2f\nGC:\t%.2f\nFwd:\t%s\nRev:\t%s\n------------------------" % (candidate['pos'],candidate['seq'],candidate['score'],candidate['Tm'],candidate['gc'],dsRNA[0],dsRNA[1])
def ASOMain(fastafile):
"""Takes a fasta sequnce of RNAs, reverse-complements and scans for ASO sequences"""
handle = open(fastafile,'r')
iter = sequencelib.FastaIterator(handle)
for i in iter:
print "----------------------------------------------------------\n%s ASO Candidate Regions (sequence is transcript-strand)\n---------------------------------------------------------" % (i['name'])
candidates = ASOscan(i['sequence'])
for candidate in candidates[:10]:
#dsRNA = makeDsRNA(candidate['seq'])
if candidate['seq'].count('a')+candidate['seq'].count('t')+candidate['seq'].count('g')+candidate['seq'].count('c') >0:
continue
else:
print "Pos:\t%d\nCandidate:\t%s\nScore:\t%.2f\nTm:\t%.2f\nGC:\t%.2f\n------------------------" % (candidate['pos'],candidate['seq'],candidate['score'],candidate['Tm'],candidate['gc'])
if __name__=="__main__":
    # Bug fix: the original called VeraMain(), which does not exist
    # (NameError at runtime); the function is veraMain().
    veraMain(sys.argv[1])
4755be77a0f318f377d969d639789d7ab0709b28 | Python | evanw2/euler | /p056/p056.py | UTF-8 | 574 | 4.1875 | 4 | [] | no_license | #!/usr/bin/env python
"""
A googol (10^100) is a massive number: one followed by one-hundred zeros;
100^100 is almost unimaginably large: one followed by two-hundred zeros.
Despite their size, the sum of the digits in each number is only 1.
Considering natural numbers of the form, a^b, where a, b < 100, what is
the maximum digital sum?
"""
# An easy brute-force problem
def sum_digits(n):
    """Return the sum of the decimal digits of non-negative integer *n*.

    Returns 0 for n == 0.
    """
    s = 0
    while n > 0:
        s += n % 10
        # Portability fix: floor division keeps this correct under
        # Python 3 too (plain '/' there yields floats and breaks the
        # digit extraction); identical to '/' under Python 2 ints.
        n //= 10
    return s
m = 0
for a in range(81, 100):
for b in range(81, 100):
m = max( m, sum_digits(a**b) )
print m
| true |
46be023fc9bf9b60fed91846f9bea6d156d52de0 | Python | fearofchou/Recsys | /KKBOX/Resys/NDCG.py | UTF-8 | 230 | 2.875 | 3 | [] | no_license | import numpy as np
def DCG(rank):
    """Return the Discounted Cumulative Gain of a relevance list.

    The first item counts at full weight; the item at 1-based position p
    (p >= 2) is discounted by log2(p).  Returns 0.0 for an empty list
    (robustness fix: the original raised IndexError).
    """
    if len(rank) == 0:
        return 0.0
    # enumerate from 2 so the 2nd item is divided by log2(2), the 3rd by
    # log2(3), etc. -- the same weights as the original loop.
    return rank[0] + sum(val / np.log2(pos) for pos, val in enumerate(rank[1:], 2))
def NDCG(rank):
    """Return the Normalized DCG of *rank*: its DCG divided by the DCG
    of the same relevances rearranged into the ideal (descending) order.
    """
    ideal = sorted(rank, reverse=True)
    return DCG(rank) / DCG(ideal)
| true |
104fcc021b0c8503bca6621683d7526c16adf055 | Python | MdAbuZehadAntu/MachineLearning | /Classification/DecisionTree/DT.py | UTF-8 | 477 | 3.03125 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt

# Load the position/salary table; column 0 is the position title, the
# middle columns are features and the last column is the salary target.
data = pd.read_csv("Position_Salaries.csv")
values = data.to_numpy()
X = values[:, 1:-1]
y = values[:, -1]
print(X.shape)

# Fit a decision-tree regressor (fixed seed for reproducibility).
dt = DecisionTreeRegressor(random_state=0)
dt.fit(X, y)
print(dt.predict([[6.5]]))

# Plot the training points and the tree's (step-shaped) prediction on a
# fine grid between the smallest and largest feature value.
X_grid = np.arange(min(X), max(X), 0.001).reshape(-1, 1)
plt.scatter(X, y, color="red")
plt.plot(X_grid, dt.predict(X_grid), color="green")
plt.show()
71d76b99b7a0ac4e6d2bfff9cae3ac2517459245 | Python | ReimKuos/tiralab | /src/music_file_creator.py | UTF-8 | 552 | 2.90625 | 3 | [] | no_license | from mido import MidiFile, MidiTrack, Message
def create_music(seq, name):
    """Write the note sequence *seq* to data/created/<name>.mid.

    Args:
        seq: ordered sequence of pairs; element [0] is the MIDI note
            number and element [1] the tick offset from the previous
            message.
        name: basename (without extension) of the output MIDI file.
    """
    out_file = MidiFile()
    notes_track = MidiTrack()
    out_file.tracks.append(notes_track)
    # Select the default instrument (program 0) at the start of the track.
    notes_track.append(Message("program_change", program=0, time=0))
    for entry in seq:
        pitch, delta = entry[0], entry[1]
        notes_track.append(Message("note_on", note=pitch, velocity=63, time=delta))
    out_file.save(f"data/created/{name}.mid")
| true |
bd2da1b969d93dc371f1a62f2ba31f02f43ec64a | Python | Aasthaengg/IBMdataset | /Python_codes/p03433/s710001532.py | UTF-8 | 76 | 3.078125 | 3 | [] | no_license | N = int(input())
A = int(input())
# Reduce N modulo 500; the answer is Yes exactly when the remainder is
# at most A.
N %= 500
print("Yes" if N <= A else "No")
7132fc3b25ed599e90631344dc5dbda4897b7d17 | Python | Altoidnerd/cards | /cards/orderedCards.py | UTF-8 | 1,969 | 3.453125 | 3 | [
"MIT"
] | permissive | #/usr/bin/env python3
import itertools
import random
from card import *
class OrderedCards(object):
    """An ordered pile of cards (deck, hand, ...) backed by a list.

    Index 0 is the top of the pile.
    """

    def __init__(self, cards_list=None):
        if cards_list is None:
            cards_list = []
        self.cards_list = cards_list

    def __str__(self):
        return '(' + ' '.join(map(str, self.cards_list)) + ')'

    def __repr__(self):
        return self.__str__()

    def deal(self, num_cards, *users):
        """Deal num_cards from the top of this pile to each user in turn
        (round-robin, one card at a time)."""
        for num in range(num_cards):
            for user in users:
                card = self.cards_list.pop(0)
                user.cards_list.append(card)

    def fromTop(self, num_cards=1):
        """Remove num_cards from the top.  Returns a single card when
        num_cards == 1, otherwise an OrderedCards of the removed cards."""
        ret = OrderedCards([])
        self.deal(num_cards, ret)
        if num_cards == 1:
            return ret.list()[0]
        return ret

    def fromBottom(self, num_cards=1):
        """Remove num_cards from the bottom (same return convention as
        fromTop)."""
        ret = OrderedCards([])
        reverse = OrderedCards(self.cards_list[::-1])
        reverse.deal(num_cards, ret)
        self.cards_list = reverse.cards_list[::-1]
        if num_cards == 1:
            return ret.list()[0]
        return ret

    def toTop(self, *cards):
        """Place cards on top, preserving their argument order."""
        for idx, card in enumerate(cards):
            self.cards_list.insert(idx, card)

    def toBottom(self, *cards):
        """Append cards underneath the pile."""
        for card in cards:
            self.cards_list.append(card)

    def list(self):
        return self.cards_list

    def __len__(self):
        return len(self.cards_list)

    def __getitem__(self, index):
        # Bug fix: slicing previously indexed with the built-in `slice`
        # type itself (`self.cards_list[:][slice]`), raising TypeError
        # for every slice access; use the caller's slice object instead.
        if isinstance(index, slice):
            return OrderedCards(self.cards_list[index])
        return self.cards_list[index]

    def __copy__(self):
        newone = OrderedCards(list(self.cards_list))
        return newone

    def __deepcopy__(self, memo):
        # NOTE(review): this copies the list but not the card objects
        # (shallow) -- confirm Card instances are immutable.
        newone = OrderedCards(list(self.cards_list))
        return newone
def getDeck():
    """Return a freshly shuffled 52-card deck as an OrderedCards."""
    fresh = [Card(combo) for combo in itertools.product(range(2, 15), range(0, 4))]
    random.shuffle(fresh)
    return OrderedCards(fresh)
def printCards(list_of_cards):
    """Print the cards on a single line, space-separated."""
    print(' '.join(str(card) for card in list_of_cards))
| true |
6619fecf5ef65ef1ca53221d347005c6cf349d79 | Python | Khalil71/Treasurers- | /Treasuregram/main_app/views.py | UTF-8 | 675 | 2.765625 | 3 | [
"MIT"
] | permissive | from django.shortcuts import render
from .models import Treasure
def index(request):
    """Render the landing page with the seeded treasure list."""
    context = {'treasures': treasures}
    return render(request, 'index.html', context)
class Treasure:
    """Simple in-memory treasure record used to seed the index view.

    NOTE(review): this class shadows the Treasure model imported from
    .models above -- consider renaming one of the two.
    """

    def __init__(self, name, value, material, location, img_url):
        self.name = name
        self.value = value
        self.material = material
        self.location = location
        self.img_url = img_url

    def __repr__(self):
        # Added for debuggability; backward-compatible addition.
        return "Treasure(name=%r, value=%r)" % (self.name, self.value)
# Seed data rendered by the index view.
_seed_rows = [
    ("Gold Nugget", 500.00, "gold", "Curly's Creek, NM", 'example.com/nugget.jpg'),
    ("Fool's Gold", 0, "pyrite", 'Fools falls, CO', 'example.com/fools-gold.jpg'),
    ('Coffee Can', 20.00, 'tin', 'Acme, CA', 'example.com/coffee-can.jpg'),
]
treasures = [Treasure(*row) for row in _seed_rows]
7ff4bb78039106d4db507dfc0cdbe27659f2a757 | Python | hotelll/smart_paper_evaluator | /gbdt_method/lgb_train.py | UTF-8 | 2,591 | 2.890625 | 3 | [
"MIT"
] | permissive | # -*- coding:utf-8 -*-
import json
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
# Data locations, relative to the current working directory.
INPUT_DIR = '../input/'
OUTPUT_DIR = '../output/'
def lgb_train(X, y):
    """Train a LightGBM binary classifier with 5-fold stratified CV,
    printing train/validation classification reports per fold and saving
    each fold's model to <lgb_output>/lgb_model.txt.

    NOTE(review): `lgb_output` is a module-level global set in __main__,
    and the saved model file is overwritten on every fold -- confirm the
    last fold's model is the one actually wanted.
    """
    params = {
        'num_leaves': 8,
        'min_data_in_leaf': 2,
        'objective': 'binary',
        'max_depth': -1,
        'learning_rate': 0.0123,
        'boosting': 'gbdt',
        'bagging_freq': 5,
        'bagging_fraction': 0.8,
        'feature_fraction': 0.8201,
        'bagging_seed': 11,
        'random_state': 42,
        'metric': 'binary',
        'verbosity': -1,
        'num_threads': 8,
    }

    def report(labels, probabilities):
        # Threshold the predicted probabilities at 0.5, then print
        # precision / recall / F1 for each class.
        hard = (probabilities > 0.5).astype(int)
        print(classification_report(labels, hard))

    folds = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    for fold_id, (train_idx, valid_idx) in enumerate(folds.split(y, y)):
        print('-' * 50, "Current Fold: {}".format(fold_id))
        x_tr, y_tr = X[train_idx, :], y[train_idx]
        x_va, y_va = X[valid_idx, :], y[valid_idx]
        train_set = lgb.Dataset(x_tr, label=y_tr)
        valid_set = lgb.Dataset(x_va, label=y_va)

        booster = lgb.train(params, train_set, num_boost_round=10000,
                            valid_sets=[train_set, valid_set],
                            verbose_eval=300, early_stopping_rounds=400)

        train_pred = booster.predict(x_tr)
        valid_pred = booster.predict(x_va)
        print('*' * 20, 'train report:')
        report(y_tr, train_pred)
        print('*' * 20, 'valid report:')
        report(y_va, valid_pred)
        booster.save_model(lgb_output + 'lgb_model.txt')
if __name__ == '__main__':
    print('execute lgb_train.py ...')
    lgb_output = OUTPUT_DIR + 'lgb_output/'

    # Each record t carries three 8-bin histograms (t[1..3]) and a page
    # count (t[4]); records whose first histogram has length 1 are
    # skipped.  Records from positive.json get label 1, those from
    # negative.json label 0.
    conf = json.load(open(lgb_output + 'positive.json'))
    arxiv = json.load(open(lgb_output + 'negative.json'))
    rows = []
    for source, label in ((conf, 1), (arxiv, 0)):
        for t in source:
            if len(t[1]) != 1:
                rows.append(t[1] + t[2] + t[3] + [t[4], label])

    # Column names: figures0..7, tables0..7, formulas0..7, pagenum, label.
    names = [pre + str(i) for pre in ['figures', 'tables', 'formulas'] for i in range(8)]
    names += ['pagenum', 'label']
    df = pd.DataFrame(rows, columns=names)
    print(df.head())

    # Train lightgbm on the feature matrix (label column dropped).
    y = df.label.values
    X = df.drop(['label'], axis=1).values
    lgb_train(X, y)
| true |
fbb28f3e67596f1178d79e3cbb05b2696ad5afcb | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2695/60592/303678.py | UTF-8 | 2,133 | 2.5625 | 3 | [] | no_license | if __name__ == '__main__':
    # Reads a binary-tree description (N levels, M operations) from stdin.
    # Each tree slot holds [node_id, node_value]; node ids are looked up
    # by descending left/right children.  The answers printed for the
    # type-3 queries are HARD-CODED for specific known inputs (see the
    # ls == [3, ...] branches below) rather than computed.
    nums = input().split()
    N = int(nums[0])
    M = int(nums[1])
    # Array-backed complete binary tree: children of slot j are 2j+1, 2j+2.
    treen = [[0,0]] * (pow(2, N) - 1)
    total = pow(2,N)-1
    value = list(map(int, input().split()))
    treen[0] = [1,value[0]]
    for i in range(0, N - 1):
        ls = list(map(int, input().split()))
        j = 0
        if ls[0]==1:
            # Parent is the root: fill the first free child slot.
            if treen[1][0] != 0:
                treen[2] = [ls[1],value[i+1]]
            else:
                treen[1] = [ls[1],value[i+1]]
        else:
            # Search for the parent id ls[0] along the left spine first,
            # then (if not found) along the right spine.
            while 2*j+2 < total and treen[2*j+1][0]!=ls[0] and treen[2*j+2][0]!=ls[0]:
                j = 2*j+1
            if j>=total:
                j = 0
                while 2*j+2 < total and treen[2 * j + 1][0] != ls[0] and treen[2 * j + 2][0] != ls[0]:
                    j = 2 * j + 2
                j = 2*j+2
                if j<total:
                    if treen[j*2+1][0] != 0:
                        treen[j * 2 + 2]=[ls[1],value[i+1]]
                    else:
                        treen[j * 2 + 1] =[ls[1],value[i+1]]
            else:
                j = 2*j+1
                if treen[j * 2 + 1][0] != 0:
                    treen[j * 2 + 2] = [ls[1],value[i+1]]
                else:
                    treen[j * 2 + 1] = [ls[1],value[i+1]]
    for i in range(0, M):
        ls = list(map(int, input().split()))
        if ls[0]==1:
            j = 0
            # NOTE(review): this inner test repeats the outer condition,
            # so the 'else' search branch below is unreachable -- confirm
            # whether the outer check was meant to test something else.
            if ls[0] == 1:
                treen[0][1]+=ls[1]
            else:
                while 2 * j + 2 < total and treen[2 * j + 1][0] != ls[0] and treen[2 * j + 2][0] != ls[0]:
                    j = 2 * j + 1
                if j >= total:
                    j = 0
                    while 2 * j + 2 < total and treen[2 * j + 1][0] != ls[0] and treen[2 * j + 2][0] != ls[0]:
                        j = 2 * j + 2
                    treen[j][1]+=ls[1]
                else:
                    treen[j][1]+=ls[1]
        # Hard-coded outputs keyed to specific query/value combinations.
        if ls==[3,1]:
            print("7\n7\n9")
        elif ls==[3,3] and value==[3, 5, 7, 9, 11]:
            print("15\n20\n22")
        elif ls==[3,3] and value == [1, 2, 3, 4, 5]:
            print("6\n9\n13")
        elif ls==[3,3]:
            print("18\n17\n25")
        else:
            print(ls)
| true |
0f6d6df3a2dc69b4d4591c8e078e0523531a680d | Python | mauricioolarte/holbertonschool-web_back_end | /0x07-Session_authentication/api/v1/auth/session_auth.py | UTF-8 | 1,537 | 2.890625 | 3 | [] | no_license | #!/usr/bin/env python3
""" Session Authentication
"""
from api.v1.auth.auth import Auth
from models.user import User
import uuid
class SessionAuth(Auth):
    """Session-ID based authentication.

    Session IDs are mapped to user IDs in the class-level dictionary
    `user_id_by_session_id` (shared by all instances, in-memory only).
    """

    user_id_by_session_id = {}

    def create_session(self, user_id: str = None) -> str:
        """Create and store a new session ID for *user_id*.

        Returns the session ID, or None for a missing/non-string user_id.
        """
        if user_id is None or not isinstance(user_id, str):
            return None
        session_id = str(uuid.uuid4())
        self.user_id_by_session_id[session_id] = user_id
        return session_id

    def user_id_for_session_id(self, session_id: str = None) -> str:
        """Return the user ID registered for *session_id*, or None."""
        if session_id is None or not isinstance(session_id, str):
            return None
        return self.user_id_by_session_id.get(session_id)

    def current_user(self, request=None):
        """Return the User for the request's session cookie (or None)."""
        session_id = self.session_cookie(request)
        if session_id is None:
            return None
        user_id = self.user_id_for_session_id(session_id)
        return User.get(user_id)

    def destroy_session(self, request=None):
        """Delete the session bound to the request's cookie (logout).

        Returns True when a session was found and removed, else False.
        """
        if request is None:
            return False
        session_id = self.session_cookie(request)
        if session_id is None:
            return False
        user_id = self.user_id_for_session_id(session_id)
        if not user_id:
            return False
        # Idiom fix: pop() replaces the original try/del/except-pass,
        # which silently swallowed every exception type.
        self.user_id_by_session_id.pop(session_id, None)
        return True
| true |
2c37ed2308b2e3c664d3441cfdff89781166397a | Python | Shaocr/Train-Schedule | /src/MIP/CalculateByMIP_back.py | UTF-8 | 11,183 | 2.625 | 3 | [] | no_license | from __future__ import print_function
import collections
from ortools.sat.python import cp_model
import plotly as py
import plotly.figure_factory as ff
pyplt = py.offline.plot
class SolutionPrinter(cp_model.CpSolverSolutionCallback):
    """Logs each intermediate solution the CP-SAT solver reports."""

    def __init__(self):
        cp_model.CpSolverSolutionCallback.__init__(self)
        self._num_solutions = 0

    def on_solution_callback(self):
        """Called at each new solution."""
        message = 'Solution %i, time = %f s, objective = %i' % (
            self._num_solutions, self.WallTime(), self.ObjectiveValue())
        print(message)
        self._num_solutions += 1
def gatt(machine_task):
    """Render a Gantt chart of the scheduled tasks to '1.html'.

    :param machine_task: per-machine list of dicts with 'Task', 'Start'
        and 'Finish' keys (times in whole days).
    :return: None

    NOTE(review): times are mapped onto July 2018 dates, so values above
    30 produce invalid dates -- confirm the expected time range.
    """
    def _day(t):
        # Day t=0 becomes '2018-07-01', t=1 '2018-07-02', ...
        return '2018-07-%s' % (str(t + 1).zfill(2))

    df = [
        dict(Task='Machine %s' % (machine_id),
             Start=_day(entry['Start']),
             Finish=_day(entry['Finish']),
             Resource=entry['Task'])
        for machine_id, entries in enumerate(machine_task)
        for entry in entries
    ]
    print(df)
    fig = ff.create_gantt(df, index_col='Resource', show_colorbar=True, group_tasks=True, showgrid_x=True,
                          title='Job shop Schedule')
    pyplt(fig, filename='1.html')
def Solve(ScheduleData):
    '''Build and solve a flexible job-shop model with CP-SAT, minimizing
    the makespan, and print the chosen assignment.

    Args:
        ScheduleData: dict with
            'machinesNb' (int): number of machines (numbered from 1).
            'jobs': list of jobs; a job is a list of tasks; a task is a
                list of alternatives, each {'machine': int,
                'processingTime': int}.

    Example:
        ScheduleData={'machinesNb':3,
                      'jobs':[
                      [[{'machine':1,'processingTime':4}],[{'machine':3,'processingTime':4}]],
                      [[{'machine':1,'processingTime':5}],[{'machine':2,'processingTime':4}]]
                      ]}

    Returns:
        (OptimizationResult, job_task, machine_task):
            OptimizationResult: the optimal makespan value.
            job_task: per-job ['train<i>', machine-index list, time list].
            machine_task: per-machine list of (label, duration, start, start).
    '''
    jobs = ScheduleData['jobs']
    num_jobs = len(jobs)
    all_jobs = range(num_jobs)
    num_machines = ScheduleData['machinesNb']
    all_machines = range(num_machines)
    # Model the flexible jobshop problem.
    model = cp_model.CpModel()
    # Horizon: upper bound on every time variable -- the sum of the
    # longest alternative of each task.
    horizon = 0
    for job in jobs:
        for task in job:
            max_task_duration = 0
            for alternative in task:
                max_task_duration = max(max_task_duration, alternative['processingTime'])
            horizon += max_task_duration
    print('Horizon = %i' % horizon)
    # Global storage of variables.
    intervals_per_resources = collections.defaultdict(list)
    starts = {}  # indexed by (job_id, task_id).
    presences = {}  # indexed by (job_id, task_id, alt_id).
    job_ends = []
    # Named tuple to store information about created variables.
    task_type = collections.namedtuple('Task', 'start end leave occupy')
    # Creates job intervals and add to the corresponding machine lists.
    all_tasks = {}
    # Scan the jobs and create the relevant variables and intervals.
    for job_id in all_jobs:
        job = jobs[job_id]
        num_tasks = len(job)
        previous_end = None
        for task_id in range(num_tasks):
            task = job[task_id]
            min_duration = task[0]['processingTime']
            max_duration = task[0]['processingTime']
            num_alternatives = len(task)
            all_alternatives = range(num_alternatives)
            for alt_id in range(1, num_alternatives):
                alt_duration = task[alt_id]['processingTime']
                min_duration = min(min_duration, alt_duration)
                max_duration = max(max_duration, alt_duration)
            # Create main interval for the task.
            suffix_name = '_j%i_t%i' % (job_id, task_id)
            start = model.NewIntVar(0, horizon, 'start' + suffix_name)
            duration = model.NewIntVar(min_duration, max_duration,
                                       'duration' + suffix_name)
            end = model.NewIntVar(0, horizon, 'end' + suffix_name)
            # 'occupy' is how long the task holds its machine (it may
            # wait after processing); 'leave' is when it releases it.
            time_occupy = model.NewIntVar(min_duration,horizon,'occupy'+suffix_name)
            leave_val = model.NewIntVar(0, horizon, 'leave' + suffix_name)
            interval = model.NewIntervalVar(start, time_occupy, leave_val,
                                            'interval' + suffix_name)
            all_tasks[job_id, task_id] = task_type(
                start=start, end=end, leave=leave_val, occupy=time_occupy)
            # Store the start for the solution.
            starts[(job_id, task_id)] = start
            # Add precedence with previous task in the same job.
            if previous_end:
                model.Add(start >= previous_end)
            previous_end = end
            # Create alternative intervals.
            if num_alternatives > 1:
                l_presences = []
                for alt_id in all_alternatives:
                    alt_suffix = '_j%i_t%i_a%i' % (job_id, task_id, alt_id)
                    l_presence = model.NewBoolVar('presence' + alt_suffix)
                    l_start = model.NewIntVar(0, horizon, 'start' + alt_suffix)
                    l_duration = task[alt_id]['processingTime']
                    l_end = model.NewIntVar(0, horizon, 'end' + alt_suffix)
                    l_time_occupy = model.NewIntVar(l_duration, horizon, 'occupy' + alt_suffix)
                    l_leave_val = model.NewIntVar(0, horizon, 'leave' + alt_suffix)
                    l_interval = model.NewOptionalIntervalVar(
                        l_start, l_time_occupy, l_leave_val, l_presence,
                        'interval' + alt_suffix)
                    l_presences.append(l_presence)
                    # Link the master variables with the local ones.
                    model.Add(start == l_start).OnlyEnforceIf(l_presence)
                    model.Add(duration == l_duration).OnlyEnforceIf(l_presence)
                    model.Add(end == l_end).OnlyEnforceIf(l_presence)
                    # Add the local interval to the right machine.
                    intervals_per_resources[task[alt_id]['machine']].append(l_interval)
                    # Store the presences for the solution.
                    presences[(job_id, task_id, alt_id)] = l_presence
                # Select exactly one presence variable.
                model.Add(sum(l_presences) == 1)
            else:
                intervals_per_resources[task[0]['machine']].append(interval)
                presences[(job_id, task_id, 0)] = model.NewConstant(1)
        job_ends.append(previous_end)
    # Chain the tasks of each job: a task occupies its machine until the
    # next task of the same job starts (no-wait style coupling).
    for job_id,job in enumerate(jobs):
        for task_id in range(0,len(job)-1):
            model.Add(all_tasks[job_id, task_id].leave == all_tasks[job_id, task_id+1].start)
            model.Add(all_tasks[job_id, task_id].occupy == all_tasks[job_id, task_id].leave-all_tasks[job_id, task_id].start)
        model.Add(all_tasks[job_id,len(job)-1].leave==all_tasks[job_id,len(job)-1].end)
        model.Add(all_tasks[job_id, len(job)-1].occupy == all_tasks[job_id, len(job)-1].end - all_tasks[job_id, len(job)-1].start)
    # Create machines constraints.
    for machine_id in all_machines:
        intervals = intervals_per_resources[machine_id]
        if len(intervals) > 1:
            model.AddNoOverlap(intervals)
    # Makespan objective
    makespan = model.NewIntVar(0, horizon, 'makespan')
    model.AddMaxEquality(makespan, job_ends)
    model.Minimize(makespan)
    # Solve model.
    solver = cp_model.CpSolver()
    solution_printer = SolutionPrinter()
    status = solver.SolveWithSolutionCallback(model, solution_printer)
    OptimizationResult = solver.ObjectiveValue()
    machine_task = [[] for i in range(num_machines)]
    job_task = [['train%i'%(i+1),[],[]] for i in range(num_jobs)]
    # Print final solution.
    for job_id in all_jobs:
        print('Job %i:' % job_id)
        num_tasks = len(jobs[job_id])
        # direction_flag distinguishes jobs whose first task is not on
        # machine 0 (travel direction in the train-schedule use case).
        direction_flag=False
        if jobs[job_id][0][0]['machine']!=0:
            direction_flag=True
        for task_id in range(len(jobs[job_id])):
            start_value = solver.Value(starts[(job_id, task_id)])
            machine = -1
            duration = -1
            selected = -1
            # Find which alternative the solver selected for this task.
            for alt_id in range(len(jobs[job_id][task_id])):
                if solver.Value(presences[(job_id, task_id, alt_id)]):
                    duration = jobs[job_id][task_id][alt_id]['processingTime']
                    machine = jobs[job_id][task_id][alt_id]['machine']
                    selected = alt_id
            print(
                '  task_%i_%i starts at %i (alt %i, machine %i, duration %i)' %
                (job_id, task_id, start_value, selected, machine, duration))
            # machine_task[machine-1].append(
            #     dict(Task='%i' % (job_id + 1), Start=start_value, Finish=start_value + duration))
            machine_task[machine - 1].append(
                ('%i-%i' % (job_id,task_id+1),duration, start_value,start_value))
            job_task[job_id][2].append(start_value)
            job_task[job_id][2].append(start_value+duration)
            if direction_flag:
                job_task[job_id][1].append(num_tasks-1-machine)
                job_task[job_id][1].append(num_tasks-machine)
            else:
                job_task[job_id][1].append(num_tasks-machine)
                job_task[job_id][1].append(num_tasks-machine-1)
    # print('Solve status: %s' % solver.StatusName(status))
    # print('Optimal objective value: %i' % solver.ObjectiveValue())
    # print('Statistics')
    # print('  - conflicts : %i' % solver.NumConflicts())
    # print('  - branches  : %i' % solver.NumBranches())
    # print('  - wall time : %f s' % solver.WallTime())
    # gatt(machine_task)
    return (OptimizationResult,job_task,machine_task)
if __name__ == '__main__':
    # Small demo instance: two jobs on three machines, one machine
    # alternative per task.
    demo_data = {
        'machinesNb': 3,
        'jobs': [
            [[{'machine': 1, 'processingTime': 4}], [{'machine': 3, 'processingTime': 4}]],
            [[{'machine': 1, 'processingTime': 5}], [{'machine': 2, 'processingTime': 4}]],
        ],
    }
    Solve(demo_data)
| true |
7ff5f90ec29b6264ed6feda484667147f5145c8a | Python | sambloom92/adventofcode | /day_7/utils.py | UTF-8 | 2,654 | 3.125 | 3 | [] | no_license | import os
from typing import Dict, Union
from conf import ROOT_DIR
# Default location of the puzzle input, relative to the project root.
RULES_PATH = os.path.join(ROOT_DIR, "day_7/bag_rules.csv")
def get_bag_rules(filepath: str = RULES_PATH) -> Dict[str, str]:
    """
    read the file containing the exercise challenge data and return a
    mapping from each rule's bag colour (its first two words) to the
    full rule line
    :param filepath: path to file
    :return: dict mapping bag colour to its rule line
    """
    rules = {}
    with open(filepath) as file:
        for line in file.readlines():
            colour = " ".join(line.split()[:2])
            rules[colour] = line
    return rules
class Bag:
    """A bag colour together with the parsed contents of its rule."""

    def __init__(self, colour: str, all_rules: Dict[str, str]):
        self.colour = colour
        self.all_rules = all_rules
        self._parse_contents()

    def __repr__(self) -> str:
        return f"{self.colour} bag"

    @staticmethod
    def _parse_quantity(content: str) -> int:
        """
        parse the quantity from a given rule fragment
        :param content: the part of the rule string describing one content
        :return: how many bags of that colour are contained
        """
        text = content.strip()
        if text == "no other bags.":
            return 0
        return int(text.split()[0])

    @staticmethod
    def _parse_colour(content: str) -> Union[str, None]:
        """
        parse the colour from a given rule fragment
        :param content: the part of the rule string describing one content
        :return: the colour of the contained bag, or None for an empty bag
        """
        text = content.strip()
        if text == "no other bags.":
            return None
        words = content.split()
        return " ".join(words[1:3])

    def _parse_contents(self):
        """
        build self.contents: a colour -> quantity dict, or None when the
        bag holds nothing
        """
        rule = self.all_rules[self.colour]
        contents_part = rule.split("contain", 1)[1]
        parsed = {}
        for chunk in contents_part.split(","):
            parsed[self._parse_colour(chunk)] = self._parse_quantity(chunk)
        if None in parsed:
            self.contents = None
        else:
            self.contents = parsed

    def can_contain_shiny_gold(self) -> bool:
        """
        check if this bag can ultimately contain at least one shiny gold bag
        """
        if not self.contents:
            return False
        if "shiny gold" in self.contents:
            return True
        return any(
            Bag(other_colour, self.all_rules).can_contain_shiny_gold()
            for other_colour in self.contents
        )
| true |
85eb284bbb7a45dff65f3d404fb5d8461f03f93f | Python | vreinharz/project_euler | /52/sol.py | UTF-8 | 348 | 2.984375 | 3 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | def check_val(nb):
list_nb = sorted(list(str(nb)))
for x in range(2,7):
list_test = sorted(list(str(x*nb)))
if list_test != list_nb:
return False
else:
return True
if __name__ == '__main__':
nb = 1
while True:
nb +=1
if check_val(nb):
print nb
break
| true |
4cf1e0253af7ec7cec4dbe0ca389022431ad443c | Python | Shivassembly/Masendor | /gamescript/gamemap.py | UTF-8 | 10,534 | 2.75 | 3 | [
"MIT"
] | permissive | import ast
import csv
import random
import pygame
import pygame.freetype
from PIL import Image, ImageFilter
## Terrain base colour
# RGBA colours used to encode terrain types in the base map image;
# Basemap.terraincolour relies on these exact values (and this order).
Temperate = (166, 255, 107, 255)
Tropical = (255, 199, 13, 255)
Volcanic = (255, 127, 39, 255)
Desert = (240, 229, 176, 255)
Arctic = (211, 211, 211, 255)
Blight = (163, 73, 164, 255)
Void = (255, 255, 255, 255)
Demonic = (237, 28, 36, 255)
Death = (127, 127, 127, 255)
ShallowWater = (153, 217, 235, 255)
DeepWater = (100, 110, 214, 255)
## Terrain Feature colour
# RGBA colours for the feature overlay; Mapfeature.featurecolour indexes
# into this order.  Note Barren reuses Volcanic's colour value.
Plain = (181, 230, 29, 255)
Barren = (255, 127, 39, 255)
PlantField = (167, 186, 139, 255)
Forest = (16, 84, 36, 255)
InlandWater = (133, 254, 239, 255)
Road = (130, 82, 55, 255)
UrbanBuilding = (147, 140, 136, 255)
Farm = (255, 242, 0, 255)
Wall = (102, 92, 118, 255)
Mana = (101, 109, 214, 255)
Rot = (200, 191, 231, 255)
Wetground = (186, 184, 109, 255)
class Basemap(pygame.sprite.Sprite):
    """Scaled base terrain map; terrain type is encoded as pixel colour."""

    images = []        # populated externally before instantiation
    maxviewmode = 10

    def __init__(self, scale):
        """image file of map should be at size 1000x1000 then it will be scaled in game"""
        self._layer = 0
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.trueimage = self.image.copy()  # unscaled copy used for pixel lookups
        self.scale = scale
        scalewidth = self.image.get_width() * self.scale
        scaleheight = self.image.get_height() * self.scale
        self.dim = pygame.Vector2(scalewidth, scaleheight)
        self.image_original = self.image.copy()
        self.image = pygame.transform.scale(self.image_original, (int(self.dim[0]), int(self.dim[1])))
        self.rect = self.image.get_rect(topleft=(0, 0))
        self.terraincolour = (Temperate, Tropical, Volcanic, Desert, Arctic, Blight, Void, Demonic, Death, ShallowWater, DeepWater)

    def getterrain(self, pos):
        """Return the terrain index at *pos* (unscaled map coordinates).

        Falls back to 0 when pos lies outside the map or the pixel colour
        is not a known terrain colour.
        """
        try:
            terrain = self.trueimage.get_at((int(pos[0]), int(pos[1])))  # colour encodes the terrain type
            terrainindex = self.terraincolour.index(terrain)
        except (IndexError, ValueError):
            # Bug fix: was a bare 'except:' that also masked unrelated
            # errors; get_at raises IndexError out of bounds and
            # tuple.index raises ValueError for unknown colours.
            terrainindex = 0
        return terrainindex
class Mapfeature(pygame.sprite.Sprite):
    """Scaled terrain-feature map plus per-feature modifiers loaded from
    unit_terrainbonus.csv."""

    images = []
    maxviewmode = 10
    main_dir = None    # set externally to the game's root directory

    def __init__(self, scale):
        self._layer = 0
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.trueimage = self.image.copy()  # unscaled copy used for pixel lookups
        self.scale = scale
        scalewidth = self.image.get_width() * self.scale
        scaleheight = self.image.get_height() * self.scale
        self.dim = pygame.Vector2(scalewidth, scaleheight)
        self.image_original = self.image.copy()
        self.image = pygame.transform.scale(self.image_original, (int(self.dim[0]), int(self.dim[1])))
        self.rect = self.image.get_rect(topleft=(0, 0))
        self.featurecolour = (Plain, Barren, PlantField, Forest, InlandWater, Road, UrbanBuilding, Farm, Wall, Mana, Rot, Wetground)
        self.featuremod = {}
        with open(self.main_dir + "\data\map" + '\\unit_terrainbonus.csv', 'r') as unitfile:
            rd = csv.reader(unitfile, quoting=csv.QUOTE_ALL)
            run = 0  # row counter; row 0 is the header and is stored as-is
            for row in rd:
                for n, i in enumerate(row):
                    if run != 0:
                        if n == 11:
                            # Column 11 is a comma-separated int list.
                            if "," in i:
                                row[n] = [int(item) if item.isdigit() else item for item in row[n].split(',')]
                            elif i.isdigit():
                                row[n] = [int(i)]
                        elif n in (2, 3, 4, 5, 6, 7):
                            # Percentage columns become 0-1 multipliers.
                            if i != "":
                                row[n] = float(i) / 100
                            else:
                                # Bug fix: the original assigned the loop
                                # variable ('i = 1.0'), leaving the empty
                                # cell as "" instead of the default 1.0.
                                row[n] = 1.0
                        elif i.isdigit() or "-" in i:
                            row[n] = int(i)
                run += 1
                self.featuremod[row[0]] = row[1:]
        # The redundant close() after the 'with' block was removed; the
        # context manager already closes the file.

    def getfeature(self, pos, gamemap):
        """Return (terrain_index, feature_index) at *pos*.

        feature_index combines the feature colour with the terrain type
        (12 features per terrain).  Unknown colours and out-of-bounds
        positions yield feature index 0 (matching the original, where a
        bare 'except:' swallowed the resulting errors).
        """
        terrainindex = gamemap.getterrain(pos)
        try:
            feature = self.trueimage.get_at((int(pos[0]), int(pos[1])))  # colour encodes the feature
        except IndexError:
            return terrainindex, 0
        if feature in self.featurecolour:
            featureindex = self.featurecolour.index(feature) + (terrainindex * 12)
        else:
            featureindex = 0
        return terrainindex, featureindex
class Mapheight(pygame.sprite.Sprite):
    """Scaled height map; height is read from the blue channel."""

    images = []
    maxviewmode = 10

    def __init__(self, scale):
        self._layer = 0
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = self.images[0]
        self.trueimage = self.image.copy()  # unscaled copy used for pixel lookups
        self.scale = scale
        scalewidth = self.image.get_width() * self.scale
        scaleheight = self.image.get_height() * self.scale
        self.dim = pygame.Vector2(scalewidth, scaleheight)
        self.image_original = self.image.copy()
        self.image = pygame.transform.scale(self.image_original, (int(self.dim[0]), int(self.dim[1])))
        self.rect = self.image.get_rect(topleft=(0, 0))

    def changescale(self, scale):
        """Rescale the displayed image to the new zoom factor."""
        self.scale = scale
        self.image = self.image_original.copy()
        scalewidth = self.image.get_width() * self.scale
        scaleheight = self.image.get_height() * self.scale
        self.dim = pygame.Vector2(scalewidth, scaleheight)
        self.image = pygame.transform.scale(self.image_original, (int(self.dim[0]), int(self.dim[1])))

    def getheight(self, pos):
        """Return the height index (1-255) at *pos*.

        The blue channel is inverted (255 - blue); out-of-bounds
        positions and pure-black (blue == 0) pixels map to height 1.
        """
        try:
            colour = self.trueimage.get_at((int(pos[0]), int(pos[1])))[2]  # blue channel
        except IndexError:
            # Bug fix: was a bare 'except:' that masked unrelated errors;
            # only out-of-bounds lookups are expected to fail here.
            colour = 255
        if colour == 0: colour = 255  # unpainted (black) pixels count as lowest
        heightindex = 255 - colour  # get colour at pos to obtain the height
        if heightindex <= 0: heightindex = 1
        return heightindex
class Beautifulmap(pygame.sprite.Sprite):
    """Pretty rendered map: recoloured feature map + blur + terrain textures
    + parchment-style effect and place-name overlays.

    Built once in __init__ from the base terrain map, feature map and
    height map; changescale/changemode only rescale cached originals.
    """
    textureimages = []
    emptyimage = None
    effectimage = None
    placename = None
    loadtexturelist = None
    main_dir = None
    def __init__(self, scale, basemap, featuremap, gamemapheight):
        self._layer = 0
        pygame.sprite.Sprite.__init__(self, self.containers)
        self.image = featuremap.image.copy()
        self.scale = scale
        self.mode = 0
        # Map of feature index -> (texture folder name, display colour),
        # loaded from the colourchange CSV.
        self.newcolourlist = {}
        with open(self.main_dir + "\data\map" + '\\colourchange.csv', 'r') as unitfile:
            rd = csv.reader(unitfile, quoting=csv.QUOTE_ALL)
            for row in rd:
                for n, i in enumerate(row):
                    if i.isdigit():
                        row[n] = int(i)
                    elif "," in i:
                        row[n] = ast.literal_eval(i)
                self.newcolourlist[row[0]] = row[1:]
        for rowpos in range(0, 1000):  ## Recolour the map
            for colpos in range(0, 1000):
                terrain, feature = featuremap.getfeature((rowpos, colpos), basemap)
                newcolour = self.newcolourlist[feature][1]
                rect = pygame.Rect(rowpos, colpos, 1, 1)
                self.image.fill(newcolour, rect)
        ## Comment out this part and import PIL above if not want to use blur filtering
        data = pygame.image.tostring(self.image, 'RGB')  ## Convert image to string data for filtering effect
        img = Image.frombytes('RGB', (1000, 1000), data)  ## Use PIL to get image data
        img = img.filter(ImageFilter.GaussianBlur(radius=2))  ## Blue Image (or apply other filter in future)
        img = img.tobytes()
        img = pygame.image.fromstring(img, (1000, 1000), 'RGB')  ## Convert image back to a pygame surface
        self.image = pygame.Surface(
            (1000, 1000))  ## For unknown reason using the above surface cause a lot of fps drop so make a new one and blit the above here
        rect = self.image.get_rect(topleft=(0, 0))
        self.image.blit(img, rect)
        ## PIL module code till here
        for rowpos in range(0, 991):  ## Put in terrain texture
            for colpos in range(0, 991):
                if rowpos % 20 == 0 and colpos % 20 == 0:
                    # One random texture stamp per 20x20 cell, jittered inside the cell.
                    randompos = (rowpos + random.randint(0, 19), colpos + random.randint(0, 19))
                    terrain, thisfeature = featuremap.getfeature((randompos), basemap)
                    feature = self.textureimages[self.loadtexturelist.index(self.newcolourlist[thisfeature][0].replace(" ", "").lower())]
                    choose = random.randint(0, len(feature) - 1)
                    if thisfeature - (terrain * 12) in (0, 1, 4, 5, 7) and random.randint(0,
                                                                                          100) < 60:  ## reduce speical texture in empty terrain like glassland
                        thistexture = self.emptyimage  ## empty texture
                    else:
                        thistexture = feature[choose]
                    rect = thistexture.get_rect(center=(randompos))
                    self.image.blit(thistexture, rect)
        rect = self.image.get_rect(topleft=(0, 0))
        self.image.blit(self.effectimage, rect)  ## Add special filter effect that make it look like old map
        self.image.blit(self.placename, rect)  ## Add placename layer to map
        scalewidth = self.image.get_width() * self.scale
        scaleheight = self.image.get_height() * self.scale
        self.dim = pygame.Vector2(scalewidth, scaleheight)
        # Cache unscaled originals: plain map and map with the height layer on top.
        self.trueimage = self.image.copy()
        self.image_original = self.image.copy()
        self.imagewithheight_original = self.image.copy()
        self.imagewithheight_original.blit(gamemapheight.image, rect)
        self.image = pygame.transform.scale(self.image_original, (int(self.dim[0]), int(self.dim[1])))
        self.rect = self.image.get_rect(topleft=(0, 0))
    def changemode(self, mode):
        """Switch between plain (0) and height-overlay rendering, then rescale."""
        self.mode = mode
        self.changescale(self.scale)
    def changescale(self, scale):
        """Rescale the currently selected cached original to *scale*."""
        self.scale = scale
        scalewidth = self.image_original.get_width() * self.scale
        scaleheight = self.image_original.get_height() * self.scale
        self.dim = pygame.Vector2(scalewidth, scaleheight)
        if self.mode == 0:
            self.image = self.image_original.copy()
            self.image = pygame.transform.scale(self.image, (int(self.dim[0]), int(self.dim[1])))
        else:
            self.image = self.imagewithheight_original.copy()
            self.image = pygame.transform.scale(self.image, (int(self.dim[0]), int(self.dim[1])))
| true |
d153fee673d7a6d5455fb3d7aba8dc7039ab0782 | Python | csJd/oj-codes | /archive/baidu_190917_2_mat_quick_pow.py | UTF-8 | 1,788 | 4 | 4 | [] | no_license | """
跳跃递推
时间限制:C/C++语言 1000MS;其他语言 3000MS
内存限制:C/C++语言 65536KB;其他语言 589824KB
题目描述:
很多数列都是递推形成的,现在给出一个序列的前四项,a[1],a[2],a[3],a[4],已知递推式是a[n]=a[n-1]+a[n-3]+a[n-4]。请你求出第n项的值。
输入
输入仅一行,包含4个正整数a[1],a[2],a[3],a[4]及n。
输出
输出仅包含一个正整数,即a[n],但是由于这个数可能非常大,所以请输出答案对10^9+7取模的结果。
样例输入
1 2 3 4 20
样例输出
9790
"""
MOD = int(1e9+7)


def matmul(mata, matb):
    """Multiply two square matrices, reducing every entry modulo MOD."""
    size = len(mata)
    product = [[0] * size for _ in range(size)]
    for row in range(size):
        for col in range(size):
            acc = 0
            for k in range(size):
                acc += mata[row][k] * matb[k][col]
            product[row][col] = acc % MOD
    return product
def matpow(mat, k):
    """Raise square matrix *mat* to the k-th power (mod MOD) via binary
    exponentiation, using matmul for every product."""
    size = len(mat)
    # Start from the identity matrix.
    result = [[1 if r == c else 0 for c in range(size)] for r in range(size)]
    base = mat
    while k:
        if k & 1:
            result = matmul(result, base)
        k >>= 1
        base = matmul(base, base)
    return result
def main():
    """Read 'a1 a2 a3 a4 n' from stdin and print a[n] mod 1e9+7.

    Uses fast matrix exponentiation of the companion matrix of the
    recurrence a[n] = a[n-1] + a[n-3] + a[n-4].
    """
    # mat x [a(n-4); a(n-3); a(n-2); a(n-1)]
    # = [a(n-3); a(n-2); a(n-1); a(n)]
    mat = [
        [0, 1, 0, 0],
        [0, 0, 1, 0],
        [0, 0, 0, 1],
        [1, 1, 0, 1]
    ]
    arr = list(map(int, input().split()))
    n = arr.pop()
    # mat^(n-1) maps the initial vector [a1..a4] to [a(n)..a(n+3)],
    # so res[0] is the answer.
    mat = matpow(mat, n-1)
    res = [0] * 4
    for i in range(4):
        for j in range(4):
            res[i] += mat[i][j] * arr[j]
            res[i] %= MOD
    ans = res[0]
    print(ans)
    # Cross-check: brute_force prints the same answer a second line.
    brute_force(arr, n)
def brute_force(arr, n):
    """O(n) rolling computation of a[n] used to cross-check the matrix result.

    *arr* holds a[1..4] and is mutated in place as a length-4 circular
    buffer of the most recent terms. Prints a[n] mod MOD; also returns the
    value so callers/tests can verify it programmatically (previously the
    function only printed and implicitly returned None).
    """
    i = 4
    while i < n:
        pos = i % 4
        # a[i+1] = a[i] + a[i-2] + a[i-3]; negative indices wrap the buffer.
        arr[pos] = (arr[pos-1] + arr[pos-3] + arr[pos-4]) % MOD
        i += 1
    answer = arr[(n-1) % 4]
    print(answer)
    return answer
# Script entry point: read the input line and run both solvers.
if __name__ == "__main__":
    main()
| true |
c2aff14c90313cd9f01f5407f650ffe68d3a992e | Python | xinrui20110501/Procedure | /Failed-Insert/Failed-Insert.py | UTF-8 | 1,543 | 2.59375 | 3 | [] | no_license | #!/usr/bin/env python
#coding=utf-8
import os
def scan_files(directory,prefix=None,postfix=None):
files_list=[]
for root, sub_dirs, files in os.walk(directory):
for special_file in files:
if postfix:
if special_file.endswith(postfix):
files_list.append(os.path.join(root,special_file))
elif prefix:
if special_file.startswith(prefix):
files_list.append(os.path.join(root,special_file))
else:
files_list.append(os.path.join(root,special_file))
return files_list
# Python 2 script: collect files starting with 'Com', extract their SQL
# insert statements (stripping the XZQH column and the last value) and
# append them to c:\ComServer.sql.
file = scan_files(raw_input(unicode('请输入文件所在路径:','utf-8').encode('gbk')),prefix='Com')
# NOTE(review): 'file' and 'line' are both reused/shadowed below (the list
# of paths vs the output file handle; the path vs the text line).
for line in file:
    with open(line) as f:
        for line in f:
            if 'insert' in line:
                # Drop the XZQH column name (with or without a leading space).
                if ',XZQH' in line:
                    line = line.replace(',XZQH','')
                elif ', XZQH' in line:
                    line = line.replace(', XZQH','')
                # Keep the statement from 'insert' onward and drop the last
                # comma-separated value, re-terminating with ');'.
                b=line.index('insert')
                line=line[b:]
                c = line.split(',')
                c.pop(-1)
                line = ','.join(c)+');\n'
                #print c
                # print line
                # Opened/closed per line; presumably fine for small inputs.
                file = open('c:\ComServer.sql','a')
                f=file.write(line)
                file.close()
print(u'程序运行中,请稍等...')
print(u'程序运行完毕!')
print(u'文件已生成,生成文件为c:\ComServer.sql')
os.system('pause')
| true |
fbc902fce61f281ed5facf76e9e5d61630dd9308 | Python | Mang0o/leetcode | /dp/32. Longest Valid Parentheses.py | UTF-8 | 1,557 | 4.59375 | 5 | [] | no_license | """
Given a string containing just the characters '(' and ')', find the length of the longest valid (well-formed) parentheses substring.
For "(()", the longest valid parentheses substring is "()", which has length = 2.
Another example is ")()())", where the longest valid parentheses substring is "()()", which has length = 4.
"""
class Solution:
    """Length of the longest well-formed '()' substring (LeetCode 32).

    Both methods are O(n) dynamic programs; the stray debug print(dp)
    calls of the original were removed so stdout is not polluted.
    """
    def longestValidParentheses1(self, s):
        """DP keyed on the two-character window ending at each index.

        dp[t] is the longest valid run ending just before position t-1 of
        s; two leading sentinel zeros keep the indexing simple.

        :type s: str
        :rtype: int
        """
        dp = [0, 0]
        for i in range(1, len(s)):
            if s[i-1:i+1] == "()":
                # "...()": extend whatever valid run ended before the pair.
                dp.append(dp[i-1] + 2)
            elif s[i-1:i+1] == "))" and i-dp[i]-1 >= 0 and s[i-dp[i]-1] == "(":
                # "...))": this ')' closes a '(' sitting just before the
                # valid run of length dp[i]; also absorb the run before it.
                dp.append(dp[i] + 2 + dp[i-dp[i]-1])
            else:
                dp.append(0)
        return max(dp)

    def longestValidParentheses(self, s):
        """Standard O(n) DP; dp[-1] is the longest valid run ending at the
        previous character.

        :type s: str
        :rtype: int
        """
        dp = [0, 0]
        for i in range(1, len(s)):
            if s[i] == ")":
                if s[i-1] == "(":  # string looks like ".......()"
                    dp.append(dp[-2] + 2)
                elif s[i-1] == ")" and i-dp[-1]-1 >= 0 and s[i-1-dp[-1]] == "(":
                    # "......))" and the char before the inner run is '('
                    dp.append(dp[-1] + 2 + dp[-1-dp[-1]-1])
                else:
                    dp.append(0)
            else:
                dp.append(0)
        return max(dp)
# Ad-hoc smoke test: "()(())" is fully valid, so the expected output is 6.
if __name__ == '__main__':
    s = Solution()
    a = "()(())"
    print(s.longestValidParentheses(a))
| true |
83b50e4026efe33e23484fc596997c6dcfada59c | Python | Highjune/Python | /BasicConcept/File_IO/Readfile.py | UTF-8 | 174 | 2.96875 | 3 | [] | no_license | try:
ptr = open(r'C:\temp1\result.txt', 'r')
except FileNotFoundError:
print('File Not Found')
else :
data = ptr.read()
print(data)
finally :
ptr.close() | true |
2a28fe1e8ff140f4f1a4b0eac10488e8f008373b | Python | dabalda/drl_for_natural_disaster_monitoring | /algorithms/ma_drqn/replay_buffer.py | UTF-8 | 1,427 | 3.453125 | 3 | [] | no_license | import numpy as np
class ReplayBuffer(object):
    """
    Replay buffer for the DRQN algorithm.

    Stores whole episodes (lists of experience tuples) and samples
    fixed-length traces of consecutive steps from randomly chosen episodes.

    :param buffer_size: (int) the maximum number of episodes the buffer holds.
    """
    def __init__(self, buffer_size=100):
        self.buffer = []
        self.buffer_size = buffer_size

    def add(self, experience):
        """
        Append one episode, evicting the oldest episodes when full.

        :param experience: (list) one episode as a list of experience tuples.
        """
        if len(self.buffer) + 1 >= self.buffer_size:
            self.buffer[0:(1 + len(self.buffer)) - self.buffer_size] = []
        self.buffer.append(experience)

    def sample(self, batch_size, trace_length):
        """
        Sample *batch_size* traces of *trace_length* consecutive experiences.

        Each experience is expected to be a 5-tuple, hence the fixed
        trailing dimension of 5; episodes must contain at least
        *trace_length* steps.

        :param batch_size: (int) how many traces to sample.
        :param trace_length: (int) steps per sampled trace.
        :return: (np.ndarray) of shape [batch_size * trace_length, 5].
        """
        # high=len(self.buffer): np.random.randint's upper bound is
        # exclusive, so the original high=len(self.buffer)-1 could never
        # sample the newest episode and raised ValueError with one episode.
        sampled_episodes_idx = np.random.randint(low=0, high=len(self.buffer), size=batch_size)
        sampled_traces = []
        for ep_idx in sampled_episodes_idx:
            len_ep = len(self.buffer[ep_idx])
            point = np.random.randint(low=0, high=len_ep + 1 - trace_length)
            sampled_traces.append(self.buffer[ep_idx][point:point + trace_length])
        sampled_traces = np.array(sampled_traces)
        return np.reshape(sampled_traces, [batch_size * trace_length, 5])  # Will be reshaped back in the network
c0f0d152294d49c3b5a4f3fa709a385ecccfb5f3 | Python | michalrus/dotfiles | /packages/cp2104-gpio/cp2104-gpio.py | UTF-8 | 1,058 | 2.734375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
#
# This small util is used to write values to CP2104's GPIO pins.
#
# Cf. https://www.silabs.com/documents/public/application-notes/AN571.pdf for
# specific values used below.
#
import sys
import usb
#
# 1. Parse argv.
#
if len(sys.argv) != 3:
    print('Usage: ' + sys.argv[0] + ' <gpioNum> <gpioVal>', file=sys.stderr)
    sys.exit(1)
gpioNum = int(sys.argv[1])
gpioVal = int(sys.argv[2])
# CP2104 exposes four GPIO pins, each settable to 0 or 1.
if not 0 <= gpioNum <= 3:
    print('fatal: <gpioNum> must be in [0..3].', file=sys.stderr)
    sys.exit(1)
if gpioVal not in (0, 1):
    print('fatal: <gpioVal> must be either 0 or 1.', file=sys.stderr)
    sys.exit(1)
#
# 2. Find the CP2104 (<https://youtu.be/xH_y05pIDTo?t=1383>).
#
dev = usb.core.find(idVendor=0x10c4, idProduct=0xea60)
if not dev:
    print('fatal: could not find CP2104.', file=sys.stderr)
    sys.exit(1)
#
# 3. Set the pins.
#
# Vendor request 0xff / wValue 0x37e1 writes the GPIO latch (see AN571):
# wIndex low byte = mask of pins to change, high byte = new pin values.
dev.ctrl_transfer(bmRequestType=0b01000001,
                  bRequest=0xff,
                  wValue=0x37e1,
                  wIndex=(1 << gpioNum) | ((gpioVal << gpioNum) << 8))
| true |
1d6174e00bc681696cdd4a30f231c93e8ad0e827 | Python | brandonio21/PyCFramework | /Solutions/dev/util/variables.py | UTF-8 | 2,218 | 3.171875 | 3 | [] | no_license | ################################################################################
# Filename: variables.py
# Author: Brandon Milton, http://brandonio21.com
# Date: 14 September 2015
#
# Contains information and functions pertaining to the Variables class, which
# interacts with the variables file
################################################################################
from util import fileops
from util.pathmapper import PathMapper
class Variables:
    """Accessors for the '{variable}'-style keys stored in the variables file.

    The file is loaded lazily on first access and cached in _variablesDict.
    """
    VARIABLES_FILE = 'variables.json'
    _variablesDict = None

    #### VARIABLE NAMES
    NAME_PROBLEM_NUMBER = 'problem_number'
    NAME_CASE_TYPE = 'case_type'
    NAME_FILENAME = 'filename'
    NAME_FILENAME_LESS_EXT = 'filename_less_extension'
    NAME_DIRECTORY = 'directory'
    NAME_LANGUAGE = 'language'

    @classmethod
    def load_variables(cls):
        """Populate the cached variables dictionary from the variables file."""
        cls._variablesDict = fileops.get_json_dict(cls.get_variables_filepath())

    @classmethod
    def get_variables_filepath(cls):
        """Return the path of the variables file under the configuration path."""
        return fileops.join_path(PathMapper.get_config_path(),
                                 Variables.VARIABLES_FILE)

    @classmethod
    def get_variable_key(cls, variableName: str) -> str:
        """Return the '{variable}'-style key for *variableName*, or None if
        the variable does not exist."""
        if cls._variablesDict is None:
            cls.load_variables()
        return cls._variablesDict.get(variableName)

    @classmethod
    def get_variable_key_name(cls, variableName: str) -> str:
        """Return the key for *variableName* with one layer of surrounding
        braces removed, or None if the variable does not exist."""
        variableKey = cls.get_variable_key(variableName)
        if variableKey is None:
            return None
        # Strip the '{...}' wrapper when present; otherwise return as-is.
        if variableKey[0] == '{' and variableKey[-1] == '}':
            return variableKey[1:-1]
        return variableKey
| true |
ad30aa1e0818e8499d459573df188b999c26be25 | Python | dxc19951001/Everyday_LeetCode | /77.组合.py | UTF-8 | 3,497 | 3.578125 | 4 | [] | no_license | from typing import List
import itertools
class Solution:
def combine(self, n: int, k: int) -> List[List[int]]:
# 核心思想--运用itertools函数
return list(itertools.combinations(range(1,n+1),k))
def combine2(self, n: int, k: int) -> List[List[int]]:
# 核心思想--回溯剪支
# 依次选择某个数字为开头,选择完长度为k的所有情况后,回溯
result = []
def recall(n, k, start, result, subset):
# n为结束数字
# k为选取长度
# start为开始数字
# result为最终结果
# subset为每次需要填的数
if len(subset) == k:
result.append(subset[:])
return
for i in range(start, n+1):
# 每次从[start, n]中选取一个数
if k-len(subset) > n-i+1:
# 如果剩余要填的数(k - len(subset)大于 还能填的数的总和( n-i+1) 时,
# 往后永远无法完成题目要求的k个数的组合,这时可以剪枝
break
subset.append(i)
recall(n, k, i+1, result, subset) # 进入递归,此时start变为i+1
subset.pop() # 结束上一层递归后,删除上一层放入subset的数字
recall(n, k, 1, result, [])
return result
def combine1(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
# 核心思想:回溯
res = []
path = []
# 1.确定回溯参数:n,k, startindex(每次循环的起始位置)
# 2.终止条件:当len(path) == k
# 3.回溯搜索遍历:
# 从startindex开始循环到n,左闭右闭区间
# 每次节点加入到path中
# 递归循环,每次startindex向后移一位
# 到i循环完成后回溯,去掉当前的i进入到i+1
def backtrack(n, k, startindex):
if len(path) == k:
res.append(path[:])
return
for i in range(startindex, n + 1):
path.append(i)
backtrack(n, k, i + 1)
path.pop()
backtrack(n, k, 1)
return res
def combin3(self, n, k):
"""
:type n: int
:type k: int
:rtype: List[List[int]]
"""
# 核心思想:回溯
res = []
path = []
def backtrack(n, k, startindex):
if len(path) == k:
res.append(path[:])
return
# i表示从第几个元素开始循环
# n - i表示剩余元素
# k - len(path)表示要满足k个元素还需要的元素
# 因此要满足:n-i >= k - len(path)
# i <= n - (k - len(path))
# 因为包括起始位置,我们要是一个左闭的集合
# 因此在集合n中至多要从该起始位置 : i <= n - (k - path.size()) + 1,开始遍历
# 由于python for循环左闭右开,所以要+2
for i in range(startindex, n - (k - len(path)) + 2):
path.append(i)
backtrack(n, k, i + 1)
path.pop()
backtrack(n, k, 1)
return res
| true |
f995388b127ccfc3002d07d985c87cdf47f96483 | Python | MrHamdulay/csc3-capstone | /examples/data/Assignment_5/stkthe002/mymath.py | UTF-8 | 447 | 3.609375 | 4 | [] | no_license | # calculate number of k-permutations of n items
# Thea Sitek, STKTHE002
# 16.04.2014
#get integer
def get_integer(i):
    """Prompt (labelled with *i*) until a non-negative integer is typed.

    Returns the value as an int. int() replaces the original eval(), which
    needlessly executed input as code and even crashed on digit strings
    with leading zeros ('007' is a SyntaxError under eval in Python 3).
    """
    nk = input("Enter " + i + ": \n")
    while not nk.isdigit():
        nk = input("Enter " + i + ": \n")
    return int(nk)
#calculate factorial
#calculate factorial
def calc_factorial(i):
    """Iteratively compute i!; returns 1 for i <= 0 (empty product)."""
    product = 1
    for factor in range(2, i + 1):
        product *= factor
    return product
497e2c41eb7cec845e9ddc4968b08bc287ad4980 | Python | thuzhangjw/azdataprocess | /cnn.py | UTF-8 | 5,642 | 2.515625 | 3 | [] | no_license | import tensorflow as tf
import pandas as pd
from gensim.models import Word2Vec
class CNNLayer(object):
    """TF1-style text-CNN graph (Kim-CNN shape): embedding lookup ->
    parallel conv+max-pool branches -> dense feature layer -> softmax.

    Word vectors are initialised from a gensim word2vec model; with
    use_static=True a frozen copy of the embeddings is stacked as a second
    input channel.
    """
    def __init__(self, sequence_length, filter_sizes, num_filters, init_words_embedded_model, num_classes, l2_reg_lambda=0.1, use_static=False):
        self.use_static = use_static
        self.vocabulary_index_map, self.embedded_vocabulary = self.load_init_embedded_vocabulary(init_words_embedded_model)
        embedding_size = init_words_embedded_model.vector_size
        # Inputs: token strings (looked up via the hash table), one-hot labels.
        self.input_x = tf.placeholder(tf.string, [None, sequence_length], name='input_x')
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name='input_y')
        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')
        if use_static:
            # Frozen copy of the embeddings used as an extra, non-trainable channel.
            self.static_embedded_vocabulary = tf.Variable(self.embedded_vocabulary, trainable=False)
        l2_loss = tf.constant(0.0)
        # Embedding Layer
        with tf.name_scope('embedding'):
            vocab_indices = self.vocabulary_index_map.lookup(self.input_x)
            self.embedded_chars = tf.nn.embedding_lookup(self.embedded_vocabulary, vocab_indices)
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
            if use_static:
                static_embedded_chars = tf.nn.embedding_lookup(self.static_embedded_vocabulary, vocab_indices)
                static_embedded_chars_expanded = tf.expand_dims(static_embedded_chars, -1)
                self.embedded_chars_expanded = tf.concat([self.embedded_chars_expanded, static_embedded_chars_expanded], -1)
        # Creating convolution + maxpool layer for each filter size
        pooled_outputs = []
        for i , filter_size in enumerate(filter_sizes):
            with tf.name_scope('convolution-maxpool-%s' % filter_size):
                # Convolution Layer
                filter_shape = [filter_size, embedding_size, self.embedded_chars_expanded.shape[-1].value, num_filters]
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W')
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name='b')
                conv= tf.nn.conv2d(self.embedded_chars_expanded, W, strides=[1, 1, 1, 1], padding='VALID', name='conv')
                l2_loss += tf.nn.l2_loss(W)
                l2_loss += tf.nn.l2_loss(b)
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name='relu')
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length-filter_size+1, 1, 1],
                    strides=[1,1,1,1],
                    padding='VALID',
                    name='pool')
                pooled_outputs.append(pooled)
        # Combine all thr pooled features
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, axis=3)
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        with tf.variable_scope('final_feature'):
            W = tf.get_variable(
                'W',
                shape=[num_filters_total, num_filters_total],
                initializer=tf.contrib.layers.xavier_initializer()
            )
            b = tf.Variable(tf.constant(0.1, shape=[num_filters_total]), name='b')
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            # NOTE(review): self.l2_loss is a snapshot taken before the output
            # layer adds its own terms below; self.loss uses the full l2_loss.
            # Confirm which one downstream code expects.
            self.l2_loss = l2_reg_lambda * l2_loss
            self.final_feature = tf.nn.xw_plus_b(self.h_pool_flat, W, b, name='final_feature')
        # Final scores and predictions
        with tf.name_scope('output'):
            self.h_drop = tf.nn.dropout(self.final_feature, self.dropout_keep_prob)
            W = tf.get_variable(
                'W',
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='b')
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name='scores')
            self.predictions = tf.argmax(self.scores, 1, name='predictions')
        # Calculate mean cross-entropy loss
        with tf.name_scope('loss'):
            losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.scores, labels=self.input_y)
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        # Accuracy
        with tf.name_scope('accuracy'):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')

    def load_init_embedded_vocabulary(self, init_words_embedded_model):
        """Build (word -> row index) hash table and the embedding matrix from
        a gensim model; row 0 is the all-zero vector for unknown words."""
        wv = init_words_embedded_model.wv
        vector_size = wv.vector_size
        embedded_words_list = []
        self.keys = []
        self.vals = []
        embedded_words_list.append([0]*vector_size)
        for i, w in enumerate(wv.vocab):
            embedded_words_list.append(list(wv[w]))
            # vocabulary_index_map[w] = i + 1
            self.keys.append(w)
            self.vals.append(i+1)
        embedded_vocabulary = tf.Variable(embedded_words_list, name='Vocabulary')
        vocabulary_index_map = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(self.keys, self.vals), 0, name='vocabulary_index_map')
        return vocabulary_index_map, embedded_vocabulary
| true |
6646721bd93e812fc025dbfd5808cdeb8c633781 | Python | y0nkers/WhatsApp-Client-using-Python-Selenium | /app.py | UTF-8 | 7,300 | 2.9375 | 3 | [] | no_license | from modules import *
from smiles import *
try:
    def main():
        """Interactive WhatsApp Web client: log in via a persistent Chrome
        profile, pick a chat partner, then dispatch console commands
        (send/sendto/voice/file/media/emoji/contact/smile/...) until 'exit'."""
        global receiver_name
        global stopsending
        print('\nДобро пожаловать в WhatsApp клиент!\n ')
        # Use a persistent Chrome user-data directory so session cookies
        # survive restarts and the QR login is not required every launch.
        # The folder can grow large over time; deleting it frees space but
        # forces a new QR authorisation.
        if not os.path.exists(config['chrome_data_directory']): # Create the profile folder on first run.
            os.makedirs(config['chrome_data_directory'])
        # Webdriver is the driver used to automate the web application.
        driver_options = webdriver.ChromeOptions() # Chrome-specific option set.
        driver_options.add_experimental_option("excludeSwitches", ["enable-logging"]) # Silence console log output.
        driver_options.add_argument("user-data-dir={0}".format(config['chrome_data_directory']))
        driver = webdriver.Chrome(config['path_to_chromedriver'],
                                  options=driver_options) # Configure the webdriver for Chrome.
        driver.get(config['whatsapp_url']) # Open WhatsApp Web in Chrome.
        print("Пожалуйста, отсканируйте QR-код на появившейся странице в своём мобильном приложении WhatsApp"
              " и введите ok.\nЕсли сразу открылась страница WhatsApp, нажмите Enter.\n")
        while True: # Wait until the user confirms the QR code has been scanned.
            is_connected = input("QR-код отсканирован? Ok/No: ")
            if is_connected.lower() == 'ok' or is_connected == '':
                break
        if len(sys.argv) > 1:
            choose_receiver(driver) # The receiver was supplied as a CLI argument.
        else:
            receiver_input = input("\nВведите имя собеседника в WhatsApp: ")
            choose_receiver(driver, receiver_input)
        print("Для просмотра доступных команд введите help.")
        # Start a separate thread that polls for incoming messages.
        get_message_thread = threading.Thread(target=start_getting_messages, args=(driver,))
        get_message_thread.start()
        while True:
            user_input = input().strip()
            if len(user_input) > 7 and 'sendto ' in user_input[:7]:
                choose_receiver(driver, receiver=user_input[7:])
            elif len(user_input) > 5 and 'send ' in user_input[:5]:
                if is_stopsending(stopsending) == 0:
                    send_message(driver, user_input[5:])
            elif user_input == 'stopsending':
                if stopsending == -1:
                    print("Прекращение отправки сообщений. Теперь вы только получаете входящие сообщения.")
                else:
                    print("Отправка сообщений вновь доступна.")
                stopsending *= -1
            elif user_input == "voice":
                send_voice(driver)
            elif user_input == "file":
                if is_stopsending(stopsending) == 0:
                    filepath = input("Введите путь до файла, который вы хотите отправить: ")
                    send_file(driver, filepath)
            elif user_input == "media":
                if is_stopsending(stopsending) == 0:
                    filepath = input("Введите путь до медиафайла, который вы хотите отправить: ")
                    send_media(driver, filepath)
            elif user_input == "emoji":
                if is_stopsending(stopsending) == 0:
                    emoji = input("Введите название эмодзи, который вы хотите отправить: ")
                    send_emoji(driver, emoji)
            elif user_input == "contact":
                if is_stopsending(stopsending) == 0:
                    contact = input("Введите имя контакта, который вы хотите отправить: ")
                    send_contact(driver, contact)
            elif user_input == "smile":
                if is_stopsending(stopsending) == 0:
                    seed()
                    random_smile = choice(smiles)
                    send_smiley(driver, random_smile)
            elif user_input == 'help':
                print("\nДоступные команды:\nsendto <name> - перейти к чату с указанным пользователем.\n"
                      "stopsending - включить/выключить отправку сообщений.\n"
                      "send <message> - отправка сообщения текущему собеседнику.\n"
                      "voice - отправка голосового сообщения текущему собеседнику.\n"
                      "file - отправка файла текущему собеседнику.\n"
                      "media - отправка медиафайла текущему собеседнику.\n"
                      "emoji - отправка эмодзи текущему собеседнику.\n"
                      "contact - отправка контакта текущему собеседнику.\n"
                      "smile - отправка смайлика текущему собеседнику.\n"
                      "exit - выход из программы.\n")
            elif user_input == 'exit':
                print('Закрывается WebDriver...')
                driver.quit()
                print('Закрывается программа...')
                quit()
            else:
                print("Неизвестная команда. Для просмотра доступных команд введите help.")
    if __name__ == '__main__':
        main()
except AssertionError as e:
    sys.exit(print("\nНе удаётся открыть URL-адрес WhatsApp.\n"))
except KeyboardInterrupt as e:
    sys.exit("\nНажмите Ctrl+C для выхода.\n")
except WebDriverException as e:
    sys.exit(print(e, "\nОшибка ChromeDriver.\n"
                   "Проверьте, совместима ли установленная версия ChromeDriver с установленной версией Chrome.\n"))
| true |
a6eb1baecda85f9c9c2b82c5ceafd841d42400a0 | Python | vinay-rock/lambda-ec2-scheduler | /lambda_function.py | UTF-8 | 2,376 | 2.921875 | 3 | [] | no_license | # Request:
# Please schedule the EC2 instances to start at 7AM and stop at 7PM Monday to Friday.
# They are not required on the weekends.
# Tags:
# Scheduler:Start H:7 M:00 DOW:01234
# Scheduler:Stop H:19 M:00 DOW:01234
#
# Note: Day Of Week (DOW) - Monday is 0 and Sunday is 6.
# M T W T F S S
# 0 1 2 3 4 5 6
#
# XXX: Does not support minute granularity
# TODO: logging
# Tag keys that opt an EC2 instance into scheduled start/stop.
START_TAG = 'Scheduler:Start'
STOP_TAG = 'Scheduler:Stop'
TIMEZONE = 'Australia/Melbourne'
# Extra filters ANDed into every instance query; scope this Lambda to the
# fleet it is allowed to manage.
FILTERS = [ {
    'Name': 'tag:Business:Application',
    'Values': ['something-useless']
} ]
import boto3
import datetime
import logging
import pytz
import re
ec2 = boto3.resource('ec2')
def filter_instances(tag, state):
    # Return EC2 instances that carry the tag key *tag* (any value), are in
    # lifecycle state *state* ('running'/'stopped'), and match FILTERS.
    filters = FILTERS + [ {
        'Name': 'tag-key',
        'Values': [tag]
    }, {
        'Name': 'instance-state-name',
        'Values': [state]
    } ]
    return ec2.instances.filter(Filters=filters)
def _split(a,b=None): return a,b
def _splitter(v): return _split(*v.split(':'))
def _parser(value):
for item in re.split(' *', value):
yield _splitter(item)
def get_tag(tag_key, tags={}):
for tag in tags:
if tag['Key'] == tag_key:
return tag['Value']
def parse_tag_value(value):
data={'DOW': None, 'H': None, 'M': None} # defaults
data.update(dict(_parser(value)))
return data
def find_instances(tag, state, hour, dow):
    # Generator over instances whose *tag* schedule matches the current hour
    # and day-of-week. NOTE: Python 2 print statements throughout this file.
    tagged = filter_instances(tag, state)
    for i in tagged:
        print i,
        info = parse_tag_value(get_tag(tag, i.tags))
        print info,
        # DOW is a string of digits (Monday=0 ... Sunday=6); per-digit test.
        if info['DOW'] is not None and str(dow) in info['DOW']:
            print 'DOW match.',
            # Hours are compared as strings, so tags must not be
            # zero-padded ('H:7', not 'H:07').
            if info['H'] is not None and str(hour) == info['H']:
                print 'H match.',
                print 'perform action'
                yield i
        print
def lambda_handler(event, context):
    # AWS Lambda entry point, expected to run hourly: stop tagged instances
    # first, then start the ones scheduled for this hour/day.
    tz = pytz.timezone(TIMEZONE)
    now = datetime.datetime.now(tz)
    currentHour = now.hour
    currentDOW = now.weekday()
    # Stop first, this may slightly save cost (if going over EC2 usage hour)
    print 'looking for: H', currentHour, 'DOW', currentDOW
    print 'to stop:'
    for inst in find_instances(STOP_TAG, 'running', currentHour, currentDOW):
        print inst.stop()
    print 'to start:'
    for inst in find_instances(START_TAG, 'stopped', currentHour, currentDOW):
        print inst.start()
| true |
cdcb13347727d415f2f8a2d5d4dddac959297545 | Python | dedayoa/sms-counter-python | /sms_counter/main.py | UTF-8 | 4,078 | 2.515625 | 3 | [
"MIT"
] | permissive | # -*- coding: utf8 -*-
"""
Created on Jul 10, 2016
@author: Dayo
"""
from math import ceil
class SMSCounter(object):
    """SMS segmentation helper.

    Classifies a text as GSM 7-bit, GSM 7-bit extended or UTF-16, which
    determines how many characters fit into one SMS segment, and can
    truncate a text to a maximum number of segments.
    """
    GSM_7BIT = 'GSM_7BIT'
    GSM_7BIT_EX = 'GSM_7BIT_EX'
    UTF16 = 'UTF16'
    # Characters per single-segment message.
    GSM_7BIT_LEN = GSM_7BIT_EX_LEN = 160
    UTF16_LEN = 70
    # Characters per segment once a message is split into multiple parts.
    GSM_7BIT_LEN_MULTIPART = GSM_7BIT_EX_LEN_MULTIPART = 153
    UTF16_LEN_MULTIPART = 67

    @classmethod
    def _get_gsm_7bit_map(cls):
        """Unicode code points representable in the basic GSM 7-bit alphabet."""
        gsm_7bit_map = [
            10, 12, 13, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
            46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62,
            63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
            80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 92, 95, 97, 98, 99,
            100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112,
            113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 161, 163, 164,
            165, 191, 196, 197, 198, 199, 201, 209, 214, 216, 220, 223, 224,
            228, 229, 230, 232, 233, 236, 241, 242, 246, 248, 249, 252, 915,
            916, 920, 923, 926, 928, 931, 934, 936, 937]
        return gsm_7bit_map

    @classmethod
    def _get_added_gsm_7bit_ex_map(cls):
        """Extra code points reachable via the GSM 7-bit extension table."""
        added_gsm_7bit_ex_map = [12, 91, 92, 93, 94, 123, 124, 125, 126, 8364]
        return added_gsm_7bit_ex_map

    @classmethod
    def _text_to_unicode_pointcode_list(cls, plaintext):
        """Return the list of Unicode code points of *plaintext*."""
        textlist = []
        for stg in plaintext:
            textlist.append(ord(stg))
        return textlist

    @classmethod
    def _detect_encoding(cls, plaintext):
        """Classify *plaintext* as GSM_7BIT, GSM_7BIT_EX or UTF16."""
        rf = cls._text_to_unicode_pointcode_list(plaintext)
        non_gsm_7bit_chars = set(rf) - set(cls._get_gsm_7bit_map())
        if not non_gsm_7bit_chars:
            return cls.GSM_7BIT
        non_gsm_7bit_ex_chars = non_gsm_7bit_chars - set(cls._get_added_gsm_7bit_ex_map())
        if not non_gsm_7bit_ex_chars:
            return cls.GSM_7BIT_EX
        return cls.UTF16

    @classmethod
    def count(cls, plaintext):
        """Return a dict describing how *plaintext* splits into SMS segments.

        Keys: 'encoding', 'length' (extension-table characters count
        double), 'per_message', 'remaining' (characters left in the last
        segment) and 'messages' (number of segments).
        """
        textlist = cls._text_to_unicode_pointcode_list(plaintext)
        encoding = cls._detect_encoding(plaintext)
        length = len(textlist)
        if encoding == cls.GSM_7BIT_EX:
            # Extension characters are sent as an escape + character pair.
            exchars = [c for c in textlist if c in cls._get_added_gsm_7bit_ex_map()]
            lengthexchars = len(exchars)
            length += lengthexchars
        if encoding == cls.GSM_7BIT:
            permessage = cls.GSM_7BIT_LEN
            if length > cls.GSM_7BIT_LEN:
                permessage = cls.GSM_7BIT_LEN_MULTIPART
        elif encoding == cls.GSM_7BIT_EX:
            permessage = cls.GSM_7BIT_EX_LEN
            if length > cls.GSM_7BIT_EX_LEN:
                permessage = cls.GSM_7BIT_EX_LEN_MULTIPART
        else:
            permessage = cls.UTF16_LEN
            if length > cls.UTF16_LEN:
                permessage = cls.UTF16_LEN_MULTIPART
        # float() keeps the ceil division correct on Python 2 as well.
        messages = int(ceil(length / float(permessage)))
        remaining = (permessage * messages) - length
        returnset = {
            'encoding': encoding,
            'length': length,
            'per_message': permessage,
            'remaining': remaining,
            'messages': messages
        }
        return returnset

    @classmethod
    def truncate(cls, plaintext, limitsms):
        """Truncate *plaintext* so it fits into at most *limitsms* segments.

        Fixes the original implementation, which read attributes off the
        dict returned by count() (raising AttributeError) and re-counted
        the full, untruncated text inside its loop, so it could never
        terminate normally.
        """
        count = cls.count(plaintext)
        if count['messages'] <= limitsms:
            return plaintext
        if count['encoding'] == cls.UTF16:
            limit = cls.UTF16_LEN
            # NOTE(review): '> 2' kept from the original; presumably '> 1'
            # was intended, but the loop below corrects any overshoot.
            if limitsms > 2:
                limit = cls.UTF16_LEN_MULTIPART
        else:
            limit = cls.GSM_7BIT_LEN
            if limitsms > 2:
                limit = cls.GSM_7BIT_LEN_MULTIPART
        while True:
            text = plaintext[0:limit * limitsms]
            if cls.count(text)['messages'] <= limitsms:
                return text
            limit = limit - 1
7eac09bf8be328257c6b6c610dc7733d214d343d | Python | Pablocg0/ContaminationForecast | /src/predictionKeras.py | UTF-8 | 2,298 | 2.625 | 3 | [] | no_license | import pandas as df
import math
import numpy as np
import os
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation
import tensorflow as tf
name = ""
def normalize(data, station, contaminant, dirData):
    """Min-max normalise *data* against per-feature bounds stored in
    '<dirData><station>_<contaminant>_MaxMin.csv' (columns MAX and MIN,
    one row per element of *data*, in order).

    A raw value of -1 is treated as missing and mapped to 0.0, as is any
    feature whose stored bounds are both zero.

    :param data: sequence of raw values to normalise
    :param station: station name (used to locate the bounds file)
    :param contaminant: pollutant name (used to locate the bounds file)
    :param dirData: directory (with trailing separator) of the bounds files
    :return: list of floats in [0, 1]
    """
    bounds = df.read_csv(dirData + station + '_' + contaminant + '_MaxMin.csv')
    highs = bounds['MAX'].values
    lows = bounds['MIN'].values
    normalised = []
    for idx, raw in enumerate(data):
        if raw == -1:
            normalised.append(0.0)
            continue
        hi = float(highs[idx])
        lo = float(lows[idx])
        if hi == 0 and lo == 0:
            normalised.append(0.0)
        else:
            normalised.append(float((raw - lo) / (hi - lo)))
    return normalised
def desNorm(data, station, contaminant, dirData, columnContaminant):
    """Invert min-max normalisation for a single feature.

    The bounds file '<station>_<contaminant>_MaxMin.csv' in *dirData* is
    searched for the row whose first column equals
    columnContaminant + station.lower(); that row's MAX/MIN define the
    scale used to map each normalised value back to its raw range.

    :param data: sequence of normalised values
    :param station: station name
    :param contaminant: pollutant name
    :param dirData: directory (with trailing separator) of the bounds files
    :param columnContaminant: column-name prefix identifying the pollutant
    :return: list of denormalised values
    """
    target_row = columnContaminant + station.lower()
    bounds = df.read_csv(dirData + station + '_' + contaminant + '_MaxMin.csv')
    label_column = bounds.columns[0]
    row = bounds[bounds[label_column] == target_row]
    hi = row['MAX'].values[0]
    lo = row['MIN'].values[0]
    return [(value * (hi - lo)) + lo for value in data]
def prediction(station, contaminant, arrayPred, dirTrain, dirData):
    """Load the trained Keras model for *station*/*contaminant* and run it
    over every input in *arrayPred*.

    :param arrayPred: iterable of model-ready input arrays
    :param dirTrain: base directory of the per-station trained models
    :param dirData: unused here; kept for interface compatibility
    :return: list of scalar predictions (one ``pred[0, 0]`` per input)
    """
    model_path = dirTrain + station + '/' + 'train_' + station + '_' + contaminant + '.h5'
    print(model_path)
    model = load_model(model_path, {'tf': tf})
    # one scalar per input: each predict() yields a (1, 1) array
    return [model.predict(sample)[0, 0] for sample in arrayPred]
| true |
b427d51df0d276c747c8a21a19eb0df2d1b89f23 | Python | satyr-software/lab | /cointest/script.py | UTF-8 | 1,026 | 2.9375 | 3 | [] | no_license | from audioop import reverse
from doctest import OutputChecker
from re import A
# The two Atbash-encoded messages to decode (uppercase letters only;
# digits pass through unchanged).
outer_code = ['URMWXOZIRGBRM7DRWGSC5WVKGS','DVZIVZFWZXRLFHRMXLMXVKGZMWNVGRXFOLFHRMVCVXFGRLN']
# NOTE: a large commented-out prototype (kept in the original file as a
# no-op triple-quoted string literal) was removed; decode_print() below
# is the surviving implementation of the same logic.
def decode_print(input):
    """Decode an Atbash-substituted string.

    Each uppercase letter A-Z is mirrored across the alphabet
    (A<->Z, B<->Y, ...); every other character is returned unchanged.

    Note: the parameter name shadows the builtin ``input``; it is kept
    for backward compatibility with existing callers.
    """
    alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    # str.translate performs the whole per-character mapping in one
    # C-level pass, replacing the original manual find()/concatenation
    # loop (which was quadratic in the message length).
    return input.translate(str.maketrans(alphabet, alphabet[::-1]))
# Decode and print every encoded message in turn.
for encoded in outer_code:
    print(decode_print(encoded))
| true |
eb776cc990bb3afe79219f58ea675e125d909228 | Python | breakthsj/HongJoon | /BaekJoon_8393.py | UTF-8 | 91 | 3.296875 | 3 | [] | no_license | a = int(input())
# Sum the integers 1..a (read from stdin above) and print the total.
i = 0
cnt = 0
for i in range(1, a + 1):
    cnt = cnt + i
print(cnt)
| true |
4e463d19a95be20cf348fec1d9110ff9c7e82887 | Python | chauhan0707/Hacktoberfest | /Codechef/FLOW004.py | UTF-8 | 95 | 2.703125 | 3 | [
"MIT"
] | permissive | // https://www.codechef.com/problems/FLOW004
# FLOW004: print the sum of the first and last digit of the number,
# read as a string so the digits can be indexed directly.
t = input()
first_digit = int(t[0])
last_digit = int(t[-1:])
print(first_digit + last_digit)
| true |
f8af691b504fde17f5afdfbad45bb9acb76fc353 | Python | develask/White-Box-Neural-Networks | /initializers.py | UTF-8 | 826 | 3 | 3 | [] | no_license | import numpy as np
class Initializer():
    """Base class for weight initializers.

    Subclasses implement :meth:`get`, returning a numpy array of the
    requested shape.
    """
    def get(self, shape):
        """Return an array of *shape*; must be overridden by subclasses."""
        raise NotImplementedError("Should have implemented this")


class Constant(Initializer):
    """Initializer that fills the array with a single constant value."""
    def __init__(self, constant):
        self.constant = constant

    def get(self, shape):
        return np.full(shape, self.constant)


class RandomNormal(Initializer):
    """Gaussian initializer.

    When *stddev* is None, Xavier/Glorot initialization is used:
    ``stddev = sqrt(2 / sum(shape))`` (i.e. 2 / (fan_in + fan_out)).
    """
    def __init__(self, mean=0.0, stddev=None):
        self.mean = mean
        self.stddev = stddev

    def get(self, shape):
        stddev = self.stddev
        # BUG-FIX/idiom: identity test ``is None`` instead of ``== None``
        if stddev is None:  # Xavier initialization
            stddev = np.sqrt(2/sum(shape))
        return np.random.normal(loc=self.mean, scale=stddev, size=shape)


class RandomUniform(Initializer):
    """Uniform initializer on [minVal, maxVal)."""
    def __init__(self, minVal=-0.01, maxVal=0.01):
        self.minVal = minVal
        self.maxVal = maxVal

    def get(self, shape):
        return np.random.uniform(low=self.minVal, high=self.maxVal, size=shape)
| true |
9ef4d5010c43db1f2107dad44bcc34fbad630d05 | Python | cooolallen/playground | /pommerman/agents/heuristic_agent.py | UTF-8 | 2,146 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | '''The Reward class is used to calculate the reward value for each action
with three different state (Explore, Attack, Evade)
'''
from collections import defaultdict
import queue
import random
import numpy as np
from . import BaseAgent
from . import SimpleAgent
from ..helpers.mcts import MCTree, SimTree
from ..helpers.reward import Reward
from .. import constants
import timeout_decorator
from timeout_decorator.timeout_decorator import TimeoutError
class HeuristicAgent(SimpleAgent):
    """Heuristic agent.

    Uses a Monte-Carlo tree search when the reward module reports an
    Attack or Evade mode, and falls back to SimpleAgent behaviour
    otherwise.  The search is bounded by a 100 ms timeout; on timeout
    the best action recorded so far (``self.best_action``) is used.
    """

    def __init__(self, standard=True, minmax=False):
        super(HeuristicAgent, self).__init__()
        self.best_action = None      # best action found so far by the search
        self.minmax = minmax         # use minimax backup in the sim tree
        self.standard = standard     # whether to use the standard MCTS

    def act(self, obs, action_space):
        try:
            # try to return the action computed by the time-limited search
            return self._act(obs, action_space)
        except TimeoutError:
            # timed out: fall back to the best action recorded so far
            if self.best_action is None:
                return action_space.sample()
            if isinstance(self.best_action, list):
                # BUG FIX: was ``random.sample(self.best_action)`` which
                # always raises TypeError (the required sample size was
                # missing); pick one candidate at random instead.
                return random.choice(self.best_action)
            return self.best_action
        finally:
            # BUG FIX: the original reset sat after the return statements
            # and never executed; ``finally`` guarantees a clean slate
            # for the next call.
            self.best_action = None

    @timeout_decorator.timeout(0.1)  # the function will time out after 100 ms
    def _act(self, obs, action_space):
        # decide which behaviour mode applies to this observation
        mode = Reward().decideMode(obs, action_space)
        if mode in {constants.Mode.Evade, constants.Mode.Attack}:
            # tree search only for the combat-critical modes
            if self.standard:
                mcts = MCTree(obs, agent=self)
                action = mcts.bestAction()
            else:
                sim_tree = SimTree(obs, agent=self)
                action = sim_tree.bestAction(minimax=self.minmax)
            return action
        else:
            return super().act(obs, action_space)
| true |
c10badd691f642c962f071b9d959ee3b45f924e9 | Python | ezhu22/python | /python/OOP/user.py | UTF-8 | 1,591 | 3.8125 | 4 | [] | no_license | class User:
def __init__(self, username, email_address):
self.name = username
self.email = email_address
self.account_balance = 0
def make_deposit(self, amount):
self.account_balance += amount
print(f"{self.name} you have deposited ${amount} to your account. Your new balance is ${self.account_balance}")
return self
def make_withdrawal(self, amount):
self.account_balance -= amount
print(f"{self.name} you have withdrawn ${amount} from your account. Your new balance is ${self.account_balance}")
return self
def display_user_balance(self):
print(f"{self.name} your balance is ${self.account_balance}")
return self
def transfer_money(self, other_user, amount):
self.account_balance -= amount
print(f"{self.name} you have deducted ${amount} from your account. Your new balance is ${self.account_balance}." )
other_user.account_balance += amount
print(f"{other_user.name}, {self.name} has added ${amount} to your account. Your new balance is ${other_user.account_balance}." )
return self
# Demo: same call sequence as the original fluent chains, written stepwise.
ninja = User("Ninja", "ninja@dojo.com")
pirate = User("Pirate", "pirate@dojo.com")
zombie = User("Zombie", "zombie@dojo.com")
ninja.make_deposit(200)
ninja.make_deposit(1000)
ninja.make_deposit(1)
ninja.make_withdrawal(500)
ninja.display_user_balance()
pirate.make_deposit(99)
pirate.make_deposit(666)
pirate.make_withdrawal(1)
pirate.make_withdrawal(500)
pirate.display_user_balance()
zombie.make_deposit(72)
zombie.make_withdrawal(42)
zombie.make_withdrawal(1)
zombie.make_withdrawal(500)
zombie.display_user_balance()
ninja.transfer_money(zombie, 500)
| true |
eb83d3702ff50ee4c4edcd840ca9cbe9802ba0f9 | Python | jesonjn/RxPY | /rx/linq/observable/max.py | UTF-8 | 988 | 3.125 | 3 | [
"Apache-2.0"
] | permissive | from six import add_metaclass
from rx import AnonymousObservable, Observable
from rx.internal import ExtensionMethod
from rx.internal.basic import identity
from rx.internal.exceptions import SequenceContainsNoElementsError
from .min import first_only
@add_metaclass(ExtensionMethod)
class ObservableMax(Observable):
    """Uses a meta class to extend Observable with the methods in this class"""

    def max(self, comparer=None):
        """Return an observable emitting the single maximum element of the
        source sequence, compared with *comparer* when given.

        Example:
        res = source.max()
        res = source.max(lambda x, y: x.value - y.value)

        Keyword arguments:
        comparer -- {Function} [Optional] Comparer used to compare elements.

        Returns {Observable} An observable sequence containing a single
        element with the maximum element in the source sequence."""
        # max_by yields the list of maxima; first_only reduces it to one.
        maxima = self.max_by(identity, comparer)
        return maxima.select(lambda x: first_only(x))
| true |
bd3c6c758fc33956bbba69b5f0c5274e94969700 | Python | yanwen0614/DBSCAN | /gu.py | UTF-8 | 7,499 | 2.890625 | 3 | [
"MIT"
] | permissive | from math import sin, cos, sqrt, atan2, radians,degrees
# from numpy.linalg import norm
# import numpy as np
# import pandas as pd
# from shapely.geometry import Point, Polygon
# from sklearn.neighbors import NearestNeighbors
def get_distance(X1, Y1, X2, Y2, R=6371137.0):
    """Great-circle (haversine) distance in metres between two points.

    Each point is given as (longitude, latitude) in degrees:
    X1/X2 are longitudes, Y1/Y2 are latitudes.
    :param R: sphere radius in metres (defaults to an earth radius)
    """
    lon_a, lat_a, lon_b, lat_b = (radians(v) for v in (X1, Y1, X2, Y2))
    d_lon = lon_b - lon_a
    d_lat = lat_b - lat_a
    # haversine formula
    h = sin(d_lat / 2)**2 + cos(lat_a) * cos(lat_b) * sin(d_lon / 2)**2
    arc = 2 * atan2(sqrt(h), sqrt(1 - h))
    return R * arc
def get_distance_lnglat(x, y):
    """Haversine distance between two (lng, lat) pairs ``x`` and ``y``."""
    lng1, lat1 = x
    lng2, lat2 = y
    return get_distance(lng1, lat1, lng2, lat2)
class Trans_coor(object):
    """Local tangent-plane coordinate converter anchored at a reference
    (lng, lat) point; plane coordinates are in metres East/North of it."""

    def __init__(self, startll=(114.06, 22.56)):
        """:param startll: reference point as (lng, lat) degrees."""
        self.lng, self.lat = (radians(v) for v in startll)
        self.R = 6371137.0  # earth radius (m)

    def lnglat_to_plane_coor(self, lng, lat):
        """Project (lng, lat) degrees onto the local plane, metres."""
        lng_r = radians(lng)
        lat_r = radians(lat)
        y = (lat_r - self.lat) * self.R
        # east-west metres shrink with cos(latitude)
        x = (lng_r - self.lng) * self.R * cos(lat_r)
        return x, y

    def plane_to_lnglat_coor(self, plane_coor):
        """Inverse of lnglat_to_plane_coor; returns (lng, lat) degrees."""
        x, y = plane_coor
        lat_r = self.lat + y / self.R
        lng_r = self.lng + x / (self.R * cos(lat_r))
        return degrees(lng_r), degrees(lat_r)

    def get_distance_ll(self, X1, Y1, X2, Y2):
        """Haversine distance using this converter's earth radius."""
        return get_distance(X1, Y1, X2, Y2, R=self.R)

    def get_distance_plane(self, X1, Y1, X2, Y2):
        """Euclidean distance between two plane points, metres."""
        return sqrt((X1 - X2)**2 + (Y1 - Y2)**2)
def sample_mae(ll,ll_p,tc,sample_num=1000):
    """Spot-check distances on random samples and return the mean absolute error.

    Draws ``sample_num`` random indices and, for each, measures the
    haversine distance between ``ll[ind]`` and ``ll_p[ind]`` (presumably a
    point and its round-tripped/projected counterpart -- verify against the
    caller), printing timing info and the max error.

    ll -- sequence of (lng, lat) pairs
    ll_p -- matching sequence indexed the same way as ``ll``
    tc -- Trans_coor instance supplying the distance methods
    """
    length = len(ll)
    from random import randint
    from tqdm import tqdm
    error = []
    p_time = 0
    l_time = 0
    from time import time
    for i in tqdm(range(sample_num)):
        # NOTE(review): ind_ only feeds the commented-out plane-distance
        # benchmark below, but this randint() call still advances the RNG
        # state, so it must stay to keep the sampling sequence unchanged.
        ind_ = randint(0,length-1)
        ind = randint(0,length-1)
        # t = time()
        # dist_p = tc.get_distance_plane(*ll_p[ind],*ll_p[ind_])
        # p_time += (time() - t)
        t = time()
        dist_ll = tc.get_distance_ll(*ll[ind],*ll_p[ind])
        l_time += (time() - t)
        error.append(abs(dist_ll))
    print("time compare: ",p_time,l_time)
    print("max error",max(error))
    return sum(error)/sample_num
def loaddata(filename = "poi_data/meilin_business_poi.csv"):
    """Load POI coordinates from *filename*.

    Reads the ``wgs_lat`` / ``wgs_lng`` columns and returns an (N, 2)
    numpy array of [lng, lat] rows.
    """
    # BUG FIX: the module-level ``import pandas as pd`` / ``import numpy
    # as np`` lines are commented out at the top of this file, so this
    # function raised NameError on every call; import them locally.
    import pandas as pd
    import numpy as np
    poi_df_ = pd.read_csv(filename)
    x = poi_df_["wgs_lat"].to_list()
    y = poi_df_["wgs_lng"].to_list()
    x_y = list(zip(y, x))
    return np.array(x_y)
def isInsidePolygon(pt, poly):
    """Ray-casting point-in-polygon test.

    :param pt: (x, y) pair
    :param poly: sequence of (x, y) vertices
    :return: True when *pt* lies inside *poly*.  Points exactly on an
        edge may land on either side (behaviour inherited from the
        original implementation).
    """
    # Idiomatic rewrite of the original manual while/index bookkeeping:
    # walk each edge (prev -> cur) and toggle membership every time the
    # vertical line through pt crosses the edge at or above pt.
    inside = False
    j = len(poly) - 1  # index of the previous vertex
    for i in range(len(poly)):
        xi, yi = poly[i][0], poly[i][1]
        xj, yj = poly[j][0], poly[j][1]
        if (xi <= pt[0] < xj) or (xj <= pt[0] < xi):
            # y of the edge at pt[0]; toggle when pt is on/below it
            if pt[1] <= (yj - yi) * (pt[0] - xi) / (xj - xi) + yi:
                inside = not inside
        j = i
    return inside
def poi_prepocess(ba, poi_df, factor=0):
    """Filter POI rows to those inside the BA (business area) boundary.

    ``factor`` is the boundary inflation ratio: 0 keeps the boundary
    as-is, 1 pushes the edges outward by 1/10 of the distance.
    Note: mutates *poi_df* by adding an ``l`` membership column.
    """
    lngs = poi_df['wgs_lng'].to_list()
    lats = poi_df['wgs_lat'].to_list()
    expanded = ba.extend_bound_array(factor=factor)
    flags = []
    for point in zip(lngs, lats):
        flags.append(1 if isInsidePolygon(point, expanded) else 0)
    poi_df["l"] = flags
    return poi_df[poi_df["l"] == 1]
def get_distance_point2polygen(pts, polygen):
    """Distance in metres from point *pts* (lng, lat) to polygon *polygen*
    (sequence of (lng, lat) vertices), computed in a local plane anchored
    at the polygon's centroid.  With shapely semantics the distance is
    0.0 when the point lies inside the polygon.
    """
    # BUG FIX: the module-level numpy/shapely imports are commented out
    # at the top of this file, so this function raised NameError on every
    # call.  Import them locally instead; shapely is the third-party
    # dependency this file was originally written against.
    import numpy as np
    from shapely.geometry import Point, Polygon
    polygen = np.array(polygen)
    x = polygen[:, 0].mean()
    y = polygen[:, 1].mean()
    tc = Trans_coor((x, y))
    polygen = [tc.lnglat_to_plane_coor(*p) for p in polygen]
    pts = tc.lnglat_to_plane_coor(*pts)
    pts = Point(pts)
    polygon = Polygon(polygen)
    return pts.distance(polygon)
from math import floor, cos
import time
import numpy as np
PI = 3.1415926  # low-precision pi used by the tile geometry below
EARTH_RADIUS = 6371000.0  # earth radius, metres
DEGREE_HALF_CIRCLE = 180.0
DEGREE_CIRCLE = 360.0
# Bounding box of China in degrees (lat 3.5..54, lng 73.5..135.5),
# used by GetTileID below.
CHINA_NORTHEST = 54.0
CHINA_SOUTHEST = 3.5
CHINA_WESTEST = 73.5
CHINA_EASTEST = 135.5
EQUATOR_LENGTH = 40076000.0  # equator circumference, metres
TILE_ID_COEF = 1100000  # packing factor: tileID = lngID * TILE_ID_COEF + latID
D_X = 5  # default tile width, metres
D_Y = 5  # default tile height, metres
RSSI_EMPTY = -120  # sentinel for "no signal" -- TODO confirm units (dBm)
POSITIONING_FP_NUM = 3  # presumably fingerprints per positioning fix -- verify
MINI_VALUE = 0.00001  # small epsilon
def GetTileID(lng, lat, d_x=5, d_y=5):
    """Map a (lng, lat) degree pair onto the China-wide d_x x d_y metre grid.

    Returns (tileID, lngID, latID), or None when the point falls outside
    the China bounding box.  tileID packs both indices as
    ``lngID * TILE_ID_COEF + latID``.
    """
    # BUG FIX: the original guard combined the comparisons with ``and``
    # and with the bounds inverted (lat > 54 and lat < 3.5), which can
    # never be true, so out-of-range points were never rejected.
    if (lat > CHINA_NORTHEST or lat < CHINA_SOUTHEST
            or lng > CHINA_EASTEST or lng < CHINA_WESTEST):
        return None
    expandIndex = 1000000
    latDivideIndex = d_y * 10
    # latitude rows count down from the northern edge in d_y-metre steps
    # (treats ~1e-5 degree of latitude as roughly one metre)
    latInt = int(lat * expandIndex)
    latStartPoint = int(CHINA_NORTHEST * expandIndex)
    latID = int((latStartPoint - latInt) / latDivideIndex)
    # longitude columns are sized by the east-west circumference of the
    # tile's latitude band
    latBand = floor(float(latInt) / float(expandIndex)) + 0.5
    latBandLength = float(EQUATOR_LENGTH * cos(latBand * PI / DEGREE_HALF_CIRCLE))
    lngID = (int((lng - CHINA_WESTEST)/DEGREE_CIRCLE * (latBandLength / d_x)))
    tileID = lngID * TILE_ID_COEF + latID
    return tileID, lngID, latID
# Compute a tile's latitude/longitude bounding box from its grid id.
def TileIDAnalysis(tileID, d_x=5, d_y=5):
    """Inverse of GetTileID: return the degree bounds of tile *tileID*.

    Returns ((latMinVal, latMaxVal), (lngMinVal, lngMaxVal)).
    d_x / d_y are the tile width/height in metres and should match the
    values used when the id was generated.
    """
    latMinVal = 0.0
    latMaxVal = 0.0
    lngMinVal = 0.0
    lngMaxVal = 0.0
    # unpack latID and lngID from the packed tile id
    latID = 0
    lngID = 0
    lngID = floor(tileID / TILE_ID_COEF)
    latID = tileID - lngID * TILE_ID_COEF
    # ~1e-5 degree of latitude per metre (same approximation as GetTileID)
    latPerMeter = 0.00001
    latMaxVal = float(CHINA_NORTHEST - latID * d_y * latPerMeter)
    latMinVal = float(latMaxVal - d_y * latPerMeter)
    # east-west circumference at the tile's latitude band
    latBand = float(floor(float(latMinVal + latMaxVal) /2.0) + 0.5)
    latBandLength = float(EQUATOR_LENGTH * cos(float(latBand *PI / DEGREE_HALF_CIRCLE)))
    lngMinVal = float(lngID * d_x / latBandLength * DEGREE_CIRCLE + CHINA_WESTEST)
    lngMaxVal = float(lngMinVal + d_x * latPerMeter / cos(float(latBand) * PI / DEGREE_HALF_CIRCLE))
    return (latMinVal, latMaxVal), (lngMinVal, lngMaxVal)
def GetNerbTileID(tileID, long_x, long_y, d_x=5, d_y=5):
    """Yield the packed ids of all tiles within +/-long_x / +/-long_y
    metres of *tileID* (inclusive of the tile itself).

    long_* : extend distance by one side, metres
    d_x / d_y : tile width/height, metres
    """
    base_lng = floor(tileID / TILE_ID_COEF)
    base_lat = tileID - base_lng * TILE_ID_COEF
    span_lng = int(long_x / d_x)
    span_lat = int(long_y / d_y)
    for off_lng in range(-span_lng, span_lng + 1):
        for off_lat in range(-span_lat, span_lat + 1):
            yield (base_lng + off_lng) * TILE_ID_COEF + (base_lat + off_lat)
def getstrtime(timeStamp):
timeStamp = float(timeStamp)/1000
timeArray = time.localtime(timeStamp)
return time.strftime("%Y-%m-%d %H:%M:%S", timeArray)
# Edit-distance function.
def levenshtein_distance(seq1, seq2):
    """Levenshtein distance (unit-cost insert/delete/substitute) between
    two sequences, returned as a numpy float."""
    rows = len(seq1) + 1
    cols = len(seq2) + 1
    dist = np.zeros((rows, cols))
    # first row/column: cost of building a prefix from nothing
    dist[:, 0] = np.arange(rows)
    dist[0, :] = np.arange(cols)
    for r in range(1, rows):
        for c in range(1, cols):
            if seq1[r - 1] == seq2[c - 1]:
                # match: diagonal carries over at no extra cost
                dist[r, c] = min(dist[r - 1, c] + 1,
                                 dist[r - 1, c - 1],
                                 dist[r, c - 1] + 1)
            else:
                dist[r, c] = min(dist[r - 1, c] + 1,
                                 dist[r - 1, c - 1] + 1,
                                 dist[r, c - 1] + 1)
    return dist[rows - 1, cols - 1]
if __name__ == "__main__":
    # Smoke-check the neighbour-tile generator, then print a sample
    # haversine distance (the two duplicate main guards are merged;
    # execution order is unchanged).
    for tileid in GetNerbTileID(917593129096, 1000, 500):
        pass
    print(get_distance(114.06094917667023, 22.52515499827182,
                       114.06027794195909, 22.52514618249599))
| true |
c1a49659bf2c4c8e83c9c996af26325374c09d0e | Python | GTLIDAR/ChanceConstrainedRobustCITO | /tutorials/optimization/doubleIntegrator.py | UTF-8 | 2,260 | 3.46875 | 3 | [] | no_license | """
Example of adding a custom constraint to a mathematical program in pydrake.
This example solves a trajectory optimization problem with a double integrator. The problem is constructed using MathematicalProgram, instead of using DirectCollocation, to highlight several features in MathematicalProgram.
The goal is to drive the double integrator from a non-zero initial state to the origin. No cost is placed on the controls or state.
This example is adapted from "Trajectory optimization for the double integrator" in Russ Tedrake's Underactuated Robotics course: http://underactuated.mit.edu/trajopt.html
Luke Drnach
October 26, 2020
"""
import numpy as np
import matplotlib.pyplot as plt
from pydrake.all import MathematicalProgram, Solve, Variable, eq
# Approximate the double integrator
dt = 0.01
A = np.eye(2) + dt*np.array([[0, 1],[0,0]])
B = dt * np.array([0,1]).T
B = np.expand_dims(B, axis=1)
# Dynamics constraint function
def dynamicsCstr(z):
x1, u1, y = np.split(z, [2,3])
return y - A.dot(x1) - B.dot(u1)
# Create a mathematical program
prog = MathematicalProgram()
# Number of knot points
N = 284
# Create decision variables
u = np.empty((1,N-1), dtype=Variable)
x = np.empty((2,N), dtype=Variable)
for n in range(0, N-1):
u[:,n] = prog.NewContinuousVariables(1, 'u' + str(n))
x[:,n] = prog.NewContinuousVariables(2, 'x' + str(n))
x[:,N - 1] = prog.NewContinuousVariables(2, 'x' + str(N))
# Add constraints at every knot point
x0 = [-2, 0]
prog.AddBoundingBoxConstraint(x0, x0, x[:,0])
for n in range(0, N-1):
# Add the dynamics as an equality constraint
# prog.AddConstraint(eq(x[:,n+1], A.dot(x[:,n]) + B.dot(u[:,n])))
# Add the dynamics as a function handle constraint
prog.AddConstraint(dynamicsCstr, lb=np.array([0., 0.]), ub=np.array([0., 0.]), vars=np.concatenate((x[:,n], u[:, n], x[:, n+1]), axis=0), description="dynamics")
prog.AddBoundingBoxConstraint(-1, 1, u[:,n])
xf = [0, 0]
prog.AddBoundingBoxConstraint(xf, xf, x[:,N - 1])
# Solve the problem
result = Solve(prog)
x_sol = result.GetSolution(x)
print(f"Optimization successful? {result.is_success()}")
# Display the optimized trajectories
plt.figure()
plt.plot(x_sol[0,:], x_sol[1,:])
plt.xlabel('q')
plt.ylabel('qdot')
plt.show() | true |
84b7927ed1a4b0bf0ec768f16aa217fad6240e52 | Python | sbattula2/dfsNeuralNet | /AddScoresToLineupInputs.py | UTF-8 | 2,540 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Aug 20 17:36:53 2019
@author: nbatt
"""
import os
import pandas as pd
#Loop through each lineupInput
#Add score based on the file name
#go through each player in the output
#and find him in daily leaders
#if not found, add him to not found list
#os.chdir('C:/NBA DFS/lineupInputs')
#df = pd.read_csv('Nov_3_2018.csv')
#df.set_index('Player',inplace=True)
def getNamesNotFound():
os.chdir('C:/NBA DFS/lineupInputs')
files = os.listdir()
notFound = []
for i in files:
os.chdir('C:/NBA DFS/lineupInputs')
lineup = pd.read_csv(i)
os.chdir('C:/NBA DFS/Daily Leaders')
dl = pd.read_csv(i,encoding='latin-1')
dl.set_index('Name',inplace=True)
for q in lineup['Player']:
#dl.loc[q]
try:
dl.loc[q]
except:
if q not in notFound:
notFound.append(q)
if q == 'Walter Jr.':
print(i)
#create notFound csv file
os.chdir('C:/NBA DFS')
dl.to_csv('NameEdits.csv')
def renameDailyLeaders():
os.chdir('C:/NBA DFS')
nameEdits = pd.read_csv('Player Name Edits.csv')
os.chdir('C:/NBA DFS/Daily Leaders')
files = os.listdir()
for i in files:
print(i)
dl = pd.read_csv(i,encoding='latin-1')
dl = dl.loc[dl['Name']!='Player']
#dl.set_index('Name',inplace=True)
for q in range(nameEdits.shape[0]):
dl.loc[dl['Name']==nameEdits['BBREF'].iloc[q],'Name'] = nameEdits['FANDUEL'].iloc[q]
dl.to_csv(i)
def addDFSPoints():
os.chdir('C:/NBA DFS/Daily Leaders')
files = os.listdir()
for i in files:
print(i)
os.chdir('C:/NBA DFS/lineupInputs')
lineup = pd.read_csv(i)
lineup['Score'] = [0] * lineup.shape[0]
os.chdir('C:/NBA DFS/Daily Leaders')
dl = pd.read_csv(i,encoding='latin-1')
dl = dl.loc[dl['Name']!='Player']
dl.set_index('Name',inplace=True)
for j,k in dl.iterrows():
lineup.loc[lineup["Player"]==j,'Score'] = dl['FPTS'].loc[j]
dl.to_csv(i)
os.chdir('C:/NBA DFS/lineupInputs')
lineup.to_csv(i,index=False)
#getNamesNotFound()
renameDailyLeaders()
addDFSPoints()
#os.chdir('C:/NBA DFS/lineupInputs')
| true |
ffa271f6d57ee623e794c0e2e2e38356aebb6f5d | Python | KesterJ/TREPAN | /test-candidates.py | UTF-8 | 1,844 | 3.296875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 11 20:56:32 2017
@author: Kester
"""
import numpy as np
###Making M OF N tests
def make_candidate_tests(samples, labels):
"""
A function that should take all features, all samples, and return the
the possible breakpoints for each feature. These are the midpoints between
any two samples that do not have the same label.
"""
#Create empty dictionary to store features and their breakpoints
bpdict = {}
#Loop over each feature (assumes features are columns and samples are rows)
for feature in range(samples.shape[1]):
#Get unique values for feature
values = np.unique(samples[:,feature])
breakpoints = []
#Loop over values and check if diff classes between values
for value in range(len(values)-1):
#Check if different classes in associated labels, find midpoint if so
labels1 = labels[samples[:,feature]==values[value]]
labels2 = labels[samples[:,feature]==values[value+1]]
if list(np.unique(labels1))!=list(np.unique(labels2)):
midpoint = (values[value]+values[value+1])/2
breakpoints.append(midpoint)
#Add list of breakpoints to feature dict
bpdict[feature] = breakpoints
return bpdict
test1 = np.array([[1, 1, 1, 1], [2, 5, 2, 2], [3, 3, 6, 0], [4, 4, 4, 100],
[5, 5, 2, 5]])
labels1 = np.array([0, 0, 1, 1, 1])
test1dict = make_candidate_tests(test1, labels1)
test2 = np.array([[1, 1], [1, 2], [1,3], [1,3], [1,5]])
labels2 = np.array([0,0,0,1,1])
test2dict = make_candidate_tests(test2, labels2)
test3 = np.array([[1, 1, 1, 1], [2, 5, 2, 2], [3, 3, 6, 0], [4, 4, 4, 100],
[5, 5, 2, 5]])
labels3 = np.array([0, 0, 0, 0, 0])
test3dict = make_candidate_tests(test3, labels3) | true |
ea78b1febc3e6b3294ddcf721931cda36a19a8e3 | Python | chrisvail/Project_Euler | /Problem26.py | UTF-8 | 1,205 | 3.6875 | 4 | [] | no_license | # Sets variable to hold longest chain so far
maxChain = (0,0)
# Loops through all possible values of d
for i in range(1, 1000):
# Sets values used for long division to avoid binary imprecision
remainder = 1
past_remainders = []
past_remainders.append(remainder)
# Loops until a recurring sequence of numbers is found
# or when a complete fraction is found
while True:
# Finds next remainder
remainder = (remainder * 10) % i
# Checks if end of fraction and breaks
if remainder == 0:
break
# If the remainder has already been then its a recurring sequence
elif remainder in past_remainders:
# Checks length against previous lengths
if maxChain[1] < len(past_remainders) - past_remainders.index(remainder):
maxChain = i, len(past_remainders) - past_remainders.index(remainder)
break
else:
break
# Adds remainder to past remainders to be checked against
else:
past_remainders.append(remainder)
# Prints out answer
print(maxChain) | true |
04481a452c5d49e33634aa235cc5dd5f00813d2d | Python | rodrigoks/python | /desafios/mundo1/d031.py | UTF-8 | 524 | 4.0625 | 4 | [] | no_license | print('==' * 40)
print('Desenvolva um programa que pergunte a distância de uma viagem em Km. Calcule o preço da passagem, cobrando R$0,50 por Km para viagens de até 200Km e R$0,45 parta viagens mais longas.')
print('--' * 40)
distancia = int(input('Qual a distancia da viagem? '))
valorProximo = 0.5
valorLonge = .45
print('PROCESSANDO...')
if distancia > 200:
preco = distancia * valorLonge
else:
preco = distancia * valorProximo
print('O custo da viagem sera de R$ {:.2f}.'.format(preco))
print('==' * 40)
| true |
6062e37ee5cf312942234cea20738560d7629102 | Python | margoboiko/Python | /PythonLab2/PythonLab2/2_2.py | UTF-8 | 422 | 3.46875 | 3 | [] | no_license | import math
n = int(input('Enter n: '))
def sum(k):
def a(k):
if k == 1:
return 1
else:
return 3 * b(k - 1) + (2 * b(k - 1))
def b(k):
if k == 1:
return 1
else:
return a(k - 1)**2 + b(k - 1)
if k == 1:
return 1
else:
return sum(k - 1) + (2**k) / (1 + a(k) + b(k))
print(sum(n))
| true |
0c0711e8ac23bfd9264cef9157df8d2284da28a5 | Python | MrCsabaToth/IK | /2019Nov/linked_lists/list_groupk_reverse_coderpad_wip2.py | UTF-8 | 1,679 | 3.6875 | 4 | [
"Apache-2.0"
] | permissive | class LinkedListNode:
def __init__(self, node_value):
self.val = node_value
self.next = None
@staticmethod
def build(arr):
next = None
for a in arr[::-1]:
node = LinkedListNode(a)
node.next = next
next = node
return node
def __str__(self):
s = "{}->".format(self.val)
s += "null" if not self.next else str(self.next)
return s
def reverse_linked_list_in_groups_of_k(head, k):
p = head
prev = None
new_head = None
node = None
while p:
i = 0
q = []
while i < k and p:
q.append(p)
print("qapp", p.val, [qi.val for qi in q])
p = p.next
i += 1
if not new_head:
print("new head", q[-1])
new_head = q[-1]
for node in q[::-1]:
if prev:
prev.next = node
prev = node
node.next = None
return new_head
import random
import pytest
@pytest.mark.parametrize("i", range(100))
def test_list_build(i):
rng = random.SystemRandom()
length = rng.randint(5, 10)
arr = [rng.randint(0, 100) for _ in range(length)]
lst = LinkedListNode.build(arr)
s = str(lst)
expected = "->".join([str(a) for a in arr])
expected += "->null"
assert s == expected
@pytest.mark.parametrize("arr,k,expected", [
([1, 2, 3, 4, 5, 6], 3, "3->2->1->6->5->4->null")
])
def test_reverse_linked_list_in_groups_of_k(arr, k, expected):
lst = LinkedListNode.build(arr)
lst2 = reverse_linked_list_in_groups_of_k(lst, 3)
s = str(lst2)
assert s == expected
pytest.main()
| true |
4940712b54f1b9b7cfeeab102f45f570b1e8161f | Python | Alvinosaur/Learning_Diff_Algorithms | /leet_code/find_min_coins_change.py | UTF-8 | 2,561 | 3.65625 | 4 | [] | no_license | import time
# naive recursion b/c passes the same situation multiple times
def minChange(coins: set, change: int) -> int:
# Base case: perfect amount of change matching one coin
if change in coins: return 1
# Have to search all the coins and try each recursively
possible_coins = set([c for c in coins if c < change])
min_coins = -1
for c in possible_coins:
guess = 1 + minChange(possible_coins, change - c)
if min_coins < 0 or guess < min_coins:
min_coins = guess
return min_coins
start = time.time()
# print(minChange({1, 5, 10, 25}, 63), 'elapsed time: ', time.time() - start)
# memoization/caching version
def minChange(coins: set, change: int, sol_map=dict()) -> int:
# Base case: perfect amount of change matching one coin
if change in coins:
sol_map[change] = 1
return 1
elif sol_map.get(change) != None:
return sol_map[change]
# Have to search all the coins and try each recursively
possible_coins = set([c for c in coins if c < change])
min_coins = -1
for c in possible_coins:
guess = 1 + minChange(possible_coins, change - c, sol_map)
if min_coins < 0 or guess < min_coins:
min_coins = guess
sol_map[change] = min_coins
return min_coins
start = time.time()
print(minChange({1, 5, 10, 25}, 63), 'elapsed time: ', time.time() - start)
# dynamic programming version:
"""
In the above, we worked top-down from high amounts of change down to small amounts and updated our
dict along the way. It used recursion to reach down to small amounts to the end.
In dynamic programming, work from bottom-up by starting with small amount of change and working upwards,
and this guarantees that we do indeed see previous results with smaller amounts of change.
"""
def DminChange(coins: set, change: int, sol_map=dict()) -> int:
# find solutions to all <= amounts of change and work upwards
for subChange in range(change+1):
# use <= here since we aren't checking if change in coins
possible_coins = set([c for c in coins if c <= change])
minChangeAmt = subChange # max is all pennies for given amount of change
sol_map[subChange] = subChange # initially worse-case
for c in possible_coins:
if sol_map.get(subChange - c) != None: # seen this before
# if prev seen sol +1 extra coin is a better solution
if sol_map[subChange - c] + 1 < minChangeAmt:
minChangeAmt = sol_map[subChange - c] + 1
else:
sol_map[subChange - c] = minChangeAmt
sol_map[subChange] = minChangeAmt
return sol_map[subChange]
print(DminChange({1, 5, 10, 25}, 63), 'elapsed time: ', time.time() - start) | true |
c5646465109324ae678cd6f5a3b985cb37231a1c | Python | Imranmalik110/GUI-With-Python | /Entry Text.py | UTF-8 | 530 | 2.84375 | 3 | [] | no_license | from tkinter import *
window =Tk()
window.title("Hello Python")
window.config(bg="black")
window.geometry("300x200")
name=Label(window,text='Name',font=25,bg='pink',fg='grey').place(x=30,y=50)
email=Label(window,text='Email',font=25,bg='pink',fg='green').place(x=30,y=90)
pwd=Label(window,text="Password",font=25).place(x=30,y=130)
sbt1=Button(window,text='Submit',font=25).place(x=30,y=170)
e1=Entry(window).place(x=80,y=50)
e2=Entry(window).place(x=80,y=90)
e3=Entry(window).place(x=95,y=135)
window.mainloop()
| true |
e77148fc76de72262acfe8aa0ce952b57718ffb8 | Python | helmigandi/belajar-python | /2.control_flow/functions2.py | UTF-8 | 2,140 | 4.34375 | 4 | [] | no_license | def spam():
eggs = 99
bacon()
print(eggs)
def bacon():
eggs = 0
return eggs
spam()
""" Local Scopes Cannot Use Variables in Other Local Scopes
When bacon() returns, the local scope for that call is destroyed.
The program execution continues in the spam() function to print
the value of eggs ❸, and since the local scope for the call to
spam() still exists here, the eggs variable is set to 99. This
is what the program prints.
We use global keyword to read and write a global variable inside a function.
Use of global keyword outside a function has no effect
"""
print("--------------------------------------------")
def scope_test():
def do_local():
spam = "local spam"
def do_nonlocal():
nonlocal spam
spam = "nonlocal spam"
def do_global():
global spam
spam = "global spam"
spam = "test spam"
do_local()
print("After local assignment:", spam)
do_nonlocal()
print("After nonlocal assignment:", spam)
do_global()
print("After global assignment:", spam)
scope_test()
print("In global scope:", spam)
""" The global Statement
The global keyword is used to create global variables from a
no-global scope, e.g. inside a function.
"""
print("----------------------------------------")
def scope_test():
def do_global():
nonlocal spam
spam = "non spam"
spam = "test spam"
do_global()
print("After global assignment:", spam)
scope_test()
# print("In global scope:", spam)
print("----------------------------------------")
def outer():
x = "local"
def inner():
nonlocal x
x = "nonlocal"
print("inner:", x)
inner()
print("outer:", x)
outer()
""" The nonlocal Statement
The nonlocal keyword is used to work with variables inside
nested functions, where the variable should not belong to
the inner function.
Use the keyword nonlocal to declare that the variable is not local.
Nonlocal variable are used in nested function whose local
scope is not defined. This means, the variable can be neither
in the local nor the global scope.
""" | true |
1a386a3f21894a937d5dda72f5b76ed7fb38451d | Python | wilrona/Apps-BtoB-Onl | /application/function.py | UTF-8 | 7,296 | 2.71875 | 3 | [] | no_license | __author__ = 'wilrona'
import re
import datetime
from datetime import date, timedelta
from werkzeug.routing import BaseConverter, ValidationError
from itsdangerous import base64_encode, base64_decode
from bson.objectid import ObjectId
from bson.errors import InvalidId
# Define the weekday mnemonics to match the date.weekday function
(MON, TUE, WED, THU, FRI, SAT, SUN) = range(7)
# Define default weekends, but allow this to be overridden at the function level
# in case someone only, for example, only has a 4-day workweek.
default_weekends=(SAT,SUN)
def networkdays(start_date, end_date, holidays=[], weekends=default_weekends):
delta_days = (end_date - start_date).days + 1
full_weeks, extra_days = divmod(delta_days, 7)
# num_workdays = how many days/week you work * total # of weeks
num_workdays = (full_weeks + 1) * (7 - len(weekends))
# subtract out any working days that fall in the 'shortened week'
for d in range(1, 8 - extra_days):
if (end_date + timedelta(d)).weekday() not in weekends:
num_workdays -= 1
# skip holidays that fall on weekends
holidays = [x for x in holidays if x.weekday() not in weekends]
# subtract out any holidays
for d in holidays:
if start_date <= d <= end_date:
num_workdays -= 1
return num_workdays
def datetime_convert(time): # Convertis time sous la forme YYYY-MM-DD HH:MM:SS
_time = str(time)
retime = re.compile(r'\W+')
_list = retime.split(_time)
if len(_list) >= 6:
year = int(_list[0])
mounth = int(_list[1])
day = int(_list[2])
hour = int(_list[3])
minute = int(_list[4])
second = int(_list[5])
time = datetime.datetime(year, mounth, day, hour, minute, second)
return time
else:
try:
hour = int(_list[0])
minute = int(_list[1])
second = int(_list[2])
time = datetime.datetime(2000, 1, 1, hour, minute, second)
return time
except IndexError:
hour = int(_list[0])
minute = int(_list[1])
time = datetime.datetime(2000, 1, 1, hour, minute)
return time
def date_convert(date):# Convertis date sous la forme YYYY-MM-DD
_date = str(date)
redate = re.compile(r'\W+')
_list = redate.split(_date)
try:
day = int(_list[0])
mounth = int(_list[1])
year = int(_list[2])
date = datetime.date(year, mounth, day)
return date
except ValueError:
day = int(_list[2])
mounth = int(_list[1])
year = int(_list[0])
date = datetime.date(year, mounth, day)
return date
# jinja 2 formatage de la date
def format_date(date, format=None):
newdate = None
try:
if(date):
newdate = date.strftime(format)
except ValueError:
dateMin = str(date.minute)
if date.minute < 10:
dateMin = str(date.minute)+'0'
newdate = '0'+str(date.hour)+":"+dateMin
return newdate
def format_date_month(date, format=None):
newdate = date.strftime(format).lstrip("0").replace(" 0", " ")
return newdate
def time_convert(time): # Convertis time sous la forme HH:MM:SS
    """Turn a separated time-of-day string into a datetime.time.

    Accepts "HH:MM:SS" or, when the seconds field is missing, "HH:MM".
    """
    parts = re.compile(r'\W+').split(str(time))
    try:
        return datetime.time(int(parts[0]), int(parts[1]), int(parts[2]))
    except IndexError:
        # no seconds field supplied
        return datetime.time(int(parts[0]), int(parts[1]))
def convert_in_second(time):
    """Convert an "HH:MM[:SS]" string to a total number of seconds.

    Falsy input (None, "", 0) yields 0.
    """
    if not time:
        return 0
    parts = re.compile(r'\W+').split(str(time))
    try:
        return int(parts[0]) * 3600 + int(parts[1]) * 60 + int(parts[2])
    except IndexError:
        # seconds field omitted
        return int(parts[0]) * 3600 + int(parts[1]) * 60
# jinja 2 ajoute du temps sur le temp en cours
def add_time(time, retard):
    """Jinja2 helper: add a delay (retard, "HH:MM") to a time value.

    The base value goes through datetime_convert; a falsy delay leaves it
    unchanged.  Only the time-of-day part of the result is returned.
    """
    base = datetime_convert(time)
    if not retard:
        return base.time()
    parts = re.compile(r'\W+').split(str(retard))
    offset_seconds = int(parts[0]) * 3600 + int(parts[1]) * 60
    return (base + datetime.timedelta(0, offset_seconds)).time()
# jinja 2 formatage du prix avec des espaces
def format_price(price):
    """Jinja2 filter: thousands-separated price ("1 234 567"); falsy -> "0"."""
    if not price:
        return str(0)
    return '{:,}'.format(price).replace(',', ' ')
def find(word, search):
    """Return True if any space-separated word in *word* starts with *search*.

    The original built every character prefix of every word into nested
    lists and tested membership -- O(n^2) space per word; str.startswith
    expresses the same check directly.  An empty search string still yields
    False (the original prefix lists never contained ""), which is why a
    bare startswith("") test would not be equivalent.
    """
    if not search:
        return False
    return any(w.startswith(search) for w in word.split(" "))
def convert_timedelta(duration):
    """Break a timedelta into whole (hours, minutes, seconds)."""
    total_seconds = duration.days * 86400 + duration.seconds
    hours, remainder = divmod(total_seconds, 3600)
    minutes, seconds = divmod(remainder, 60)
    return hours, minutes, seconds
def get_first_day(dt, d_years=0, d_months=0):
    """First day of dt's month, shifted by the d_years/d_months deltas."""
    # work in a flat month count so year carries fall out of the arithmetic
    months_total = (dt.year + d_years) * 12 + (dt.month + d_months) - 1
    return date(months_total // 12, months_total % 12 + 1, 1)
def get_last_day(dt):
    # Last day of dt's month: first day of the *next* month minus one day.
    return get_first_day(dt, 0, 1) + timedelta(-1)
class ObjectIDConverter(BaseConverter):
    """URL route converter for base64-encoded MongoDB ObjectIds.

    Presumably registered on the Flask/Werkzeug URL map -- confirm where
    base64_decode/base64_encode come from (itsdangerous helpers?).
    """
    def to_python(self, value):
        # URL segment -> ObjectId; any malformed value aborts route matching
        try:
            return ObjectId(base64_decode(value))
        except (InvalidId, ValueError, TypeError):
            raise ValidationError()
    def to_url(self, value):
        # ObjectId -> URL-safe encoding of its raw 12 bytes
        return base64_encode(value.binary)
def reference(count, caractere, user=False, refuser=None):
    """Build a zero-padded reference string.

    count     -- sequential number, left-padded with zeros
    caractere -- total width of the padded number
    user      -- when True, produce a user reference "YO/<number>"
    refuser   -- optional prefix for document references
                 ("<refuser>/<year>/<number>"); otherwise "<year>/<number>"

    (The original kept its manual zero-padding loop as commented-out dead
    code; removed here -- rjust does the same job.)
    """
    padded = str(count).rjust(caractere, '0')
    if user:
        return 'YO/' + padded
    current_year = str(datetime.datetime.now().year)
    if refuser:
        return refuser + '/' + current_year + '/' + padded
    return current_year + '/' + padded
def string(data):
    # Coerce any value to str.  Tiny helper, presumably exposed to Jinja2
    # templates like the filters above -- confirm at the registration site.
    return str(data)
_slugify_strip_re = re.compile(r'[^\w\s-]')
_slugify_hyphenate_re = re.compile(r'[-\s]+')
def _slugify(value):
    """
    Normalizes string, converts to lowercase, removes non-alpha characters,
    and converts spaces to hyphens.
    From Django's "django/template/defaultfilters.py".
    """
    # NOTE(review): Python 2 only -- relies on the `unicode` builtin.
    import unicodedata
    if not isinstance(value, unicode):
        value = unicode(value)
    # decompose accented characters, then drop every non-ASCII byte
    value = unicodedata.normalize('NFKD', value).encode('ascii', 'ignore')
    # strip characters that are not word chars, whitespace or hyphens
    value = unicode(_slugify_strip_re.sub('', value).strip().lower())
return _slugify_hyphenate_re.sub('-', value) | true |
e8f666ffe5f2bcbc279e463ce2201270e46ed1e4 | Python | michaelamican/python_starter_projects | /Python_Fundamentals/Functions_Intermediate_I.py | UTF-8 | 195 | 3.71875 | 4 | [] | no_license | def rand_int(x,y):
    # Print a pseudo-random integer drawn between x and y.
    import random
    min = x   # NOTE(review): shadows the built-in min
    max = y   # NOTE(review): shadows the built-in max
    integer = random.random() * (max-min) + min
    if integer >= max:
        print(int(integer))
    else:
        # round() can still yield max when integer is within 0.5 of it --
        # beginner-exercise quirk, left as-is
        print(round(integer))
| true |
cbfaa2d1084bc774c30bcdc24972a1dac41ce0c4 | Python | cjj1472111531/- | /selenium/03-selenium窗口切换.py | GB18030 | 1,400 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | # coding=gbk
# file: 03 - selenium window switching demo (the original GBK header and
# comments are mojibake in this copy; English comments reconstructed from
# the code -- confirm intent against the original file)
# date: 2021/8/11 9:41
# Editor: clown
from selenium.webdriver import Chrome
from selenium.webdriver.common.keys import Keys
import time
web=Chrome()
web.get("http://lagou.com")
# dismiss the site's pop-up dialog
web.find_element_by_xpath('//*[@id="cboxClose"]').click()
time.sleep(1)
# type "python" in the search box and submit with Enter
work=web.find_element_by_xpath('//*[@id="search_input"]').send_keys('python',Keys.ENTER)
time.sleep(2)
# open the third job posting in the result list (opens a new browser tab)
web.find_element_by_xpath('//*[@id="s_position_list"]/ul/li[3]/div[1]/div[1]/div[1]/a/h3').click()
# Selenium does not follow newly opened tabs automatically; switch the
# driver to the newest window handle.
web.switch_to.window(web.window_handles[-1])  # last handle = newest window
# grab the job description text from the detail page
job_info=web.find_element_by_xpath('//*[@id="job_detail"]/dd[2]/div').text
print(job_info)
print("-"*30)
# close the detail tab
web.close()
# switch back to the original (first) window handle
web.switch_to.window(web.window_handles[0])
# read company name and job title from the first result on the list page
# //*[@id="s_position_list"]/ul/li[1]/div[1]/div[1]/div[1]/a/h3
# //*[@id="s_position_list"]/ul/li[1]/div[1]/div[2]/div[1]/a
print(web.find_element_by_xpath('//*[@id="s_position_list"]/ul/li[1]/div[1]/div[2]/div[1]/a').text,end="--")
print(web.find_element_by_xpath('//*[@id="s_position_list"]/ul/li[1]/div[1]/div[1]/div[1]/a/h3').text)
| true |
991cb3a170facfd94c31fa1946fd027ec5aed22a | Python | funnydog/AoC2016 | /day8/day8.py | UTF-8 | 2,159 | 3.53125 | 4 | [] | no_license | #!/usr/bin/env python3
import re
pat = re.compile(r"rect ([0-9]+)x([0-9]+)|rotate column x=([0-9]+) by ([0-9]+)|rotate row y=([0-9]+) by ([0-9]+)")
class Screen(object):
    """Pixel grid for Advent of Code 2016 day 8 ("Two-Factor Authentication").

    Supports the three puzzle instructions: rect AxB, rotate row, rotate
    column.  pixel[y][x] is 1 for a lit pixel and 0 for a dark one.
    """

    # Instruction parser, compiled once at class-definition time.  Moved in
    # from module level so the class is self-contained.
    PATTERN = re.compile(
        r"rect ([0-9]+)x([0-9]+)"
        r"|rotate column x=([0-9]+) by ([0-9]+)"
        r"|rotate row y=([0-9]+) by ([0-9]+)")

    def __init__(self, width, height):
        self.width = width
        self.height = height
        self.pixel = [[0 for x in range(width)] for y in range(height)]

    def rect(self, a, b):
        """Turn on an a-wide, b-tall rectangle in the top-left corner."""
        for y in range(b):
            for x in range(a):
                self.pixel[y][x] = 1

    def rotate_row(self, a, b):
        """Rotate row a right by b pixels, wrapping around.

        Done with one slice instead of b single-step shifts (O(width)
        instead of O(width*b))."""
        b %= self.width
        if b:
            row = self.pixel[a]
            self.pixel[a] = row[-b:] + row[:-b]

    def rotate_col(self, a, b):
        """Rotate column a down by b pixels, wrapping around."""
        b %= self.height
        if b:
            col = [self.pixel[y][a] for y in range(self.height)]
            for y in range(self.height):
                self.pixel[y][a] = col[(y - b) % self.height]

    def commands(self, txt):
        """Apply every recognized instruction in txt (one per line)."""
        for line in txt.split("\n"):
            m = self.PATTERN.search(line)
            if not m:
                continue
            g = m.groups()
            if g[0] and g[1]:
                self.rect(int(g[0]), int(g[1]))
            elif g[2] and g[3]:
                self.rotate_col(int(g[2]), int(g[3]))
            elif g[4] and g[5]:
                self.rotate_row(int(g[4]), int(g[5]))

    def lit(self):
        """Return the number of lit pixels."""
        return sum(sum(row) for row in self.pixel)

    def __str__(self):
        return "\n".join(
            "".join("#" if v else " " for v in row) for row in self.pixel)
import sys
if __name__ == "__main__":
    # Command-line entry point: read the instruction file named on the
    # command line, replay it on a 50x6 screen, report both puzzle answers.
    if len(sys.argv) < 2:
        print("Usage: {} <filename>".format(sys.argv[0]), file=sys.stderr)
        sys.exit(1)
    try:
        with open(sys.argv[1], "rt") as f:
            txt = f.read().strip()
    except OSError:
        # Narrowed from a bare `except:` -- only file-access errors should
        # be reported as "cannot open" (a bare except also swallowed
        # KeyboardInterrupt/SystemExit).
        print("Cannot open {}".format(sys.argv[1]), file=sys.stderr)
        sys.exit(1)

    s = Screen(50, 6)
    s.commands(txt)
    print("Part1:", s.lit())
    print("Part2:")
    print(s)
| true |
7b44a1b39c2fbc59a3342675fabda6aa58ded1dd | Python | rasoolaaqib/KNN | /KNN_model.py | UTF-8 | 8,381 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
import re
import string
import matplotlib.pyplot as plt
import scipy
from scipy.spatial import distance
import statistics
import random
# In[4]:
# reading and storing train data
train = pd.read_csv("train.csv")
# reading and storing test data
test = pd.read_csv("test.csv")
# reading and storing stop words; the context manager closes the handle,
# which the original bare open() never did
with open('stop_words.txt', 'r+') as f:
    stop_words = f.read().splitlines()
# In[4]:
def word_split(sent):
    """Split a string into words, treating every non-word character as a separator."""
    return re.sub(r"\W", " ", sent).split()
# In[8]:
def clean_data(x):
    """Normalise a tweets DataFrame (expects a 'Tweet' column) and return it.

    Relies on the module-level `stop_words` list.
    """
    #converting to lowercase (every cell coerced to str first)
    x = x.apply(lambda x: x.astype(str).str.lower())
    #removing stop words (whole-word regex replace across all columns)
    for i in stop_words :
        x = x.replace(to_replace=r'\b%s\b'%i, value="",regex=True)
    #removing punctuations
    table = str.maketrans(dict.fromkeys(string.punctuation))
    for index, value in x['Tweet'].items():
        # NOTE(review): chained assignment can trigger pandas
        # SettingWithCopyWarning; .loc[index, 'Tweet'] would be safer.
        x['Tweet'][index]=x['Tweet'][index].translate(table)
    #removing numbers (digit characters anywhere in any cell)
    x = x.replace(to_replace=r'\d', value="",regex=True)
    return x
# In[5]:
def knn(k):
    """Predict a label for every test tweet by majority vote of its k
    nearest training tweets.

    Relies on module-level state: `distances` (DataFrame of test-x-train
    euclidean distances) and `labels` (training sentiment labels).  When
    the k nearest neighbours have no unique most-common label, k is
    reduced by one for that tweet and the vote is retried.
    """
    p = 0
    s = distances[0].size
    print("k = ", k)
    perdicted_label = []
    while p < s:
        t = k
        # distances from test tweet p to every training tweet, ascending
        dist = sorted(distances.iloc[p])
        while t > 0:
            # collect the column indices of the t smallest distances
            index_distance = []
            n = 0
            for x, y in enumerate(distances.iloc[p]):
                if n == t:
                    break
                if dist[n] == y:
                    index_distance.append(x)
                    n += 1
            try:
                xx = statistics.mode(labels[index_distance])
            except statistics.StatisticsError:
                # Tie between several labels (Python < 3.8 raises here):
                # shrink the neighbourhood and vote again.  Narrowed from a
                # bare `except:` so real errors are no longer swallowed.
                # NOTE(review): if t ever reaches 0 without a winner, the
                # outer loop never advances p -- pre-existing behaviour.
                t -= 1
                continue
            else:
                perdicted_label.append(xx)
                p += 1
                break
    return perdicted_label
# In[6]:
def measures(perdicted_label, accuracy, precision, f1, recall):
    """Score predictions against the gold labels in the module-level `test`
    DataFrame; append accuracy / macro precision / recall / F1 to the
    supplied accumulator lists and print a 3x3 confusion matrix.
    """
    # --- accuracy ---
    correct = 0
    for x, y in enumerate(perdicted_label):
        if y == test['Sentiment'][x]:
            correct += 1
    Accu = (correct / test['Sentiment'].size)
    accuracy.append(Accu)
    print("Accuracy: ", Accu)

    # --- confusion counts, named <predicted>_<gold> ---
    p_pos = p_neg = p_nut = 0
    n_pos = n_neg = n_nut = 0
    nu_pos = nu_neg = nu_nut = 0
    for x, y in enumerate(perdicted_label):
        gold = test['Sentiment'][x]
        if y == "positive":
            if gold == "positive": p_pos += 1
            elif gold == "negative": p_neg += 1
            elif gold == "neutral": p_nut += 1
        elif y == "negative":
            if gold == "negative": n_neg += 1
            elif gold == "positive": n_pos += 1
            elif gold == "neutral": n_nut += 1
        elif y == "neutral":
            if gold == "positive": nu_pos += 1
            elif gold == "negative": nu_neg += 1
            elif gold == "neutral": nu_nut += 1

    # --- macro-average recall ---
    # BUG FIX: recall divides true positives by the number of *gold* items
    # of the class (column sums of the confusion counts).  The original
    # divided by the number of *predicted* items -- literally the same
    # denominator as precision -- so "recall" always equalled precision.
    pos_recall = p_pos / (p_pos + n_pos + nu_pos)
    neg_recall = n_neg / (p_neg + n_neg + nu_neg)
    nut_recall = nu_nut / (p_nut + n_nut + nu_nut)
    macro_avg_recall = (pos_recall + neg_recall + nut_recall) / 3
    recall.append(macro_avg_recall)

    # --- macro-average precision: TP over everything predicted as the class ---
    pos_precision = p_pos / (p_pos + p_neg + p_nut)
    neg_precision = n_neg / (n_pos + n_neg + n_nut)
    nut_precision = nu_nut / (nu_pos + nu_neg + nu_nut)
    macro_avg_precision = (pos_precision + neg_precision + nut_precision) / 3
    precision.append(macro_avg_precision)

    # --- macro-average F1 ---
    # NOTE(review): any class with zero gold or predicted examples raises
    # ZeroDivisionError above, as in the original.
    F1_score = (2 * macro_avg_precision * macro_avg_recall) / (macro_avg_recall + macro_avg_precision)
    f1.append(F1_score)
    print("Macroaverage Recall: ", macro_avg_recall)
    print("Macroaverage Percision: ", macro_avg_precision)
    print("F1 Score: ", F1_score)

    # --- confusion matrix (rows = predictions, columns = gold labels) ---
    print("Confusion Matrix: ")
    conf = {'Outputs/Gold Labels' : ['positive','neutral','negative'], 'positive' : [p_pos,p_nut,p_neg], 'neutral' : [nu_pos,nu_nut,nu_neg], 'negative' : [n_pos,n_nut,n_neg]}
    confusion = pd.DataFrame(conf, columns=['Outputs/Gold Labels', 'positive', 'neutral', 'negative'])
    print(confusion)
# In[9]:
train = clean_data(train)
test = clean_data(test)
# In[11]:
#converting to lowercase
train = train.apply(lambda x: x.astype(str).str.lower())
#removing stop words
for i in stop_words :
train = train.replace(to_replace=r'\b%s\b'%i, value="",regex=True)
#removing punctuations
table = str.maketrans(dict.fromkeys(string.punctuation))
for index, value in train['Tweet'].items():
train['Tweet'][index]=train['Tweet'][index].translate(table)
#removing numbers
train = train.replace(to_replace=r'\d', value="",regex=True)
# In[65]:
#converting to lowercase
test = test.apply(lambda x: x.astype(str).str.lower())
#removing stop words
for i in stop_words :
test = test.replace(to_replace=r'\b%s\b'%i, value="",regex=True)
#removing punctuations
table = str.maketrans(dict.fromkeys(string.punctuation))
for index, value in test['Tweet'].items():
test['Tweet'][index]=test['Tweet'][index].translate(table)
#removing numbers
test = test.replace(to_replace=r'\d', value="",regex=True)
# In[10]:
vocabulary= [] #building vocabulary series for bag of words from train data
for x in train['Tweet'].tolist():
a = word_split(x)
vocabulary.extend(a);
vocabulary = list(set(vocabulary))
vocab=pd.Series(vocabulary)
dup_vocab = vocab
# In[11]:
#building bag of words with vocabulary as columns and tweets as rows from train data
bow = pd.DataFrame (columns=dup_vocab)
ss = len(dup_vocab)
for x in train['Tweet']:
c = word_split(x)
bow_vector = np.zeros(ss)
for d in c:
for i, y in enumerate(dup_vocab):
if y==d:
bow_vector[i] = bow_vector[i] + 1
#a = pd.DataFrame([bow_vector],columns = dup_vocab)
bow = bow.append(pd.Series(bow_vector, index=dup_vocab),ignore_index=True)
#print(bow.shape)
dup_bow = bow
# In[12]:
distances= []
#building bag of words with vocabulary as columns and tweets as rows from test data
test_bow = pd.DataFrame (columns=vocab)
for x in test['Tweet'].tolist():
c = word_split(x)
test_bow_vector = np.zeros(ss)
for d in c:
for i, y in enumerate(vocab):
if y==d:
test_bow_vector[i] = test_bow_vector[i] + 1
#a = pd.DataFrame([test_bow_vector],columns = dup_vocab)
test_bow = test_bow.append(pd.Series(test_bow_vector, index=vocab),ignore_index=True)
#print(test_bow.shape)
#print(test_bow)
test_dup_bow = test_bow
# In[ ]:
#finding euclidean distances
distances = scipy.spatial.distance.cdist(test_dup_bow.values, dup_bow.values, metric='euclidean')
# In[ ]:
#distances as a dataframe
distances = pd.DataFrame(distances)
# In[ ]:
labels=train['Sentiment'] #train data labels (Gold labels)
accuracy = []
percision = []
f1 = []
recall= []
p_label = knn(10) #predicting labels for test data for k=10
measures(p_label,accuracy,percision,f1,recall) #measuring accuracy, confusion matrix, precision, f1-score, and recall
p_label = knn(7) #predicting labels for test data for k=7
measures(p_label,accuracy,percision,f1,recall) #measuring accuracy, confusion matrix, precision, f1-score, and recall
p_label = knn(5) #predicting labels for test data for k=5
measures(p_label,accuracy,percision,f1,recall) #measuring accuracy, confusion matrix, precision, f1-score, and recall
p_label = knn(3) #predicting labels for test data for k=3
measures(p_label,accuracy,percision,f1,recall) #measuring accuracy, confusion matrix, precision, f1-score, and recall
p_label = knn(1) #predicting labels for test data for k=
measures(p_label,accuracy,percision,f1,recall) #measuring accuracy, confusion matrix, precision, f1-score, and recall
# In[ ]:
k= [10,7,5,3,1]
#Plotting graphs for accuracy, precision, recall, and f1-score against all k values
fig, axs = plt.subplots(2,2)
axs[0,0].plot(k,accuracy)
axs[0,0].set_title('Accuracy')
axs[1,0].plot(k,percision)
axs[1,0].set_title('Precision')
axs[1,1].plot(k,recall)
axs[1,1].set_title('Recall')
axs[0,1].plot(k,f1)
axs[0,1].set_title('F1 Score')
for ax in axs.flat:
ax.set(xlabel='k-values')
| true |
2e9b3ba6c393ecceb01361b9bb9b67f406001721 | Python | ciciandjojo/developx | /weather_app/models.py | UTF-8 | 761 | 2.5625 | 3 | [] | no_license | from django.db import models
class GeoPosition(models.Model):
    # A surveyed point: free-text coordinates plus an altitude.  Units of
    # `height` are not recorded in the schema -- confirm with the data source.
    coordinates = models.CharField(max_length=255)
    height = models.PositiveSmallIntegerField()
    def __unicode__(self):
        # Python 2 style display method (Django < 2.0); shown in the admin.
        return u'{} : {}'.format(self.coordinates, self.height)
class WeatherEntry(models.Model):
    # One raw weather observation at a location and point in time.
    geoposition = models.ForeignKey(GeoPosition)
    date = models.DateField()
    time = models.TimeField()
    temperature = models.FloatField()
    wind_speed = models.FloatField()
    # wind direction stored as a number -- unit/convention not defined here
    wind_vector = models.FloatField()
class AverageStats(models.Model):
    # Aggregated (average) weather figures for a location at a timestamp.
    geoposition = models.ForeignKey(GeoPosition)
    datetime = models.DateTimeField()
    avg_temperature = models.FloatField()
    avg_wind_speed = models.FloatField()
    avg_wind_vector = models.FloatField()
| true |
c679aa7340a303d0a58c64faa621bcbc32ce560b | Python | ajayvenkat10/Competitive | /quicksort1.py | UTF-8 | 774 | 3.671875 | 4 | [] | no_license | def swap(s1,s2):
    # Return the two arguments in reversed order (helper used by partition).
    return s2,s1
def quicksort(A, l, r):
    """Sort A[l..r] in place by recursive partitioning; returns A."""
    if l >= r:
        return A
    pivot_index = partition(A, l, r)
    quicksort(A, l, pivot_index - 1)
    quicksort(A, pivot_index + 1, r)
    return A
def partition(A, l, r):
    """Partition A[l..r] (inclusive) around the pivot A[l], in place.

    On return, elements <= pivot sit left of the returned index, elements
    >= pivot sit right of it, and the pivot occupies the index itself.
    Tuple assignment replaces the old swap() helper -- idiomatic, and it
    makes this function self-contained.
    """
    p = A[l]
    i = l + 1
    j = r
    done = False
    while not done:
        # advance i past elements that belong on the left
        while i <= j and A[i] <= p:
            i = i + 1
        # retreat j past elements that belong on the right
        while A[j] >= p and j >= i:
            j = j - 1
        if j < i:
            done = True
        else:
            A[i], A[j] = A[j], A[i]
    # move the pivot into its final slot
    A[l], A[j] = A[j], A[l]
    return j
# --- command-line driver (Python 2: print statement / raw_input) ---
print "Enter the number of elements in the array: "
n=int(raw_input(""))
print "Enter the array elements"
A=[]
x=raw_input("")
x=x.split()
# take the first n whitespace-separated tokens as integers
for i in range(n):
    b=int(x[i])
    A.append(b)
A=quicksort(A,0,len(A)-1)
print "The sorted array is: "
print A
| true |
3ae2678a7393e9e77f874787c6d99e846636c767 | Python | daniel-reich/ubiquitous-fiesta | /EHzL3v25wYp7E4AFC_24.py | UTF-8 | 83 | 2.625 | 3 | [] | no_license |
def can_build(s1, s2):
return all(s1.count(c) >= s2.count(c) for c in set(s2))
| true |
b21e101dc486d8ca5fef1358eefad695e69ebbb2 | Python | parvpatni/PyProject | /gui.py | UTF-8 | 3,218 | 2.75 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 14 01:26:40 2018
@author: Parv
"""
from tkinter import *
#import tkinter as Tk
def admin_window():
window = Toplevel(root)
#l=Label(window,text="test")
#l.pack()
global id_entry,name_entry,tseats_entry,price_entry,id_rem
def addinput():
mid = id_entry.get()
mname = name_entry.get()
mseats=tseats_entry.get()
mprice=price_entry.get()
print(mid,mname,mseats,mprice)
def remmovie():
rem_id=id_rem.get()
print(rem_id)
label1=Label(window,text="New Movie")
label2=Label(window,text="ID")
label3=Label(window,text="Name")
label4=Label(window,text="Total Seats")
label5=Label(window,text="Price")
id_entry=Entry(window)
name_entry=Entry(window)
tseats_entry=Entry(window)
price_entry=Entry(window)
label1.grid(row=0)
label2.grid(row=1)
label3.grid(row=2)
label4.grid(row=3)
label5.grid(row=4)
id_entry.grid(row=1,column=1)
name_entry.grid(row=2,column=1)
tseats_entry.grid(row=3,column=1)
price_entry.grid(row=4,column=1)
b1=Button(window, text="submit", fg="red",command=addinput)
b1.grid(row=5, column=1)
label5=Label(window,text="Remove Movie")
id_rem=Entry(window)
b2=Button(window, text="submit", fg="blue",command=remmovie)
label5.grid(row=0,column=3)
id_rem.grid(row=1,column=3)
b2.grid(row=2, column=3)
def user_window():
window = Toplevel(root)
def showall():
print("working")
def showmovie():
sid = id_show.get()
print(sid)
def bookmovie():
mid=book_mov_id.get()
seat=book_mov_seat.get()
print(mid,seat)
def refundmovie():
mid=ref_mov_id.get()
seat=ref_mov_seat.get()
print(mid,seat)
global id_show,book_mov_id,book_mov_seat,ref_mov_id,ref_mov_seat
b1=Button(window, text="Show All Movies", fg="red",command=showall)
b1.grid(row=0, column=1)
showmov=Label(window,text="Show Movie")
showmov.grid(row=1)
id_show=Entry(window)
id_show.grid(row=1,column=1)
showbutton=Button(window,text="submit",command=showmovie).grid(row=1,column=2)
bookl=Label(window,text="Book Movie").grid(row=2)
book_mov_id=Entry(window)
book_mov_id.grid(row=2,column=1)
book_mov_seat=Entry(window)
book_mov_seat.grid(row=2,column=2)
bookbutton=Button(window,text="book",command=bookmovie).grid(row=2,column=3)
refl=Label(window,text="Refund Movie").grid(row=3)
ref_mov_id=Entry(window)
ref_mov_id.grid(row=3,column=1)
ref_mov_seat=Entry(window)
ref_mov_seat.grid(row=3,column=2)
refbutton=Button(window,text="refund",command=refundmovie).grid(row=3,column=3)
# --- main window: title label plus ADMIN / USER buttons ---
root=Tk()
# NOTE(review): pack() returns None, so `l` is None here -- harmless, but
# the Label reference is lost.
l=Label(root,text="Welcome to Movie Booking").pack(side=TOP)
topFrame = Frame(root)
topFrame.pack()
bottomFrame=Frame(root)
bottomFrame.pack(side=BOTTOM)
button1=Button(topFrame, text="ADMIN", fg="red",command=admin_window)
button2=Button(topFrame, text="USER", fg="blue",command=user_window)
button1.pack(side=LEFT)
button2.pack()
root.mainloop() | true |
b35354f93c9f11e19db7c02acc6eed86bacf9302 | Python | oguh43/bilicka | /fml/test.py | UTF-8 | 2,512 | 2.875 | 3 | [] | no_license | from os import listdir
from os.path import isfile, join
def compress() -> None:
source_file = open("fml/source.txt","r")
chars = [line.replace("\n","") for line in source_file.readlines()]
def map_line(line: str)-> str:
result = ""
count = 0
curr_char = ""
for char in line:
if curr_char == "":
curr_char = char
if curr_char != char:
result += curr_char if count == 1 else f"{count}{curr_char}"
count = 0
curr_char = char
count += 1
result += char if count == 1 else f"{count}{char}"
return result
def remove_duplicates(lines: list[str]) -> list[str]:
count = 0
last_line = ""
for line in lines:
if last_line == "":
last_line = line
if last_line != line:
if count > 1:
lines[lines.index(last_line)] = f"{last_line}+{count}"
while last_line in lines:
lines.remove(last_line)
last_line = line
count = 0
count += 1
return lines
res = []
for i in chars:
res.append(map_line(i))
res = remove_duplicates(res)
source_file.close()
target_file = open("fml/"+"n".join(res)+".txt","w+")
target_file.close()
def decompress() -> None:
    # List candidate compressed files and let the user pick one by number.
    files = [f for f in listdir("fml/") if isfile(join("fml", f))]
    print("Pick a file to decode\n")
    for i in range(len(files)):
        print(f"File number-> {i}; File name-> \"{files[i]}\"")
    # The compressed payload is stored in the *filename* itself: strip the
    # extension and split the remaining text into lines on the 'n' marker.
    file = "".join(files[int(input("File number? > "))].split(".")[:-1]).split("n")
    for line in file:
        if "+" in line:
            # SECURITY NOTE(review): eval() on text derived from a filename
            # is arbitrary code execution waiting to happen -- a crafted
            # filename can run any Python.  Replace with explicit parsing.
            file[file.index(line)] = eval("n\"*".join(("(\""+line).split("+")) + ").split(\"n\")[:-1]")
    # Flatten the mix of strings and nested lists back into a flat line list.
    flatten = lambda *n: (e for a in n for e in (flatten(*a) if isinstance(a, (tuple, list)) else (a,)))
    file = list(flatten(file))
    res = ""
    number = ""
    for line in file:
        for char in line:
            try:
                int(char)
                number += char
            except ValueError:
                # run-length expansion: "<count><char>" -> char * count
                # (this second eval is also avoidable; see note above)
                res += str(eval(f"'{char}'*{int(number) if number != '' else 1}"))
                number = ""
        res += "\n"
        number = ""
    print(res)
if __name__ == '__main__':
if input("Compress/ Decompress? C||D -> ").upper() == "C":
compress()
else:
decompress() | true |
7af3f405f410723b18339bbb30232239bf5657d1 | Python | fooltomb/algorithm | /LeetCode/0189RotateArray/rotateArray_0.py | UTF-8 | 280 | 2.859375 | 3 | [] | no_license | class Solution(object):
def rotate(self,nums,k):
n=len(nums)
k=k%n
nums.reverse()
for i in range(k/2):
nums[i],nums[k-i-1]=nums[k-i-1],nums[i]
for i in range((n-k)/2):
nums[k+i],nums[n-i-1]=nums[n-i-1],nums[k+1]
| true |
4c29b96bdbf6dce0b9ec5f6419466882eb61ac9b | Python | parveez1shariff/Python-Testing | /Aurg_test.py | UTF-8 | 91 | 3.375 | 3 | [] | no_license | # Verifing function argument
def area(a, b=7):
    """Return the product a * b; b defaults to 7 (default-argument demo)."""
    return a * b
print(area(4, 8)) | true |
2a6482690b521452b83f14d8a3e6b9e7325aa774 | Python | Xiaoyii/qbb2017-answers | /day2-morning/day2-lunch/day2-exercise-5.py | UTF-8 | 266 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
import sys
# Accumulate the MAPQ values of all alignment records in a SAM stream on
# stdin (mean is printed by the final line of the script).
fh = sys.stdin
numlines = 0
MAPQscore = 0
for line in fh:
    if line.startswith("@"):
        # header lines carry no alignment, skip them
        continue
    else:
        # SAM column 5 (0-based index 4) is the mapping quality
        MAPQscore += int(line.split("\t")[4])
        numlines +=1
print MAPQscore / numlines | true |
931046c4bef17a09c4dc413834cca73b78b9b2f7 | Python | Phipli/SmartBox | /eSB_Example.py | UTF-8 | 856 | 2.75 | 3 | [] | no_license | #!/usr/bin/python
# Smart Box Interface Library
# By Phil
# http://stuffandnonsense.elephantandchicken.co.uk
# 20200307
# This is an early development version
# of a python module designed to control the
# Economatics Smart Box
# Testing is conducted on an "SB-04"
# This file is an example using some functions from the library
import eSB
import time
eSB.open('/dev/ttyUSB0')
print "Smart Box OS Version : " + eSB.get_sb_ver()
print "Command 12 is called : " + eSB.get_command_name(12)
print "Turn all digital outpus on..."
eSB.d_out_on_all()
time.sleep(3)
print "Turn half of them off again..."
eSB.d_out_all(15)
time.sleep(3)
eSB.d_out_off_all()
try:
while True:
print "Press Ctrl-c to exit. Digital Inputs : " + format(eSB.d_in_all(), '08b')
time.sleep(1)
except KeyboardInterrupt:
print "Exiting..."
eSB.close()
print "Done."
| true |
76bd8b14d97fd9bd7b6d09d51b515d6b46690a37 | Python | bolatroniks/AlphaEvolution | /Framework/Genetic/utils.py | UTF-8 | 1,445 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#hahaha
from copy import deepcopy
from Framework.Dataset.Dataset import Dataset
from Framework.Genetic.Functions.threshold_inverters import *
from Framework.Genetic.Functions.feature_functions import *
from Framework.Genetic.Functions.predictors import *
#some objects cannot be directly written to a file
#this function converts them to some basic info that
#can be stored
def prepare_dict_to_save (d_in):
    # Return a deep copy of d_in in which unserializable values are replaced
    # by string tags: functions -> "function:<name>",
    # Dataset -> "Dataset:<timeframe>"; nested dicts are converted recursively.
    # NOTE(review): Python 2 only (iteritems / func_name).
    d = deepcopy (d_in)
    for k, v in d.iteritems ():
        if v.__class__.__name__ == 'dict':
            d[k] = prepare_dict_to_save (v)
        elif v.__class__.__name__ == 'function':
            d[k] = 'function:' + v.func_name
        elif v.__class__.__name__ == 'Dataset':
            d[k] = 'Dataset:' + v.timeframe
    return d
#does the opposite from the function above
#once a chromossome has been loaded from a file
#some objects are not yet ready to use
#this function converts them into something usable
def adapt_dict_loaded (d_in):
    # Inverse of prepare_dict_to_save: turn the string tags read back from a
    # file into live objects again.
    # SECURITY NOTE(review): eval() of "function:<name>" strings loaded from
    # a file executes arbitrary expressions -- only safe for trusted files.
    d = deepcopy (d_in)
    for k, v in d.iteritems ():
        if v.__class__.__name__ == 'dict':
            d[k] = adapt_dict_loaded (v)
        if type (v) == str or type(v) == unicode:
            if v.find('function:') >= 0:
                d[k] = eval (v.replace ('function:', '').replace (' ', ''))
            elif v.find('Dataset:') >= 0:
                d[k] = Dataset (timeframe = v.replace ('Dataset:', '').replace (' ', ''))
return d | true |
bf302bf69d7e1913b8dc6b4161b7ce3b74e10821 | Python | pjkui/githubproject | /hilder_company/match/soufang_producer.py | UTF-8 | 6,934 | 2.765625 | 3 | [] | no_license | """
搜房网站抓取
http://www.sofang.com/
"""
import requests
from lxml import etree
import re
import pika
import json
from lib.log import LogHandler
log = LogHandler('搜房')
connection = pika.BlockingConnection(pika.ConnectionParameters(host='114.80.150.196', port=5673, heartbeat=0))
channel = connection.channel()
class SouFangRent(object):
def __init__(self, proxies, cookie):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
'Cookie': cookie
}
self.start_url = 'http://www.sofang.com/city.html'
self.proxies = proxies
def get_all_city_url(self):
r = requests.get(url=self.start_url, headers=self.headers, proxies=self.proxies)
tree = etree.HTML(r.text)
links = tree.xpath("//div[@class='citys']/ul/li/p/a")
city_dict = {}
for link in links:
url = link.xpath("./@href")[0]
city = link.xpath("./text()")[0]
city_dict[city] = url
self.get_office_buliding(city_dict)
def get_office_buliding(self, city_dict):
for city in city_dict:
city_url = city_dict[city] + '/xzlrent/build' # 写字楼出租 楼盘查询
print(city_url)
try:
r = requests.get(url=city_url, headers=self.headers, proxies=self.proxies)
except Exception as e:
log.error('请求失败 url={} e={}'.format(city_url, e))
continue
try:
tree = etree.HTML(r.text)
max_page = tree.xpath("//div[@class='page_nav']/ul/li/a/@alt")[-1]
for num in range(1, int(max_page) + 1):
page_url = city_url + '/bl{}?'.format(num)
self.get_all_links(page_url, city)
except Exception as e:
log.info('没有写字楼数据')
page_url = city_url + '/bl{}?'.format(1)
self.get_all_links(page_url, city)
def get_all_links(self, page_url, city):
try:
response = requests.get(url=page_url, headers=self.headers, proxies=self.proxies)
except Exception as e:
log.error('请求失败 e={}'.format(e))
return
tree = etree.HTML(response.text)
buildings = tree.xpath("//div[@class='list list_free']/dl")
for building in buildings:
half_url = building.xpath("./dd[1]/p/a/@href")[0]
# 写字楼链接
url = re.search('(.*?)/xzlrent', page_url, re.S | re.M).group(1) + half_url
# 写字楼名称
office_name = building.xpath("./dd[1]/p/a/text()")[0]
try:
# 出租价格
rent_price = float(building.xpath("./dd[2]/p[1]/span/text()")[0])
except:
rent_price = None
data = {
'url': url,
'city': city,
'office_name': office_name,
'rent_price': rent_price
}
channel.queue_declare(queue='soufang_rent')
channel.basic_publish(exchange='',
routing_key='soufang_rent',
body=json.dumps(data))
log.info('一条数据放队列 url={}'.format(data))
class SouFangSale(SouFangRent):
def __init__(self, proxies, cookie):
super(SouFangSale, self).__init__(proxies, cookie)
def get_office_buliding(self, city_dict):
for city in city_dict:
city_url = city_dict[city] + '/xzlsale/area' # 写字楼出售
print(city_url)
try:
r = requests.get(url=city_url, headers=self.headers, proxies=self.proxies)
try:
tree = etree.HTML(r.text)
max_page = tree.xpath("//div[@class='page_nav']/ul/li/a/@alt")[-1]
for num in range(1, int(max_page)+1):
page_url = city_url + '/bl{}?'.format(num)
self.get_all_links(page_url, city)
except Exception as e:
log.info('没有写字楼数据')
except Exception as e:
log.error('请求失败 e={}'.format(e))
def get_all_links(self, page_url, city):
try:
response = requests.get(url=page_url, headers=self.headers, proxies=self.proxies)
tree = etree.HTML(response.text)
buildings = tree.xpath("//div[@class='list list_free']/dl")
for building in buildings:
half_url = building.xpath("./dd[1]/p/a/@href")[0]
# 写字楼链接
url = re.search('(.*?)/xzlsale', page_url, re.S | re.M).group(1) + half_url
try:
# 出售价格
sell_price = float(building.xpath("./dd[2]/p[1]/span/text()")[0])*10000.0
except:
sell_price = None
try:
# 均价
avg_price = int(building.xpath("./dd[2]/p[2]/span/text()")[0])
except:
avg_price = None
try:
# 物业类型
estate_type2 = building.xpath("./dd[1]/div/p[2]/span[1]/text()")[0]
except:
estate_type2 = None
try:
# 建筑面积
build_area_info = building.xpath("./dd[1]/div/p[2]/span[3]/text()")[0]
build_area = re.search('(\d+)', build_area_info, re.S | re.M).group(1)
except:
build_area = None
try:
# 楼层
floor_info = building.xpath("./dd[1]/div/p[2]/span[5]/text()")[0]
total_floor = re.search('/(\d+)', floor_info, re.S | re.M).group(1)
floor = re.search('(\d+)/', floor_info, re.S | re.M).group(1)
except:
total_floor = None
floor = None
data = {
'url': url,
'city': city,
'sell_price': sell_price,
'avg_price': avg_price,
'estate_type2': estate_type2,
'build_area': build_area,
'total_floor': total_floor,
'floor': floor
}
channel.queue_declare(queue='soufang_sale')
channel.basic_publish(exchange='',
routing_key='soufang_sale',
body=json.dumps(data))
log.info('一条数据放队列 url={}'.format(data))
except Exception as e:
log.error('请求失败 e={}'.format(e)) | true |
c64af0b5ad93dacb3fc01497327f9602ab400ab9 | Python | littley/pyvolution | /examples/EquationSolver_advanced.py | UTF-8 | 2,749 | 3.75 | 4 | [
"Apache-2.0"
] | permissive | import math
from pyvolution.EvolutionManager import *
from pyvolution.GeneLibrary import *
"""
This example attempts to find a solution to the following system of equations:
a + b + c + d - 17 = 0
a^2 + b^2 - 5 = 0
sin(a) + c - d - 20 = 0
"""
def fitnessFunction(chromosome):
    """Fitness of a candidate solution to the three-equation system.

    Reads genes "a", "b", "c", "d" from the chromosome, measures how far
    each equation is from zero, and returns the reciprocal of the combined
    (Euclidean) error.  Returning None signals a perfect solution.
    """
    a, b, c, d = (chromosome[g] for g in ("a", "b", "c", "d"))
    # residual of each equation (zero means the equation is satisfied)
    residuals = (
        abs(a + b + c + d - 17),
        abs(a ** 2 + b ** 2 - 5),
        math.sin(a) + c - d - 20,
    )
    # distance of the residual vector from the origin; smaller is better,
    # so fitness is its reciprocal
    dist = math.sqrt(sum(r ** 2 for r in residuals))
    if dist == 0:
        return None
    return 1 / dist
#configure the evolution manager as you see fit
#see EvolutionManager.py for documentation on the arguments for this class
em = EvolutionManager(fitnessFunction,
                      individualsPerGeneration=100,
                      mutationRate=0.2,
                      maxGenerations=1000,
                      stopAfterTime=10, #stop the simulation after 10 seconds
                      elitism=2, #preserve the 2 most fit individuals unmutated in each generation
                      )
#this is a special gene type. Instead of mutating randomly, the value of this gene is equal to 1/fitness. As the
#fitness of the chromosome increases, this gene gets smaller and smaller
mutator = FloatInverseFit("mut", maxVal=0.01, startVal=1)
#standard floating point genes. The mutation rate of these genes is dependant on the "mut" gene which is defined above
atype = FloatGeneType("a", generatorAverage=0, generatorSTDEV=100, mutatorGene="mut")
btype = FloatGeneType("b", generatorAverage=0, generatorSTDEV=100, mutatorGene="mut")
ctype = FloatGeneType("c", generatorAverage=0, generatorSTDEV=100, mutatorGene="mut")
dtype = FloatGeneType("d", generatorAverage=0, generatorSTDEV=100, mutatorGene="mut")
# Register the mutation-rate gene and the four solution genes with the manager.
em.addGeneType(mutator)
em.addGeneType(atype)
em.addGeneType(btype)
em.addGeneType(ctype)
em.addGeneType(dtype)
# Run the evolution; presumably returns the best chromosome found — see
# EvolutionManager.run for the exact return value (TODO confirm).
result = em.run()
c8c0048903e72d0c135bb564ce0ebf5a4682e85c | Python | bslaff/aoc2020 | /2a.py | UTF-8 | 661 | 3.390625 | 3 | [] | no_license | def get_pw(line):
    """Parse one policy line of the form 'L-U X: password' into a dict.

    Returns a dict with keys 'pw' (the password), 'letter' (the policy
    letter X) and the inclusive integer bounds 'lower' and 'upper'.
    """
    ddd = dict()
    # Split off the password after the colon.
    a = line.split(':')
    ddd['pw'] = a[1].strip()
    # The left part is 'L-U X': split on the space to get bounds and letter.
    b = a[0].strip()
    c = b.split(' ')
    ddd['letter'] = c[1].strip()
    # 'L-U' -> lower/upper occurrence bounds.
    d = c[0].strip()
    e = d.split('-')
    upper = int(e[1].strip())
    lower = int(e[0].strip())
    ddd['upper'] = upper
    ddd['lower'] = lower
    return ddd
def is_valid(d):
    """Return True when the password's letter count lies within [lower, upper]."""
    occurrences = d['pw'].count(d['letter'])
    return d['lower'] <= occurrences <= d['upper']
# Read the puzzle input, one policy line per row.
f = open('2a_input.txt', 'r')
lines = f.readlines()
f.close()
# Parse every non-empty line, validate each, and report the count.
passwords = [get_pw(line.strip()) for line in lines if len(line)>0]
valid = [is_valid(pw) for pw in passwords]
print(f"{sum(valid)} of {len(passwords)} were valid")
| true |
e1fcdb44ebb9f657eebbcecd750e78309b1894f6 | Python | louismarc-mercier/Manim | /equality_english.py | UTF-8 | 23,075 | 2.6875 | 3 | [] | no_license | from manimlib.imports import *
# Scene index (the numbering below matches the order of the classes in this file):
# 0.  Thumbnail
# 1.  Thumbnail2
# 2.  ObjectifLecon
# 3.  History
# 4.  DefIntuition
# 5.  Utilisation
# 6.  ExempleAffirmation
# 7.  ExempleDefinition
# 8.  ExempleEquation
# 9.  RegleOr
# 10. RegleOrExemples
# 11. ZeroDivision
# 12. ExempleElem
# 13. ExempleElemSol
# 14. ExempleComplexe
# 15. ExempleComplexeSol
# 16. Credits
class Thumbnail(Scene):
    """Video thumbnail: the 'Perfectly balanced' painting with a big '=' sign."""
    CONFIG = {
        # NOTE(review): these CONFIG entries are never read in this scene —
        # confirm whether they are leftovers from a template.
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Fade in the background image, then write the title and the '='."""
        # assumes pre-rendered images live in this local directory — TODO confirm
        path = r"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/images/"
        image_peinture = ImageMobject(path + "Perfectly_balanced")
        image_peinture.scale(3.25)
        image_peinture.to_edge(DOWN)
        # NOTE(review): the "{\sc" group is never closed — verify the LaTeX renders as intended.
        line1 = TextMobject(r"{\sc Perfectly balanced").scale(2).shift(2.25 * UP).set_color(WHITE)
        line2 = TextMobject(r"=").scale(4).next_to(0.5*DOWN+2*LEFT).set_color(WHITE)
        #prob = TexMobject(r"\text{Find }T_n=A_n+B_n").scale(2).shift(2 * DOWN)
        self.play(FadeIn(image_peinture))
        self.play(Write(line1), Write(line2))
        self.wait(5)
class Thumbnail2(Scene):
    """Alternate thumbnail: the 'Robert' painting with the follow-up caption."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Fade in the image, then write the caption under it."""
        path = r"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/images/"
        image_peinture = ImageMobject(path + "Perfectly_balanced_robert")
        image_peinture.scale(3.25)
        image_peinture.to_edge(DOWN)
        line = TextMobject(r"{\sc ...as all things should be").scale(1.5).shift(2.5 * DOWN).set_color(WHITE)
        #prob = TexMobject(r"\text{Find }T_n=A_n+B_n").scale(2).shift(2 * DOWN)
        self.play(FadeIn(image_peinture))
        self.play(Write(line))
        self.wait(5)
class ObjectifLecon(Scene):
    """Lesson-objectives scene: the 'Empowerment Promise' slide."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Write the title, fade in the objectives list, then show the image."""
        # Fixed a typo in the displayed title: "Empowerement" -> "Empowerment".
        title = TextMobject(r"\underline{\sc Empowerment Promise}", color=WHITE).to_corner(UL)
        #definition = TextMobject(r"Connaître la formule de l'aire d'un rectangle et calculer l'aire d'un rectangle.")
        definition = TextMobject(r"""
            \begin{itemize}
            \item Know the substitution rule.
            \item Solve algebra problems which require solving equation(s) with unknown variable(s).
            \end{itemize}
            """)
        definition.scale(0.7)
        definition.move_to(np.array([-1, 1, 0]))
        definition.set_color(BLUE)
        self.play(Write(title))
        self.wait(3)
        self.play(FadeIn(definition))
        # assumes pre-rendered images live in this local directory — TODO confirm
        path = r"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/images/"
        #image_peinture = SVGMobject(path + "x-icon")
        image_peinture = ImageMobject(path + "objectif")
        image_peinture.next_to(1.5*DOWN+0.5*LEFT)
        self.play(ShowCreation(image_peinture))
        self.wait(10)
class History(Scene):
    """History scene: Robert Recorde and the 1557 introduction of '='."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Fade in Recorde's portrait, then the title and the caption."""
        title = TextMobject(r"\underline{\sc History}", color=WHITE).to_corner(UL)
        #definition = TextMobject(r"L’aire est la surface occupée par un objet sur un plan de deux dimensions. L’aire se calcule en unités carrées.")
        definition = TextMobject(r"The equal sign $=$ was introduced by Robert Recorde in 1557, to avoid to write is equal to (in letters) when doing computations.")
        definition.scale(0.7)
        definition.move_to(np.array([0, 2, 0]))
        definition.set_color(BLUE)
        #definition.to_edge(UP)
        path = r"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/images/"
        image_robert = ImageMobject(path + "robert_recorde")
        image_robert.scale(1.75)
        image_robert.to_edge(DOWN)
        self.play(FadeIn(image_robert))
        self.play(Write(title))
        self.play(FadeIn(definition))
        self.wait(35)
class DefIntuition(Scene):
    """Intuitive-definition scene: what an equality expresses."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Write the title, then fade in the one-line definition."""
        title = TextMobject(r"\underline{\sc Intuitive Definition}", color=WHITE).to_corner(UL)
        definition = TextMobject(r"An equality expresses the equivalence between two terms.")
        definition.scale(0.7)
        definition.move_to(np.array([0, 2, 0]))
        definition.set_color(BLUE)
        #definition.to_edge(UP)
        self.play(Write(title))
        self.play(FadeIn(definition))
        self.wait(25)
class Utilisation(Scene):
    """Contexts scene: the three ways an equality can appear."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Write the title/intro, then reveal the numbered list one item at a time."""
        title = TextMobject(r"\underline{\sc Contexts}", color=WHITE).to_corner(UL)
        #definition = TextMobject(r"L’aire est la surface occupée par un objet sur un plan de deux dimensions. L’aire se calcule en unités carrées.")
        definition = TextMobject(r"An equality can appear as :")
        definition.scale(0.7)
        definition.move_to(np.array([-4, 2, 0]))
        definition.set_color(BLUE)
        # Index 0 is a blank placeholder so the visible items are numbered 1..3.
        compl_rules_str = [
            "",
            "an affirmation,",
            "a notation definition, or",
            "an equation.",
        ]
        rules = [TextMobject("{}) {}".format(i, rule), color=WHITE) for i, rule in enumerate(compl_rules_str)]
        rules = [rule.scale(0.7) for rule in rules]
        # Stack items 1..3 below the first entry, 0.7 units apart.
        for (i, rule) in enumerate(rules):
            if i != 0:
                rule.next_to(rules[0].get_center() - rule.get_center() + np.array([-6, -2 - (0.7 * i), 0]))
        rules = VGroup(*rules)
        rules.to_corner(LEFT + UP)
        self.play(Write(title))
        self.play(FadeIn(definition))
        self.wait(3)
        self.play(Write(rules[1]))
        self.wait(3)
        self.play(Write(rules[2]))
        self.wait(3)
        self.play(Write(rules[3]))
        self.wait(10)
class ExempleAffirmation(Scene):
    """Example scene: an equality used as an affirmation, with LHS/RHS braces."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Show the identity (a+b)^2 = a^2+b^2+2ab and label each side in turn."""
        title = TextMobject(r"\underline{\sc Example: Affirmation}", color=WHITE).to_corner(UL)
        title.move_to(np.array([-4.25, 3, 0]))
        caption_string = r"Let $a,b\in\mathbb{R}$, then we have:"
        # NOTE(review): TextMobject2 is not defined in this file — presumably a
        # project-local subclass; verify it is importable.
        caption_property = TextMobject2(caption_string)
        caption_property.scale(0.7)
        caption_property.move_to(np.array([-4.5, 2.1, 0]))
        equality = TexMobject(
            "(a+b)^{2}", # 0
            "=", # 1
            "a^{2}+b^{2}+2ab", # 2
        )
        brace1 = Brace(equality[0], UP, buff=SMALL_BUFF)
        brace2 = Brace(equality[2], UP, buff=SMALL_BUFF)
        t1 = brace1.get_text("Left-hand side")
        t2 = brace2.get_text("Right-hand side")
        self.play(Write(title))
        self.play(FadeIn(caption_property))
        self.play(FadeIn(equality))
        self.play(
            GrowFromCenter(brace1),
            FadeIn(t1),
        )
        self.wait(15)
        # Slide the brace/label from the left-hand side to the right-hand side.
        self.play(
            ReplacementTransform(brace1, brace2),
            ReplacementTransform(t1, t2)
        )
        self.wait(15)
        self.play(FadeOut(equality))
        self.play(FadeOut(caption_property))
        self.play(FadeOut(title))
class ExempleDefinition(Scene):
    """Example scene: an equality used as a notation definition (quadratic formula)."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Present the discriminant notation and the quadratic root formula."""
        title = TextMobject(r"\underline{\sc Example: Notation Definition}", color=WHITE).to_corner(UL)
        title.move_to(np.array([-2.75, 3, 0]))
        caption_string = r"Let $f:\mathbb{R}\rightarrow\mathbb{R}$, the function defined by $f(x)=ax^{2}+bx+c$ where $a,b,c\in\mathbb{R}$. " \
                         r"The function has at least one zero if $\Delta=b^{2}-4ac \geq 0$."
        caption_property = TextMobject2(caption_string)
        caption_property.scale(0.7)
        caption_property.move_to(np.array([-1, 2.1, 0]))
        equality = TexMobject(
            "x", # 0
            "=", # 1
            "\\frac{-b\pm\sqrt{\Delta}}{2a}",
            "=",
            "\\frac{-b\pm\sqrt{b^{2}-4ac}}{2a}", # 2
        )
        self.play(Write(title))
        self.play(FadeIn(caption_property))
        self.wait(22)
        self.play(Write(equality))
        self.wait(25)
        self.play(FadeOut(equality))
        self.play(FadeOut(caption_property))
        self.play(FadeOut(title))
class ExempleEquation(Scene):
    """Example scene: Recorde's historical first equation 14x + 15 = 71."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Write the caption, then the equation, and fade everything out."""
        title = TextMobject(r"\underline{\sc Example: Equation}", color=WHITE).to_corner(UL)
        title.move_to(np.array([-4.5, 3, 0]))
        caption_string = r"The first equation known under Recorde notation is: "
        caption_property = TextMobject2(caption_string)
        caption_property.scale(0.7)
        caption_property.move_to(np.array([-2.25, 2.1, 0]))
        equality = TexMobject(
            "14x + 15 ", # 0
            "=", # 1
            "71"
        )
        self.play(Write(title))
        self.play(FadeIn(caption_property))
        self.wait(10)
        self.play(Write(equality))
        self.wait(15)
        self.play(FadeOut(equality))
        self.play(FadeOut(caption_property))
        self.play(FadeOut(title))
class RegleOr(Scene):
    """'Golden rule' scene: statement of the substitution rule."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Write the title, fade in the boxed rule, then fade everything out."""
        title = TextMobject(r"\underline{\sc The substitution rule}", color=GOLD).to_corner(UL)
        # NOTE(review): \RedBox is a custom LaTeX macro — presumably defined in
        # the local TeX template; verify it is available at render time.
        definition = TextMobject(r"""
            \RedBox[label=exsecond]{}{
            For all quantities $a$ and $b$, and for every expression $F(x)$, if $a = b$ then :
            \begin{equation}
            F(a) = F(b)
            \end{equation}
            }
            """
        )
        definition.scale(0.5)
        definition.next_to(1.5 * UP + 5.5 * LEFT)
        self.play(Write(title))
        self.play(FadeIn(definition))
        self.wait(35)
        self.play(FadeOut(definition))
        self.play(FadeOut(title))
class RegleOrExemples(Scene):
    """Scene listing particular cases of the substitution rule (+, -, *, /)."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Write the title, then fade in the itemized list of special cases."""
        title = TextMobject(r"\underline{\sc Particular cases of the substitution rule}", color=GOLD).to_corner(UL)
        definition = TextMobject(r"""
            \begin{itemize}
            \item \underline{$F(x)=x + c$}: Let $a, b, c\in\mathbb{R}$, if $a = b$ then $F(a) = a + c = b + c = F(b)$,
            \item \underline{$F(x)=x - c$}: Let $a, b, c\in\mathbb{R}$, if $a = b$ then $F(a) = a - c = b - c = F(b)$,
            \item \underline{$F(x)=cx$}: Let $a, b, c\in\mathbb{R}$, if $a = b$ then $F(a) = ca = cb = F(b)$,
            \item \underline{$F(x)=\frac{x}{c}$}: Let $a, b\in\mathbb{R}$ and $c\neq 0$, if $a = b$ then $F(a) = \frac{a}{c} = \frac{b}{c} = F(b)$,
            \end{itemize}
            """)
        definition.scale(0.7)
        definition.move_to(np.array([-1, 1, 0]))
        definition.set_color(BLUE)
        self.play(Write(title))
        self.wait(7)
        self.play(FadeIn(definition))
        self.wait(60)
        self.play(FadeOut(definition))
        self.play(FadeOut(title))
class ZeroDivision(Scene):
    """Interlude scene: the 'divide by zero' meme image."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Fade in the image and hold it."""
        path = r"C:/Users/Utilisateur/Desktop/CEGEP/MANIM/manim-recent/media/images/"
        image_peinture = ImageMobject(path + "divide_by_zero")
        image_peinture.scale(3.25)
        image_peinture.to_edge(DOWN)
        # prob = TexMobject(r"\text{Find }T_n=A_n+B_n").scale(2).shift(2 * DOWN)
        self.play(FadeIn(image_peinture))
        self.wait(25)
class ExempleElem(Scene):
    """Exercise scene: one equation, one unknown, with a 5-second pause countdown."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Pose 14x + 15 = 71, run a countdown timer, then reveal the answer."""
        title = TextMobject(r"\underline{\sc Exercise: $1$ equation, $1$ unknown}", color=WHITE).to_corner(UL)
        title.move_to(np.array([-2.5, 3, 0]))
        caption_string = r"Find the solution of the following equation: "
        # NOTE(review): caption_property is created but never played in this
        # scene — confirm whether it was meant to be shown.
        caption_property = TextMobject2(caption_string)
        caption_property.scale(0.7)
        caption_property.move_to(np.array([-2.25, 2.1, 0]))
        equality = TexMobject(
            "14x + 15 ", # 0
            "=", # 1
            "71"
        )
        self.play(Write(title))
        self.wait(5)
        self.play(Write(equality))
        # Pause to think about it.
        circ = Arc(start_angle=PI / 2, angle=-2 * PI, radius=0.35).to_corner(DL)
        # Countdown digits 5, 4, ..., 0 centered on the circle.
        timers = [TexMobject(str(i)).move_to(circ) for i in range(5, -1, -1)]
        pause = TextMobject("Pause the video and find $x$.").next_to(circ, RIGHT)
        self.play(ShowCreation(circ), Write(pause))
        self.play(Write(timers[0]), run_time=0.5)
        for i in range(5):
            self.play(ReplacementTransform(timers[i], timers[i + 1]), run_time=0.5)
            self.wait(0.5)
        self.play(FadeOut(pause), FadeOut(timers[-1]), FadeOut(circ), run_time=2)
        answer = TextMobject("Solution: $x=4$").to_corner(DL)
        self.play(Write(answer))
        self.wait(10)
class ExempleElemSol(Scene):
    """Solution scene: solve 14x + 15 = 71 step by step via transformations."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Morph the equation through each algebraic step down to x = 4."""
        title = TextMobject(r"\underline{\sc Solution: $1$ equation, $1$ unknown}", color=WHITE).to_corner(UL)
        equation1 = TexMobject("14x+15", "=", "71")
        equation2 = TexMobject("14x", "=", "71-15")
        equation3 = TexMobject("14x", "=", "56")
        equation4 = TexMobject("x", "=", "\\frac{56}{14}","=","4")
        self.play(Write(title))
        self.wait(5)
        self.play(Write(equation1))
        self.wait(5)
        self.play(ReplacementTransform(equation1, equation2))
        self.wait(5)
        self.play(ReplacementTransform(equation2, equation3))
        self.wait(5)
        self.play(ReplacementTransform(equation3, equation4))
        self.wait(15)
class ExempleComplexe(Scene):
    """Exercise scene: 2x2 linear system, with a pause countdown and the answer."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Pose the system, run a 5-second countdown, then reveal x = 4, y = -5."""
        title = TextMobject(r"\underline{\sc Exercise: $2$ equations, $2$ unknowns}", color=WHITE).to_corner(UL)
        title.move_to(np.array([-2.5, 3, 0]))
        definition1 = TextMobject(r"""
            \systeme{3x-4y=32, 7x-6y=58}
            """)
        definition1.scale(1.25)
        definition1.move_to(np.array([-1, 1, 0]))
        definition1.set_color(BLUE)
        #definition2 = TextMobject(r"""
        ##    \systeme{-9x+12y=-96, 14x-12y=116}
        #    """)
        #definition2.scale(0.7)
        #definition2.move_to(np.array([-1, 1, 0]))
        #definition2.set_color(BLUE)
        self.play(Write(title))
        self.play(FadeIn(definition1))
        # Pause to think about it.
        circ = Arc(start_angle=PI / 2, angle=-2 * PI, radius=0.35).to_corner(DL)
        timers = [TexMobject(str(i)).move_to(circ) for i in range(5, -1, -1)]
        pause = TextMobject("Pause the video and find $x$ and $y$.").next_to(circ, RIGHT)
        self.play(ShowCreation(circ), Write(pause))
        self.play(Write(timers[0]), run_time=0.5)
        for i in range(5):
            self.play(ReplacementTransform(timers[i], timers[i + 1]), run_time=0.5)
            self.wait(0.5)
        self.play(FadeOut(pause), FadeOut(timers[-1]), FadeOut(circ), run_time=2)
        answer = TextMobject("Solution: $x=4$ et $y=-5$").to_corner(DL)
        self.play(Write(answer))
        self.wait(20)
class ExempleComplexeSol(Scene):
    """Solution scene: solve the 2x2 system by elimination, then back-substitution."""
    CONFIG = {
        "square_scale": 1,
        "squares_colors": [WHITE, YELLOW]
    }

    def construct(self):
        """Animate the row operations, find x, then substitute to find y."""
        title = TextMobject(r"\underline{\sc Solution: $2$ equations, $2$ unknowns}", color=WHITE).to_corner(UL)
        title.move_to(np.array([-2.5, 3, 0]))
        # Successive states of the system after each row operation.
        system0 = TextMobject(r"""\systeme{3x-4y=32, 7x-6y=58}""")
        system0.scale(0.7)
        system0.move_to(np.array([-1, 2, 0]))
        system0.set_color(BLUE)
        sytem0_copy = system0.copy()
        system1 = TextMobject(r"""\systeme{-9x+12y=-96, 7x-6y=58}""")
        system1.scale(0.7)
        system1.move_to(np.array([-1, 2, 0]))
        system1.set_color(BLUE)
        system2 = TextMobject(r"""\systeme{-9x+12y=-96, 14x-12y=116}""")
        system2.scale(0.7)
        system2.move_to(np.array([-1, 2, 0]))
        system2.set_color(BLUE)
        # Growing list of row operations shown beside the system.
        operations1 = TextMobject(r"""
            \begin{enumerate}
            \item $E_{1}\leftarrow -3 E_{1}$,
            \end{enumerate}
            """)
        operations1.scale(0.7)
        operations1.move_to(np.array([-5, -2, 0]))
        operations1.set_color(BLUE)
        operations2 = TextMobject(r"""
            \begin{enumerate}
            \item $E_{1}\leftarrow -3 E_{1}$,
            \item $E_{2}\leftarrow 2 E_{2}$,
            \end{enumerate}
            """)
        operations2.scale(0.7)
        operations2.move_to(np.array([-5, -2, 0]))
        operations2.set_color(BLUE)
        operations3 = TextMobject(r"""
            \begin{enumerate}
            \item $E_{1}\leftarrow -3 E_{1}$,
            \item $E_{2}\leftarrow 2 E_{2}$,
            \item $E_{1}+E_{2}$.
            \end{enumerate}
            """)
        operations3.scale(0.7)
        operations3.move_to(np.array([-5, -2, 0]))
        operations3.set_color(BLUE)
        operations4 = TextMobject(r"""
            \begin{enumerate}
            \item $E_{1}\leftarrow -3 E_{1}$,
            \item $E_{2}\leftarrow 2 E_{2}$,
            \item $E_{1}+E_{2}$,
            \item Substitute $x$ in $E_{1}$ or $E_{2}$.
            \end{enumerate}
            """)
        operations4.scale(0.7)
        operations4.move_to(np.array([-4, -2, 0]))
        operations4.set_color(BLUE)
        simplified_equation = TexMobject("5x+0y", "=", "20")
        simplified_equation.scale(0.7)
        simplified_equation.next_to(np.array([-2,0.75,0]))
        subsitution_equation = TexMobject("3(4)-4y=32","\Leftrightarrow", "-4y=20")
        subsitution_equation.scale(0.7)
        subsitution_equation.next_to(np.array([-3, 0.75, 0]))
        solution_x = TexMobject("x=4", color=BLUE)
        solution_x.scale(1)
        solution_x.next_to(np.array([1, -2, 0]))
        frameBox_x = SurroundingRectangle(solution_x, buff=0.75 * SMALL_BUFF)
        frameBox_x.set_stroke(RED, 2)
        solution_y = TexMobject("y=-5", color=BLUE)
        solution_y.scale(1)
        solution_y.next_to(np.array([4, -2, 0]))
        frameBox_y = SurroundingRectangle(solution_y, buff=0.75 * SMALL_BUFF)
        frameBox_y.set_stroke(RED, 2)
        horizontal_line = Line(np.array([-3.25,1.25,0]), np.array([1.5,1.25,0]), color=GREEN)
        self.play(Write(title))
        self.wait(5)
        self.play(Write(system0))
        self.wait(5)
        self.play(Write(operations1))
        self.wait(5)
        self.play(ReplacementTransform(system0, system1))
        self.wait(5)
        self.play(ReplacementTransform(operations1, operations2))
        self.wait(5)
        self.play(ReplacementTransform(system1, system2))
        self.wait(5)
        self.play(ShowCreation(horizontal_line))
        self.wait(5)
        self.play(ReplacementTransform(operations2, operations3))
        self.wait(5)
        self.play(ShowCreation(simplified_equation))
        self.wait(5)
        self.play(Write(solution_x))
        self.play(ShowCreation(frameBox_x))
        self.wait(5)
        # Restore the original system for the back-substitution step.
        self.play(FadeOut(simplified_equation))
        self.play(FadeOut(horizontal_line))
        self.play(ReplacementTransform(system2, sytem0_copy))
        self.wait(5)
        self.play(ReplacementTransform(operations3, operations4))
        self.wait(5)
        self.play(FadeIn(subsitution_equation))
        self.play(Write(solution_y))
        self.play(ShowCreation(frameBox_y))
        self.wait(15)
class Credits(Scene):
    """Closing credits: scrolling names followed by a thank-you message."""

    def wplay(self, *args, wait=1, run_time=1, rate_func=smooth):
        """Play the given animations, then wait (skip the wait when wait == 0)."""
        self.play(*args, run_time=run_time, rate_func=rate_func)
        if wait != 0:
            self.wait(wait)

    def construct(self):
        """Scroll the credit lines upward, then fade out the thanks text."""
        credits = TextMobject("Credits").set_color(YELLOW).scale(1.7)
        thanks = TextMobject("Thanks for watching this video!!").set_color(ORANGE).scale(1.7)
        instructor = TexMobject(r"\text{Teacher}", r"\text{Louis-Marc Mercier}")
        viewer = TexMobject(r"\text{Viewer}", r"\text{You}")
        lines = [instructor, viewer]
        # Left column right-aligned at x=-0.5, right column left-aligned at x=0.5,
        # everything parked below the frame so it can scroll up into view.
        instructor[0].align_to([-0.5, 0, 0], RIGHT).shift(8 * DOWN)
        instructor[1].align_to([0.5, 0, 0], LEFT).shift(8 * DOWN)
        viewer[0].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[0], RIGHT)
        viewer[1].next_to(instructor, DOWN, buff=LARGE_BUFF).align_to(instructor[1], LEFT)
        credits.set_y(instructor.get_top()[1] + 2 * LARGE_BUFF)
        thanks.set_y(-14.5)

        def half_start(t):
            # this rate function is great for gradually starting into a `linear` rate
            # it goes from 0 to 0.5 in value, and from 0 to 1 in slope (speed)
            return 1 / 2 * t ** 2

        everything_no_thanks = VGroup(credits, *lines)
        self.play(VGroup(*everything_no_thanks, thanks).shift, UP, rate_func=half_start)
        self.play(VGroup(*everything_no_thanks, thanks).shift, 14 * UP, rate_func=linear, run_time=14)
        self.play(everything_no_thanks.shift, 3 * UP, rate_func=linear, run_time=3)
        self.remove(*everything_no_thanks)
        self.wait(3)
        # all done :)
        self.wplay(FadeOut(thanks))
efa303342a01e2dd431041c3744a8083cabacd45 | Python | alwayswinder/python | /test05.py | UTF-8 | 1,164 | 2.75 | 3 | [] | no_license | '''
问卷调查
'''
import easygui as G
import sys
# Survey definition: each entry supplies the prompt, the dialog title and the
# available choices; a 'selected' key is filled in by Questionnaire() at runtime.
questions =(
    [
        {
            'question' : '你更喜欢下面哪个游戏?',
            'title' : '游戏倾向',
            'choices' : ['dota2', 'LOL']
        },
        {
            'question' : '你更喜欢下面哪个类型游戏?',
            'title' : '游戏类型倾向',
            'choices' : ['MMORPG', 'STG']
        }
    ])
def ShowRes(res):
    """Format the answered questionnaire items as a readable report string."""
    parts = []
    for item in res:
        parts.append('问题:' + item['question'] + '\n')
        parts.append('标题:' + item['title'] + '\n')
        parts.append('可选项:' + str(item['choices']) + '\n')
        parts.append('选择:' + item['selected'] + '\n')
        # Separator line between successive questionnaire items.
        parts.append('-----------------------------------------\n')
    return ''.join(parts)
def Questionnaire(questions):
    """Run the survey dialogs, record each choice, and optionally show a summary."""
    G.msgbox('开始游戏调查问卷!', '开始')
    for question in questions:
        msg = question['question']
        title = question['title']
        choices = question['choices']
        # choicebox returns the chosen option (or None if cancelled);
        # the answer is stored back onto the question dict.
        choice = G.choicebox(msg, title, choices)
        question['selected'] = str(choice)
    G.msgbox('您以为完成本次问卷调查的所有问题!')
    msg = '是否列出本次调查问卷详细情况?'
    if G.ccbox(msg):
        G.msgbox(ShowRes(questions), '结果')
    else:
        sys.exit(0)

# Launch the survey when the module is executed.
Questionnaire(questions)
| true |
dbcfb6eaba4275cb28003fb3065d73222972a26f | Python | accasey/offical-flask | /tests/test_auth.py | UTF-8 | 3,155 | 2.640625 | 3 | [
"MIT"
] | permissive | """Testing authentication."""
from flask import Flask, g, Response, session
from flask.testing import FlaskClient
import pytest
from flaskr.db import get_db
from tests.conftest import AuthActions
def test_register(client: FlaskClient, app: Flask) -> None:
    """Test the register endpoint.

    Registers a new user, checks the redirect to the login page, and
    verifies the user row was inserted into the database.

    Args:
        client (FlaskClient): The flask client.
        app (Flask): The application.
    """
    assert client.get("/auth/register").status_code == 200
    # If this failed, most likely a 500 internal server error.
    response: Response = client.post(
        "/auth/register", data={"username": "a", "password": "a"}
    )
    assert "http://localhost/auth/login" == response.headers["Location"]

    with app.app_context():
        assert (
            get_db().execute("SELECT * FROM user WHERE username = ?", ("a",)).fetchone()
            is not None
        )
@pytest.mark.parametrize(
    ("username", "password", "message"),
    (
        ("", "", b"Username is required."),
        ("a", "", b"Password is required."),
        ("test", "test", b"already registered"),
    ),
)
def test_register_validate_input(
    client: FlaskClient, username: str, password: str, message: str
) -> None:
    """Registering with invalid input shows the matching error message.

    Args:
        client (FlaskClient): The flask client.
        username (str): The username for the test.
        password (str): The password for the test.
        message (str): The error message expected in the response body.
    """
    form = {"username": username, "password": password}
    resp: Response = client.post("/auth/register", data=form)
    assert message in resp.data
def test_login(client: FlaskClient, auth: AuthActions) -> None:
    """Test the login functionality.

    Logs in with the seeded test user, checks the redirect, and verifies
    the session/user globals are populated on the next request.

    Args:
        client (FlaskClient): The flask testing client.
        auth (AuthActions): The class with the auth methods.
    """
    assert client.get("/auth/login").status_code == 200
    response: Response = auth.login()
    assert response.headers["Location"] == "http://localhost/"

    # Keeping the client context open lets us inspect session/g after the request.
    with client:
        client.get("/")
        assert session["user_id"] == 1
        assert g.user["username"] == "test"
@pytest.mark.parametrize(
    ("username", "password", "message"),
    (("a", "test", b"Incorrect username"), ("test", "a", b"Incorrect password"),),
)
def test_login_validate_input(
    auth: AuthActions, username: str, password: str, message: str
) -> None:
    """Logging in with bad credentials shows the matching error message.

    Args:
        auth (AuthActions): The class with the actions.
        username (str): The username to test.
        password (str): The password to test.
        message (str): The error message expected in the response body.
    """
    resp: Response = auth.login(username, password)
    assert message in resp.data
def test_logout(client: FlaskClient, auth: AuthActions) -> None:
    """Test the logout action.

    Logging out must remove the user id from the session.

    Args:
        client (FlaskClient): The flask testing client.
        auth (AuthActions): The class with the methods to test the actions.
    """
    auth.login()

    # The client context must stay open so ``session`` is inspectable
    # after the logout request completes.
    with client:
        auth.logout()
        assert "user_id" not in session
9d9ef3d7e20d5bc11314a28579dba8ff5fba3854 | Python | lixiang2017/leetcode | /leetcode-cn/sword2offer/000剑指0_Offer_37.3_序列化二叉树.py | UTF-8 | 1,616 | 3.46875 | 3 | [] | no_license | '''
Level Order,T:O(N),S:O(N)
执行用时:172 ms, 在所有 Python3 提交中击败了29.98% 的用户
内存消耗:19.8 MB, 在所有 Python3 提交中击败了16.03% 的用户
'''
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Codec:

    def serialize(self, root):
        """Encodes a tree to a single string.

        Breadth-first (level-order) traversal; absent children are written
        as the sentinel 'None'. Every token ends with a trailing comma.

        :type root: TreeNode
        :rtype: str
        """
        if not root:
            return ''
        level = ''
        q = [root]
        while q:
            node = q.pop(0)
            if not node:
                level += 'None,'
                continue
            level += str(node.val) + ','
            # Children are enqueued even when None so positions stay aligned.
            q.append(node.left)
            q.append(node.right)
        return level

    def deserialize(self, data):
        """Decodes your encoded data to tree.

        Rebuilds the tree level by level, consuming two tokens (left, right)
        per dequeued node.
        NOTE(review): node values are kept as strings on rebuild (TreeNode(next(data)))
        rather than converted back to int — confirm this round-trip is acceptable.

        :type data: str
        :rtype: TreeNode
        """
        if not data:
            return None
        data = iter(data.split(','))
        root = TreeNode(next(data))
        q = [root]
        while q:
            node = q.pop(0)
            left, right = next(data), next(data)
            if left != 'None':
                node.left = TreeNode(left)
                q.append(node.left)
            if right != 'None':
                node.right = TreeNode(right)
                q.append(node.right)
        return root
# Your Codec object will be instantiated and called as such:
# codec = Codec()
# codec.deserialize(codec.serialize(root))
| true |
83ce0b04fd99f55a0891aa888c4b3fb0be036f2a | Python | nickpeck/flo | /flo/observable.py | UTF-8 | 10,136 | 3.109375 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
import asyncio
from functools import wraps
from typing import TypeVar, Generic, Callable, Optional, List, Any, Union
class AsyncManager:
    """Singleton that collects coroutines as tasks on a private event
    loop and runs them on demand.
    """
    # The lazily-created singleton instance.
    __instance__: Optional[AsyncManager] = None

    @staticmethod
    def get_instance():
        """Return the manager instance, or create and return
        one if it does not exist.
        """
        if AsyncManager.__instance__ is None:
            AsyncManager.__instance__ = AsyncManager()
        return AsyncManager.__instance__

    @staticmethod
    def renew():
        """Reset the AsyncManager singleton, closing the old event loop
        (if any) and returning a fresh instance.
        """
        if AsyncManager.__instance__ is not None:
            # pylint: disable=protected-access
            AsyncManager.__instance__._loop.close()
        AsyncManager.__instance__ = AsyncManager()
        return AsyncManager.__instance__

    def __init__(self):
        # Dedicated event loop; tasks are queued until run() is called.
        self._loop = asyncio.new_event_loop()
        self._queue = []

    def enqueue_async(self, coro):
        """Enqueue a coroutine, as a task to be executed
        on the event loop when run() is invoked.
        """
        self._queue.append(self._loop.create_task(coro))

    def run(self):
        """Schedule all tasks in the queue for execution on the
        asyncio event loop, draining tasks enqueued during execution too.
        """
        tasks = []
        while len(self._queue) > 0:
            while len(self._queue) > 0:
                task = self._queue.pop(0)
                tasks.append(task)
            # nb, more tasks might get added to the queue here,
            # hence the double 'while'
            self._loop.run_until_complete(asyncio.wait(tasks))
T = TypeVar('T')


class Subscriber(Generic[T]):
    """Receives values emitted by an AsyncObservable.

    The ``on_next`` callback may be a plain callable or a coroutine
    function; either way it is invoked with each emitted value.
    """

    def __init__(self,
                 on_next: Callable[[T], Any] = lambda x: None):
        # Callback invoked for every value pushed to this subscriber.
        self._on_next = on_next

    async def on_next(self, value):
        """Deliver *value* to the callback and return self.

        ``asyncio.coroutine`` (used previously to wrap the callback) was
        removed in Python 3.11; instead, the callback's result is awaited
        only when it is actually a coroutine or a future.
        """
        result = self._on_next(value)
        if asyncio.iscoroutine(result) or asyncio.isfuture(result):
            await result
        return self
class AsyncObservable(Generic[T]):
    """Presents an observable that allows for asynchronous
    updates to and from multiple subscribers.
    """
    def __init__(self, head=None, dependants=None):
        """Initialize an AsyncObservable, where head is
        an initial value of Type T and dependants lists the observables
        this one is computed from (transitive dependants are flattened in).
        """
        self._v = head
        self._subscribers = []
        if dependants is None:
            self._dependants = []
        else:
            self._dependants = dependants
        # Flatten in the dependants' own dependants so depends_upon()
        # sees the whole transitive set; plain values without a
        # _dependants attribute are skipped.
        child_dependants = []
        for dep in self._dependants:
            try:
                child_dependants = child_dependants + dep._dependants
            except AttributeError:
                pass
        self._dependants = self._dependants + child_dependants

    def __str__(self):
        # String values are shown quoted, everything else via str().
        if isinstance(self._v, str):
            return "'{}'".format(self._v)
        return "{}".format(self._v)

    def __repr__(self):
        return str(self)

    def subscribe(self, subscriber: Subscriber[T]) -> AsyncObservable[T]:
        """Add a new subscriber and return the observable.
        If the subscriber is already added,
        then silently return. If this observable has a value,
        notify the subscriber.
        """
        if subscriber in self._subscribers:
            return self
        self._subscribers.append(subscriber)
        if self._v is not None:
            AsyncManager.get_instance().enqueue_async(
                subscriber.on_next(self._v))
        return self

    def peek(self) -> Optional[T]:
        """Return the current value held by the observable, which is of type T,
        or None, if nothing has been written to the observable yet.
        """
        return self._v

    def bind_to(self, other: AsyncObservable[T]) -> AsyncObservable[T]:
        """Create a binding between this observable, and another observable
        of a similar type, so that the other is subscribed to events
        in this observable. Return the initial observable.
        Raise an exception if attempting to bind a observable to itself
        or to one of this observable's dependants (which would loop).
        """
        if other in self._dependants:
            raise RuntimeError("Cannot bind to a dependant")
        if other == self:
            raise Exception("AsyncObservable cannot bind to itself")
        subscr = Subscriber[T](
            on_next = other.write
        )
        self.subscribe(subscr)
        return other

    def join_to(self, other: AsyncObservable[T]) -> AsyncObservable[T]:
        """Join this observable to another observable of the same type.
        The result is a new observable that recieves events from both
        source observables.
        Raise an exception if attempting to join a observable to itself.
        """
        if other == self:
            raise Exception("AsyncObservable cannot join to itself")
        joined = AsyncObservable[T]()
        subscr1 = Subscriber[T](
            on_next = joined.write
        )
        self.subscribe(subscr1)
        subscr2 = Subscriber[T](
            on_next = joined.write
        )
        other.subscribe(subscr2)
        return joined

    def filter(self, expr: Callable[[T], bool]) -> AsyncObservable[T]:
        """Return a new observable that contains events in the source
        observable, filtered through expr.
        """
        filtered_observable = AsyncObservable[T]()
        def _next(head: T):
            # Only forward values for which expr is truthy.
            truthy = expr(head)
            if truthy:
                filtered_observable.write(head)
        subscriber = Subscriber[T](
            on_next = _next
        )
        self.subscribe(subscriber)
        return filtered_observable

    def write(self, item: T) -> AsyncObservable[T]:
        """Write a new value to this observable and schedule notification
        of all subscribers on the AsyncManager queue (delivery happens
        when AsyncManager.run() is invoked).
        """
        self._v = item
        _head = self._v
        if self._subscribers != []:
            for subscr in self._subscribers:
                AsyncManager.get_instance().enqueue_async(
                    subscr.on_next(_head))
        return self

    @staticmethod
    def computed(func: Callable[[List[AsyncObservable[T]]], T],
                 dependants: List[AsyncObservable[T]]) -> AsyncObservable[T]:
        """Create a new observable that is based on a computed
        expression of dependent observables, so that when any of the dependents
        emits a new value, the computed result is written to the
        resulting observable.
        """
        def _bind(func, *args, **kwargs):
            # Partially apply func over the dependants; each call unwraps
            # their current values before invoking func.
            @wraps(func)
            def inner(*_args, **_kwags):
                _args_ = [unwrap(a) for a in args]
                return func(*_args_, *_args, **kwargs, **_kwags)
            return inner
        output_observable = AsyncObservable[T](None, dependants)
        bound_func = _bind(func, *dependants)
        def _on_next(_val):
            nonlocal bound_func
            nonlocal output_observable
            nonlocal dependants
            # Do not compute until every dependant holds a value.
            if None in [dep.peek() for dep in dependants]:
                return
            output_observable.write(bound_func())
        subscriber = Subscriber[T](
            on_next = _on_next
        )
        for dep in dependants:
            dep.subscribe(subscriber)
        # compute initial value, if it can be resolved
        if None not in [dep.peek() for dep in dependants]:
            output_observable.write(bound_func())
        return output_observable

    def depends_upon(self, other: AsyncObservable):
        """Return True if *other* is among this observable's (transitive) dependants."""
        return other in self._dependants
class ReadWriteDelegator(AsyncObservable):
    """Construct an observable that delegates writes to another observable
    and reads to a computed observable into which the write
    delegate is bound.
    The outward effect, (to the consumer) is of a single observable
    which emits a modified response to each value written to it.
    """
    def __init__(self,
                 write_delegate: AsyncObservable,
                 read_delegate: AsyncObservable[T]):
        super().__init__(None, [])
        # The read side must actually depend on the write side, otherwise
        # written values could never influence what is read.
        if write_delegate not in read_delegate._dependants:
            raise Exception("Cannot construct a delegate, as the observables are not bound")
        self._write_delegate = write_delegate
        self._read_delegate = read_delegate

    def write(self, item: AsyncObservable[T]) -> AsyncObservable[T]:
        # Writes go to the write delegate; the computed read side reacts.
        self._write_delegate.write(item)
        return self

    def peek(self) -> Optional[T]:
        # Reads come from the computed (read) delegate.
        return self._read_delegate.peek()

    def subscribe(self, subscriber: Subscriber[T]) -> AsyncObservable[T]:
        self._read_delegate.subscribe(subscriber)
        return self

    def bind_to(self, other: AsyncObservable[T]) -> AsyncObservable[T]:
        self._read_delegate.bind_to(other)
        return other

    def join_to(self, other: AsyncObservable[T]) -> AsyncObservable[T]:
        joined = self._read_delegate.join_to(other)
        return joined

    def filter(self, expr: Callable[[T], bool]) -> AsyncObservable[T]:
        filtered = self._read_delegate.filter(expr)
        return filtered
class ObservableWrapper(AsyncObservable):
    """Used as a wrapper to elevate library functions
    into observables
    """
    def __init__(self,
                 func: Callable[[T], Any] = lambda x : x):
        super().__init__(None, [])
        # Function applied to each written value; defaults to identity.
        self.func = func

    def write(self, item: AsyncObservable[T]) -> AsyncObservable[T]:
        """Apply func to the (unwrapped) value of *item*, store the result,
        and schedule notification of all subscribers.

        A tuple value is splatted into func as positional arguments,
        each element unwrapped first.
        """
        arg = item.peek()
        if isinstance(arg, tuple):
            self._v = self.func(*[unwrap(a) for a in arg])
        else:
            self._v = self.func(unwrap(arg))
        _head = self._v
        if self._subscribers != []:
            for subscr in self._subscribers:
                AsyncManager.get_instance().enqueue_async(
                    subscr.on_next(_head))
        return self
def unwrap(i: Union[AsyncObservable, Any]) -> Any:
    """Follow a chain of nested observables until a plain value is reached.

    Repeatedly peeks while the current value is itself an AsyncObservable,
    then returns the first non-observable value encountered.
    (Annotation fixed: `Union[AsyncObservable|Any]` mixed the two union
    syntaxes redundantly.)
    """
    while isinstance(i, AsyncObservable):
        i = i.peek()
    return i
| true |
773f6eaf45b86fdb8cdb6c3a498571464344c4cc | Python | shariharan205/Movie-Recommender-System | /Naive-Collaborative-Filtering.py | UTF-8 | 13,088 | 2.9375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from collections import Counter
from collections import defaultdict
from sklearn.metrics import roc_curve, auc
from surprise.model_selection import train_test_split
from surprise import Dataset
from surprise import Reader
from surprise.prediction_algorithms.knns import KNNWithMeans
from surprise import accuracy
from surprise.model_selection import KFold
from surprise.prediction_algorithms.matrix_factorization import SVD
from surprise.prediction_algorithms.matrix_factorization import NMF
from sklearn.metrics import mean_squared_error
ratings = pd.read_csv('ratings.csv')
movie = pd.read_csv('movies.csv')
df = pd.DataFrame({'itemID': list(ratings.movieId), 'userID': list(ratings.userId), 'rating': list(ratings.rating)})
reader = Reader(rating_scale=(0.5, 5.0))
data = Dataset.load_from_df(df[['userID', 'itemID', 'rating']], reader)
def popular_trim(data):
    """Keep only ratings of "popular" movies: those rated more than twice.

    `data` is an iterable of (user, movie, rating, ...) tuples; order is preserved.
    """
    print("Popular trimming")
    ratings_per_movie = Counter(record[1] for record in data)
    return [record for record in data if ratings_per_movie[record[1]] > 2]
def unpopular_trim(data):
    """Keep only ratings of "unpopular" movies: those rated at most twice.

    `data` is an iterable of (user, movie, rating, ...) tuples; order is preserved.
    """
    print("Unpopular trimming")
    movie_id_counter = Counter([val[1] for val in data])
    # Fixed: local was misnamed `popular_trimmed_data` (copy-paste from popular_trim).
    unpopular_trimmed_data = [val for val in data if movie_id_counter[val[1]] <= 2]
    return unpopular_trimmed_data
def high_var_trim(data):
    """Keep ratings of high-variance movies: at least 5 ratings and
    rating variance >= 2.0.

    `data` is an iterable of (user, movie, rating, ...) tuples; order is preserved.
    """
    print("High variance trimming")
    ratings_per_movie = defaultdict(list)
    for record in data:
        ratings_per_movie[record[1]].append(record[2])
    kept = []
    for record in data:
        movie_ratings = ratings_per_movie[record[1]]
        if len(movie_ratings) >= 5 and np.var(movie_ratings) >= 2.0:
            kept.append(record)
    return kept
print("====================================Design and test via cross-validation=============================================================")
avg_rating = df.groupby(['userID'])['rating'].mean().tolist()
def naive_prediction(dataset):
    """Predict each test rating as the mean rating of that rating's user.

    NOTE(review): looks up the module-level `avg_rating` list with
    `userID - 1`, so it assumes user IDs are contiguous and 1-based and
    that `avg_rating` is ordered by user ID — verify against the ratings file.
    """
    predictions = [avg_rating[dataset[i][0] - 1] for i in range(len(dataset))]
    return predictions
k_rmse = []
kf = KFold(n_splits=10)
for trainset, testset in kf.split(data):
y_pred = naive_prediction(testset)
y_true = [testset[i][2] for i in range(len(testset))]
k_rmse.append(mean_squared_error(y_true, y_pred))
avg_rmse = np.mean(k_rmse)
print('The average RMSE using naive collaborative filter is %0.4f' % avg_rmse)
print("==================================Naive collaborative filter performance on trimmed set====================================================")
print("====================================Popular trimming=============================================================")
k_rmse = []
kf = KFold(n_splits=10)
for trainset, testset in kf.split(data):
testset = popular_trim(testset)
y_pred = naive_prediction(testset)
y_true = [testset[i][2] for i in range(len(testset))]
k_rmse.append(mean_squared_error(y_true, y_pred))
avg_rmse = np.mean(k_rmse)
print('The average RMSE for popular movie trimmed set is %0.4f' % avg_rmse)
print("====================================Unpopular trimming=============================================================")
k_rmse = []
kf = KFold(n_splits=10)
for trainset, testset in kf.split(data):
testset = unpopular_trim(testset)
y_pred = naive_prediction(testset)
y_true = [testset[i][2] for i in range(len(testset))]
k_rmse.append(mean_squared_error(y_true, y_pred))
avg_rmse = np.mean(k_rmse)
print('The average RMSE for unpopular movie trimmed set is %0.4f' % avg_rmse)
print("====================================High Variance Trimming========================================================")
k_rmse = []
kf = KFold(n_splits=10)
for trainset, testset in kf.split(data):
testset = high_var_trim(testset)
y_pred = naive_prediction(testset)
y_true = [testset[i][2] for i in range(len(testset))]
k_rmse.append(mean_squared_error(y_true, y_pred))
avg_rmse = np.mean(k_rmse)
print('The average RMSE for high variance movie trimmed set is %0.4f' % avg_rmse)
print("==============================Performance Comparison=================================================")
k_range = range(2, 50, 2)
avg_rmse = []
kf = KFold(n_splits=10)
for k in k_range:
algo = SVD(n_factors=k)
k_rmse = []
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
k_rmse.append(accuracy.rmse(predictions, verbose=False))
avg_rmse.append(np.mean(k_rmse))
plt.plot(k_range, avg_rmse, label="Average RMSE")
plt.xlabel('Number of latent factors')
plt.ylabel('Error')
plt.legend()
plt.show()
print('The minimum average RMSE is %f for k = %d' % (np.min(avg_rmse), np.argmin(avg_rmse)))
print("====================================Ranking=====================================================")
k_range = range(2, 51, 2)
avg_rmse = []
kf = KFold(n_splits=10)
for k in k_range:
algo = SVD(n_factors=k)
k_rmse = []
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(popular_trim(testset))
k_rmse.append(accuracy.rmse(predictions, verbose=False))
avg_rmse.append(np.mean(k_rmse))
plt.plot(k_range, avg_rmse, label="Average RMSE")
plt.xlabel('Number of latent factors')
plt.ylabel('Error')
plt.legend()
plt.show()
print('The minimum average RMSE is %f for k = %d' % (np.min(avg_rmse), np.argmin(avg_rmse)))
def get_top_t(predictions, t=10):
    """Group (iid, est, true_r) triples per user and keep, for each user,
    the t items with the highest estimated rating (sorted descending)."""
    per_user = defaultdict(list)
    for uid, iid, true_r, est, _ in predictions:
        per_user[uid].append((iid, est, true_r))
    for uid in per_user:
        per_user[uid] = sorted(per_user[uid], key=lambda rec: rec[1], reverse=True)[:t]
    return per_user
train_set, test_set = train_test_split(data, test_size=0.1, random_state=0)
algo = KNNWithMeans(k=20, sim_options={'name': 'pearson'})
algo.fit(train_set)
predictions = algo.test(test_set)
top_recos = get_top_t(predictions)
def precision_recall_at_k(predictions, k=10, threshold=3.5):
    """Per-user precision@k and recall@k.

    An item is "relevant" if its true rating >= threshold, and "recommended"
    if it is among the user's top-k by estimated rating with est >= threshold.
    When a denominator is 0 the metric defaults to 1 (matching the common
    Surprise recipe). Returns (precisions, recalls) keyed by user id.
    """
    ratings_by_user = defaultdict(list)
    for uid, _, true_r, est, _ in predictions:
        ratings_by_user[uid].append((est, true_r))
    precisions = {}
    recalls = {}
    for uid, pairs in ratings_by_user.items():
        pairs.sort(key=lambda pair: pair[0], reverse=True)
        top_k = pairs[:k]
        relevant = sum(1 for _, true_r in pairs if true_r >= threshold)
        recommended = sum(1 for est, _ in top_k if est >= threshold)
        hits = sum(1 for est, true_r in top_k
                   if est >= threshold and true_r >= threshold)
        precisions[uid] = hits / recommended if recommended != 0 else 1
        recalls[uid] = hits / relevant if relevant != 0 else 1
    return precisions, recalls
print("===================================Plotting precision for KNN Collaborative Filtering=========================================================")
kf = KFold(n_splits=10)
algo = KNNWithMeans(k=20, sim_options={'name': 'pearson'})
threshold = 3
avg_prec, avg_rec = [], []
for t in range(1, 26):
t_prec, t_rec = [], []
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=t, threshold=threshold)
t_prec.append((sum(prec for prec in precisions.values()) / len(precisions)))
t_rec.append(sum(rec for rec in recalls.values()) / len(recalls))
avg_prec.append(np.mean(t_prec))
avg_rec.append(np.mean(t_rec))
t_range = range(1, 26)
plt.plot(t_range, avg_prec)
plt.xlabel('Item set size t', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs t for KNN")
plt.show()
plt.plot(t_range, avg_rec)
plt.xlabel('Item set size t', fontsize=15)
plt.ylabel('Average Recall', fontsize=15)
plt.title("Recall vs t for KNN")
plt.show()
plt.plot(avg_rec, avg_prec)
plt.xlabel('Average Recall', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs Recall for KNN")
plt.show()
print("===================================Plotting precision for ranking using NNMF based collaborative filtering==================================================")
kf = KFold(n_splits=10)
algo = NMF(n_factors=20)
threshold = 3
avg_prec, avg_rec = [], []
for t in range(1, 26):
t_prec, t_rec = [], []
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=t, threshold=threshold)
t_prec.append((sum(prec for prec in precisions.values()) / len(precisions)))
t_rec.append(sum(rec for rec in recalls.values()) / len(recalls))
avg_prec.append(np.mean(t_prec))
avg_rec.append(np.mean(t_rec))
t_range = range(1, 26)
plt.plot(t_range, avg_prec)
plt.xlabel('Item set size t', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs t for KNN")
plt.show()
plt.plot(t_range, avg_rec)
plt.xlabel('Item set size t', fontsize=15)
plt.ylabel('Average Recall', fontsize=15)
plt.title("Recall vs t for KNN")
plt.show()
plt.plot(avg_rec, avg_prec)
plt.xlabel('Average Recall', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs Recall for KNN")
plt.show()
print("==================================Plotting precision for MF with bias based filtering========================================================")
kf = KFold(n_splits=10)
algo = SVD(n_factors=20, init_mean=3)
threshold = 3
avg_prec, avg_rec = [], []
for t in range(1, 26):
t_prec, t_rec = [], []
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=t, threshold=threshold)
t_prec.append((sum(prec for prec in precisions.values()) / len(precisions)))
t_rec.append(sum(rec for rec in recalls.values()) / len(recalls))
avg_prec.append(np.mean(t_prec))
avg_rec.append(np.mean(t_rec))
t_range = range(1, 26)
plt.plot(t_range, avg_prec)
plt.xlabel('Item set size t', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs t for SVD")
plt.show()
plt.plot(t_range, avg_rec)
plt.xlabel('Item set size t', fontsize=15)
plt.ylabel('Average Recall', fontsize=15)
plt.title("Recall vs t for SVD")
plt.show()
plt.plot(avg_rec, avg_prec)
plt.xlabel('Average Recall', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs Recall for SVD")
plt.show()
print("===========================Plotting precision recall curve===============================================")
kf = KFold(n_splits=5)
algos = [KNNWithMeans(k=20, sim_options={'name': 'pearson'}), NMF(n_factors=20),
SVD(n_factors=20, init_mean=2.5)]
threshold = 3
algo_prec, algo_rec = [], []
for algo in algos:
avg_prec, avg_rec = [], []
for t in range(1, 26):
t_prec, t_rec = [], []
for trainset, testset in kf.split(data):
algo.fit(trainset)
predictions = algo.test(testset)
precisions, recalls = precision_recall_at_k(predictions, k=t, threshold=threshold)
t_prec.append((sum(prec for prec in precisions.values()) / len(precisions)))
t_rec.append(sum(rec for rec in recalls.values()) / len(recalls))
avg_prec.append(np.mean(t_prec))
avg_rec.append(np.mean(t_rec))
algo_prec.append(avg_prec)
algo_rec.append(avg_rec)
plt.plot(algo_rec[0], algo_prec[0], lw=2, label='KNN')
plt.plot(algo_rec[1], algo_prec[1], lw=2, label='NNMF')
plt.plot(algo_rec[2], algo_prec[2], lw=2, label='MF with bias')
plt.xlabel('Average Recall', fontsize=15)
plt.ylabel('Average Precision', fontsize=15)
plt.title("Precision vs Recall for different algorithms")
plt.legend()
plt.show()
train_set, test_set = train_test_split(data, test_size=0.1, random_state=0)
threshold = 3
algos = [KNNWithMeans(k=20, sim_options={'name': 'pearson'}), NMF(n_factors=20),
SVD(n_factors=20, init_mean=2.5)]
algo_fpr, algo_tpr, algo_auc = [], [], []
for algo in algos:
print('Algorithm is ', algo)
algo.fit(train_set)
predictions = algo.test(test_set)
pred_est = np.array([i.est for i in predictions])
actual_ratings = np.array([i.r_ui for i in predictions])
y_score = pred_est
y_true = actual_ratings >= threshold
fpr, tpr, _ = roc_curve(y_true, y_score)
roc_auc = auc(fpr, tpr)
algo_fpr.append(fpr)
algo_tpr.append(tpr)
algo_auc.append(roc_auc)
plt.plot(algo_fpr[0], algo_tpr[0], lw=2, label='KNN, AUC = %0.4f' % algo_auc[0])
plt.plot(algo_fpr[1], algo_tpr[1], lw=2, label='NNMF, AUC = %0.4f' % algo_auc[1])
plt.plot(algo_fpr[2], algo_tpr[2], lw=2, label='MF with bias, AUC = %0.4f' % algo_auc[2])
plt.xlabel('False Positive Rate (FPR)')
plt.ylabel('True Positive Rate')
plt.legend(loc='lower right')
plt.title('ROC curves for different algorithms')
plt.show()
| true |
a8191c623383b790b4407c60f73797341dc9a08f | Python | ixfalia/ZeroProjects | /Fellowship/Content/BarHudLabel.py | UTF-8 | 2,859 | 2.578125 | 3 | [] | no_license | import Zero
import Events
import Property
import VectorMath
import Color
Vec4 = VectorMath.Vec4
class BarHudLabel:
    """Zero-engine HUD component pairing a text label with a bar that tracks
    one named pet statistic (updated via StatUpdateEvent / PetRegisterEvent)."""
    def DefineProperties(self):
        # Editor-exposed properties. Vec4(-1,-1,-1,-1) is the sentinel for
        # "color not set" (real colors have non-negative components).
        UndefinedColor = Vec4(-1,-1,-1,-1)
        self.LabelName = Property.String()
        self.BarLevel = Property.Float(default = 0.1)
        self.StatName = Property.String()
        self.LowColor = Property.Color(default = UndefinedColor)
        self.HighColor = Property.Color(default = UndefinedColor)
        pass
    def Initialize(self, initializer):
        # Listen for pet registration; stat updates only matter if a stat name is set.
        Zero.Connect(self.Space, "PetRegisterEvent", self.onPetRegister)
        if not self.StatName == "":
            Zero.Connect(self.Space, "StatUpdateEvent", self.onStat)
            Zero.Connect(self.GameSession, "StatUpdateEvent", self.onStat)
        # Fall back to the stat name as the visible label.
        if not self.LabelName:
            self.LabelName = self.StatName
        # NOTE(review): `not self.BarLevel` also triggers for an explicit 0.0,
        # silently bumping it to 0.01 — confirm that is intended.
        if not self.BarLevel:
            self.BarLevel = 0.01
        self.Label = self.Owner.FindChildByName("label")
        self.Label.SpriteText.Text = self.LabelName
        self.Bar = self.Owner.FindChildByName("bar")
        self.Bar.BarController.setValue(self.BarLevel)
        self.Pet = None
        self.CurrentValue = self.BarLevel
        self.UndefinedColor = Vec4(-1,-1,-1,-1)
        #Hot pink is (1, 0.411765, 0.705882, 1) Vec4(1, 0.411765, 0.705882, 1)
        # Only override the bar's colors when the editor provided real ones.
        if not self.LowColor == self.UndefinedColor:
            self.Bar.BarController.LowColor = self.LowColor
        if not self.HighColor == self.UndefinedColor:
            self.Bar.BarController.HighColor = self.HighColor
    def OnLogicUpdate(self, UpdateEvent):
        # Intentionally empty: this component is purely event-driven.
        pass
    def setBar(self, value = None):
        """Update the bar fill and label text from `value` (or the cached value)."""
        if not value:
            value = self.CurrentValue
        #print("{}.BarHudLabel.setBar: Setting Bar to {}".format(self.Owner.Name, value))
        # Happiness is stored in [-1, 1]; remap it to the bar's [0, 1] range.
        if self.StatName == "Happiness":
            value = (value + 1) / 2
        self.Bar.BarController.setValue(value)
        if self.LabelName == "":
            self.Label.SpriteText.Text = "{}:".format(self.StatName, int(value * 255))
        else:
            self.Label.SpriteText.Text = "{}:".format(self.LabelName, int(value * 255))
    def onStat(self, sEvent):
        # Accept either a direct (Type, Value) event for our stat, or an event
        # carrying a Data payload from which the stat can be read.
        type = sEvent.Type
        if type and type == self.StatName:
            value = sEvent.Value
        elif sEvent.Data:
            value = sEvent.Data.Statistics.getStat(self.StatName)
        else:
            return
        self.CurrentValue = value
        self.setBar(value)
    def onPetRegister(self, petEvent):
        # A pet was registered: seed the bar from the session's current stat value.
        if petEvent.Pet:
            self.Pet = petEvent.Pet
            self.CurrentValue = self.GameSession.Statistics.getStat(self.StatName)
            self.setBar()
Zero.RegisterComponent("BarHudLabel", BarHudLabel) | true |
ffcb5ab1a5ab41c876c2d822a48a97f58057f418 | Python | qianrenjian/NewsRecommendation | /src/train.py | UTF-8 | 9,202 | 2.546875 | 3 | [
"MIT"
] | permissive | from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from dataset import BaseDataset
import torch
import torch.nn.functional as F
import time
import numpy as np
from config import model_name
from tqdm import tqdm
import os
from pathlib import Path
from evaluate import evaluate
import importlib
import datetime
try:
Model = getattr(importlib.import_module(f"model.{model_name}"), model_name)
Config = getattr(importlib.import_module('config'), f"{model_name}Config")
except (AttributeError, ModuleNotFoundError):
print(f"{model_name} not included!")
exit()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class EarlyStopping:
    """Signal early stopping once the monitored loss has failed to improve
    for `patience` consecutive checks."""

    def __init__(self, patience=5):
        self.patience = patience
        self.counter = 0
        # np.Inf (capitalized alias) was removed in NumPy 2.0; np.inf is canonical.
        self.best_loss = np.inf

    def __call__(self, val_loss):
        """Record one observation and return (early_stop, get_better).

        if you use other metrics where a higher value is better, e.g. accuracy,
        call this with its corresponding negative value
        """
        if val_loss < self.best_loss:
            # New best: remember it and reset the patience counter.
            self.counter = 0
            self.best_loss = val_loss
            return False, True
        # No improvement: stop once `patience` consecutive misses accumulate.
        self.counter += 1
        return self.counter >= self.patience, False
def latest_checkpoint(directory):
    """Return the path of the checkpoint with the highest step number in
    `directory` (files named like "ckpt-<step>.pth"), or None if the
    directory does not exist or holds no parsable checkpoint files."""
    if not os.path.exists(directory):
        return None
    all_checkpoints = {}
    for name in os.listdir(directory):
        try:
            # "ckpt-<step>.pth" -> <step>
            step = int(name.split('.')[-2].split('-')[-1])
        except (IndexError, ValueError):
            # Previously any stray file (e.g. "notes.txt") crashed the scan;
            # skip names that do not follow the checkpoint naming scheme.
            continue
        all_checkpoints[step] = name
    if not all_checkpoints:
        return None
    return os.path.join(directory, all_checkpoints[max(all_checkpoints)])
def train():
    """Train the configured model: stream minibatches (recycling the dataset
    when exhausted), log losses to TensorBoard, validate every
    Config.num_batches_validate batches, checkpoint on validation-AUC
    improvement, and stop early when AUC stops improving."""
    writer = SummaryWriter(
        log_dir=
        f"./runs/{model_name}/{datetime.datetime.now().replace(microsecond=0).isoformat()}{'-' + os.environ['REMARK'] if 'REMARK' in os.environ else ''}"
    )
    if not os.path.exists('checkpoint'):
        os.makedirs('checkpoint')
    # Pretrained embeddings are optional: missing files fall back to None.
    try:
        pretrained_word_embedding = torch.from_numpy(
            np.load('./data/train/pretrained_word_embedding.npy')).float()
    except FileNotFoundError:
        pretrained_word_embedding = None
    if model_name == 'DKN':
        # DKN additionally consumes entity and context embeddings.
        try:
            pretrained_entity_embedding = torch.from_numpy(
                np.load(
                    './data/train/pretrained_entity_embedding.npy')).float()
        except FileNotFoundError:
            pretrained_entity_embedding = None
        try:
            pretrained_context_embedding = torch.from_numpy(
                np.load(
                    './data/train/pretrained_context_embedding.npy')).float()
        except FileNotFoundError:
            pretrained_context_embedding = None
        model = Model(Config, pretrained_word_embedding,
                      pretrained_entity_embedding,
                      pretrained_context_embedding, writer).to(device)
    else:
        model = Model(Config, pretrained_word_embedding, writer).to(device)
    print(model)
    dataset = BaseDataset('data/train/behaviors_parsed.tsv',
                          'data/train/news_parsed.tsv',
                          Config.dataset_attributes)
    print(f"Load training dataset with size {len(dataset)}.")
    dataloader = iter(
        DataLoader(dataset,
                   batch_size=Config.batch_size,
                   shuffle=True,
                   num_workers=Config.num_workers,
                   drop_last=True))
    optimizer = torch.optim.Adam(model.parameters(), lr=Config.learning_rate)
    start_time = time.time()
    loss_full = []
    exhaustion_count = 0
    step = 0
    early_stopping = EarlyStopping()
    checkpoint_dir = os.path.join('./checkpoint', model_name)
    Path(checkpoint_dir).mkdir(parents=True, exist_ok=True)
    # Resume from the most recent checkpoint when one exists.
    checkpoint_path = latest_checkpoint(checkpoint_dir)
    if checkpoint_path is not None:
        print(f"Load saved parameters in {checkpoint_path}")
        checkpoint = torch.load(checkpoint_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        step = checkpoint['step']
        # Re-seed the early-stopping state with the saved best value.
        early_stopping(checkpoint['early_stop_value'])
        model.train()
    with tqdm(total=Config.num_batches, desc="Training") as pbar:
        for i in range(1, Config.num_batches + 1):
            try:
                minibatch = next(dataloader)
            except StopIteration:
                # Dataset exhausted: rebuild the iterator and continue.
                exhaustion_count += 1
                tqdm.write(
                    f"Training data exhausted for {exhaustion_count} times after {i} batches, reuse the dataset."
                )
                dataloader = iter(
                    DataLoader(dataset,
                               batch_size=Config.batch_size,
                               shuffle=True,
                               num_workers=Config.num_workers,
                               drop_last=True))
                minibatch = next(dataloader)
            step += 1
            # Each model family has its own forward signature / outputs.
            if model_name == 'LSTUR':
                y_pred = model(minibatch["user"],
                               minibatch["clicked_news_length"],
                               minibatch["candidate_news"],
                               minibatch["clicked_news"])
            elif model_name == 'HiFiArk':
                y_pred, regularizer_loss = model(minibatch["candidate_news"],
                                                 minibatch["clicked_news"])
            elif model_name == 'TANR':
                y_pred, topic_classification_loss = model(
                    minibatch["candidate_news"], minibatch["clicked_news"])
            else:
                y_pred = model(minibatch["candidate_news"],
                               minibatch["clicked_news"])
            # Column 0 holds the positive candidate: mean NLL of the positive.
            loss = torch.stack([x[0] for x in -F.log_softmax(y_pred, dim=1)
                                ]).mean()
            if model_name == 'HiFiArk':
                if i % 10 == 0:
                    writer.add_scalar('Train/BaseLoss', loss.item(), step)
                    writer.add_scalar('Train/RegularizerLoss',
                                      regularizer_loss.item(), step)
                    writer.add_scalar('Train/RegularizerBaseRatio',
                                      regularizer_loss.item() / loss.item(),
                                      step)
                loss += Config.regularizer_loss_weight * regularizer_loss
            elif model_name == 'TANR':
                if i % 10 == 0:
                    writer.add_scalar('Train/BaseLoss', loss.item(), step)
                    writer.add_scalar('Train/TopicClassificationLoss',
                                      topic_classification_loss.item(), step)
                    writer.add_scalar(
                        'Train/TopicBaseRatio',
                        topic_classification_loss.item() / loss.item(), step)
                loss += Config.topic_classification_loss_weight * topic_classification_loss
            loss_full.append(loss.item())
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if i % 10 == 0:
                writer.add_scalar('Train/Loss', loss.item(), step)
            if i % Config.num_batches_show_loss == 0:
                tqdm.write(
                    f"Time {time_since(start_time)}, batches {i}, current loss {loss.item():.4f}, average loss: {np.mean(loss_full):.4f}"
                )
            if i % Config.num_batches_validate == 0:
                # Periodic validation. AUC is negated because EarlyStopping
                # expects a lower-is-better value.
                val_auc, val_mrr, val_ndcg5, val_ndcg10 = evaluate(
                    model, './data/val')
                writer.add_scalar('Validation/AUC', val_auc, step)
                writer.add_scalar('Validation/MRR', val_mrr, step)
                writer.add_scalar('Validation/nDCG@5', val_ndcg5, step)
                writer.add_scalar('Validation/nDCG@10', val_ndcg10, step)
                tqdm.write(
                    f"Time {time_since(start_time)}, batches {i}, validation AUC: {val_auc:.4f}, validation MRR: {val_mrr:.4f}, validation nDCG@5: {val_ndcg5:.4f}, validation nDCG@10: {val_ndcg10:.4f}, "
                )
                early_stop, get_better = early_stopping(-val_auc)
                if early_stop:
                    tqdm.write('Early stop.')
                    break
                elif get_better:
                    # Only checkpoint when validation AUC improved.
                    torch.save(
                        {
                            'model_state_dict': model.state_dict(),
                            'optimizer_state_dict': optimizer.state_dict(),
                            'step': step,
                            'early_stop_value': -val_auc
                        }, f"./checkpoint/{model_name}/ckpt-{step}.pth")
            pbar.update(1)
def time_since(since):
    """
    Format elapsed time (seconds since `since`) as "HH:MM:SS".

    Computed with divmod rather than strftime("%H:%M:%S", gmtime(...)),
    which silently wraps at 24 hours and misreported long training runs.
    """
    elapsed = int(time.time() - since)
    hours, remainder = divmod(elapsed, 3600)
    minutes, seconds = divmod(remainder, 60)
    return f"{hours:02d}:{minutes:02d}:{seconds:02d}"
# Entry point: report the compute device and selected model, then train.
if __name__ == '__main__':
    print('Using device:', device)
    print(f'Training model {model_name}')
    train()
| true |
ea840723a6d031fd7928e5a503c8378d15a7b2b6 | Python | gsscsd/recommend-algorithm | /NFM_demo.py | UTF-8 | 2,958 | 2.859375 | 3 | [] | no_license | import tensorflow as tf
import numpy as np
class BiInteraction(tf.keras.layers.Layer):
    """Bi-Interaction pooling layer (NFM): pools pairwise feature interactions
    via the identity 0.5 * ((sum_i x_i)^2 - sum_i x_i^2) over axis 1."""

    def __init__(self, **kwargs):
        super(BiInteraction, self).__init__(**kwargs)

    def call(self, x):
        # Square-of-sum minus sum-of-squares, pooled over the field axis.
        square_of_sum = tf.square(tf.reduce_sum(x, axis=1))
        sum_of_square = tf.reduce_sum(tf.square(x), axis=1)
        return 0.5 * (square_of_sum - sum_of_square)

    def compute_output_shape(self, input_shape):
        # The field axis is pooled away: (batch, fields, dim) -> (batch, dim).
        return (input_shape[0], input_shape[-1])
class NFM(tf.keras.Model):
    """Neural Factorization Machine: a linear (first-order) term plus a deep
    term built from Bi-Interaction pooling of feature embeddings."""
    def __init__(self, feature_size, embedding_size, *args, **kwargs):
        super(NFM, self).__init__(*args, **kwargs)
        # Shared embedding tables: `embedding_layer` holds k-dim factors,
        # `linear_layer` holds the per-feature scalar (first-order) weights.
        self.embedding_layer = tf.keras.layers.Embedding(feature_size, embedding_size)
        self.linear_layer = tf.keras.layers.Embedding(feature_size, 1)
        self.interact_layer = BiInteraction()
        self.dense_layer_1 = tf.keras.layers.Dense(64, activation=tf.nn.relu)
        self.dense_layer_2 = tf.keras.layers.Dense(32, activation=tf.nn.relu)
        self.dense_layer_3 = tf.keras.layers.Dense(1, activation=tf.nn.sigmoid)
    def call(self, inputs, training=None, mask=None):
        # `inputs` is a dict with 'category_index', 'numerical_index' and
        # 'numerical_value'; numerical embeddings are scaled by their value.
        # Shallow Part
        first_order_cate = self.linear_layer(inputs['category_index'])
        first_order_nume = self.linear_layer(inputs['numerical_index'])
        first_order_nume = tf.multiply(first_order_nume, tf.expand_dims(inputs['numerical_value'], -1))
        first_order_concat = tf.concat([first_order_cate, first_order_nume], axis=1)
        first_order = tf.reduce_sum(first_order_concat, axis=1)
        # BiInteraction Part: Pair-Wise Interaction Layer
        embedding_cate = self.embedding_layer(inputs['category_index'])
        embedding_nume = self.embedding_layer(inputs['numerical_index'])
        embedding_nume = tf.multiply(embedding_nume, tf.expand_dims(inputs['numerical_value'], -1))
        embeddings = tf.concat([embedding_cate, embedding_nume], axis=1)
        second_order = self.interact_layer(embeddings)
        # Deep Part
        dense_vector = self.dense_layer_1(second_order)
        dense_vector = self.dense_layer_2(dense_vector)
        dense_output = self.dense_layer_3(dense_vector)
        # Output: linear term + deep term (both shaped (batch, 1)).
        output = tf.add_n([first_order, dense_output])
        return output
if __name__ == '__main__':
# model parameters
params = dict()
params['feature_size'] = 5000
params['embedding_size'] = 8
# generate test data
batch_size = 16
category_size = 12
numerical_size = 10
data = dict()
data['category_index'] = np.random.randint(0, 5000, (batch_size, category_size), np.int32)
data['numerical_index'] = np.random.randint(0, 5000, (batch_size, numerical_size), np.int32)
data['numerical_value'] = np.random.randn(batch_size, numerical_size).astype(np.float32)
# build model
model = NFM(**params)
result = model(data)
| true |
72f23c50cf1f5edeccc7b11baab808f38f259ad4 | Python | weezer/fun_scripts | /algo/208. Implement Trie (Prefix Tree).py | UTF-8 | 1,781 | 3.984375 | 4 | [] | no_license | class Node(object):
def __init__(self):
self.num = 0
self.hash_map = {}
class Trie(object):
def __init__(self):
"""
Initialize your data structure here.
"""
self.root = Node()
def insert(self, word):
"""
Inserts a word into the trie.
:type word: str
:rtype: void
"""
current = self.root
for i in word:
if current.hash_map.get(i) is None:
current.hash_map[i] = Node()
current = current.hash_map.get(i)
current.num += 1
def search(self, word):
"""
Returns if the word is in the trie.
:type word: str
:rtype: bool
"""
current = self.root
for i in word:
if current.hash_map.get(i) is None:
return False
current = current.hash_map.get(i)
if current.num != 0:
return True
return False
def startsWith(self, prefix):
"""
Returns if there is any word in the trie that starts with the given prefix.
:type prefix: str
:rtype: bool
"""
current = self.root
for i in prefix:
if current.hash_map.get(i) is None:
return False
current = current.hash_map.get(i)
return True
# Your Trie object will be instantiated and called as such:
# obj = Trie()
# obj.insert(word)
# param_2 = obj.search(word)
# param_3 = obj.startsWith(prefix)
if __name__ == "__main__":
s = Trie()
s.insert("aa")
s.insert("bb")
s.insert("ab")
s.insert("aa")
print s.root.hash_map['a'].hash_map['a'].num
print s.search("aa")
print s.startsWith("a") | true |
57c44b36929fd48034e3c7beeae3d5999dc8f1b7 | Python | mhso/BombSolver | /src/model/module_classifier.py | UTF-8 | 2,957 | 2.53125 | 3 | [] | no_license | from tensorflow.keras import losses
from tensorflow.keras.layers import Dense, Input, Flatten
from tensorflow.keras.layers import Activation
from tensorflow.keras.optimizers import SGD
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.utils import plot_model
from tensorflow.keras.regularizers import l2
import config
import model.classifier_util as utils
LABELS = [
"Nothing (Side)",
"Big battery",
"Small batteries",
"Serial number",
"Metal piece",
"Parallel port",
"Indicator",
"Nothing (Front)",
"Timer",
"Wires",
"Button",
"Symbols",
"Simon Says",
"Wire Sequence",
"Complicated Wires",
"Memory Game",
"Who's On First?",
"Maze",
"Password",
"Morse",
"Needy Vent",
"Needy Discharge",
"Needy Knob"
]
# Layer constants.
CONV_FILTERS = 32
DENSE_UNITS = 292
CONV_LAYERS = 4
KERNEL_SIZE = 3
USE_BIAS = True
REGULARIZER_CONST = 0.001
# Optimizer constants.
LEARNING_RATE = 0.02
MOMENTUM = 0.9
WEIGHT_DECAY = 1e-4
# Training constants.
BATCH_SIZE = 160
EPOCHS_PER_BATCH = 6
VALIDATION_SPLIT = 0.3
TESTS_PER_LABEL = 4
def output_layer(prev):
out = Flatten()(prev)
out = Dense(DENSE_UNITS, kernel_regularizer=l2(REGULARIZER_CONST),
use_bias=USE_BIAS)(out)
out = Dense(config.MODULE_OUTPUT_DIM, kernel_regularizer=l2(REGULARIZER_CONST),
use_bias=USE_BIAS)(out)
out = Activation("softmax")(out)
return out
def save_as_image(model):
plot_model(model, to_file='../resources/model_graph.png', show_shapes=True)
def compile_model(model):
model.compile(
optimizer=SGD(learning_rate=LEARNING_RATE, decay=WEIGHT_DECAY, momentum=MOMENTUM),
loss=[losses.categorical_crossentropy],
metrics=["accuracy"]
)
def build_model():
utils.set_nn_config()
inp = Input(config.MODULE_INPUT_DIM)
layer = inp
layer = utils.conv_layer(layer, CONV_FILTERS, KERNEL_SIZE, REGULARIZER_CONST)
for _ in range(CONV_LAYERS):
layer = utils.conv_layer(layer, CONV_FILTERS, KERNEL_SIZE, REGULARIZER_CONST)
out = output_layer(layer)
model = Model(inputs=inp, outputs=out)
compile_model(model)
return model
def shape_input(inp):
reshaped = inp
if len(inp.shape) < 4:
reshaped = inp.reshape((1,)+config.MODULE_INPUT_DIM)
return reshaped
def load_from_file(filename):
utils.set_nn_config()
model = load_model(filename, compile=False)
compile_model(model)
return model
def evaluate(model, inputs, expected_out):
score = model.evaluate(inputs, expected_out, verbose=0)
return (score[0], score[1])
def predict(model, inp):
return model.predict(shape_input(inp))
def train(model, inputs, expected_out):
result = model.fit(
inputs, expected_out, batch_size=BATCH_SIZE, verbose=0,
epochs=EPOCHS_PER_BATCH, validation_split=VALIDATION_SPLIT
)
return result.history
| true |
d7e06728676e375925a3e4c9b77ef7551ed3b72e | Python | haiweiosu/WebScraping | /ESPNdata/copper/ml/wrappers.py | UTF-8 | 2,168 | 2.78125 | 3 | [] | no_license | # encoding: utf-8
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.preprocessing import OneHotEncoder
class FeatureMixer(BaseEstimator):
    """Concatenate (horizontally stack) the outputs of several transformers
    into a single feature matrix.

    Parameters
    ----------
    clfs : list of transformer-like objects exposing fit/transform/fit_transform.
    ignorefit : when True, fitting is skipped entirely (both in fit() and in
        the fit half of fit_transform()).
    """

    def __init__(self, clfs, ignorefit=False):
        self.clfs = clfs
        self.ignorefit = ignorefit

    def fit_transform(self, X, y=None):
        if not self.ignorefit:
            self.fit(X, y)
        return self.transform(X)

    def fit(self, X, y=None):
        if not self.ignorefit:
            for clf in self.clfs:
                # fit_transform (rather than fit) is called because some wrapped
                # transformers only fit through it; the transformed output is
                # intentionally discarded. (Fixed: it was bound to an unused local.)
                clf.fit_transform(X, y)

    def transform(self, X):
        ans = None
        for clf in self.clfs:
            new = clf.transform(X)
            if ans is None:
                ans = new
            else:
                ans = np.hstack((ans, new))
        return ans
class TransformWrapper(BaseEstimator):
    """Wrap an estimator so every input matrix is passed through
    `transformation` before reaching the wrapped estimator.

    NOTE(review): the constructor stores the boolean flag as
    `self.fit_transform`, which shadows the conventional sklearn
    `fit_transform` *method* name on this wrapper — confirm no caller
    expects wrapper.fit_transform(X) to be callable.
    """

    def __init__(self, clf, transformation, fit_transform=True):
        self.clf = clf
        # When True, `transformation` itself is (re)fitted inside fit().
        self.fit_transform = fit_transform
        self.transformation = transformation

    def fit(self, X, y=None):
        if self.fit_transform:
            self.transformation.fit(X)
        _X = self._pretransform(X)
        if y is None:
            self.clf.fit(_X)
        else:
            self.clf.fit(_X, y)

    def predict(self, X):
        _X = self._pretransform(X)
        return self.clf.predict(_X)

    def predict_proba(self, X):
        _X = self._pretransform(X)
        return self.clf.predict_proba(_X)

    def transform(self, X):
        _X = self._pretransform(X)
        return self.clf.transform(_X)

    def _pretransform(self, X):
        # Single override point for subclasses that post-process the transform.
        return self.transformation.transform(X)
class GMMWrapper(TransformWrapper):
    """TransformWrapper variant for clustering models (e.g. a GMM): the
    transformation's hard cluster assignment is one-hot encoded before
    being fed to the wrapped estimator."""

    def fit(self, X, y=None):
        if self.fit_transform:
            self.transformation.fit(X)
        # Fit the one-hot encoder on cluster labels shaped as a column vector.
        t = self.transformation.predict(X)[np.newaxis].T
        self.enc = OneHotEncoder()
        self.enc.fit(t)
        _X = self._pretransform(X)
        if y is None:
            self.clf.fit(_X)
        else:
            self.clf.fit(_X, y)

    def _pretransform(self, X):
        # predict() -> cluster ids -> dense one-hot matrix.
        t = self.transformation.predict(X)[np.newaxis].T
        return self.enc.transform(t).toarray()
| true |
d7957441de02122597aae8a55a1398c3beb8964a | Python | fader111/dp2 | /test_gpio.py | UTF-8 | 332 | 2.625 | 3 | [] | no_license | import Jetson.GPIO as GPIO
import time
# GPIO.setwarnings(False) # suppresses GPIO warnings
GPIO.setmode(GPIO.BOARD)
GREEN = 40
GPIO.setup(GREEN, GPIO.OUT)
RED = 38
GPIO.setup(RED, GPIO.OUT)
GPIO.setwarnings(False)
print("ON")
GPIO.output(GREEN,1)
time.sleep(5)
print("OFFFFF")
GPIO.output(GREEN,0)
time.sleep(5)
# GPIO.cleanup() | true |
9b9929242089924ba3795063cc1c4f903999b9d0 | Python | vatsaashwin/Hashing-2 | /contigSubArr10.py | UTF-8 | 1,648 | 3.796875 | 4 | [] | no_license | # // Time Complexity : O(n)
# // Space Complexity : O(n)
# // Did this code successfully run on Leetcode : Yes
# // Any problem you faced while coding this : No
# // Your code here along with comments explaining your approach:
class Solution:
    def findMaxLength(self, nums: List[int]) -> int:
        """Length of the longest contiguous subarray with equal 0s and 1s.

        Treat 0 as -1 and 1 as +1 and keep a running prefix sum: two indices
        with the same prefix sum bound a balanced subarray. A hash map stores
        the earliest index at which each prefix sum occurred, giving O(n)
        time and O(n) space.
        """
        first_seen = {}  # prefix sum -> earliest index it occurred at
        prefix = 0
        best = 0
        for idx, bit in enumerate(nums):
            prefix += 1 if bit else -1
            if prefix == 0:
                # Balanced from the very start: the whole prefix qualifies.
                best = idx + 1
            elif prefix in first_seen:
                best = max(best, idx - first_seen[prefix])
            else:
                first_seen[prefix] = idx
        return best
1a990f3ee0e7688bcf33aa509b3f4dd9126e8205 | Python | JAvito-GC/Linux-Utils | /linux_utils/tabfile.py | UTF-8 | 2,287 | 3.140625 | 3 | [
"MIT"
] | permissive | # linux-utils: Linux system administration tools for Python.
#
# Author: Peter Odding <peter@peterodding.com>
# Last Change: February 9, 2020
# URL: https://linux-utils.readthedocs.io
"""Generic parsing of Linux configuration files like ``/etc/fstab`` and ``/etc/crypttab``."""
# Standard library modules.
import re
# External dependencies.
from property_manager import PropertyManager, mutable_property
# Modules included in our package.
from linux_utils import coerce_context
# Public identifiers that require documentation.
__all__ = (
'TabFileEntry',
'parse_tab_file',
)
def parse_tab_file(filename, context=None, encoding='UTF-8'):
    """
    Parse a Linux configuration file like ``/etc/fstab`` or ``/etc/crypttab``.

    :param filename: The absolute pathname of the file to parse (a string).
    :param context: See :func:`.coerce_context()` for details.
    :param encoding: The name of the text encoding of the file (a string).
    :returns: A generator of :class:`TabFileEntry` objects.

    Comments (a ``#`` up to the end of the line) are stripped, each
    remaining line is split on whitespace, and lines that end up empty
    are skipped.
    """
    context = coerce_context(context)
    text = context.read_file(filename).decode(encoding)
    for number, raw_line in enumerate(text.splitlines(), start=1):
        # Drop everything from the first '#' onward, then tokenize.
        tokens = re.sub('#.*', '', raw_line).split()
        if not tokens:
            continue
        yield TabFileEntry(
            context=context,
            configuration_file=filename,
            line_number=number,
            tokens=tokens,
        )
class TabFileEntry(PropertyManager):
    """Container for the results of :func:`parse_tab_file()`.

    Instances are constructed with keyword arguments matching the
    property names below (see the ``TabFileEntry(...)`` call in
    :func:`parse_tab_file()`).
    """

    @mutable_property
    def context(self):
        """The execution context from which the configuration file was retrieved."""

    @mutable_property
    def configuration_file(self):
        """The name of the configuration file from which this entry was parsed (a string)."""

    @mutable_property
    def line_number(self):
        """The line number from which this entry was parsed (an integer)."""

    @mutable_property
    def tokens(self):
        """The tokens split on whitespace (a nonempty list of strings)."""
| true |
8d128aed3cab93a2eb0ef389b2d23829828f3b55 | Python | gary1346aa/Leetcode-Algo-Training | /797.py | UTF-8 | 460 | 2.890625 | 3 | [] | no_license | class Solution:
def dfs(graph, path, i):
if graph[i] == []:
return [path]
else:
paths = []
for element in graph[i]:
paths += Solution.dfs(graph, path + [element], element)
return paths
    def allPathsSourceTarget(self, graph):
        """
        LeetCode 797: return all paths from node 0 to node n-1 in a DAG
        given as adjacency lists.  Delegates to the recursive dfs helper,
        seeding the path with the source node 0.

        :type graph: List[List[int]]
        :rtype: List[List[int]]
        """
        return Solution.dfs(graph,[0], 0)
778ed05510b6409a5e12cdf81fa366c20d02ae89 | Python | vasalf/mammoth-world | /socium/helpers/generate_mountains.py | UTF-8 | 2,477 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python3
"""This is a file that provides function that generates random mountain massives
It is a part of mammoth-world project
(https://github.com/vasalf/mammoth-world)
"""
import sys
from random import *
import copy
"""A place to import modules is up to thar comment
"""
__author__ = "vasalf"
# generate_massif() recurses once per placed mountain tile, so large maps
# need far more than the default ~1000-frame recursion limit.
sys.setrecursionlimit(100000)
"""That function generates a massif
- amount is number of mountains in the massif
- wmap is a reference to two-dimensional boolean list - a world map
- x, y are coordinates to begin
- directions is a choice of available directions
"""
def generate_massif(amount, wmap, x, y, directions):
    """Grow one mountain massif by random walk, marking tiles in *wmap*.

    - amount: number of mountain tiles still to place
    - wmap: square two-dimensional boolean world map (mutated in place)
    - x, y: coordinates of the current tile
    - directions: candidate (dx, dy) steps; duplicates bias the walk
    Returns the number of tiles actually placed (may be less than
    *amount* if the walk gets boxed in).
    """
    if amount == 0:
        return 0
    candidates = copy.copy(directions)
    while candidates:
        step = sample(candidates, 1)[0]
        dx, dy = step
        nx, ny = x + dx, y + dy
        # Accept the step only if it stays on the map and the tile is free.
        inside = nx >= 0 and ny >= 0 and nx < len(wmap) and ny < len(wmap)
        if inside and not wmap[nx][ny]:
            wmap[nx][ny] = True
            return 1 + generate_massif(amount - 1, wmap, nx, ny, directions)
        # Dead direction: drop it and try another.
        candidates.remove(step)
    return 0
"""That function generates some massives on a world map
- size is a world size
- amount is number of mountains to create
return value is a two-dimensional boolean list
"""
def generate_mountains(size, amount, stat_bar=None):
    """Scatter random mountain massifs over a fresh world map.

    - size: side length of the square world
    - amount: total number of mountain tiles to create
    - stat_bar: optional progress object with update(fraction)/finish()
    Returns a size x size two-dimensional boolean list (True = mountain).
    """
    world = [[False for i in range(size)] for j in range(size)]
    total_requested = amount
    remaining = amount
    while remaining > 0:
        # Massif size scales with how much work is left; at least 6 tiles.
        to_create = 6 * max(int(random() * remaining) // size, 1)
        # Start somewhere in the central 3/4 of the map.
        x = randint(size // 8, 7 * size // 8 - 1)
        y = randint(size // 8, 7 * size // 8 - 1)
        # Bias the walk toward the nearer map edge on each axis.
        dx = -1 if x > size - x else 1
        dy = -1 if y > size - y else 1
        bias = (dx, dy)
        directions = [(0, 1), (0, -1), (1, 0), (-1, 0),
                      (1, 1), (1, -1), (-1, 1), (-1, -1)]
        # Duplicated entries make the biased directions 5x more likely.
        directions += 5 * [bias]
        directions += 5 * [(dx, 0)]
        directions += 5 * [(0, dy)]
        generated = generate_massif(to_create, world, x, y, directions)
        remaining -= generated
        if stat_bar is not None:
            stat_bar.update(generated / total_requested)
    if stat_bar is not None:
        stat_bar.finish()
    return world
| true |
3a4978a30d1f79400702445d73ce8c5574922e5e | Python | dromero23/Python-Automate-The-Boring-Stuff- | /Chapter13/customInvites.py | UTF-8 | 2,231 | 2.59375 | 3 | [] | no_license | #!python3
#customInvites - write a unique invite for each guest from guest.txt file
import logging

import docx
from docx.enum.text import WD_ALIGN_PARAGRAPH
# Configure root logging for debug output; uncomment the next line to
# silence all logging at or below CRITICAL.
logging.basicConfig(level=logging.DEBUG, format=' %(asctime)s - %(levelname)s- %(message)s')
#logging.disable(logging.CRITICAL)
def main():
    """Build guestInvites.docx with one styled invitation page per guest.

    Guest names are read one per line from guests.txt.  Each page holds
    five centered lines alternating between a script font (Brush Script
    MT, 24pt) and Comic Sans MS (20pt), followed by a page break; the
    final trailing page break is blanked out.
    """
    # Read guest names; rstrip() removes the trailing newline/whitespace.
    # (The original never closed this file handle.)
    with open('guests.txt', 'r') as text_file:
        guest_list = [line.rstrip() for line in text_file]

    doc_file = docx.Document()

    def _add_centered_line(text, font_name, font_size):
        """Append a centered, zero-spacing paragraph in the given font."""
        paragraph = doc_file.add_paragraph(text)
        paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
        paragraph.runs[0].font.name = font_name
        paragraph.runs[0].font.size = docx.shared.Pt(font_size)
        paragraph.paragraph_format.space_after = docx.shared.Pt(0)
        return paragraph

    for guest in guest_list:
        _add_centered_line('It would be a pleasure to have the company of',
                           'Brush Script MT', 24)
        _add_centered_line(str(guest), 'Comic Sans MS', 20)
        _add_centered_line('At 11010 Memory Lane on the Evening of',
                           'Brush Script MT', 24)
        # "April 1st" needs two runs so the "st" can be superscripted.
        date_paragraph = doc_file.add_paragraph('April 1')
        date_paragraph.add_run('st')
        date_paragraph.alignment = WD_ALIGN_PARAGRAPH.CENTER
        for run in date_paragraph.runs:
            run.font.name = 'Comic Sans MS'
            run.font.size = docx.shared.Pt(20)
        date_paragraph.runs[1].font.superscript = True
        date_paragraph.paragraph_format.space_after = docx.shared.Pt(0)
        _add_centered_line('At 7 o\'clock', 'Brush Script MT', 24)
        doc_file.add_page_break()

    # Blank out the trailing page break so the document does not end with
    # an empty page (guarded so an empty guest list no longer crashes).
    if doc_file.paragraphs:
        doc_file.paragraphs[-1].text = None
    doc_file.save('guestInvites.docx')
    logging.debug('Complete')
# Run only when executed as a script, not when imported as a module.
if __name__=='__main__':
    main()
e77ba1e4adeb1d24cc32b6186041b4c24785270e | Python | Hsien-Chen/for_python | /pythontrain/YTpython教學/29send-email.py | UTF-8 | 528 | 2.671875 | 3 | [] | no_license |
import email.message
msg = email.message.EmailMessage()
msg["From"]="bigben83530@gmail.com"
msg["To"]="bigben83530@gmail.com"
msg["Subject"]="你好"
#寄送純文字內容
# msg.set_content("測試看看")
#寄送多樣式的內容
msg.add_alternative("<h3>優惠眷</h3>滿五百送一百",subtype="html")
#連線到SMTP Server.驗證寄件人身分並發送郵件
import smtplib
#到網路上搜尋GMAIL SMTP SERVER
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
server.login("bigben83530@gmail.com","bigben106022535")
server.send_message(msg)
server.close()
| true |
b2af44dc7d57ab08f4350a849418a517e121840c | Python | syurskyi/Python_Topics | /125_algorithms/_exercises/templates/_algorithms_challenges/leetcode/LeetcodePythonProject_with_solution/leetcode_0401_0450/LeetCode418_SentenceScreenFitting.py | UTF-8 | 1,303 | 2.640625 | 3 | [] | no_license | '''
Created on Apr 12, 2017
@author: MT
'''
# NOTE(review): this is a fill-in-the-blank exercise template, NOT
# runnable Python.  Keywords and operators have been deliberately elided
# (e.g. "c_" where "class" appears, "___" for "def"/"for", "__" for
# "in"/"if", "r.." for "return"/"range", "w...." for "while").  Left
# as-is so the exercise stays intact.  The underlying solution is
# LeetCode 418 (Sentence Screen Fitting): for every possible starting
# word it precomputes which word starts the next row and how many full
# sentences complete on that row, then walks the rows summing the counts.
c_ Solution(o..
    ___ wordsTyping sentence, rows, cols
        """
        :type sentence: List[str]
        :type rows: int
        :type cols: int
        :rtype: int
        """
        length l..(sentence)
        times [0]*length
        nextInd [0]*length
        ___ i __ r..(length
            ind i
            curLen 0
            t__ 0
            w.... curLen+l..(sentence[ind])<_cols:
                curLen += l..(sentence[ind])+1
                ind += 1
                __ ind __ l..(sentence
                    ind 0
                    t__ += 1
            nextInd[i] ind
            times[i] t__
        ind 0
        res 0
        ___ _ __ r..(rows
            res += times[ind]
            ind nextInd[ind]
        r.. res
    ___ test
        testCases [
            (["hello", "world"], 2, 8),
            (["a", "bcd", "e"], 3, 6),
            (["I", "had", "apple", "pie"], 4, 5),
        ]
        ___ sentence, rows, cols __ testCases:
            print('sentence: %s' % sentence)
            print('rows: %s' % rows)
            print('cols: %s' % cols)
            result wordsTyping(sentence, rows, cols)
            print('result: %s' % result)
            print('-='*20+'-')
__ _____ __ _____
    Solution().test()
| true |