blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
9091f9bb5f2f4751a3eb34e71fed6a2ddc6d61c7 | Python | vipinsoniofficial/prefect-with-python | /sample3.py | UTF-8 | 1,883 | 2.859375 | 3 | [] | no_license | from prefect import task, Flow, Parameter
import datetime
@task
def extract_reference_data():
    """Prefect task: stand-in for the reference-data extract step; returns a dummy value."""
    print("extract_reference_data invoked... extracting the data")
    return 1
@task
def extract_live_data(a, b, c):
    """Prefect task: stand-in for the live-data extract step; echoes its inputs."""
    print("extract_live_data invoked with A,B,C: ", a, b, c)
    return 2
@task
def transform(d):
    """Prefect task: stand-in for the transform step."""
    print("transform invoked with D: ", d)
    return 3
@task
def load_reference_data(e):
    """Prefect task: stand-in for the reference-data load step."""
    print("load_reference_data invoked with E: ", e)
    return 4
@task
def load_live_data(f):
    """Prefect task: stand-in for the live-data load step (returns None)."""
    print("load_live_data invoked with F: ", f)
@task(max_retries=3, retry_delay=datetime.timedelta(seconds=10))
def run_independent_task():
    """Prefect task with a retry policy: up to 3 retries, 10 seconds apart."""
    x = 1
    x = x + 1
    print("***Executing Independent Task***", x)
def main():
    """Build the 'etl' Prefect flow (parameters, extract tasks, an explicit
    chain) and run it once."""
    with Flow("etl") as flow:
        a = Parameter("a", default="[A]")
        b = Parameter("b", default="[B]")
        reference_data = extract_reference_data()
        live_data = extract_live_data(a, b, reference_data)
        # 'task_chain' replaces a local that shadowed the builtin `list`
        task_chain = [extract_reference_data, extract_live_data(a, b, extract_reference_data)]
        flow.chain(task_chain)
        flow.run()
    #flow.visualize()
if __name__ == "__main__":
main()
"""flow = Flow("test")
flow.add_task(extract_reference_data)
flow.add_task(extract_live_data)
flow.run()"""
| true |
86c93b3e5bd7871af8f4472d0793542e2821f52f | Python | ObliviousParadigm/greenest-parts-of-Bangalore | /green.py | UTF-8 | 1,137 | 2.546875 | 3 | [
"MIT"
] | permissive | import cv2
import numpy as np
import matplotlib.pyplot as plt
import glob
import os
path = os.getcwd()
print(path)

# For every JPEG under ./Pics, compute the fraction of pixels whose HSV hue
# falls in OpenCV's "green" band (hue 30-90 on the 0-179 scale) and write
# "<name> <fraction>" lines to greendata.txt.
files = [file for file in glob.glob(path + '/Pics/*.jpg')]

words = []
for filename in files:
    print(filename)
    img = cv2.imread(os.path.join(path, filename))
    # convert to HSV and mask the green hue range
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(hsv, (30, 0, 0), (90, 255, 255))
    imask = mask > 0
    count = np.count_nonzero(imask)
    percentage = count / (img.shape[0] * img.shape[1])
    # reduce the absolute path to a bare file name for the report
    filename = filename.replace(path, '')
    filename = filename.replace(' ', '')
    filename = filename.split(',')[0]
    filename = filename.split('/')[-1]
    words.append(filename + ' ' + str(percentage) + '\n')

# write all results at once; the original opened the handle up front and
# never closed it -- the context manager guarantees the flush/close
with open('greendata.txt', 'w') as report:
    report.writelines(words)
69f93626fb4416c62930594b0068953ba30e3820 | Python | vuonglee99/Chi-em-Skin-Web-app | /flask/App/productModule/cart.py | UTF-8 | 1,541 | 2.703125 | 3 | [] | no_license |
class Cart():
    """One shopping-cart line: a product snapshot tied to a user's cart.

    Field values are copied out of a mapping (e.g. a DB row) at construction
    time; convertToTuple flattens an instance back into the column order used
    by the persistence layer.
    """
    CART_ID: str
    USER_ID: str
    PRODUCT_ID: str
    PRODUCT_NAME: str
    PRODUCT_TYPE: str
    PRODUCT_PRICE: int
    PRODUCT_INFO: str
    PRODUCT_RATING: int
    PRODUCT_TOTAL: int
    PRODUCT_IMAGE: str
    SKINTYPE_NAME: str
    PRODUCT_ORIGIN: str
    PRODUCT_BRAND: str
    PRODUCT_CAPACITY: str
    PRODUCT_AMOUNT: int

    def __init__(self, data):
        """Populate every field from the `data` mapping (keyed by column name)."""
        self.CART_ID = data['CART_ID']
        self.USER_ID = data['USER_ID']
        self.PRODUCT_ID = data['PRODUCT_ID']
        # PRODUCT_TOTAL was assigned twice in the original; deduplicated
        self.PRODUCT_TOTAL = data['PRODUCT_TOTAL']
        self.PRODUCT_NAME = data['PRODUCT_NAME']
        self.PRODUCT_TYPE = data['PRODUCT_TYPE']
        self.PRODUCT_PRICE = data['PRODUCT_PRICE']
        self.PRODUCT_INFO = data['PRODUCT_INFO']
        self.PRODUCT_RATING = data['PRODUCT_RATING']
        self.PRODUCT_IMAGE = data['PRODUCT_IMAGE']
        self.SKINTYPE_NAME = data['SKINTYPE_NAME']
        self.PRODUCT_ORIGIN = data['PRODUCT_ORIGIN']
        self.PRODUCT_BRAND = data['PRODUCT_BRAND']
        self.PRODUCT_CAPACITY = data['PRODUCT_CAPACITY']
        self.PRODUCT_AMOUNT = data['PRODUCT_AMOUNT']

    @classmethod
    def convertToTuple(self, cart):
        """Return cart's fields as a tuple in persistence column order."""
        data = (cart.CART_ID, cart.USER_ID, cart.PRODUCT_ID, cart.PRODUCT_NAME, cart.PRODUCT_TYPE,
                cart.PRODUCT_PRICE, cart.PRODUCT_INFO, cart.PRODUCT_RATING, cart.PRODUCT_TOTAL,
                cart.PRODUCT_IMAGE, cart.SKINTYPE_NAME, cart.PRODUCT_ORIGIN, cart.PRODUCT_BRAND,
                cart.PRODUCT_CAPACITY, cart.PRODUCT_AMOUNT)
        return data
| true |
993d5ddb324b9bd0668a2a9182c8fa02c14d02f2 | Python | uasif13/cs115 | /lab1_asifsolution.py | UTF-8 | 394 | 3.03125 | 3 | [] | no_license | from cs115 import *
import math
def inverse(n):
    """Return the reciprocal of n as a float."""
    reciprocal = 1.0 / n
    return reciprocal
def multiply(a, b):
    """Return the product of a and b."""
    product = a * b
    return product
def factorial(n):
    """Return n! (n factorial); factorial(0) == 1.

    Uses math.factorial instead of reduce(multiply, ...) from the cs115
    helper module: same result for non-negative integers, no custom-module
    dependency, and implemented in C.
    """
    return math.factorial(n)
def e(n):
    """Approximate Euler's number by summing 1/k! for k in 0..n."""
    terms = [inverse(factorial(k)) for k in range(0, n + 1)]
    return sum(terms)
def error(n):
    """Return how far the n-term approximation e(n) falls short of math.e."""
    approximation = e(n)
    return math.e - approximation
| true |
f4465ed5542b2165d1ae04bfb664a2b0ef514c16 | Python | ericlin1001/AutomateTheBoringStuff | /findInAlltxt.py | UTF-8 | 728 | 3.046875 | 3 | [] | no_license | import os, re
def usage():
    """Print command-line usage for this script to stdout."""
    print("""\
Usage: findInAlltxt.py [OPTION]... PATTERN [FILE]...
Try 'findInAlltxt.py --help' for more information.
""");
if len(os.sys.argv) == 1:
    usage()
else:
    # Search every *.txt file under the current directory for the regex
    # given as the first CLI argument; report the first match per file.
    reg = re.compile(os.sys.argv[1])
    isFind = False
    for cf, sub, names in os.walk('.'):
        # 'names' replaces a variable that shadowed the later file handle 'f'
        for name in names:
            name = os.path.join(cf, name)
            if name.endswith(".txt"):
                # the context manager closes the handle even if read/search
                # raises (the original leaked the handle on error)
                with open(name, 'r') as handle:
                    contents = handle.read()
                m = reg.search(contents)
                if m is not None:
                    isFind = True
                    print(name + ":")
                    print("\t" + m.group())
    if not isFind:
        print("Not Found.")
3c1bb7155210f61458cf54587b260f592b5d39d7 | Python | yanmarcossn97/Python-Basico | /Exercicios/exercicio056.py | UTF-8 | 596 | 3.46875 | 3 | [] | no_license | somaidade = 0
# Survey of four people (prompts are Portuguese): accumulates ages into
# 'somaidade' (initialised just above), tracks the oldest man and counts
# women younger than 20.
hmv = ''        # name of the oldest man seen so far (homem mais velho)
mid = 0         # age of the oldest man seen so far
qtmeninas = 0   # count of women younger than 20
for c in range(1, 5):
    print('{}º PESSOA'.format(c))
    nome = str(input('Nome: '))
    idade = int(input('Idade: '))
    somaidade += idade
    sexo = str(input('Sexo[M]/[F]: '))
    # membership in a tuple instead of the string 'Mm': the original
    # `sexo in 'Mm'` also accepted '' and 'Mm' as a male answer
    if sexo in ('M', 'm') and idade > mid:
        hmv = nome
        mid = idade
    print()
    if sexo in ('F', 'f') and idade < 20:
        qtmeninas += 1
mediaidade = somaidade / 4
print('Média de idade do grupo: {} anos.'.format(mediaidade))
print('Homem mais velho: {}'.format(hmv))
print('Qtd de meninas com menos de 20 anos: {}'.format(qtmeninas))
5d8218dcc4b51d31322cb0756bb144168fa5027c | Python | SonaliKalthur/Spark_movie-recommendation | /workload1a-b.py | UTF-8 | 5,627 | 2.796875 | 3 | [] | no_license | # --master yarn-client \
# --num-executors 5 \
# output1a-b.py
from pyspark import SparkContext
import re
#
# ####### function to count users in the ratings.csv
# This function convert entries of movies.csv into key,value pair of the following format
# movie_id -> genre
# since there may be multiple genre per movie, this function returns a list of key,value pair
def pair_movie_to_genre(record):
    """Map one movies.csv line to [(movie_id, genre), ...].

    A movie may carry several pipe-separated genres, so a single input row
    can yield multiple pairs.  Malformed rows yield an empty list.
    """
    try:
        # the alternation keeps quoted fields (titles containing commas) whole
        fields = re.findall('("[^"]+"|[^,]+)', record.strip())
        genres = fields[2].strip().split("|")
        movie_id = fields[0]
        return [(movie_id, genre.strip()) for genre in genres]
    except Exception:
        # narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate
        return []
# This function convert entries of ratings.csv into key,value pair of the following format
# (movie_id, (user_id, 1))
def extract_rating(record):
    """Map one ratings.csv line to (movie_id, (user_id, 1)).

    The trailing 1 is a per-rating counter for later reduceByKey sums.
    Malformed rows yield an empty tuple.
    """
    try:
        user_id, movie_id, rating, timestamp = record.strip().split(",")
        return (movie_id, (user_id, 1))
    except Exception:
        # narrowed from a bare `except:`; only the 4-field unpack can fail here
        return ()
# This function convert entries of ratings.csv into key,value pair of the following format
# user_id -> 1
def count_user(record):
    """Map one ratings.csv line to (user_id, 1) for per-user rating counts.

    Malformed rows yield an empty tuple.
    """
    try:
        user_id, movie_id, rating, timestamp = record.strip().split(",")
        return (user_id, 1)
    except Exception:
        # narrowed from a bare `except:`; only the 4-field unpack can fail here
        return ()
# This function is used by reduceByKey function to sum the count of the same key
# This functions takes in two values - merged count from previous call of sum_rating_count, and the currently processed count
def sum_rating_count(reduced_count, current_count):
    """reduceByKey combiner: add the running total and the new count."""
    total = reduced_count + current_count
    return total
# This functions convert tuples of ((title, genre), count)into key,value pair of the following format
# (genre, user)--> counter
def map_to_pair(record):
    """Re-key (genre, (user, counter)) as ((genre, user), counter)."""
    genre, (user, counter) = record
    return ((genre, user), counter)
# This functions convert tuples of ((genre1,userid1), count) into key,value pair of the following format
# (user, (genre,count))
def rearrange2(record):
    """Re-key ((genre, user), count) as (user, (genre, count))."""
    (genre, user), count = record
    return (user, (genre, count))
# This functions convert tuples of ((genre1,userid1), count) into key,value pair of the following format
# (user, (genre,count))
def rearrangeusers(record):
    """Re-key ((genre, userid), count) as (genre, (userid, count))."""
    (genre, userid), count = record
    return (genre, (userid, count))
def rearrange3(record):
    """Re-key (user, ((genre, countg), countd)) as (genre, (user, (countg, countd)))."""
    user, ((genre, countg), countd) = record
    return (genre, (user, (countg, countd)))
#combiner used for aggregate function
def merge_max_movie(top_movie_list, current_movie_count):
    """aggregateByKey seqFunc: fold one record in, keeping the 5 largest counts."""
    candidates = top_movie_list + [current_movie_count]
    candidates.sort(key=lambda rec: rec[-1], reverse=True)
    return candidates[:5]
#used for aggregate function
def merge_combiners(top_movie_list_1, top_movie_list_2):
    """aggregateByKey combFunc: merge two partial top-5 lists into one top-5."""
    merged = top_movie_list_1 + top_movie_list_2
    merged.sort(key=lambda rec: rec[-1], reverse=True)
    return merged[:5]
#used for aggregate function
def merge_max_movie_u(top_movie_list, current_movie_count):
    """aggregateByKey seqFunc: fold one record in, keeping only the single largest count."""
    candidates = top_movie_list + [current_movie_count]
    candidates.sort(key=lambda rec: rec[-1], reverse=True)
    return candidates[:1]
#combiner used for aggregate function
def merge_combiners_u(top_movie_list_1, top_movie_list_2):
    """aggregateByKey combFunc: merge two partial lists, keeping the single largest count."""
    merged = top_movie_list_1 + top_movie_list_2
    merged.sort(key=lambda rec: rec[-1], reverse=True)
    return merged[:1]
if __name__ == "__main__":
    # Workload 1a: top-5 raters per genre; 1b: the same joined against each
    # user's total rating count across the whole dataset.
    sc = SparkContext(appName="Top 5 Users per Genre ")
    #read ratings.csv to rdd
    ratings = sc.textFile("/share/movie/ratings.csv")
    #read movie_data.csv to rdd
    movie_data = sc.textFile("/share/movie/movies.csv")
    #call function to transform ratings -> movieid, (userid, count)
    user_ratings_count = ratings.map(extract_rating) #.reduceByKey(sum_rating_count)
    #call function to tranform movie_data -> movieid, (title, genre) # genre is broken down by pipe
    movie_genre = movie_data.flatMap(pair_movie_to_genre)
    #call function to join movie_genre with user_ratings_count -> (genre, user), count
    join_users_genre = movie_genre.join(user_ratings_count).values().map(map_to_pair).reduceByKey(sum_rating_count)
    # call function to rearrange (genre, user), count -> genre, (user, count)
    join_users_genre_reduce = join_users_genre.map(rearrangeusers)
    # call function to output top 5 users per genre -> genre1, [(user1, count), (user2, count)...(user5, count)]
    genre_top5_users = join_users_genre_reduce.aggregateByKey([], merge_max_movie, merge_combiners, 1)
    #******* solution for workload1(b)**********
    # call function to count users per dataset -> user, countperdataset
    user_count_value = ratings.map(count_user).reduceByKey(sum_rating_count)
    # rearrange from (genre, user), count -> user, (genre, count)
    rearrange_user = join_users_genre.map(rearrange2)
    # join countofdataset with countofgenre -> user, (genre, countg), countd
    countd_countg_join = rearrange_user.join(user_count_value)
    # -> genre, (user, (countg, countd))
    user_rearrange3 = countd_countg_join.map(rearrange3)
    # NOTE(review): this second aggregate overwrites genre_top5_users from
    # part (a) and reuses the 5-element mergers; the *_u single-element
    # mergers defined above are never used -- confirm which was intended.
    genre_top5_users = user_rearrange3.aggregateByKey([], merge_max_movie, merge_combiners, 1)
    genre_top5_users.saveAsTextFile("Top5MoviesPerGenre_Wholedataset_1")
| true |
4222d441546b0d184825ed91da529913f8a8a1ac | Python | leminhviett/Basic-Algo-DS | /AbstractDataStructure/Linear/LinkedList.py | UTF-8 | 2,226 | 3.75 | 4 | [] | no_license | class Node:
    def __init__(self, data):
        # payload plus forward/backward links; prev is only used by the
        # doubly linked list, singly linked nodes leave it None
        self.data = data
        self.next = None
        self.prev = None
    def __str__(self):
        # render a node as its payload
        return str(self.data)
class SinglyLinkedList:
    """Singly linked list supporting insertion at either end and tail removal."""

    def __init__(self):
        self.head = None

    def insertTail(self, val):
        """Append val at the end of the list."""
        node = Node(val)
        if self.head is None:
            self.head = node
            return
        cursor = self.head
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = node

    def __str__(self):
        """Render the list as 'a-> b-> None'."""
        pieces = []
        cursor = self.head
        while cursor is not None:
            pieces.append(str(cursor.data) + "-> ")
            cursor = cursor.next
        return "".join(pieces) + "None"

    def insertHead(self, val):
        """Prepend val at the front of the list."""
        node = Node(val)
        node.next = self.head
        self.head = node

    def deletionTail(self):
        """Remove the last node; prints a message when the list is empty."""
        if self.head is None:
            print("LinkedList is Empty")
            return
        if self.head.next is None:
            # single element: the list becomes empty
            self.head = None
            return
        cursor = self.head
        while cursor.next.next is not None:
            cursor = cursor.next
        cursor.next = None
# test = SinglyLinkedList()
# print(test)
# test.insertTail(0)
# test.insertTail(1)
# test.insertTail(2)
# print(test)
#
# test.insertHead(-1)
# print(test)
# test.deletionTail()
# print(test)
# test.deletionTail()
# test.deletionTail()
# test.deletionTail()
# test.deletionTail()
class DoublyLinkedList:
    """Doubly linked list; currently supports tail insertion and printing."""

    def __init__(self):
        self.head = None

    def insertTail(self, val):
        """Append val at the end, wiring both next and prev links."""
        node = Node(val)
        if self.head is None:
            self.head = node
            return
        cursor = self.head
        while cursor.next is not None:
            cursor = cursor.next
        cursor.next = node
        node.prev = cursor

    def __str__(self):
        """Render the list as 'a<-> b<-> None'."""
        pieces = []
        cursor = self.head
        while cursor is not None:
            pieces.append(str(cursor.data) + "<-> ")
            cursor = cursor.next
        return "".join(pieces) + "None"
7bd029c4bcd238e2f471818a910717843382162d | Python | PROFX8008/Python-for-Geeks | /Chapter10/casestudy/apiapp/api_app.py | UTF-8 | 2,470 | 2.640625 | 3 | [
"MIT"
] | permissive | #api_app: REST API for student resource
from flask_sqlalchemy import SQLAlchemy
from flask import Flask
from flask_restful import Resource, Api, reqparse
app = Flask(__name__)
api = Api(app)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///student.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
parser = reqparse.RequestParser()
parser.add_argument('name', type=str)
parser.add_argument('grade', type=str)
class Student(db.Model):
    """SQLAlchemy model: a student with an id, required name and optional grade."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), nullable=False)
    grade = db.Column(db.String(20), nullable=True)

    def __repr__(self):
        # The original f-string used unescaped braces, which made the outer
        # field's format spec invalid and raised ValueError whenever repr()
        # was called; doubling the braces renders them literally.
        return f"{{'id':{self.id}, 'name':{self.name},'grade':{self.grade}}}"

    def serialize(self):
        """Return a plain dict suitable for JSON responses."""
        return {
            'id': self.id,
            'name': self.name,
            'grade': self.grade
        }
class StudentDao(Resource):
    """Flask-RESTful resource for a single student (/students/<student_id>)."""

    def get(self, student_id):
        """Return one student as JSON; 404 with a message if absent."""
        student = Student.query.filter_by(id=student_id).\
            first_or_404(description='Record with id={} is not available'.format(student_id))
        return student.serialize()

    def delete(self, student_id):
        """Delete one student; 204 on success, 404 if absent."""
        student = Student.query.filter_by(id=student_id).\
            first_or_404(description='Record with id={} is not available'.format(student_id))
        db.session.delete(student)
        db.session.commit()
        return '', 204

    def put(self, student_id):
        """Partially update name and/or grade from request args; 404 if absent."""
        student = Student.query.filter_by(id=student_id).first_or_404\
            (description='Record with id={} is not available'.format(student_id))
        args = parser.parse_args()
        name = args['name']
        grade = args['grade']
        # only overwrite fields that were actually supplied (non-falsy)
        if (name):
            student.name = name
        if (grade):
            student.grade = grade
        db.session.commit()
        return student.serialize(), 200
class StudentListDao(Resource):
    """Flask-RESTful resource for the student collection (/students)."""

    def get(self):
        """Return all students as a JSON array."""
        students = Student.query.all()
        return [Student.serialize(student) for student in students]

    def post(self):
        """Create a student from the 'name'/'grade' request args and return it."""
        args = parser.parse_args()
        name = args['name']
        grade = args['grade']
        student = Student(name=name, grade=grade)
        db.session.add(student)
        db.session.commit()
        return student.serialize(), 200
api.add_resource(StudentDao, '/students/<student_id>')
api.add_resource(StudentListDao, '/students')
if __name__ == '__main__':
#app.run(debug=True)
app.run(debug=True, host='0.0.0.0', port=8080)
| true |
fb4665948f0570203fcf8ba80c3e6b5f838ac72c | Python | FastdevTeam/ldap-groups-sync-to-gitlab | /git.py | UTF-8 | 10,773 | 2.609375 | 3 | [
"MIT"
] | permissive | import os
import gitlab
import logging
import gitlab.exceptions
from univention import acs
from prettytable import PrettyTable
def git_groups_list(git):
    """Snapshot all GitLab groups with their member usernames.

    Also dumps the snapshot to fake_db/git_origin.txt for later diffing.
    Returns (list of {"group": name, "users": [username, ...]} dicts,
    the raw gitlab group objects).
    """
    groups = git.groups.list(all=True, order_by='name')
    git_group_list = []
    for group_id in groups:
        members = group_id.members.all(all=True)
        tmp = []
        for member_id in members:
            tmp.append(member_id.username)
        git_group_list.append({"group": str(group_id.name), "users": tmp})
    if not os.path.exists("fake_db"):
        os.makedirs("fake_db")
    with open("fake_db/git_origin.txt", "w") as data:
        data.write(str(git_group_list))
    return git_group_list, groups
def root_migration(git_auth):
    """
    Ensure the 'root' account is an OWNER member of every GitLab group.

    Arguments:
        git_auth - authenticated gitlab client object
    """
    groups = git_auth.groups.list(all=True, order_by='name')
    root = git_auth.users.list(search='root')
    if not root:
        logging.warning(" i can't find root!") # here need to send message to slack
    else:
        for group in groups:
            try:
                # membership lookup raises GitlabGetError when root is absent
                group.members.get(root[0].id)
            except gitlab.exceptions.GitlabGetError:
                logging.info("root user [id: {}] was added to group - {}".format(root[0].id, group))
                group.members.create({'user_id': root[0].id,
                                      'access_level': gitlab.OWNER_ACCESS})
def rename_groups(git_item, ldap_item):
    """If the LDAP and GitLab group names match after capitalize(),
    overwrite the GitLab name with the LDAP spelling and save."""
    ldap_name = str(ldap_item['group'])
    if ldap_name.capitalize() != git_item.name.capitalize():
        return
    git_item.name = ldap_name
    git_item.save()
def add_users(ldap_item, git_item, git_auth):
    """Add every user of one LDAP group to the matching GitLab group as DEVELOPER.

    Only acts when the group names match after capitalize().  Users already
    in the group or unknown to GitLab are logged and skipped.
    """
    if str(ldap_item['group']).capitalize() == git_item.name.capitalize():
        for ldap_user in ldap_item['users']:
            try:
                git_user = git_auth.users.list(username=ldap_user)[0]
                git_item.members.create({'user_id': git_user.id,
                                         "access_level": gitlab.DEVELOPER_ACCESS})
            except gitlab.exceptions.GitlabCreateError:
                logging.debug("User {} already exists in Gitlab, nothing to do".format(ldap_user))
                continue
            except IndexError:
                # users.list() came back empty: the account is not in GitLab
                logging.warning("User {} doesn't exist in GitLab, can't be added to group - {}"
                                .format(ldap_user, str(git_item.name)))
                continue
def ldap_git_migration(ldap, git, git_auth):
    """
    Align GitLab with LDAP: rename matching groups to the LDAP spelling and
    add each LDAP group's users to its GitLab counterpart.
    """
    for directory_entry in ldap:
        for gitlab_group in git:
            rename_groups(gitlab_group, directory_entry)
            add_users(directory_entry, gitlab_group, git_auth)
def add_internal_users(ldap_item, group, git_auth):
    """Add one Internal* LDAP group's users to a GitLab group as DEVELOPER.

    Accounts unknown to GitLab and existing members are logged and skipped.
    """
    for ldap_user in ldap_item['users']:
        try:
            git_user = git_auth.users.list(username=ldap_user)[0]
            group.members.create({'user_id': git_user.id,
                                  "access_level": gitlab.DEVELOPER_ACCESS})
        except IndexError:
            # users.list() came back empty: the account is not in GitLab
            logging.warning("User {} doesn't exist in GitLab, can't be added to group - {}"
                            .format(ldap_user, str(group.name)))
            continue
        except gitlab.exceptions.GitlabCreateError:
            logging.debug("User {} already exists in Gitlab, nothing to do".format(ldap_user))
            continue
def internal_group(ldap, git, git_auth):
    """For each LDAP 'Internal<Name>' group, add its users to the GitLab
    group called '<Name>' (the part after the 'Internal' prefix).

    NOTE(review): the 'Internal' check runs on the capitalize()d name but
    the split runs on the raw name, so a group named e.g. 'internalDev'
    would pass the check yet make split(...)[1] raise IndexError -- confirm
    LDAP names always use the exact 'Internal' prefix.
    """
    git_groups = git_auth.groups.list(all=True, order_by='name')
    for ldap_item in ldap:
        for group in git_groups:
            if 'Internal' in ldap_item['group'].capitalize() and \
                    group.name == ldap_item['group'].split('Internal')[1]:
                add_internal_users(ldap_item, group, git_auth)
def remove_members(group, ldap_item, local):
    """
    Remove GitLab group members that are neither in the LDAP group nor in
    the configured admin list (acs()['admins']).

    local: truthy when `group` is a subgroup; a delete failure there usually
    means the membership is inherited from the root group, which is logged
    for manual follow-up.
    """
    members = group.members.all(all=True)
    for member in members:
        if member.username not in ldap_item["users"] and member.username not in acs()['admins'].split(','):
            try:
                group.members.delete(member.id)
            except gitlab.exceptions.GitlabDeleteError:
                if local:
                    logging.info("for subgroup {}, user {} is linked from the root group, check it manually"
                                 .format(ldap_item['group'].split('Internal')[1], member.username))
                continue
def git_ldap_validation(ldap, git_auth):
    """
    Removes users from GitLab groups which are not present in LDAP.

    Internal* LDAP groups validate the GitLab group named after their
    suffix (passing the 'local' subgroup flag); other groups validate the
    GitLab group of the same capitalised name.  Deletion itself is done by
    remove_members.
    """
    git_groups = git_auth.groups.list(all=True, order_by='name')
    for ldap_item in ldap:
        for group in git_groups:
            if 'Internal' in ldap_item['group'] and \
                    group.name == ldap_item['group'].split('Internal')[1]:
                remove_members(group, ldap_item, "local")
            if group.name.capitalize() == ldap_item['group'].capitalize():
                remove_members(group, ldap_item, None)
def enumerate_users(ldap_item, git_item):
    """Return (only_in_ldap, only_in_git) username lists for one group pair.

    Both membership lists are sorted in place first (callers rely on that
    ordering); accounts listed in acs()['admins'] never count as git-only.
    """
    ldap_item["users"].sort()
    git_item["users"].sort()
    missing_from_git = [u for u in ldap_item["users"] if u not in git_item["users"]]
    extra_in_git = [u for u in git_item["users"]
                    if u not in ldap_item["users"] and u not in acs()['admins'].split(',')]
    return missing_from_git, extra_in_git
def dif_groups(ldap, git):
    """Log and save (fake_db/diff_group.txt) a table of GitLab groups that
    have no LDAP counterpart; Internal* LDAP groups count under their
    suffix name."""
    table = PrettyTable(['Id', 'Gitlab_Group', 'Gitlab_Users'], title="These groups don't exist in LDAP")
    count = 0
    # normalised LDAP group names: capitalised, Internal prefix stripped
    l = [str(i['group']).capitalize() for i in ldap]
    for id, i in enumerate(l):
        if 'Internal' in i:
            l[id] = i.split('Internal')[1].capitalize()
    dif = [i for i in git if not i['group'].capitalize() in l]
    for i in dif:
        count += 1
        table.add_row([count, i['group'], i['users']])
    with open("fake_db/diff_group.txt", "w") as f:
        f.write(table.get_string())
    logging.info("\n" + str(table))
def file_writer(file, table):
    """Dump a PrettyTable's rendered text into fake_db/<file>."""
    destination = "fake_db/" + file
    with open(destination, "w") as handle:
        handle.write(table.get_string())
def enum_local_eq_groups(ldap, git, table):
    """Add a table row for every Internal* LDAP group whose (sorted)
    membership differs from the GitLab group of the same suffix name."""
    for directory_entry in ldap:
        full_name = directory_entry['group']
        if 'Internal' not in full_name:
            continue
        short_name = full_name.split('Internal')[1]
        for gitlab_entry in git:
            if gitlab_entry["group"] != short_name:
                continue
            directory_entry["users"].sort()
            gitlab_entry["users"].sort()
            if directory_entry["users"] != gitlab_entry["users"]:
                table.add_row([short_name, gitlab_entry["group"],
                               directory_entry['users'], gitlab_entry['users']])
            break
def enumerate_equal_groups(ldap, git, table):
    """Add comparison rows for LDAP/GitLab group pairs whose names match
    exactly or after capitalize(): a diff row when memberships differ,
    an 'equal'/'equal' row when they agree.

    NOTE(review): the final else arm can never fire (two lists are always
    either == or !=); preserved as dead code from the original.
    """
    for directory_entry in ldap:
        for gitlab_entry in git:
            directory_entry["users"].sort()
            gitlab_entry["users"].sort()
            same_name = (directory_entry["group"] == gitlab_entry["group"]
                         or str(directory_entry["group"]).capitalize() == gitlab_entry["group"].capitalize())
            if same_name:
                if directory_entry["users"] != gitlab_entry["users"]:
                    table.add_row([directory_entry["group"], gitlab_entry["group"],
                                   directory_entry['users'], gitlab_entry['users']])
                    break
                elif directory_entry["users"] == gitlab_entry["users"]:
                    table.add_row([directory_entry["group"], gitlab_entry["group"], "equal", "equal"])
                else:
                    table.add_row([directory_entry["group"], gitlab_entry["group"],
                                   directory_entry['users'], gitlab_entry['users']])
                    break
def enumerate_migration_local_groups(ldap, git, table):
    """Add rows describing pending membership changes for Internal* LDAP groups.

    An exact-name match shows the suffix name alone; a capitalisation-only
    match is shown as 'git_name -> ldap_name' (a pending rename).  Each row
    carries (users to add, users to remove) from enumerate_users.
    """
    for ldap_item in ldap:
        for git_item in git:
            if 'Internal' in ldap_item['group']:
                if git_item["group"] == ldap_item['group'].split('Internal')[1]:
                    users = enumerate_users(ldap_item, git_item)
                    table.add_row([ldap_item['group'].split('Internal')[1],
                                   users[0], users[1]])
                    break
                if git_item["group"].capitalize() == ldap_item['group'].split('Internal')[1].capitalize():
                    users = enumerate_users(ldap_item, git_item)
                    table.add_row([git_item["group"] + " -> " + ldap_item['group'].split('Internal')[1],
                                   users[0], users[1]])
def enumerate_migration_groups(ldap, git, table):
    """Add pending-change rows for exact or capitalisation-only name matches.

    Unlike the *_local_groups variant this returns the table.  The TypeError
    guard logs and moves on when a malformed record breaks the comparison.
    """
    for ldap_item in ldap:
        for git_item in git:
            try:
                if ldap_item["group"] == git_item["group"]:
                    users = enumerate_users(ldap_item, git_item)
                    table.add_row([ldap_item["group"], users[0], users[1]])
                elif str(ldap_item["group"]).capitalize() == git_item["group"].capitalize():
                    users = enumerate_users(ldap_item, git_item)
                    table.add_row(
                        [git_item["group"] + " -> " + str(ldap_item["group"]), users[0], users[1]])
            except TypeError:
                logging.warning("TypeError")
                continue
    return table
def migration_result(ldap, git):
    """Build, save (fake_db/migration_result.txt) and log the table of user
    additions/removals the migration would perform, covering both Internal*
    and regular group matches."""
    table = PrettyTable(["LDAP -> Git", "Users to be added to Git group (if exists in git)", "Users to be removed from "
                                                                                             "Git group"],
                        title="Result after migration")
    enumerate_migration_local_groups(ldap, git, table)
    enumerate_migration_groups(ldap, git, table)
    file_writer('migration_result.txt', table)
    logging.info("\n" + str(table))
def equal_groups(ldap, git):
    """Build, save (fake_db/equal_group.txt) and log the membership-diff
    table for existing LDAP/GitLab group pairs."""
    table = PrettyTable(["LDAP_Group", "GitLab_Group", "LDAP_Users", "Gitlab_Users"],
                        title="Difference between existing groups")
    enum_local_eq_groups(ldap, git, table)
    enumerate_equal_groups(ldap, git, table)
    file_writer('equal_group.txt', table)
    logging.info("\n" + str(table))
def commits_result(git):
    """Build, save (fake_db/last_commits.txt) and log a table of every
    project's most recent commit date, newest first."""
    table = PrettyTable(['Id', 'Project', 'Project_link', 'Last commit'],
                        title="Last commit for each project in GitLab")
    projects = git.projects.list(all=True)
    rows = []
    for project in projects:
        commits = project.commits.list()
        # take the first returned commit if any exist; the original used a
        # for/break loop and pointless [0:] slices for the same effect
        if commits:
            latest = commits[0]
            rows.append([str(project.name), project.web_url,
                         str(latest.committed_date).split('T')[0]])
    # newest date first; the original sort key shadowed the loop variable
    # name 'commit', which this rename avoids
    rows.sort(key=lambda row: row[2], reverse=True)
    for index, row in enumerate(rows, start=1):
        table.add_row([index] + row)
    file_writer('last_commits.txt', table)
    logging.info("\n" + str(table))
18c86f8f49fa3a098c6cbfdfe5c5783cbd047134 | Python | caiqinxiong/python | /day18/bookmanager/app01/models.py | UTF-8 | 1,216 | 2.6875 | 3 | [] | no_license | from django.db import models
class Publisher(models.Model):
    """A book publisher, identified by a unique name."""
    name = models.CharField(max_length=32,unique=True)

    def __str__(self):
        return self.name

    # make repr() match str() for readable shells and logs
    __repr__ = __str__
class Book(models.Model):
    """A book with price and stock/sales counters, linked to its publisher."""
    title = models.CharField(max_length=32)
    price = models.DecimalField(max_digits=5,decimal_places=2) # 999.99
    kucun = models.IntegerField()  # presumably stock on hand ('kucun') -- confirm
    sale = models.IntegerField()   # presumably units sold -- confirm
    pub = models.ForeignKey('Publisher', null=True,on_delete=models.CASCADE,related_name='books',related_query_name='book' ) # pub_id
    # authors = models.ManyToManyField('Author')
    # CASCADE: cascading delete; on_delete is a required argument since Django 2.0
    # SET(1)
    # SET_DEFAULT default=1, default value
    # SET_NULL null=True blank=True
    # DO_NOTHING
    # PROTECT

    def __str__(self):
        return self.title

    __repr__ = __str__
class Author(models.Model):
    """An author with a many-to-many link to books."""
    name = models.CharField(max_length=32)
    books = models.ManyToManyField('Book') # this attribute creates no column on this table, but generates the junction (third) table

    def show_books(self):
        """Return all of the author's book titles joined as '《title》' strings."""
        return ' '.join(["《{}》".format(book.title) for book in self.books.all()])

    def __str__(self):
        return self.name

    __repr__ = __str__
| true |
6beba652236daf8284aad6dbf0d4675baeb0cb50 | Python | alpaziz/soultions | /lecture_5/while_loop_excercise.py | UTF-8 | 417 | 3.703125 | 4 | [] | no_license | # This is your main function
# This is where you declare your variables and use your functions
def main():
    """Print every jelly bean colour, one per line."""
    jelly_beans = ['red', 'yellow', 'purple', 'orange']
    # a direct for-loop replaces the manual while/counter walk, which is the
    # idiomatic way to visit each element
    for bean in jelly_beans:
        print(bean)
# while (stoping_condition == False):
# # Keep iterating
# Call your main function
if __name__ == "__main__":
main() | true |
2ed9e946f963e2d23524fdb8f20b8f5dc310cace | Python | shirayair/window-based-taggging | /plot_graphs.py | UTF-8 | 1,250 | 2.59375 | 3 | [] | no_license | import matplotlib.pyplot as plt
# y_pos_acc_1 = [86.66, 87.21, 88.03, 88.28, 88.30, 88.35, 88.51, 88.38, 88.36, 88.61]
# y_pos_loss_1 = [2.999,2.993 , 2.985, 2.983,2.982 ,2.982 ,2.980 ,2.982 ,2.982 , 2.979]
#
#
# y_pos_acc_3_no_embed = [88.98, 89.05,89.31 ,89.20 ,89.51 , 89.33,89.49 , 89.27,89.22, 89.21]
# x = range(1, 11)
#
# plt.plot(x, y_pos_acc_3_no_embed, '-')
# plt.xlabel('iterations')
# plt.ylabel('accuracy')
# plt.ylim([88.8, 90])
# plt.title('POS accuracy on the dev set')
# plt.legend()
# plt.savefig("pos_acc_3_no_embed.png")
y_ner_acc_1 = [63.75, 67.25, 68.50, 69.80, 70.87, 70.60, 70.14, 70.98, 71.33, 71.57, 72.16,
72.15, 73.09, 72.15, 72.90, 72.39, 72.87, 72.50, 73.20, 73.13, 74.26, 73.53, 73.60,
73.36, 73.81, 73.55, 74.18, 73.58, 73.48, 74.02]
y_ner_loss_1 = [1.204, 1.191, 1.176, 1.183, 1.172, 1.179, 1.173, 1.170, 1.173, 1.175,
1.168, 1.177, 1.167, 1.176, 1.177, 1.168, 1.175, 1.172, 1.171, 1.172,
1.172, 1.169, 1.175, 1.173, 1.174, 1.176, 1.172, 1.178, 1.178, 1.177]
# plot the NER dev-set loss curve over 30 training iterations
x = range(1, 31)
plt.plot(x, y_ner_loss_1, '-')
plt.xlabel('iterations')
plt.ylabel('loss')
plt.ylim([1.14, 1.2])
plt.title('NER loss on the dev set')
plt.legend()  # NOTE(review): no artists carry labels, so this warns and draws nothing -- confirm intent
plt.savefig("ner_loss.png")
| true |
ee91f29d30169a8c2e033960446ccdce03621f2c | Python | pfrank13/gnome_wallpaper_updater | /gnome_wallpaper_updater.py | UTF-8 | 2,121 | 2.671875 | 3 | [] | no_license | #!/usr/bin/python3
from requests import get
import os
import subprocess
import sys
import shutil
DESTINATION_FILE = "latest_wallpaper"
def download(url, file_name):
    """Fetch url over HTTP and save the response body to file_name.

    Raises requests.HTTPError on a non-2xx response instead of silently
    saving an error page as the wallpaper (the original wrote whatever
    bytes came back).
    """
    print(f'Downloading {url} to {file_name}')
    response = get(url)
    response.raise_for_status()
    with open(file_name, "wb") as file:
        file.write(response.content)
def print_lines(lines):
    """Echo each entry of lines on its own stdout line."""
    for entry in lines:
        print(entry)
def execute(cmd):
    """Launch cmd (split on whitespace) and return the Popen handle with
    text-mode stdout/stderr pipes."""
    argv = cmd.split()
    return subprocess.Popen(
        argv,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        universal_newlines=True,
    )
def print_process_output(process):
    """Wait for process to exit, print its captured output, and return the
    exit code.

    The original polled process.poll() in a tight loop, burning CPU until
    the child exited; process.wait() blocks instead.  Output handling is
    unchanged: stdout is always printed, stderr only on failure.
    """
    process_exit_value = process.wait()
    print(f'process_exit_value={process_exit_value}')
    print("STDOUT")
    print_lines(process.stdout.readlines())
    if process_exit_value != 0:
        print("STDERR")
        print_lines(process.stderr.readlines())
    return process_exit_value
# CLI: argv[1] is a wallpaper URL or local path; optional argv[2] picks the
# GNOME picture-option (defaults to 'scaled').  The image is staged in /tmp
# and applied through gsettings.
os.chdir("/tmp")
url = sys.argv[1]
if len(sys.argv) >= 3:
    picture_option = sys.argv[2]
else:
    picture_option = "scaled"
# reset, then set the picture-option, echoing each command's outcome
print(f'reset_picture_option={print_process_output(execute("gsettings reset org.gnome.desktop.background picture-options"))}')
scale_cmd = f'gsettings set org.gnome.desktop.background picture-options \'{picture_option}\''
print(f'scale_cmd={scale_cmd}')
scale_process = execute(scale_cmd)
print_process_output(scale_process)
print(f'picture_option={print_process_output(execute("gsettings get org.gnome.desktop.background picture-options"))}')
# only swap the wallpaper if setting the picture-option succeeded
if scale_process.poll() == 0:
    # local paths are copied; anything starting with http is downloaded
    if not url.startswith("http"):
        shutil.copyfile(url, f'./{DESTINATION_FILE}')
    else:
        download(url, f'./{DESTINATION_FILE}')
    picture_uri_cmd = f'gsettings set org.gnome.desktop.background picture-uri file:///tmp/{DESTINATION_FILE}'
    print(f'picture_uri_cmd={picture_uri_cmd}')
    print_process_output(execute(picture_uri_cmd))
| true |
4734ecaa6b8d6c0585be2e67c7727ff0581e52da | Python | bill1216s/TKURoboSot | /strategy/script/methods/attack.py | UTF-8 | 1,334 | 2.515625 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | #!/usr/bin/env python
from __future__ import print_function
import rospy
import math
from robot.robot import Robot
class Attack(Robot):
    """Attacking behaviours: translate polar targets into velocity commands."""

    def __init__(self):
        # intentionally does not call Robot.__init__; preserved as-is
        pass

    def ClassicAttacking(self, goal_dis, goal_ang):
        """Head straight at the goal: decompose the polar target
        (distance, angle in degrees) into x/y velocities; yaw follows the angle."""
        v_x = goal_dis * math.cos(math.radians(goal_ang))
        v_y = goal_dis * math.sin(math.radians(goal_ang))
        v_yaw = goal_ang
        return v_x, v_y, v_yaw

    def zoneAttacking(self, goal_dis, goal_ang):
        """Zone attack: currently identical to ClassicAttacking, so it
        delegates instead of duplicating the math (kept as a separate entry
        point so zone play can diverge later)."""
        return self.ClassicAttacking(goal_dis, goal_ang)

    def cross_over(self, t, side, run):
        """Compute velocities for a cross-over move toward t[side].

        t:    mapping of targets; t[side] has polar 'dis'/'ang' keys.
        run:  mapping with the current 'yaw' (degrees) used to rotate the
              body-frame vector into the field frame.
        Returns (v_x, v_y, v_yaw, shoot) where shoot is 1 when the target is
        close (<= 200) and well aligned (ang <= 10).
        """
        # NOTE(review): the original stored self.GetRobotInfo() in an unused
        # variable; the call is kept in case the getter has side effects --
        # confirm it is a pure read and drop it.
        self.GetRobotInfo()
        shoot = 0
        go_x = t[side]['dis'] * math.cos(math.radians(t[side]['ang']))
        go_y = t[side]['dis'] * math.sin(math.radians(t[side]['ang']))
        v_x = go_x * math.cos(math.radians(run['yaw'])) - go_y * math.sin(math.radians(run['yaw']))
        v_y = go_x * math.sin(math.radians(run['yaw'])) + go_y * math.cos(math.radians(run['yaw']))
        if t[side]['dis'] > 250:
            v_yaw = t[side]['ang']
        else:
            if t[side]['ang'] > 0:
                v_yaw = -80
            else:
                v_yaw = t[side]['ang']
        if t[side]['dis'] <= 200 and t[side]['ang'] <= 10:
            shoot = 1
        return v_x, v_y, v_yaw, shoot
| true |
2cee2ece56476bc02c71f5677c83d9aaed15a46c | Python | bayersglassey/cartbox | /cartbox/cart/default_data.py | UTF-8 | 2,811 | 2.640625 | 3 | [
"BSD-2-Clause"
] | permissive |
from .models import Category, Product
# Seed data: three categories and their products keyed by PLU/SKU codes,
# consumed by update_or_create_cats_and_prods() to populate the database.
CATS_AND_PRODS = [
    {
        'title': "Fruit",
        'products': [
            # From http://www.bobbywires.com/plu-1.php
            {'sku': '4011', 'title': "Banana Yellow"},
            {'sku': '4101', 'title': "Apple Braeburn"},
            {'sku': '4104', 'title': "Apple Cortland"},
            {'sku': '4107', 'title': "Apple Crab"},
            {'sku': '3010', 'title': "Apple Cripps"},
            {'sku': '3621', 'title': "Mango Francis"},
            {'sku': '4311', 'title': "Mango Green"},
            {'sku': '4051', 'title': "Mango Red"},
        ],
    },
    {
        'title': "Veg",
        'products': [
            # From http://www.bobbywires.com/plu-1.php
            {'sku': '4560', 'title': "Carrots Baby"},
            {'sku': '4562', 'title': "Carrots Loose"},
            {'sku': '4070', 'title': "Celery Bunch"},
            {'sku': '4575', 'title': "Celery Hearts"},
            {'sku': '4552', 'title': "Cabbage Napa"},
            {'sku': '4069', 'title': "Cabbage Green"},
            {'sku': '4554', 'title': "Cabbage Red"},
        ],
    },
    {
        'title': "Meat",
        'products': [
            # From http://gleibermans.com/Meat.pdf
            {'sku': '200029', 'title': "Beef Patties"},
            {'sku': '210027', 'title': "Beef Ribs"},
            {'sku': '210022', 'title': "Beef Stew"},
            {'sku': '210001', 'title': "Ground Beef"},
            {'sku': '210002', 'title': "Ground Beef Lean"},
            {'sku': '210007', 'title': "Ground Veal"},
            {'sku': '200015', 'title': "Marrow Bones"},
            {'sku': '200632', 'title': "Veal Roast"},
            {'sku': '220009', 'title': "Veal Brisket"},
            {'sku': '210014', 'title': "Rib Roast Bone In"},
            {'sku': '210017', 'title': "Top of Rib"},
            {'sku': '200006', 'title': "Whole Brisket"},
        ],
    },
]
def updated_or_created(created, thing):
    """Log whether `thing` was just created or merely updated."""
    action = "Created" if created else "Updated"
    print("{} {}: {}".format(action, thing._meta.verbose_name, thing))
def update_or_create_cats_and_prods(verbose=True):
    """Sync Category and Product rows from CATS_AND_PRODS.

    Returns:
        (categories, products): the model instances touched, in seed order.
    """
    categories = []
    products = []
    for cat_spec in CATS_AND_PRODS:
        category, was_created = Category.objects.update_or_create(
            title=cat_spec['title'])
        if verbose:
            updated_or_created(was_created, category)
        categories.append(category)
        for prod_spec in cat_spec['products']:
            product, was_created = Product.objects.update_or_create(
                sku=prod_spec['sku'],
                defaults=dict(category=category, title=prod_spec['title']))
            if verbose:
                updated_or_created(was_created, product)
            products.append(product)
    return categories, products
| true |
08f974002576134e284e346101df4f877459c172 | Python | AT-aker/-training | /print_students.py | UTF-8 | 205 | 3.15625 | 3 | [] | no_license | import csv
def print_students(*args, **kwargs):
    """Print every data row of student.csv, skipping the header line.

    Positional/keyword arguments are accepted and ignored, preserving the
    original signature for existing callers.
    """
    # newline='' is the documented way to open files for the csv module.
    with open('student.csv', 'r', newline='') as file:
        csv_reader = csv.reader(file)
        # Skip the header; the default of None avoids the StopIteration the
        # original bare next() raised on an empty file.
        next(csv_reader, None)
        for student in csv_reader:
            print(student)
print_students() | true |
d5edf80bed870101c7da0e3f9c9f70b59783fe46 | Python | dudueasy/Python-Practice | /Python basic/购物车_函数.py | UTF-8 | 1,799 | 3.78125 | 4 | [] | no_license | class Product:
def __init__(self, id: object, name: object, price: object) -> object:
self.id = id
self.name = name
self.price = price
def get_money():
    """Prompt for a salary until the user types a non-negative integer.

    Returns:
        int: the salary entered by the user.
    """
    salary = input("please input your salary: ")
    if salary.isdigit():
        return int(salary)
    else:
        print('请输入正确的格式\n')
        # Bug fix: propagate the retry's result. The original dropped the
        # recursive call's return value, so any invalid first attempt made
        # the whole function return None.
        return get_money()
def show_p_info():
    """Print the catalogue: one "id: name $price" line per product."""
    print("您可以购买以下商品:")
    for item in product_list:
        line = item.id + ": " + item.name + " $" + str(item.price)
        print(line)
def purchase(product):
    """Deduct the product's price from the global balance and record it.

    Prints an "insufficient funds" message when the balance is too low.
    """
    global money
    if money < product.price:
        print('买不起, 差$'+str(product.price - money))
        return
    money -= product.price
    cart.append(product.name)
    print('已经购买了'+product.name)
    print('消费了$' + str(product.price))
    print('剩余金额$'+ str(money))
def search_or_quit():
    """Show the catalogue, then sell one item or print the final receipt.

    Recurses until the user types 'q'.
    """
    show_p_info()
    choice = input("请输入您要购买的商品编号, 输入q退出商店: ")
    if choice == 'q':
        print("")
        print('您总计购买了以下商品:')
        for bought_name in cart:
            print(bought_name)
        print('您还有余额$' + str(money))
        print("byebye")
        return
    matched = False
    for item in product_list:
        if item.id == choice:
            matched = item  # last match wins, as before
    if matched:
        purchase(matched)
    else:
        print("没有找到这个商品编号!")
    search_or_quit()
# Initialize the shop: seed catalogue, empty cart, and the user's balance.
iphone = Product('1', 'iphone', 1000)
coffee = Product('2', 'coffee', 30)
book = Product('3', 'book', 50)
condom = Product('4', 'condom', 1)
product_list =[iphone, coffee, book, condom]
cart = []  # names of items bought so far
money = get_money()  # prompts until a valid integer salary is entered
print("您有$" + str(money) +"可以使用")
search_or_quit() | true |
b5c3141e42edb60d15431d423b82aa8ae4c89728 | Python | tigervanilla/Guvi | /player115.py | UTF-8 | 66 | 2.796875 | 3 | [] | no_license | s1,s2=input().split()
# Keep the longer of the two input strings in s1 (ties keep the original s1).
s1=max(s1,s2,key=len)
print(s1[:len(s2)]+s2) | true |
fe3def2794f6edf6dcafd90972d5cfe21faeb002 | Python | JohnCatn/maltgeezers | /app/main/forms.py | UTF-8 | 3,335 | 2.546875 | 3 | [] | no_license | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, BooleanField, SubmitField, TextAreaField, IntegerField,DecimalField, HiddenField, DateTimeField, SelectField, FileField
from wtforms.validators import ValidationError, DataRequired, Email, EqualTo, Length, NumberRange
from flask_wtf.file import FileField, FileAllowed, FileRequired
from app.models import User
from flask_user import UserManager
# Customize the Register form:
from flask_user.forms import RegisterForm
class MaltgeezersRegisterForm(RegisterForm):
    """Registration form extended with the member's first and last name."""
    # First name is mandatory; last name may be left blank.
    first_name = StringField(('First Name'), validators=[DataRequired()])
    last_name = StringField(('Last Name'), validators=[])
# Customize Flask-Userto support registration form
class MaltgeezersUserManager(UserManager):
    """Flask-User manager that swaps in the customized registration form."""
    def customize(self, app):
        # Replace the stock registration form with ours.
        self.RegisterFormClass = MaltgeezersRegisterForm
class EditProfileForm(FlaskForm):
    """Profile editor; rejects a username change that collides with another user."""
    username = StringField('Username', validators=[DataRequired()])
    first_name = StringField('First Name', validators=[DataRequired()])
    last_name = StringField('Last Name')
    about_me = TextAreaField('About me', validators=[Length(min=0, max=140)])
    submit = SubmitField('Submit')
    def __init__(self, original_username, *args, **kwargs):
        """Remember the username the profile had before editing began."""
        super(EditProfileForm, self).__init__(*args, **kwargs)
        self.original_username = original_username
    def validate_username(self, username):
        """WTForms inline validator: fail when the new username is taken."""
        if username.data == self.original_username:
            return  # unchanged username is always acceptable
        existing = User.query.filter_by(username=self.username.data).first()
        if existing is not None:
            raise ValidationError('Please use a different username.')
class ReviewForm(FlaskForm):
    """Per-bottle tasting review: identity, notes and the group's scores."""
    # Hidden fields carry the brand/tasting this review belongs to.
    brand_id = HiddenField('Brand',id="brand_id")
    brand_name = HiddenField('Brand Name',id="brand_name")
    tasting_id = HiddenField('Tasting Date',id="tasting_id")
    order = IntegerField('Bottle Number (1,2,3,...)', validators=[DataRequired()])
    name = StringField('Bottle Name', validators=[DataRequired()])
    age = StringField('Age')
    notes = TextAreaField('Notes', validators=[
        DataRequired(), Length(min=1, max=2000)], id="notes")
    tasting_note = TextAreaField('Tasting', validators=[
        DataRequired(), Length(min=1, max=2000)],id="tasting")
    # All scores are constrained to the 0-10 range.
    max_rating = DecimalField('Max Score', validators=[DataRequired(),NumberRange(0, 10)])
    avg_rating = DecimalField('Average Score', validators=[DataRequired(),NumberRange(0, 10)])
    min_rating = DecimalField('Min Score', validators=[DataRequired(),NumberRange(0,10)])
    image = FileField("Bottle Image (450 x 600 px)", validators=[
        FileAllowed(['jpg', 'png'], 'jpg and png Images only!')])
    submit = SubmitField('Submit')
class TastingForm(FlaskForm):
    """Details of one tasting event: when, which club, where, and headcount."""
    date = DateTimeField('Tasting Date (dd-mm-YYYY HH:MM)', validators=[DataRequired()],format='%d-%m-%Y %H:%M')
    club_id = SelectField('Club',coerce=int, validators=[DataRequired()])
    location = StringField('Location')
    num_attendees = StringField('Number of Attendees')
    submit = SubmitField('Submit')
class ScoreForm(FlaskForm):
    """A single member's score (plus optional note) for one review."""
    review_id = HiddenField('Review',id="review_id")
    score = StringField('Score', validators=[DataRequired()])
    notes = StringField('Note')
    submit = SubmitField('Add')
| true |
a9bff0ed77a15bd7a4296f163e162002f84b5dbf | Python | Kawser-nerd/CLCDSA | /Source Codes/AtCoder/agc004/C/3587472.py | UTF-8 | 441 | 2.890625 | 3 | [] | no_license | h,w = (int(i) for i in input().split())
a = [input() for i in range(h)]
r,b = [["#"] for i in range(h)],[["."] for i in range(h)]
x = ["#","."]
for i in range(h):
for j in range(1,w-1):
if a[i][j]=="#":
r[i].append("#")
b[i].append("#")
else:
r[i].append(x[i%2])
b[i].append(x[1-i%2])
r[i].append(".")
b[i].append("#")
for i in range(h): print("".join(r[i]))
print()
for i in range(h): print("".join(b[i])) | true |
d437c97f60b69ac1f6fd4677bf6d1cd91781c6f5 | Python | acoli-repo/book-gen | /textrank/test/test_keywords.py | UTF-8 | 4,006 | 3.375 | 3 | [
"MIT"
] | permissive | import unittest
from summa.keywords import keywords
from summa.preprocessing.textcleaner import deaccent
from .utils import get_text_from_test_data
class TestKeywords(unittest.TestCase):
def test_text_keywords(self):
text = get_text_from_test_data("mihalcea_tarau.txt")
# Calculate keywords
generated_keywords = keywords(text, split=True)
# To be compared to the reference.
reference_keywords = get_text_from_test_data("mihalcea_tarau.kw.txt").split("\n")
self.assertEqual({str(x) for x in generated_keywords}, {str(x) for x in reference_keywords})
def test_keywords_few_distinct_words_is_empty_string(self):
text = get_text_from_test_data("few_distinct_words.txt")
self.assertEqual(keywords(text), "")
def test_keywords_few_distinct_words_split_is_empty_list(self):
text = get_text_from_test_data("few_distinct_words.txt")
self.assertEqual(keywords(text, split=True), [])
def test_text_summarization_on_short_input_text_and_split_is_not_empty_list(self):
text = get_text_from_test_data("unrelated.txt")
# Keeps the first 8 sentences to make the text shorter.
text = "\n".join(text.split('\n')[:8])
self.assertNotEqual(keywords(text, split=True), [])
def test_text_summarization_on_short_input_text_is_not_empty_string(self):
text = get_text_from_test_data("unrelated.txt")
# Keeps the first 8 sentences to make the text shorter.
text = "\n".join(text.split('\n')[:8])
self.assertNotEqual(keywords(text, split=True), "")
def test_keywords_ratio(self):
text = get_text_from_test_data("mihalcea_tarau.txt")
# Check ratio parameter is well behaved.
# Because length is taken on tokenized clean text we just check that
# ratio 40% is twice as long as ratio 20%
selected_docs_20 = keywords(text, ratio=0.2, split=True)
selected_docs_40 = keywords(text, ratio=0.4, split=True)
self.assertAlmostEqual(float(len(selected_docs_40)) / len(selected_docs_20), 0.4 / 0.2, places=1)
def test_keywords_consecutive_keywords(self):
text = "Rabbit populations known to be plentiful, large, and diverse \
in the area. \
Adjacent to the site, a number number well over a thousand. \
The number of these rabbit populations has diminished in recent \
years, and perhaps we have become number to a number of their \
numbers numbering fewer."
# Should not raise an exception.
self.assertIsNotNone(keywords(text, words=10))
def test_repeated_keywords(self):
text = get_text_from_test_data("repeated_keywords.txt")
kwds = keywords(text)
self.assertTrue(len(kwds.splitlines()))
def test_spanish_without_accents(self):
# Test the keyword extraction with accented characters.
text = get_text_from_test_data("spanish.txt")
kwds = keywords(text, language="spanish", deaccent=True, split=True)
# Verifies that all words are retrieved without accents.
self.assertTrue(all(deaccent(keyword) == keyword for keyword in kwds))
def test_spanish_with_accents(self):
# Test the keyword extraction with accented characters.
text = get_text_from_test_data("spanish.txt")
kwds = keywords(text, language="spanish", deaccent=False, split=True)
# Verifies that there are some keywords are retrieved with accents.
self.assertTrue(any(deaccent(keyword) != keyword for keyword in kwds))
def test_text_as_bytes_raises_exception(self):
# Test the keyword extraction for a text that is not a unicode object
# (Python 3 str).
text = get_text_from_test_data("spanish.txt")
bytes = text.encode(encoding="utf-8")
with self.assertRaises(ValueError):
keywords(bytes, language="spanish")
if __name__ == '__main__':
unittest.main() | true |
af238574bc51fe79ef24d025bb87cfe669aba37a | Python | NetScotte/python | /字典生成/test.py | UTF-8 | 766 | 3.328125 | 3 | [] | no_license |
class test:
def first(self):
i=3
print(self.recursive(i))
def recursive(self,i):
s=0;
if i==0:
return 1
else:
s = i*self.recursive(i-1)
return s
class test1:
def first(self,n=3):
word=''
with open('./allword','a') as f2:
self.recursive(f2,word,n)
def recursive(self,f2,word,n):
if n<=1:
with open('./name','r') as f1:
for i in f1:
f2.write(word+i.strip()+'\n')
return 1
with open('./name','r') as f:
for j in f:
self.recursive(f2,word+j.strip(),n-1)
if __name__=='__main__':
t=test1()
t.first()
| true |
213770fbe71893e791d92768281113d549fa6f23 | Python | FMularski/PandasCourse | /dataframe/02_column_selection.py | UTF-8 | 688 | 3.28125 | 3 | [] | no_license | import pandas as pd
import numpy as np
# %%
df = pd.read_csv('./data/aapl_us_d.csv', index_col=0)
df.columns = ['Open', 'High', 'Low', 'Close', 'Volume']
# %%
print(df.columns)
# %%
open_price = df['Open']
open_price = df.iloc[:, 0]
high_price = df['High']
# %%
close_price = df.Close
print(type(close_price))
for price in close_price:
print(price)
# %%
last_column = df.iloc[:, -1]
# %%
two_cols = df[['Open', 'Close']]
for _open in two_cols['Open']:
print(_open)
# %%
three_cols = df.iloc[:, [0, 3]] # indexes: 0 - open 3 - close
# %%
from_open_to_close = df.iloc[:, 0:4]
# %%
from_open_to_close = df.iloc[:, :-1] # all columns but the last one | true |
2b987dd8761f20bf7abf306e9bbb48d9b407b134 | Python | EduardoSantos7/Algorithms4fun | /Leetcode/500. Keyboard Row/solution.py | UTF-8 | 733 | 3.703125 | 4 | [] | no_license | class Solution:
def findWords(self, words: List[str]) -> List[str]:
row_1 = set(list("qwertyuiop"))
row_2 = set(list("asdfghjkl"))
row_3 = set(list("zxcvbnm"))
ans = []
for word in words:
# Detect the row
target_row = row_1 if word[0].lower() in row_1 else None
target_row = row_2 if word[0].lower() in row_2 else target_row
target_row = row_3 if word[0].lower() in row_3 else target_row
# Check if all the chars are in the same row
i = 0
while i < len(word) and word[i].lower() in target_row:
i += 1
if i == len(word):
ans.append(word)
return ans
| true |
7a18b3cd2f0e39ea1c3dbd690423109255082179 | Python | catticus/Project-Euler | /p007.py | UTF-8 | 154 | 2.765625 | 3 | [] | no_license | #!/usr/bin/env python
from libeuler import is_prime
N = 10001
num = 1
count = 0
while count < N:
num += 1
if is_prime(num):
count += 1
print num
| true |
f5e1de57c6d2247601825f13132629e64c650a08 | Python | apinto25/employees_project | /apps/department/models.py | UTF-8 | 436 | 2.546875 | 3 | [] | no_license | from django.db import models
class Department(models.Model):
name = models.CharField('Name', max_length=50)
short_name = models.CharField('Short name', max_length=20)
class Meta:
verbose_name = 'Department'
verbose_name_plural = 'Departments of the company'
ordering = ['name']
unique_together = ('name', 'short_name')
def __str__(self):
return str(self.id) + '-' + self.name
| true |
ce582403a5ae44620d194bfd159232e7ca1ffd6c | Python | pppk520/scope_workflow_visualizer | /scope_parser/common.py | UTF-8 | 3,892 | 2.578125 | 3 | [] | no_license | from pyparsing import *
class Common(object):
comment = "//" + restOfLine
ident = Group(Word('_<>*' + alphanums)).setName("identifier")
ident_at = '@' + ident
ident_dot = delimitedList(ident, delim='.', combine=True)
ident_float_suffix = '.' + Word(nums) + Optional('F')
ident_val = Combine(Word('- ' + nums) + Optional(ident_float_suffix | 'UL' | 'M'))
value_str = Combine(Group(Optional(oneOf('@@ @')) + (ident_val | quotedString | ident) + Optional('@@')))
quoted_time = Combine('"' + Word(":" + nums) + '"')
ext_quoted_string = quoted_time | quotedString | Word(nums)
param_str_cat = ext_quoted_string + ZeroOrMore('+' + ext_quoted_string)
nullible = Group('(' + ident + '??' + ident + ')')
expr_item_general = quotedString | Word(printables + ' ', excludeChars=':(),+-*/|') | nullible
expr_item_parentheses = '(' + expr_item_general + ')'
expr_item = expr_item_parentheses | expr_item_general
expr = expr_item + ZeroOrMore(oneOf('+ - * / |') + expr_item)
func = Forward()
func_ptr = Forward()
func_param = param_str_cat | expr | ident | Word('- +' + nums)
func_params = func_param + ZeroOrMore(',' + func_param)
param_lambda = Group(Optional('(') + delimitedList(ident) + Optional(')') + '=>' + OneOrMore(func | ident))
func_lambda = Group(delimitedList(ident, delim='.', combine=True) + Group('(' + param_lambda + ')'))
func <<= func_lambda | Group(delimitedList(ident, delim='.', combine=True) + Group('(' + Optional(func | func_params) + ')'))
func_ptr <<= Group(delimitedList(ident, delim='.', combine=True))
# case A().B().C()
func_chain = Combine(Optional('@') + delimitedList(func, delim='.', combine=True))
if __name__ == '__main__':
obj = Common()
print(obj.expr.parseString('(CountryCode[0]<< 8) | CountryCode[1]'))
print(obj.param_lambda.parseString('a => new BidHistory(a)'))
print(obj.func_lambda.parseString('Select(a => new BidHistory(a))'))
print(obj.func.parseString('Select(a => new BidHistory(a))'))
print(obj.func_chain.parseString("History.Split(';').Select(a => new BidHistory(a)).ToList()"))
print(obj.value_str.parseString("6"))
print(obj.expr_item_general.parseString('":00:00"'))
print(obj.func_chain.parseString('DateTime.Parse(@DATE_UTC + " " + @hour + ":00:00")'))
print(obj.func_chain.parseString('DateTime.ParseExact("1", "2", "3")'))
print(obj.func_chain.parseString('DateTime.ParseExact(@DATE_UTC + " 00:00:00", "yyyy-MM-dd HH:mm:ss", System.Globalization.CultureInfo.InvariantCulture)'))
print(obj.func_chain.parseString('@ObjDate.AddDays( - 1)'))
'''
print(obj.func_params.parseString('MinBid * (ExchangeRateUSD ?? 1m) * 100 - 0.01m'))
print(obj.func.parseString("Math.Ceiling(MinBid * (ExchangeRateUSD ?? 1m) * 100 - 0.01m)"))
print(obj.ident.parseString('B.SpendUSD??0'))
print(obj.value_str.parseString("- 1"))
print(obj.param_str_cat.parseString('"2018" + " " + ":00:00" + "20"'))
print(obj.func_params.parseString('"2018" + " " + ":00:00" + "20"'))
print(obj.func.parseString('DateTime.Parse("2018" + " " + "20" + ":00:00")'))
print(obj.func.parseString("Convert.ToUInt32(SuggBid * 100)"))
print(obj.func.parseString("Convert.ToUInt32(1, 2, 3)"))
out = obj.func.parseString('DateTime.Parse("2018" + " " + "20" + ":00:00")')
print(out.asDict())
print(obj.func.parseString("FIRST(YouImpressionCnt)"))
print(obj.func.parseString('ToString("yyyy-MM-dd")'))
print(obj.expr.parseString("SuggBid * 100"))
print(obj.func.parseString("COUNT(DISTINCT (OrderId))"))
print(obj.ident.parseString("100"))
print(obj.value_str.parseString("1.0"))
print(obj.value_str.parseString("1.0F"))
print(obj.ident_val.parseString("1.0F"))
print(obj.value_str.parseString("-1"))
print(obj.value_str.parseString("0UL"))
''' | true |
ceb6fdb5f0933afaa10ab18dbcb7cdeaa5891dbf | Python | 24Jay/qqSpider | /AlbumSpider.py | UTF-8 | 621 | 2.71875 | 3 | [] | no_license | # coding:utf-8
import urllib
import re
def getHtml(url):
page = urllib.urlopen(url)
html = page.read()
return html
def getAlbums(html):
reg='"albummid":.*?"(.*?)",.*?"albumname":.*?"(.*?)".*?,'
pattern = re.compile(reg, re.S)
albums = re.findall(pattern, html)
print "I have get the albums of this singer:", len(albums)
for i in range(0, len(albums)):
print i,"=",albums[i][0],albums[i][1]
html = getHtml("https://y.qq.com/portal/singer/0025NhlN2yWrP4.html#tab=album&")
#html = getHtml("https://y.qq.com/portal/singer/003Nz2So3XXYek.html#tab=album&")
getAlbums(html)
| true |
88948971ee827e7281676ae7bbe333101edd13eb | Python | morningred88/python-projects | /Roommates_Bill/main.py | UTF-8 | 852 | 3.609375 | 4 | [] | no_license | from bill import Bill
from reports import PdfReport
from roommate import Roommate
# Gather the bill details and each roommate's days of occupancy from stdin.
amount = float(input ("Hey user, enter the bill amount $ "))
period = input ("What is the bill period? E.g. December 2020: ")
name1 = input ("What is your name? ")
days_in_house1 = int(input (f"How many days did {name1} stay in the house during the bill period? "))
name2 = input ("What is the name of the other roommate? ")
days_in_house2 = int(input (f"How many days did {name2} stay in the house during the bill period? "))
# Build the domain objects and print each person's share of the bill.
bill = Bill(amount, period)
roommate1 = Roommate(name1, days_in_house1)
roommate2 = Roommate(name2, days_in_house2)
print(f'{roommate1.name} pays {roommate1.pays(bill, roommate2)}')
print(f'{roommate2.name} pays {roommate2.pays(bill, roommate1)}')
# Render a PDF summary named after the billing period.
pdf_report = PdfReport(f"{bill.period}.pdf")
pdf_report.generate(roommate1, roommate2, bill)
| true |
4b8282cdb80a8a633b72e0be1720a5ab78d439a6 | Python | jeroendecroos/kaggle_transfer_learning_on_stack_exchange_tags | /src/room007/exploration/common.py | UTF-8 | 418 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
import re
def tag_in_text(tag, text):
    """True iff `tag` occurs in `text` as a whole word (regex-escaped)."""
    pattern = r'\b' + re.escape(tag) + r'\b'
    return bool(re.search(pattern, text))
def norm_tag(tag):
    """Turn a hyphenated tag into its space-separated form."""
    return ' '.join(tag.split('-'))
def question_mentions_tag(qrec, tag):
    """True iff the normalized tag appears as whole words in the question's
    title or content (checked in that order, short-circuiting)."""
    normed = norm_tag(tag)
    return any(tag_in_text(normed, qrec[field]) for field in ('title', 'content'))
| true |
b2608020a5621b152538b4f641043838ca03a41a | Python | theolamide/Whiteboard | /Python/MaximumSubarray.py | UTF-8 | 1,660 | 3.75 | 4 | [
"MIT"
] | permissive | import collections
def maxSubArray(nums):
    """Print and return the maximum sum over all non-empty contiguous subarrays.

    Uses Kadane's algorithm (O(n) time, O(1) space). The original hand-rolled
    scan dropped positive runs right after a reset (when currentSum was 0,
    newSum equaled initialNumber and triggered an immediate reset), so it
    printed 0 for the classic [-2, 1, -3, 4, -1, 2, 1, -5, 4] example
    instead of 6.

    Args:
        nums: list of numbers; an empty list yields 0 (matching the original,
            whose loop never ran and which printed its 0 initializer).

    Returns:
        The maximum subarray sum. The original returned None, so returning
        the value is backward-compatible; the print is kept for callers that
        relied on the console output.
    """
    if not nums:
        print(0)
        return 0
    best = current = nums[0]
    for value in nums[1:]:
        # Either extend the running subarray or restart it at `value`.
        current = max(value, current + value)
        best = max(best, current)
    print(best)
    return best
# Smoke test: the classic example; the expected maximum subarray sum is 6.
Input = [-2, 1, -3, 4, -1, 2, 1, -5, 4]
maxSubArray(Input)
| true |
546539a4c74136215775fb53f9ecc88891b96d0d | Python | alans2021/CS-1571 | /Assignment3/a3.py | UTF-8 | 4,202 | 3.4375 | 3 | [] | no_license | from tree import Utility
from tree import Choice
from tree import Decision
from probability import BayesNet
from probability import enumeration_ask
from probability import BayesNode
# First create all nodes with connections and probabilities. Then find the value of first decision node
# Does expectimax algorithm do determine action that D1 node should take
def q2():
    """Build the question-2 decision tree and evaluate the root decision.

    Returns a sentence with the expected value of D1 and the action it picks.
    """
    # Leaf utilities: (probability, payoff, label).
    u_resolved_fast = Utility(.1, 100, 'Resolved')
    u_frustrated_redirect = Utility(.3, 5, 'Frustrated')
    u_calm_redirect = Utility(.7, -95, '~Frustrated')
    u_unresolved = Utility(.9, 0, '~Resolved')
    u_resolved_late = Utility(.1, 100, 'Resolved')
    u_frustrated_late = Utility(.3, 10, 'Frustrated')
    u_calm_late = Utility(.7, -90, '~Frustrated')
    # Choice nodes wired to their utility leaves.
    c_redirect_first = Choice('Redirect')
    c_redirect_first.addChildren(u_frustrated_redirect)
    c_redirect_first.addChildren(u_calm_redirect)
    c_redirect_late = Choice('Redirect')
    c_redirect_late.addChildren(u_frustrated_late)
    c_redirect_late.addChildren(u_calm_late)
    c_respond_late = Choice('Respond')
    c_respond_late.addChildren(u_unresolved)
    c_respond_late.addChildren(u_resolved_late)
    c_respond_first = Choice('Respond')
    c_respond_first.addChildren(u_resolved_fast)
    # Second-stage decision, reached when the first response fails.
    d_second = Decision(0.9, '~Resolved')
    d_second.addChildren(c_respond_late)
    d_second.addChildren(c_redirect_late)
    c_respond_first.addChildren(d_second)
    # Root decision: respond or redirect.
    d_root = Decision()
    d_root.addChildren(c_respond_first)
    d_root.addChildren(c_redirect_first)
    return 'Value for D1 is ' + str(d_root.value()) + ' and ' + 'Action from D1 is ' + d_root.action
# Shorthand for the CPT literals below.
T, F = True, False
# Bayes net for the chatbot domain. Each node spec is (name, parents, CPT)
# with an extra trailing value on ConversationLength — presumably the
# variable's arity (it is ternary: 0=Short, 1=Medium, 2=Long); TODO confirm
# against the BayesNode constructor.
chatbot = BayesNet([
    ('Accurate', '', 0.9),
    ('ProblemSize', '', 0.9), # 0.9 is the probability that the problem size is small
    ('ConversationLength', 'ProblemSize', {T: (0.40, 0.40, 0.20), F: (0.20, 0.30, 0.50)}, 3),
    ('Frustrated', 'Accurate ProblemSize ConversationLength',
     {(T, T, 0): 0.20, (T, T, 1): 0.30, (T, T, 2): 0.60,
      (T, F, 0): 0.30, (T, F, 1): 0.60, (T, F, 2): 0.70,
      (F, T, 0): 0.40, (F, T, 1): 0.50, (F, T, 2): 0.80,
      (F, F, 0): 0.50, (F, F, 1): 0.80, (F, F, 2): 0.90}),
    ('Resolved', 'Accurate ConversationLength',
     {(T, 0): 0.30, (T, 1): 0.50, (T, 2): 0.70,
      (F, 0): 0.20, (F, 1): 0.30, (F, 2): 0.40})
])
# Display names used when formatting query results.
psize_dict = {True: 'Small', False: 'Big'}
clength_dict = {0: 'Short', 1: 'Medium', 2: 'Long'}
def q8(variable, conditions, value):
    """Return a formatted sentence with P(variable == value | conditions).

    Args:
        variable: query variable name in the `chatbot` network.
        conditions: evidence dict {var: raw value}; NOT modified.
        value: query value (ints are shown via clength_dict).
    """
    # Run exact inference before any display formatting.
    probdist = enumeration_ask(variable, conditions, chatbot)
    prob = round(probdist.prob[value], 4)
    # Bug fix: build a human-readable *copy* of the evidence instead of
    # rewriting the caller's dict in place — the original clobbered
    # `conditions` with display strings, so reusing the same evidence dict
    # for a second query raised KeyError.
    shown_conditions = {}
    for var, val in conditions.items():
        if var == 'ProblemSize':
            val = psize_dict[val]
        elif var == 'ConversationLength':
            val = clength_dict[val]
        shown_conditions[var] = val
    if type(value) == int:  # deliberately not isinstance: bool is an int subclass
        value = clength_dict[value]
    return 'Probability of ' + variable + ' being ' + str(value) + ' given ' + \
           str(shown_conditions) + ': ' + str(prob)
def q9():
    """Print the full joint distribution of the chatbot Bayes net, one line
    per assignment of (Accurate, ProblemSize, ConversationLength, Resolved,
    Frustrated)."""
    print('Full Joint Distribution of Bayes Net:')
    acc_node = chatbot.variable_node('Accurate')
    size_node = chatbot.variable_node('ProblemSize')
    length_node = chatbot.variable_node('ConversationLength')
    res_node = chatbot.variable_node('Resolved')
    fru_node = chatbot.variable_node('Frustrated')
    bools = [True, False]
    lengths = [0, 1, 2]
    for a in bools:
        p_a = acc_node.p(a, {})
        for s in bools:
            p_s = size_node.p(s, {})
            for c in lengths:
                p_c = length_node.p(c, {'ProblemSize': s})
                for r in bools:
                    p_r = res_node.p(r, {'ConversationLength': c, 'Accurate': a})
                    for f in bools:
                        p_f = fru_node.p(f, {'ProblemSize': s, 'ConversationLength': c, 'Accurate': a})
                        # Chain rule: product of the five conditionals.
                        joint = round(p_a * p_s * p_c * p_r * p_f, 6)
                        print('Probability when Accuracy = ' + str(a) + ', Problem_Size = ' + psize_dict[s] +
                              ', Conversation_Length = ' + clength_dict[c] + ', Resolved = ' + str(r) +
                              ', Frustrated = ' + str(f) + ': ' + str(joint))
if __name__ == '__main__':
    # answer = q2()
    # print(answer)
    #
    # Query the posterior of Resolved given a small problem, then dump the
    # full joint distribution of the network.
    a = q8('Resolved', dict(ProblemSize=True), True)
    print(a)
    q9()
| true |
b5dead5b532d197f842af61ce7b51b2b2eb5d1ac | Python | isun-dev/algorithm-study | /insun/check_ent.py | UTF-8 | 663 | 3.1875 | 3 | [] | no_license | def solution(n, times):
left = 0 # 최 저 시간
answer = right = max(times) * n # 최 대 시간 10 * 60
while left <= right:
complete = 0 # 심사 처리 수
mid = (left + right) // 2 # 이분 탐색, 중간부터 탐색 시작
for r in times:
complete += mid // r
if complete < n: # 처리수가 입국심사를 기다리는 수보다 작을 경우
left = mid + 1 # 중간 값 + 1
else:
right = mid - 1 # 아닐경우 -1
if mid <= answer: # 이전에 저장한 심사시간보다 작을 경우 갱신
answer = mid
return answer | true |
5c98a654f3059ba1e6960123843001d86f3df26d | Python | Prashant-JT/PartitionProblem | /Practica3PR3/PYTHON/PartitionProblemBackTrackingIterator/IteratorCombinaciones.py | UTF-8 | 1,317 | 3.328125 | 3 | [
"MIT"
] | permissive | import sys
def totalValues(combination, num):
    """Sum the entries of `num` whose positions are selected (== 1) in
    `combination`."""
    return sum(num[pos] for pos in range(len(num)) if combination[pos] == 1)
def isValidCombination(combination, num, sum):
    """True when the selected subset's total does not exceed the target.

    NOTE: the parameter name `sum` shadows the builtin; kept for interface
    compatibility with existing callers.
    """
    subset_total = totalValues(combination, num)
    return subset_total <= sum
class IteratorCombinaciones:
    """Iterates binary selection vectors (subsets) of N items, pruning any
    combination whose selected total exceeds the target sum."""
    def __init__(self, N):
        """Start at the all-zeros (empty) combination of length N."""
        self.valorMaximo = 1  # digit upper bound: item selected
        self.valorMinimo = 0  # digit lower bound: item not selected
        self.combination = [0] * N
    def endOfCombinations(self):
        """True once every position is 1, i.e. the final combination."""
        return 0 not in self.combination
    def nextCombination(self, num, suma, found):
        """Advance to the next combination whose total fits within `suma`.

        Returns True when such a combination was reached, False when the
        counter wrapped without finding one.
        """
        for pos in range(len(self.combination)):
            if self.combination[pos] == self.valorMaximo:
                # Carry: clear this digit and move to the next position.
                self.combination[pos] = self.valorMinimo
                continue
            self.combination[pos] = self.combination[pos] + 1
            if isValidCombination(self.combination, num, suma):
                return True
            # Too large: undo the increment and keep going.
            self.combination[pos] = self.valorMinimo
        return False
    def searchedCombination(self, num, previousSum):
        """True when the current combination's total equals `previousSum`;
        implicitly returns None otherwise (original behaviour preserved)."""
        if totalValues(self.combination, num) == previousSum:
            return True
edba721febe2354cbfc4e25407a0004251faf668 | Python | shouwangbuqi/SpEC | /data.py | UTF-8 | 8,902 | 2.984375 | 3 | [] | no_license | from torch.utils.data import Dataset as dt
import numpy as np
import networkx as nx
import torch
#bfs_seq from GraphRNN
def bfs_seq(G, start_id):
    """Return a BFS node ordering of G starting from start_id.

    Each BFS layer is appended to the output exactly once, so the result is
    a permutation of the reachable nodes (the original appended the partial
    layer once per dequeued node, which could duplicate indices, and its
    pop(0)/list-concatenation pattern was quadratic).

    :param G: networkx graph
    :param start_id: node to start the traversal from
    :return: list of node ids in BFS order
    """
    successors = dict(nx.bfs_successors(G, start_id))
    order = [start_id]
    frontier = [start_id]
    while frontier:
        next_level = []
        for node in frontier:
            children = successors.get(node)
            if children is not None:
                next_level.extend(children)
        order.extend(next_level)  # append the completed layer exactly once
        frontier = next_level
    return order
def encode_adj(adj, max_prev_node=10, is_full=False):
    """Encode an adjacency matrix into GraphRNN's truncated sequence form.

    Row t of the result lists, most-recent-first, node t+1's connections to
    its previous `max_prev_node` nodes.

    :param adj: n*n adjacency matrix (rows are time steps)
    :param max_prev_node: how many previous nodes to keep per row
    :param is_full: when True, keep all n-1 previous nodes instead
    :return: (n-1) x max_prev_node array
    """
    if is_full:
        max_prev_node = adj.shape[0] - 1
    # Keep only the strictly-lower triangle, then drop the all-zero first
    # row and last column, leaving an (n-1)*(n-1) matrix.
    lower = np.tril(adj, k=-1)
    size = lower.shape[0]
    lower = lower[1:size, 0:size - 1]
    encoded = np.zeros((lower.shape[0], max_prev_node))
    for row in range(lower.shape[0]):
        window_lo = max(0, row - max_prev_node + 1)
        window_hi = row + 1
        # Right-align the window inside the fixed-width output row.
        pad = max_prev_node + window_lo - window_hi
        encoded[row, pad:max_prev_node] = lower[row, window_lo:window_hi]
        encoded[row, :] = encoded[row, :][::-1]  # most recent connection first
    return encoded
def decode_adj(adj_output):
    """Invert encode_adj: rebuild the full symmetric n*n adjacency matrix.

    note: adj_output has shape (n-1)*m
    """
    window = adj_output.shape[1]
    rows = adj_output.shape[0]
    lower = np.zeros((rows, rows))
    for i in range(rows):
        lo = max(0, i - window + 1)
        hi = i + 1
        pad = window + lo - hi
        # Undo the per-row reversal and right-alignment done by encode_adj.
        lower[i, lo:hi] = adj_output[i, ::-1][pad:window]
    full = np.zeros((rows + 1, rows + 1))
    dim = full.shape[0]
    full[1:dim, 0:dim - 1] = np.tril(lower, 0)
    # Mirror the lower triangle to restore symmetry.
    return full + full.T
def encode_adj_flexible(adj):
    """Variable-length encoding of `adj`: row i holds node i+1's connections
    back to the window that starts at the earliest neighbour seen so far.

    No information is lost, unlike the fixed-width encode_adj.
    :param adj: adjacency matrix
    :return: list of 1-D arrays of varying length
    """
    lower = np.tril(adj, k=-1)
    size = lower.shape[0]
    lower = lower[1:size, 0:size - 1]
    slices = []
    window_start = 0
    for row in range(lower.shape[0]):
        window_end = row + 1
        row_slice = lower[row, window_start:window_end]
        slices.append(row_slice)
        # Shrink the next window to begin at this row's earliest connection.
        # NOTE(review): np.amin raises on a row with no nonzero entry, so the
        # input is presumably BFS-ordered and connected — TODO confirm.
        nonzero_pos = np.nonzero(row_slice)[0]
        window_start = window_end - len(row_slice) + np.amin(nonzero_pos)
    return slices
class Dataset(dt):
def __init__(self, data, max_num_node=None, max_prev_node=None, iteration=10):
'''
['adjacency_matrix', 'features_all', 'labels_all', 'labels_train', 'labels_validation',
'labels_test', 'mask_train', 'mask_validation', 'mask_test']
'''
self.data = data
self.adj_all = []
self.len_all = []
G_list = []
G_list.append(data['adjacency_matrix'].toarray())
for G in G_list:
self.adj_all.append(G)
self.len_all.append(G.shape[0])
if max_num_node is None:
self.n = max(self.len_all)
else:
self.n = max_num_node
if max_prev_node is None:
print('calculating max previous node, total iteration: {}'.format(iteration))
self.max_prev_node = max(self.calc_max_prev_node(iter=iteration))
print('max previous node: {}'.format(self.max_prev_node))
else:
self.max_prev_node = max_prev_node
def __len__(self):
return len(self.adj_all)
def __getitem__(self, idx):
sparse = 5
adj_copy = self.adj_all[idx].copy()
feats_copy = (self.data['features_all'].toarray()).copy()
x_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
x_batch[0, :] = 1 # the first input token is all ones
y_batch = np.zeros((self.n, self.max_prev_node)) # here zeros are padded for small graph
feats_batch = np.zeros((self.n, feats_copy.shape[1])) # here zeros are padded for small graph
# generate input x, y pairs
idx = np.arange(0, adj_copy.shape[0])
x_idx = np.random.permutation(adj_copy.shape[0])
adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
adj_copy_matrix = np.asmatrix(adj_copy)
G = nx.from_numpy_matrix(adj_copy_matrix)
idx = idx[x_idx]
feats_copy = feats_copy[x_idx]
# then do bfs in the permuted G
len_batch = 0
while len_batch < sparse:
start_idx = np.random.randint(adj_copy.shape[0])
x_idx = np.array(bfs_seq(G, start_idx))
len_batch = len(x_idx)
adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
feats_copy = feats_copy[x_idx]
adj_encoded = encode_adj(adj_copy.copy(), max_prev_node=self.max_prev_node)
idx = idx[x_idx]
# get x and y and adj
# for small graph the rest are zero padded
y_batch[0:adj_encoded.shape[0], :] = adj_encoded
x_batch[1:adj_encoded.shape[0] + 1, :] = adj_encoded
feats_batch[0:feats_copy.shape[0], :] = feats_copy
return {'x': x_batch, 'y': y_batch, 'len': len_batch, 'features': feats_batch, 'indices': idx}
    def calc_max_prev_node(self, iter=10, topk=10):
        """Estimate max_prev_node by sampling *iter* random BFS orderings.

        Each sample permutes a random graph, takes a BFS order from a random
        start node and records the longest flexible adjacency encoding; the
        *topk* largest lengths observed are returned (caller takes their max).
        """
        max_prev_node = []
        for i in range(iter):
            # Progress message every iter/5 samples.
            if i % (iter / 5) == 0:
                print('iter {} times'.format(i))
            adj_idx = np.random.randint(len(self.adj_all))
            adj_copy = self.adj_all[adj_idx].copy()
            # print('Graph size', adj_copy.shape[0])
            x_idx = np.random.permutation(adj_copy.shape[0])
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            adj_copy_matrix = np.asmatrix(adj_copy)
            G = nx.from_numpy_matrix(adj_copy_matrix)
            # then do bfs in the permuted G
            start_idx = np.random.randint(adj_copy.shape[0])
            x_idx = np.array(bfs_seq(G, start_idx))
            adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
            # encode adj
            adj_encoded = encode_adj_flexible(adj_copy.copy())
            max_encoded_len = max([len(adj_encoded[i]) for i in range(len(adj_encoded))])
            max_prev_node.append(max_encoded_len)
        # Keep only the topk largest observed encoding lengths.
        max_prev_node = sorted(max_prev_node)[-1 * topk:]
        return max_prev_node
class Dataset_test(dt):
    """Test-time dataset: yields node features re-ordered by a random BFS.

    Unlike the training dataset it does not build x/y adjacency batches; it
    returns only the zero-padded feature matrix, the sequence length and the
    node index permutation.
    """
    def __init__(self, data, max_num_node=None, max_prev_node=None):
        '''
        ['adjacency_matrix', 'features_all', 'labels_all', 'labels_train', 'labels_validation',
        'labels_test', 'mask_train', 'mask_validation', 'mask_test']
        '''
        self.data = data
        self.adj_all = []
        self.len_all = []
        # A single graph: the whole adjacency matrix is treated as one sample.
        G_list = []
        G_list.append(data['adjacency_matrix'].toarray())
        for G in G_list:
            self.adj_all.append(G)
            self.len_all.append(G.shape[0])
        if max_num_node is None:
            self.n = max(self.len_all)
        else:
            self.n = max_num_node
        # At test time the encoding width must match training; no re-estimation.
        if max_prev_node is None:
            raise ValueError('max_prev_node is None')
        else:
            self.max_prev_node = max_prev_node
    def __len__(self):
        return len(self.adj_all)
    def __getitem__(self, idx):
        adj_copy = self.adj_all[idx].copy()
        feats_copy = (self.data['features_all'].toarray()).copy()
        feats_batch = np.zeros((self.n, feats_copy.shape[1])) # here zeros are padded for small graph
        # generate input x, y pairs
        # 'idx' is reused from here on as the running node-index permutation.
        idx = np.arange(0, adj_copy.shape[0])
        x_idx = np.random.permutation(adj_copy.shape[0])
        idx = idx[x_idx]
        adj_copy = adj_copy[np.ix_(x_idx, x_idx)]
        adj_copy_matrix = np.asmatrix(adj_copy)
        G = nx.from_numpy_matrix(adj_copy_matrix)
        # then do bfs in the permuted G
        start_idx = np.random.randint(adj_copy.shape[0])
        x_idx = np.array(bfs_seq(G, start_idx))
        # Nodes the BFS never reached (disconnected from the start node).
        idx_new = np.arange(0, adj_copy.shape[0])
        oth_idx = np.array([x for x in idx_new if x not in x_idx])
        # NOTE(review): comparing a numpy array against [] relies on fragile
        # elementwise semantics; and when oth_idx IS empty, idx is never
        # re-ordered by the BFS permutation — confirm both are intended.
        if (oth_idx != []):
            oth_idx = idx[oth_idx]
            idx = idx[x_idx]
            idx = np.concatenate((idx, oth_idx), axis=0)
        len_batch = len(idx)
        idx = idx.astype(np.int64)
        feats_copy = feats_copy[idx]
        # get x and y and adj
        # for small graph the rest are zero padded
        feats_batch[0:feats_copy.shape[0], :] = feats_copy
        return {'len': len_batch, 'features': feats_batch, 'indices': idx}
| true |
c5ba48f23ee21bfc76aefd4d17b424bd57ca38ca | Python | louistan/grocery-receipt-extractor | /main.py | UTF-8 | 1,479 | 3.21875 | 3 | [] | no_license | #!/usr/bin/env python
from classes import *
from utils import *
def main(argv=None):
    """Parse a supermarket receipt file, build a Receipt and verify its total."""
    receipt_path = "resources/SUPERMARCHE PA.txt"
    if debug:
        print("\n\n****************************************\n* The program is starting\n****************************************")
    # extractData yields every line and every word of the receipt text.
    lines, words = extractData(receipt_path)
    # populateReceipt fills the header fields: merchant, client, date, time,
    # cashNo, cashier, items, value, payment.
    receipt = populateReceipt(lines)
    if debug:
        print("\nHere is your receipt :\n")
        receipt.printReceipt()
    if debug:
        print("\nDEBUG :\n")
    # removeNoise drops single-character artefacts from the raw lines.
    clean_lines = removeNoise(lines)
    # collectItems extracts (name, price) pairs from the cleaned lines.
    grocery_list = collectItems(clean_lines)
    for name_price in grocery_list:
        receipt.items.append(Item(name_price[0], name_price[1]))
    if debug:
        print("\n\nThis is a list of your items :\n")
        for entry in receipt.items:
            entry.printItem()
    # Cross-check the summed item prices against the receipt total.
    verifyAmount(receipt, grocery_list)
    if debug:
        print("\n\n****************************************\n* The program has finished\n****************************************\n")


if __name__ == "__main__":
    main()
| true |
62eb41db2a1ca5e2b9b767e2530779acb71ea29e | Python | Cleber-Woheriton/Python-Mundo-1---2- | /desafioAula053.py | UTF-8 | 924 | 4.40625 | 4 | [] | no_license | #(Desafio 53) Crie um programa que leia uma frase qualquer e diga se ela é um
# polindromo, desconsiderando os espaços.
# Improvements: removed the dead commented-out alternate solution, the unused
# 'tam' variable and the redundant initial 'frase = ""'; fixed the
# "PAINDROMO" typo in the success message; collapsed the three-step
# normalisation into one expression.
import time

print(f'\033[1;30;43m{" Palindromo ":=^60}\033[m')
frase = str(input('Digite uma frase: '))
print('\033[33mLogo saberá se ela é palindromo ou não!\033[m')
time.sleep(2)
# Normalise: drop every whitespace run and ignore letter case.
frase1 = ''.join(frase.split()).lower()
inverso = frase1[::-1]
# A palindrome reads the same forwards and backwards.
if frase1 == inverso:
    print(f'\033[30;42mÉ um PALINDROMO!\033[m')
else:
    print('\033[30;41mNão é um PALINDROMO!\033[m')
print(f'A frase {frase}, invertida é: {inverso}.\nFim!')
| true |
1e6c877a95b2e8caeaceee71a9e37a228d3d80f1 | Python | kendalvictor/harmonizing-tibidabo | /corpus_formats/convert_flnovelties.py | UTF-8 | 4,683 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# /usr/bin/python
# This script adapts de corpus Tibidabo to the new FreeLing tagging tags
import re
import sys
# NOTE: Python 2 script (print statements, dict.has_key). It rewrites the PoS
# tags of an 11-column corpus to FreeLing's newer tagset, using a (word,
# lemma, pos) tagset file as a fallback dictionary.
if __name__ == "__main__":
    # NOTE(review): 'is not' compares object identity, not value — this only
    # works by accident for small ints; it should be '!='.
    if len(sys.argv) is not 1 + 2:
        print "Usage: ./convert_flnovelties.py <corpus> <tagset>"
        exit(1)
    # Tag-class patterns used to decide which rewrite rule applies.
    adjective = re.compile(r'^A.*')
    preposition = re.compile(r'^SPS00$')
    pronoun = re.compile(r'^P.*')
    aq0msp = re.compile(r'AQ0MSP')
    pi0cc00 = re.compile(r'PI0CC00')
    cur_sentence = []
    dictokens = {}
    # parse the tagset of tokens (word, lemma, pos)
    with open(sys.argv[2],'rb') as tagset:
        for line in tagset:
            tag = line.split()
            # if the read line is a token (word, lemma, pos), add it to a dictionary of tuples of tokens
            if len(tag) == 3:
                dictokens[tuple(tag)] = tag
    # parse the corpus
    with open(sys.argv[1],'rb') as corpus:
        for line in corpus:
            token = line.split()
            if len(token) == 11:
                cur_sentence.append(line)
                if 'inadvertido' in token[1]:
                    print line
            # NOTE(review): this rewrite/print/clear pass runs for EVERY input
            # line (it is not under an else of the 11-column check), so each
            # sentence is flushed line by line — confirm the indentation is
            # what the author intended.
            for w in range(0,len(cur_sentence)):
                item = cur_sentence[w].split()
                pos = item[4]
                token = (item[1].lower(),item[2],item[4])
                # specific PoS
                #if 'inadvertido' in item[1]:
                if aq0msp.search(pos):
                    newpos = 'VMP00SM'
                    cur_sentence[w] = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0],item[1],item[2],item[3],
                                                                                        newpos,item[5],item[6],item[7],
                                                                                        item[8],item[9],item[10])
                elif pi0cc00.search(pos):
                    newpos = 'PI0CP00'
                    cur_sentence[w] = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0],item[1],item[2],item[3],
                                                                                        newpos,item[5],item[6],item[7],
                                                                                        item[8],item[9],item[10])
                # adjectives add a zero in the end
                elif adjective.search(pos):
                    newpos = pos+'0'
                    cur_sentence[w] = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0],item[1],item[2],item[3],
                                                                                        newpos,item[5],item[6],item[7],
                                                                                        item[8],item[9],item[10])
                # prepositions loose zeros in the end
                elif preposition.search(pos):
                    newpos = re.sub('S00$','',pos)
                    cur_sentence[w] = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0],item[1],item[2],item[3],
                                                                                        newpos,item[5],item[6],item[7],
                                                                                        item[8],item[9],item[10])
                # pronouns loose a zero in the end
                elif pronoun.search(pos):
                    newpos = re.sub('0$','',pos)
                    cur_sentence[w] = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0],item[1],item[2],item[3],
                                                                                        newpos,item[5],item[6],item[7],
                                                                                        item[8],item[9],item[10])
                # tokens of the corpus with different lemma and pos get the lemma and pos of the dictionary
                # NOTE(review): dict.has_key() was removed in Python 3; use 'token in dictokens'.
                elif dictokens.has_key(token):
                    newtoken = dictokens.get(token)
                    newpos = newtoken[2]
                    if newpos != pos:
                        cur_sentence[w] = '%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n' % (item[0],item[1],item[2],item[3],
                                                                                            newpos,item[5],item[6],item[7],
                                                                                            item[8],item[9],item[10])
                        print 'TO REVIEW ---',pos,newpos,cur_sentence[w]
            # Sentence transformed. Print all sentences of the corpus
            for w in range(0,len(cur_sentence)):
                print cur_sentence[w]
            print '\n'
            cur_sentence = []
| true |
09ef22479d525437cb81f61ad6b2cabde1a6e92d | Python | AkshayPradeep6152/letshack | /Python Programs/matrix_split.py | UTF-8 | 231 | 2.78125 | 3 | [
"MIT"
] | permissive |
import re
# Decode an n-by-m character grid: read it row by row, then walk it column
# by column and clean the resulting string.
n, m = map(int, input().split())
rows = [input()[:m] for _ in range(n)]
# Transpose the grid and flatten it into a single string.
flattened = ''.join(''.join(column) for column in zip(*rows))
# Collapse every run of non-alphanumerics sitting between two alphanumerics
# into a single space.
result = re.sub(r'([a-zA-Z0-9])([^a-zA-Z0-9]+)(?=[a-zA-Z0-9])', r'\1 ', flattened)
print(result)
| true |
989f8b9634159287f9339cd9c013edc8227ec89c | Python | shizihao123/Problems | /Assignment2/SVN.py | UTF-8 | 2,547 | 2.921875 | 3 | [] | no_license | #!/usr/bin/env python
import numpy as np
def svn(data_matrix, k):
    """Projection matrix built from the top-k right singular vectors.

    Returns a (features x k) matrix whose columns are the first k rows of
    V^T from the SVD of *data_matrix*, transposed for use as a projector.
    """
    _, _, vt = np.linalg.svd(data_matrix)
    return vt[:k, :].T
def data_pre(base_path, file_name):
    """Load '<file_name>-train.txt' / '<file_name>-test.txt' CSVs and split labels.

    The label is the last column; it is stripped from both feature matrices
    and returned separately as (train_data, test_data, train_labels,
    test_labels).
    """
    train_data = np.genfromtxt(base_path + file_name + "-train.txt", delimiter=',')
    test_data = np.genfromtxt(base_path + file_name + "-test.txt", delimiter=',')
    label_col = train_data.shape[1] - 1
    train_labels = train_data[:, label_col]
    test_labels = test_data[:, label_col]
    train_data = np.delete(train_data, label_col, axis=1)
    test_data = np.delete(test_data, test_data.shape[1] - 1, axis=1)
    return train_data, test_data, train_labels, test_labels
def _1nn(pj_train_mat, pj_test_mat, train_labels, test_labels):
predict_labels = np.zeros(pj_test_mat.shape[0])
for i in range(0, pj_test_mat.shape[0]):
dis = 0xfffffff
for j in range(0, pj_train_mat.shape[0]):
tmp_dis = np.linalg.norm(pj_test_mat[i] - pj_train_mat[j]) # 求两个向量的欧式距离,即二范数
if tmp_dis < dis: # 记录距离最近的点
dis = tmp_dis
predict_labels[i] = train_labels[j]
same_num = 0 # 利用1NN预测测试集标签并且计算accuracy
for i in range(0, predict_labels.shape[0]):
if predict_labels[i] == test_labels[i]:
same_num += 1
return same_num / test_labels.shape[0]
def run(train_mat, test_mat, train_labels, test_labels, k, data_set_name):
    """Project both splits onto the top-k SVD directions and report 1NN accuracy."""
    projector = svn(train_mat, k)
    accuracy = _1nn(np.dot(train_mat, projector), np.dot(test_mat, projector),
                    train_labels, test_labels)
    print("The accuracy of 1NN prediction of data_set_name", data_set_name, "based on svn to", k, "dimesions is: ", accuracy)
if __name__ == "__main__":
    # Evaluate both datasets at three projection dimensionalities.
    base_path = "/home/jun/Desktop/data mining/Assigment2/"
    for dataset_name in ["sonar", "splice"]:
        splits = data_pre(base_path, dataset_name)
        for dim in [10, 20, 30]:
            run(*splits, dim, dataset_name)
3036036764c45b51b16dea96ebbd7676af1b2b40 | Python | zxallen/scrapy | /day03/demo.py | UTF-8 | 1,219 | 2.984375 | 3 | [
"MIT"
] | permissive | # coding:utf-8
import requests
import re
from openpyxl import Workbook
# openpyxl官方文档:https://openpyxl.readthedocs.io/en/latest/usage.html
class NeiHan(object):
    """Scrape joke snippets from neihanshequ.com and save them to an Excel file.

    openpyxl docs: https://openpyxl.readthedocs.io/en/latest/usage.html
    """
    def __init__(self):
        # Pretend to be a desktop browser so the site serves the normal page.
        self.headers = {
            "User-Agent":"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.162 Safari/537.36"
        }
        self.start_url = "http://neihanshequ.com/"
        self.wk = Workbook()      # workbook that will receive the jokes
        self.ws = self.wk.active  # active worksheet inside that workbook
    def parse(self):
        """Download the landing page and return every data-text snippet found."""
        # Bug fix: the headers dict was passed positionally, which binds it to
        # requests.get's *params* argument; it must go to the *headers* keyword.
        response = requests.get(self.start_url, headers=self.headers)
        content = response.text
        reg = re.compile(r'data-text="(.*?)"')
        jokes_list = reg.findall(content)
        return jokes_list
    def save(self,jokes_list):
        """Append each joke as a one-cell row, then write the workbook to disk."""
        for con in jokes_list:
            print(con)
            detail_list_con = []
            detail_list_con.append(con)
            self.ws.append(detail_list_con)
        self.ws.title = "内涵"
        self.wk.save("./NeiHan.xlsx")
    def run(self):
        """Scrape the page, then persist the results."""
        con_list = self.parse()
        self.save(con_list)
if __name__ == '__main__':
    # Run the scraper end to end: fetch the page, export the spreadsheet.
    NeiHan().run()
| true |
e778b30bd866b49d6601de0bda3c82268ea5cae1 | Python | Antoine07/dev2 | /ex1_list_correction.py | UTF-8 | 562 | 3.421875 | 3 | [] | no_license | data = [(1,2,6,7,8,11), (7,2,10,17,9,121),(0,8,9,6)]
evens = []
odds = []
# Classify every number exactly once: first-seen order, duplicates skipped.
for vect in data:
    for num in vect:
        if num in evens or num in odds:
            continue
        (evens if num % 2 == 0 else odds).append(num)
print(evens)
# Same even numbers via a nested comprehension (duplicates still present)...
evens2 = [num for vect in data for num in vect if num % 2 == 0]
# ...then dedupe: a set keeps one representative per value, and list()
# turns the set back into a list.
evens2 = list(set(evens2))
print(evens2)
7600a2760c0b65c02374809ea82498df0294fc57 | Python | Arayray1/DoorListener | /DoorListener.py | UTF-8 | 1,712 | 2.796875 | 3 | [
"MIT"
] | permissive | from flask import Flask, request, render_template, Response, json
app = Flask(__name__)
import gevent
from gevent.pywsgi import WSGIServer
from gevent import monkey
from werkzeug.serving import run_with_reloader

# Bug fix: patch_all must be *called*. The original bare attribute access
# `monkey.patch_all` was a no-op, so the stdlib was never gevent-patched.
monkey.patch_all()
#Before any data is received it reports that it is uninitialized
# Shared door state, updated by POST /status and streamed to browsers.
current_status = "uninitialized"
#The template for the website
@app.route('/')
def index():
    # Serve the single-page UI from templates/index.html.
    return render_template('index.html')
#Repeat this over and over
# Server-sent-events generator: emits the door status each time it changes,
# polling the shared variable five times a second.
def event():
    local_status = "not here"
    global current_status
    while True:
        if local_status != current_status:
            # yield 'data: ' + json.dumps(current_status) + '\n\n'
            yield 'data: ' + current_status + '\n\n'
            local_status = current_status
        gevent.sleep(0.2)
#Receives data from the BBB; replies 400 when the 'status' form field is missing.
#On success it stores the new status and acknowledges with an empty 200.
@app.route('/status',methods=['POST'])
def handlestatus():
    status = request.form.get('status', None)
    if not status:
        return ('Missing status value', 400)
    print "Got some status: " + status
    global current_status
    current_status = status
    return ('',200)
# Sends data to browser and if it connects prints "A browser is connected"
#Runs the event code, so sends data to website
# Each connected browser gets its own event() generator streamed as SSE.
@app.route('/stream/',methods=['GET','POST'])
def stream():
    print 'A browser is conncected!'
    return Response(event(), mimetype="text/event-stream")
#The WSGIServer is a slave to the system, as it must "serve forever"
@run_with_reloader
def run_server():
    # Binds port 80 on all interfaces; requires root privileges on Linux.
    WSGIServer(('',80), app).serve_forever()
#If it is the main application then it runs the code
if __name__ == "__main__":
    run_server()
| true |
06876dd806dafa8b0a459c540334aa32fd7fca42 | Python | AbdullrahmanHMD/Naive-Bayes-Classifier | /main.py | UTF-8 | 4,103 | 2.9375 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import scipy.linalg as linalg
import math
import pandas as pd
np.random.seed(38)
def safelog(x):
    """Logarithm guarded against log(0) by a tiny additive offset."""
    return np.log(x + 1e-100)
#-------------------------------------------------------------------------------
#--------------------------------Initializing Data------------------------------
def get_data(data_lables):
    """Return the second field of every pair in *data_lables* as a list."""
    return [pair[1] for pair in data_lables]
# Load the letter images and their labels from the homework CSV files.
data_labels = np.genfromtxt("hw03_data_set_labels.csv", dtype = str, delimiter = ',')
data_images = np.genfromtxt("hw03_data_set_images.csv", dtype = int, delimiter = ',')
num_of_classes = 5
# Map the letter labels ('A'..'E') onto integer class ids 0..4.
data_labels = get_data(data_labels)
data_set = np.array([(ord(x) - 65) for x in data_labels])
#-------------------------------------------------------------------------------
#--------------------------------Dividing Data----------------------------------
def get_sets(array):
    """Split samples into train/test sets.

    Samples arrive in 39-per-letter groups: within each group the first 25
    go to training and the remaining 14 to testing. Note the return order:
    (test_set, train_set, training_labels, test_labels).
    """
    train_set, test_set, training_labels, test_labels = [], [], [], []
    for index, sample in enumerate(array):
        position = index % 39  # position inside the current 39-sample group
        if position < 25:
            train_set.append(sample)
            training_labels.append(data_set[index])
        else:
            test_set.append(sample)
            test_labels.append(data_set[index])
    return np.array(test_set), np.array(train_set), np.array(training_labels), np.array(test_labels)
# Unpack the split; note get_sets returns (test, train, train_labels, test_labels).
test_set, training_set, training_labels, test_labels = np.array(get_sets(data_images))
# One-hot encode the labels.
# NOTE(review): the sizes 125 (training) and 195 (total) are hard-coded —
# confirm they match the dataset before reusing this on other data.
onehot_encoded_lables = np.zeros(shape = (125,5))
onehot_encoded_lables[range(125), training_labels] = 1
onehot_encoded_lables2 = np.zeros(shape = (195, 5))
onehot_encoded_lables2[range(195), data_set] = 1
#-------------------------------------------------------------------------------
#--------------------------------Calculating Priors-----------------------------
def prior(class_data):
    """Class priors: each class's size divided by the total sample count."""
    return [len(members) / len(data_set) for members in class_data]
# Each letter occupies a contiguous block of 39 samples in the label array.
classes = [data_set[0:39],data_set[39:78],data_set[78:117],data_set[117:156],data_set[156:195]]
priors = prior(classes)
#-------------------------------------------------------------------------------
#--------------------------------Constructing Pij_Estimator Matrix--------------
def r_k():
    """Per-class training counts: column sums of the one-hot label matrix."""
    return [sum(onehot_encoded_lables[:, col]) for col in range(5)]
# Estimate P(pixel = 1 | class): per-class pixel sums divided by class counts.
pji_matrix = np.dot(training_set.T, onehot_encoded_lables)
rk = r_k()
pji_matrix /= rk
print(pji_matrix[:,4])
#-------------------------------------------------------------------------------
#--------------------------------Prediction Function----------------------------
def predict(x, P_matrix, prior):
    """Bernoulli naive-Bayes log-posterior score of sample *x* for each class."""
    on_pixels = np.dot(x, safelog(P_matrix))
    off_pixels = np.dot(1 - x, safelog(1 - P_matrix))
    return on_pixels + off_pixels + safelog(prior)
def y_predicted(X, P_matrix, prior):
    """Score every sample in *X*: one row per sample, one column per class."""
    scores = [predict(sample, P_matrix, prior) for sample in X]
    return np.array(scores).reshape(len(X), 5)
#-------------------------------------------------------------------------------
#--------------------------------Confusion Matrices-----------------------------
# Confusion matrix on the training set.
y_pred = y_predicted(training_set, pji_matrix, priors[0]) # priors[0] since all priors are the same
# NOTE: 'max' shadows the built-in max() from here on.
max = np.argmax(y_pred, axis = 1)
confusion_matrix = pd.crosstab(max, training_labels, rownames = ['y_pred'], colnames = ['y_truth'])
print(confusion_matrix)
# Confusion matrix on the test set.
y_pred = y_predicted(test_set, pji_matrix, priors[0])
max = np.argmax(y_pred, axis = 1)
confusion_matrix = pd.crosstab(max, test_labels, rownames = ['y_pred'], colnames = ['y_truth'])
print(confusion_matrix)
#-------------------------------------------------------------------------------
#--------------------------------Plotting Letters-------------------------------
# Render each class's estimated pixel probabilities as a 16x20 grayscale image.
fig, letters = plt.subplots(1, 5)
pji_matrix = 1 - pji_matrix # Inverted the pji_matrix because the colors are inverted when printing
for i in range(5):
    letters[i].imshow(pji_matrix[:,i].reshape(16, 20).T, cmap = "gray")
plt.show()
| true |
4fa1a2a3bbb70ccd3cca8ac8bdd4c6c4cfc8dee5 | Python | kremerben/race_director | /results/templatetags/list_filters.py | UTF-8 | 3,284 | 2.625 | 3 | [] | no_license | import os
from django import template
from random import shuffle
import datetime
from results.models import Biathlete
register = template.Library()
@register.filter
def ageclass(age, gender):
    """Map an athlete's age and gender code onto a Biathlete age-class label.

    Gender codes: 'M'/'B' mean male/boys, 'F'/'W'/'G' mean female/women/girls.
    Each branch returns the label of the first age cutoff that *age* falls
    below, reading the cutoffs from the Biathlete model.

    NOTE(review): ages at or above the last cutoff (SMM/SMW) fall through and
    return None implicitly, while an unknown gender returns "" — confirm that
    asymmetry is intended by the templates using this filter.
    """
    if gender in ['M', 'B']:
        if int(age) < Biathlete.BOY10:
            return Biathlete.age_class['BOY10']
        elif int(age) < Biathlete.BOY12:
            return Biathlete.age_class['BOY12']
        elif int(age) < Biathlete.BOY14:
            return Biathlete.age_class['BOY14']
        elif int(age) < Biathlete.BOY16:
            return Biathlete.age_class['BOY16']
        elif int(age) < Biathlete.YM:
            return Biathlete.age_class['YM']
        elif int(age) < Biathlete.JM:
            return Biathlete.age_class['JM']
        elif int(age) < Biathlete.SM:
            return Biathlete.age_class['SM']
        elif int(age) < Biathlete.MM:
            return Biathlete.age_class['MM']
        elif int(age) < Biathlete.SMM:
            return Biathlete.age_class['SMM']
    elif gender in ['F', 'W', 'G']:
        if int(age) < Biathlete.GIRL10:
            return Biathlete.age_class['GIRL10']
        elif int(age) < Biathlete.GIRL12:
            return Biathlete.age_class['GIRL12']
        elif int(age) < Biathlete.GIRL14:
            return Biathlete.age_class['GIRL14']
        elif int(age) < Biathlete.GIRL16:
            return Biathlete.age_class['GIRL16']
        elif int(age) < Biathlete.YW:
            return Biathlete.age_class['YW']
        elif int(age) < Biathlete.JW:
            return Biathlete.age_class['JW']
        elif int(age) < Biathlete.SW:
            return Biathlete.age_class['SW']
        elif int(age) < Biathlete.MW:
            return Biathlete.age_class['MW']
        elif int(age) < Biathlete.SMW:
            return Biathlete.age_class['SMW']
    else:
        return ""
@register.filter
def filename_only(value):
    """Template filter: the bare file name of ``value.file``, directories stripped."""
    directory, name = os.path.split(value.file.name)
    return name
@register.filter
def related_images(list, project):
    """Template filter: keep only the items of *list* attached to *project*."""
    matching = []
    for image in list:
        if image.project == project:
            matching.append(image)
    return matching
@register.filter
def get_range(value):
    """Template filter: expose ``range(value)`` so a template can loop N times.

    Usage::

        <ul>{% for i in 3|get_range %}<li>{{ i }}. Do something</li>{% endfor %}</ul>

    renders three list items numbered 0, 1 and 2. The value may also come
    from a template variable set in the view.
    """
    return range(value)
# @register.filter
# def first(list):
# if list is not None and len(list):
# return list[0]
#
#
# @register.filter
# def suit(list, suit_type):
# return [item for item in list if item.get_suit_display() == suit_type]
#
#
# @register.filter
# def rank(list, rank):
# return [item for item in list if item.rank == rank]
#
#
# @register.filter
# def random(cards):
# newlist = list(cards)
# shuffle(newlist)
# return newlist
#
# @register.filter
# def random2(list):
# newlist = list[:]
# shuffle(newlist)
# return newlist
#
#
# @register.filter
# def dealsrandom(list, amount):
# newlist = list[:]
# shuffle(newlist)
# return newlist[:amount]
#
# @register.filter
# def deals(list, amount):
# return list[:amount]
#
| true |
a3713bdf2467411d0ef22776186bfbc43f57d9e3 | Python | dx886/PY-API-TEST | /zuoye_17/testcase.py | UTF-8 | 882 | 2.546875 | 3 | [] | no_license | import unittest
from ddt import ddt, data
from python_study.zuoye.zuoye_17.zuoye_doexcel_17 import DoExcel
from python_study.zuoye.zuoye_17.zuoye_request_17 import HttpRequest
@ddt
class LoginTest(unittest.TestCase):
    """Data-driven login tests: one test case per row of the 'login' sheet."""
    # Loaded at class-definition time so @data can unpack the case list below.
    excel = DoExcel('testcases.xlsx', 'login')
    datas = excel.get_cases()
    @classmethod
    def setUpClass(cls):
        # One shared HTTP session for every test in the class.
        cls.http_request = HttpRequest()
    @data(*datas)
    def test_login(self, case):
        print(case.title)
        # NOTE(review): eval() on spreadsheet content executes arbitrary code —
        # acceptable only while the test-data workbook is fully trusted.
        resp = self.http_request.Request(case.method, case.url, eval(case.data))
        try:
            self.assertEqual(case.expected, resp.text)
            self.excel.write_result(case.case_id + 1, resp.text, 'pass')
        except AssertionError as e:
            # Record the failure in the workbook, then still fail the test.
            self.excel.write_result(case.case_id + 1, resp.text, 'fail')
            raise e
    @classmethod
    def tearDownClass(cls):
        cls.http_request.close()
| true |
86d0a76bd481a31862970efebc732e02bf15cd22 | Python | Aasthaengg/IBMdataset | /Python_codes/p02927/s015855538.py | UTF-8 | 469 | 3.28125 | 3 | [] | no_license | def divisor(n):
    # Collect each divisor pair (i, n // i) with i running up to sqrt(n);
    # for a perfect square the pair (sqrt(n), sqrt(n)) appears exactly once.
    tank = []
    for i in range(1, int(n**0.5)+1):
        if n%i==0:
            tank.append((i, n//i))
    return tank
# AtCoder "Product Days": read the last month m and last day d of the calendar.
m,d = map(int,input().split())
cnt = 0
# A date (month i, day written "xy") counts when both day digits are >= 2 and
# their product equals the month; divisor() yields every factorisation of i.
for i in range(1,m+1):
    k = divisor(i)
    for x,y in k:
        # NOTE(review): int(str(x)+str(y)) only models a two-digit day when
        # both factors are single digits — confirm the input bounds allow it.
        a = int(str(x)+str(y))
        if a//10>=2 and a%10>=2 and a<=d:
            cnt += 1
        # The mirrored pair (y, x) is a distinct day unless x == y.
        if x!=y:
            b = int(str(y)+str(x))
            if b//10>=2 and b%10>=2 and b<=d:
                cnt += 1
print(cnt)
83d15d9f44e3dd487615e0fb351a835605c39a14 | Python | davidezanella/RISC-emV | /tests/ISA/test_ISA.py | UTF-8 | 1,086 | 2.671875 | 3 | [] | no_license | from riscemv.ISA.ISA import ISA
def test_sym_reg_names():
    # ABI register aliases must decode like numeric names: sp=x2, ra=x1, gp=x3.
    instruction = "add sp, ra, gp"
    res = ISA().instruction_from_str(instruction, None, None)
    assert res.rd == 2
    assert res.rs1 == 1
    assert res.rs2 == 3
def test_r_instruction_from_string():
    # R-type assembly: destination first, then the two source registers.
    instruction = "add r4, r3, r2"
    res = ISA().instruction_from_str(instruction, None, None)
    assert res.rd == 4
    assert res.rs1 == 3
    assert res.rs2 == 2
def test_r_instruction_from_binary():
    # 32-bit R-type word; register fields: rs2=00010(2), rs1=00011(3), rd=00100(4).
    instruction = "00000000001000011001001000110011"
    res = ISA().instruction_from_bin(instruction, 0)
    assert res.rd == 4
    assert res.rs2 == 2
    assert res.rs1 == 3
def test_i_instruction_from_string():
    # I-type assembly: destination, source register, immediate.
    instruction = "addi r4, r3, 12"
    res = ISA().instruction_from_str(instruction, None, None)
    assert res.rd == 4
    assert res.rs == 3
    assert res.imm == 12
def test_i_instruction_from_binary():
    # 32-bit I-type word; imm=000000001100(12), rs=00011(3), rd=00100(4).
    instruction = "00000000110000011000001000010011"
    res = ISA().instruction_from_bin(instruction, 0)
    assert res.rd == 4
    assert res.rs == 3
    assert res.imm == 12
| true |
1465adc695191a3d92715889d88defd442027810 | Python | learlinian/Python-Leetcode-Solution | /200. Number of Islands.py | UTF-8 | 2,018 | 3.46875 | 3 | [] | no_license | class Solution(object):
def numIslands(self, grid):
visited_island_pixel = {} # record all visited pixels which are island ("1")
count = 0 # count the number of islands
# function to find all island pixel ("1") around current pixel
def check_1(i, j):
adjacent = []
for choice in [[i-1, j], [i+1, j], [i, j-1], [i, j+1]]: # 4 directions around the current pixel
row = choice[0]
col = choice[1]
if 0 <= row < len(grid) and 0 <= col < len(grid[0]) and grid[row][col] == '1': # record island pixel in dict
if row not in visited_island_pixel.keys():
visited_island_pixel[row] = [col]
adjacent.append([row, col])
elif col not in visited_island_pixel[row]:
visited_island_pixel[row].append(col)
adjacent.append([row, col])
return adjacent
for i in range(len(grid)):
for j in range(len(grid[0])):
if i not in visited_island_pixel.keys() or j not in visited_island_pixel[i]:
if grid[i][j] == '1':
adjacent_pixels = check_1(i, j)
while adjacent_pixels:
print(visited_island_pixel, adjacent_pixels)
adjacent_pixels = adjacent_pixels + check_1(adjacent_pixels[0][0], adjacent_pixels[0][1])
del adjacent_pixels[0]
count += 1
print(visited_island_pixel)
return count
if __name__ == '__main__':
    # Ring-shaped sample grid: a single connected island.
    sample_grid = [["1","0","1","1","1"],["1","0","1","0","1"],["1","1","1","0","1"]]
    print(Solution().numIslands(sample_grid))
| true |
d176a83a0934084ac24a81f00fc4903911f8aeef | Python | tadanmf/scenario_creater | /scenario_creater/generator/scene.py | UTF-8 | 1,143 | 2.6875 | 3 | [] | no_license | import random
season = ['봄', '여름', '가을', '겨울']
season_period = ['초', '한', '늦']
time = ['새벽', '오전', '오후', '밤', '깊은 밤'] #+ [f'{random.randint(0, 23)}:{random.randint(0, 59):02}']
place_friendliness = ['친숙한', '익숙한', '들어본 적 있는', '낯선', '듣도 보도 못한']
place_size = ['비좁은', '좁은', '적당한 넓이의', '넓은', '드넓은', '광활한']
place_type = ['방 안', '복도', '건물 안', '거리', '도시', '마을', '숲 속', '배 위', '바다 위의 공간']
behavior_daily = ['식사', '수면', '수련', '업무', '대화', '학습', '회의', '이동', '산책']
behavior_nondaily = ['임무', '예배', '성묘', '알현']
behavior_timing = ['후', '직후', '전', '직전', '도중']


def select():
    """Randomly compose one scene: a time phrase, a place phrase and an action."""
    pick = random.choice
    time_phrase = pick(season_period) + pick(season) + ' ' + pick(time)
    place_phrase = ' '.join([pick(place_friendliness), pick(place_size), pick(place_type)])
    action_phrase = pick(behavior_daily + behavior_nondaily) + ' ' + pick(behavior_timing)
    return {
        '시간': time_phrase,
        '장소': place_phrase,
        '행동': action_phrase
    }
| true |
57273e78c213a078266c174f5ca0f59dbe62672a | Python | WCwancheng/Python- | /.github/workflows/Stack.py | UTF-8 | 1,445 | 3.5625 | 4 | [] | no_license | class Stack_ls(object):
    def __init__(self):
        # Internal list; the top of the stack is the end of the list.
        self.__list = []
    def is_empty(self):
        # True when no items remain on the stack.
        return self.__list==[]
    def push(self,item):
        # Append on the right, i.e. place *item* on top of the stack.
        self.__list.append(item)
    def pop(self):
        # Remove and return the top item; None on an empty stack.
        if self.is_empty():
            return
        else:
            return self.__list.pop()
    def top(self):
        # Peek at the top item without removing it; None on an empty stack.
        if self.is_empty():
            return
        else:
            return self.__list[-1]
class Node(object):
    """Singly linked list node: a payload plus a pointer to the next node."""
    def __init__(self,elem):
        self.elem = elem  # stored value
        self.next = None  # following node; None marks the tail
class Stack_Dic(object):
    """LIFO stack backed by a singly linked list of Node objects.

    Bug fixed: the original push() ended with ``self.__head = None`` instead
    of ``self.__head = node``, so every pushed item was immediately lost and
    the stack stayed empty forever.
    """
    def __init__(self):
        self.__head = None  # top of the stack; None when empty
    def is_empty(self):
        """True when the stack holds no items."""
        return self.__head is None
    def push(self,item):
        """Link a new node holding *item* in front of the current head."""
        node = Node(item)
        node.next = self.__head
        self.__head = node
    def pop(self):
        """Unlink and return the top item, or None when the stack is empty."""
        if self.is_empty():
            return
        else:
            p = self.__head
            self.__head = p.next
            return p.elem
stack_ls = Stack_ls()
ls = [1,34,5,25323,62,123,4124]


def test_stack_ls():
    """Push every sample value, then pop them all and print the reversed list."""
    for value in ls:
        stack_ls.push(value)
    drained = []
    while not stack_ls.is_empty():
        drained.append(stack_ls.pop())
    print(drained)
stack_dic = Stack_Dic()


def test_stack_dic():
    """Same drill as test_stack_ls, but against the linked-list stack."""
    for value in ls:
        stack_dic.push(value)
    drained = []
    while not stack_dic.is_empty():
        drained.append(stack_dic.pop())
    print(drained)
def main():
    # NOTE(review): only the list-backed stack is exercised here;
    # test_stack_dic is defined above but never invoked.
    test_stack_ls()


if __name__=='__main__':
    main()
5e2d9f637cc1d63caaa6c9f8ddea5e54cb5d0b18 | Python | hauxir/rapidbay | /app/log.py | UTF-8 | 426 | 2.5625 | 3 | [
"MIT"
] | permissive | import traceback
import settings
def debug(msg):
    """Append *msg* plus a trailing newline to the configured log file."""
    with open(settings.LOGFILE, "a+") as log_file:
        log_file.write(msg + "\n")
def write_log():
    """Append the traceback of the exception currently being handled."""
    with open(settings.LOGFILE, "a+") as log_file:
        log_file.write(traceback.format_exc() + "\n")
def catch_and_log_exceptions(fn):
    """Decorator: run *fn*, log any exception via write_log() and return None
    on failure instead of propagating (deliberate best-effort behaviour).

    Fix: functools.wraps preserves fn's __name__/__doc__ on the wrapper,
    which the original decorator lost.
    """
    from functools import wraps

    @wraps(fn)
    def wrapper(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception:
            write_log()
    return wrapper
| true |
911f7725b57c9a2222488b7b3f1ff178f4f0b072 | Python | RamaKrishna-Suresh/Pythonlab | /2Task7.py | UTF-8 | 644 | 4.1875 | 4 | [] | no_license | # code to draw an arc
# import turtle package
from turtle import *
def arc(t, length, r, angle_user):
    """Draw an approximate circular arc with turtle *t*.

    The arc of radius *r* is approximated by n straight segments of the
    given *length*, turning right by angle_user/n degrees after each one.
    Fix: the rough constant 3.14 is replaced by math.pi.

    NOTE(review): n covers the FULL circumference regardless of angle_user,
    so for partial angles the drawn path is longer than the arc — kept as-is
    since it matches the original behaviour (angle_user=360 gives a circle).
    Also raises ZeroDivisionError when length > circumference (n == 0),
    exactly like the original.
    """
    import math
    n = int((2 * math.pi * r) / length)
    angle = angle_user / n
    for _ in range(n):
        t.fd(length)
        t.rt(angle)
# input parameters
length_input = 1
radius_input = float(input('enter radius '))
angle_input = float(input('enter angle '))
# Creating turtle object and setting delay
bob = Turtle()
# NOTE(review): 'delay' is not a documented Turtle attribute, so this
# assignment likely has no effect — animation speed is normally set via
# bob.speed() or the screen's delay(); confirm intent.
bob.delay = 0.01
# Invoking arc function
arc(bob, length_input, radius_input, angle_input)
# wait for user
mainloop()
| true |
7ad31161213a54c78ea173f8ccfc8b73d09cec79 | Python | antoniorcn/fatec-2020-2s | /djd-prog2/noite-aula6/teste-for.py | UTF-8 | 122 | 3.171875 | 3 | [] | no_license | print("inicio do programa")
# Print the same two lines ten times, then announce the end of the program.
for _ in range(10):
    print("Mario - Kart")
    print(".")
print("fim do programa")
| true |
8380464863b0dbda1ae059a4d9797ea42ff8e83e | Python | Jackerboy/test_ccimp | /CCIMP/externalClass/getRegisterInfo.py | UTF-8 | 831 | 2.859375 | 3 | [] | no_license | import requests,re
import datetime
# Check whether a phone number is already registered via 51talk's Ajax endpoint.
def is_used_phoneNumber(phoneNumber):
    # Validation endpoint URL.
    mobileValiDateUrl = 'http://www.51talk.com/Ajax/homeMobilEvalidate'
    # POST payload: the phone number under test.
    check_mobile = {"mobile": phoneNumber}
    status_code = ''
    try:
        res = requests.request('Post',url=mobileValiDateUrl,data=check_mobile)
        # Registration flag parsed from the body: '1' = unused, '0' = registered.
        # NOTE(review): res.text[10] is a fragile positional parse of the
        # response — any format change breaks it; prefer res.json().
        status_code = res.text[10]
        # Return the flag character.
        return status_code
    except BaseException as e:
        # NOTE(review): BaseException also swallows KeyboardInterrupt and
        # SystemExit; catching requests.RequestException would be safer.
        print('调用短信登录接口不正确!信息:%s'%e)
if __name__ == '__main__':
    mobile = '18611221275'
    # Look up the registration status of this sample phone number.
    user_phone_result = is_used_phoneNumber(mobile)
    print (user_phone_result)
22883fef83958ad97bde4713b9044ade762b2236 | Python | djs21905/beginning_python | /Game.py | UTF-8 | 2,064 | 3.6875 | 4 | [] | no_license | #My first game project
import pygame , time
#intializes pygame
pygame.init()
#Sets the dimensions of the gaming window to a tuple which is immutable
displayWidth = 800
displayHeight = 600
gameDisplay = pygame.display.set_mode((displayWidth,displayHeight))
# Defining colors (R,G,B)
white = (255,255,255)
black = (0,0,0)
red = (255,0,0)
# Default system font, 25 pt, used for the game-over message.
font = pygame.font.SysFont(None,25)
#Functions
def message_to_screen(msg,color):
    # Render *msg* in *color* and blit it roughly at the window centre.
    screen_text = font.render(msg,True,color)
    gameDisplay.blit(screen_text,[displayWidth/2 , displayHeight/2])
#Sets the game window title
pygame.display.set_caption('Slither Me Timbers')
gameExit = False
clock = pygame.time.Clock()
fps = 60
block_size = 10
# The player's block starts at the centre of the window, not moving.
lead_x = displayWidth/2
lead_y = displayHeight/2
lead_x_change = 0
lead_y_change = 0
#Game Loop
while not gameExit:
    #Event Handler
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            gameExit = True
        if event.type == pygame.KEYDOWN:
            # Arrow keys steer the block; it keeps moving until redirected.
            if event.key == pygame.K_LEFT:
                lead_x_change = -2
                lead_y_change = 0
            elif event.key == pygame.K_RIGHT:
                lead_x_change = 2
                lead_y_change = 0
            elif event.key == pygame.K_UP:
                lead_y_change = -2
                lead_x_change = 0
            elif event.key == pygame.K_DOWN:
                lead_y_change = 2
                lead_x_change = 0
    # Leaving the window in any direction ends the game.
    if lead_x >= displayWidth or lead_x <= 0 or lead_y >= displayHeight or lead_y <= 0:
        gameExit = True
    lead_x += lead_x_change # lead_x = lead_x + lead_x_change
    lead_y += lead_y_change
    gameDisplay.fill(red)
    pygame.draw.rect(gameDisplay,black,[lead_x,lead_y,block_size,block_size]) #(where to draw it, color,[x,y,width,height])
    pygame.display.update()
    clock.tick(fps)
# Game over: show the message for two seconds, then shut everything down.
message_to_screen('You lost the game! Good job noob..',white)
pygame.display.update()
time.sleep(2)
#Unanitializes pygame display
pygame.quit()
#Quits python
quit()
| true |
c621e9badb80810e07fed6769aa93fee439e6971 | Python | 13VS/pso-gene-selection | /5fd.py | UTF-8 | 857 | 2.9375 | 3 | [] | no_license | import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
# Leave-5-out cross validation comparing KNN and SVM on data.csv:
# the last column is the label, everything before it is a feature.
f = pd.read_csv('data.csv')
X = f.iloc[:,:f.shape[1]-1]
y = f.iloc[:,f.shape[1]-1]
# Each k indexes one 5-row fold; any trailing rows beyond a full fold are skipped.
k_range = range(1,int(len(X.index)/5))
scores = []
scores2 = []
for k in k_range:
    # NOTE(review): k is the fold index, not the neighbour count —
    # n_neighbors is fixed at 2 for every fold; confirm this is intended.
    knn = KNeighborsClassifier(n_neighbors = 2)
    # Rows (k-1)*5 .. k*5-1 form the held-out fold; the rest is training data.
    X_test = X.iloc[(k - 1)*5:k*5]
    y_test = y.iloc[(k - 1)*5:k*5]
    j=(k-1)*5
    X_train=X.drop(X.index[[j,j+1,j+2,j+3,j+4]])
    y_train=y.drop(y.index[[j,j+1,j+2,j+3,j+4]])
    knn.fit(X_train, y_train)
    a=knn.score(X_test, y_test)
    scores.append([k-1,a])
    # Same split evaluated with a default-parameter SVC for comparison.
    sv = svm.SVC()
    sv.fit(X_train, y_train)
    a = sv.score(X_test, y_test)
    scores2.append([k-1,a])
# Sort each [fold, accuracy] pair list best-accuracy-first before printing.
scores=sorted(scores, key=lambda x: x[1], reverse=True)
scores2=sorted(scores2, key=lambda x: x[1], reverse=True)
print("knn")
print(scores)
print("svm")
print(scores2)
| true |
9eb8497042b5073fb2de069961193e16dafb6046 | Python | sherryxiata/zcyNowcoder | /NowcoderPattern/test0.py | UTF-8 | 277 | 2.640625 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/8/7 20:57
# @Author : wenlei
# Read a test count, an integer array and three parameters from stdin.
n = int(input())
arr = list(map(int, input().split()))
a, b, c = map(int, input().split())
# NOTE(review): each iteration re-reads `length` and `arr` but computes
# nothing with them — this looks like an unfinished solution stub.
for _ in range(n):
    length = int(input())
    arr = list(map(int, input().split()))
| true |
c2c6b542bb54a52b8f80433fad99364647addb99 | Python | ashwinik01/HDPCD | /Pyspark/train_2502.py | UTF-8 | 224 | 2.671875 | 3 | [] | no_license | from pyspark import SparkContext , SparkConf
# Minimal Spark smoke test: build a local context, parallelize a small
# list into an RDD and print its first three elements.
conf = SparkConf().setAppName('Test')
sc = SparkContext(conf=conf)
sc.setLogLevel("WARN")
numbers =[1,2,3,4,5,6]
numbersRDD = sc.parallelize(numbers)
print(numbersRDD.take(3))
| true |
32a784f837019f073c8dc35b5ffaef64ce798f8d | Python | oskarsinger/dog-cca | /dogcca/appgrad/two_view.py | UTF-8 | 5,220 | 2.515625 | 3 | [] | no_license | import numpy as np
import utils as agu
import global_utils as gu
from optimization.optimizers import GradientOptimizer as GO
from optimization.utils import get_gram
from data.servers.gram import BoxcarGramServer as BCGS, BatchGramServer as BGS
class AppGradCCA:
    """Two-view CCA solved with alternating projected gradient steps (AppGrad).

    Phi is the canonical basis for view X and Psi the basis for view Y; the
    `unn_` prefix marks the unnormalized iterates that gradient steps update
    before Gram-normalization.

    Fixes in this revision:
    - np.thelineg.norm -> np.linalg.norm (typo raised AttributeError whenever
      fit() ran with verbose=True)
    - Y.shap[0] -> Y.shape[0] in _init_data (typo raised AttributeError when
      Y held more examples than X in batch mode)
    - print statements rewritten as single-argument print(...) calls, which
      produce identical output under both Python 2 and Python 3.
    """

    def __init__(self,
        k=1,
        online=False,
        eps1=10**(-3), eps2=10**(-3)):
        # k: number of canonical components to recover.
        # online: when True, resample minibatches and refresh Gram matrices
        # every iteration (SGD); otherwise run full-batch.
        # eps1/eps2: convergence tolerances for the X-view and Y-view bases.
        self.k = k
        self.online = online
        self.eps1 = eps1
        self.eps2 = eps2

        self.has_been_fit = False
        self.Phi = None
        self.unn_Phi = None
        self.Psi = None
        self.unn_Psi = None
        self.history = []

    def fit(self,
        X_ds, Y_ds,
        X_gs=None, Y_gs=None,
        X_optimizer=None, Y_optimizer=None,
        eta1=0.001, eta2=0.001,
        verbose=False,
        max_iter=10000):
        # X_ds/Y_ds: data servers for the two views; X_gs/Y_gs: Gram servers
        # (boxcar for online mode, batch otherwise); eta1/eta2: base step
        # sizes, decayed as eta / sqrt(iteration).
        if X_optimizer is None:
            X_optimizer = GO(verbose=verbose)

        if Y_optimizer is None:
            Y_optimizer = GO(verbose=verbose)

        if X_gs is None:
            X_gs = BCGS() if self.online else BGS()

        if Y_gs is None:
            Y_gs = BCGS() if self.online else BGS()

        (X, Sx, Y, Sy) = self._init_data(X_ds, Y_ds, X_gs, Y_gs)

        print("Getting initial basis estimates")

        # Randomly initialize normalized and unnormalized canonical bases for
        # timesteps t and t+1. Phi corresponds to X, and Psi to Y.
        basis_pairs = agu.get_init_basis_pairs([Sx, Sy], self.k)
        (Phi_t, unn_Phi_t) = basis_pairs[0]
        (Psi_t, unn_Psi_t) = basis_pairs[1]
        (Phi_t1, unn_Phi_t1, Psi_t1, unn_Psi_t1) = (None, None, None, None)

        # Initialize iteration-related variables
        converged = [False] * 2
        i = 1

        while (not all(converged)) and i <= max_iter:
            # Update step scales for gradient updates
            eta1_i = eta1 / i**0.5
            eta2_i = eta2 / i**0.5
            i = i + 1

            if verbose:
                print("Iteration: " + str(i))
                print("\teta1: " + str(eta1_i) + " \teta2: " + str(eta2_i))
                if self.online:
                    print("\tGetting updated minibatches and Sx and Sy")

            # Update random minibatches if doing SGD
            if self.online:
                X = X_ds.get_data()
                Sx = X_gs.get_gram(X)
                Y = Y_ds.get_data()
                Sy = Y_gs.get_gram(Y)

            if verbose:
                print("\tGetting updated basis estimates")

            # Get unconstrained, unnormalized gradients
            (unn_Phi_grad, unn_Psi_grad) = agu.get_gradients(
                [X,Y], [(unn_Phi_t, Phi_t),(unn_Psi_t, Psi_t)])

            # Make updates to basis parameters
            unn_Phi_t1 = X_optimizer.get_update(
                unn_Phi_t, unn_Phi_grad, eta1_i)
            unn_Psi_t1 = Y_optimizer.get_update(
                unn_Psi_t, unn_Psi_grad, eta2_i)

            # Normalize updated bases
            Phi_t1 = agu.get_gram_normed(unn_Phi_t1, Sx)
            Psi_t1 = agu.get_gram_normed(unn_Psi_t1, Sy)

            if verbose:
                # BUG FIX: was np.thelineg.norm, which does not exist.
                Phi_dist = np.linalg.norm(unn_Phi_t1 - unn_Phi_t)
                Psi_dist = np.linalg.norm(unn_Psi_t1 - unn_Psi_t)
                print("\tDistance between unnormed Phi iterates: " + str(Phi_dist))
                print("\tDistance between unnormed Psi iterates: " + str(Psi_dist))
                print("\tObjective: " + str(agu.get_objective(
                    [X, Y], [Phi_t1, Psi_t1])))

            # Check for convergence
            converged = gu.misc.is_converged(
                [(unn_Phi_t, unn_Phi_t1), (unn_Psi_t, unn_Psi_t1)],
                [self.eps1, self.eps2],
                verbose)

            # Update state
            (unn_Phi_t, Phi_t, unn_Psi_t, Psi_t) = (
                np.copy(unn_Phi_t1),
                np.copy(Phi_t1),
                np.copy(unn_Psi_t1),
                np.copy(Psi_t1))

        print("Completed in " + str(i) + " iterations.")

        self.has_been_fit = True
        self.Phi = Phi_t
        self.unn_Phi = unn_Phi_t
        self.Psi = Psi_t
        self.unn_Psi = unn_Psi_t

    def get_bases(self):
        # Return (Phi, Psi, unn_Phi, unn_Psi); only valid after fit().
        if not self.has_been_fit:
            raise Exception(
                'Model has not yet been fit.')

        return (self.Phi, self.Psi, self.unn_Phi, self.unn_Psi)

    def _init_data(self, X_ds, Y_ds, X_gs, Y_gs):
        # Pull the initial data and Gram matrices for both views; in batch
        # mode, truncate the longer view so both have the same sample count
        # and subtract the truncated rows' contribution from its Gram matrix.
        if not agu.is_k_valid([X_ds, Y_ds], self.k):
            raise ValueError(
                'The value of k must be less than or equal to the minimum of the' +
                ' number of columns of X and Y.')

        X = X_ds.get_data()
        Sx = X_gs.get_gram(X)
        Y = Y_ds.get_data()
        Sy = Y_gs.get_gram(Y)

        if not self.online:
            # Find a better solution to this
            n = min([X.shape[0], Y.shape[0]])

            if X.shape[0] > n:
                # Remove to-be-truncated examples from Gram matrix
                Sx -= get_gram(X[n:,:])

                # Truncate extra examples
                X = X[:n,:]
            elif Y.shape[0] > n:
                # BUG FIX: was Y.shap[0].
                # Do the same for Y if Y has the extra examples
                Sy -= get_gram(Y[n:,:])
                Y = Y[:n,:]

        return (X, Sx, Y, Sy)
| true |
72ca6c785b0c35c371142c34f16969a4404cd881 | Python | Frank12350/repl | /1_class-1.py | UTF-8 | 173 | 3.40625 | 3 | [] | no_license | class Person:
name = None
height = None
weight = None
p1 = Person()
p1.name = "Joe"
p1.weight = 60
p1.height = 170
bmi = p1.weight / (p1.height / 100) ** 2
print(bmi) | true |
e6404005f2fdb35bd38b9fbf3f6246aba2a3fbaa | Python | QuinceP/space_roguelike | /systems.py | UTF-8 | 3,944 | 2.9375 | 3 | [] | no_license | import random
import esper
from components import *
class AISystem(esper.Processor):
    """Gives each AI entity a random cardinal step on its turn.

    Picks a random direction, checks both the map edge and the target
    tile's passability, and writes the step into the entity's Velocity
    for MovementSystem to apply.
    """
    def __init__(self, map, mapwidth, mapheight):
        # map: 2-D grid indexed [y][x] whose tiles expose is_passable.
        self.map = map
        self.mapwidth = mapwidth
        self.mapheight = mapheight
    def process(self):
        for ent, (position, velocity, ai, sprite) in self.world.get_components(Position, Velocity, AI, Sprite):
            if ai.is_turn:
                direction = random.choice(['north', 'east', 'south', 'west'])
                # Each branch: first guard against walking off the map edge,
                # then probe the destination tile; an IndexError on the probe
                # is treated the same as an impassable tile.
                if direction == 'east' and position.x < self.mapwidth - 1:
                    sprite.facing_right = True
                    try:
                        passable = self.map[position.y][position.x + 1].is_passable
                    except IndexError:
                        passable = False
                    if passable:
                        velocity.dx = 1
                elif direction == 'west' and position.x > 0:
                    sprite.facing_right = False
                    try:
                        passable = self.map[position.y][position.x - 1].is_passable
                    except IndexError:
                        passable = False
                    if passable:
                        velocity.dx = -1
                elif direction == 'north' and position.y > 0:
                    try:
                        passable = self.map[position.y - 1][position.x].is_passable
                    except IndexError:
                        passable = False
                    if passable:
                        velocity.dy = -1
                elif direction == 'south' and position.y < self.mapheight - 1:
                    try:
                        passable = self.map[position.y + 1][position.x].is_passable
                    except IndexError:
                        passable = False
                    if passable:
                        velocity.dy = 1
                # Turn is consumed even if the chosen step was blocked.
                ai.is_turn = False
        pass
class MovementSystem(esper.Processor):
    """Applies each entity's pending velocity to its position, then clears it."""

    def __init__(self):
        super().__init__()

    def process(self):
        # Consume the per-turn movement delta: shift the entity by it, then
        # zero it out so the same step is not applied again next tick.
        for entity, (pos, vel) in self.world.get_components(Position, Velocity):
            pos.x = pos.x + vel.dx
            pos.y = pos.y + vel.dy
            vel.dx = 0
            vel.dy = 0
class SpriteSystem(esper.Processor):
    """Advances sprite animation frames and prepares the scaled image to draw."""
    def __init__(self, parent_surface, tile_size):
        self.parent_surface = parent_surface
        self.tile_size = tile_size
    @staticmethod
    def flip(sprite):
        # Mirror horizontally when facing_right is truthy; vertical flip is never applied.
        sprite.image = pygame.transform.flip(sprite.image, sprite.facing_right, False)
    def process(self):
        for ent, (sprite, position) in self.world.get_components(Sprite, Position):
            # Advance one animation frame every 5 process() calls.
            sprite.counter += 1
            if sprite.counter == 5:
                sprite.index += 1
                sprite.counter = 0
            # Wrap back to the first frame past the end of the strip.
            if sprite.index >= len(sprite.image_array):
                sprite.index = 0
            sprite.image = sprite.image_array[sprite.index]
            self.flip(sprite)
            # NOTE(review): image is scaled to a hard-coded 64x64 twice
            # (current_image rescales the already-scaled image) instead of
            # using self.tile_size — confirm whether that is intended.
            sprite.image = pygame.transform.scale(sprite.image, (64, 64))
            sprite.current_image = pygame.transform.scale(sprite.image, (64, 64))
from collections import deque
class TurnTakerSystem(esper.Processor):
    """Round-robin turn scheduler driven by per-object action points."""
    def __init__(self):
        # Queue of registered turn-takers; rotate() cycles whose turn is next.
        self.time_travelers = deque()
        self.turn_taken = False
        self.turns = 0
    def register(self, obj):
        # Add obj to the rotation and reset its action-point balance.
        self.time_travelers.append(obj)
        obj.action_points = 0
    def release(self, obj):
        # Remove obj from the rotation entirely.
        self.time_travelers.remove(obj)
    def tick(self):
        # Grant the front object its agility in action points, then let it
        # act until the balance is spent.
        # NOTE(review): if obj.take_turn() ever returns <= 0 this loop never
        # terminates — relies on take_turn() always costing points.
        if len(self.time_travelers) > 0:
            obj = self.time_travelers[0]
            self.time_travelers.rotate()
            obj.action_points += obj.agility
            while obj.action_points > 0:
                obj.action_points -= obj.take_turn()
            self.turns += 1
    def process(self):
        # Only advance the schedule once per externally-signalled turn.
        if self.turn_taken:
            self.tick()
            self.turn_taken = False
| true |
255f464350516a731510cc35d008d78254fe01a7 | Python | parikshd/cs229-zipline | /Abhishek_arm_test/src/svm_on_test_all_labels.py | UTF-8 | 3,709 | 2.90625 | 3 | [] | no_license | # Kernel SVM
# Importing the libraries
import numpy as np
import pandas as pd
import sklearn
import util
def give_error(y_out, class_probabilities, y, x):
    """Print a prediction report and return (total_failure, model_failure).

    y_out: predicted labels; y: actual labels; class_probabilities: per-row
    [p_success, p_mission_failure, p_flight_failure]; x is unused.
    total_failure is the plain accuracy (correct / total); model_failure is
    the fraction of actual-2 rows NOT mispredicted as 1.
    """
    n_correct = 0
    n_reported_wrong = 0
    n_ones_that_were_twos = 0   # predicted 1.0 but actually 2.0
    n_actual_twos = 0
    for i, predicted in enumerate(y_out):
        actual = y[i]
        if predicted == 1.0 and actual == 2.0:
            n_ones_that_were_twos += 1
        if actual == 2.0:
            n_actual_twos += 1
        if predicted == actual:
            n_correct += 1
        elif predicted != 1.0:
            # Only report mismatches whose prediction was not class 1.
            print("Predicted:" + str(predicted) + ",actual:" + str(actual))
            probs = class_probabilities[i]
            print("%success=" + str(probs[0]*100) + " %mission-failure=" + str(probs[1]*100) + " %flight-failure=" + str(probs[2]*100))
            n_reported_wrong += 1
    total = len(y_out)
    print("Predicted " + str((total - n_ones_that_were_twos)) + "/" + str(total) + " correctly.")
    print("Predicted " + str(n_reported_wrong) + "/" + str(total) + " incorrectly.")
    print("Predicted " + str(n_ones_that_were_twos) + "/" + str(n_actual_twos) + " incorrectly. ==> predicted 1, actual 2")
    model_failure = (n_actual_twos - n_ones_that_were_twos) / n_actual_twos
    total_failure = n_correct / total
    return total_failure, model_failure
# Train an RBF SVM on the training CSV, then post-process its class
# probabilities into final labels (1/2/4) and score them with give_error.
train_path = "output/flights_pass_1_na_0.csv"
#eval_path = "output/flights_pass_1_na_0.csv"
eval_path = "testinput/all_test_with_failures_clean.csv"
X, Y,X_test,Y_test,dataset = util.load_dataset_new(train_path,eval_path)
# Dump the feature column names for reference.
with open('featues_new.txt', 'w') as f:
    for item in dataset.columns:
        f.write("%s\n" % item)
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X = sc_X.fit_transform(X)
# NOTE(review): fit_transform on the test set refits the scaler to test
# statistics; sc_X.transform(X_test) is the conventional choice — confirm.
X_test_transformed = sc_X.fit_transform(X_test)
# Fitting the classifier into the Training set
from sklearn.svm import SVC
classifier = SVC(kernel = 'rbf', random_state = 0, gamma = 'auto',probability=True)
# Predicting the test set results
classifier.fit(X,Y)
# Y_pred_train = classifier.predict(X_Train)
# print(give_error(Y_pred_train,Y_Train))
#w = classifier.coef_
#print('w = ',w)
Y_Pred_first_pass = classifier.predict(X_test_transformed)
class_probabilities = classifier.predict_proba(X_test_transformed)
# Re-derive labels from the probabilities with hand-tuned thresholds; later
# rules override earlier ones, and the final step remaps 0->1, 1->2, 2->4.
for i in range(len(Y_Pred_first_pass)):
    if class_probabilities[i][1] > class_probabilities[i][0]:
        Y_Pred_first_pass[i] = 1.0
    else:
        Y_Pred_first_pass[i] = 0.0
    if (class_probabilities[i][2] > class_probabilities[i][1]) and (
            class_probabilities[i][2] > class_probabilities[i][0]):
        Y_Pred_first_pass[i] = 2.0
    # Threshold overrides: a confident success, or any non-trivial failure
    # probability, forces the corresponding class.
    if class_probabilities[i][0] >= 0.85:
        Y_Pred_first_pass[i] = 0.0
    if class_probabilities[i][1] >= 0.15:
        Y_Pred_first_pass[i] = 1.0
    if class_probabilities[i][2] >= 0.15:
        Y_Pred_first_pass[i] = 2.0
    # Map internal classes (0,1,2) onto the reporting labels (1,2,4).
    if Y_Pred_first_pass[i] == 0.0:
        Y_Pred_first_pass[i] = 1.0
    elif Y_Pred_first_pass[i] == 1.0:
        Y_Pred_first_pass[i] = 2.0
    elif Y_Pred_first_pass[i] == 2.0:
        Y_Pred_first_pass[i] = 4.0
for i in range(len(Y_Pred_first_pass)):
    print(Y_Pred_first_pass[i])
    print(class_probabilities[i])
total_accuracy,model_accuracy = give_error(Y_Pred_first_pass,class_probabilities,Y_test, X_test)
print("Total accuracy:" + str(total_accuracy*100))
print("Model accuracy:" + str(model_accuracy*100))
| true |
4cb99e937df75ce4c64b1b045f5dea43d7c8b790 | Python | dima2308/supro-autotests-ui | /pages/base_page.py | UTF-8 | 2,140 | 2.875 | 3 | [] | no_license | import os.path
import pickle
from config import main_url
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BasePage:
    """Base Selenium page object: explicit-wait lookups and cookie helpers."""
    def __init__(self, driver):
        self.driver = driver
        self.base_url = main_url
    def go_to_site(self):
        """Navigate the driver to the configured base URL."""
        return self.driver.get(self.base_url)
    def find_element(self, locator, time=5):
        """Wait up to `time` seconds for one element matching `locator`."""
        return WebDriverWait(self.driver, time).until(EC.presence_of_element_located(locator),
                                                      message=f"Can't find element by locator {locator}")
    def find_elements(self, locator, time=5):
        """Wait up to `time` seconds for all elements matching `locator`."""
        return WebDriverWait(self.driver, time).until(EC.presence_of_all_elements_located(locator),
                                                      message=f"Can't find elements by locator {locator}")
    def check_current_title(self, title, time=5):
        """Wait until the page title contains `title`."""
        return WebDriverWait(self.driver, time).until(EC.title_contains(title),
                                                      message="Current title " + self.driver.title + f" not equal {title}")
    def check_text_element(self, locator, text, time=5):
        """Wait until the element at `locator` contains `text`."""
        return WebDriverWait(self.driver, time).until(EC.text_to_be_present_in_element(locator, text),
                                                      message=f"Can't find text {text} by locator {locator}")
    def get_cookies(self):
        """Load cookies.pkl into the driver, then reload the site.

        Raises if the pickle file is missing.  'expiry' is coerced to int
        because some drivers reject float expiries.
        NOTE(review): pickle.load on an on-disk file is only safe if the
        file is trusted (written by save-cookies code in this suite).
        """
        if os.path.isfile("cookies.pkl"):
            with open("cookies.pkl", "rb") as file:
                cookies = pickle.load(file)
        else:
            raise Exception('Cookies not found')
        for cookie in cookies:
            if 'expiry' in cookie:
                cookie['expiry'] = int(cookie['expiry'])
            self.driver.add_cookie(cookie)
        self.go_to_site()
    def is_element_present(self, locator):
        """True if `locator` resolves within the default wait, else False."""
        try:
            self.find_element(locator)
        except TimeoutException:
            return False
        return True
    def click_on_the_button(self, locator):
        """Find the element at `locator` and click it."""
        return self.find_element(locator).click()
| true |
e93f9df4186a77f73ff7eb097d813ad9b8c61d7b | Python | alexsalo/matrr | /matrr/utils/gadgets.py | UTF-8 | 28,872 | 2.59375 | 3 | [] | no_license | import json
import numpy
import os
from scipy import stats
import pylab
from matplotlib.patches import Rectangle
from matplotlib.ticker import FixedLocator
from django.db.models import Max, Min, Avg
from matrr.models import MonkeyToDrinkingExperiment, MonkeyBEC, MonkeyHormone, TWENTYFOUR_HOUR, ExperimentBout
from matrr import plotting
def convex_hull(points, graphic=False, smidgen=0.0075):
    """
    Calculate subset of points that make a convex hull around points
    Recursively eliminates points that lie inside two neighbouring points until only convex hull is remaining.

    :Parameters:
        points : ndarray (2 x m)
            array of points for which to find hull
        graphic : bool
            use pylab to show progress?
        smidgen : float
            offset for graphic number labels - useful values depend on your data range

    :Returns:
        hull_points : ndarray (2 x n)
            convex hull surrounding points
    """

    def _angle_to_point(point, centre):
        """calculate angle in 2-D between points and x axis"""
        delta = point - centre
        res = numpy.arctan(delta[1] / delta[0])
        # arctan only covers (-pi/2, pi/2); shift left-half-plane angles by pi.
        if delta[0] < 0:
            res += numpy.pi
        return res

    def _draw_triangle(p1, p2, p3, **kwargs):
        # Debug visualization helper: fill the triangle p1-p2-p3.
        tmp = numpy.vstack((p1, p2, p3))
        x, y = [x[0] for x in zip(tmp.transpose())]
        pylab.fill(x, y, **kwargs)
        #time.sleep(0.2)

    def area_of_triangle(p1, p2, p3):
        """calculate area of any triangle given co-ordinates of the corners"""
        return numpy.linalg.norm(numpy.cross((p2 - p1), (p3 - p1), axis=0)) / 2.

    if graphic:
        pylab.clf()
        pylab.plot(points[0], points[1], 'ro')
    n_pts = points.shape[1]
    # assert(n_pts > 5)
    centre = points.mean(1)
    if graphic: pylab.plot((centre[0],), (centre[1],), 'bo')
    # Sort the points counter-clockwise around the centroid before pruning.
    angles = numpy.apply_along_axis(_angle_to_point, 0, points, centre)
    pts_ord = points[:, angles.argsort()]
    if graphic:
        for i in xrange(n_pts):
            pylab.text(pts_ord[0, i] + smidgen, pts_ord[1, i] + smidgen, '%d' % i)
    pts = [x[0] for x in zip(pts_ord.transpose())]
    prev_pts = len(pts) + 1
    k = 0
    # Repeatedly sweep the ring, dropping a middle point whenever the two
    # triangles it forms with the centroid cover less area than the triangle
    # skipping it (i.e. the point lies inside the hull edge). Stop when a
    # full sweep removes nothing.
    while prev_pts > n_pts:
        prev_pts = n_pts
        n_pts = len(pts)
        if graphic: pylab.gca().patches = []
        i = -2
        while i < (n_pts - 2):
            Aij = area_of_triangle(centre, pts[i], pts[(i + 1) % n_pts])
            Ajk = area_of_triangle(centre, pts[(i + 1) % n_pts],
                                   pts[(i + 2) % n_pts])
            Aik = area_of_triangle(centre, pts[i], pts[(i + 2) % n_pts])
            if graphic:
                _draw_triangle(centre, pts[i], pts[(i + 1) % n_pts],
                               facecolor='blue', alpha=0.2)
                _draw_triangle(centre, pts[(i + 1) % n_pts],
                               pts[(i + 2) % n_pts],
                               facecolor='green', alpha=0.2)
                _draw_triangle(centre, pts[i], pts[(i + 2) % n_pts],
                               facecolor='red', alpha=0.2)
            if Aij + Ajk < Aik:
                if graphic: pylab.plot((pts[i + 1][0],), (pts[i + 1][1],), 'go')
                del pts[i + 1]
            i += 1
            n_pts = len(pts)
        k += 1
    return numpy.asarray(pts)
def Treemap(ax, node_tree, color_tree, size_method, color_method, x_labels=None):
    """Draw a slice-and-dice treemap of node_tree onto matplotlib axes `ax`.

    Rectangles alternate split axis per recursion level; size_method(node)
    gives a node's weight and color_method(color) its facecolor.  When
    x_labels is given, full-height ("primary") patches are labelled along
    the x axis; otherwise x ticks are hidden.
    """
    def addnode(ax, node, color, lower=[0, 0], upper=[1, 1], axis=0):
        # NOTE(review): mutable default args for lower/upper — safe only
        # because every recursive call passes fresh list() copies, but the
        # top-level call mutates the shared defaults across Treemap calls.
        axis %= 2
        draw_rectangle(ax, lower, upper, node, color)
        width = upper[axis] - lower[axis]
        try:
            # Partition this node's extent among its children in proportion
            # to their sizes; a non-iterable (leaf) node lands in `except`.
            for child, color in zip(node, color):
                size_child = size_method(child)
                size_node = size_method(node)
                upp = (width * float(size_child)) / size_node
                upper[axis] = lower[axis] + upp
                addnode(ax, child, color, list(lower), list(upper), axis + 1)
                lower[axis] = upper[axis]
        except Exception, e:
            print "Couldn't do it: %s" % e
            pass
    def draw_rectangle(ax, lower, upper, node, color):
        # Add one black-edged rectangle spanning lower..upper to the axes.
        c = color_method(color)
        r = Rectangle(lower, upper[0] - lower[0], upper[1] - lower[1],
                      edgecolor='k',
                      facecolor=c)
        ax.add_patch(r)
    def assign_x_labels(ax, labels):
        def sort_patches_by_xcoords(patches):
            sorted_patches = []
            # This method returns a list of patches sorted by each patch's X coordinate
            xcoords = sorted([patch.get_x() for patch in patches])
            for x in xcoords:
                for patch in patches:
                    if patch.get_x() == x:
                        sorted_patches.append(patch)
            return sorted_patches
        patches = ax.patches
        # A primary_patch is a Rectangle which takes up the full height of the treemap. In the cohort treemap implementation, a primary patch is a monkey
        primary_patches = [patch for patch in patches if patch.get_height() == 1 and patch.get_width() != 1]
        sorted_patches = sort_patches_by_xcoords(primary_patches)
        label_locations = []
        patch_edge = 0
        # Centre each label under its patch by accumulating patch widths.
        for patch in sorted_patches:
            width = patch.get_width()
            _location = patch_edge + (width / 2.)
            label_locations.append(_location)
            patch_edge += width
        Axis_Locator = FixedLocator(label_locations)
        ax.xaxis.set_major_locator(Axis_Locator)
        ax.set_xticklabels(labels, rotation=45)
    addnode(ax, node_tree, color_tree)
    if x_labels:
        assign_x_labels(ax, x_labels)
    else:
        ax.set_xticks([])
#### Specific callables used by other gadgets and/or plotting_beta (so far)
def get_mean_MTD_oa_field(monkey, field, six_months=0, three_months=0):
    """
    Average `field` across a monkey's open-access MTD records.

    six_months selects a half of open access (1 or 2) and three_months a
    quarter (1-4); with both zero the average spans all of open access.
    Used as a callable by get_percentile_of_callable() and the
    gather_*_by_field helpers.
    """
    assert not (six_months and three_months), "You cannot gather six month and three month intervals at the same time."
    queryset = MonkeyToDrinkingExperiment.objects.OA().exclude_exceptions().filter(monkey=monkey)
    ordinals = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth'}
    # Dispatch to the matching queryset slice method (e.g. second_six_months_oa).
    if six_months in (1, 2):
        queryset = getattr(queryset, '%s_six_months_oa' % ordinals[six_months])()
    elif three_months in (1, 2, 3, 4):
        queryset = getattr(queryset, '%s_three_months_oa' % ordinals[three_months])()
    return queryset.aggregate(Avg(field))[field + '__avg']
def get_mean_BEC_oa_field(monkey, field, six_months=0, three_months=0):
    """
    Average `field` across a monkey's open-access BEC records.

    six_months selects a half of open access (1 or 2) and three_months a
    quarter (1-4); with both zero the average spans all of open access.
    Used as a callable by get_percentile_of_callable() and the
    gather_*_by_field helpers.
    """
    assert not (six_months and three_months), "You cannot gather six month and three month intervals at the same time."
    queryset = MonkeyBEC.objects.OA().exclude_exceptions().filter(monkey=monkey)
    ordinals = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth'}
    # Dispatch to the matching queryset slice method (e.g. third_three_months_oa).
    if six_months in (1, 2):
        queryset = getattr(queryset, '%s_six_months_oa' % ordinals[six_months])()
    elif three_months in (1, 2, 3, 4):
        queryset = getattr(queryset, '%s_three_months_oa' % ordinals[three_months])()
    return queryset.aggregate(Avg(field))[field + '__avg']
def get_mean_MHM_oa_field(monkey, field, six_months=0, three_months=0):
    """
    Average `field` across a monkey's open-access hormone (MHM) records.

    six_months selects a half of open access (1 or 2) and three_months a
    quarter (1-4); with both zero the average spans all of open access.
    Used as a callable by get_percentile_of_callable() and the
    gather_*_by_field helpers.
    """
    assert not (six_months and three_months), "You cannot gather six month and three month intervals at the same time."
    queryset = MonkeyHormone.objects.OA().exclude_exceptions().filter(monkey=monkey)
    ordinals = {1: 'first', 2: 'second', 3: 'third', 4: 'fourth'}
    # Dispatch to the matching queryset slice method (e.g. first_six_months_oa).
    if six_months in (1, 2):
        queryset = getattr(queryset, '%s_six_months_oa' % ordinals[six_months])()
    elif three_months in (1, 2, 3, 4):
        queryset = getattr(queryset, '%s_three_months_oa' % ordinals[three_months])()
    return queryset.aggregate(Avg(field))[field + '__avg']
def ebt_startdiff_volsum_exclude_fivehours(subplot, monkey_one, monkey_two):
    """
    For use with plotting_beta.rhesus_category_parallel_classification_stability_popcount

    For every bout of monkey_one (ignoring bouts in the first five hours),
    pair it with monkey_two's nearest-in-time bout on the same day and
    collect (start-time difference, summed volume).  Results are cached as
    JSON under matrr/utils/DATA/json/; the bare except treats any failure
    to open the cache files as a cache miss and recomputes.
    Returns (subplot, x_data, y_data).
    """
    try:
        fx = open('matrr/utils/DATA/json/ebt_startdiff_volsum_exclude_fivehours-%d-%d-xvalues.json' % (monkey_one.pk, monkey_two.pk), 'r')
        fy = open('matrr/utils/DATA/json/ebt_startdiff_volsum_exclude_fivehours-%d-%d-yvalues.json' % (monkey_one.pk, monkey_two.pk), 'r')
    except:
        one_mtds = MonkeyToDrinkingExperiment.objects.OA().exclude_exceptions().filter(monkey=monkey_one).order_by('drinking_experiment__dex_date')
        one_dates = one_mtds.values_list('drinking_experiment__dex_date', flat=True).distinct()
        # Seed values anchor the plot's scale (24h difference, 1000 volume).
        x_data = [TWENTYFOUR_HOUR,]
        y_data = [1000,]
        for date in one_dates:
            # Drop bouts starting in the first five hours of the session.
            ebts = ExperimentBout.objects.filter(mtd__drinking_experiment__dex_date=date).exclude(ebt_start_time__lte=5*60*60)
            one_values = ebts.filter(mtd__monkey=monkey_one).values_list('ebt_start_time', 'ebt_volume')
            two_values = ebts.filter(mtd__monkey=monkey_two).values_list('ebt_start_time', 'ebt_volume')
            if not one_values or not two_values:
                continue
            two_starts = numpy.array(two_values)[:,0]
            for one_start_time, one_volume in one_values:
                # Nearest bout of monkey_two by absolute start-time distance.
                two_closest_start = min(two_starts, key=lambda x:abs(x-one_start_time))
                two_closest_bout = two_values.get(ebt_start_time=two_closest_start)
                x_value = float(numpy.abs(one_start_time - two_closest_bout[0]))
                y_value = float(one_volume + two_closest_bout[1])
                x_data.append(x_value)
                y_data.append(y_value)
        subplot.set_ylabel("Summed volume of adjacent bouts")
        subplot.set_xlabel("Bout start time difference")
        # Write the freshly computed series to the JSON cache for next time.
        folder_name = 'matrr/utils/DATA/json/'
        if not os.path.exists(folder_name):
            os.makedirs(folder_name)
        fx = open(folder_name+'ebt_startdiff_volsum_exclude_fivehours-%d-%d-xvalues.json' % (monkey_one.pk, monkey_two.pk), 'w')
        fy = open(folder_name+'ebt_startdiff_volsum_exclude_fivehours-%d-%d-yvalues.json' % (monkey_one.pk, monkey_two.pk), 'w')
        fx.write(json.dumps(x_data))
        fy.write(json.dumps(y_data))
        fx.close()
        fy.close()
        return subplot, x_data, y_data
    else:
        # Cache hit: each file holds one JSON-encoded list on its first line.
        x = json.loads(fx.readline())
        y = json.loads(fy.readline())
        fx.close()
        fy.close()
        subplot.set_ylabel("Summed volume of adjacent bouts")
        subplot.set_xlabel("Bout start time difference")
        return subplot, x, y
####
def get_callable(field):
    """Map a field name to the matching table-average helper.

    Dispatches on the table prefix embedded in the field name ('mtd',
    'bec' or 'mhm'); returns None when no prefix matches.
    """
    dispatch = (
        ('mtd', get_mean_MTD_oa_field),
        ('bec', get_mean_BEC_oa_field),
        ('mhm', get_mean_MHM_oa_field),
    )
    for token, averager in dispatch:
        if token in field:
            return averager
    return None
def get_percentile_of_callable(monkey, monkeys, specific_callable, field, six_months=0, three_months=0):
"""
This function converts a value generated by specific_callable into a percentile. This percentile describes where
monkey's data is compared to monkeys' values.
monkey, field, six_months and three_months are parameters passed to specific_callable.
"""
this_value = None
all_values = list()
for mky in monkeys:
value = specific_callable(mky, field, six_months=six_months, three_months=three_months)
all_values.append(value)
if mky == monkey:
this_value = value
if this_value is None:
raise Exception("monkey was not found in the monkeys collection.")
return stats.percentileofscore(all_values, this_value)
def gather_monkey_percentiles_by_six_months(monkeys, six_months=0):
    """
    six_months == (0,1,2): 0 == all OA, 1 == first 6 months of OA,
    2 == second 6 months of OA.

    Returns (data, labels):
    - labels[] describes each collected measure, in order
    - data{} maps each monkey to a 2-d numpy array whose x column is the
      index into labels[] and whose y column is that monkey's percentile
      for the measure relative to the other monkeys.
    """
    # high drinkers == high percentiles
    high_high = ['mtd_etoh_g_kg', 'mhm_ald', 'bec_mg_pct', 'mtd_veh_intake', 'mtd_max_bout_vol']
    hh_label = ["Avg Daily Etoh (g/kg)", "Avg Aldosterone", "Avg BEC (% mg)", "Average Daily h20 (ml)", "Avg Daily Max Bout (ml)"]
    # high drinkers == low percentiles
    high_low = ['mhm_acth', 'mtd_pct_max_bout_vol_total_etoh', 'mhm_doc', 'mtd_total_pellets', 'mtd_latency_1st_drink',
                'bec_pct_intake']
    hl_label = ["Avg ACTH", "Avg Daily Max Bout / Total", "Avg Deoxycorticosterone", "Avg Daily Pellets",
                "Avg Time to First Drink (s)", "Avg % Etoh Before BEC Sample"]
    # scattered (currently unused candidate measures kept for reference)
    scattered = ['mhm_cort', 'mhm_t', 'mhm_dheas', 'mhm_ald_stdev', 'mhm_doc_stdev', 'mhm_acth_stdev']
    explore = []
    fields = []
    labels = []
    fields.extend(high_high)
    fields.extend(high_low)
    labels.extend(hh_label)
    labels.extend(hl_label)
    # fields = ['mtd_etoh_g_kg', 'mhm_ald', 'bec_mg_pct', 'mtd_veh_intake', 'mtd_total_pellets', 'mtd_latency_1st_drink']
    data = dict()
    for monkey in monkeys:
        x_values = list()
        y_values = list()
        x = 0
        for field in fields:
            field_callable = get_callable(field)
            x_values.append(x)
            # this is a lazy, computationally intensive way to calculate this.  This could be refactored to be MUCH more efficient.
            # todo: refactor this so that we collect all the raw values once, and then calculate the percentile from these values
            # this collect all the raw values for all monkeys, every monkey.
            y_values.append(get_percentile_of_callable(monkey, monkeys, field_callable, field, six_months=six_months))
            x += 1
        data[monkey] = numpy.array(zip(x_values, y_values))
    return data, labels
def gather_three_month_monkey_percentiles_by_fieldname(monkeys, fieldname, three_months=0, six_months=0):
    """
    Percentile-rank each monkey's average `fieldname` over a slice of OA.

    three_months picks a quarter of open access (1-4) and six_months a half
    (1-2); with both zero the whole open-access period is used.  Returns
    {monkey: percentile of that monkey's average relative to `monkeys`}.
    The monkeys need not share a cohort; each is evaluated over its own
    section of open access.
    """
    # The averaging helper depends only on the field name, so resolve it once.
    averager = get_callable(fieldname)
    percentiles = dict()
    for subject in monkeys:
        percentiles[subject] = get_percentile_of_callable(
            subject, monkeys, averager, fieldname,
            three_months=three_months, six_months=six_months)
    return percentiles
def gather_three_month_monkey_average_by_fieldname(monkeys, fieldname, three_months=0, six_months=0):
    """
    Average `fieldname` over a slice of open access for each monkey.

    three_months picks a quarter of open access (1-4) and six_months a half
    (1-2); with both zero the whole open-access period is used.  Returns
    {monkey: average value}.
    """
    # The averaging helper depends only on the field name, so resolve it once.
    averager = get_callable(fieldname)
    averages = dict()
    for subject in monkeys:
        averages[subject] = averager(subject, fieldname, three_months=three_months, six_months=six_months)
    return averages
def identify_drinking_category(mtd_queryset, bec_queryset):
    """Classify one monkey's drinking as 'LD', 'BD', 'HD' or 'VHD'.

    Both querysets must contain records for exactly one monkey.  The
    category is derived from the fraction of days over 2/3/4 g/kg ethanol,
    the mean daily intake, and whether BEC exceeded 80 mg% at least once
    per year on average.  Precedence: VHD > HD > BD > LD.
    """
    assert len(mtd_queryset.order_by().values_list('monkey', flat=True).distinct()) == 1, "Nothing about this function " \
                                                                                          "will work with an MTD " \
                                                                                          "queryset with multiple monkeys"
    assert len(bec_queryset.order_by().values_list('monkey', flat=True).distinct()) == 1, "Nothing about this function " \
                                                                                          "will work with a BEC " \
                                                                                          "queryset with multiple monkeys"
    # Observation span in days (float so the pct_* divisions are true division).
    max_date = mtd_queryset.aggregate(Max('drinking_experiment__dex_date'))['drinking_experiment__dex_date__max']
    min_date = mtd_queryset.aggregate(Min('drinking_experiment__dex_date'))['drinking_experiment__dex_date__min']
    total_days = float((max_date-min_date).days)
    mtd_values = mtd_queryset.values('mtd_etoh_g_kg')
    days_over_two = mtd_values.filter(mtd_etoh_g_kg__gt=2).count()
    days_over_three = mtd_values.filter(mtd_etoh_g_kg__gt=3).count()
    days_over_four = mtd_values.filter(mtd_etoh_g_kg__gt=4).count()
    bec_values = bec_queryset.values('bec_mg_pct')
    days_over_80_bec = bec_values.filter(bec_mg_pct__gt=80).count()
    # At least one >80 mg% BEC sample per year of observation.
    has_one_binge_per_year = days_over_80_bec > (total_days/365.25)
    pct_over_two = days_over_two / total_days
    pct_over_three = days_over_three / total_days
    pct_over_four = days_over_four / total_days
    etoh_gkg_avg = mtd_queryset.aggregate(Avg('mtd_etoh_g_kg'))['mtd_etoh_g_kg__avg']
    # Category thresholds; presumably the published MATRR drinking-category
    # criteria — confirm against the source publication.
    is_BD = pct_over_two >= .55 and has_one_binge_per_year
    is_HD = pct_over_three >= .2
    is_VHD = pct_over_four >= .1 and etoh_gkg_avg > 3.
    if is_VHD:
        return 'VHD'
    elif is_HD:
        return 'HD'
    elif is_BD:
        return 'BD'
    return 'LD'
def get_category_population_by_quarter(quarter, monkeys=plotting.ALL_RHESUS_DRINKERS):
    """Return the drinking category of each monkey for one OA quarter.

    quarter may be 'first'..'fourth' or 1..4 (any case, int or str); the
    returned list is ordered like `monkeys`.

    BUG FIX: identify_drinking_category() requires BOTH an MTD and a BEC
    queryset, but this function previously passed only the MTD queryset,
    raising TypeError on every call.  A matching quarter-filtered BEC
    queryset is now built and passed alongside.
    """
    quarter = str(quarter).lower()
    mtd_base = MonkeyToDrinkingExperiment.objects.OA().exclude_exceptions().filter(monkey__in=monkeys)
    bec_base = MonkeyBEC.objects.OA().exclude_exceptions().filter(monkey__in=monkeys)
    if quarter == 'first' or quarter == '1':
        mtd_queryset = mtd_base.first_three_months_oa()
        bec_queryset = bec_base.first_three_months_oa()
    elif quarter == 'second' or quarter == '2':
        mtd_queryset = mtd_base.second_three_months_oa()
        bec_queryset = bec_base.second_three_months_oa()
    elif quarter == 'third' or quarter == '3':
        mtd_queryset = mtd_base.third_three_months_oa()
        bec_queryset = bec_base.third_three_months_oa()
    elif quarter == 'fourth' or quarter == '4':
        mtd_queryset = mtd_base.fourth_three_months_oa()
        bec_queryset = bec_base.fourth_three_months_oa()
    else:
        raise Exception("Quarter can only be ('first', 'second', 'third', 'fourth') or (1,2,3,4)")
    # Classify each monkey from its own slice of the quarter's records.
    category_results = [identify_drinking_category(mtd_queryset.filter(monkey=monkey), bec_queryset.filter(monkey=monkey))
                        for monkey in monkeys]
    return category_results
def find_nearest_bouts(bout):
    """Return the cohort-mates' bout(s) starting closest in time to `bout`.

    Searches all same-day bouts from other monkeys in the same cohort and
    returns the queryset of bouts sharing the single closest start time.
    Raises ValueError (from min over an empty sequence) if no other monkey
    drank that day.
    """
    from matrr.models import ExperimentBout
    day_bouts = ExperimentBout.objects.filter(mtd__monkey__cohort=bout.mtd.monkey.cohort, mtd__drinking_experiment__dex_date=bout.mtd.drinking_experiment.dex_date)
    day_bouts = day_bouts.exclude(mtd__monkey=bout.mtd.monkey)
    day_bout_starts = day_bouts.values_list('ebt_start_time', flat=True)
    closest_start = min(day_bout_starts, key=lambda x:abs(x-bout.ebt_start_time))
    nearest_bouts = day_bouts.filter(ebt_start_time=closest_start)
    return nearest_bouts
def find_nearest_bout_per_monkey(bout):
"""
For a given bout, find the closest ExperimentBout to bout from each monkey in the cohort.
Returns a list of the closest bout from each monkey
"""
from matrr.models import ExperimentBout, Monkey
nearest_bouts = list()
for monkey in Monkey.objects.Drinkers().filter(cohort=bout.mtd.monkey.cohort).exclude(pk=bout.mtd.monkey.pk):
day_bouts = ExperimentBout.objects.filter(mtd__monkey=monkey, mtd__drinking_experiment__dex_date=bout.mtd.drinking_experiment.dex_date)
day_bout_starts = day_bouts.values_list('ebt_start_time', flat=True)
try:
closest_start = min(day_bout_starts, key=lambda x:abs(x-bout.ebt_start_time))
close_bout = day_bouts.filter(ebt_start_time=closest_start)[0]
except ValueError:
continue # sometimes the min() raised a value error. I forget why exactly, but it happened in this function.
except IndexError:
# will catch if day_bouts.filter(blah) is empty. This should never happen in this function, but just in case.
continue
nearest_bouts.append(close_bout)
return nearest_bouts
def collect_bout_startdiff_ratesum_data(subplot, monkey_one, monkey_two):
try:
fx = open('matrr/utils/DATA/json/bout_startdiff_ratesum-%d-%d-xvalues.json' % (monkey_one.pk, monkey_two.pk), 'r')
fy = open('matrr/utils/DATA/json/bout_startdiff_ratesum-%d-%d-yvalues.json' % (monkey_one.pk, monkey_two.pk), 'r')
except:
one_mtds = MonkeyToDrinkingExperiment.objects.OA().exclude_exceptions().filter(monkey=monkey_one).order_by('drinking_experiment__dex_date')
one_dates = one_mtds.values_list('drinking_experiment__dex_date', flat=True).distinct()
x_data = [TWENTYFOUR_HOUR,]
y_data = [1000,]
for date in one_dates:
one_bouts = ExperimentBout.objects.filter(mtd__monkey=monkey_one, mtd__drinking_experiment__dex_date=date).exclude(ebt_intake_rate=None)
one_values = one_bouts.values_list('ebt_start_time', 'ebt_intake_rate')
two_bouts = ExperimentBout.objects.filter(mtd__monkey=monkey_two, mtd__drinking_experiment__dex_date=date).exclude(ebt_intake_rate=None)
two_values = two_bouts.values_list('ebt_start_time', 'ebt_intake_rate')
if not one_values or not two_values:
continue
two_starts = numpy.array(two_values)[:,0]
for one_start_time, one_rate in one_values:
two_closest_start = min(two_starts, key=lambda x:abs(x-one_start_time))
two_closest_bout = two_values.get(ebt_start_time=two_closest_start)
x_value = float(numpy.abs(one_start_time - two_closest_bout[0]))
y_value = float(one_rate + two_closest_bout[1])
x_data.append(x_value)
y_data.append(y_value)
subplot.set_ylabel("Summed intake rate of adjacent bouts (in g/kg/minute)")
subplot.set_xlabel("Bout start time difference")
folder_name = 'matrr/utils/DATA/json/'
if not os.path.exists(folder_name):
os.makedirs(folder_name)
fx = open(folder_name+'bout_startdiff_ratesum-%d-%d-xvalues.json' % (monkey_one.pk, monkey_two.pk), 'w')
fy = open(folder_name+'bout_startdiff_ratesum-%d-%d-yvalues.json' % (monkey_one.pk, monkey_two.pk), 'w')
fx.write(json.dumps(x_data))
fy.write(json.dumps(y_data))
fx.close()
fy.close()
return subplot, x_data, y_data
else:
x = json.loads(fx.readline())
y = json.loads(fy.readline())
fx.close()
fy.close()
subplot.set_ylabel("Summed intake rate of adjacent bouts (in g/kg/minute)")
subplot.set_xlabel("Bout start time difference")
return subplot, x, y
def collect_bout_startdiff_normratesum_data(subplot, monkey_one, monkey_two):
import inspect
outfile_name = inspect.stack()[0][3]
try:
fx = open('matrr/utils/DATA/json/%s-%d-%d-xvalues.json' % (outfile_name, monkey_one.pk, monkey_two.pk), 'r')
fy = open('matrr/utils/DATA/json/%s-%d-%d-yvalues.json' % (outfile_name, monkey_one.pk, monkey_two.pk), 'r')
except:
one_mtds = MonkeyToDrinkingExperiment.objects.OA().exclude_exceptions().filter(monkey=monkey_one).order_by('drinking_experiment__dex_date')
one_dates = one_mtds.values_list('drinking_experiment__dex_date', flat=True).distinct()
x_data = [TWENTYFOUR_HOUR,]
y_data = [1000,]
for date in one_dates:
one_bouts = ExperimentBout.objects.filter(mtd__monkey=monkey_one, mtd__drinking_experiment__dex_date=date).exclude(ebt_intake_rate=None)
one_avg_rate = one_bouts.aggregate(Avg('ebt_intake_rate'))['ebt_intake_rate__avg']
one_values = one_bouts.values_list('ebt_start_time', 'ebt_intake_rate')
two_bouts = ExperimentBout.objects.filter(mtd__monkey=monkey_two, mtd__drinking_experiment__dex_date=date).exclude(ebt_intake_rate=None)
two_avg_rate = two_bouts.aggregate(Avg('ebt_intake_rate'))['ebt_intake_rate__avg']
two_values = two_bouts.values_list('ebt_start_time', 'ebt_intake_rate')
if not one_values or not two_values:
continue
two_starts = numpy.array(two_values)[:,0]
for one_start_time, one_rate in one_values:
two_closest_start = min(two_starts, key=lambda x:abs(x-one_start_time))
two_closest_bout = two_values.get(ebt_start_time=two_closest_start)
x_value = float(numpy.abs(one_start_time - two_closest_bout[0]))
y_value = float(one_rate/one_avg_rate + two_closest_bout[1]/two_avg_rate)
x_data.append(x_value)
y_data.append(y_value)
subplot.set_ylabel("Summed intake rate of adjacent bouts (in g/kg/minute)")
subplot.set_xlabel("Bout start time difference")
folder_name = 'matrr/utils/DATA/json/'
if not os.path.exists(folder_name):
os.makedirs(folder_name)
fx = open(folder_name+'%s-%d-%d-xvalues.json' % (outfile_name, monkey_one.pk, monkey_two.pk), 'w')
fy = open(folder_name+'%s-%d-%d-yvalues.json' % (outfile_name, monkey_one.pk, monkey_two.pk), 'w')
fx.write(json.dumps(x_data))
fy.write(json.dumps(y_data))
fx.close()
fy.close()
return subplot, x_data, y_data
else:
x = json.loads(fx.readline())
y = json.loads(fy.readline())
fx.close()
fy.close()
subplot.set_ylabel("Summed intake rate of adjacent bouts (in g/kg/minute)")
subplot.set_xlabel("Bout start time difference")
return subplot, x, y
def dump_figure_to_file(fig, name, output_path='', output_format='png', dpi=80):
if output_format:
filename = output_path + '%s.%s' % (name, output_format)
fig.savefig(filename, format=output_format,dpi=dpi)
def sum_dictionaries_by_key(first_dictionary, second_dictionary):
from collections import defaultdict
output_dictionary = defaultdict(lambda: 0)
for _key in first_dictionary.iterkeys():
output_dictionary[_key] += first_dictionary[_key]
for _key in second_dictionary.iterkeys():
output_dictionary[_key] += second_dictionary[_key]
return output_dictionary | true |
a7df12895900e6a4026042d8bd2db21292a9d966 | Python | devanrichter/cmsc123-project | /iter_files.py | UTF-8 | 832 | 3.015625 | 3 | [] | no_license | import os
import json
import time
start_time = time.time()
artist_dictionary = {}
dirpath = os.getcwd()
for subdir, dirs, files in os.walk(dirpath):
for file in files:
full_fname = subdir + os.sep + file
if full_fname.endswith(".json"):
with open(full_fname,'r') as f:
song = json.load(f)
artist = song['artist']
tags = song['tags']
for tag in tags:
if artist not in artist_dictionary:
artist_dictionary[artist] = []
artist_dictionary[artist].append(tag[0])
for artist in artist_dictionary:
print(artist)
print(artist_dictionary[artist])
print("Length : %d" % len (artist_dictionary))
print("--- %s seconds ---" % (time.time() - start_time))
| true |
51a1624522f22294f4a9cee1a550489c7f3c28c6 | Python | Louis95/movielist | /app.py | UTF-8 | 2,436 | 3.03125 | 3 | [] | no_license | from flask import Flask, jsonify
from utilities import make_get_request
import os
app = Flask(__name__)
@app.route('/')
def hello_world():
"""
Prints out a welcome note.
Parameters:
Returns:
'welcome!!
"""
return 'Welcome!!'
def replace_people(movies_list, people_list):
"""
Replaces the people value with a list of all the people who acted in that movies.
Parameters:
movies_list (list): A list of dictionaries
people_list (list): Another list of dictionaries
Returns:
movies_list (list): a new movies_list with the value of the people replace in a the dictionaries
"""
try:
for movies_dict in movies_list:
movies_dict['people'] = []
for people_dict in people_list:
# I was quite confuse as to what to do with /films that are not in /people
if movies_dict['title'] in people_dict['movies']:
movies_dict['people'].append(people_dict['people'])
# if len(dict1['people']) == 0:
# dict1['people'] = make_get_request(backup[0])[]
return movies_list
except ImportError:
return None
@app.route("/movies")
def get_movies():
"""
Get the list of movies.
Parameters:
Returns:
movies_people (list): a list of dictionaries
"""
data = make_get_request("https://ghibliapi.herokuapp.com/films?limit=20")
pe = get_people_in_movies()
movies_people = replace_people(data, pe)
return jsonify(movies_people)
def get_people_in_movies():
"""
Get the list of people who acted in a film.
Parameters:
Returns:
people_and_movies_list (list): a list of dictionaries
"""
people_movies = make_get_request("https://ghibliapi.herokuapp.com/people?limit=20")
people_and_movies_list = []
for people in people_movies:
people_movies_dict = {'people': people['name'],
'movies': [make_get_request(i)['title'] for i in people['films']]}
people_and_movies_list.append(people_movies_dict)
return people_and_movies_list
if __name__ == '__main__':
port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port)
app.run()
| true |
749de269aaaa2de452c61c0b76219be2ac551324 | Python | ajy720/Algorithm | /BOJ/9663.py | UTF-8 | 634 | 2.96875 | 3 | [] | no_license | cnt = 0
def solve(m:int, arr:list, r:list, s:list):
global cnt
if m == 0:
cnt += 1
return
for i in range(n//2):
if arr[i] and r[m+i] and s[m-i]:
arr[i] = False
r[m+i] = False
s[m-i] = False
solve(m-1, arr, r, s)
arr[i] = True
r[m+i] = True
s[m-i] = True
def ans(a:int):
print({1:1, 2:0, 3:0, 4:2, 5:10, 6:4, 7:40, 8:92, 9:352, 10:724, 11:2680, 12:14200, 13:73712, 14:365596}.get(a))
n = int(input())
ans(n)
""" arr = [True] * n
r = [True] * (n*2)
s = [True] * (n*2)
solve(n, arr, r, s)
print(cnt) """
| true |
af114fec07916d0aaccd9a0b1b3d71b7c8704d2b | Python | AdamZhouSE/pythonHomework | /Code/CodeRecords/2896/60835/300230.py | UTF-8 | 244 | 3.203125 | 3 | [] | no_license | s1 = list(input())
s2 = list(input())
res = "YES"
while len(s2) > 0:
if s2[0] == ' ':
del s2[0]
else:
if s2[0] not in s1:
res = "NO"
break
else:
del s2[0]
print(res, end = "")
| true |
f20d2c71166ec8703da576c2fd57a0c43624fa26 | Python | sergioib94/Proyecto-Json | /pokedex.py | UTF-8 | 2,032 | 3.765625 | 4 | [] | no_license | from func_pokedex import *
with open("pokedex.json") as fichero:
datos = json.load(fichero)
while True:
print ("")
print ("Menu Principal:")
print ("")
print ("1.Listar Informacion: Mostrar los pokemons que hay.")
print ("2.Contar informacion: Contar pokemons que hay de un tipo especifico.")
print ("3.Buscar o Filtrar informacion: Buscar los pokemons que pueden salir de un huevo.")
print ("4.Buscar informacion relacionada: Meter por teclado una caracteristica de un pokemon y decir que pokemons superan esa estadistica.")
print ("5.Ejercicio Libre: Proponer un combate, pedir por teclado el nombre de dos pokemons y decir quien ganaria.")
print ("6.Ayuda (listado de tipos)")
print ("0.Salir")
print ("")
opcion = input("opcion: ")
print ("")
if opcion == "1":
print ("Listados de pokemons: ")
print ("")
for pokemon in Lista(datos):
print (pokemon[0],"-",pokemon[1])
if opcion == "2":
tipo = input("Introduce un tipo de pokemon: " )
print (Contar(tipo,datos))
if opcion == "3":
print ("Los siquientes pokemons nacen de huevo:")
print ("")
for huevo in Filtrar(datos):
print ("*",huevo[0],"-",huevo[1])
if opcion == "4":
estadistica = input("Introduce la estadistica que quieras comprobar: ")
base = int(input("Introduce el valor de la estadistica: "))
for estadistica in Buscar(estadistica,base,datos):
print (estadistica)
if opcion == "5":
pokemon1 = input("Introduce el nombre de tu pokemon: ")
pokemon2 = input("Introduce el nombre del pokemon rival: ")
for i in Combate(pokemon1,pokemon2,datos):
for j in i:
print (j)
if opcion == "6":
print ("Tipos de pokemon:")
print ("")
print ("Water,Fire,Ice,Flying,Psychic,Poison,Grass,Ground,Rock,Electric,Bug,Normal,Fighting,Fairy,Dark,Ghost")
if opcion == "0":
break; | true |
6adc0ffcff4338cf86b5d509b00d5987b69db80f | Python | ramya42043/RAMYA_VT_WORK | /Knowledge/VTpython/PRacPy/clsinher6.py | UTF-8 | 267 | 3.1875 | 3 | [] | no_license | #!/usr/bin/python
class A(object) :
def __init__(self):
self.x=x
self.y=y
def add(self):
print "im in add"
class B(A) :
def __init__(self):
print "i'm in child"
def add(self):
super(B,self).add()
print "im in child add"
obj= B()
obj.add()
| true |
90348d5de3017de4e46de6ecbc72c5f60e8272d7 | Python | john-hawkins/data-synthesizer | /process.py | UTF-8 | 1,897 | 3.1875 | 3 | [] | no_license | import pandas as pd
import os
# PROCESS THE ENROLMENTS INTO INTERACTIONS DATA
df = pd.read_csv("assessments.csv")
df['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d', errors='coerce')
temp = df.loc[:,['student_id','course','date']]
temp.columns = ["USER_ID", "ITEM_ID", "TIMESTAMP"]
train = temp[ temp['TIMESTAMP'] < "2020-01-01" ]
test = temp[ temp['TIMESTAMP'] > "2020-01-01" ]
convert_dict = {'USER_ID': str,
'ITEM_ID': str,
'TIMESTAMP': int}
train = train.astype(convert_dict)
test = test.astype(convert_dict)
train.to_csv("personalize/interactions_train.csv", header=True, index=False)
test.to_csv("personalize/interactions_test.csv", header=True, index=False)
# PROCESS THE USERS
df = pd.read_csv("students.csv")
#
#df['dob'] = pd.to_datetime(df['dob'], format='%Y-%m-%d', errors='coerce')
#df['registration'] = pd.to_datetime(df['registration'], format='%Y-%m-%d', errors='coerce')
#
#from dateutil.relativedelta import relativedelta
#
#def diff_in_years(newer, older):
# return relativedelta(newer, older).years
#
#
#df['age'] = df.apply( lambda row: diff_in_years(row['registration'], row['dob']), axis=1 )
studs = df.loc[:,['student_id','age','education','major','language']]
studs.columns = ["USER_ID", "AGE", "EDU", "MAJOR", "LANG"]
convert_dict = {'USER_ID': str,
'AGE': int,
'EDU': str,
'MAJOR': str,
'LANG': str,
}
temp = studs.astype(convert_dict)
print(temp.dtypes)
temp.to_csv("personalize/users.csv", header=True, index=False)
# PROCESS THE ITEMS
df = pd.read_csv("courses.csv")
df.columns = ["ITEM_ID","TITLE","LEVEL","MATH"]
convert_dict = {'ITEM_ID': str,
'TITLE': str,
'LEVEL': int,
'MATH': int,
}
items = df.astype(convert_dict)
items.to_csv("personalize/items.csv", header=True, index=False)
| true |
e6bea31d079acbfb91828c1851f3ee9a85779d12 | Python | webclinic017/Chandra | /utility/configuration.py | UTF-8 | 825 | 2.53125 | 3 | [] | no_license | '''
Created on Jul 6, 2020
@author: Brian
'''
import os
import configparser
import json
def get_ini_data(csection):
config_file = os.getenv('localappdata') + "\\Development\\chandra.ini"
config = configparser.ConfigParser()
config.read(config_file)
config.sections()
ini_data = config[csection]
return ini_data
def read_config_json(json_file) :
print ("reading configuration details from ", json_file)
json_f = open(json_file, "rb")
json_config = json.load(json_f)
json_f.close
return (json_config)
def read_processing_network_json(json_file):
print ("reading processing network details from ", json_file)
json_f = open(json_file, "rb")
network_json = json.load(json_f)
json_f.close
return (network_json) | true |
6b436496e37d219d7908174f6dbccf938102a1b1 | Python | Rach1612/Python | /Python Short Pro/p10p1.py | UTF-8 | 762 | 4.53125 | 5 | [] | no_license | #progrm to find square root of a number entered by user
#prompt user for number
#int (input(enter a number ))
#save square root variable and set to 0
#squareroot=0
#if number <0 :
# exit program
#elif number >0 :do
#while squareroot**2<number entered: do:
#increment square root.
#if sqaureroot**2== number:
#this is square root of that number
#print that message
#else print numnber is not a perfect square.
number=int(input("Enter a number please"))
squareroot=0
if number<0:
print()
elif number>0:
while squareroot **2 <number:
squareroot+=1
if squareroot **2==number:
print("square root of", number, "is" ,squareroot)
else:
print(number, "is not a perfect square")
| true |
445601071dceb3821b21e6e3933c09c2bb6e1fc9 | Python | jrl-umi3218/sch-creator | /src/sch-visualizer.py | UTF-8 | 6,813 | 2.75 | 3 | [
"BSD-2-Clause"
] | permissive | import numpy as np
from numpy.core.defchararray import add, index
from numpy.lib.function_base import angle, extract, flip
from numpy.lib.scimath import arccos
import yaml
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import math
from matplotlib.widgets import Slider
from numpy import linalg as LA
########## functions ##########
def findAngle(vector1, vector2):
uVector1 = vector1 / LA.norm(vector1)
uVector2 = vector2 / LA.norm(vector2)
dotProduct = np.dot(uVector1,uVector2)
return 180 * np.arccos(dotProduct) / math.pi
def findArc(p1,p2, alpha):
# find vector to midpoint
pa = (p2 - p1) / 2.0
# find rhombus center
rhombusCenter = p1 + pa
# get distances a and b
a = LA.norm(pa)
b = math.sqrt(pow(alpha,2) - pow(a,2))
# distance to rhombus center
xRC = b * (p2[1] - rhombusCenter[1]) / a
yRC = -b * (p2[0] - rhombusCenter[0]) / a
C = np.array([rhombusCenter[0] + xRC,rhombusCenter[1] + yRC])
# find vectors from circle center
CP1 = p1 - C
CP2 = p2 - C
Cx = np.array([alpha,0])
# find arc angles
if CP2[1] > -0.05:
arcStartAngle = findAngle(CP2,Cx) # angle from x-axis to P2
angleInbetween = findAngle(CP2,CP1) # angle from P2 to P1
else:
arcStartAngle = -findAngle(CP2,Cx) # angle from x-axis to P2
angleInbetween = findAngle(CP2,CP1) # angle from P2 to P1
return C, arcStartAngle, arcStartAngle + angleInbetween
########## end of functions ##########
# read YAML file
#file = open("build\src\Debug\output.yaml")
# file = open("build\src\Release\output.yaml")
file = open("/home/amrf/balloon-inflating/sch-creator/output.yaml")
parsed_yaml = yaml.load(file, Loader=yaml.FullLoader)
# Create numpy array of convex hull points
chPoints = np.array(parsed_yaml.get("convexHull_points"))
# Create numpy array of removed points and their index
removedPointsRadius = flip(np.array([i[0] for i in parsed_yaml.get("removed_points_radius_and_index")]))
removedPointsIndex = [i[1] for i in parsed_yaml.get("removed_points_radius_and_index")]
# flip the array of indexes
rIndex = np.array(removedPointsIndex[::-1])
# get array of eliminated points
removedPoints = np.array([chPoints[i] for i in removedPointsIndex])
#get the amount of eliminated points
noRemovedPoints = parsed_yaml.get("eliminated_points")
# get alpha
alpha = parsed_yaml.get("alpha")
# create the figures
fig, ax = plt.subplots()
# draw circles
n = len(chPoints)
initial_alpha = alpha
# remove the eliminated points
newSCHPoints = np.delete(chPoints,rIndex,axis=0)
n = len(newSCHPoints)
# plot points
points_plot = plt.scatter(chPoints[:,0],chPoints[:,1])
plt.scatter(removedPoints[:,0],removedPoints[:,1], color='hotpink')
# get plot limits
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
# calculate and plot the initial arcs
for i in range(n):
# declare the points touching the arc
p1 = newSCHPoints[i % n]
p2 = newSCHPoints[(i + 1) % n]
# Arc characteristics
C, theta1, theta2 = findArc(p1,p2,initial_alpha)
# draw the arc
arc = mpatches.Arc(C, 2 * initial_alpha, 2 * initial_alpha, 0, theta1, theta2)
ax.add_patch(arc)
# modify axes scale and limits
plt.subplots_adjust(bottom = 0.25)
plt.axis('scaled')
plt.xlim(xmin-1,xmax+1)
plt.ylim(ymin-1,ymax+1)
# Create horizontal slider
sliderAxes = plt.axes([0.2, 0.05, 0.6, 0.1], facecolor = 'ghostwhite')
Nslider = Slider(
ax = sliderAxes,
label = 'Alpha value',
valmin = initial_alpha,
valmax = math.floor(removedPointsRadius[len(removedPointsRadius)-1]*1.25),
valinit = initial_alpha
)
# make points plot the active axis
plt.sca(ax)
# initialize the index at 0
index = 0
# get current alpha from alpha
currentAlpha = alpha
def update(N):
global newSCHPoints
global n, noRemovedPoints
global index, rIndex
global currentAlpha
# clear the axes
plt.cla()
while True:
# initialize variable to leave the loop
changeInPoints = False
# remove previous point from hull
# check if alfa is smaller than the previous and current point
if index > 0 and noRemovedPoints > 0 and N < removedPointsRadius[index] and N < removedPointsRadius[index-1]:
# delete all points from index-1 onwards
newSCHPoints = np.delete(chPoints,rIndex[index-1:],axis=0)
# change state of the variable to stay in the loop
changeInPoints = True
# update index and increase number of eliminated points
index -= 1
noRemovedPoints += 1
# check if last point is active and if N is smaller than its radius
elif noRemovedPoints == 0 and N < removedPointsRadius[index]:
# remove last point from hull
newSCHPoints = np.delete(chPoints,rIndex[len(rIndex)-1], axis=0)
# change state of the variable to stay in the loop
changeInPoints = True
# set the amount of eliminated points to 1
noRemovedPoints = 1
# add points to the hull
if N >= removedPointsRadius[index] and noRemovedPoints > 0:
# remove all points from index+1 onwards
newSCHPoints = np.delete(chPoints,rIndex[index+1:],axis=0)
# change state of the variable to stay in the loop
changeInPoints = True
# check if index is in range
if index < len(removedPoints) - 1:
# if in range, increment index and decrease no. of points to add
index += 1
noRemovedPoints -= 1
else:
# else, limit to max value and set amount of points to add to 0
index = len(removedPoints) - 1
noRemovedPoints = 0
# leave loop only if there aren't changes in the state of the points
if not changeInPoints:
break
# find new number of points in hull
n = len(newSCHPoints)
# calculate the arcs for all n points
for i in range(n):
# declare the points touched by the arc
p1 = newSCHPoints[i % n]
p2 = newSCHPoints[(i + 1) % n]
# get the center, initial and ending angle
C, theta1, theta2 = findArc(p1,p2,N)
# create and draw the arc
arc = mpatches.Arc(C, 2 * N, 2 * N, 0, theta1, theta2)
ax.add_patch(arc)
# plot points
plt.scatter(removedPoints[:,0],removedPoints[:,1], color='hotpink')
plt.scatter(newSCHPoints[:,0],newSCHPoints[:,1])
# set plot limits
plt.axis('scaled')
plt.xlim(xmin-1,xmax+1)
plt.ylim(ymin-1,ymax+1)
update(initial_alpha)
# Register the update function with each slider
Nslider.on_changed(update)
plt.show()
| true |
d6a5080d10682c9490401abd8e8ff4f6d62a0c30 | Python | japhet-ye/github-workshop-2019 | /simple.py | UTF-8 | 120 | 2.59375 | 3 | [] | no_license | # File: simple.py
# Created by Brandon Wong on 4/23/19
print("Simple python script")
print("Hello World!")
print("End me!")
| true |
ac6d9e3159638a541becda1c4bef1e413cd8c44e | Python | antti-hartikka/tsoha-blog | /accounts.py | UTF-8 | 4,789 | 2.828125 | 3 | [] | no_license | from werkzeug.security import check_password_hash, generate_password_hash
import re
from app import db
def create_user(username, password):
"""Returns "ok" if new user is created, otherwise returns error message string"""
if not validate_username(username):
return "käyttäjänimi on väärää muotoa"
if user_exists(username):
return "käyttäjänimi on jo käytössä"
if not validate_password(password):
return "salasana on väärää muotoa"
sql = "INSERT INTO users (username, password, user_group, is_active) " \
"VALUES (:username, :password, 'basic', TRUE)"
password_hash = generate_password_hash(password)
db.session.execute(sql, {"username": username, "password": password_hash})
db.session.commit()
return "ok"
def check_credentials(username, password):
"""returns true if username matches password in database,
returns false if validation returns false or user doesn't exist"""
if not validate_username(username) or not validate_password(password):
return False
sql = "SELECT password " \
"FROM users " \
"WHERE username=:username AND is_active=TRUE"
result = db.session.execute(sql, {"username": username})
user = result.fetchone()
if user is None:
return False
password_hash = user[0]
if check_password_hash(password_hash, password):
return True
return False
def set_username(old_name, new_name):
"""returns ok if username is updated, otherwise returns error message"""
if not validate_username(new_name):
return "käyttäjänimi on väärää muotoa"
if user_exists(new_name):
return "käyttäjänimi on jo käytössä"
sql = "UPDATE users " \
"SET username=:new " \
"WHERE username=:old"
db.session.execute(sql, {"new": new_name, "old": old_name})
db.session.commit()
return "ok"
def set_password(username, new_password):
"""returns "ok" if password is updated, otherwise returns error message"""
if not validate_password(new_password):
return "salasana on väärää muotoa"
new_password_hash = generate_password_hash(new_password)
sql = "UPDATE users " \
"SET password=:new_pw " \
"WHERE username=:username"
db.session.execute(sql, {"new_pw": new_password_hash, "username": username})
db.session.commit()
return "ok"
def set_user_group(username, user_group):
sql = "UPDATE users " \
"SET user_group=:new_group " \
"WHERE username=:username"
db.session.execute(sql, {"new_group": user_group, "username": username})
db.session.commit()
def get_user_group(username):
"""Returns user group as a string"""
sql = "SELECT user_group " \
"FROM users " \
"WHERE username=:username"
result = db.session.execute(sql, {"username": username})
user = result.fetchone()
if user is None:
return ""
user_group = user[0]
return user_group
def get_user_list():
sql = "SELECT username " \
"FROM users " \
"WHERE is_active"
result = db.session.execute(sql)
return result.fetchall()
def user_exists(username):
"""Returns true if database contains user"""
sql = "SELECT username " \
"FROM users " \
"WHERE username=:username"
result = db.session.execute(sql, {"username": username})
user = result.fetchone()
if user is None:
return False
else:
return True
def delete_account(username):
"""Sets user type to basic, sets password to "deleted",
sets username to "[deleted]", sets is_active to FALSE"""
set_user_group(username, "basic")
password = generate_password_hash("deleted")
set_password(username, password)
sql = "UPDATE users " \
"SET is_active=FALSE, username='[deleted]' " \
"WHERE username=:username"
db.session.execute(sql, {"username": username})
db.session.commit()
def get_user_id(username):
"""returns -1 if user not found"""
if not user_exists(username):
return -1
sql = "SELECT id " \
"FROM users " \
"WHERE username=:username"
result = db.session.execute(sql, {"username": username})
user_id = result.fetchone()[0]
return user_id
def validate_username(username):
"""returns true if username is 3-20 characters long
and consists of letters and numbers"""
if re.match(r"^[a-zA-Z0-9åäöÅÄÖ]{3,20}$", username):
return True
return False
def validate_password(password):
"""returns true if password is 10-30 characters long
and consists of letters and numbers"""
if re.match(r"^[a-zA-Z0-9]{10,30}$", password):
return True
return False
| true |
2359c61cfaad34c4f4148fb247be7507eb96f485 | Python | mysilver/COMP9321-Data-Services | /Week10_Regression_and_Clustering/activity_3.py | UTF-8 | 1,693 | 3.203125 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
from sklearn.cluster import KMeans, SpectralClustering, AgglomerativeClustering
from sklearn.utils import shuffle
def load_iris(iris_path):
df = pd.read_csv(iris_path)
df = shuffle(df)
df_without_label = df.drop('Diet', axis=1)
return df, df_without_label
if __name__ == '__main__':
csv_file = 'diet.csv'
# Split the data into test and train parts
df, df_without_label = load_iris(csv_file)
# Fit a k-means estimator
estimator = AgglomerativeClustering(n_clusters=3)
estimator.fit(df_without_label)
# Clusters are given in the labels_ attribute
labels = estimator.labels_
df['cluster'] = pd.Series(labels, index=df.index)
print(labels)
# divide the dataset into three dataframes based on the species
cluster_0_df = df.query('cluster == 0')
cluster_1_df = df.query('cluster == 1')
cluster_2_df = df.query('cluster == 2')
fig, axes = plt.subplots(nrows=1, ncols=1)
fig.set_size_inches(18.5, 10.5)
fig.tight_layout()
ax = cluster_0_df.plot.scatter(x='pre.weight', y='weight6weeks', label='Cluster-0', color='blue', ax=axes)
ax = cluster_1_df.plot.scatter(x='pre.weight', y='weight6weeks', label='Cluster-1', color='red', ax=ax)
ax = cluster_2_df.plot.scatter(x='pre.weight', y='weight6weeks', label='Cluster-2', color='green', ax=ax)
for i, label in enumerate(df['Diet']):
label = "Diet_" + str(label)
ax.annotate(label, (list(df['pre.weight'])[i], list(df['weight6weeks'])[i]), color='gray', fontSize=9,
horizontalalignment='left',
verticalalignment='bottom')
plt.show()
| true |
9462fafd7ee3189b5b8dd8980bc6a8349d8aa6ca | Python | axlrosen/misc | /specific/find-ambiguous-s2.py | UTF-8 | 611 | 2.9375 | 3 | [] | no_license | import re
import Levenshtein
# load our dicts
from collections import Counter, defaultdict
entries = set()
clued_words = open("/Users/alex.rosen/personal/xword/dicts/split15.txt").readlines()
for word in clued_words:
entries.add(word.split('/')[1])
names = set()
ns = open("/Users/alex.rosen/personal/xword/corpora/first-names.txt").readlines()
for n in ns:
names.add(n.rstrip().lower())
for entry in entries:
if len(entry) < 7: continue
i = entry.rfind(' s')
before = entry[:i]
after = entry[i+2:]
if before in names and after in entries:
print(f"{before}'s {after}")
| true |
ba8adf75973c9e2d45f7a8bcd8629fd248ff74f7 | Python | HiggsHydra/permian-frac-exchange | /src/fsec/handlers.py | UTF-8 | 776 | 2.59375 | 3 | [
"MIT"
] | permissive | from abc import ABC, abstractmethod
from datetime import datetime
import pytz
class BaseHandler(ABC):
@abstractmethod
def check(self):
pass
class DateHandler(BaseHandler):
tz = pytz.timezone("US/Central")
def safe_localize(self, dt: datetime) -> datetime:
try:
if dt: # bypasses None, NaN, and NaT
if not isinstance(dt, pd.Timestamp):
dt = pd.to_datetime(dt)
dt = dt.tz_localize(self.tz)
except:
logger.debug(f"Value not localized timestamp -> {dt}")
return dt
class NaNHandler(BaseHandler):
def nan_to_none(d: dict):
for k, v in d.items():
if v in ["NaN", "NaT", "None"]:
d[k] = None
return d
| true |
1c1e7830d755683616d0cf29f94e8d36a5023740 | Python | nabilnabs1080/FIJ_Robotique | /00_algorithmique/01_python/boucles.py | UTF-8 | 416 | 4.15625 | 4 | [] | no_license | # il existe 2 type de boucles
# While > tant que...
# avec un compteur qui diminue a chaque tour
compteur = 10
while compteur > 0 :
print("le cours est fini dans :", compteur)
compteur = compteur -1
#avec un boolean
flag = True
while flag :
print("bonjour")
flag = False
#For > pour chaque
phrase = "Bonjour a tous !"
for lettre in phrase:
if lettre in "aeiouy":
print(lettre)
| true |
577d7ad9d00f8a7087e5a73e3f87a9d7b2e2a1aa | Python | pizzato/FoosballTable | /models.py | UTF-8 | 6,746 | 2.9375 | 3 | [] | no_license | import config
import tools
import elo
class Player(object):
def __init__(self, player_id, name, photo, player_stats, attack_stats, defense_stats):
self.player_id = player_id
self.name = name
self.photo = photo
self.player_stats = player_stats
self.attack_stats = attack_stats
self.defense_stats = defense_stats
def __eq__(self, other):
return self.__dict__ == other.__dict__
class Team(object):
def __init__(self, team_id, defense_player, attack_player, team_stats):
self.team_id = team_id
self.defense_player = defense_player
self.attack_player = attack_player
self.team_stats = team_stats
def summary(self):
if self.defense_player == self.attack_player:
return "{defense}".format(defense=self.defense_player.name)
else:
return "{defense}+{attack}".format(defense=self.defense_player.name, attack=self.attack_player.name)
class Game(object):
def __init__(self, game_id, timestamp, left_team, right_team, left_score=0, right_score=0, ended=0):
self.game_id = game_id
self.timestamp = timestamp
self.left_team = left_team
self.right_team = right_team
self.left_score = left_score
self.right_score = right_score
self.ended = ended
def goal_scored(self, side, value=1):
if side == config.RIGHT:
self.right_score += value
return self.right_score
elif side == config.LEFT:
self.left_score += value
return self.left_score
else:
return 0
def time_left(self):
return config.GAME_TIME_LIMIT - tools.get_seconds_from_timestamp(self.timestamp)
def time_left_string(self):
return tools.seconds_string(self.time_left())
def game_should_end(self):
should_end = (not self.ended) and ((((self.left_score >= config.GAME_GOAL_LIMIT) or \
(self.right_score >= config.GAME_GOAL_LIMIT)) or \
(self.time_left() < 0)))
if should_end:
self.ended = 1
return should_end
def predicted_player_score(self):
elo_left = (self.left_team.defense_player.player_stats.elo_rating + self.left_team.attack_player.player_stats.elo_rating)
elo_right = (self.right_team.defense_player.player_stats.elo_rating + self.right_team.attack_player.player_stats.elo_rating)
#diff = self.left_team.team_stats.elo_rating - self.right_team.team_stats.elo_rating
diff = elo_left - elo_right
predicted_left_score, predicted_right_score = elo.predicted_score(diff=diff, MAX_SCORE=config.GAME_GOAL_LIMIT)
return "{left} x {right}".format(left=predicted_left_score, right=predicted_right_score)
def predicted_team_score(self):
elo_left = (self.left_team.team_stats.elo_rating)
elo_right = (self.right_team.team_stats.elo_rating)
diff = elo_left - elo_right
predicted_left_score, predicted_right_score = elo.predicted_score(diff=diff, MAX_SCORE=config.GAME_GOAL_LIMIT)
return "{left} x {right}".format(left=predicted_left_score, right=predicted_right_score)
def predicted_position_score(self):
elo_left = (self.left_team.defense_player.defense_stats.elo_rating + self.left_team.attack_player.attack_stats.elo_rating)
elo_right = (self.right_team.defense_player.defense_stats.elo_rating + self.right_team.attack_player.attack_stats.elo_rating)
diff = elo_left - elo_right
predicted_left_score, predicted_right_score = elo.predicted_score(diff=diff, MAX_SCORE=config.GAME_GOAL_LIMIT)
return "{left} x {right}".format(left=predicted_left_score, right=predicted_right_score)
def summary(self):
sum_dict = dict(tleft=self.left_team.summary(), tright=self.right_team.summary(), sleft=self.left_score, sright=self.right_score)
if self.ended == 0:
return "Game in progress between {tleft} and {tright} the score is {sleft}x{sright}".format(**sum_dict)
elif self.left_score == self.right_score:
return "Draw between {tleft} and {tright} the score was {sleft}x{sright}".format(**sum_dict)
elif self.right_score < self.left_score:
return "{tleft} defeated {tright} with the score {sleft}x{sright}".format(**sum_dict)
else:
return "{tright} defeated {tleft} with the score {sright}x{sleft}".format(**sum_dict)
class Stats:
def __init__(self,
stats_id=None,
player_id=None,
attack_player_id=None,
defense_player_id=None,
team_id=None,
wins=0, draws=0, losses=0,
goals_pro=0, goals_against=0,
elo_rating=elo.INITIAL_RATING,
timestamp=tools.get_timestamp_for_now()):
self.stats_id = stats_id
assert 1 == (player_id is not None) + (attack_player_id is not None) + (defense_player_id is not None) + (team_id is not None), "Use only one of these (player_id, attacker_id, defender_id, team_id)"
self.player_id = player_id
self.attack_player_id = attack_player_id
self.defense_player_id = defense_player_id
self.team_id = team_id
self.wins = wins
self.draws = draws
self.losses = losses
self.goals_pro = goals_pro
self.goals_against = goals_against
self.elo_rating = elo_rating
self.timestamp = timestamp
def perc_win(self):
if (self.wins+self.losses+self.draws) == 0:
return 0.0
return (self.wins / float(self.wins+self.losses+self.draws))
def perc_win_str(self):
if (self.wins+self.losses+self.draws) == 0:
return "--"
return "%2.0f%%"%(100*self.perc_win())
def goal_ratio(self):
if (self.goals_pro + self.goals_against) == 0:
return 0.0
return (self.goals_pro - self.goals_against)/float(self.goals_pro + self.goals_against)
def goal_ratio_str(self):
if (self.goals_pro + self.goals_against) == 0:
return "--"
return "%2.0f%%"%(100*self.goal_ratio())
def elo_rating_str(self):
return "%2.0f"%self.elo_rating
def update(self, i_wins=0, i_draws=0, i_losses=0, i_goals_pro=0, i_goals_against=0, i_elo_rating=0, timestamp=tools.get_timestamp_for_now()):
self.wins += i_wins
self.draws += i_draws
self.losses += i_losses
self.goals_pro += i_goals_pro
self.goals_against += i_goals_against
self.elo_rating += i_elo_rating
self.timestamp = timestamp | true |
3cf57c4cb23cf4e46e61b7e33cd9ba7236d9d8a0 | Python | Kaushal-Chapaneri/docu-map | /get_auth_token.py | UTF-8 | 2,254 | 2.71875 | 3 | [
"MIT"
] | permissive | """
filename : get_auth_token.py
install requirements.txt in virtual environment.
command to run : python get_auth_token.py
This script is responsible for generating auth token. \
it has flask server to which docusing callbacks and sends token. \
this script needs to run first time only when setting up this project.
"""
from docusign_esign import ApiClient
import uuid
import requests
import json
from flask import Flask
from flask import request
import base64
from datetime import datetime
# update your credetials in config.json before running this script.
with open('config.json') as f:
config = json.load(f)
app = Flask(__name__)
api_client = ApiClient(oauth_host_name=config["authorization_server"])
url = api_client.get_authorization_uri(client_id=config["ds_client_id"], scopes=["signature"], redirect_uri="http://localhost:5000/ds/callback", response_type="code", state=uuid.uuid4().hex.upper())
#run this output URL in browser to get callback
print("Run this URL in browser : =========> ", url)
#here is the route callback to which docusing interacts
@app.route('/ds/callback')
def generate_token():
# to get auth token
url1 = "https://"+config["authorization_server"]+"/oauth/token"
integrator_and_secret_key = "Basic " + base64.b64encode(str.encode("{}:{}".format(config["ds_client_id"], config["ds_client_secret"]))).decode("utf-8")
headers = {
"Authorization": integrator_and_secret_key,
"Content-Type": "application/x-www-form-urlencoded",
}
post_params = {
"grant_type": "authorization_code",
"code": request.args.get("code")
}
response = requests.post(url1, headers=headers, params=post_params)
auth_data = json.loads(response.text)
auth_data['created_at'] = str( datetime.today())
resource_path = '/oauth/userinfo'
headers = {"Authorization": "Bearer " + auth_data['access_token']}
# to get user information
url2 = "https://"+config["authorization_server"]+"/oauth/userinfo"
response = requests.get(url2, headers=headers)
data = json.loads(response.text)
auth_data['user_info'] = data
with open("auth_data.json", "w") as outfile:
json.dump(auth_data, outfile)
return data
if __name__ == '__main__':
app.run()
| true |
e6c1b35a23b15c319b1a2343f874ff1a7bea2484 | Python | instigateideas/Instigate_Ideas | /reddit_extraction_pipeline/reddit_search/reddit_search.py | UTF-8 | 4,618 | 2.546875 | 3 | [] | no_license |
import json
import os
import time
import sys
import requests
#sys.path.append("../")
from config import Config
class RedditSearch(object):
def __init__(self, save_path):
self.save_path = save_path
self.default_config = Config()
self.keywords_list = self.default_config.keywords
self.username = self.default_config.user_name
self.password = self.default_config.password
self.access_key = self.default_config.access_key
self.secret_key = self.default_config.secret_key
def create_save_path(self, save_path):
if not os.path.exists(save_path):
os.makedirs(save_path)
def get_url(self, call_count, keyword, after_id=None):
if call_count == 1:
base_url = f"https://api.reddit.com/subreddits/search?q={keyword}&limit=100&raw_json=1"
else:
base_url = f"https://api.reddit.com/subreddits/search?q={keyword}&after={after_id}&limit=100&raw_json=1"
return base_url
def get_refresh_access_token(self):
client_auth = requests.auth.HTTPBasicAuth(self.access_key, self.secret_key)
post_data = {"grant_type": "password", "username": self.username, "password": self.password}
headers = {"User-Agent": "ChangeMeClient/0.1 by YourUsername"}
response = requests.post("https://www.reddit.com/api/v1/access_token", auth=client_auth, data=post_data, headers=headers)
client = requests.session()
client.headers = headers
return client
def refresh_access_token(self, start_time):
if (time.time() - start_time) > 3200:
print("Refreshing access token")
client = get_refresh_access_token()
start_time = time.time()
return start_time
def save_as_json(self, data, path, file_name):
with open(f"{path}/{file_name}", "w") as outfile:
outfile.write(json.dumps(data))
def avoiding_rate_limitation(self, start_time, request_count):
elapsed_time = (time.time() - start_time)
if request_count >= 30:
remaining_time_sleep = 60 - elapsed_time
print(f"Request crossed 30 per minute, so sleeping for {remaining_time_sleep} seconds")
time.sleep(remaining_time_sleep)
# Resetting start time and request counts
start_time = time.time()
request_count = 2
return start_time, request_count
def search_reddit_api(self, keyword, request_count=1, overall_time = time.time(), last_id=None):
start_time = time.time()
data_downloaded = 0
while True:
if request_count == 1:
client = self.get_refresh_access_token()
request_url = self.get_url(call_count=request_count, keyword=keyword)
reddit_response = client.get(request_url)
data = json.loads(reddit_response.content)
len_data = len(data["data"]["children"])
data_downloaded = data_downloaded + int(len_data)
new_file_name = "{}_{}_subreddit_file_{}.json".format(keyword, request_count, len_data)
self.save_as_json(data=data, path=self.save_path, file_name=new_file_name)
last_id = data["data"]["after"]
print("Last subreddit id extracted: ", last_id)
else:
request_url = self.get_url(call_count=request_count, keyword=keyword, after_id=last_id)
reddit_response = client.get(request_url)
try:
data = json.loads(reddit_response.content)
except Exception as e:
print("Got an error due to over loading the Reddit API")
print("Sleeping for 1 mins..")
time.sleep(60)
request_count = self.search_reddit_api(keyword, request_count=request_count, last_id=last_id)
len_data = len(data["data"]["children"])
data_downloaded = data_downloaded + int(len_data)
new_file_name = "{}_{}_subreddit_file_{}.json".format(keyword, request_count, len_data)
self.save_as_json(data=data, path=self.save_path, file_name=new_file_name)
last_id = data["data"]["after"]
start_time, request_count = self.avoiding_rate_limitation(start_time=start_time, request_count=request_count)
overall_time = self.refresh_access_token(start_time=overall_time)
if last_id == None:
print("Completed extraction total subreddits extracted are: ", data_downloaded)
break
request_count = request_count + 1
return request_count, overall_time
def reddit_search_all_keywords(self):
self.create_save_path(save_path=self.save_path)
cnt = 1
for keyword in self.keywords_list:
if cnt == 1:
request_count, overall_time = self.search_reddit_api(keyword=keyword)
else:
request_count, overall_time = self.search_reddit_api(keyword=keyword, request_count=request_count, overall_time=overall_time)
if __name__ == "__main__":
save_path = "/home/arunachalam/Documents/sense2vec_exp/output_api/subreddits"
reddit_search = RedditSearch(save_path=save_path)
reddit_search.reddit_search_all_keywords()
| true |
c7d96a4b69621730d694a8ca5aa72facd02cab11 | Python | shahrukh330/soxincfg-1 | /modules/programs/weechat/config/python/styurl.py | UTF-8 | 5,835 | 2.59375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#
# Copyright (C) 2019 Cole Helbling <cole.e.helbling@outlook.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Changelog:
# 2019-12-14, Cole Helbling <cole.e.helbling@outlook.com>
# version 1.0: initial release
# https://github.com/cole-h/styurl-weechat
SCRIPT_NAME = "styurl"
SCRIPT_AUTHOR = "Cole Helbling <cole.e.helbling@outlook.com>"
SCRIPT_VERSION = "1.0"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Style URLs with a Python regex"
import_ok = True
try:
import weechat as w
except ImportError:
print("This script must be run under WeeChat.")
print("Get WeeChat now at: https://weechat.org")
import_ok = False
try:
import re
except ImportError as message:
print("Missing package for %s: %s" % (SCRIPT_NAME, message))
import_ok = False
# https://mathiasbynens.be/demo/url-regex
# If you don't want to create your own regex, see the above link for options or
# ideas on creating your own
styurl_settings = {
"buffer_type": (
"formatted",
"the type of buffers to run on (options are \"formatted\", \"free\", "
"or \"*\" for both)"
),
"format": (
"${color:*_32}",
"the style that should be applied to the URL"
"(evaluated, see /help eval)"
),
"ignored_buffers": (
"core.weechat,python.grep",
"comma-separated list of buffers to ignore URLs in "
"(full name like \"irc.freenode.#alacritty\")"
),
"ignored_tags": (
"irc_quit,irc_join",
"comma-separated list of tags to ignore URLs from"
),
"regex": (
r"((?:https?|ftp)://[^\s/$.?#].\S*)",
"the URL-parsing regex using Python syntax "
"(make sure capturing group 1 is the full URL)"
),
}
line_hook = None
def styurl_line_cb(data, line):
"""
Callback called when a line is displayed.
This parses the message for any URLs and styles them according to
styurl_settings["format"].
"""
global styurl_settings
# Don't style the line if it's not going to be displayed... duh
if line["displayed"] != "1":
return line
tags = line["tags"].split(',')
ignored_tags = styurl_settings["ignored_tags"]
# Ignore specified message tags
if ignored_tags:
if any(tag in tags for tag in ignored_tags.split(',')):
return line
bufname = line["buffer_name"]
ignored_buffers = styurl_settings["ignored_buffers"]
# Ignore specified buffers
if ignored_buffers and bufname in ignored_buffers.split(','):
return line
message = line["message"]
# TODO: enforce presence of a properly-formatted color object at
# styurl_settings["format"] (eval object would also be valid, if it eval'd
# to a color)
regex = re.compile(styurl_settings["regex"])
url_style = w.string_eval_expression(styurl_settings["format"], {}, {}, {})
reset = w.color("reset")
# Search for URLs and surround them with the defined URL styling
formatted = regex.sub(r"%s\1%s" % (url_style, reset), message)
line["message"] = line["message"].replace(message, formatted)
return line
def styurl_config_cb(data, option, value):
"""Callback called when a script option is changed."""
global styurl_settings, line_hook
pos = option.rfind('.')
if pos > 0:
name = option[pos+1:]
if name in styurl_settings:
# Changing the buffer target requires us to re-hook to prevent
# obsolete buffer types from getting styled
if name == "buffer_type":
if value in ("free", "formatted", "*"):
w.unhook(line_hook)
line_hook = w.hook_line(value, "", "", "styurl_line_cb",
"")
else:
# Don't change buffer type if it is invalid
w.prnt("", SCRIPT_NAME + ": Invalid buffer type: '%s', "
"not changing." % value)
w.config_set_plugin(name, styurl_settings[name])
return w.WEECHAT_RC_ERROR
styurl_settings[name] = value
return w.WEECHAT_RC_OK
def styurl_unload_cb():
"""Callback called when the script is unloaded."""
global line_hook
w.unhook(line_hook)
del line_hook
return w.WEECHAT_RC_OK
if __name__ == "__main__" and import_ok:
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, "styurl_unload_cb", ""):
version = w.info_get("version_number", "") or 0
for option, value in styurl_settings.items():
if w.config_is_set_plugin(option):
styurl_settings[option] = w.config_get_plugin(option)
else:
w.config_set_plugin(option, value[0])
styurl_settings[option] = value[0]
if int(version) >= 0x00030500:
w.config_set_desc_plugin(option, "%s (default: \"%s\")"
% (value[1], value[0]))
w.hook_config("plugins.var.python." + SCRIPT_NAME + ".*",
"styurl_config_cb", "")
# Style URLs
line_hook = w.hook_line(styurl_settings["buffer_type"], "", "",
"styurl_line_cb", "")
| true |
3c03d997dac4e4afb47d30826aa860b9b4795afe | Python | reynoldscem/scholars_search | /main.py | UTF-8 | 3,693 | 2.96875 | 3 | [] | no_license | '''Script for obtaining stats about authors from google scholar'''
from argparse import ArgumentParser
from scholarly import search_author
from joblib import Parallel, delayed
import os
MAX_AUTHORS = 3
N_JOBS = 4
KEYWORDS = [
'image registration', 'deformable', 'non-rigid', 'shape matching',
'convolutional', 'cnn', 'neural', 'medical image',
'learning', 'registration', 'supervised', 'deep',
'adversarial', 'affine',
'rigid', 'diffeomorphism', 'spline',
'deformation', 'appearance', 'elastic',
'alignment'
]
# 'loss', 'network', 'information', 'models',
def build_parser():
parser = ArgumentParser(description=__doc__)
parser.add_argument(
'author_filename',
help='Path to text file containing list of authors'
)
parser.add_argument(
'-j', '--n-jobs',
default=N_JOBS,
help='Number of parallel jobs to use.'
)
return parser
def assert_file_exists(filename):
message = '{} does not exist.'.format(filename)
assert os.path.isfile(filename), message
def load_authors(filename):
with open(filename) as fd:
author_list = fd.read().splitlines()
author_list = list(sorted(set(author_list)))
return author_list
def rank_authors(authors):
from collections import defaultdict
from itertools import islice
author_to_match_count = defaultdict(int)
authors = list(islice(authors, MAX_AUTHORS))
if len(authors) == 0:
raise StopIteration
elif len(authors) == 1:
return authors
for index, author in enumerate(authors):
# print('{} {}'.format(index, author.name))
publications = author.fill().publications
print('{} publications:'.format(len(publications)))
for publication in publications:
title = publication.bib['title'].lower()
count_for_pub = 0
for keyword in KEYWORDS:
count_for_pub += title.count(keyword)
# if count_for_pub > 0:
# print(title)
# print(count_for_pub)
author_to_match_count[author] += count_for_pub
# keyword_matches = author_to_match_count[author]
# print('{} keyword matches'.format(keyword_matches), flush=True)
# print()
return list(sorted(
author_to_match_count,
key=author_to_match_count.get,
reverse=True
))
def get_author_info(author_name, callback=None):
try:
query = search_author(author_name)
potential_authors = list(query)
best_author = rank_authors(potential_authors)[0]
author_result = best_author.fill()
if callback:
callback(author_name, author_result)
except StopIteration:
print('{} not found!'.format(author_name))
return
except Exception as e:
print(
'Exception occurred for {}! Message: {}'.format(author_name, e),
flush=True
)
return
return author_result
def print_author_result(author_name, author_result):
print(
'{} ({}, {}) cited by {}, h-index of {}'
''.format(
author_result.name, author_name, author_result.id,
author_result.citedby, author_result.hindex
),
flush=True
)
def main():
args = build_parser().parse_args()
assert_file_exists(args.author_filename)
authors = load_authors(args.author_filename)
print('{} total authors'.format(len(authors)))
print()
author_infos = Parallel(n_jobs=args.n_jobs)(
delayed(get_author_info)(author, print_author_result)
for author in authors
)
if __name__ == '__main__':
main()
| true |
96a531a1018cb5ff0300f6cb084d7b6434706d8e | Python | jashasweejena/scripts | /geocoding.py | UTF-8 | 360 | 2.828125 | 3 | [] | no_license | import requests as r
import sys
(lat, long) = sys.argv[1:]
try:
reqJson = r.get('https://nominatim.openstreetmap.org/reverse?format=json&lat={}&lon={}&zoom=18&addressdetails=1'.format(lat, long))
if(reqJson.status_code == 200):
reqJson = reqJson.json()
print(reqJson['display_name'])
except:
print("Invalid latitude and longitude")
| true |
806e664ec4c2dd2c270c1a1a9dfebce39db2efcc | Python | Semper4u/KMeans | /untitled/load_cifar.py | UTF-8 | 3,648 | 3.09375 | 3 | [] | no_license | class Cifar10DataReader():
import os
import random
import numpy as np
import pickle
def __init__(self, cifar_file, one_hot=False, file_number=1):
self.batch_index = 0 # 第i批次
self.file_number = file_number # 第i个文件数r
self.cifar_file = cifar_file # 数据集所在dir
self.one_hot = one_hot
self.train_data = self.read_train_file() # 一个数据文件的训练集数据,得到的是一个1000大小的list,
self.test_data = self.read_test_data() # 得到1000个测试集数据
# 读取数据函数,返回dict
def unpickle(self, file):
with open(file, 'rb') as fo:
try:
dicts = self.pickle.load(fo, encoding='bytes')
except Exception as e:
print('load error', e)
return dicts
# 读取一个训练集文件,返回数据list
def read_train_file(self, files=''):
if files:
files = self.os.path.join(self.cifar_file, files)
else:
files = self.os.path.join(self.cifar_file, 'data_batch_%d' % self.file_number)
dict_train = self.unpickle(files)
train_data = list(zip(dict_train[b'data'], dict_train[b'labels'])) # 将数据和对应标签打包
self.np.random.shuffle(train_data)
print('成功读取到训练集数据:data_batch_%d' % self.file_number)
return train_data
# 读取测试集数据
def read_test_data(self):
files = self.os.path.join(self.cifar_file, 'test_batch')
dict_test = self.unpickle(files)
test_data = list(zip(dict_test[b'data'], dict_test[b'labels'])) # 将数据和对应标签打包
print('成功读取测试集数据')
return test_data
# 编码得到的数据,变成张量,并分别得到数据和标签
def encodedata(self, detum):
rdatas = list()
rlabels = list()
for d, l in detum:
rdatas.append(self.np.reshape(self.np.reshape(d, [3, 1024]).T, [32, 32, 3]))
if self.one_hot:
hot = self.np.zeros(10)
hot[int(l)] = 1
rlabels.append(hot)
else:
rlabels.append(l)
return rdatas, rlabels
# 得到batch_size大小的数据和标签
def nex_train_data(self, batch_size=100):
assert 1000 % batch_size == 0, 'erro batch_size can not divied!' # 判断批次大小是否能被整除
# 获得一个batch_size的数据
if self.batch_index < len(self.train_data) // batch_size: # 是否超出一个文件的数据量
detum = self.train_data[self.batch_index * batch_size:(self.batch_index + 1) * batch_size]
datas, labels = self.encodedata(detum)
self.batch_index += 1
else: # 超出了就加载下一个文件
self.batch_index = 0
if self.file_number == 5:
self.file_number = 1
else:
self.file_number += 1
self.read_train_file()
return self.nex_train_data(batch_size=batch_size)
return datas, labels
# 随机抽取batch_size大小的训练集
def next_test_data(self, batch_size=100):
detum = self.random.sample(self.test_data, batch_size) # 随机抽取
datas, labels = self.encodedata(detum)
return datas, labels
if __name__ == '__main__':
import matplotlib.pyplot as plt
Cifar10 = Cifar10DataReader(r'E:/dataset/cifar10/cifar', one_hot=True)
d, l = Cifar10.nex_train_data()
print(len(d))
print(d)
plt.imshow(d[0])
plt.show() | true |
bba51e3b3560ee114b1de87d53f65378d9534b4d | Python | mattupstate/adventofcode2018 | /src/aoc2018/d3/a.py | UTF-8 | 1,469 | 2.828125 | 3 | [] | no_license | import itertools
import sys
from collections import namedtuple
from aoc2018.data import load_input
GRIDS = dict()
class Claim(object):
def __init__(self, id, x, y, width, height):
self.id = id
self.x = x
self.y = y
self.width = width
self.height = height
def parse_claim(value):
id, parts = value.strip().split(' @ ')
position, dimensions = parts.split(': ')
x, y = position.split(',')
width, height = dimensions.split('x')
return Claim(id, int(x), int(y), int(width), int(height))
def generate_grid(claim, canvas_width, canvas_height):
canvas_width = canvas_width or claim.width
canvas_height = canvas_height or claim.height
return {canvas_width * (claim.y + r) + claim.x + c for r in range(claim.height) for c in range(claim.width)}
def get_grid(claim, canvas_width, canvas_height):
if claim.id not in GRIDS:
GRIDS[claim.id] = generate_grid(claim, canvas_width, canvas_height)
return GRIDS[claim.id]
def load_claims():
return list(map(parse_claim, load_input(__file__)))
def main():
claims = load_claims()
right = max([claim.x + claim.width for claim in claims])
bottom = max([claim.y + claim.height for claim in claims])
result = set()
for a, b in [(a, b) for a in claims for b in claims if a != b]:
result.update(get_grid(a, right, bottom) & get_grid(b, right, bottom))
print(len(result))
if __name__ == "__main__":
main()
| true |
645e6c866a46391ad1d003d3f1edf69f883d446c | Python | gistable/gistable | /all-gists/8b967950ceeabec7d920/snippet.py | UTF-8 | 3,741 | 3.171875 | 3 | [
"MIT"
] | permissive | from clarifai.client import ClarifaiApi
import spotipy
class SpotiTags(ClarifaiApi):
"""
A wrapper to tag spotify album covers from a given artist
using the Clarifai Deep-learning API.
Requires to set-up the Clarifai API first.
Usage:
sp = SpotiTags()
print sp.tag('3jOstUTkEu2JkjvRdBA5Gu')
"""
# A blacklist of terms that are (generally) not relevant to artists,
# but generic to the "album cover" artistic concept
BLACKLIST = [
'art',
'artistic',
'background',
'design',
'graphic',
'graphic design',
'illustration',
'painting',
'portrait',
'poster',
'retro',
'sign',
'symbol',
'vector'
]
def __init__(self):
super(SpotiTags, self).__init__()
self._spotipy = spotipy.Spotify()
self._cleanup()
def _cleanup(self):
self._image_tags = {}
self._tags = {}
def _get_covers(self, artist, limit=10):
"""
Get artist images from the Spotify API.
Removes duplicate and epty results.
Parameters:
- query: the query string (e.g. 'motorhead')
- limit: the number of images (optional)
Output:
- list: a list of distinct image URLs
"""
albums = self._spotipy.artist_albums(artist).get('items')
covers = [self._get_largest_image(album['images']) for album in albums]
covers = list(set(filter(None, covers)))
return limit < len(covers) and covers[:limit] or covers
def _get_largest_image(self, images):
"""
Returns the largest Spotify images among the images list.
Parameters:
- images: a list of images (as Spotify API dicts)
Output:
- image: A single image, None if no images available
"""
sorted_images = sorted(images, key=lambda x: x.get('height'), reverse=True)
return sorted_images and sorted_images[0].get('url') or None
def _tag(self, images):
"""
Tag images via the Clarifai API.
Group results as a dict using the image URL as a key, and a dict of
class => value as the dict value, e.g.
{
"http://example.org/foo" : {
"bar": 0.65
}
}
Parameters:
- images: a list of images URLs to tag
Output:
- tags: a dictionary of images URLs and tags (as desribed above)
"""
tags = {}
for result in self.tag_image_urls(images)['results']:
classes = result['result']['tag']['classes']
prob = result['result']['tag']['probs']
tags[result['url']] = dict([class_, prob[i]] for (i, class_) in enumerate(classes))
return tags
def tag(self, artist):
"""
Run the whole tagging process.
Aggregates the value of each tag, and return sorted results (most popular first).
Uses a blacklist of not relevant terms.
parameters:
- query: the query string, i.e. artist name (e.g. 'motorhead')
Output:
- tags: a list of (tag, value) items, ordered by most popular tags first
"""
self._cleanup()
covers = self._get_covers(artist)
for cover, tags in self._tag(covers).items():
for tag, value in tags.items():
if tag in self.BLACKLIST:
continue
self._tags.setdefault(tag, 0)
self._tags[tag] += value
return sorted(self._tags.items(), key=lambda x: x[1], reverse=True) | true |
20a2c19687e6df1122d6d9c986e0c01b2640ab9e | Python | Fixitdaz/skyspy | /skyutils/lastseen.py | UTF-8 | 1,308 | 2.875 | 3 | [
"MIT"
] | permissive | import json
import math
from datetime import datetime
from bs4 import BeautifulSoup
def update_last_seen():
with open('web/data.json') as f:
data = json.load(f)
last_ts = data['data']['ts']
try:
last_ts = datetime.strptime(last_ts, "%Y-%m-%d %H:%M")
time_diff = (datetime.now() - last_ts).total_seconds()
last_seen = get_text_to_display(time_diff)
except ValueError:
last_seen = "N/A"
data['data']['lastSeen'] = last_seen
with open('web/data.json', 'w') as f:
json.dump(data, f)
def get_text_to_display(time):
# convert to minutes
time = math.floor(time / 60)
# handling default starting time of 2020-1-1
if time > 500000:
return "N/A"
if time == 0:
return "Just now"
if time == 1:
return f"{time} minute ago"
if time < 60:
return f"{time} minutes ago"
hours = math.floor(time / 60)
minutes = time % 60
return f"{hours}h{minutes:02d} minutes ago"
def update_data_ts():
with open('web/data.json', 'r') as f:
data = json.load(f)
with open('web/data.json', 'w') as f:
data['data']['dataTs'] = str(datetime.now().strftime("%Y-%m-%d %H:%M"))
json.dump(data, f)
| true |
c127f6b0abcf346d31a4fdf93015abfa40d295d2 | Python | ShanksVision/dl_cvnextsteps | /Excercises/visualize_detection.py | UTF-8 | 9,642 | 2.765625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 6 18:49:32 2020
@author: shankarj
visualization script for obj detection.
Code imported and modidied from
https://github.com/yhenon/pytorch-retinanet
"""
import torch
import numpy as np
import time
import os
import csv
import cv2
import matplotlib.pyplot as plt
def load_classes(csv_reader):
    """Parse a class-list CSV into a ``{class_name: class_id}`` mapping.

    Each row must have the form ``class_name,class_id`` where ``class_id``
    is an integer.

    Parameters
    ----------
    csv_reader : iterable
        Iterable of rows (e.g. ``csv.reader``), each a 2-item sequence.

    Returns
    -------
    dict
        Mapping of class name (str) to class id (int).

    Raises
    ------
    ValueError
        If a row does not have exactly two fields, a class id is not an
        integer, or a class name appears more than once.
    """
    result = {}

    for line, row in enumerate(csv_reader, start=1):
        try:
            class_name, class_id = row
        except ValueError:
            raise ValueError('line {}: format should be \'class_name,class_id\''.format(line))

        # Bug fix: the int() conversion used to sit outside the try, so a
        # non-integer id raised a bare ValueError with no line context.
        try:
            class_id = int(class_id)
        except ValueError:
            raise ValueError('line {}: malformed class ID: \'{}\''.format(line, class_id))

        if class_name in result:
            raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
        result[class_name] = class_id
    return result
# Draws a caption next to a bounding box on an image (in place).
def draw_caption(image, box, caption, isPred=False):
    """Render ``caption`` beside ``box`` on ``image``.

    Predictions (``isPred=True``) are drawn in red just above the box's
    top-left corner; ground-truth labels are drawn in blue just below the
    box's bottom-right corner.  The image is modified in place.
    """
    coords = np.array(box).astype(int)
    if isPred:
        anchor = (coords[0], coords[1] - 10)
        color = (0, 0, 255)
    else:
        anchor = (coords[2], coords[3] + 10)
        color = (255, 0, 0)
    cv2.putText(image, caption, anchor, cv2.FONT_HERSHEY_PLAIN,
                2, color, 4, cv2.LINE_AA)
def detect_image_local(image_path, model, class_list, thresh=0.5):
    """Run a detector over every image in a directory and display results.

    Each image is rescaled so its smallest side is 608 px (capped so the
    largest side stays <= 1024 px), padded to multiples of 32, normalised
    with ImageNet statistics, and fed to ``model``.  Detections scoring
    above ``thresh`` are drawn on the original image and shown with
    ``cv2.imshow`` (press a key to advance to the next image).

    Parameters
    ----------
    image_path : str
        Directory containing the images to process.
    model : torch.nn.Module
        Detector returning ``(scores, classification, transformed_anchors)``.
    class_list : str
        Path to a ``class_name,class_id`` CSV file.
    thresh : float, optional
        Minimum confidence score for a detection to be drawn.
    """
    img_extensions = ['.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff']

    with open(class_list, 'r') as f:
        classes = load_classes(csv.reader(f, delimiter=','))

    # invert the name -> id mapping so predictions can be labelled
    labels = {value: key for key, value in classes.items()}

    if torch.cuda.is_available():
        model = model.cuda()

    model.training = False
    model.eval()

    for img_name in os.listdir(image_path):
        # Keep only files whose suffix is a known image extension.  The
        # previous `find`-based test also matched names like 'a.jpg.txt'
        # and missed upper-case suffixes such as '.JPG'.
        if not img_name.lower().endswith(tuple(img_extensions)):
            continue

        image = cv2.imread(os.path.join(image_path, img_name))
        if image is None:
            continue

        image_orig = image.copy()

        rows, cols, cns = image.shape
        smallest_side = min(rows, cols)

        # rescale the image so the smallest side is min_side
        min_side = 608
        max_side = 1024
        scale = min_side / smallest_side

        # check if the largest side is now greater than max_side, which can
        # happen when images have a large aspect ratio
        largest_side = max(rows, cols)
        if largest_side * scale > max_side:
            scale = max_side / largest_side

        # resize the image with the computed scale
        image = cv2.resize(image, (int(round(cols * scale)), int(round(rows * scale))))
        rows, cols, cns = image.shape

        # Pad so height and width are multiples of 32 (the network stride).
        # (-x) % 32 is 0 when already aligned, whereas the old
        # `32 - x % 32` added a full extra 32-pixel band in that case.
        pad_w = (-rows) % 32
        pad_h = (-cols) % 32

        new_image = np.zeros((rows + pad_w, cols + pad_h, cns), dtype=np.float32)
        new_image[:rows, :cols, :] = image.astype(np.float32)

        # ImageNet mean/std normalisation, then HWC -> NCHW with batch dim
        image = new_image
        image /= 255
        image -= [0.485, 0.456, 0.406]
        image /= [0.229, 0.224, 0.225]
        image = np.expand_dims(image, 0)
        image = np.transpose(image, (0, 3, 1, 2))

        with torch.no_grad():
            image = torch.from_numpy(image)
            if torch.cuda.is_available():
                image = image.cuda()

            st = time.time()
            print(image.shape, image_orig.shape, scale)
            # `image` is already on the right device; the old unconditional
            # `image.cuda()` here crashed on CPU-only machines
            scores, classification, transformed_anchors = model(image.float())
            print('Elapsed time: {}'.format(time.time() - st))

            idxs = np.where(scores.cpu() > thresh)

            for j in range(idxs[0].shape[0]):
                det_idx = idxs[0][j]
                bbox = transformed_anchors[det_idx, :]
                # map box corners back to original-image coordinates
                x1 = int(bbox[0] / scale)
                y1 = int(bbox[1] / scale)
                x2 = int(bbox[2] / scale)
                y2 = int(bbox[3] / scale)
                label_name = labels[int(classification[det_idx])]
                print(bbox, classification.shape)
                # Bug fix: index scores with the thresholded index, not the
                # loop counter (they differ whenever a below-threshold
                # anchor precedes an above-threshold one).
                score = scores[det_idx]
                caption = '{} {:.3f}'.format(label_name, float(score))
                draw_caption(image_orig, (x1, y1, x2, y2), caption)
                cv2.rectangle(image_orig, (x1, y1), (x2, y2), color=(0, 0, 255), thickness=2)

            cv2.imshow('detections', image_orig)
            cv2.waitKey(0)
        cv2.destroyAllWindows()
def plot_ground_truth(data_frame, col_name, num_plots, img_name_list):
    """Show `num_plots` images side by side: original vs. ground-truth boxes.

    data_frame: annotations with columns xmin/ymin/xmax/ymax/class and an
        image-path column named `col_name`.
    img_name_list: explicit image paths; when its length does not match
        num_plots, random images are sampled from the frame instead.
    """
    #if no requested images, choose random images
    if num_plots != len(img_name_list):
        img_name_list = list(data_frame[col_name].sample(num_plots))
    for i in range(num_plots):
        fig, ax = plt.subplots(1, 2, figsize = (10, 10))
        ax = ax.flatten()
        image_name = img_name_list[i]
        records = data_frame[data_frame[col_name] == image_name]
        image = cv2.imread(image_name, cv2.IMREAD_COLOR)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image /= 255.0
        # BUG FIX: `image2 = image` aliased the same array, so the rectangles
        # drawn below also showed up in the "Original Image" panel (matplotlib
        # renders at plt.show(), after the mutation).  Copy instead.
        image2 = image.copy()
        ax[0].set_title('Original Image')
        ax[0].imshow(image)
        for idx, row in records.iterrows():
            box = row[['xmin', 'ymin', 'xmax', 'ymax', 'class']].values
            xmin = int(box[0])
            ymin = int(box[1])
            xmax = int(box[2])
            ymax = int(box[3])
            label = str(box[4])
            cv2.rectangle(image2, (xmin, ymin), (xmax, ymax), (255,0,0), 3)
            draw_caption(image2, (xmin, ymin, xmax, ymax), label)
        ax[1].set_title('Annotated Image')
        ax[1].imshow(image2, interpolation='nearest')
        plt.show()
def plot_predictions(data_frame, class_frame, col_name, num_plots, model, thresh=0.5):
    """Run `model` on `num_plots` random images and overlay predictions
    (blue) against ground truth (red).

    data_frame: annotations (xmin/ymin/xmax/ymax/class) keyed by `col_name`.
    class_frame: frame mapping class 'id' to its label in column 0.
    thresh: minimum detection score to draw.

    Returns (scores, classification, transformed_anchors) for the LAST image
    processed (assumes at least one sampled image loads successfully).
    """
    img_name_list = list(data_frame[col_name].sample(num_plots))
    if torch.cuda.is_available():
        model = model.cuda()
    model.training = False
    model.eval()
    for i in range(num_plots):
        fig, ax = plt.subplots(1, 1, figsize = (10, 10))
        image_name = img_name_list[i]
        records = data_frame[data_frame[col_name] == image_name]
        image = cv2.imread(image_name, cv2.IMREAD_COLOR)
        # BUG FIX: the None check originally ran *after* cvtColor, which would
        # already have crashed on an unreadable file.  Check first.
        if image is None:
            continue
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image_orig = image.copy()
        image_orig /= 255.0
        # Pre-processing: scale so the short side is 608 px (long side capped
        # at 1024 px), then pad to a multiple of 32 for the backbone.
        rows, cols, cns = image.shape
        smallest_side = min(rows, cols)
        min_side = 608
        max_side = 1024
        scale = min_side / smallest_side
        largest_side = max(rows, cols)
        if largest_side * scale > max_side:
            scale = max_side / largest_side
        image = cv2.resize(image, (int(round(cols * scale)), int(round((rows * scale)))))
        rows, cols, cns = image.shape
        pad_w = 32 - rows % 32
        pad_h = 32 - cols % 32
        new_image = np.zeros((rows + pad_w, cols + pad_h, cns)).astype(np.float32)
        new_image[:rows, :cols, :] = image.astype(np.float32)
        image = new_image.astype(np.float32)
        # ImageNet normalisation.
        image /= 255
        image -= [0.485, 0.456, 0.406]
        image /= [0.229, 0.224, 0.225]
        image = np.expand_dims(image, 0)
        image = np.transpose(image, (0, 3, 1, 2))  # NHWC -> NCHW
        with torch.no_grad():
            image = torch.from_numpy(image)
            if torch.cuda.is_available():
                image = image.cuda()
            print(image.shape, image_orig.shape, scale)
            # BUG FIX: the original called image.cuda() unconditionally here,
            # crashing on CPU-only machines; the tensor was already moved above.
            scores, classification, transformed_anchors = model(image.float())
            idxs = np.where(scores.cpu() > thresh)
            for j in range(idxs[0].shape[0]):
                det = idxs[0][j]
                bbox = transformed_anchors[det, :]
                # Undo the pre-processing scale to get original-image coords.
                x1 = int(bbox[0] / scale)
                y1 = int(bbox[1] / scale)
                x2 = int(bbox[2] / scale)
                y2 = int(bbox[3] / scale)
                class_idx = int(classification[det])
                label_name = class_frame[class_frame['id'] == class_idx][0].item()
                print(bbox, classification.shape)
                # BUG FIX: the original used scores[j]; j indexes the filtered
                # detections, so the wrong score was captioned on each box.
                score = scores[det]
                caption = '{} {:.3f}'.format(label_name, score)
                draw_caption(image_orig, (x1, y1, x2, y2), caption)
                cv2.rectangle(image_orig, (x1, y1), (x2, y2), (0, 0, 255), 2)
        # Overlay the ground truth in red for comparison.
        for idx, row in records.iterrows():
            box = row[['xmin', 'ymin', 'xmax', 'ymax', 'class']].values
            xmin = int(box[0])
            ymin = int(box[1])
            xmax = int(box[2])
            ymax = int(box[3])
            label = str(box[4])
            draw_caption(image_orig, (xmin, ymin, xmax, ymax), label)
            cv2.rectangle(image_orig, (xmin, ymin), (xmax, ymax), (255,0,0), 2)
        ax.set_title('Ground truth(Red) vs Prediction(Blue)')
        ax.imshow(image_orig, interpolation='nearest')
        plt.show()
    return scores, classification, transformed_anchors
| true |
1b4af95ed57ca4c0cb8cd4ed2057892e72b2013d | Python | mirchos/bot | /bot.py | UTF-8 | 488 | 2.625 | 3 | [] | no_license | import discord
import telegramPart
from discord.ext import commands
# SECURITY(review): a live bot token is hard-coded in source; it should be
# revoked and loaded from an environment variable or config file instead.
TOKEN = 'NDU2MjA4MzI4OTg5NjA1OTEw.XpDk4A.X5Bg0CnClGYnRV4mRMKjg-wPHyk'
# NOTE(review): `bot` is created but never used; only `client` is run below.
bot = commands.Bot(command_prefix='!')
client = discord.Client()
@client.event
async def on_ready():
    # Log basic identity info once the gateway connection is ready.
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
@client.event
async def on_message(message):
    # Forward every non-empty Discord message to the Telegram bridge.
    print(message.content)
    if message.content != '':
        telegramPart.send_message(message.content)
client.run(TOKEN)
| true |
42178391da86cd3ac25c4a241fc9992925d7f13b | Python | frankhucek/Pathfinder | /modules/test/test_heatmap.py | UTF-8 | 6,612 | 2.640625 | 3 | [] | no_license |
import sys
from pathlib import Path
cur_dir = Path(__file__).parents[0]
parent_dir = Path(__file__).parents[1]
if parent_dir not in sys.path:
sys.path.insert(0, str(parent_dir))
if cur_dir not in sys.path:
sys.path.insert(0, str(cur_dir))
###############################################################################
# Imports #
###############################################################################
import os
from PIL import Image
from common import assert_close
from manifest import Manifest
from image import ImageData, WholeImageData
from datetime import datetime
from pytest import fixture
import pytest
from unittest.mock import MagicMock
###############################################################################
# Unit under test #
###############################################################################
import heatmap
from heatmap import TimePeriod, NullTimePeriod, Heatmap
from heatmap import CoordRange
###############################################################################
# Constants #
###############################################################################
HEATMAP_IMAGE_DIR = "examples/heatmap"
###############################################################################
# Fixtures #
###############################################################################
@fixture
def filepaths():
    """Paths of every .jpg example image in the heatmap directory."""
    return [os.path.join(HEATMAP_IMAGE_DIR, x)
            for x in os.listdir(HEATMAP_IMAGE_DIR)
            if x.endswith(".jpg")]
@fixture(scope='module')
def images():
    """ImageData objects for every example image (module-scoped for speed)."""
    # NOTE(review): calling the fixture function `filepaths()` directly is an
    # error in pytest >= 4 ("Fixtures are not meant to be called directly");
    # this should request `filepaths` as a parameter instead.
    return [ImageData.create(None, x) for x in filepaths()]
@fixture(scope='module')
def filtered_images():
    """Images inside the default test period, sorted by capture time."""
    # NOTE(review): `images()` and `period()` are fixture functions called
    # directly (rejected by modern pytest).  Requesting them as parameters
    # would also raise ScopeMismatch (`period` is function-scoped while this
    # fixture is module-scoped) — needs a small refactor.
    f_images = heatmap.trim_by_date(images(), period())
    f_images = heatmap.sort_by_date(f_images)
    return f_images
@fixture
def period():
return TimePeriod(datetime(2018, 3, 23, 21, 15, 25),
datetime(2018, 3, 23, 21, 15, 42))
@fixture
def first_time():
return datetime(2018, 3, 23, 21, 15, 23)
@fixture
def last_time():
return datetime(2018, 3, 23, 21, 15, 44)
@fixture
def point():
return (2000, 1000)
@fixture()
def manifest():
return Manifest.from_filepath("examples/manifest.json")
points = [
((2000, 1000), True),
((3000, 1000), True),
((1000, 2000), False),
((1500, 2000), False),
((2500, 250), False)
]
@fixture
def img_corners():
return [[0, 0], [10, 0], [0, 10], [10, 10]]
@fixture
def coord_range():
return CoordRange(img_corners())
###############################################################################
# TestCases #
###############################################################################
def test_trim_by_date(images, period):
trimmed = heatmap.trim_by_date(images, period)
assert 5 == len(trimmed)
def test_trim_by_date_all(images, first_time, last_time):
period = TimePeriod(first_time, last_time)
trimmed = heatmap.trim_by_date(images, period)
assert 7 == len(trimmed)
def test_trim_by_date_none(images, first_time, last_time):
period = TimePeriod(last_time, first_time)
trimmed = heatmap.trim_by_date(images, period)
assert 0 == len(trimmed)
def test_sort_by_date(images):
std = heatmap.sort_by_date(images)
for v, w in pairwise(std):
assert v.time_taken() <= w.time_taken()
def test_windows(images):
windows = heatmap.windows(images, 3)
assert 5 == len(windows)
assert all(3 == len(x) for x in windows)
def test_windows_single(images):
single_image_lst = [images[0]]
windows = heatmap.windows(single_image_lst, 2)
assert 0 == len(windows)
def test_coordinates():
dim = (3, 4)
coords = {(0, 0), (1, 0), (2, 0),
(0, 1), (1, 1), (2, 1),
(0, 2), (1, 2), (2, 2),
(0, 3), (1, 3), (2, 3)}
assert coords == WholeImageData.coordinates(None, dim)
def test_extract_color_set(images, point):
color_set = heatmap.extract_color_set(images, point)
assert 7 == len(color_set)
@pytest.mark.skip
@pytest.mark.parametrize("point,res",
                         points, ids=lambda x: str(x))
def test_is_movement_all(point, res, images):
    """Movement detection flags exactly the expected points."""
    # NOTE(review): `period()` is a direct fixture-function call; request the
    # `period` fixture as a parameter before un-skipping this test.
    images = heatmap.trim_by_date(images, period())
    assert res == heatmap.is_movement(images, point,
                                      color_thresh=50)
def test_include_in_period(manifest, filtered_images):
hm = Heatmap.new(manifest)
hm.include_in_period(filtered_images)
assert hm.period.start == datetime(2018, 3, 23, 21, 15, 26)
assert hm.period.end == datetime(2018, 3, 23, 21, 15, 40)
def test_null_time_period(filtered_images):
null_tp = NullTimePeriod()
img = filtered_images[0]
assert not null_tp.contains(img)
tp = null_tp.expand_to_include(img.time_taken())
assert tp.contains(img.time_taken())
assert tp.start == img.time_taken()
assert tp.end == img.time_taken()
def test_coord_range_corners(img_corners, coord_range):
for corner in img_corners:
assert coord_range.contains(corner)
def test_coord_range_inner(coord_range):
assert coord_range.contains((5, 5))
assert coord_range.contains((0, 5))
assert coord_range.contains((5, 0))
assert coord_range.contains((10, 5))
assert coord_range.contains((5, 10))
def test_coord_range_outside(coord_range):
assert not coord_range.contains((-10, -10))
assert not coord_range.contains((20, 20))
assert not coord_range.contains((11, 0))
###############################################################################
# Helper functions #
###############################################################################
def pairwise(lis):
    """Consecutive overlapping pairs: [a, b, c] -> (a, b), (b, c)."""
    return zip(lis[:-1], lis[1:])
###############################################################################
# Asserts #
###############################################################################
###############################################################################
# Main script #
###############################################################################
if __name__ == '__main__':
    # BUG FIX: `unittest` was never imported (this module is written with
    # pytest fixtures), so `unittest.main()` raised NameError when run
    # directly.  Delegate to pytest instead.
    pytest.main([__file__])
| true |
0d53073e8ac64360109704cdf6113c71f1a384a2 | Python | pnovais/splus_codes | /projeto_tamaguchi.py | UTF-8 | 3,295 | 3.859375 | 4 | [] | no_license | import time
from random import randint
class Tamagushi():
def __init__(self):
self.nome = "Fofinho"
self.fome = 50
self.saude = 50
def alterar_nome(self,nome):
self.nome = nome
print("Meu nome é {0}!".format(self.nome))
def alimentar(self):
if self.fome < 100:
self.fome += 10
print("nham nham!")
else:
print("Parece que comi um elefante! :D")
def cuidar(self):
interacao = randint(0,2)
if self.saude < 100:
self.saude +=10
if interacao == 0:
print("abraço")
elif interacao == 1:
print("lambeijo!")
else:
print("pula em cima")
print("melhor mãe do mundo!!!")
def calcula_humor(self):
media = 0
if self.saude < 50:
media += 1
elif self.saude == 100:
media += 5
else:
media += 3
if self.fome < 50:
media += 1
elif self.fome == 100:
media +=5
else:
media +=3
media = media/2
if media == 1:
print("que dia pessimo!estou me sentindo muito mal >:( ")
elif media == 2:
print("tô meio mal :/")
elif media == 3:
print("tô melhorando...")
elif media == 5:
print("o dia está incrivel! estou super feliz :D")
else:
print("tô confuso... não sei o que tô sentindo (?)")
bichinho="""_____000000000____________________00000000000_____
__0000_______00__________________00________0000___
_00__________000000000____00000000____________00__
_0_________000_____________________000_________00_
_0_______000_________________________000________0_
_00_____00_____________________________00______00_
__00___00______0000___________0000______00___000__
___00000______000000_________000000______00000____
______00_______0000___________0000_______00_______
______00_____________00000000____________00_______
______00_____________00____00____________00_______
_______0______________000000_____________0________
_______00_______________00_______________00_______
________000_____________00_____________000________
__________000___________00___________000__________
____________0000________00________0000____________
_______________0000000_0000_0000000"""
tg = Tamagushi()
#print(bichinho)
def estatisticas():
    """Print the pet ASCII art and its current meters."""
    print("")
    print("{0}".format(bichinho))
    print("Estatisticas de {0}: ".format(tg.nome))
    print("Fome: {0}%".format(tg.fome))
    print("Saúde: {0}%".format(tg.saude))
def menu():
    """Print the action menu (the choice is read by the main loop below)."""
    print("")
    print("Selecione uma opção:")
    print("1 - alimentar")
    print("2 - cuidar")
    print("3 - mudar nome")
    print("4 - ver humor")
    print("0 - sair")
# Main interaction loop: show stats + menu, then dispatch the chosen action.
op = 1
while (op != 0):
    estatisticas()
    menu()
    op = int(input("\n"))
    if op == 1:
        tg.alimentar()
    elif op == 2:
        tg.cuidar()
    elif op == 3:
        nome = input("Qual será meu novo nome: ")
        tg.alterar_nome(nome)
    elif op == 4:
        tg.calcula_humor()
    # NOTE(review): this sleep runs on *every* iteration, adding a 5 s pause
    # after each action (including quitting); it may have been intended only
    # for option 4.
    time.sleep(5)
print("Até mais! :3")
| true |
753adbab3376e8d52ba9e3bcaa2cd9b9556ca78e | Python | Bizangel/ZeroSubFight | /Modules/extraFunctions.py | UTF-8 | 7,803 | 3.1875 | 3 | [] | no_license | from math import atan,radians,degrees
from random import randint
MAPHEIGHT =896
GAMEWIDTH=1280
GAMEHEIGHT=964
def randomChipTimes():
    """Return 12 spawn times, one per consecutive 900-tick window; each
    odd-numbered slot has a 50% chance of being disabled (-1) so fewer
    chips spawn overall."""
    times = []
    for slot in range(12):
        t = randint(slot * 900, slot * 900 + 900)
        # Coin flip: odd slots may never load.
        if slot % 2 == 1 and randint(0, 1):
            t = -1
        times.append(t)
    return times
def angletoState(angle):
    """Map an angle in degrees (0..360 inclusive) to a facing state tuple
    of (horizontal, vertical[, walkable]) used by the sprite code.

    BUG FIX: the original fell through with an UnboundLocalError for angles
    outside [0, 360]; raise a clear ValueError instead.
    """
    if not 0 <= angle <= 360:
        raise ValueError("angle must be in [0, 360]: %r" % (angle,))
    # The near-horizontal-right sector wraps around 0/360.
    if angle <= 22.5 or angle >= 337.5:
        return ('R', 0)
    # Remaining angle lies in (22.5, 337.5); walk the sector upper bounds.
    sectors = (
        (67.5,  ('R', 'U')),
        (90.0,  ('R', 'U', False)),
        (112.5, ('L', 'U', False)),
        (157.5, ('L', 'U')),
        (202.5, ('L', 0)),
        (252.5, ('L', 'D')),
        (270.0, ('L', 'D', False)),
        (292.5, ('R', 'D', False)),
        (337.5, ('R', 'D')),
    )
    for upper, state in sectors:
        if angle <= upper:
            return state
def pointToAngle(p1, p2, tooCloseCancel=True):
    """Angle in degrees [0, 360) of the vector from p1 to p2.

    When `tooCloseCancel` is true and the points are closer than 40 px,
    returns None (used to ignore jittery aim input).
    """
    x1, y1 = p1
    x2, y2 = p2
    dx = x2 - x1
    dy = y2 - y1
    if tooCloseCancel and (dx ** 2 + dy ** 2) ** 0.5 < 40:
        return None
    # Vertical vector: avoid division by zero below.
    if dx == 0:
        if dy > 0:
            return 90
        if dy < 0:
            return 270
        return 0
    base = abs(degrees(atan(dy / dx)))
    # Horizontal vector: base is 0, pick 0 or 180 by direction.
    if dy == 0:
        return 180 if dx < 0 else 0
    # Fold the reference angle into the correct quadrant.
    if dx > 0 and dy > 0:
        return base
    if dx < 0 and dy > 0:
        return 180 - base
    if dx < 0 and dy < 0:
        return base + 180
    return 360 - base
def stickto360(stickx, sticky):
    """Convert an analog-stick (x, y) pair in [-1, 1] to an angle in degrees.

    The y axis is inverted first (screen y grows downward).  Returns None
    while the stick is inside the 25% dead zone.

    BUG FIXES vs. the original:
      * a stick pushed straight left (-1, 0) reported 0 degrees instead of
        180 because the y == 0 quadrant correction was missing;
      * a dead `state = angletoState(...)` call was removed.
    """
    stickx *= 100
    sticky *= -100  # flip to math convention
    if (stickx ** 2 + sticky ** 2) ** 0.5 < 25:
        return None  # dead zone
    # Vertical: avoid division by zero.
    if stickx == 0:
        if sticky > 0:
            return 90
        if sticky < 0:
            return 270
        return 0
    PointAngle = abs(degrees(atan(sticky / stickx)))
    # Horizontal stick: the missing correction — 180 for left, 0 for right.
    if sticky == 0:
        return 180 if stickx < 0 else 0
    if stickx > 0 and sticky > 0:
        return PointAngle
    if stickx < 0 and sticky > 0:
        return 180 - PointAngle
    if stickx < 0 and sticky < 0:
        return PointAngle + 180
    return 360 - PointAngle
#Line Implementations
def pointdist(p1, p2):
    """Euclidean distance between two 2-D points."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return (dx * dx + dy * dy) ** 0.5
def solveQuadratic(a, b, c):
    """Both roots of a*x**2 + b*x + c = 0 as (plus-root, minus-root).

    A negative discriminant yields complex roots (Python's ** handles it).
    """
    disc_root = (b * b - 4 * a * c) ** 0.5
    denom = 2 * a
    return ((-b + disc_root) / denom, (-b - disc_root) / denom)
def getNegslope(p1, p2):
    """Slope of the line perpendicular to segment p1-p2."""
    x1, y1 = p1
    x2, y2 = p2
    return -1 / ((y2 - y1) / (x2 - x1))
def midpoint(p1, p2):
    """Midpoint of segment p1-p2."""
    return ((p1[0] + p2[0]) / 2, (p1[1] + p2[1]) / 2)
def getB(p1, slope):
    """y-intercept of the line through point p1 with the given slope."""
    x, y = p1
    return y - slope * x
def getSidePoints(p1,p2,Pcheck,threshold):
    """Return the two points at distance `threshold` from `Pcheck` along the
    line through `Pcheck` perpendicular to segment p1-p2.

    NOTE(review): when the quadratic discriminant goes negative the results
    are complex numbers; the caller (generatePoints) guards for that case.
    """
    slope = getNegslope(p1,p2)  # perpendicular slope
    b = getB(Pcheck,slope)      # intercept of the perpendicular through Pcheck
    d = threshold
    p,q = Pcheck
    z = b-q
    # Intersect that line with the circle of radius d centred on Pcheck.
    x1,x2 = solveQuadratic((1+slope**2),(-2*p+2*z*slope),(p**2+z**2-d**2))
    y1 = slope*x1+b
    y2 = slope*x2+b
    return ((x1,y1),(x2,y2))
def splitpoints(p1, p2, splits):
    """The splits-1 interior points dividing segment p1-p2 into `splits`
    equal segments (3 segments are separated by 2 points)."""
    x1, y1 = p1
    x2, y2 = p2
    step_x = (x1 - x2) / splits
    step_y = (y1 - y2) / splits
    return [(x1 - step_x * k, y1 - step_y * k) for k in range(1, splits)]
def getStraightSidePoints(p1, threshold, vertical):
    """Two points offset +/-threshold from p1: along x when the segment is
    vertical, along y otherwise."""
    x, y = p1
    if vertical:
        return ((x + threshold, y), (x - threshold, y))
    return ((x, y + threshold), (x, y - threshold))
def generatePoints(point1, point2):
    """Build a jittered, lightning-bolt style list of points from point1 to
    point2: interior points are offset perpendicular to the segment by a
    random threshold; longer segments get more subdivisions.

    Raises ValueError if any generated coordinate is not a real number
    (getSidePoints can produce complex values for degenerate inputs).
    """
    x1, y1 = point1
    x2, y2 = point2
    threshold = randint(-15, 15)
    # Longer bolts get more segments.
    dist = pointdist(point1, point2)
    splitter = randint(3, 9)
    if dist > 600:
        splitter = randint(8, 12)
    if dist > 1000:
        splitter = randint(14, 22)
    splitlist = splitpoints(point1, point2, splitter)
    pointlist = [point1]
    if x2 - x1 != 0 and y2 - y1 != 0:
        for point in splitlist:
            pointlist.extend(getSidePoints(point1, point2, point, threshold))
    elif x2 - x1 == 0:  # vertical segment: jitter along x
        for point in splitlist:
            pointlist.extend(getStraightSidePoints(point, threshold, True))
    else:  # horizontal segment: jitter along y
        for point in splitlist:
            pointlist.extend(getStraightSidePoints(point, threshold, False))
    pointlist.append(point2)
    # BUG FIX: the original printed the full point list once per point (debug
    # spam on every call) and validated only the x coordinate.  Validate both
    # coordinates and log only on failure.
    for x, y in pointlist:
        if not isinstance(x, (float, int)) or not isinstance(y, (float, int)):
            print(pointlist)
            raise ValueError('Complex number received')
    return pointlist
#Rect/Line Collision Implementation
def orientation(p, q, r):
    """Orientation of the ordered triple (p, q, r).

    Returns 0 for collinear, 1 for clockwise, -1 for counterclockwise.
    """
    cross = (q[1] - p[1]) * (r[0] - q[0]) - (q[0] - p[0]) * (r[1] - q[1])
    if cross > 0:
        return 1
    if cross < 0:
        return -1
    return 0
def onSegment(p, q, r):
    """Given that p, q, r are collinear, report whether q lies within the
    bounding box of segment p-r (i.e. on the segment itself)."""
    px, py = p
    qx, qy = q
    rx, ry = r
    inside_x = min(px, rx) <= qx <= max(px, rx)
    inside_y = min(py, ry) <= qy <= max(py, ry)
    return inside_x and inside_y
def intersect(p1,q1,p2,q2):
    ''' Check if two segments p1-q1 and p2-q2 intersect'''
    # Standard orientation test: the segments cross iff the endpoints of each
    # segment lie on opposite sides of the other segment.
    o1 = orientation(p1,q1,p2)
    o2 = orientation(p1,q1,q2)
    o3 = orientation(p2,q2,p1)
    o4 = orientation(p2,q2,q1)
    if (o1 != o2 and o3 != o4):
        return True
    #Special cases
    # Collinear endpoints: a mere touch counts as an intersection.
    if (o1 == 0 and onSegment(p1,p2,q1)): return True
    if (o2 == 0 and onSegment(p1,q2,q1)): return True
    if (o3 == 0 and onSegment(p2,p1,q2)): return True
    if (o4 == 0 and onSegment(p2,q1,q2)): return True
    return False
def fixY(p):
    """Mirror a point's y coordinate across the GAMEHEIGHT-tall screen
    (converts between screen and math coordinate conventions)."""
    return (p[0], GAMEHEIGHT - p[1])
def rectlinecollide(rect,p1,p2):
    '''Receives a pygame.rect object and p1 and p2, checks if rect collides
    with segment p1-p2 '''
    # Work in math coordinates: flip y for both the segment and the rect.
    p1 = fixY(p1)
    p2 = fixY(p2)
    topleft = (rect.left,rect.top)
    topleft = fixY(topleft)
    topright = (rect.right,rect.top)
    topright = fixY(topright)
    bottomleft = (rect.left,rect.bottom)
    bottomleft = fixY(bottomleft)
    bottomright = (rect.right,rect.bottom)
    bottomright = fixY(bottomright)
    # Approximate the rect by 11 vertical segments (left and right sides plus
    # nine interior verticals) and test each against the query segment.
    topPoints = splitpoints(topleft,topright,10)
    bottomPoints = splitpoints(bottomleft,bottomright,10)
    lineList = []
    for index in range(9): #there are 9 splitpoints
        lineList.append( (topPoints[index],bottomPoints[index]) ) #creates a list of segments
    lineList.insert(0, (topleft,bottomleft) ) #insert rect left and right sides to the list
    lineList.append( (topright,bottomright) )
    for segment in lineList:
        p,q = segment
        if intersect(p,q,p1,p2):
            return True
    return False
| true |
3655f04c626c58307f2a60698f049d34b45b69f0 | Python | wilile26811249/Practice | /hangman_game/hangman.py | UTF-8 | 4,121 | 3.265625 | 3 | [] | no_license | import os
import math
import random
import pygame
import nltk
from nltk.corpus import brown
# OS Path Setting
current_folder = os.path.dirname(os.path.abspath(__file__))
images_path = os.path.join(current_folder, 'images')
# Setup Display
pygame.init()
WIDTH, HEIGHT = 800, 500
win = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("~~Hangman Game~~")
# Button Variables
RADIUS = 20
GAP = 15
letters = [] # [position_x, position_y, char, visible]
startX = round((WIDTH - (RADIUS * 2 + GAP) * 13) / 2)
startY = 400
for i in range(26):
x = startX + RADIUS + ((RADIUS * 2 + GAP) * (i % 13))
y = startY + ((i // 13) * (GAP + RADIUS * 2))
letters.append([x, y, chr(65 + i), True])
# Fonts
LETTER_FONT = pygame.font.SysFont('comicsans', 40)
WORD_FONT = pygame.font.SysFont('comicsans', 60)
TITLE_FONT = pygame.font.SysFont('comicsans', 70)
# load images
images = []
for i in range(7):
image = os.path.join(images_path, "hangman" + str(i) + ".jpg")
images.append(image)
# Game Variables
hangman_status = 0
news_text = brown.words(categories='news')
words = [vocabulary.upper() for vocabulary in news_text if 2 < len(vocabulary) and len(vocabulary) < 6 and vocabulary.isalpha()]
word = ""
guessed = []
# Colors
WHITE = (255, 255, 255)
BLACK = (0, 0, 0)
# Setup game loop
FPS = 60
clock = pygame.time.Clock()
run = True
def draw():
    '''
    Render one full frame: title, masked word, letter buttons and gallows.
    '''
    win.fill(WHITE)
    # Draw title
    text = TITLE_FONT.render("DEVELOPER HANGMAN", 1, BLACK)
    win.blit(text, (WIDTH / 2 - text.get_width() / 2, 20))
    # Draw word: guessed letters shown, the rest as underscores.
    display_word = ""
    for letter in word:
        if letter in guessed:
            display_word += letter + " "
        else:
            display_word += "_ "
    text = WORD_FONT.render(display_word, 1, BLACK)
    win.blit(text, (400, 200))
    # Draw buttons (only letters not yet clicked).
    for letter in letters:
        posx, posy, ltr, visible = letter
        if visible:
            pygame.draw.circle(win, BLACK, (posx, posy), RADIUS, 3)
            text = LETTER_FONT.render(ltr, 1, BLACK)
            win.blit(text, (posx - text.get_width() / 2, posy - text.get_height() / 2))
    # PERF(review): pygame.image.load hits the disk on every frame; `images`
    # could hold pre-loaded Surfaces instead of file paths.
    win.blit(pygame.image.load(images[hangman_status]), (150, 100))
    pygame.display.update()
def display_message(message):
    """Blank the window and show `message` centered for ~2 s (blocking)."""
    pygame.time.delay(1000)
    win.fill(WHITE)
    text = WORD_FONT.render(message, 1, BLACK)
    win.blit(text, (WIDTH / 2 - text.get_width() / 2, HEIGHT / 2 - text.get_height() / 2))
    pygame.display.update()
    pygame.time.delay(2000)
def reset_status():
    """Reset round state: gallows stage, guesses, and button visibility."""
    global guessed, letters, hangman_status
    hangman_status = 0
    guessed = []
    for letter in letters:
        letter[3] = True  # make every letter button clickable again
def main():
    """Run one hangman round: pick a word, process clicks, detect win/lose."""
    global run, hangman_status, FPS, RADIUS, guessed, letters, word
    word = random.choice(words)
    print(word)  # NOTE(review): prints the answer to the console (debug aid?)
    while run:
        clock.tick(FPS)
        draw()
        # NOTE(review): this renders the answer word near the title *after*
        # draw() updated the display — looks like leftover debug output;
        # confirm before removing.
        text = TITLE_FONT.render(word, 1, BLACK)
        win.blit(text, (WIDTH / 2 - text.get_width() / 2, 10))
        for event in pygame.event.get():
            if event.type == pygame.QUIT: # Close window
                run = False
            if event.type == pygame.MOUSEBUTTONDOWN:
                m_x, m_y = pygame.mouse.get_pos()
                # Hit-test every still-visible letter button.
                for letter in letters:
                    x, y, ltr, visible = letter
                    if visible:
                        dis = math.sqrt((x - m_x) ** 2 + (y - m_y) ** 2)
                        if dis < RADIUS:
                            letter[3] = False
                            guessed.append(ltr)
                            if ltr not in word:
                                hangman_status += 1  # advance the gallows
        draw()
        # Win when every letter of the word has been guessed.
        won = True
        for letter in word:
            if letter not in guessed:
                won = False
                break
        if won:
            display_message("You Win!")
            reset_status()
            break
        if hangman_status == 6:
            display_message("You Lost! The answer is " + word)
            reset_status()
            won = True
            break
# Keep starting new rounds until the user closes the window: main() returns
# after each finished round, and `run` flips to False on pygame.QUIT.
while True:
    if run:
        main()
    else:
        break
pygame.quit()
| true |
a9174f1e619c58f9e1fad9742531ff10e81d3eb6 | Python | b1ck0/python_coding_problems | /Sequences/021_climbing_leaderboard.py | UTF-8 | 2,837 | 3.546875 | 4 | [] | no_license | def dense_ranking(array):
current_rank = 1
ranking_vector = [current_rank]
for i in range(1, len(array)):
if array[i] == array[i - 1]:
ranking_vector.append(current_rank)
else:
current_rank += 1
ranking_vector.append(current_rank)
return ranking_vector
def linear_search_greater(array, number, start_from=None):
    """For a descending `array`, return the index just after the last element
    strictly greater than `number`, scanning backwards from `start_from`.

    Edge cases: 0 when the head is already <= number; len(array) when even
    the tail is >= number.
    """
    idx = len(array) - 1 if start_from is None else start_from
    if array[0] <= number:
        return 0
    if array[-1] >= number:
        return len(array)
    while array[idx] <= number:
        idx -= 1
    return idx + 1
def climbing_leaderboard(leaderboard_scores: list, alice_scores: list) -> list:
    """
    https://www.hackerrank.com/challenges/climbing-the-leaderboard/problem
    Dense-rank Alice's games against an existing leaderboard.

    :param leaderboard_scores: scores in descending order
    :param alice_scores: Alice's scores in ascending order
    :return: Alice's dense rank after inserting each of her scores

    BUG FIX: the original returned from inside the loop after the first
    score and indexed the rank table incorrectly, producing wrong answers
    (e.g. [4] instead of [6, 4, 2, 1] for the sample case).
    """
    # Dense rank of a score s = (number of DISTINCT scores > s) + 1.
    unique = sorted(set(leaderboard_scores), reverse=True)
    ranks = []
    idx = len(unique) - 1
    # alice_scores is ascending, so the insertion point only moves left:
    # one O(n + m) sweep instead of a fresh search per score.
    for score in alice_scores:
        while idx >= 0 and unique[idx] <= score:
            idx -= 1
        ranks.append(idx + 2)
    return ranks
if __name__ == "__main__":
print(climbing_leaderboard([100, 100, 50, 40, 40, 20, 10], [5, 25, 50, 120]))
print(climbing_leaderboard([1], [1, 1]))
print(climbing_leaderboard(
[998, 995, 995, 991, 989, 989, 984, 979, 968, 964, 955, 955, 947, 945, 942, 934, 933, 930, 928, 927, 918, 916,
905, 900, 898, 895, 895, 895, 892, 887, 882, 881, 878, 876, 872, 872, 858, 856, 846, 844, 839, 823, 808, 806,
804, 800, 799, 794, 793, 789, 784, 772, 766, 765, 764, 762, 762, 759, 757, 751, 747, 745, 738, 725, 720, 708,
706, 703, 699, 697, 693, 691, 690, 685, 682, 677, 662, 661, 656, 648, 642, 641, 640, 634, 632, 625, 623, 618,
618, 617, 601, 601, 600, 591, 585, 583, 578, 552, 550, 550, 546, 543, 539, 509, 505, 503, 503, 494, 486, 474,
472, 472, 472, 468, 467, 464, 439, 438, 434, 434, 427, 421, 420, 405, 399, 395, 392, 388, 386, 384, 377, 374,
368, 356, 350, 344, 342, 341, 337, 331, 298, 296, 296, 294, 290, 260, 259, 248, 245, 244, 244, 233, 228, 215,
211, 210, 206, 202, 201, 189, 186, 181, 178, 168, 163, 162, 161, 159, 151, 147, 143, 142, 142, 141, 139, 132,
130, 128, 125, 125, 120, 112, 111, 95, 92, 91, 88, 81, 69, 66, 63, 48, 44, 20, 18, 17, 14, 8, 1, 1],
[18, 31, 38, 126, 152, 170, 198, 199, 202, 243, 369, 376, 376, 408, 560, 572, 614, 665, 666, 942]))
| true |
32153537ebc8611cc0ec5162f12ade5ccd759aa4 | Python | milojarez/laboratorios | /perceptronv0.py | UTF-8 | 3,250 | 3.1875 | 3 | [
"Unlicense"
] | permissive | class Perceptron:
    def __init__(self):
        """Single perceptron with a hard threshold activation."""
        self.salida = 0              # last output (0 or 1)
        self.pesos = []              # weights, one per input component
        self.entradas = []           # current training inputs
        self.error = []              # last training error
        self.correccion = 0          # last weight correction (lr * error)
        self.valor_esperado = 0      # expected/target output
        self.umbral = 0.0            # activation threshold
        self.tasa_aprendizaje = 0.1  # learning rate
    def preguntar(self, pregunta):
        """Evaluate `pregunta` and return the perceptron's 0/1 answer."""
        # NOTE(review): productoEscalar as written reads self.entradas rather
        # than the inputs passed down, so `pregunta` may have no effect on the
        # result — verify that method before trusting answers here.
        self.pensarPerceptron(pregunta)
        print('la salida para ',pregunta, ' es ', self.salida)
        return self.salida
    def pensarPerceptron(self, entradas):
        """Set self.salida to 1 if the weighted input sum exceeds the
        threshold, else 0."""
        print('pensando..')
        if self.productoEscalar(entradas, self.pesos) > self.umbral:
            self.salida = 1
        else:
            self.salida = 0
    def setUmbral(self,umbral):
        # Activation threshold used by pensarPerceptron.
        self.umbral = umbral
    def setTasaAprendizaje(self, tasa_aprendizaje):
        # Learning-rate factor applied to the error when correcting weights.
        self.tasa_aprendizaje = tasa_aprendizaje
    def setEntradas(self, entradas, valor_esperado):
        """Load a training sample and zero-initialise weights and errors."""
        n = len(entradas)
        self.entradas = entradas
        self.valor_esperado = valor_esperado
        self.error.clear()
        self.pesos.clear()
        #self.umbral = self.valor_esperado
        # One zero weight (and error slot) per input component.
        # NOTE(review): entrenar() later overwrites self.error with a scalar,
        # so the per-input error list built here is never actually used.
        for x in range(n):
            self.pesos.append(0)
            self.error.append(0)
        #self.entrenar()
def productoEscalar(self, entradas, pesos):
res = 0
for x, w in zip(self.entradas, self.pesos):
res += x * w
print('producto escalar ', res)
return res
def entrenar(self):
interaciones = 0
top = 20
while True:
#if self.productoEscalar(self.entradas, self.pesos) > self.umbral:
# self.salida = 1
#else:
# self.salida = 0
self.pensarPerceptron(self.entradas)
print('comparando salida ', self.salida, ' con valor esperado ', self.valor_esperado)
if self.salida != self.valor_esperado:
print('no son iguales...')
self.error = self.valor_esperado - self.salida
self.correccion = self.tasa_aprendizaje * self.error
print('error ', self.error, ' correccion ', self.correccion)
i = 0
print('calculando nuevos pesos...')
for d in self.entradas:
w = 0
w = (d * self.correccion) + self.pesos[0]
print('valor del peso ', w)
self.pesos[i] = w
else:
print('Entrenado con ', interaciones, ' interacciones, pesos finales:')
print(self.pesos)
break
interaciones += 1
if interaciones > top:
top += top
print('se han realizado ', interaciones, ' interacciones desea salir pres x')
r = input()
if r == 'x':
print('valores de entrada')
print(self.entradas)
print('valor esperado')
print(self.valor_esperado)
print('ultima salida del Perceptron')
print(self.salida)
print('ultima tabla de pesos')
print(self.pesos)
break
| true |