blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
db0e071a9a5242e771199ee04617690c2bb27134 | Python | aidakaleb/MIS3615-2019Spring | /tech-savvy/quadratic_function.py | UTF-8 | 600 | 3.9375 | 4 | [] | no_license | import math
def quadratic(a, b, c):
    """Solve a*x**2 + b*x + c = 0 for real roots.

    Returns (x1, x2) when the discriminant is non-negative (x1 uses +sqrt),
    otherwise prints a message and returns None.
    """
    discriminant = b**2 - 4 * a * c  # calculate the discriminant
    if discriminant >= 0:  # equation has real solutions
        # Bug fix: was "/ 2 * a", which divides by 2 and then MULTIPLIES
        # by a; the quadratic formula requires dividing by (2 * a).
        x_1 = (-b + math.sqrt(discriminant)) / (2 * a)
        x_2 = (-b - math.sqrt(discriminant)) / (2 * a)
        return x_1, x_2
    else:
        print('No Real Number Solution.')
        return None
# Demo invocation; the commented lines show an interactive variant.
# print(quadratic(2, 2, 2))
print(quadratic(1, 4, 1))
# a = float(input('please enter a number:'))
# b = float(input('please enter a number:'))
# c = float(input('please enter a number:'))
# print('Results are:', quadratic(a, b, c))
| true |
7cb95b057db7d69ed4c9558cd005470751ce0c6f | Python | ofer026/VerbsTraining | /main.py | UTF-8 | 2,364 | 3.109375 | 3 | [] | no_license | import random
import sqlite3
# NOTE(review): hard-coded absolute Windows path — breaks on any other machine.
connection = sqlite3.connect("D:\\OFER\\Python\\Projects\\verbs_training\\database\\verbs.db")
cursor = connection.cursor()  # module-level cursor shared by all functions below
def dis(verbs=None):
    """Blank one of the three verb forms at random and persist the row.

    verbs: [v1, v2, v3]. One randomly chosen entry is replaced with "" in
    place (the caller's list is mutated, as before) and the blanked value
    is stored in the `miss` column so start() can quiz on it later.

    Bug fixes: the mutable default argument ([]) was shared across calls,
    and the INSERT was built with str.format, which breaks on quotes in
    the input and allows SQL injection; it now uses sqlite3 placeholders.
    """
    if verbs is None:
        verbs = []
    dis_index = random.randint(0, 2)  # which of v1/v2/v3 to hide
    temp = verbs[dis_index]
    verbs[dis_index] = ""
    cursor.execute(
        "INSERT INTO verbs (v1, v2, v3, miss) VALUES (?, ?, ?, ?);",
        (verbs[0], verbs[1], verbs[2], temp),
    )
    connection.commit()
def start():
    """Quiz loop: for every stored row, ask for the blanked verb form.

    A row is (v1, v2, v3, miss); the blanked form is "" and the answer is
    in column 3. After the quiz, offers play-again/exit/add; note that
    "play" recurses into start(), so repeated rounds grow the call stack.
    """
    cursor.execute("SELECT * FROM verbs;")
    rows = cursor.fetchall()
    #print(rows[0])
    for row in rows:
        for verb in row:
            # row[row.index(verb)] == "" is equivalent to verb == "" here,
            # since row[row.index(verb)] always equals verb.
            if row[row.index(verb)] == "":
                #print("verb is {}".format(verb))
                while True:
                    in_verb = input("{} what is the missing verb? ".format(row[0:3]))
                    if in_verb.lower() == row[3].lower():
                        print("Correct!")
                        break
                    else:
                        ans = input("Wrong! type skip to continue or leave blank to keep trying: ")
                        if ans.lower() == "skip":
                            break
    while True:
        yes_no = input("Play again (type play), exit or add more verbs (type add): ")
        if yes_no.lower() == "play":
            start()  # recursive replay; returns here afterwards
            break
        elif yes_no.lower() == "exit":
            connection.close()
            exit()
        elif yes_no.lower() == "add":
            break
        else:
            print("type play, exit or add")
# Interactive main loop: optionally play a quiz round, inspect ("debug") or
# clear ("delete") the database, then collect a new verb triple and store it
# with one form blanked. Every choice except "exit" falls through to the
# add-verbs prompts below.
while True:
    start_y_n = input("start? Y or N (type exit to exit the program): ")
    if start_y_n == "Y" or start_y_n == "y":
        start()
    elif start_y_n == "N" or start_y_n == "n":
        pass
    elif start_y_n.lower() == "exit":
        break
    elif start_y_n.lower() == "debug":
        # dump every stored row
        #print(all_verbs)
        cursor.execute("SELECT * FROM verbs;")
        rows = cursor.fetchall()
        print(rows)
    elif start_y_n.lower() == "delete":
        # wipe all stored verbs
        cursor.execute("DELETE FROM verbs;")
        connection.commit()
        print("database deleted")
    v1 = input("enter v1: ")
    v2 = input("enter v2: ")
    v3 = input("enter v3: ")
    dis([v1, v2, v3])
connection.close()
| true |
56d302357597f93ca3d6ae9a4bf846a1165cad35 | Python | hi-zhenyu/PVC | /model.py | UTF-8 | 2,153 | 2.65625 | 3 | [
"MIT"
] | permissive | import math
import torch
import torch.nn as nn
class PVC(nn.Module):
    """Multi-view model holding one encoder/decoder MLP pair per view.

    arch_list: one layer-size list per view; each list describes a
    symmetric autoencoder (encoder built forward, decoder from the
    reversed list). ReLU is used between hidden layers; the last layer of
    each encoder/decoder is linear. `tanh`/`sigm` are defined but not
    used in forward().
    """
    def __init__(self, arch_list):
        super(PVC, self).__init__()
        self.view_size = len(arch_list)
        self.enc_list = nn.ModuleList()
        self.dec_list = nn.ModuleList()
        self.relu = nn.ReLU()
        self.tanh = nn.Tanh()
        self.sigm = nn.Sigmoid()
        # network
        for view in range(self.view_size):
            enc, dec = self.single_ae(arch_list[view])
            self.enc_list.append(enc)
            self.dec_list.append(dec)
        # NOTE(review): single_ae reverses each arch list IN PLACE, so at
        # this point arch_list[0][0] is the head of the reversed first
        # architecture — confirm this is the dimension actually intended.
        self.dim = arch_list[0][0]
    def reset_parameters(self):
        # NOTE(review): self.A is never defined in this class, so calling
        # this raises AttributeError unless external code sets A first.
        stdv = 1. / math.sqrt(self.dim)
        self.A.data.uniform_(-stdv, stdv)
        self.A.data += torch.eye(self.dim)
    def single_ae(self, arch):
        """Build (encoder, decoder) ModuleLists of Linear layers from `arch`.

        Side effect: `arch` is reversed in place to build the decoder.
        """
        # encoder
        enc = nn.ModuleList()
        for i in range(len(arch)):
            if i < len(arch)-1:
                enc.append(nn.Linear(arch[i], arch[i+1]))
            else:
                break
        # decoder
        arch.reverse()
        dec = nn.ModuleList()
        for i in range(len(arch)):
            if i < len(arch)-1:
                dec.append(nn.Linear(arch[i], arch[i+1]))
            else:
                break
        return enc, dec
    def forward(self, inputs_list):
        """Encode and decode each view; returns (encoded_list, decoded_list)."""
        encoded_list = []
        decoded_list = []
        for view in range(self.view_size):
            # encoded
            encoded = inputs_list[view]
            for i, layer in enumerate(self.enc_list[view]):
                if i < len(self.enc_list[view]) - 1:
                    encoded = self.relu(layer(encoded))
                else: # the last layer
                    encoded = layer(encoded)
            encoded_list.append(encoded)
            # decoded
            decoded = encoded
            for i, layer in enumerate(self.dec_list[view]):
                if i < len(self.dec_list[view]) - 1:
                    decoded = self.relu(layer(decoded))
                else: # the last layer
                    decoded = layer(decoded)
            decoded_list.append(decoded)
        return encoded_list, decoded_list
| true |
9e243d91dbc7d98392a1a3a667e36c2efaf787a6 | Python | pgomezboza/holbertonschool-higher_level_programming | /0x0A-python-inheritance/3-is_kind_of_class.py | UTF-8 | 367 | 3.78125 | 4 | [] | no_license | #!/usr/bin/python3
"""
Module: 3-is_kind_of_class
"""
def is_kind_of_class(obj, a_class):
    """
    finds if obj is an instance of a_class or a class
    inherited from a_class.
    args:
        obj: object to look
        a_class: class to be checked
    return: true or false.
    """
    return issubclass(type(obj), a_class)
| true |
ea0b5c71bf76676f84636f5d5471a97c7453b5c6 | Python | dener-ufv/CCF-110 | /Listas-de-Exercicios/Lista-1/ex11.py | UTF-8 | 117 | 4.09375 | 4 | [] | no_license | numero = float(input("Digite o número: "))
if numero > 20.0:
print("O número {0} é maior que 20".format(numero)) | true |
6918b55013fe8adc2fee2fc176ac659deca66f07 | Python | 6851-2017/ordered-file-maintenance | /bench_base.py | UTF-8 | 15,026 | 2.6875 | 3 | [] | no_license | import cProfile
import unittest
import time
import random
from math import log
from collections import defaultdict
from fpbst import FPBST
from full_persistence import FPPM
from full_persistence import FPNode
from multiprocessing import Pool
# Linked list
# 4 depth binary history for each node
def timeit(f):
    """Decorator: call ``f`` and return ``(result, elapsed_cpu_seconds)``."""
    def wrapper(*args, **kwargs):
        started = time.process_time()
        result = f(*args, **kwargs)
        elapsed = time.process_time() - started
        return result, elapsed
    return wrapper
def asymptotic(xs, ns):
    """Format paired (n, measurement) values as tab-separated lines."""
    rows = []
    for value, n in zip(xs, ns):
        rows.append("{}\t{}".format(n, value))
    return "\n".join(rows)
def csv_line(struct, test, rw, p, d, T, struct_n, hist_n, t):
    """Print one comma-separated row describing a benchmark measurement."""
    fields = (struct, test, rw, p, d, T, struct_n, hist_n, t)
    print(",".join(str(field) for field in fields))
@timeit
def create_linked_list(n, T, p, d):
    """Build an n-node fully persistent linked list.

    Returns (root, final_version); the @timeit wrapper turns that into
    ((root, version), seconds). p/d/T are forwarded to FPPM.
    """
    # Initialize Fully Persitent Pointer Machine
    fppm = FPPM(d=d, p=p, T=T)
    node = fppm.get_root(fppm.first_version)
    v = fppm.first_version
    for i in range(1, n):
        next_node = FPNode("n{}".format(i), fppm, v)
        # each pointer write yields a new version; keep following it
        v = node.set_field("p0", next_node, v)
        node = node.get_field("p0", v)
    return fppm.get_root(v), v
@timeit
def list_linear_write(root, v, n):
    """For every list node, apply n chained writes to field v0.

    Each write builds on the previous version (a linear history).
    Returns {node_index: [versions created]}.
    """
    versions = defaultdict(list)
    node = root
    n_i = 0
    while node:
        version = v
        for i in range(n):
            version = node.set_field("v0", i, version)
            versions[n_i].append(version)
        node = node.get_field("p0", version)
        n_i += 1
    return versions
@timeit
def list_linear_read(root, versions):
    """Read field v0 back at every version recorded by list_linear_write.

    NOTE(review): `node` is never advanced, so every read hits the root
    node — confirm that is the intended benchmark workload.
    """
    node = root
    for n_i in range(len(versions)):
        node_versions = versions[n_i]
        for i, version in enumerate(node_versions):
            val = node.get_field("v0", version)
            #assert(val == i)
@timeit
def list_earliest_write(root, v, n):
    """For every list node, apply n writes that all branch off version v.

    Unlike list_linear_write, each write uses the original version v, so
    the history fans out from the earliest version.
    Returns {node_index: [versions created]}.
    """
    versions = defaultdict(list)
    node = root
    n_i = 0
    while node:
        for i in range(n):
            version = node.set_field("v0", i, v)
            versions[n_i].append(version)
        node = node.get_field("p0", version)
        n_i += 1
    return versions
@timeit
def list_earliest_read(root, versions):
    """Read field v0 back at every recorded version.

    NOTE(review): `node` is never advanced — all reads hit the root node.
    """
    node = root
    for n_i in range(len(versions)):
        node_versions = versions[n_i]
        for i, version in enumerate(node_versions):
            val = node.get_field("v0", version)
            #assert(val == i)
@timeit
def list_branching_write(root, v, n):
    """For every list node, build a binary version tree of depth log2(n).

    recurse() creates two new versions from each version, so the history
    forms a full binary tree. Returns {node_index: [versions created]}.
    """
    versions = defaultdict(list)
    def recurse(node, v, n, n_i):
        if n <= 1:
            return None
        left_v = node.set_field("v0", n, v)
        right_v = node.set_field("v0", n, v)
        versions[n_i].append(left_v)
        versions[n_i].append(right_v)
        recurse(node, left_v, n-1, n_i)
        recurse(node, right_v, n-1, n_i)
    node = root
    n_i = 0
    while node:
        recurse(node, v, int(log(n, 2)), n_i)
        node = node.get_field("p0", v)
        n_i += 1
    return versions
@timeit
def list_branching_read(root, versions):
    """Read field v0 back at every recorded version.

    NOTE(review): `node` is never advanced — all reads hit the root node.
    """
    node = root
    for n_i in range(len(versions)):
        node_versions = versions[n_i]
        for i, version in enumerate(node_versions):
            val = node.get_field("v0", version)
            #assert(val == i)
@timeit
def list_random_write(root, v, n):
    """For every list node, apply n writes, each branching off a version
    chosen uniformly at random from those created so far (plus v).

    Returns {node_index: [versions created]}.
    """
    all_versions = defaultdict(list)
    node = root
    n_i = 0
    while node:
        versions = [v]
        for i in range(n):
            version = random.choice(versions)
            new_version = node.set_field("v0", i, version)
            versions.append(new_version)
            all_versions[n_i].append(new_version)
        node = node.get_field("p0", version)
        n_i += 1
    return all_versions
@timeit
def list_random_read(root, versions):
    """Read field v0 back at every recorded version.

    NOTE(review): `node` is never advanced — all reads hit the root node.
    """
    node = root
    for n_i in range(len(versions)):
        node_versions = versions[n_i]
        for i, version in enumerate(node_versions):
            val = node.get_field("v0", version)
            #assert(val == i)
LINKED_SIZE = 64  # number of nodes in each benchmark linked list
def linked_list():
    """Benchmark version-history creation/read patterns on a linked list.

    NOTE(review): this function appears out of date and will fail at
    runtime as written: create_linked_list requires (n, T, p, d) but is
    called with one argument, and the *_history_sweep_* helpers called
    below are not defined in this file (the implementations above are
    named list_linear_write/list_linear_read etc.).
    """
    ns1 = [2**n for n in range(8)]
    ns2 = [2**n for n in range(8)]
    list_creation_ts = [create_linked_list(n)[1] for n in ns1]
    linear_ts = []
    linear_ts_read = []
    for n in ns2:
        print(n)
        (root, v), _ = create_linked_list(int(LINKED_SIZE))
        versions, t = linear_value_history_sweep_write(root, v, n)
        linear_ts.append(t)
        linear_ts_read.append(linear_value_history_sweep_read(root, versions)[1])
    earliest_ts = []
    earliest_ts_read = []
    for n in ns2:
        (root, v), _ = create_linked_list(int(LINKED_SIZE))
        versions, t = earliest_history_sweep_write(root, v, n)
        earliest_ts.append(t)
        earliest_ts_read.append(earliest_history_sweep_read(root, versions)[1])
    branching_ts = []
    branching_ts_read = []
    for n in ns2:
        (root, v), _ = create_linked_list(int(LINKED_SIZE))
        versions, t = branching_history_sweep_write(root, v, n)
        branching_ts.append(t)
        branching_ts_read.append(branching_history_sweep_read(root, versions)[1])
    random_ts = []
    random_ts_read = []
    for n in ns2:
        (root, v), _ = create_linked_list(int(LINKED_SIZE))
        versions, t = random_history_sweep_write(root, v, n)
        random_ts.append(t)
        random_ts_read.append(random_history_sweep_read(root, versions)[1])
    # Report: first timing plus n/time pairs for each workload.
    print("========CREATION TIMES================")
    print("T0 = {}".format(list_creation_ts[0]))
    print(asymptotic(list_creation_ts, ns1))
    print()
    print()
    print("For linked list of size {}".format(LINKED_SIZE))
    print("========LINEAR HISTORY CREATION=======")
    print("T0 = {}".format(linear_ts[0]))
    print(asymptotic(linear_ts, ns2))
    print()
    print("========LINEAR HISTORY READ=======")
    print("T0 = {}".format(linear_ts_read[0]))
    print(asymptotic(linear_ts_read, ns2))
    print()
    print("========EARLIEST HISTORY CREATION========")
    print("T0 = {}".format(earliest_ts[0]))
    print(asymptotic(earliest_ts, ns2))
    print()
    print("========EARLIEST HISTORY READ========")
    print("T0 = {}".format(earliest_ts_read[0]))
    print(asymptotic(earliest_ts_read, ns2))
    print()
    print("========BRANCHING HISTORY CREATION========")
    print("T0 = {}".format(branching_ts[0]))
    print(asymptotic(branching_ts, ns2))
    print()
    print("========BRANCHING HISTORY READ========")
    print("T0 = {}".format(branching_ts_read[0]))
    print(asymptotic(branching_ts_read, ns2))
    print()
    print("========RANDOM HISTORY CREATION========")
    print("T0 = {}".format(random_ts[0]))
    print(asymptotic(random_ts, ns2))
    print()
    print("========RANDOM HISTORY READ========")
    print("T0 = {}".format(random_ts_read[0]))
    print(asymptotic(random_ts_read, ns2))
    print()
######################
### TREE STUFF #######
######################
# Global accumulator for somer assertion testing
@timeit
def create_tree(n, p, d, T):
    """Build a complete persistent binary tree of depth log2(n).

    Returns (root, final_version); the @timeit wrapper turns that into
    ((root, version), seconds). p/d/T are forwarded to FPPM.
    """
    # Initialize Fully Persitent Pointer Machine
    fppm = FPPM(p=p, d=d, T=T)
    # Setup node0 and node1
    root = FPNode("root", fppm, fppm.first_version)
    def recurse(node, name, v, n):
        # create left/right children, link them, then recurse; threads the
        # latest version through the whole construction
        if n <= 1 or node is None:
            return v
        left_n = FPNode(name + "L", fppm, v)
        right_n = FPNode(name + "R", fppm, v)
        v = node.set_field("left", left_n, v)
        v = node.set_field("right", right_n, v)
        v = recurse(left_n, name + "L", v, n-1)
        v = recurse(right_n, name + "R", v, n-1)
        return v
    v = fppm.first_version
    v = recurse(root, "node", v, int(log(n, 2)))
    return root, v
@timeit
def linear_history_tree_write(root, v, n):
    """For every tree node, apply n chained writes to field v0.

    Returns {node_name: [versions created]}.
    """
    def edit_recurse(node, versions):
        if not node:
            return
        version = v
        for i in range(n):
            # each write builds on the previous version (linear history)
            version = node.set_field("v0", i, version)
            versions[node.name].append(version)
        edit_recurse(node.get_field("left", v), versions)
        edit_recurse(node.get_field("right", v), versions)
    versions = defaultdict(list)
    edit_recurse(root, versions)
    return versions
@timeit
def linear_history_tree_read(root, v, versions):
    """Read field v0 back at every version recorded per node."""
    def read_recurse(node, versions):
        if not node:
            return
        for i, version in enumerate(versions[node.name]):
            val = node.get_field("v0", version)
        read_recurse(node.get_field("left", v), versions)
        read_recurse(node.get_field("right", v), versions)
    read_recurse(root, versions)
@timeit
def earliest_history_tree_write(root, v, n):
    """For every tree node, apply n writes that all branch off version v.

    The first write uses v (via `version`); each write creates a new
    version that is recorded but NOT chained onto (fan-out history).
    Returns {node_name: [versions created]}.
    """
    def edit_recurse(node, versions):
        if not node:
            return
        version = v
        for i in range(n):
            new_version = node.set_field("v0", i, version)
            versions[node.name].append(new_version)
        edit_recurse(node.get_field("left", v), versions)
        edit_recurse(node.get_field("right", v), versions)
    versions = defaultdict(list)
    edit_recurse(root, versions)
    return versions
@timeit
def earliest_history_tree_read(root, v, versions):
    """Read field v0 back at every version recorded per node."""
    def read_recurse(node, versions):
        if not node:
            return
        for i, version in enumerate(versions[node.name]):
            val = node.get_field("v0", version)
        read_recurse(node.get_field("left", v), versions)
        read_recurse(node.get_field("right", v), versions)
    read_recurse(root, versions)
@timeit
def branching_history_tree_write(root, v, nt):
    """For every tree node, build a binary version tree of depth log2(nt).

    edit_recurse walks the structural tree; edit_recurse_ver builds the
    per-node version tree. Returns {node_name: [versions created]}.
    """
    def edit_recurse(node, versions):
        if not node:
            return
        edit_recurse_ver(node, v, int(log(nt, 2)), versions)
        version = v
        edit_recurse(node.get_field("left", version), versions)
        edit_recurse(node.get_field("right", version), versions)
    def edit_recurse_ver(node, v2, n, versions):
        if n <= 1:
            return None
        left_v = node.set_field("v0", n, v2)
        right_v = node.set_field("v0", n, v2)
        versions[node.name].append(left_v)
        versions[node.name].append(right_v)
        edit_recurse_ver(node, left_v, n-1, versions)
        edit_recurse_ver(node, right_v, n-1, versions)
    versions = defaultdict(list)
    edit_recurse(root, versions)
    return versions
@timeit
def branching_history_tree_read(root, v, versions):
    """Read field v0 back at every version recorded per node."""
    def read_recurse(node, versions):
        if not node:
            return
        for i, version in enumerate(versions[node.name]):
            val = node.get_field("v0", version)
        read_recurse(node.get_field("left", v), versions)
        read_recurse(node.get_field("right", v), versions)
    read_recurse(root, versions)
@timeit
def random_history_tree_write(root, v, n):
    """For every tree node, apply n writes, each branching off a version
    chosen uniformly at random from those created so far (plus v).

    Returns {node_name: [v, versions created...]}.
    """
    def edit_recurse(node, all_versions):
        if not node:
            return
        versions = [v]
        for i in range(n):
            version = random.choice(versions)
            versions.append(node.set_field("v0", i, version))
        all_versions[node.name] = versions
        version = v
        edit_recurse(node.get_field("left", version), all_versions)
        edit_recurse(node.get_field("right", version), all_versions)
    versions = defaultdict(list)
    edit_recurse(root, versions)
    return versions
@timeit
def random_history_tree_read(root, v, versions):
    """Read field v0 back at every version recorded per node."""
    def read_recurse(node, versions):
        if not node:
            return
        for i, version in enumerate(versions[node.name]):
            val = node.get_field("v0", version)
        read_recurse(node.get_field("left", v), versions)
        read_recurse(node.get_field("right", v), versions)
    read_recurse(root, versions)
TREE_SIZES = 16  # node-count parameter for each benchmark tree
def tree():
    """Benchmark version-history creation/read patterns on a binary tree.

    NOTE(review): create_tree requires (n, p, d, T) but is called with one
    argument, and linear_value_history_tree_write/read are not defined in
    this file (the implementations above are linear_history_tree_write
    and linear_history_tree_read). Also, the "EARLIEST HISTORY CREATION"
    section prints branching_ts_write[0] — looks like a copy-paste slip.
    """
    ns1 = [2**n for n in range(12)]
    list_creation_ts = [create_tree(n)[1] for n in ns1]
    ns2 = [2**n for n in range(12)]
    linear_ts_write = []
    linear_ts_read = []
    for n in ns2:
        (root, v), _ = create_tree(int(TREE_SIZES))
        versions, t = linear_value_history_tree_write(root, v, n)
        linear_ts_write.append(t)
        linear_ts_read.append(linear_value_history_tree_read(root, v, versions)[1])
    earliest_ts_write = []
    earliest_ts_read = []
    for n in ns2:
        (root, v), _ = create_tree(int(TREE_SIZES))
        versions, t = earliest_history_tree_write(root, v, n)
        earliest_ts_write.append(t)
        earliest_ts_read.append(earliest_history_tree_read(root, v, versions)[1])
    branching_ts_write = []
    branching_ts_read = []
    for n in ns2:
        (root, v), _ = create_tree(int(TREE_SIZES))
        versions, t = branching_history_tree_write(root, v, n)
        branching_ts_write.append(t)
        branching_ts_read.append(branching_history_tree_read(root, v, versions)[1])
    random_ts_write = []
    random_ts_read = []
    for n in ns2:
        (root, v), _ = create_tree(int(TREE_SIZES))
        versions, t = random_history_tree_write(root, v, n)
        random_ts_write.append(t)
        random_ts_read.append(random_history_tree_read(root, v, versions)[1])
    # Report: first timing plus n/time pairs for each workload.
    print("========CREATION TIMES================")
    print(asymptotic(list_creation_ts,
        [n - 1 if n % 2 == 0 else n for n in ns1])
    )
    print()
    print("For tree of size {}".format(TREE_SIZES))
    print()
    print("========LINEAR HISTORY CREATION=======")
    print("T0 = {}".format(linear_ts_write[0]))
    print(asymptotic(linear_ts_write, ns2))
    print()
    print("========LINEAR HISTORY READ=======")
    print("T0 = {}".format(linear_ts_read[0]))
    print(asymptotic(linear_ts_read, ns2))
    print()
    print("========EARLIEST HISTORY CREATION========")
    print("T0 = {}".format(branching_ts_write[0]))
    print(asymptotic(earliest_ts_write, ns2))
    print()
    print("========EARLIEST HISTORY READ========")
    print("T0 = {}".format(earliest_ts_read[0]))
    print(asymptotic(earliest_ts_read, ns2))
    print()
    print("========BRANCHING HISTORY CREATION========")
    print("T0 = {}".format(branching_ts_write[0]))
    print(asymptotic(branching_ts_write, ns2))
    print()
    print("========BRANCHING HISTORY READ========")
    print("T0 = {}".format(branching_ts_read[0]))
    print(asymptotic(branching_ts_read, ns2))
    print()
    print("========RANDOM HISTORY CREATION========")
    print("T0 = {}".format(random_ts_write[0]))
    print(asymptotic(random_ts_write, ns2))
    print()
    print("========RANDOM HISTORY READ========")
    print("T0 = {}".format(random_ts_read[0]))
    print(asymptotic(random_ts_read, ns2))
    print()
# BSTS
@timeit
def random_large_bst(n):
    """Insert n random keys into a persistent BST, then look them all up.

    NOTE(review): new versions are never appended to `versions`, so
    random.choice(versions) always returns the initial version — every
    insert branches off v0. Confirm that is the intended workload.
    """
    tree = FPBST()  # local name shadows the tree() benchmark function
    v0 = tree.earliest_version
    versions = [v0]
    ivs = []
    for _ in range(n):
        i = random.randint(0, n)
        v = random.choice(versions)
        v = tree.insert(i, v)
        ivs.append((i, v))
    random.shuffle(ivs)
    # look every inserted key up at the version produced by its insert
    for i, v in ivs:
        tree.find(i, v)
def bst():
    """Benchmark random_large_bst over powers of two and print the timings."""
    ns2 = [2**n for n in range(8)]
    random_ts = [random_large_bst(n)[1] for n in ns2]
    print(asymptotic(random_ts, ns2))
| true |
f770b1cb36012c2146a22d3909bb3aaab8e297aa | Python | Akshat2395/Dynamic_resource_management_AWS-EC2 | /User_Instance/app/routes.py | UTF-8 | 2,401 | 2.71875 | 3 | [
"MIT"
] | permissive | """
LOGIN PAGE
THIS IS THE FIRST AND MAIN PAGE WHEN YOU ACCESS THE WEBSITE. USERS WILL HAVE TO PROVIDE THEIR CREDENTIALS
IN ORDER TO ENTER THE APP
1. ENTER VALID USERNAME - SHOULD BE REGISTERED WITH THE APP ALREADY
2. ENTER VALID PASSWORD
Admin account credentials -
# uname="admin"
# password="Ece1779pass"
"""
from app import app
from flask import render_template,redirect,url_for,request,session
import hashlib
from app import global_http
from app import updater
from mysql import connector as mysqlconnector
# import mysql.connector
from app.config import db_config
# Display the login HTML file
@app.route('/')
@app.route('/login',methods=['GET'])
def login():
    """Render the login form (also serves the site root)."""
    updater.http_inc()  # record this HTTP request via the updater module
    err=""
    return render_template('login.html', err=err)
# Read and verify the login credentials provided by the user
@app.route('/login',methods=['POST'])
def check():
    """Validate the submitted username/password against the users table.

    Renders the login page with an error message on failure; on success
    stores the username in the session and redirects to the user page.

    Bug fix: the DB connection was leaked on the unknown-username path
    (no cnx.close() before the early return); the connection is now
    closed on every path via try/finally.
    """
    updater.http_inc()  # record this HTTP request via the updater module
    uname = request.form.get('uname', "")
    password = request.form.get('pwd', "")
    cnx = mysqlconnector.connect(user=db_config['user'],
                                 password=db_config['password'],
                                 host=db_config['host'],
                                 database=db_config['database'], use_pure=True)
    try:
        cursor = cnx.cursor()
        # Check if username exists (parameterized query: injection-safe)
        query = 'SELECT COUNT(1) FROM new_schema.new_table WHERE username= %s'
        cursor.execute(query, (uname,))
        row = cursor.fetchone()
        cnx.commit()
        if row[0] != 1:
            return render_template('login.html', err='*Username does not exist!')
        # Fetch the stored salt and hash and re-derive the password hash
        querry = 'SELECT salt,pwd_hash From new_schema.new_table where username = %s'
        cursor.execute(querry, (uname,))
        salt1, encrypted_pwd = cursor.fetchone()
        hashed_password = hashlib.pbkdf2_hmac('sha256', password.encode('ascii'),
                                              salt1.encode('ascii'), 100000,
                                              dklen=16).hex()
        # NOTE(review): consider hmac.compare_digest for a constant-time compare
        if encrypted_pwd == hashed_password:
            session["username"] = uname
            return redirect(url_for('user'))
        return render_template('login.html', err="Wrong credentials", uname=uname)
    finally:
        cnx.close()
# Display Login page if user logs out
@app.route('/logout')
def logout():
    """Clear the session user and return to the login page."""
    updater.http_inc()  # record this HTTP request via the updater module
    session.pop("username", None)
    return redirect(url_for("login"))
| true |
4a2d2cbb603188b9547697d9c7b2065571f4b6ef | Python | deanthedream/StructuralThermalAttitudeOptimization | /component.py | UTF-8 | 3,417 | 2.53125 | 3 | [] | no_license | #This python script contains the construct for a component
import numpy as np
from sympy import *
#from sympy.vector import CoordSysCartesian
import quaternion
class component(object):
    """A single spacecraft component: mass, geometry, electrical power
    draw and thermal properties.

    shape is 'box' (dims keys l/w/h), 'cylinder' or 'cone' (dims keys
    r/h plus optional angular extent 'theta'). Vertices are expressed
    in the component frame and offset into the body frame by
    r_component_Body.

    Bug fixes vs. the original: compname was hard-coded to 'box1';
    dims.haskey() does not exist in Python 3 (replaced by the `in`
    operator); self.r_component_Body was referenced but never defined
    (NameError for every shape); the vertex-translation loop rebound
    its loop variable and so never updated the list; and the mutable
    default arguments were shared across calls.
    """
    def __init__(self, compname, mass=0., shape='box', dims=None,
                 PelectricalIn=None, MaxTemp=None, MinTemp=None,
                 specific_heat_capacity=0., emissivity=0., absorptivity=0.):
        # Per-call construction avoids the shared mutable-default pitfall.
        if dims is None:
            dims = {"l": 0., "w": 0., "h": 0.}
        if PelectricalIn is None:
            PelectricalIn = [0., 0., 0., 0., 0.]
        if MaxTemp is None:
            MaxTemp = [1., 1., 1., 1., 1.]
        if MinTemp is None:
            MinTemp = [0., 0., 0., 0., 0.]
        self.compname = compname  # bug fix: was hard-coded to 'box1'
        self.componentFrame = np.asarray([0., 0., 0.])
        self.mass = mass
        self.shape = shape
        self.dims = dims
        self.PelectricalIn = PelectricalIn
        self.MaxTemp = MaxTemp
        self.MinTemp = MinTemp
        self.specific_heat_capacity = specific_heat_capacity
        self.emissivity = emissivity
        self.absorptivity = absorptivity
        # Component origin in the body frame; defaults to a zero offset
        # (previously only present as commented-out code -> NameError).
        self.r_component_Body = np.asarray([0., 0., 0.])
        # What shape is the component
        if self.shape == 'box':
            self.l = dims['l']
            self.w = dims['w']
            self.h = dims['h']
            l, w, h = self.l, self.w, self.h
            # the eight corners of the box, component frame
            self.vertices = [
                np.asarray([0., 0., 0.]),
                np.asarray([l,  0., 0.]),
                np.asarray([0., w,  0.]),
                np.asarray([0., 0., h]),
                np.asarray([l,  w,  0.]),
                np.asarray([l,  0., h]),
                np.asarray([0., w,  h]),
                np.asarray([l,  w,  h]),
            ]
        elif self.shape == 'cylinder':
            self.h = dims['h']
            self.r = dims['r']
            if 'theta' in dims:  # Angular size of cylinder, i.e. 2pi is a half cylinder
                self.theta = dims['theta']
            else:
                self.theta = 2 * np.pi
            # axis endpoints only
            self.vertices = [
                np.asarray([0., 0., 0.]),
                np.asarray([0., 0., self.h]),
            ]
        elif self.shape == 'cone':
            self.h = dims['h']
            self.r = dims['r']
            if 'theta' in dims:  # Angular size of cylinder, i.e. 2pi is a half cylinder
                self.theta = dims['theta']
            else:
                self.theta = 2 * np.pi
            self.vertices = [
                np.asarray([0., 0., 0.]),
                np.asarray([0., 0., self.h]),
            ]
        # Translate all vertices into the body frame (rebuilds the list;
        # the original for-loop only rebound its loop variable).
        self.vertices = [vertex + self.r_component_Body for vertex in self.vertices]
        # Component attitude quaternion relative to the body frame
        self.q_component_Body = np.quaternion(0., 0., 0., 0.)
| true |
3610be611eb9fbaf2320369fcbeeda5dca38d831 | Python | Marx314/adventOfCodePython | /src/Year2015/Day20.py | UTF-8 | 1,218 | 3.34375 | 3 | [] | no_license | import functools
from sympy import divisors
@functools.lru_cache(maxsize=None)
def _(n):
    """Memoized list of divisors of n.

    Bug fix: maxsize=0 disables caching entirely (every call is a miss),
    defeating the memoization this wrapper exists for; maxsize=None
    caches every result.
    """
    return divisors(n)
def calculate_house_sympy(house):
    """Presents delivered to `house`: 10 per elf, elves = divisors of house."""
    return 10 * sum(_(house))
def houses_of_cards(house):
    """Present totals for houses 1..house, as a list."""
    return [calculate_house_sympy(i) for i in range(1, house + 1)]
def houses_of_cards_sum(max_gift_count, starting_at=1):
    """First house at/after `starting_at` receiving >= max_gift_count presents.

    Returns None implicitly if no house below max_gift_count qualifies.
    """
    house = starting_at
    while house < max_gift_count:
        if calculate_house_sympy(house) >= max_gift_count:
            return house
        house += 1
class Day20(object):
    """Part-two variant: each elf delivers to at most 50 houses, 11
    presents per elf number. Stateful: houses must be evaluated in
    increasing order for the per-elf visit counts to be meaningful."""
    def __init__(self):
        # elf number -> how many houses that elf has already visited
        self.elf_per_house = {}
    def calculate_house_sympy_fifty(self, house):
        """Presents delivered to `house` with the 50-house-per-elf limit."""
        # count_elf returns the elf number while active, 0 once retired
        d = [self.count_elf(i) for i in _(house)]
        return sum(d) * 11
    def houses_of_cards_sum_fifty(self, max_gift_count, starting_at):
        """First house from `starting_at` receiving >= max_gift_count presents."""
        for i in range(starting_at, max_gift_count):
            if self.calculate_house_sympy_fifty(i) >= max_gift_count:
                return i
    def count_elf(self, i):
        """Record a visit by elf `i`; return i while the elf is still
        active, or 0 once it has already visited 50 houses."""
        if i not in self.elf_per_house:
            self.elf_per_house[i] = 0
        elif self.elf_per_house[i] >= 50:
            return 0
        self.elf_per_house[i] += 1
        return i
| true |
03084fa256c9be5a177a8c6929f1921f66e5e8b6 | Python | MarquesThiago/Manga_Downloader | /src/Model/save.py | UTF-8 | 1,319 | 2.546875 | 3 | [
"MIT"
] | permissive | import os, sys
import requests
sys.path.insert(0, "./../")
from Tools.Message.messages import (message_information, message_sucess, message_failed, message_warning)
def download_images(url, name, page, name_destiny=None):
    """Download one page image for a manga chapter, trying up to 3 times.

    url: image URL; name: manga name used in the file name; page: page
    number; name_destiny: destination folder name (defaults to `name`).
    Returns True on success, False after three failed attempts.

    Bug fixes vs. the original: the nested retry machinery mutated a
    closed-over counter without `nonlocal` (UnboundLocalError), called
    helpers under names that do not exist (created_path/zero_counter vs
    _created_path/_zero_counter), and ended with a module-level `return`
    over undefined names; all of that is replaced by a simple retry loop.
    """
    if name_destiny is None:
        name_destiny = name
    destiny = _created_path(name_destiny)
    path = os.path.join(destiny, "{}_0{}.jpg".format(name, page))
    for attempt in range(3):
        response = requests.get(url=url)
        if response.status_code == requests.codes.OK:
            with open(path, "wb") as image:
                image.write(response.content)
            return True
        message_warning("Error in writer image")
    message_failed("Sorry, Really no Found")
    return False
def _created_path(name):
    """Return (creating it if needed) the image folder for `name`."""
    # Bug fix: the original passed a plain string containing a literal
    # '{name}' (missing f prefix) to an undefined helper `path_abs`.
    destiny = os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "..", "Media", "images", name))
    if not os.path.exists(destiny):
        os.makedirs(destiny)
    return destiny
57279a7b29fe7dabe0cc91af6334278eafdd4709 | Python | HenrygShen/Smart-Diet-Diary | /_legacy/Diet_Diary/Object_Size.py | UTF-8 | 7,945 | 3.046875 | 3 | [] | no_license | # import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import math
import matplotlib.pyplot as plt
import Foreground_Extraction
# Supply an image that has a coin as the leftmost object
# To run the code:
# python object_size.py --image test1.jpg --width 2.65
def midpoint(ptA, ptB):
    """Midpoint of the segment ptA-ptB as an (x, y) tuple."""
    mid_x = (ptA[0] + ptB[0]) / 2
    mid_y = (ptA[1] + ptB[1]) / 2
    return mid_x, mid_y
# def col_major_count_pixel(bounding_box, image_data):
# last_valid_col = 0
# total_pixels = 0
# max_row = 0
# row_count = 0
# for i in range(bounding_box[1], bounding_box[3] + bounding_box[1]):
# first_col_pos = -1
# for j in range(bounding_box[0], bounding_box[2] + bounding_box[0]):
# row_count += 1
# if image_data[i][j][0] != 0 and first_col_pos != -1:
# first_col_pos = j
# if image_data[i][j][0] != 0:
# last_valid_col = j
# if j == bounding_box[2] + bounding_box[0] - 1:
# total_pixels = total_pixels + (last_valid_col - first_col_pos)
# if row_count > max_row:
# max_row = row_count
# row_count = 0
# out = (total_pixels, max_row)
# return out
def col_major_count_pixel(bounding_box, image_data):
    """Count foreground pixels inside bounding_box = (x, y, w, h).

    Returns (total_pixels, max_row): the number of pixels whose channel
    values are not all zero, and the largest such count in any one row.
    NOTE(review): np.all(pixel != [0, 0, 0]) only counts pixels where
    EVERY channel is non-zero; e.g. pure red (255, 0, 0) is treated as
    background — confirm that is intended.
    """
    total_pixels = 0
    max_row = 0
    row_count = 0
    for i in range(bounding_box[1], bounding_box[3] + bounding_box[1]):
        for j in range(bounding_box[0], bounding_box[2] + bounding_box[0]):
            if np.all(image_data[i][j] != [0, 0, 0]):
                row_count += 1
        total_pixels += row_count
        if row_count > max_row:
            max_row = row_count
        row_count = 0  # reset the per-row counter
    out = (total_pixels, max_row)
    return out
def row_major_count_pixel(bounding_box, image_data):
    """Column-wise twin of col_major_count_pixel.

    Returns (total_pixels, max_col): the number of pixels whose channel
    values are not all zero inside bounding_box = (x, y, w, h), and the
    largest such count in any one column. Note the image is indexed
    [row][col], hence image_data[j][i] here.
    """
    total_pixels = 0
    max_col = 0
    col_count = 0
    for i in range(bounding_box[0], bounding_box[2] + bounding_box[0]):
        for j in range(bounding_box[1], bounding_box[3] + bounding_box[1]):
            if np.all(image_data[j][i] != [0, 0, 0]):
                col_count += 1
        total_pixels += col_count
        if col_count > max_col:
            max_col = col_count
        col_count = 0  # reset the per-column counter
    out = (total_pixels, max_col)
    return out
# --- Script: estimate the food object's size and volume, using a coin of
# known diameter (ref_width cm) in the same photo as the scale reference. ---
# inputs
# rectangle around the object you want to find the size of (topleft x, topleft y, width, height)
# rect = (272,913,249,239)
# testap1 = (71, 107, 81, 79)
coin_box = (18, 134, 27, 28)
food_box = (71, 107, 81, 79)
ref_width = 2.65 # size of coin
image = cv2.imread('testap1.jpg')
# load the image, convert it to grayscale, and blur it slightly
# (NOTE(review): stale comment — no grayscale/blur happens in this script)
height, width, depth = image.shape
print("orig height: " + str(height))
print("orig width: " + str(width))
# scale the longest side down to 300 px, preserving aspect ratio
scale = width/300 if width>height else height/300
new_width = int(width/scale)
new_height = int(height/scale)
print("new height: " + str(new_height))
print("new width: " + str(new_width))
image = cv2.resize(image, (new_width, new_height))
# isolate coin and food against a black background
coin_image = Foreground_Extraction.extract_foreground(image, coin_box)
food_image = Foreground_Extraction.extract_foreground(image, food_box)
added_images = cv2.add(coin_image, food_image)
##########
# pixel counts and max extents per axis for both objects
food_measure_col_major = col_major_count_pixel(food_box, food_image)
coin_measure_col_major = col_major_count_pixel(coin_box, coin_image)
print("food_pixel: {} -------------- food width: {}".format(food_measure_col_major[0], food_measure_col_major[1]))
print("coin_pixel: {} -------------- coin width: {}".format(coin_measure_col_major[0], coin_measure_col_major[1]))
food_measure_row_major = row_major_count_pixel(food_box, food_image)
coin_measure_row_major = row_major_count_pixel(coin_box, coin_image)
print("food_pixel: {} -------------- food height: {}".format(food_measure_row_major[0], food_measure_row_major[1]))
print("coin_pixel: {} -------------- coin height: {}".format(coin_measure_row_major[0], coin_measure_row_major[1]))
# convert pixel measures to cm via the coin's known diameter
coin_area = (ref_width/2)**2 * math.pi
food_area = food_measure_row_major[0]/coin_measure_row_major[0] * coin_area
food_width = food_measure_col_major[1]/coin_measure_col_major[1] * ref_width
print("coin_area: {} cm2".format(coin_area))
print("food_area: {} cm2".format(food_area))
print("food_width: {} cm".format(food_width))
# volume assuming a sphere of diameter food_width
food_volume = (4 * math.pi * (food_width/2)**3)/3
print("food_volume: {} cm3".format(food_volume))
# plt.imshow(added_images)
# plt.show()
# rect = (10, 10, new_width - 20, new_height - 20)
# addedImages = Foreground_Extraction.extract_foreground(image, rect)
# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# gray = cv2.GaussianBlur(gray, (7, 7), 0)
#
#
#
# # perform edge detection, then perform a dilation + erosion to
# # close gaps in between object edges
# edged = cv2.Canny(gray, 50, 100)
# edged = cv2.dilate(edged, None, iterations=1)
# edged = cv2.erode(edged, None, iterations=1)
#
# # plt.imshow(edged)
# # plt.show()
# # exit()
# # find contours in the edge map
# cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
# cv2.CHAIN_APPROX_SIMPLE)
# cnts = imutils.grab_contours(cnts)
#
# # sort the contours from left-to-right and initialize the
# # 'pixels per metric' calibration variable
# (cnts, _) = contours.sort_contours(cnts)
# pixelsPerMetric = None
#
# # loop over the contours individually
# for c in cnts:
# # if the contour is not sufficiently large, ignore it
# if cv2.contourArea(c) < 100:
# continue
#
# # compute the rotated bounding box of the contour
# orig = image.copy()
# box = cv2.minAreaRect(c)
# box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
# box = np.array(box, dtype="int")
#
# # order the points in the contour such that they appear
# # in top-left, top-right, bottom-right, and bottom-left
# # order, then draw the outline of the rotated bounding
# # box
# box = perspective.order_points(box)
# cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
#
# # loop over the original points and draw them
# for (x, y) in box:
# cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
#
# # unpack the ordered bounding box, then compute the midpoint
# # between the top-left and top-right coordinates, followed by
# # the midpoint between bottom-left and bottom-right coordinates
# (tl, tr, br, bl) = box
# (tltrX, tltrY) = midpoint(tl, tr)
# (blbrX, blbrY) = midpoint(bl, br)
#
# # compute the midpoint between the top-left and top-right points,
# # followed by the midpoint between the top-righ and bottom-right
# (tlblX, tlblY) = midpoint(tl, bl)
# (trbrX, trbrY) = midpoint(tr, br)
#
# # draw the midpoints on the image
# cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
# cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
# cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
# cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
#
# # draw lines between the midpoints
# cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
# (255, 0, 255), 2)
# cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
# (255, 0, 255), 2)
#
# # compute the Euclidean distance between the midpoints
# dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
# dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
#
# # if the pixels per metric has not been initialized, then
# # compute it as the ratio of pixels to supplied metric
# # (in this case, inches)
# if pixelsPerMetric is None:
# pixelsPerMetric = dB / refWidth
#
# # compute the size of the object
# dimA = dA / pixelsPerMetric
# dimB = dB / pixelsPerMetric
#
# # draw the object sizes on the image
# cv2.putText(orig, "{:.1f}cm".format(dimA),
# (int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
# 0.65, (255, 255, 255), 2)
# cv2.putText(orig, "{:.1f}cm".format(dimB),
# (int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
# 0.65, (255, 255, 255), 2)
#
# # show the output image
# W = 500
# height, width, depth = orig.shape
# imgScale = W / width
# newX, newY = orig.shape[1] * imgScale, orig.shape[0] * imgScale
# imS = cv2.resize(orig, (int(newX), int(newY)))
# cv2.imshow("Image", imS)
# cv2.waitKey(0) | true |
bfdd9f18ede0d48a3ab5165aa2ee4ba451d8135d | Python | bormaley999/Udacity_Python_Intro | /scripting/open_txt_file.py | UTF-8 | 894 | 3.3125 | 3 | [] | no_license | # f = open('/Users/nick/Desktop/Обучение/AI/Intro to Python from Udacity/some_file.txt', 'r')
# file_data = f.read()
# f.close()
#
# print(file_data)
#
#
# f = open('/Users/nick/Desktop/Обучение/AI/Intro to Python from Udacity/some_file.txt', 'a')
# f.append = ("Hello there!")
# f.close()
#
# print(f.append)
#
# f = open('/Users/nick/Desktop/Обучение/AI/Intro to Python from Udacity/some_file.txt', 'a')
# f.append = ("Hello there! How are you?")
# f.close()
#
# print(f.append)
# with open('/Users/nick/Desktop/Обучение/AI/Intro to Python from Udacity/camelot.txt') as song:
# print(song.read(2))
# print(song.read(8))
# print(song.read())
# Collect the stripped lines of camelot.txt, then show them.
with open('/Users/nick/Desktop/Обучение/AI/Intro to Python from Udacity/camelot.txt') as f:
    camelot_lines = [line.strip() for line in f]
print(camelot_lines)
| true |
9511c0fc3698faf127d73f33afc23e92ee4f5bda | Python | candyer/leetcode | /heightChecker.py | UTF-8 | 843 | 3.84375 | 4 | [] | no_license | # https://leetcode.com/problems/height-checker/description/
# 1051. Height Checker
# Students are asked to stand in non-decreasing order of heights for an annual photo.
# Return the minimum number of students not standing in the right positions.
# (This is the number of students that must move in order for all students to be standing in non-decreasing order of height.)
# Example 1:
# Input: [1,1,4,2,1,3]
# Output: 3
# Explanation:
# Students with heights 4, 3 and the last 1 are not standing in the right positions.
# Note:
# 1 <= heights.length <= 100
# 1 <= heights[i] <= 100
def heightChecker(heights):
"""
:type heights: List[int]
:rtype: int
"""
res = 0
for a, b in zip(heights, sorted(heights)):
if a != b:
res += 1
return res
assert heightChecker([1,1,4,2,1,3]) == 3
assert heightChecker([1,1,2,3]) == 0
| true |
de74310c2d8f54d398fe34dfc42b12c5262fd337 | Python | gregmarra/pxlart | /simulations/ca.py | UTF-8 | 2,651 | 3.640625 | 4 | [
"MIT"
] | permissive | """ Code example from Complexity and Computation, a book about
exploring complexity science with Python. Available free from
http://greenteapress.com/complexity
Copyright 2011 Allen B. Downey.
Distributed under the GNU General Public License at gnu.org/licenses/gpl.html.
"""
import numpy
class CASimulation(object):
"""A CA is a cellular automaton; the parameters for __init__ are:
rule: an integer in the range 0-255 that represents the CA rule
using Wolfram's encoding.
n: the number of rows (timesteps) in the result.
ratio: the ratio of columns to rows.
"""
def __init__(self, rule, n):
"""Attributes:
table: rule dictionary that maps from triple to next state.
n: the number of cells
array: the numpy array that contains the data.
next: the index of the next empty row.
"""
self.table = self.make_table(rule)
self.n = n
self.array = numpy.zeros(n, dtype=numpy.int8)
self.next = 0
def make_table(self, rule):
"""Returns a table for the given CA rule. The table is a
dictionary that maps 3-tuples to binary values.
"""
table = {}
for i, bit in enumerate(binary(rule, 8)):
t = binary(7-i, 3)
table[t] = bit
return table
def start_single(self):
"""Starts with one cell in the middle of the top row."""
self.array[self.n/2] = 1
self.next += 1
def start_random(self):
"""Start with random values in the top row."""
self.array = numpy.random.random(self.n).round()
self.next += 1
def loop(self, steps=1):
"""Executes the given number of time steps."""
[self.step() for i in xrange(steps)]
def step(self):
"""Executes one time step by computing the next row of the array."""
i = self.next
self.next += 1
a = self.array
a_old = numpy.copy(a)
t = self.table
for i in xrange(1,self.n-1):
a[i] = t[tuple(a_old[i-1:i+2])]
def get_array(self, start=0, end=None):
"""Gets a slice of columns from the CA, with slice indices
(start, end). Avoid copying if possible.
"""
if start==0 and end==None:
return self.array
else:
return self.array[:, start:end]
def binary(n, digits):
"""Returns a tuple of (digits) integers representing the
integer (n) in binary. For example, binary(3,3) returns (0, 1, 1)"""
t = []
for i in range(digits):
n, r = divmod(n, 2)
t.append(r)
return tuple(reversed(t))
| true |
b01b55219b6507f378480feec85bd592f3cf1c2b | Python | GenevieveBuckley/cellprofiler-core | /cellprofiler_core/image/abstract_image/file/url/_objects_image.py | UTF-8 | 3,355 | 2.515625 | 3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | import bioformats
import imageio
import numpy
from .... import Image
from .....utilities.image import convert_image_to_objects
from .....utilities.pathname import url2pathname
from ._url_image import URLImage
class ObjectsImage(URLImage):
"""Provide a multi-plane integer image, interpreting an image file as objects"""
def __init__(self, name, url, series, index, volume=False, spacing=None):
self.__data = None
self.volume = volume
if volume:
index = self.get_indexes(url)
series = None
self.__image = None
self.__spacing = spacing
URLImage.__init__(
self, name, url, rescale=False, series=series, index=index, volume=volume
)
def provide_image(self, image_set):
"""Load an image from a pathname
"""
if self.__image is not None:
return self.__image
if self.volume:
return self.get_image_volume()
self.cache_file()
filename = self.get_filename()
channel_names = []
url = self.get_url()
properties = {}
if self.index is None:
metadata = bioformats.get_omexml_metadata(self.get_full_name())
ometadata = bioformats.omexml.OMEXML(metadata)
pixel_metadata = ometadata.image(
0 if self.series is None else self.series
).Pixels
nplanes = pixel_metadata.SizeC * pixel_metadata.SizeZ * pixel_metadata.SizeT
indexes = list(range(nplanes))
elif numpy.isscalar(self.index):
indexes = [self.index]
else:
indexes = self.index
planes = []
offset = 0
for i, index in enumerate(indexes):
properties["index"] = str(index)
if self.series is not None:
if numpy.isscalar(self.series):
properties["series"] = self.series
else:
properties["series"] = self.series[i]
img = bioformats.load_image(
self.get_full_name(), rescale=False, **properties
).astype(int)
img = convert_image_to_objects(img).astype(numpy.int32)
img[img != 0] += offset
offset += numpy.max(img)
planes.append(img)
image = Image(
numpy.dstack(planes),
path_name=self.get_pathname(),
file_name=self.get_filename(),
convert=False,
)
self.__image = image
return image
def get_indexes(self, url):
pathname = url2pathname(url)
# Javabridge gave us dud indexes, let's find our own planes
self.__data = imageio.volread(pathname).astype(int)
indexes = list(range(self.__data.shape[0]))
return indexes
def get_image_volume(self):
imdata = self.__data
planes = [] # newplanes = numpy.zeros_like(test2)
for planeid in range(imdata.shape[0]):
planes.append(convert_image_to_objects(imdata[planeid]).astype(numpy.int32))
imdata = numpy.stack(planes)
image = Image(
imdata,
path_name=self.get_pathname(),
file_name=self.get_filename(),
convert=False,
dimensions=3,
)
self.__image = image
return image
| true |
9bdd1763c388a65e37da93e059feac4c358615cd | Python | MaiziXiao/Algorithms | /Leetcode/回溯法/55-medium-Jump Game.py | UTF-8 | 1,607 | 4.0625 | 4 | [] | no_license | from typing import List
class Solution:
"""
Given an array of non-negative integers, you are initially positioned at the first index of the array.
Each element in the array represents your maximum jump length at that position.
Determine if you are able to reach the last index.
Example 1
Input: [2,3,1,1,4]
Output: true
Explanation: Jump 1 step from index 0 to 1, then 3 steps to the last index.
Example 2:
Input: [3,2,1,0,4]
Output: false
Explanation: You will always arrive at index 3 no matter what. Its maximum
jump length is 0, which makes it impossible to reach the last index.
"""
def canJump(self, nums: List[int]) -> bool:
# https://leetcode.com/articles/jump-game/
# Approach 1: 贪心算法
_max = 0
_len = len(nums)
for i in range(_len-1):
if i == len(nums) - 1:
return True
# 根本到不了这步
if _max < i:
return False
# 更新max: 判断max和现在这个位置能跳到最远的位置谁大
_max = max(_max, nums[i] + i)
return _max >= _len - 1
# Approach 2: 回溯 backtracking
# This is the inefficient solution where we try every single jump pattern that takes us from the first
# position to the last. We start from the first position and jump to every index that is reachable.
# We repeat the process until last index is reached. When stuck, backtrack.
# Approach 3: 动态规划 贪心
print(Solution().canJump([2,5,0,0])) | true |
b3f18b4b21f9c8dacd50ad79ca9a765a97ce2fd5 | Python | spencerT101/FlaskTemplateLabDay14 | /Modules/event_list.py | UTF-8 | 430 | 3.015625 | 3 | [] | no_license | from modules.event import Event
event_1 = Event("Danny's Birthday", "14-04-2021", 50, "Grand Ballroom", "An elegant birthday bash!", True, True)
event_2 = Event("Buff Bash", '15-09-2021', 35, 'Snug Bar', 'Nudist Party', False, False)
event_3 = Event("Summer Party", '20-07-2021', 60, 'Secret Garden', 'Celebration of Summer', False, False)
events = [event_1, event_2, event_3]
def add_new_event(event):
events.append(event) | true |
bf5c81f91a8689c62c35cab16abbf765b9fd71bc | Python | Joy-Lan/AID1909 | /学生信息管理.py | UTF-8 | 4,226 | 3.796875 | 4 | [] | no_license | class StudentModel:
'''
学生模型
'''
#id不需要传值 放在最后一位
def __init__(self, name="", age=0, score=0,id=0):
'''
创建学生对象
:param id: 编号 该学生的唯一标识
:param name: 姓名 str
:param age: 年龄 int
:param score: 成绩 int
'''
self.id = id
self.name = name
self.age = age
self.score = score
class StudentManagerController:
'''
学生管理控制器 处理业务逻辑
'''
__stu_id = 1000
def __init__(self):
self.__stu_list = []
@property
def stu_list(self):
return self.__stu_list
def add_student(self,stu):
#为学生设置id 递增
stu.id = StudentManagerController.__stu_id + 1
#将学生添加到学生列表
self.__stu_list.append(stu)
def remove_student(self,id):
for item in self.stu_list:
if item.id == id:
self.stu_list.remove(item)
return True
raise ValueError('删除失败:id错误')
def update_student(self,stu):
for item in self.__stu_list:
if item.id == stu.id:
item.name = stu.name
item.age = stu.age
item.score = stu.score
return True
raise ValueError('未找到对应学员')
#根据成绩排序
def order_by_score(self):
for i in range(len(self.__stu_list)-1):
for c in range(i+1,len(self.__stu_list)):
if self.stu_list[i].score > self.__stu_list[c].score:
self.stu_list[i],self.stu_list[c] = \
self.stu_list[c],self.stu_list[i]
#界面视图
class StudentManagerView:
def __init__(self):
self.__manager = StudentManagerController()
def __display_menu(self):
print('+--------------------------+')
print('| 1)添加学生信息 |')
print('| 2)显示学生信息 |')
print('| 3)删除学生信息 |')
print('| 4)修改学生信息 |')
print('| 5)按照成绩升序排序 |')
print('+--------------------------+')
def __select_menu(self):
option = input('请输入:')
if option == '1':
pass
elif option == '2':
pass
elif option == '3':
pass
elif option == '4':
pass
elif option == '5':
pass
def main(self):
'''
界面入口
:return:
'''
while True:
self.__display_menu()
self.__select_menu()
#输入学生__input_students
def __input_students(self):
name = input('请输入学生姓名')
age = int(input('请输入学生年龄'))
score = int(input('请输入学生成绩'))
stu = StudentModel(name,age,score)
self.__manager.add_student(stu)
#输出学生__output_students
def __output_students(self,list):
for item in list:
print(item.name,item.age,item.score,item.id)
#删除学生__delete_student
def __delete_student(self):
id = int(input('请输入要删除学生的学号:'))
if self.__manager.remove_student(id):
print('删除成功')
else:
print('删除失败')
#修改学生信息__modify_student
def __modify_student(self):
id = int(input('请输入要修改学生的学号'))
name = input('请输入新的学生姓名:')
age = int(input('请输入新的学生年龄'))
score = int(input('请输入新的学生成绩'))
stu = StudentModel(name,age,score,id)
if self.__manager.update_student(stu):
print('修改成功')
else:
print('修改失败')
# 根据成绩排序__output_student_by_socore
def __output_student_by_socore(self):
self.__manager.order_by_score()
self.__output_students(self.__manager.stu_list)
view = StudentManagerView()
# view.display_menu()
view.main()
| true |
755abd7e568e71a3f75872da7a103cb8be35cad3 | Python | Stikerz/yoyo | /yoyo/location/utils/services.py | UTF-8 | 2,140 | 3 | 3 | [] | no_license | import statistics
from datetime import datetime, timedelta
import requests
from yoyo.settings import WEATHER_KEY
def get_weather_history(payload):
try:
response = requests.get(
"http://api.weatherapi.com/v1/history.json", params=payload
)
data = response.json()
if len(data) == 2:
forecast_day = data["forecast"]["forecastday"][0]["day"]
info = {
"average": forecast_day["avgtemp_c"],
"minimum": forecast_day["mintemp_c"],
"maximum": forecast_day["maxtemp_c"],
}
return info
else:
raise Exception(f"Error retrieving weather:{payload['q']} City")
except requests.exceptions.ConnectionError as errc:
raise Exception(f"Connection Error: {errc}")
except requests.exceptions.Timeout as errt:
raise Exception(f"Timeout Error: {errt}")
except requests.exceptions.HTTPError as errh:
raise Exception(f"Http Error: {errh}")
except requests.exceptions.RequestException as err:
raise Exception(f"Error: {err}")
def get_weather_api_key():
if WEATHER_KEY is None:
raise Exception(
f"Error retrieving weather key from env variable 'WEATHER_KEY' "
f"please make sure key is set"
)
return WEATHER_KEY
def get_city_weather_info(city, days):
dates = [datetime.today().strftime("%Y-%m-%d")]
maximum = []
minimum = []
average = []
for day in range(1, int(days)):
d = datetime.today() - timedelta(days=day)
dates.append(d.strftime("%Y-%m-%d"))
payload = {"key": get_weather_api_key(), "q": city}
for date in dates:
payload["dt"] = date
data = get_weather_history(payload)
maximum.append(data["maximum"])
minimum.append(data["minimum"])
average.append(data["average"])
info = {
"median": round(statistics.median(average), 2),
"average": round(statistics.mean(average), 2),
"minimum": round(min(minimum), 2),
"maximum": round(max(maximum), 2),
}
return info
| true |
9bb615b1fad845a4335fbd0d31de04fb00881315 | Python | usako1124/teach-yourself-python | /chap07/re_read.py | UTF-8 | 752 | 3.671875 | 4 | [] | no_license | import re
# 与えられたパターptnと入力文字列inputでマッチした結果を表示する関数
def show_match(ptn, input):
results = ptn.finditer(input)
for result in results:
print(result.group())
print('-------------------')
re1 = re.compile('いろ(?=はに)')
re2 = re.compile('いろ(?!はに)')
re3 = re.compile('(?<=。)いろ')
re4 = re.compile('(?<!。)いろ')
msg1 = 'いろはにほへと'
msg2 = 'いろものですね。いろいろと'
show_match(re1, msg1) # いろ
show_match(re1, msg2) #
show_match(re2, msg1) #
show_match(re2, msg2) # いろ、いろ、いろ
show_match(re3, msg1) #
show_match(re3, msg2) # いろ
show_match(re4, msg1) # いろ
show_match(re4, msg2) # いろ、いろ
| true |
53e2fd567653ce70b69e353c9e0bf2c8423f192a | Python | AdityaPutraS/Tubes-TBFO-1 | /DFA Generator/StateGenerator.py | UTF-8 | 7,466 | 3.171875 | 3 | [] | no_license | #-----------------------------------#
# #
# Aditya Putra Santosa / 13517013 #
# Informatika ITB #
# #
#-----------------------------------#
from copy import deepcopy
pilihan = (1,1)
k = '-'
o = 'O'
x = 'X'
#Fungsi fungsi penting
def win(board):
for i in range(0,3):
if(board[i][0]==board[i][1]==board[i][2]):
if(board[i][0]==x):
return x
else:
if(board[i][0]==o):
return o
for i in range(0,3):
if(board[0][i]==board[1][i]==board[2][i]):
if(board[0][i]==x):
return x
else:
if(board[0][i]==o):
return o
if(board[0][0]==board[1][1]==board[2][2]):
if(board[0][0]==x):
return x
else:
if(board[0][0]==o):
return o
if(board[0][2]==board[1][1]==board[2][0]):
if(board[0][2]==x):
return x
else:
if(board[0][2]==o):
return o
return k
def score(board,depth):
if(win(board)==o): #ganti ini, set ke o untuk player duluan
return 10-depth
elif(win(board)==x): #ganti ini, set ke x untuk player duluan
return depth-10
else:
return 0
def gameOver(board):
if(win(board)==k):
return not((k in board[0] or k in board[1])or k in board[2])
else:
return True
def moveTersedia(board):
move = []
for i in range(0,3):
for j in range(0,3):
if(board[i][j]==k):
temp = (i,j)
move.append(temp)
return move
def minmax(board,depth,player):
#print(board)
if(gameOver(board)):
return score(board,depth)
else:
global pilihan
depth += 1
scores = []
moves = []
for mov in moveTersedia(board):
boardTemp = deepcopy(board)
boardTemp[mov[0]][mov[1]] = player
if(player==o): #ganti ini,set ke o untuk player duluan
nilai = minmax(boardTemp,depth,x) #ganti ini, set ke x untuk player duluan
else:
nilai = minmax(boardTemp,depth,o) #ganti ini, set ke o untuk player duluan
scores.append(nilai)
moves.append(mov)
if(player==o): #ganti ini, set ke o untuk player duluan
maksIndeks = scores.index(max(scores))
pilihan = moves[maksIndeks]
return scores[maksIndeks]
else:
minIndeks = scores.index(min(scores))
pilihan = moves[minIndeks]
return scores[minIndeks]
def genBoard(s):
li = s.split(',')
acuan = [(-1,-1),(0,0),(0,1),(0,2),(1,0),(1,1),(1,2),(2,0),(2,1),(2,2)]
giliran = x
hasil = [[k,k,k],
[k,k,k],
[k,k,k]
]
for a in li:
gerakan = acuan[int(a)]
hasil[gerakan[0]][gerakan[1]] = giliran
if(giliran==x):
giliran = o
else:
giliran = x
return hasil
def printBoard(s):
if(not(s=='-')):
li = genBoard(s)
print(li[0])
print(li[1])
print(li[2])
print('\n')
''' Fungsi yang tidak dipakai lagi :
def putarKanan(s):
li = s.split(',')
putar = ['0','3','6','9','2','5','8','1','4','7']
hasil = []
for i in li:
hasil.append(putar[int(i)])
return ','.join(hasil)
def flipVer(s):
li = s.split(',')
putar = ['0','3','2','1','6','5','4','9','8','7']
hasil = []
for i in li:
hasil.append(putar[int(i)])
return ','.join(hasil)
def genAllRotFlip(s):
hasil = []
gBoard = []
for i in range(0,4):
if(not(s in hasil) and not(genBoard(s) in gBoard)):
hasil.append(s)
gBoard.append(genBoard(s))
s = putarKanan(s)
s = flipVer(s)
for i in range(0,4):
if(not(s in hasil) and not(genBoard(s) in gBoard)):
hasil.append(s)
gBoard.append(genBoard(s))
s = putarKanan(s)
return hasil
'''
def genNomor(nomor):
if(nomor<10):
return '(00'+str(nomor)+')'
else:
if(nomor<100):
return '(0'+str(nomor)+')'
else:
return '('+str(nomor)+')'
def genState(s,hasil):
#genState('5',{})
global pilihan
board = genBoard(s)
if(gameOver(board)):
hasil[s] = ['-' for i in range(0,9)]
return hasil
else:
temp = ['-' for i in range(0,9)]
for mov in moveTersedia(board):
boardTemp = deepcopy(board)
boardTemp[mov[0]][mov[1]] = x #ganti ini, set ke x untuk player duluan
if(not(gameOver(boardTemp))):
minmax(boardTemp,0,o) #ganti ini, set ke o untuk player duluan
awal = (mov[0]*3)+mov[1]+1
nomor = (pilihan[0]*3)+pilihan[1]+1
stateBaru = s+','+str(awal)+','+str(nomor)
#cek apakah sudah ada sebelumnya
#cek = [stateBaru] #cek = genAllRotFlip(stateBaru), untuk generate state setelah di rotate / flip / keduanya
#for i in cek: #for dibiarkan, jaga jaga jika perlu mengenerate state rotate dan flip
# if(i in hasil):
# stateBaru = i
# break
#sudah ada state dengan kondisi yang sama tapi diputar/mungkin tidak
temp[awal-1] = stateBaru
else:
awal = (mov[0]*3)+mov[1]+1
stateBaru = s+','+str(awal)
temp[awal-1] = stateBaru
hasil[s] = temp
for i in temp:
if(i!='-'):
hasil[i] = []
for i in temp:
if(i!='-'):
hasil = genState(i,hasil)
return hasil
#1Generate state untuk player mulai duluan
a = genState('5,1',{})
#ganti ke '5' untuk cpu mulai duluan, lalu ganti pula semua kode diatas yang ada #ganti ini
daftarState = []
#Modifikasi semua state di a agar isi nomor di depannya
cnt = 1
tempA = {}
for state in a:
namaStateBaru = genNomor(cnt)+state
daftarState.append(namaStateBaru)
tempA[namaStateBaru] = a[state]
cnt += 1
a = tempA
for state in a:
for transisi in range(0,9):
#Iter semua nama di a untuk nyari nomornya
namaDicari = a[state][transisi]
if(namaDicari != '-'):
for iterState in a:
namaIterState = iterState[5:]
nomor = 0
if(namaDicari == namaIterState):
nomor = int(iterState[1:4]) #mengambil angkanya dari format (###)#,#,#,...
break
#Sudah ketemu nomornya
a[state][transisi] = genNomor(nomor)+a[state][transisi]
#Save ke file eksternal statenya
file = open('daftarStatesPlayer.txt','w') #Ganti nama
cnt = 1
for i in daftarState:
file.write(i)
file.write('\n')
file.close()
#Save ke file eksternal finish statenya
finishState = []
file = open('finishStatePlayer.txt','w') #Ganti nama
for i in daftarState:
strTest = i[5:]
if(gameOver(genBoard(strTest))):
file.write(i)
file.write('\n')
file.close()
#5ave tabel transisi (a) ke file eksternal
file = open('statesPlayer.txt','w') #Ganti nama
for i in a:
#s = i.ljust(23)
s = ''
for n in a[i]:
s = s + n.ljust(23)
file.write(s)
file.write('\n')
file.close()
| true |
b8fde39f4658ca70e194f69e96dea41803fb940a | Python | egsu20/study | /Python/python으로 시작하는/ex3_11.py | UTF-8 | 224 | 3.953125 | 4 | [] | no_license | # 온도에 따른 물 상태 출력
temp = float(input("온도 입력 : "))
print("물의 상태는 ", end="")
if temp <= 0:
print("얼음")
elif temp > 0 and temp < 100:
print("액체")
else:
print("기체")
| true |
79fc8201980867c467e277449e72b0429b7bb40c | Python | akhilbommu/May_LeetCode_Challenge | /Day6-MajorityElement.py | UTF-8 | 989 | 4.625 | 5 | [] | no_license | """
Problem Link : "https://leetcode.com/problems/majority-element/"
Approach 1 : Create a dictionary object and iterate through it and check if value of particular element is
greater than "math.floor(len(nums) / 2" if so return that element.
Approach 2 : Sort the given array.For an element to be a majority element its occurances should be greater than
half the length of given array.
So when we sort the array the majority element will be at the index "len(nums)//2".
"""
import math
from collections import Counter
class MajorityElement:
def majorityElement1(self, nums):
d = Counter(nums)
for each in d:
if d[each] > math.floor(len(nums) / 2):
return each
def majorityElement2(self, nums):
nums = sorted(nums)
return nums[len(nums) // 2]
obj = MajorityElement()
nums = [1, 2, 3, 4, 1, 2, 2, 2, 2, 2]
print(obj.majorityElement1(nums))
print(obj.majorityElement2(nums))
| true |
83698ff99f24bea0a81f5117c90f62998c61bfd6 | Python | kojidooon/tester | /test.py | UTF-8 | 1,048 | 2.96875 | 3 | [] | no_license | #!/bin/python3
import math
import os
import random
import re
import sys
# Complete the MinSliceWeight function below.
def MinSliceWeight(Matrix):
N = len(Matrix[0])
j = Matrix[0].index(min(Matrix[0]))
ans = [min(Matrix[0])]
for i in range(N-1):
cal = []
cal_num = []
if j != 0:
cal.append(j-1,Matrix[i+1][j-1])
cal_num.append(j-1)
if j != N-1:
cal.append(Matrix[i+1][j+1])
cal_num.append(j+1)
cal.append(Matrix[i+1][j])
cal_num.append(j)
for value, num in zip(cal, cal_num):
if min(cal) == value:
ans.append(value)
j = num
return sum(ans)
if __name__ == '__main__':
Matrix_rows = int(input().strip())
Matrix_columns = int(input().strip())
Matrix = []
cal = 0
ans = []
for _ in range(Matrix_rows):
Matrix.append(list(map(int, input().rstrip().split())))
res = MinSliceWeight(Matrix)
print(res)
| true |
44194ed756ea293e2a35f687f47eae9718799b98 | Python | Tzvetomir/ElegantRL | /elegantrl/agents/AgentDoubleDQN.py | UTF-8 | 4,682 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import torch
import numpy.random as rd
from elegantrl.agents.AgentDQN import AgentDQN
from elegantrl.agents.net import QNetTwin, QNetTwinDuel
class AgentDoubleDQN(AgentDQN): # [ElegantRL.2021.10.25]
"""
Bases: ``AgentDQN``
Double Deep Q-Network algorithm. “Deep Reinforcement Learning with Double Q-learning”. H. V. Hasselt et al.. 2015.
:param net_dim[int]: the dimension of networks (the width of neural networks)
:param state_dim[int]: the dimension of state (the number of state vector)
:param action_dim[int]: the dimension of action (the number of discrete action)
:param learning_rate[float]: learning rate of optimizer
:param if_per_or_gae[bool]: PER (off-policy) or GAE (on-policy) for sparse reward
:param env_num[int]: the env number of VectorEnv. env_num == 1 means don't use VectorEnv
:param agent_id[int]: if the visible_gpu is '1,9,3,4', agent_id=1 means (1,9,4,3)[agent_id] == 9
"""
def __init__(self):
AgentDQN.__init__(self)
self.soft_max = torch.nn.Softmax(dim=1)
def init(self, net_dim=256, state_dim=8, action_dim=2, reward_scale=1.0, gamma=0.99,
learning_rate=1e-4, if_per_or_gae=False, env_num=1, gpu_id=0):
"""
Explict call ``self.init()`` to overwrite the ``self.object`` in ``__init__()`` for multiprocessing.
"""
self.ClassCri = QNetTwinDuel if self.if_use_dueling else QNetTwin
AgentDQN.init(self, net_dim, state_dim, action_dim, learning_rate, reward_scale, gamma,
if_per_or_gae, env_num, gpu_id)
if if_per_or_gae: # if_use_per
self.criterion = torch.nn.SmoothL1Loss(reduction='none')
self.get_obj_critic = self.get_obj_critic_per
else:
self.criterion = torch.nn.SmoothL1Loss(reduction='mean')
self.get_obj_critic = self.get_obj_critic_raw
def select_actions(self, states: torch.Tensor) -> torch.Tensor: # for discrete action space
"""
Select discrete actions given an array of states.
.. note::
Using softmax to random select actions with proportional probabilities for randomness.
:param states: an array of states in a shape (batch_size, state_dim, ).
:return: an array of actions in a shape (batch_size, action_dim, ) where each action is clipped into range(-1, 1).
"""
actions = self.act(states.to(self.device))
if rd.rand() < self.explore_rate: # epsilon-greedy
a_prob = self.soft_max(actions)
a_ints = torch.multinomial(a_prob, num_samples=1, replacement=True)[:, 0]
# a_int = rd.choice(self.action_dim, prob=a_prob) # numpy version
else:
a_ints = actions.argmax(dim=1)
return a_ints.detach().cpu()
def get_obj_critic_raw(self, buffer, batch_size) -> (torch.Tensor, torch.Tensor):
"""
Calculate the loss of the network and predict Q values with **uniform sampling**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:return: the loss of the network and Q values.
"""
with torch.no_grad():
reward, mask, action, state, next_s = buffer.sample_batch(batch_size)
next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0]
q_label = reward + mask * next_q
q1, q2 = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)]
obj_critic = self.criterion(q1, q_label) + self.criterion(q2, q_label)
return obj_critic, q1
def get_obj_critic_per(self, buffer, batch_size):
"""
Calculate the loss of the network and predict Q values with **Prioritized Experience Replay (PER)**.
:param buffer: the ReplayBuffer instance that stores the trajectories.
:param batch_size: the size of batch data for Stochastic Gradient Descent (SGD).
:return: the loss of the network and Q values.
"""
with torch.no_grad():
reward, mask, action, state, next_s, is_weights = buffer.sample_batch(batch_size)
next_q = torch.min(*self.cri_target.get_q1_q2(next_s)).max(dim=1, keepdim=True)[0]
q_label = reward + mask * next_q
q1, q2 = [qs.gather(1, action.long()) for qs in self.act.get_q1_q2(state)]
td_error = self.criterion(q1, q_label) + self.criterion(q2, q_label)
obj_critic = (td_error * is_weights).mean()
buffer.td_error_update(td_error.detach())
return obj_critic, q1
| true |
29b2c59fce9152d7419347a9b3dedbe71f4d133c | Python | Code-Institute-Submissions/tomciosegal-fitness_pot | /utilities.py | UTF-8 | 407 | 3.171875 | 3 | [] | no_license | # function for pagination
def paginate(data, count, page=None):
next = True
if page is None:
page = 1
try:
page = int(page)
except ValueError:
page = 1
if page < 1:
page = 1
start = int(count * (page - 1))
stop = int(count * page)
data = [x for x in data]
if stop >= len(data):
next = False
return data[start:stop], page, next
| true |
5576f4610d554806fe2cdee0de9ba618f0eee449 | Python | cvsch/gradgpad | /gradgpad/tools/visualization/radar/combined_scenario.py | UTF-8 | 510 | 2.609375 | 3 | [
"MIT"
] | permissive | from typing import List
class CombinedScenario:
ALL = "All"
PAS_I = "PAS_I"
PAS_II = "PAS_II"
PAS_III = "PAS_III"
PAS_I_AND_II = "PAS_I_AND_II"
PAS_II_AND_III = "PAS_II_AND_III"
@staticmethod
def options() -> List:
return [
CombinedScenario.ALL,
CombinedScenario.PAS_I,
CombinedScenario.PAS_II,
CombinedScenario.PAS_III,
CombinedScenario.PAS_I_AND_II,
CombinedScenario.PAS_II_AND_III,
]
| true |
f47a3e0fc76aad92b117a54574ea985e0d655460 | Python | Aasthaengg/IBMdataset | /Python_codes/p03296/s266519478.py | UTF-8 | 780 | 3.34375 | 3 | [] | no_license | ###template###
import sys
def input(): return sys.stdin.readline().rstrip()
def mi(): return map(int, input().split())
###template###
N = int(input())
As = list(mi())
ans = 0
prevcolor = 0 #左隣のスライムが何色だったか 色は1~Nなので0はダミー
nowsectionsize = 0
for a in As:
if prevcolor != a: #もし色が違ったら
ans += nowsectionsize//2 #左側までの区間size//2が魔法コストになる
nowsectionsize = 1 #リセット(今の色が新しい区間になるので0でなく1)
else: #色が同じなら
nowsectionsize += 1 #区間が1増える
prevcolor = a #どちらにしろ「前の色」は更新する
#ループを出た後、区間>=2なら最後のコストがかかる
ans += nowsectionsize //2
print(ans)
| true |
ef198fcf2f4d5c74dba91708a04210090f839a1a | Python | MarizzaM/QA-Architect-Python-Exercise_2 | /ex_05.py | UTF-8 | 148 | 3.515625 | 4 | [] | no_license | words = ['world of coding', 'pen', 'python', 'hello']
for word in words:
if len(word) < 4:
break
else:
print(word.upper())
| true |
2e869b8ce3b3963df61b1b8e315270d79b4401ce | Python | michalbarer/capture-pages | /capturepages/selenium_capture.py | UTF-8 | 1,208 | 2.75 | 3 | [] | no_license | import logging
from screeninfo import get_monitors
from selenium import webdriver
logger = logging.getLogger(__name__)
def init_selenium_driver():
logger.info('Initializing Selenium web driver...')
options = webdriver.ChromeOptions()
options.headless = True
driver = webdriver.Chrome(options=options)
screen_dimensions = get_monitors()[0]
screen_width = screen_dimensions.width
screen_height = screen_dimensions.height
driver.set_window_size(screen_width, screen_height)
return driver
def set_driver_full_screen(driver):
screen_width = driver.get_window_size()['width']
total_height = driver.execute_script('return document.body.scrollHeight')
driver.set_window_size(screen_width, total_height)
def capture_web_page(url, screenshot_path, full_screenshot=False):
driver = init_selenium_driver()
try:
driver.get(url)
if full_screenshot:
set_driver_full_screen(driver)
logger.info('Taking a screenshot.')
driver.find_element_by_tag_name('body') \
.screenshot(screenshot_path)
logger.info('Saved screenshot to \'{}\'.'.format(screenshot_path))
finally:
driver.quit()
| true |
bffe9bcda608ae864044d84f2a1051244ed261d2 | Python | ldenneau/mopsng | /python/MOPS/Alerts/plugins/comets.py | UTF-8 | 3,109 | 2.984375 | 3 | [] | no_license | #!/usr/bin/env python
"""
plugins.comets
MOPS Alert Rule that returns DerivedObjects for which
( a>4.5 AU && e>0.5 ) || e>0.95
"""
from base import DerivedObjectRule
import sys
from math import *
from MOPS.Alerts.plugins.Constants import CH_ALL
from decimal import *
class Comets(DerivedObjectRule):
"""
Return all new or newly modified DerivedObject instances.
"""
def IsComet(self, obj):
"""
The classical comet orbital criterion is the Tisserand paramter
(with respect to Jupiter) which is defined by:
T(J) = a(J) / a + 2cos(i) * [(1-e^2) * a/a(J)]^0.5
where a(J) is the semimajor axis of Jupiter, and a,e,i are the
semimajor axis, eccentricity, and inclination of the object in question.
Comets will typically have T(J) < 3 and asteroids will typically have
T(J) > 3.
While the line is a bit blurry for T(J) values very close to 3, it's
generally a good rule of thumb, and certainly a very widely-used one.
"""
aj = Decimal('5.203'); # Semi-major axis of jupiter in AU
q = Decimal(str(obj.alertObj.orbit.q)) # Perihelion distance in AU
e = Decimal(str(obj.alertObj.orbit.e)) # Eccentricity
a = Decimal(str(q / (Decimal(1) - e))) # Semi-major axis in AU
i = Decimal(str(radians(obj.alertObj.orbit.i))) # Inclination in radians
Tj = Decimal(str(aj / a))
Tj += Decimal (str(sqrt((1 - e ** 2) * a / aj)))
Tj *= Decimal(str(2 * cos(i)))
result = (Tj < 3)
if (result):
# Generate specific message to be included in alert
msg = "<p>a: %.3f<br/>" % a
msg += "e: %.3f<br/>" % e
msg += "q: %.3f<br/>" % q
msg += "i: %.3f<br/>" % i
msg += "H: %.1f<br/>" % obj.alertObj.orbit.h_v
msg += "Derived Object (Internal Link): <a href='http://mopshq2.ifa.hawaii.edu/model/%s/search?id=L%d'>L%d</a><br/>" % (obj.dbname, obj.alertObj._id, obj.alertObj._id)
msg += "Derived Object (External Link): <a href='http://localhost:8080/model/%s/search?id=L%d'>L%d</a></p>" % (obj.dbname, obj.alertObj._id, obj.alertObj._id)
obj.message = msg
# Generate subject line for alert.
subj = "[a=%.3f,e=%.3f,q=%.3f,i=%.3f,H=%.3f]\n" % (a, e, q, i, obj.alertObj.orbit.h_v)
obj.subject = subj
# <-- end if
return(result)
# <-- end def
def evaluate(self):
"""
Return a list of DerivedObject instances that satisfy the rule.
"""
#Set the channel that will be used to publish the alert.
self.channel = CH_ALL
return([obj for obj in self.newAlerts if self.IsComet(obj)])
# <-- end def
# <-- end class
def main(args=sys.argv[1:]):
rule = Comets(status = 'D')
alerts = rule.evaluate()
for alert in alerts:
print(str(alert) + '\n\n')
# <-- end for
# <-- end def
if(__name__ == '__main__'):
sys.exit(main())
# <-- end if
| true |
f219d4b035e68f333e3773be3fcda4ed76de28ad | Python | xjh1230/py_algorithm | /test/l5_longest_palindrome.py | UTF-8 | 3,976 | 3.4375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Author : 1230
# @Email : xjh_0125@sina.com
# @Time : 2019/11/4 15:17
# @Software: PyCharm
# @File : l5_longest_palindrome.py
class Solution:
def __init__(self):
"""
给定一个字符串 s,找到 s 中最长的回文子串。你可以假设 s 的最大长度为 1000。
示例 1:
输入: "babad"
输出: "bab"
注意: "aba" 也是一个有效答案。
示例 2:
输入: "cbbd"
输出: "bb"
https://leetcode-cn.com/problems/longest-palindromic-substring
"""
pass
def longestPalindrome1(self, s: str) -> str:
'''
中心扩散
:param s:
:return:
'''
i = l = r1 = r2 = maxc = 0
count = len(s)
dic = {}
if count < 2:
return s
while i < count - 1:
if s[i] == s[i + 1]:
l = i
r1 = i + 1
tmp = ''
while l > -1 and r1 < count:
if s[l] == s[r1]:
tmp = s[l] + tmp + s[r1]
l -= 1
r1 += 1
else:
break
if len(tmp) > maxc:
maxc = len(tmp)
dic[maxc] = tmp
l = i - 1
r2 = i + 1
tmp = s[i]
while l > -1 and r2 < count:
if s[l] == s[r2]:
tmp = s[l] + tmp + s[r2]
l -= 1
r2 += 1
else:
break
if len(tmp) > maxc:
maxc = len(tmp)
dic[maxc] = tmp
i += 1
if (count - i + 1) * 2 < maxc:
break
return dic[maxc]
def longestPalindrome(self, s):
'''
马拉车算法(中心扩展算法的补充,中间填充特殊字符串,就不会判断 aa 和 aba的问题)
:param s:
:return:
'''
if len(s) <= 1:
return s
maxc, dic = 1, {1: [s[0]]}
s = '^' + ''.join(i + '#' for i in s)
s = s[:-1] + '$'
print(s)
start, end = 0, len(s)
while start < end:
left = start - 1
right = start + 1
tmp = s[start]
while left > 0 and right < end:
if s[left] == s[right]:
tmp = s[left] + tmp + s[right]
left -= 1
right += 1
else:
break
if len(tmp) >= maxc:
maxc = len(tmp)
if maxc in dic:
dic[maxc].append(tmp)
else:
dic[maxc] = [tmp]
start += 1
if (end - start + 1) * 2 < maxc:
break
res, max_res = '', 0
for i in dic[maxc]:
tmp = i.replace('#', '').replace('$', '')
if len(tmp) > max_res:
res = tmp
max_res = len(tmp)
return res
def longestCommonSubstring(self, s, s2):
'''
最长公共子串
:param s:
:return:
'''
if len(s) < len(s2):
s, s2 = s2, s
max_c, max_index, c1, c2 = 1, 0, len(s), len(s2)
dp = [[0] * c1] * c2
for i, tmp1 in enumerate(s2):
for j, tmp2 in enumerate(s):
if tmp1 == tmp2:
if i == 0 or j == 0:
dp[i][j] = 1
else:
dp[i][j] = dp[i - 1][j - 1] + 1
if max_c < dp[i][j]:
max_index = i
max_c = dp[i][j]
print(max_c, max_index, s[max_index + 1 - max_c:max_index + 1] + '*', dp)
if __name__ == '__main__':
    # Quick manual smoke test of both algorithms.
    sc = Solution()
    s = '12322'
    s2 = '1234'
    print(sc.longestPalindrome(s))
    print(sc.longestCommonSubstring(s,s2))
| true |
c15f70c977c88e21d7461557494cbc82223bb259 | Python | AntnotAnth/rep-drawing2 | /rep-drawing/rep_drawing.pyde | UTF-8 | 446 | 2.546875 | 3 | [] | no_license | def setup():
r = random(255)
size(400, 600)
background( 255, 255, 255)
fill( 0, 125, 125)
ellipse(150, 130, 200, 200)
line(150, 350, 150, 230)
line(150 , 290, 230 ,260)
line(150, 290, 120, 235)
fill(r, r, r)
rect(70, 100, 50, 20)
rect(160, 100, 50, 20)
fill(20, 60, 70)
ellipse(145, 180, 40, 40)
Sometext= "randomcolors"
fill(255, 0, 0)
text (Sometext, 20, 350)
| true |
5e955f76badfd71130bb69b9fbcae8a8bd4b36d3 | Python | Junx0924/CV4RS | /lib/data/set/MLRSNet.py | UTF-8 | 7,258 | 2.953125 | 3 | [] | no_license | import numpy as np
import csv
import random
import xlrd
import itertools
import json
import os
from PIL import Image
def split(image_dict,split_ratio,tag:str='test'):
    """Split a class->samples dict into a kept part and a held-out part.

    Args:
        image_dict (dict): {'class_label': [image_index1, image_index2, ...]}
        split_ratio (float): fraction of each class moved to the held-out
            part (e.g. 0.3 holds out ~30% per class).
        tag (str): state name for held-out samples; kept samples are
            flagged "non"+tag.

    Returns:
        dict: the kept image_dict (per-class lists of ~(1-split_ratio) size)
        dict: {image_index: tag | "non"+tag} for every unique index

    NOTE(review): random.shuffle mutates the caller's lists in place, and
    results depend on the global random state — seed before calling for
    reproducible splits.
    """
    keep_split_ratio = 1 - split_ratio
    keep_image_dict = {}
    other_image_dict = {}
    keys = sorted(list(image_dict.keys()))
    # All unique sample indices across every class (samples can belong to
    # several classes, hence the flag dict to keep assignments consistent).
    values = np.unique(list(itertools.chain.from_iterable(image_dict.values())))
    flag = {ind:"undefine" for ind in values}
    for key in keys:
        samples_ind = image_dict[key]
        random.shuffle(samples_ind)
        sample_num = len(samples_ind)
        keep_image_dict[key] =[]
        other_image_dict[key] =[]
        # check if there are some sample id already in train/nontrain
        for ind in samples_ind:
            if flag[ind] =="undefine":
                # Unseen sample: fill the kept bucket first, then the other.
                if len(keep_image_dict[key])< int(sample_num*keep_split_ratio):
                    keep_image_dict[key].append(ind)
                    flag[ind] = "non"+tag
                else:
                    if len(other_image_dict[key])< (sample_num - int(sample_num*keep_split_ratio)):
                        other_image_dict[key].append(ind)
                        flag[ind] = tag
            elif flag[ind] == "non"+tag:
                # Already kept via another class: may only go to the kept side.
                if len(keep_image_dict[key])< int(sample_num*keep_split_ratio):
                    keep_image_dict[key].append(ind)
            else:
                # Already held out via another class: may only go to the other side.
                if len(other_image_dict[key])< (sample_num - int(sample_num*keep_split_ratio)):
                    other_image_dict[key].append(ind)
    return keep_image_dict,flag
def read_csv(csv_filename,datapath):
    """Read an image/label csv and return [image_path, multi-hot label] pairs.

    Args:
        csv_filename (str): csv file where each row holds an image path
            (relative to the dataset root) followed by its multi-hot label.
        datapath (str): dataset root prepended verbatim to each stored path
            (stored paths already start with '/', see create_csv_split).

    Returns:
        list: one ``[full_image_path, int label vector]`` entry per row.
    """
    # The previous version also declared an unused ``file_label`` list and
    # documented the return as "a list of file paths"; both were wrong.
    file_list = []
    with open(csv_filename) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for row in csv_reader:
            file_list.append([datapath + str(row[0]), np.array(row[1:], dtype=int)])
    return file_list
def create_csv_split(csv_dir,datapath):
    """Split the dataset into train/val/test csv files, class-balanced.

    The code below holds out 30% for test, then 43% of the remaining 70%
    for val, giving roughly train/val/test = 40%/30%/30% per class (the
    old docstring's "50%/10%/40%" did not match the code).

    Also writes label_name.json (label id -> name) and category.json
    (category name -> list of label ids).

    Args:
        csv_dir (str): folder to store the resulting csv/json files
        datapath (str): eg. /scratch/CV4RS/Dataset/MLRSNet
    """
    # Category sheet: one row per category, label names from column 2 on.
    category = {}
    category_path = datapath + '/Categories_names.xlsx'
    book = xlrd.open_workbook(category_path)
    sheet = book.sheet_by_index(1)
    for i in range(2,sheet.nrows):
        category_name = sheet.cell_value(rowx=i, colx=1)
        temp_label_name = np.unique(np.array([sheet.cell_value(rowx=i, colx=j).strip() for j in range(2,sheet.ncols) if sheet.cell_value(rowx=i, colx=j)!=""]))
        # Fix a known misspelling in the source spreadsheet.
        if "chapparral" in temp_label_name: temp_label_name[np.where(temp_label_name=="chapparral")]= "chaparral"
        category[category_name] = temp_label_name
    label_folder = datapath +'/labels/'
    image_folder = datapath +'/Images/'
    image_list =[] # image path
    image_labels =[]
    # One label csv per scene; header row names the 60 labels, whose order
    # can differ between files, hence the argsort-based re-ordering.
    for entry in os.listdir(label_folder):
        if entry.split('.')[-1] =="csv" :
            with open(label_folder + entry) as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                label_names =next(csv_reader,None)[1:]
                if len(label_names)==60:
                    sort_ind = np.argsort(label_names)
                    for row in csv_reader:
                        image_path = image_folder + entry.split('.')[0] +'/'+row[0]
                        #image_list.append(image_path)
                        # Store dataset-relative paths so csvs stay portable.
                        image_list.append('/Images/'+ entry.split('.')[0] +'/'+row[0])
                        temp = np.array(row[1:])
                        image_labels.append(temp[sort_ind])
                else:
                    # Unexpected header length: report and skip the file.
                    print(entry)
    # NOTE(review): relies on label_names from the last csv iterated above.
    label_names = np.sort(label_names)
    # to record the label names and its id
    label_names_dict= {i:x for i,x in enumerate(label_names)}
    # Replace each category's label names with their numeric label ids.
    for key in category.keys():
        labels = np.array(category[key])
        label_ind = [str(np.where(label_names==item)[0][0]) for item in labels]
        category[key] = label_ind
    image_list = np.array(image_list)
    image_labels = np.array(image_labels,dtype=int)
    # Per-label list of image row indices that carry that label.
    image_dict = {i:np.where(image_labels[:,i]==1)[0] for i in range(len(label_names))}
    # split data into nontest/test 70%/30% balanced in class.
    temp_image_dict, flag_test =split(image_dict, 0.3,'test')
    # split the remaining 70% into train/val (~40%/30% of the total).
    _,flag_val =split(temp_image_dict, 0.43,'val')
    test = [[image_list[ind]]+list(image_labels[ind,:]) for ind in sorted(list(flag_test.keys())) if flag_test[ind]=="test"]
    val = [[image_list[ind]]+list(image_labels[ind,:]) for ind in sorted(list(flag_val.keys())) if flag_val[ind]=="val"]
    train = [[image_list[ind]]+list(image_labels[ind,:]) for ind in sorted(list(flag_val.keys())) if flag_val[ind]=="nonval"]
    with open(csv_dir +'/label_name.json', 'w+') as label_file:
        json.dump(label_names_dict, label_file,separators=(",", ":"),allow_nan=False,indent=4)
    with open(csv_dir +'/category.json', 'w+') as category_file:
        json.dump(category, category_file,separators=(",", ":"),allow_nan=False,indent=4)
    # NOTE(review): the name ``file`` shadows the builtin; left unchanged here.
    with open(csv_dir +'/train.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(train)
    with open(csv_dir +'/test.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(test)
    with open(csv_dir +'/val.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerows(val)
def Give(datapath,dset_type):
    """Return one dataset split as [image_path, multi-hot label] pairs.

    Creates the train/val/test csv split on first use, then loads the
    requested split plus the label-id -> label-name mapping.

    Args:
        datapath (str): dataset root, eg. /scratch/CV4RS/Dataset/MLRSNet
        dset_type (str): one of {'train','val','test'}

    Returns:
        list: [image_path, multi-hot label] entries for the chosen split
        dict: label id -> label name conversion table
    """
    csv_dir = os.path.dirname(__file__) + '/MLRSNet_split'
    # check the split train/test/val existed or not
    if not os.path.exists(csv_dir +'/train.csv'):
        create_csv_split(csv_dir,datapath)
    # NOTE(review): ``category`` is loaded but never used or returned here.
    with open(csv_dir +'/category.json') as json_file:
        category = json.load(json_file)
    with open(csv_dir +'/label_name.json') as json_file:
        conversion= json.load(json_file)
    train_list = read_csv(csv_dir +'/train.csv',datapath)
    val_list = read_csv(csv_dir +'/val.csv',datapath)
    test_list= read_csv(csv_dir +'/test.csv',datapath)
    dsets = {'train': train_list , 'val': val_list , 'test': test_list}
    return dsets[dset_type],conversion
| true |
867c1572e21fecf6698bc32940bc0dc947c7d4b1 | Python | noradrenaline/AdventOfCode17 | /noras_solutions/16/solver16-2.py | UTF-8 | 2,870 | 3.46875 | 3 | [] | no_license | lsize = 16
with open("input.txt") as fh:
moves = fh.read().strip().split(',')
class lineup:
    """Dance line of programs (Advent of Code 2017, day 16).

    Programs start in alphabetical order; ``dance`` applies one move string.
    """
    def __init__(self,sz):
        # First sz lowercase letters, kept as a mutable list.
        s = 'abcdefghijklmnopqrstuvwxyz'[:sz]
        self.arrangement = list(s)
    def dance(self,cmd):
        """Apply one move: sN (spin), xA/B (swap positions), pA/B (swap names)."""
        if cmd[0] == 's':
            # Spin: the last r programs move to the front, order preserved.
            r = int(cmd[1:])
            self.arrangement = self.arrangement[-r:]+self.arrangement[:-r]
        elif cmd[0] == 'x':
            # Exchange: swap the programs at two positions.
            [p1,p2] = cmd[1:].split('/')
            t = self.arrangement[int(p1)]
            self.arrangement[int(p1)] = self.arrangement[int(p2)]
            self.arrangement[int(p2)] = t
        elif cmd[0] == 'p':
            # Partner: swap two programs by name, wherever they stand.
            [c1,c2] = cmd[1:].split('/')
            i1 = self.arrangement.index(c1)
            i2 = self.arrangement.index(c2)
            self.arrangement[i1] = c2
            self.arrangement[i2] = c1
        else:
            # Call form works under both Python 2 and 3; the original
            # ``print 'x'`` statement was a SyntaxError on Python 3.
            print('unknown command encountered')
    def stringify(self):
        """Return the current arrangement as a single string."""
        return ''.join(self.arrangement)
lu = lineup(lsize)
# NOTE(review): the statements below use Python-2 ``print`` syntax; this
# script only runs under Python 2 as written.
# a billion is too many apparently. Why the f is it ooming?
# I was expecting it to be super-slow, but why oom? Isn't it true that
# the only thing in the memory is the one lineup and the input instructions?
# WTF gives?
# Maybe we can just map the beginning order to the end order
# and apply the same transformation to a different starting order--
# shit, no, that won't work because of the p operator which cares about
# letters, not positions.
# it is true that if we ever land on existing order, then it will
# continue to run the same loop. But there are 16! possible orderings,
# which is way more than a billion, so will that ever happen.
# let me try to loop a shorter number of times and see if there is a closed loop, maybe?
# if there is then the solution is analytic up to the number of times you can do the loop in a billion reps
# Run full dances until a previously-seen arrangement repeats (cycle found)
# or we give up after 1000 iterations.
i = 0;
occurrence = {'abcdefghijklmnop':0}
while (i<1000):
    for cmd in moves:
        lu.dance(cmd)
    i+=1
    s = lu.stringify()
    if s not in occurrence:
        occurrence[s] = i
    else:
        print "string " + s + " occurred at " + str(occurrence[s]) + " and also at " + str(i)
        break
if i == 1000:
    print "Your shit is fucked."
# okay, so we DID find a repeat!
#firstocc = occurrence[s]
#secondocc = i
# so how many full loops fit below 1 billion?
# can I assume that only one starting string would possibly give rise to the loop one?
# well, in this case I know it is, because we do indeed loop back to abcdefghijklmnop
# I'm not convinced that this is a general condition (e.g. there couldn't be multiple routes into
# a single loop), but then, I'm also not convinced that the solution is general at all because it
# could be the case that there wasn't a loop in a reasonable amount of time. Oh well.
# i is now the looplength.
numToRun = 1000000000 % i
# lu is currently in the desired state, alphabetically ordered, right?
print "sanity check:"
print lu.stringify()
# NOTE(review): missing space before "dances." in the output string below;
# cosmetic only, left as-is.
print "now running the final " + str(numToRun) + "dances."
for _ in range(numToRun):
    for cmd in moves:
        lu.dance(cmd)
print "final layout:"
print lu.stringify()
| true |
e893274a5a1f5119ddcfac2123b31756f220d6dd | Python | sai-prasanna/vivid | /test/test_parser.py | UTF-8 | 604 | 2.828125 | 3 | [] | no_license | import sys
sys.path.append('../')
from scheme.parser import Parser
from scheme.lexer import Lexer
from scheme.exceptions import LexicalError
from scheme.exceptions import ParserError
c1 = '''
'''
c2 = '''
(a
(b (1 2))
(c (3 4))
(d (5 (f (8 9))
6))
(e 7)
)
'''
c3 = '''
(a 1 2 () (b 9 #f))
'''
def parse_it(s):
    """Lex and parse one Scheme source string, printing the resulting sexp.

    Prints both the parser's native representation and its Lisp rendering.
    Raises ParserError/LexicalError on malformed input.
    """
    lexer = Lexer(s)
    parser = Parser(lexer)
    sexp = parser.form_sexp()
    # Call-form print works under both Python 2 and 3; the original used
    # the Python-2-only statement form.
    print(sexp)
    print(sexp.to_lisp_str())
# Parse each sample, printing a separator banner before it; parser errors
# are reported rather than aborting the remaining samples.
for code in (c1, c2, c3):
    try:
        print('*' * 80)
        print(code)
        parse_it(code)
    except ParserError as e:
        # ``except E as e`` is valid on Python 2.6+ and Python 3; the old
        # ``except ParserError, e`` form was Python-2-only.
        print(e)
| true |
07fa3bea16d04080a7576abd56ab2775b91ce8a7 | Python | ddgvv/dd | /bsp46.py | UTF-8 | 50 | 2.53125 | 3 | [] | no_license | #46th problem
print(int(input("Enter Number"))+1)
| true |
9c4d004a409ac59836984a0b8375ee4ee321a329 | Python | GuillaumeHaben/MSR2021-ReplicationPackage | /Python/scripts/insightsBoW.py | UTF-8 | 4,982 | 2.953125 | 3 | [] | no_license | from collections import Counter
from keras.preprocessing.text import Tokenizer
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import cross_validate
from metricUtils import tn, fp, tp, fn, precision, recall, fpr, tpr, tnr, f1, auc, mcc
from sklearn.metrics import make_scorer
from tqdm import tqdm
import sys
import os
import json
import pandas as pd
from pprint import pprint
def main():
    """Load a flaky-test dataset, build bag-of-words vocabularies, and report
    the most important words for the test body alone and for body + CUT.
    """
    # Checks
    checkUsage()
    # Load Data: JSON with at least Label, Body, CUT_1..CUT_4, projectName.
    datasetPath = sys.argv[1]
    data = pd.read_json(datasetPath)
    flaky = data[data["Label"] == True]
    nonFlaky = data[data["Label"] == False]
    body = data['Body'].values
    bodyFlaky = flaky['Body'].values
    bodyNonFlaky = nonFlaky['Body'].values
    # Test body concatenated with the four code-under-test snippets.
    bodyAndCut = data['Body'].values + data['CUT_1'].values + data['CUT_2'].values + data['CUT_3'].values + data['CUT_4'].values
    # Build Bag of Words (custom filter keeps word chars, drops punctuation).
    tokenizer = Tokenizer(filters='\'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
    tokenizer.fit_on_texts(body)
    tokenizerFlaky = Tokenizer(filters='\'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
    tokenizerFlaky.fit_on_texts(bodyFlaky)
    tokenizerNonFlaky = Tokenizer(filters='\'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
    tokenizerNonFlaky.fit_on_texts(bodyNonFlaky)
    tokenizerCut = Tokenizer(filters='\'!"#$%&()*+,-./:;<=>?@[\\]^_`{|}~\t\n')
    tokenizerCut.fit_on_texts(bodyAndCut)
    # Information
    print("\nProject: ", data.iloc[0]["projectName"])
    print("Data length: ", len(data))
    print("Length of vocabulary: ", len(tokenizer.word_counts))
    print("Length of vocabulary with CUT: ", len(tokenizerCut.word_counts))
    print("\nNumber of Flaky: ", len(flaky))
    print("Length of vocabulary: ", len(tokenizerFlaky.word_counts))
    print("\nNumber of Non Flaky: ", len(nonFlaky))
    print("Length of vocabulary: ", len(tokenizerNonFlaky.word_counts))
    # Create and fit classifier, check most important words
    fitModelAndCheckWords(tokenizer, tokenizerFlaky, tokenizerNonFlaky, body, data)
    # Same but for Test + CUT
    # NOTE(review): this call still passes ``body``, not ``bodyAndCut`` —
    # confirm whether the CUT texts were meant to be vectorised here.
    fitModelAndCheckWords(tokenizerCut, tokenizerFlaky, tokenizerNonFlaky, body, data)
def fitModelAndCheckWords(tokenizer, tokenizerFlaky, tokenizerNonFlaky, body, data):
    """Fit a random forest on the tokenizer's count matrix and report the
    top-10 most important words with their flaky/non-flaky frequencies.

    Args:
        tokenizer: fitted keras Tokenizer used to vectorise ``body``.
        tokenizerFlaky / tokenizerNonFlaky: per-class tokenizers, used only
            for word-frequency lookups in the report.
        body: iterable of test-body texts.
        data: DataFrame providing the boolean 'Label' target column.
    """
    # Model, to get information on most important features
    classifierKFold = RandomForestClassifier(n_estimators = 100, random_state = 0)
    X = tokenizer.texts_to_matrix(body, mode="count")
    y = data['Label'].values
    classifierKFold.fit(X, y)
    importantWords = featuresUnderstanding(tokenizer, classifierKFold, 10)
    # Further details: .get returns None for words absent from a class.
    for word in importantWords:
        print(word)
        print("Number of occurence in Flaky Tests", tokenizerFlaky.word_counts.get(word))
        print("Number of occurence in Non Flaky Tests", tokenizerNonFlaky.word_counts.get(word))
def featuresUnderstanding(tokenizer, classifier, num):
    """Return the ``num`` words whose bag-of-words columns the classifier
    ranks as most important.

    Args:
        tokenizer: fitted keras Tokenizer whose ``texts_to_matrix`` produced
            the training matrix (column j holds word index j; column 0 is
            the padding slot and maps to no word).
        classifier: fitted estimator exposing ``feature_importances_``.
        num (int): number of top-ranked words to return.

    Returns:
        list: up to ``num`` words, most important first.
    """
    featureImportances = classifier.feature_importances_
    # Feature (column) indices sorted by decreasing importance.
    featureImportancesSorted = sorted(range(len(featureImportances)),
                                      key=lambda k: featureImportances[k],
                                      reverse=True)
    mostImportantFeatures = featureImportancesSorted[:num]
    # BUG FIX: keras ``word_index`` maps word -> index starting at 1, so
    # matrix column j belongs to the word whose index is j. The previous
    # ``list(word_index.keys())[j]`` lookup was off by one (and could even
    # raise IndexError for the last column). Column 0 has no word and is
    # skipped if it ever ranks in the top ``num``.
    index_word = {index: word for word, index in tokenizer.word_index.items()}
    MostImportantWords = [index_word[i] for i in mostImportantFeatures if i in index_word]
    print("\nMost Important Words: ", MostImportantWords, "\n")
    return MostImportantWords
def checkUsage():
    """Exit with a usage message unless exactly one existing file was given."""
    args_ok = len(sys.argv) == 2 and os.path.isfile(sys.argv[1])
    if not args_ok:
        print("Usage: python3 insightsBoW.py [path/to/dataset.json]")
        sys.exit(1)
if __name__ == "__main__":
    main()
# Dictionaries
# print("\nword_counts: A dictionary of words and their counts.")
# print("\nGlobal")
# print(tokenizer.word_counts)
# print("\nFlaky")
# print(tokenizerFlaky.word_counts)
# print("\nNon Flaky")
# print(tokenizerNonFlaky.word_counts)
# print("\nword_docs: A dictionary of words and how many documents each appeared in.")
# print(tokenizer.document_count)
# print("\nword_index: A dictionary of words and their uniquely assigned integers.")
# print(tokenizer.word_index)
# print("\ndocument_count: An integer count of the total number of documents that were used to fit the Tokenizer.")
# print(tokenizer.word_docs) | true |
a97dd5aea2e0e7363f59fc124d2bbcf1cf8980aa | Python | rdguerrerom/DFTBaby | /DFTB/Mathematica_scripts/compare_gamma_lr_exact_approx.py | UTF-8 | 1,169 | 2.65625 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
approx_0p1 = np.loadtxt("approx_0p1.dat")
approx_0p2 = np.loadtxt("approx_0p2.dat")
approx_0p333 = np.loadtxt("approx_0p333.dat")
exact_0p1 = np.loadtxt("exact_0p1.dat")
exact_0p2 = np.loadtxt("exact_0p2.dat")
exact_0p333 = np.loadtxt("exact_0p333.dat")
plt.xlabel("$R_{AB}$ / Bohr", fontsize=17)
plt.ylabel("$\gamma^{lr}_{AB}$ / Hartree", fontsize=17)
lw=3
plt.plot(exact_0p1[:,0], exact_0p1[:,1], color="blue", lw=lw, label="$\\omega = \\frac{1}{10}$ (exact)")
plt.plot(approx_0p1[:,0], approx_0p1[:,1], color="blue", lw=lw, ls="-.", label="$\\omega = \\frac{1}{10}$ (approx.)")
plt.plot(exact_0p2[:,0], exact_0p2[:,1], color="red", lw=lw, label="$\\omega = \\frac{1}{5}$ (exact)")
plt.plot(approx_0p2[:,0], approx_0p2[:,1], color="red", ls="-.", lw=lw, label="$\\omega = \\frac{1}{5}$ (approx.)")
plt.plot(exact_0p333[:,0], exact_0p333[:,1], color="green", lw=lw, label="$\\omega = \\frac{1}{3}$ (exact)")
plt.plot(approx_0p333[:,0], approx_0p333[:,1], color="green", ls="-.", lw=lw, label="$\\omega = \\frac{1}{3}$ (approx.)")
plt.legend()
plt.savefig("comparison_gamma_lr_exact_approx.png")
plt.show()
| true |
c6b330c917c1bd7a041d5a6dbeb7051f390d0be0 | Python | betodealmeida/shillelagh | /src/shillelagh/filters.py | UTF-8 | 11,160 | 3.578125 | 4 | [
"MIT"
] | permissive | """
Filters for representing SQL predicates.
"""
import re
from enum import Enum
from typing import Any, Optional, Set, Tuple
class Operator(Enum):
    """
    Enum representing support comparisons.

    Each member's value is the SQL spelling of the operator.
    """
    EQ = "=="
    NE = "!="
    GE = ">="
    GT = ">"
    LE = "<="
    LT = "<"
    IS_NULL = "IS NULL"
    IS_NOT_NULL = "IS NOT NULL"
    LIKE = "LIKE"
    LIMIT = "LIMIT"
    OFFSET = "OFFSET"
class Side(Enum):
    """Define the side of an interval endpoint."""
    LEFT = "LEFT"    # lower/start end of the interval
    RIGHT = "RIGHT"  # upper/end of the interval
class Endpoint:
    """
    One of the two endpoints of a ``Range``.

    Used to compare ranges. Eg, the range ``>10`` can be represented by:

    >>> start = Endpoint(10, False, Side.LEFT)
    >>> end = Endpoint(None, True, Side.RIGHT)
    >>> print(f'{start},{end}')
    (10,∞]

    The first endpoint represents the value 10 at the left side, in an open
    interval. The second endpoint represents infinity in this case.
    """
    def __init__(self, value: Any, include: bool, side: Side):
        self.value = value      # ``None`` stands for -∞ (left) / +∞ (right)
        self.include = include  # closed endpoint (True) or open (False)
        self.side = side        # which end of the interval this sits on
    def __eq__(self, other: Any) -> bool:
        # Equality deliberately ignores ``side``.
        if not isinstance(other, Endpoint):
            return NotImplemented
        return self.value == other.value and self.include == other.include
    def __gt__(self, other: Any) -> bool:  # pylint: disable=too-many-return-statements
        if not isinstance(other, Endpoint):
            return NotImplemented
        # ``None`` endpoints: +∞ (right) beats everything, -∞ (left) nothing.
        if self.value is None:
            return self.side == Side.RIGHT
        if other.value is None:
            return other.side == Side.LEFT
        # Equal values: break ties on openness and side, so that e.g. an
        # open left endpoint ``(10`` sorts after ``[10`` and after ``10)``.
        if self.value == other.value:
            if self.side == Side.LEFT:
                if other.side == Side.LEFT:
                    return not self.include and other.include
                return not self.include
            # self.side = Side.RIGHT
            if other.side == Side.RIGHT:
                return not other.include and self.include
            return False
        return bool(self.value > other.value)
    # needed for ``min()``
    def __lt__(self, other: Any) -> bool:
        # BUG FIX: the previous ``not self > other`` returned True for
        # *equal* endpoints, which is not a strict less-than. The correct
        # strict dual of ``__gt__`` is ``other > self``.
        return other > self
    def __repr__(self) -> str:
        """
        Representation of an endpoint.

        >>> print(Endpoint(10, False, Side.LEFT))
        (10
        """
        if self.side == Side.LEFT:
            symbol = "[" if self.include else "("
            value = "-∞" if self.value is None else self.value
            return f"{symbol}{value}"
        symbol = "]" if self.include else ")"
        value = "∞" if self.value is None else self.value
        return f"{value}{symbol}"
def get_endpoints_from_operation(
    operator: Operator,
    value: Any,
) -> Tuple[Endpoint, Endpoint]:
    """
    Returns endpoints from an operation.

    Comparisons on a single value map onto a (start, end) endpoint pair;
    an unconstrained side is represented by a ``None``-valued endpoint.
    """
    if operator == Operator.EQ:
        # Degenerate interval [value, value].
        return Endpoint(value, True, Side.LEFT), Endpoint(value, True, Side.RIGHT)
    if operator in (Operator.GE, Operator.GT):
        closed = operator == Operator.GE
        return Endpoint(value, closed, Side.LEFT), Endpoint(None, True, Side.RIGHT)
    if operator in (Operator.LE, Operator.LT):
        closed = operator == Operator.LE
        return Endpoint(None, True, Side.LEFT), Endpoint(value, closed, Side.RIGHT)
    # pylint: disable=broad-exception-raised
    raise Exception(f"Invalid operator: {operator}")
class Filter:
    """
    A filter representing a SQL predicate.
    """
    # Operators a concrete subclass knows how to combine in ``build``.
    operators: Set[Operator] = set()
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> "Filter":
        """
        Given a set of operations, build a filter:

        >>> operations = [(Operator.GT, 10), (Operator.GT, 20)]
        >>> print(Range.build(operations))
        >20

        Contradictory operations should yield an ``Impossible`` filter.
        """
        raise NotImplementedError("Subclass must implement ``build``")
    def check(self, value: Any) -> bool:
        """
        Test if a given filter matches a value:

        >>> operations = [(Operator.GT, 10), (Operator.GT, 20)]
        >>> filter_ = Range.build(operations)
        >>> filter_.check(10)
        False
        >>> filter_.check(30)
        True
        """
        raise NotImplementedError("Subclass must implement ``check``")
class Impossible(Filter):
    """
    Custom Filter returned when impossible conditions are passed.
    """
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        # Whatever the operations are, nothing can satisfy them.
        return cls()
    def check(self, value: Any) -> bool:
        # No value ever matches an impossible predicate.
        return False
    def __eq__(self, other: Any) -> bool:
        # All Impossible filters are interchangeable.
        return True if isinstance(other, Impossible) else NotImplemented
    def __repr__(self) -> str:
        return "1 = 0"
class IsNull(Filter):
    """
    Filter for ``IS NULL``.
    """
    operators: Set[Operator] = {Operator.IS_NULL}
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        # The predicate carries no value, so the operations are irrelevant.
        return cls()
    def check(self, value: Any) -> bool:
        # Matches exactly the SQL NULL, represented as Python ``None``.
        return value is None
    def __eq__(self, other: Any) -> bool:
        # All IsNull filters are interchangeable.
        return True if isinstance(other, IsNull) else NotImplemented
    def __repr__(self) -> str:
        return "IS NULL"
class IsNotNull(Filter):
    """
    Filter for ``IS NOT NULL``.
    """
    operators: Set[Operator] = {Operator.IS_NOT_NULL}
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        # The predicate carries no value, so the operations are irrelevant.
        return cls()
    def check(self, value: Any) -> bool:
        # Matches anything except the SQL NULL (Python ``None``).
        return value is not None
    def __eq__(self, other: Any) -> bool:
        # All IsNotNull filters are interchangeable.
        return True if isinstance(other, IsNotNull) else NotImplemented
    def __repr__(self) -> str:
        return "IS NOT NULL"
class Equal(Filter):
    """
    Equality comparison.
    """
    operators: Set[Operator] = {
        Operator.EQ,
    }
    def __init__(self, value: Any):
        self.value = value
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        distinct = {value for _, value in operations}
        # Two different equality constraints can never hold at once.
        if len(distinct) == 1:
            return cls(distinct.pop())
        return Impossible()
    def check(self, value: Any) -> bool:
        # ``bool()`` normalises non-bool comparison results (e.g. numpy).
        return bool(value == self.value)
    def __repr__(self) -> str:
        return f"=={self.value}"
class NotEqual(Filter):
    """
    Inequality comparison.
    """
    operators: Set[Operator] = {
        Operator.NE,
    }
    def __init__(self, value: Any):
        self.value = value
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        distinct = {value for _, value in operations}
        # Only a single excluded value can be represented by one filter.
        if len(distinct) == 1:
            return cls(distinct.pop())
        return Impossible()
    def check(self, value: Any) -> bool:
        # ``bool()`` normalises non-bool comparison results (e.g. numpy).
        return bool(value != self.value)
    def __repr__(self) -> str:
        return f"!={self.value}"
class Like(Filter):
    """
    Substring searches via the SQL ``LIKE`` operator (case-insensitive).
    """
    operators: Set[Operator] = {
        Operator.LIKE,
    }
    def __init__(self, value: Any):
        self.value = value
        # Translate the SQL pattern into a regex: escape regex
        # metacharacters in the literal parts first (``re.escape`` leaves
        # both ``%`` and ``_`` untouched), then map the SQL wildcards
        # ``%`` -> any run and ``_`` -> any single character.
        pattern = re.escape(self.value).replace("%", ".*").replace("_", ".")
        self.regex = re.compile(pattern, re.IGNORECASE)
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        # we only accept a single value
        values = {value for operator, value in operations}
        if len(values) != 1:
            return Impossible()
        return cls(values.pop())
    def check(self, value: Any) -> bool:
        # BUG FIX: SQL LIKE must match the whole string; ``re.match`` only
        # anchors the start and would accept trailing garbage.
        return self.regex.fullmatch(value) is not None
    def __repr__(self) -> str:
        return f"LIKE {self.value}"
class Range(Filter):
    """
    A range comparison.

    This filter represents a range, with an optional start and an
    optional end. Start and end can be inclusive or exclusive.

    Ranges can be combined by adding them:

    >>> range1 = Range(start=10)
    >>> range2 = Range(start=20)
    >>> print(range1 + range2)
    >20
    >>> range3 = Range(end=40)
    >>> print(range2 + range3)
    >20,<40
    """
    def __init__(
        self,
        start: Optional[Any] = None,
        end: Optional[Any] = None,
        include_start: bool = False,
        include_end: bool = False,
    ):
        self.start = start  # None means unbounded below
        self.end = end      # None means unbounded above
        self.include_start = include_start
        self.include_end = include_end
    operators: Set[Operator] = {
        Operator.EQ,
        Operator.GE,
        Operator.GT,
        Operator.LE,
        Operator.LT,
    }
    def __eq__(self, other: Any):
        # Two ranges are equal when both bounds and both openness flags match.
        if not isinstance(other, Range):
            return NotImplemented
        return (
            self.start == other.start
            and self.end == other.end
            and self.include_start == other.include_start
            and self.include_end == other.include_end
        )
    def __add__(self, other: Any) -> Filter:
        """Intersect two ranges; empty intersections become ``Impossible``."""
        if not isinstance(other, Range):
            return NotImplemented
        start = Endpoint(self.start, self.include_start, Side.LEFT)
        end = Endpoint(self.end, self.include_end, Side.RIGHT)
        new_start = Endpoint(other.start, other.include_start, Side.LEFT)
        new_end = Endpoint(other.end, other.include_end, Side.RIGHT)
        # The intersection keeps the tighter bound on each side.
        start = max(start, new_start)
        end = min(end, new_end)
        if start > end:
            return Impossible()
        return Range(start.value, end.value, start.include, end.include)
    @classmethod
    def build(cls, operations: Set[Tuple[Operator, Any]]) -> Filter:
        # Start from the unbounded interval and narrow it per operation.
        start = Endpoint(None, True, Side.LEFT)
        end = Endpoint(None, True, Side.RIGHT)
        for operator, value in operations:
            new_start, new_end = get_endpoints_from_operation(operator, value)
            start = max(start, new_start)
            end = min(end, new_end)
        if start > end:
            return Impossible()
        return cls(start.value, end.value, start.include, end.include)
    def check(self, value: Any) -> bool:
        """Return True when ``value`` lies inside this range."""
        if self.start is not None:
            if self.include_start and value < self.start:
                return False
            if not self.include_start and value <= self.start:
                return False
        if self.end is not None:
            if self.include_end and value > self.end:
                return False
            if not self.include_end and value >= self.end:
                return False
        return True
    def __repr__(self) -> str:
        # A closed degenerate range prints as equality.
        if self.start == self.end and self.include_start and self.include_end:
            return f"=={self.start}"
        comparisons = []
        if self.start is not None:
            operator = ">=" if self.include_start else ">"
            comparisons.append(f"{operator}{self.start}")
        if self.end is not None:
            operator = "<=" if self.include_end else "<"
            comparisons.append(f"{operator}{self.end}")
        return ",".join(comparisons)
| true |
66d53d302057c8415b3bba3760c07bfef8a11252 | Python | SergioRAgostinho/PoseCNN | /lib/normals/test_normals.py | UTF-8 | 2,223 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import cv2
import numpy as np
import matplotlib.pyplot as plt
import gpu_normals
import os
import scipy.io
def set_axes_equal(ax):
    '''Make axes of 3D plot have equal scale so that spheres appear as spheres,
    cubes as cubes, etc.. This is one possible solution to Matplotlib's
    ax.set_aspect('equal') and ax.axis('equal') not working for 3D.

    Input
      ax: a matplotlib axis, e.g., as output from plt.gca().
    '''
    x_limits = ax.get_xlim3d()
    y_limits = ax.get_ylim3d()
    z_limits = ax.get_zlim3d()
    # Current extent and midpoint of each axis.
    x_range = abs(x_limits[1] - x_limits[0])
    x_middle = np.mean(x_limits)
    y_range = abs(y_limits[1] - y_limits[0])
    y_middle = np.mean(y_limits)
    z_range = abs(z_limits[1] - z_limits[0])
    z_middle = np.mean(z_limits)
    # The plot bounding box is a sphere in the sense of the infinity
    # norm, hence I call half the max range the plot radius.
    plot_radius = 0.5*max([x_range, y_range, z_range])
    # Re-centre every axis on its midpoint with the common radius.
    ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius])
    ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius])
    ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius])
if __name__ == '__main__':
    # NOTE(review): Python-2 ``print`` statement below; py2-only script.
    root_dir = '/var/Projects/FCN/data/RGBDScene/data'
    fx = 570.3 # Focal length in x
    fy = 570.3 # Focal length in y (comment previously said 'x')
    cx = 320.0 # Center of projection in x
    cy = 240.0 # Center of projection in y
    depthCutoff = 20.0
    nmaps = []
    # Compute a normal map from the first depth frame of each of 14 scenes.
    for i in range(14):
        filename = os.path.join(root_dir, 'scene_{:02d}'.format(i+1), '00000-depth.png')
        im = cv2.imread(filename, cv2.IMREAD_UNCHANGED)
        # Raw depth is stored in 0.1mm units; convert to metres.
        depth = im.astype(np.float32, copy=True) / 10000.0
        nmap = gpu_normals.gpu_normals(depth, fx, fy, cx, cy, depthCutoff, 0)
        print nmap.shape, np.nanmin(nmap), np.nanmax(nmap)
        nmaps.append(nmap)
        '''
        # convert normals to an image
        N = 127.5*nmap + 127.5
        N = N.astype(np.uint8)

        fig = plt.figure()
        fig.add_subplot(121)
        plt.imshow(im)
        fig.add_subplot(122)
        plt.imshow(N)
        plt.show()
        '''
    # save results
    nmaps = {'nmaps': nmaps}
    scipy.io.savemat('nmaps.mat', nmaps)
| true |
0bb80e3b798d8c075cc7dc05b874fc213d92a486 | Python | sandeeppal1991/D09 | /HW09_ch12_ex02.py | UTF-8 | 2,447 | 4.53125 | 5 | [] | no_license | """1. Write a program that reads a word list from a file (see Section 9.1) and
prints all the sets of words that are anagrams. Here is an example of what the
output might look like:
['deltas', 'desalt', 'lasted', 'salted', 'slated', 'staled']
['retainers', 'ternaries'] ['generating', 'greatening']
['resmelts', 'smelters', 'termless']
Hint: you might want to build a dictionary that maps from a collection of
letters to a list of words that can be spelled with those letters. The
question is, how can you represent the collection of letters in a way that
can be used as a key?
2. Modify the previous program so that it prints the
longest list of anagrams first, followed by the second longest, and so on.
3. In Scrabble a “bingo” is when you play all seven tiles in your rack, along
with a letter on the board, to form an eight-letter word. What collection of
8 letters forms the most possible bingos? Hint: there are seven."""
def word_file_to_anagram_dictionary(filename="words.txt"):
    """Group the words in *filename* by their sorted-letter signature.

    Args:
        filename (str): word list, one word per line. Defaults to the
            previously hard-coded "words.txt" for backward compatibility.

    Returns:
        dict: {sorted letters: [words that are anagrams of each other]}
    """
    anagram_dictionary = {}
    with open(filename, "r") as word_file:
        for each_word in word_file:
            word = each_word.strip()
            # Anagrams share the same multiset of letters, hence the same key.
            sorted_word = ''.join(sorted(word))
            anagram_dictionary.setdefault(sorted_word, []).append(word)
    return anagram_dictionary
def sorted_list_of_anagrams():
    """Print every anagram group from words.txt, largest group first."""
    anagram_dictionary = word_file_to_anagram_dictionary()
    # Stable sort by group size keeps same-size groups in file order.
    groups = sorted(anagram_dictionary.values(), key=len, reverse=True)
    for group in groups:
        if len(group) > 1:
            print(group)
def eight_letter_bingo(filename="words.txt"):
    """Report the 8-letter collection that forms the most Scrabble "bingos".

    A bingo plays all seven rack tiles plus one board letter to make an
    eight-letter word, so we look for the 8-letter signature with the most
    anagrams in the word list.

    Args:
        filename (str): word list, one word per line. Defaults to the
            previously hard-coded "words.txt" for backward compatibility.
    """
    anagram_dictionary = {}
    with open(filename, "r") as word_file:
        for each_word in word_file:
            word = each_word.strip()
            if len(word) == 8:
                sorted_word = ''.join(sorted(word))
                anagram_dictionary.setdefault(sorted_word, []).append(word)
    # Signature with the most anagrams (first such signature on ties,
    # matching the previous stable descending sort).
    best_signature = max(anagram_dictionary, key=lambda sig: len(anagram_dictionary[sig]))
    top_eight_bingo_list = anagram_dictionary[best_signature]
    print("Collection of 8 letters that forms the most possible bingos is : {}".format(" ".join(sorted(top_eight_bingo_list[0]))))
    print("The words these letters can form are : {}".format(" , ".join(sorted(top_eight_bingo_list))))
def main():
    """Entry point: exercise 3 (the Scrabble bingo search) is active."""
    # Exercises 1 and 2 can be re-enabled here:
    #word_file_to_anagram_dictionary()
    #sorted_list_of_anagrams()
    eight_letter_bingo()
if __name__ == "__main__":
    main()
| true |
3af3a6981884c166a758eabd83a34b7bc41359a9 | Python | JCoetzee123/spira | /spira/yevon/vmodel/derived.py | UTF-8 | 2,469 | 2.59375 | 3 | [
"MIT"
] | permissive | import numpy as np
from copy import deepcopy
from spira.yevon.process.gdsii_layer import Layer
from spira.yevon.process.gdsii_layer import __DerivedDoubleLayer__
from spira.yevon.process.gdsii_layer import __DerivedLayerAnd__
from spira.yevon.process.gdsii_layer import __DerivedLayerXor__
from spira.yevon.gdsii.elem_list import ElementList
from spira.yevon.gdsii.polygon import Polygon
from spira.yevon.gdsii.polygon_group import PolygonGroup
from spira.yevon.filters.layer_filter import LayerFilterAllow
from spira.yevon.geometry.edges.edges import Edge
from spira.yevon.process import get_rule_deck
RDD = get_rule_deck()
__all__ = [
'get_derived_elements',
]
def _derived_elements(elems, derived_layer):
    """ Derived elements are generated from derived layers using
    layer operations as specified in the RDD. """
    if isinstance(derived_layer, Layer):
        # Base case: a plain layer -- collect its polygons into one group.
        LF = LayerFilterAllow(layers=[derived_layer])
        el = LF(elems.polygons)
        pg = PolygonGroup(elements=el, layer=derived_layer)
        return pg
    elif isinstance(derived_layer, __DerivedDoubleLayer__):
        # Recursive case: resolve both operand layers, then combine.
        p1 = _derived_elements(elems, derived_layer.layer1)
        p2 = _derived_elements(elems, derived_layer.layer2)
        if isinstance(derived_layer, __DerivedLayerAnd__):
            pg = p1 & p2
        elif isinstance(derived_layer, __DerivedLayerXor__):
            pg = p1 ^ p2
        # NOTE(review): if a double layer is neither And nor Xor, `pg` is
        # unbound here and this raises UnboundLocalError -- confirm those
        # are the only two subclasses.
        return pg
    else:
        raise Exception("Unexpected type for parameter 'derived_layer' : %s" % str(type(derived_layer)))
def get_derived_elements(elements, mapping, store_as_edge=False):
    """ Given a list of elements and a list of tuples (DerivedLayer, PPLayer),
    create new elements according to the boolean operations of the DerivedLayer
    and place these elements on the specified PPLayer. """
    # NOTE(review): iterating keys() and values() of the same dict keeps
    # them aligned; mapping.items() would express that directly.
    derived_layers = mapping.keys()
    export_layers = mapping.values()
    elems = ElementList()
    for derived_layer, export_layer in zip(derived_layers, export_layers):
        # Copy the target layer so generated elements do not share state.
        layer = deepcopy(export_layer)
        pg = _derived_elements(elems=elements, derived_layer=derived_layer)
        for p in pg.elements:
            if store_as_edge is True:
                elems += Edge(shape=p.shape, layer=layer)
            else:
                elems += Polygon(shape=p.shape, layer=layer)
    return elems
# TODO: Implement this using an adapter?
def get_derived_edge_ports():
    """ Generate ports from the derived edge polygons. """
    pass
| true |
ee388c47a2d574191b80c8b4f174997c6201922f | Python | avenet/project_euler | /30-digit-fifth-powers.py | UTF-8 | 263 | 3.6875 | 4 | [] | no_license | def can_be_written(number, power):
    number_powers = [int(digit)**power for digit in str(number)]
    return sum(number_powers) == number
# Project Euler 30: sum all numbers equal to the sum of the fifth powers
# of their digits. (Python 2 file: xrange / print-statement below.)
total_sum = 0
for n in xrange(2, 10**6):
    if can_be_written(n, 5):
        total_sum += n
print total_sum | true |
afc49eb525deee5654b24b1dadbf58ee5cfa99fd | Python | paik11012/Algorithm | /study/백준im/2635_plusnum.py | UTF-8 | 680 | 3.4375 | 3 | [] | no_license | number = 100
max_cnt = 0
max_num = [] # list holding the longest sequence of numbers found
for i in range(number+1):
    num1 = number
    num2= i
    num_list = [num1, num2] # seed the sequence with the first two numbers
    cnt = 2 # the sequence starts with two numbers
    while True:
        num3 = num1 - num2
        if num3 >= 0:
            num_list.append(num3)
            cnt += 1
            if len(max_num) < len(num_list): # keep the longest sequence seen so far
                max_num = num_list
            num1 = num2 # shift: the previous pair becomes (num2, num3)
            num2 = num3
        else: break
    if max_cnt < cnt:
        max_cnt = cnt
print(max_cnt)
print(' '.join(list(map(str,max_num)))) | true |
b1ef3d495c15258355f4947a67d4b7dc77f96835 | Python | lemon-lyman/NASA_GRC_MSE_TM_PY | /heartrate.py | UTF-8 | 1,062 | 3.203125 | 3 | [] | no_license | import pandas as pd
import numpy as np
import warnings
def hr_2_np(name, subject):
    # Loads heart rate data as pandas DataFrame and returns numpy array
    # The CSV has a 3-level column header (trial, volume, attempt) and the
    # rows from label '1' onward are the samples.
    # NOTE(review): `name` is sliced positionally below, so it must follow a
    # fixed filename layout with trial at [12:15], volume at [16:19] and the
    # attempt number at [20] -- TODO confirm against callers.
    file = 'heartrate_data/HeartRate_' + subject + '.csv'
    df = pd.read_csv(file, index_col=0, header=[0, 1, 2])
    df = df.loc['1':, :]
    trial = name[12:15]
    volume = name[16:19]
    attempt_number = name[20]
    hr = df.loc[:, (trial, volume, attempt_number)].values
    # Drop NaN padding so only real samples are returned.
    hr = hr[~np.isnan(hr)]
    return hr
def plot_hr(ax, name, subject):
    # Add heart rate plot to provided axis
    hr = hr_2_np(name, subject)
    # hmin and hmax have been hardcoded to the maximum and minimum found across both subjects and across all trials up
    # to Fall 2018 in order for the axes across all plots to be consistent. This range will probably suffice for future
    # trials.
    hmin = 55
    hmax = 126
    # NOTE(review): this warns on every call; warnings.warn is deduplicated
    # per location by default, but the hardcoding itself is the real issue.
    warnings.warn('Warning: heart rate range has been hardcoded to 55-126 BPM')
    ax.plot(hr, c='r', zorder=1, alpha=.7)
    ax.set_ylim(hmin, hmax)
| true |
52fae268b9a49eb34141dbdc5b945b287dce69f2 | Python | Abel-Fan/UAIF1907 | /python全栈开发/网络爬虫/python词云/test2.py | UTF-8 | 470 | 2.890625 | 3 | [] | no_license | # 处理数据
import pickle
import jieba
# Load the previously pickled list of review strings.
# NOTE(review): pickle.load is only safe on files this project wrote itself.
with open("哪吒之魔童降世 短评.txt","rb") as f:
    data0 = pickle.load(f)
data1 = []
def cut_filter(data):
    # Tokenize with jieba (precise mode) and drop punctuation tokens.
    return [ i for i in jieba.cut(data,cut_all=False) if i not in [',',',','。','.','!','\n',':','」','「','?','…','《','》','[',']','(',')','|','~',' '] ]
# Flatten all reviews into one token list for the word-cloud step.
for data in data0:
    data1+=cut_filter(data)
with open('data.txt','w',encoding="utf-8") as f:
f.write(" ".join(data1)) | true |
fc9ed0c3d9742afc8d45afe408537b6a19ddd4c5 | Python | TommieHG/Exercism-Python | /luhn/luhn.py | UTF-8 | 828 | 3.5625 | 4 | [] | no_license | import re
class Luhn:
    """Validate a number string with the Luhn checksum.

    Spaces are tolerated anywhere in the input; any other non-digit
    character makes it invalid, as does having fewer than two digits.
    """
    def __init__(self, card_num):
        self.card_num = card_num

    def valid(self):
        """Return True if the stored string passes the Luhn check."""
        # Reject any character that is neither a digit nor whitespace.
        if re.search(r"[^\d\s]", self.card_num):
            return False
        # Work right-to-left so the positions to double are the odd indices.
        digits = [int(d) for d in re.findall(r"[0-9]", self.card_num)]
        digits.reverse()
        if len(digits) < 2:
            return False
        checksum = 0
        for position, digit in enumerate(digits):
            if position % 2 == 1:
                digit *= 2
                if digit > 9:
                    digit -= 9
            checksum += digit
        return checksum % 10 == 0
| true |
b6bc35757238d978a884eaf70ff9c1d84490e42c | Python | bn-zhou/practice | /ex17.py | UTF-8 | 503 | 2.96875 | 3 | [] | no_license | from sys import argv
from os.path import exists
# Unpack the script name and the source/destination paths from argv.
script, form_file, to_file = argv
print ("Copying from", form_file,"to",to_file)
#we could do these two on one line too, how?
# NOTE(review): `input` shadows the built-in input() from here on.
input = open (form_file)
indate = input.read()
print ("The input file is",len(indate),"bytes long")
print ("Does the output file exists?", exists(to_file))
# NOTE(review): nothing actually pauses here -- an input() call is missing.
print ("Read, hit RETURN to continue, CTRL-C to abort.")
output = open (to_file, "w")
output.write(indate)
print ("Alright, all done.")
output.close()
input.close()
| true |
a6bf575f3e583647bb8e1f60334791d0b48c8b00 | Python | nyucusp/gx5003-fall2013 | /jwr300/Assignment4/problem4.py | UTF-8 | 748 | 3.078125 | 3 | [] | no_license | #!/usr/local/bin/python
#Warren Reed
#Principles of Urban Informatics
#Assignment 4, Problem 4
#Connects to MySQL and returns a list of addresses of all incidents that occurred in Manhattan.
import MySQLdb
import sys
def main():
    # Connect to the local MySQL course database and print the distinct
    # Manhattan incident addresses. (Python 2 file: MySQLdb and the
    # print statement below.)
    db = MySQLdb.connect(host="localhost", # your host, usually localhost
                     user="jwr300", # your username
                      passwd="jwr300", # your password
                      db="coursedb") # name of the data base
    query = "SELECT DISTINCT incident_address FROM incidents JOIN boroughs WHERE borough = 'Manhattan' AND boroughs.zipcode = incidents.incident_zip;"
    cur = db.cursor()
    cur.execute(query)
    for row in cur.fetchall():
        print row[0]
    # NOTE(review): the cursor is never closed; db.close() releases the
    # connection but cur.close() first would be tidier.
    db.close()
if __name__ == "__main__":
main() | true |
cf5a6a421bc2a6dfb83d7d002292db7359814ffe | Python | wjj800712/python-11 | /chenguowen/week6/tup2lst.py | UTF-8 | 377 | 4 | 4 | [] | no_license | #/usr/bin/env python
# -*- coding: utf-8 -*-
# Build a list out of the elements of two tuples
def tup2lst(arg1,arg2):
    '''
    Pair up the elements of two tuples into a list of one-entry dicts.

    (('a','b')), ((1,2)) --> [{'a':1},{'b':2}]

    Extra elements in the longer tuple are ignored (zip semantics).
    '''
    # A plain comprehension replaces the original immediately-invoked
    # lambda, which only obscured the same expression.
    return [{k: v} for k, v in zip(arg1, arg2)]
# NOTE(review): ('a') is just the string 'a' -- the inner parentheses do
# not create tuples; tup1 == ('a', 'b') only because of the outer pair.
tup1,tup2 = (('a'),('b')),(('c'),('d'))
print(tup2lst(tup1,tup2))
| true |
d1bbe8dd9026c8563901bca7f82b76a7e67df3ff | Python | JorgeGacitua/02Tarea | /Prueba.py | UTF-8 | 2,312 | 3.09375 | 3 | [
"MIT"
] | permissive | import numpy as np
import matplotlib.pyplot as plt
from pylab import *
from scipy.optimize import brentq
#---------------------------------Parte 1 -------------------------------------#
def Vprima(nu,vs,vp):
    '''
    Velocity of the particle immediately after a collision, given the
    membrane velocity vs and the particle velocity vp just before impact,
    with restitution-like coefficient nu.
    '''
    return (1 + nu) * vs - nu * vp
def Ps(t,omega,A):
    '''
    Position of the membrane at time t: A*sin(omega*t).
    '''
    return A * np.sin(omega * t)
def Vs(t,omega,A):
    '''
    Velocity of the membrane at time t.

    The membrane position is Ps(t) = A*sin(omega*t), so the velocity is
    its time derivative A*omega*cos(omega*t).
    '''
    vs = A * omega * np.cos(omega * t)
    # BUG FIX: this function previously returned the constant `omega`
    # instead of the computed velocity `vs` (the docstring and the use in
    # ChoqueN both expect the membrane velocity).
    return vs
def Yp(t,h0,v0):
    '''
    Free-fall position of the particle (g = 1) a time t after launch
    from height h0 with velocity v0.
    '''
    g = 1
    return h0 + v0 * t - 0.5 * g * t**2
def Vp(t,v0):
    '''
    Free-fall velocity of the particle (g = 1) a time t after launch
    with velocity v0.
    '''
    g = 1
    return v0 - g * t
def distancia(t,omega,A,h0,v0,tc):
    '''
    Gap between the membrane and the particle at time t, where the
    particle was launched at time tc from height h0 with velocity v0.
    '''
    return Ps(t, omega, A) - Yp(t - tc, h0, v0)
def ChoqueN(Yn,Vn,omega,A,nu,tc):
    '''
    Return [Yn1, Vn1, t_eff]: the particle position and velocity just
    after the next collision, plus the collision time t_eff.

    Yn, Vn : particle state right after the previous collision (at time tc)
    omega, A : membrane angular frequency and amplitude
    nu : restitution-like coefficient used by Vprima
    tc : time of the previous collision
    '''
    def distanciat(t):
        # Gap between membrane and particle at time t.
        # BUG FIX: use the current bounce state (Yn, Vn) instead of the
        # module-level globals h0, v0, which always described the initial
        # launch and made every later root search follow the wrong orbit.
        return distancia(t, omega, A, Yn, Vn, tc)
    # Start from the flat-membrane landing-time estimate and widen the
    # bracket until distanciat changes sign, as brentq requires.
    t1 = Vn + np.sqrt(Vn**2 + 2*Yn) + tc
    t2 = Vn + np.sqrt(Vn**2 + 2*Yn) + tc
    delta = 0.001
    while (distanciat(t1)*distanciat(t2) > 0.0):
        t1 = t1 - delta
        t2 = t2 + delta
    t_eff = brentq(distanciat, t1, t2)
    Yn1 = Yp(t_eff - tc, Yn, Vn)
    vs = Vs(t_eff, omega, A)
    vp = Vp(t_eff - tc, Vn)
    Vn1 = Vprima(nu, vs, vp)
    return [Yn1, Vn1, t_eff]
omega=1.7
A=1.0
nu=0.15
h0=0.0
v0=10.0
# Chain fifteen successive collisions, feeding each bounce's state into
# the next call.
N1=ChoqueN(h0,v0,omega,A,nu,0)
N2=ChoqueN(N1[0],N1[1],omega,A,nu,N1[2])
N3=ChoqueN(N2[0],N2[1],omega,A,nu,N2[2])
N4=ChoqueN(N3[0],N3[1],omega,A,nu,N3[2])
N5=ChoqueN(N4[0],N4[1],omega,A,nu,N4[2])
N6=ChoqueN(N5[0],N5[1],omega,A,nu,N5[2])
N7=ChoqueN(N6[0],N6[1],omega,A,nu,N6[2])
N8=ChoqueN(N7[0],N7[1],omega,A,nu,N7[2])
# NOTE(review): from N9 on, the velocity argument reuses N1..N7 (e.g. N9
# passes N1[1]) instead of the previous bounce's velocity (N8[1], N9[1],
# ...) -- this looks like a copy-paste error.
N9=ChoqueN(N8[0],N1[1],omega,A,nu,N8[2])
N10=ChoqueN(N9[0],N2[1],omega,A,nu,N9[2])
N11=ChoqueN(N10[0],N3[1],omega,A,nu,N10[2])
N12=ChoqueN(N11[0],N4[1],omega,A,nu,N11[2])
N13=ChoqueN(N12[0],N5[1],omega,A,nu,N12[2])
N14=ChoqueN(N13[0],N6[1],omega,A,nu,N13[2])
N15=ChoqueN(N14[0],N7[1],omega,A,nu,N14[2])
| true |
b8403eac0ebf59a8a06a89888c72d3cd9657821e | Python | superniaoren/fresh-fish | /game_learn_v1/pygame_collision_detection.py | UTF-8 | 3,553 | 3.53125 | 4 | [] | no_license | import sys, os
import pygame as pg
import random
# set up pygame
pg.init()
mainClock = pg.time.Clock()
# set up the window surface
WindowWidth = 500
WindowHeight = 500
windowSurface = pg.display.set_mode((WindowWidth, WindowHeight), 0, 32)
pg.display.set_caption('Collsion Detection')
# set up the colors
black = (0, 0, 0)
green = (0, 255, 0)
white = (255, 255, 255)
# set up player and food
foodCounter = 0
newFood = 40
foodSize = 20
player = pg.Rect(300, 100, 50, 50)
foods = []
for i in range(20):
foods.append(pg.Rect(random.randint(0, WindowWidth - foodSize), \
random.randint(0, WindowHeight - foodSize),\
foodSize, foodSize))
# set up movement variables
moveLeft = False
moveRight = False
moveUp = False
moveDown = False
moveSpeed = 6
# run the game loop
while True:
# check for events
for event in pg.event.get():
if event.type == pg.QUIT:
pg.quit()
sys.exit()
if event.type == pg.KEYDOWN:
if event.key == pg.K_LEFT or event.key == pg.K_a:
moveRight = False
moveLeft = True
if event.key == pg.K_RIGHT or event.key == pg.K_d:
moveRight = True
moveLeft = False
if event.key == pg.K_DOWN or event.key == pg.K_s:
moveDown = True
moveUp = False
if event.key == pg.K_UP or event.key == pg.K_w:
moveDown = False
moveUp = True
if event.type == pg.KEYUP:
if event.key == pg.K_ESCAPE:
pg.quit()
sys.exit()
elif event.key == pg.K_LEFT or event.key == pg.K_a:
moveLeft = False
elif event.key == pg.K_RIGHT or event.key == pg.K_d:
moveRight = False
elif event.key == pg.K_DOWN or event.key == pg.K_s:
moveDown = False
elif event.key == pg.K_UP or event.key == pg.K_w:
moveUp = False
if event.key == pg.K_x:
player.top = random.randint(0, WindowHeight - player.height)
player.left = random.randint(0, WindowWidth - player.width)
if event.type == pg.MOUSEBUTTONUP:
foods.append(pg.Rect(event.pos[0], event.pos[1], foodSize, foodSize))
foodCounter += 1
if foodCounter >= newFood:
foodCounter = 0
foods.append(pg.Rect(random.randint(0, WindowWidth - foodSize), \
random.randint(0, WindowHeight - foodSize), \
foodSize, foodSize))
# draw white background
windowSurface.fill(white)
# move the player
if moveLeft and player.left > 0:
player.left -= moveSpeed
if moveRight and player.right < WindowWidth:
player.right += moveSpeed
if moveUp and player.top > 0:
player.top -= moveSpeed
if moveDown and player.bottom < WindowHeight:
player.bottom += moveSpeed
# draw player
pg.draw.rect(windowSurface, black, player)
    # check the collision
    for food in foods:
    #for food in foods[:]:
        if player.colliderect(food):
            # NOTE(review): removing from `foods` while iterating over it
            # skips the element after each removal; iterate over a copy
            # (foods[:]) as the commented line above suggests.
            foods.remove(food)
# draw foods
for i in range(len(foods)):
pg.draw.rect(windowSurface, green, foods[i])
# draw the window onto the screen
pg.display.update()
mainClock.tick(40)
| true |
c11ce49623ce3ac4784e79fde5612778d704c9ea | Python | harish5556/Auto-Evaluation-of-Transcripts | /ParseTreeGeneration.py | UTF-8 | 3,451 | 3.359375 | 3 | [] | no_license | #This Class is going to take paragraphs and return whether given statement can return a parse tree or not
import time
import nltk
import re
from nltk import word_tokenize
import Grammar
import Constants
class ParseTree:
    """Checks text sentence-by-sentence against a CFG using NLTK chart parsing."""
    def init(self):
        # NOTE(review): this is `init`, not `__init__`, so it is never run
        # automatically when a ParseTree instance is constructed.
        print("Starting the Grammar Check")
        self.startTime=time.time()
        self.grammar=nltk.CFG.fromstring(Grammar.grammar)
        self.caps = Constants.caps
        self.prefixes = Constants.prefixes
        self.suffixes = Constants.suffixes
        self.starters = Constants.starters
        self.acronyms = Constants.acronyms
        self.websites = Constants.websites
        self.run()
    def run(self):
        # NOTE(review): `split_into_sentences` does not exist -- the method
        # below is named `splitIntoSentences`, so this raises AttributeError.
        self.sent = open("demo.txt").readlines()
        self.sent = ''.join(self.sent)
        self.sent = self.split_into_sentences(self.sent)
        self.validateSentence()
    def splitIntoSentences(self,text):
        """This function is going to split the text into sentences
        input: raw text
        output: List of sentences
        """
        # Protect abbreviation periods by rewriting them to the <prd>
        # placeholder, mark true sentence ends with <stop>, then split.
        text = " " + text + " "
        text = text.replace("\n", " ")
        text = re.sub(self.prefixes, "\\1<prd>", text)
        text = re.sub(self.websites, "<prd>\\1", text)
        if "Ph.D" in text: text = text.replace("Ph.D.", "Ph<prd>D<prd>")
        text = re.sub("\s" + self.caps + "[.] ", " \\1<prd> ", text)
        text = re.sub(self.acronyms + " " + self.starters, "\\1<stop> \\2", text)
        text = re.sub(self.caps + "[.]" + self.caps + "[.]" + self.caps + "[.]", "\\1<prd>\\2<prd>\\3<prd>", text)
        text = re.sub(self.caps + "[.]" + self.caps + "[.]", "\\1<prd>\\2<prd>", text)
        text = re.sub(" " + self.suffixes + "[.] " + self.starters, " \\1<stop> \\2", text)
        text = re.sub(" " + self.suffixes + "[.]", " \\1<prd>", text)
        text = re.sub(" " + self.caps + "[.]", " \\1<prd>", text)
        if "”" in text: text = text.replace(".”", "”.")
        if "\"" in text: text = text.replace(".\"", "\".")
        if "!" in text: text = text.replace("!\"", "\"!")
        if "?" in text: text = text.replace("?\"", "\"?")
        text = text.replace(".", ".<stop>")
        text = text.replace("?", "?<stop>")
        text = text.replace("!", "!<stop>")
        text = text.replace("<prd>", ".")
        sentences = text.split("<stop>")
        sentences = sentences[:-1]
        sentences = [s.strip() for s in sentences]
        return sentences
    def validateSentence(self):
        """This function is going to split the sentences into words,applies pos tagging, extract tags and generates
        parse trees using the tags
        input: List of sentences
        output: returns validity of sentences
        """
        for s in self.sent:
            count = 0
            s = "".join(c for c in s if c not in ('!', '.', ':', ','))
            stoken = word_tokenize(s)
            # print(stoken)
            # NOTE(review): pos_tag runs twice per sentence (`tagged` is
            # unused) and the parser could be built once outside the loop.
            tagged = nltk.pos_tag(stoken)
            pos_tags = [pos for (token, pos) in nltk.pos_tag(stoken)]
            # print(pos_tags)
            rd_parser = nltk.LeftCornerChartParser(self.grammar)
            for tree in rd_parser.parse(pos_tags):
                count = count + 1
                break
            if count == 0:
                print("Invalid sentence")
            else:
                print("Valid sentence")
        print("Total time taken:",(time.time()-self.startTime))
if __name__=="__main__":
    # NOTE(review): `init` is a method of ParseTree, not a module-level
    # function -- this raises NameError; ParseTree().init() was likely meant.
    init()
| true |
07396c6fea10a3b33f7ea9e19e2871a07302d14e | Python | VertikaD/HackerRank | /Practice/30 Days Of Code/Day26.py | UTF-8 | 913 | 3.59375 | 4 | [] | no_license | # Enter your code here. Read input from STDIN. Print output to STDOUT
# Nested Logic
# Implementation of datetime objects in python.
# code by Vertika Dhingra
from datetime import datetime
from datetime import date
d1, m1, y1 = [int(x) for x in input().split()]
returned_date = date(y1, m1, d1)
d2, m2, y2 = [int(x) for x in input().split()]
due_date = date(y2, m2, d2)
# Returned on time (or early): no fine.
if (returned_date == due_date) or (returned_date < due_date):
    fine = 0
    print(fine)
# Late within the same month and year: 15 per day.
if ((returned_date.day) > (due_date.day)) and (returned_date.month == due_date.month) and (returned_date.year == due_date.year):
    fine = (15 * (returned_date.day - due_date.day))
    print(fine)
# Late within the same year: 500 per month.
if ((returned_date.month) > (due_date.month)) and (returned_date.year == due_date.year):
    fine = (500 * (returned_date.month-due_date.month))
    print(fine)
# Returned in a later year: flat 10000.
if (returned_date.year > due_date.year):
    fine = 10000
    print(fine)
| true |
11fa8e9434900881640b716dd32e906dd54704c4 | Python | KevinGodinho/python_challenges | /two_list_dictionary/two_list_dict.py | UTF-8 | 865 | 4.03125 | 4 | [] | no_license | # My solution
def two_list_dictionary(list1, list2):
    """Zip keys from list1 with values from list2 into a dict.

    Keys without a matching value map to None; extra values are ignored.

    >>> two_list_dictionary(['a', 'b', 'c', 'd'], [1, 2, 3])
    {'a': 1, 'b': 2, 'c': 3, 'd': None}
    """
    # enumerate replaces the original manual while/index bookkeeping.
    return {key: (list2[i] if i < len(list2) else None)
            for i, key in enumerate(list1)}
two_list_dictionary(['a', 'b', 'c', 'd'], [1, 2, 3]) # {'a': 1, 'b': 2, 'c': 3, 'd': None}
two_list_dictionary(['a', 'b', 'c'] , [1, 2, 3, 4]) # {'a': 1, 'b': 2, 'c': 3}
two_list_dictionary(['x', 'y', 'z'] , [1,2]) # {'x': 1, 'y': 2, 'z': None}
# Instructor's solution
# Essentially did the same as me, but with a for loop
# def two_list_dictionary(keys, values):
# collection = {}
#
# for idx, val in enumerate(keys):
# if idx < len(values):
# collection[keys[idx]] = values[idx]
# else:
# collection[keys[idx]] = None
#
# return collection
| true |
8bece2207d5fbe1c282d0a5e041c3b0766ee8a08 | Python | Kwonkunkun/DrawAndPainting_Pytorch | /DataUtils/prepare_data.py | UTF-8 | 2,144 | 2.953125 | 3 | [
"Apache-2.0"
] | permissive | import argparse
import os
import urllib.request
import numpy as np
from generate_data import generate_dataset
def download(nums=''):
    """
    args:
    - nums: str, specify how many categories you want to download to your device
    """
    # The file 'categories.txt' includes all categories you want to download as dataset
    with open("./DataUtils/"+nums+"categories.txt", "r") as f:
        classes = f.readlines()
    classes = [c.replace('\n', '').replace(' ', '_') for c in classes]
    print(classes)
    base = 'https://storage.googleapis.com/quickdraw_dataset/full/numpy_bitmap/'
    # Fetch one .npy bitmap archive per category from the Quick, Draw! bucket.
    for c in classes:
        cls_url = c.replace('_', '%20')
        path = base+cls_url+'.npy'
        print(path)
        urllib.request.urlretrieve(path, './Data/'+c+'.npy')
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Download Quick, Draw! data from Google and then dump the raw data into cache.',
                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # NOTE(review): default=10 is an int while type=str and the choices are
    # strings -- when the flag is omitted, download(10) fails on the string
    # concatenation above. The default should be '10'.
    parser.add_argument('--categories', '-c', type=str, default=10, choices=['all', '100', '30', '10'],
                        help='Choose how many categories you want to download to your device.')
    parser.add_argument('--valfold', '-v', type=float,
                        default=0.2, help='Specify the val fold ratio.')
    parser.add_argument('--max_samples_category', '-msc', type=int, default=5000,
                        help='Specify the max samples per category for your generated dataset.')
    parser.add_argument('--download', '-d', type=int,
                        choices=[0, 1], default=0, help='1 for download data, 0 for not.')
    parser.add_argument('--show_random_imgs', '-show', action='store_true',
                        default=False, help='show some random images while generating the dataset.')
    args = parser.parse_args()
    # Download data.
    if args.download == 1:
        download(args.categories)
    # Generate dataset
    generate_dataset(vfold_ratio=args.valfold, max_samples_per_class=args.max_samples_category,
                     show_imgs=args.show_random_imgs)
| true |
fef6fc93d6b92558bd189033a7efd2ba9540e90b | Python | amozie/amozie | /datazie/test.py | UTF-8 | 245 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import datazie as dz
def test1():
    # Fit a linear model to noisy samples of y = 3x + 2 and print a prediction.
    x = np.linspace(0, 10, 50)
    y = 3*x + 2
    y += np.random.randn(50)
    lm = dz.model.LinearModel(y, x)
    lm.fit()
    print(lm.predict(50))
if __name__ == '__main__':
test1() | true |
14e2dd1711863c9a7a3b841f306b08ed995a025b | Python | aufbakanleitung/ProjectEuler | /asterisk.py | UTF-8 | 222 | 3.75 | 4 | [] | no_license | from functools import reduce
primes = [2, 3, 5, 7, 11, 13]
def product(*numbers):
    """Multiply all positional arguments together with a left fold."""
    return reduce(lambda accumulator, factor: accumulator * factor, numbers)
print(product(*primes))
# 30030
# NOTE: without unpacking, `numbers` is a 1-tuple holding the list, and
# reduce over a single element returns it unchanged -- hence the list below.
print(product(primes))
# [2, 3, 5, 7, 11, 13] | true |
024c568176c467f9e83e4edf8c74e68360a38bdd | Python | leongjinqwen/python-nextagram-dec | /instagram_web/blueprints/users/views.py | UTF-8 | 4,068 | 2.53125 | 3 | [] | no_license | from flask import Blueprint, render_template,request,redirect,url_for,flash
from models.user import User
from models.image import Image
from flask_login import current_user,login_required
from werkzeug.utils import secure_filename
from instagram_web.util.helpers import upload_file_to_s3,allowed_file
import datetime
users_blueprint = Blueprint('users',
__name__,
template_folder='templates')
@users_blueprint.route('/new', methods=['GET'])
def new():
    # Render the signup form.
    return render_template('users/new.html')
@users_blueprint.route('/', methods=['POST'])
def create():
    # Create a user from the signup form; re-render the form with flashed
    # errors on validation failure.
    username = request.form.get('username')
    email = request.form.get('email')
    password = request.form.get('password')
    user = User(username=username,email=email,password=password)
    if user.save():
        flash("successfully create a new user",'info')
        return redirect(url_for('users.new'))
    else:
        for error in user.errors:
            flash(error,'danger')
        return render_template('users/new.html')
@users_blueprint.route('/<username>', methods=["GET"])
def show(username):
    # Public profile page listing the user's images.
    user = User.get(User.username==username)
    images = Image.select().where(Image.user==user.id)
    return render_template("users/show.html",user=user,images=images)
@users_blueprint.route('/', methods=["GET"])
def index():
    # NOTE(review): `users` is queried but unused and a placeholder string
    # is returned -- this route looks unfinished.
    users = User.select()
    return "USERS"
@users_blueprint.route('/<id>/edit', methods=['GET'])
@login_required
def edit(id):
    # Only the profile owner may open the edit form.
    user = User.get_by_id(id)
    if current_user == user:
        return render_template("users/edit.html",user=user)
    else:
        flash("Unauthorized to edit.",'danger')
        return redirect(url_for('users.show',username=current_user.username))
@users_blueprint.route('/<id>', methods=['POST'])
def update(id):
    # Apply profile edits submitted from the edit form.
    # NOTE(review): unlike edit() above, this POST route is not protected
    # with @login_required, so current_user may be anonymous here.
    user = User.get_by_id(id)
    if current_user == user:
        username = request.form.get('username')
        email = request.form.get('email')
        password = request.form.get('password')
        # NOTE(review): password is (re)assigned whenever username or email
        # changes, even if the password field was left empty -- confirm this
        # is the intended model behaviour.
        if username:
            user.username = username
            user.password = password
        if email:
            user.email = email
            user.password = password
        if password:
            user.password = password
        if user.save():
            flash('Successfully updated!','success')
            return redirect(url_for('users.edit',id=id))
        else:
            for error in user.errors:
                flash(error,'danger')
            return render_template("users/edit.html",user=user)
    else:
        flash("Unauthorized to edit.",'danger')
        return redirect(url_for('users.show',username=current_user.username))
@users_blueprint.route('/upload', methods=['POST'])
def upload():
    # Upload a profile image to S3 and store its key on the current user.
    # NOTE(review): not @login_required -- current_user may be anonymous.
    # check whether an input field with name 'user_file' exist
    if 'user_file' not in request.files:
        flash('No user_file key in request.files')
        return redirect(url_for('users.edit',id=current_user.id))
    # after confirm 'user_file' exist, get the file from input
    file = request.files['user_file']
    # check whether a file is selected
    if file.filename == '':
        flash('No selected file')
        return redirect(url_for('users.edit',id=current_user.id))
    # check whether the file extension is allowed (eg. png,jpeg,jpg,gif)
    if file and allowed_file(file.filename):
        # Timestamp prefix keeps repeated uploads unique in the bucket.
        file.filename = secure_filename(f"{str(datetime.datetime.now())}{file.filename}")
        output = upload_file_to_s3(file)
        if output:
            User.update(profile_image=file.filename).where(User.id==current_user.id).execute()
            flash("Profile image successfully uploaded","success")
            return redirect(url_for('users.show',username=current_user.username))
        else:
            # NOTE(review): `output` is falsy in this branch, so the flashed
            # message is empty -- flash a real error string instead.
            flash(output,"danger")
            return redirect(url_for('users.edit',id=current_user.id))
    # if file extension not allowed
    else:
        flash("File type not accepted,please try again.")
return redirect(url_for('users.edit',id=current_user.id)) | true |
acc71a9d04f187acd4353229d9f46ae3d3d52d3d | Python | dwest/bcdmud | /GameServer.py | UTF-8 | 1,561 | 2.875 | 3 | [] | no_license | import SocketServer
import Queue
import multiprocessing
from multiprocessing import Queue, Process
from ClientMessage import *
from ServerProxy import *
class TCPHandler(SocketServer.StreamRequestHandler):
    # One handler instance per TCP connection (Python 2 SocketServer).
    def handle(self):
        # Toss request on queue
        gameQueue.put(self.request.recv(4096))
class ThreadedTCPServer(SocketServer.ThreadingMixIn, SocketServer.TCPServer):
    # Threaded TCP server that remembers its queue-consumer process.
    def setQueueProcess(self, queue):
        self.queueProcess = queue
class QueueProcess(Process):
    # NOTE(review): __init__ requires a `proxy` argument, but the
    # module-level call below is QueueProcess(gameQueue) -- one argument
    # short -- and `proxy` is never stored or used.
    def __init__(self, queue, proxy):
        Process.__init__(self)
        self.queue = queue
    # Get next item from queue
    # Create ClientMessage obj
    def run(self):
        while True:
            item = self.queue.get()
            try:
                message = ClientMessage(item)
            except InvalidMessageError, err_msg:
                print ""
                print err_msg, item
# Create and start the class that will process
# items on the queue
gameQueue = Queue()
p = QueueProcess(gameQueue)
p.start()
# Create server proxy
# Create the server
HOST, PORT = "localhost", 0
server = ThreadedTCPServer((HOST, PORT), TCPHandler)
server.setQueueProcess(p)
server_process = Process(target=server.serve_forever)
server_process.daemon = True
server_process.start()
ip, add = server.server_address
print ip," ",add
# Show the server CLI
# TODO: Make ServerCLI class!
message = ""
while message != 'bye':
try:
message = raw_input(">> ")
except EOFError:
break
print "\n"
p.terminate()
server_process.terminate()
| true |
919f5f84fd5166d77f39b9bf96113a85b3849b34 | Python | romilpatel-developer/CIS2348_projects | /Homework 2/pythonProject6/main.py | UTF-8 | 509 | 2.90625 | 3 | [] | no_license | #Name-Romilkumar Patel
# PSID-1765483
# Section 6.17
# Read a password and obfuscate it with fixed character substitutions
# (i->!, a->@, m->M, B->8, o->.), then append the required suffix.
password = input()
modified_password = ''
i = 0
while i < len(password):
    ch = password[i]
    if ch == 'i':
        modified_password += '!'
    elif ch == 'a':
        modified_password += '@'
    elif ch == 'm':
        modified_password += 'M'
    elif ch == 'B':
        modified_password += '8'
    elif ch == 'o':
        modified_password += '.'
    else:
        modified_password += ch
    i += 1
modified_password += "q*s"
print(modified_password) | true |
e7cb702e20d9a6746daaf86fb630a0b565145535 | Python | Anukul2058/python-calculator | /main.py | UTF-8 | 3,534 | 3.359375 | 3 | [] | no_license | from tkinter import *
root = Tk()
root.title('A calculator by anukul')
e = Entry(root, width=35, borderwidth=5)
e.grid(row=0, column=0, columnspan=3)
def button_click(number):
    # Append the pressed digit to whatever is already in the entry box.
    current = e.get()
    e.delete(0, END)
    e.insert(0, str(current) + str(number))
def button_clear():
    # AC: wipe the entry box.
    e.delete(0, END)
# NOTE(review): the four operator handlers below are identical apart from
# the sign character and the int/float conversion; they could share one
# parameterised helper.
def button_add():
    global sign
    sign = '+'
    first_number = e.get()
    global f_num
    f_num = int(first_number)
    e.delete(0, END)
def button_sub():
    global sign
    sign = '-'
    first_number = e.get()
    global f_num
    f_num = int(first_number)
    e.delete(0, END)
def button_div():
    global sign
    sign = '/'
    first_number = e.get()
    global f_num
    f_num = float(first_number)
    e.delete(0, END)
def button_multiply():
    global sign
    sign = '*'
    first_number = e.get()
    global f_num
    f_num = int(first_number)
    e.delete(0, END)
def button_equal():
    # Apply the pending operator to the stored first operand and the
    # current entry contents.
    # NOTE(review): pressing '=' before any operator raises NameError
    # (`sign` undefined), and dividing by zero raises ZeroDivisionError.
    second_number = e.get()
    if sign == '+':
        e.delete(0, END)
        e.insert(0, int(f_num) + int(second_number))
    if sign == '-':
        e.delete(0, END)
        e.insert(0, int(f_num) - int(second_number))
    if sign == '*':
        e.delete(0, END)
        e.insert(0, int(f_num) * int(second_number))
    if sign == '/':
        e.delete(0, END)
        e.insert(0, float(f_num) / float(second_number))
button_1 = Button(root, text='1', padx=40, pady=20, command=lambda: button_click(1),fg='red')
button_2 = Button(root, text='2', padx=40, pady=20, command=lambda: button_click(2),fg='red')
button_3 = Button(root, text='3', padx=40, pady=20, command=lambda: button_click(3),fg='red')
button_4 = Button(root, text='4', padx=40, pady=20, command=lambda: button_click(4),fg='red')
button_5 = Button(root, text='5', padx=40, pady=20, command=lambda: button_click(5),fg='red')
button_6 = Button(root, text='6', padx=40, pady=20, command=lambda: button_click(6),fg='red')
button_7 = Button(root, text='7', padx=40, pady=20, command=lambda: button_click(7),fg='red')
button_8 = Button(root, text='8', padx=40, pady=20, command=lambda: button_click(8),fg='red')
button_9 = Button(root, text='9', padx=40, pady=20, command=lambda: button_click(9),fg='red')
button_10 = Button(root, text='0', padx=40, pady=20, command=lambda: button_click(0),fg='red')
button_clear = Button(root, text='AC', padx=35, pady=20, command=button_clear, bg='red', fg='black')
button_add = Button(root, text='+', padx=40, pady=20, command=button_add,fg='red')
button_sub = Button(root, text='-', padx=40, pady=20, command=button_sub,fg='red')
button_div = Button(root, text='/', padx=40, pady=20, command=button_div,fg='red')
button_multiply = Button(root, text='*', padx=40, pady=20, command=button_multiply,fg='red')
button_equal = Button(root, text='=', padx=140, pady=20, command=button_equal, bg='blue', fg='white')
# put buttons on the screen
button_1.grid(row=3, column=0)
button_2.grid(row=3, column=1)
button_3.grid(row=3, column=2)
button_4.grid(row=2, column=0)
button_5.grid(row=2, column=1)
button_6.grid(row=2, column=2)
button_7.grid(row=1, column=0)
button_8.grid(row=1, column=1)
button_9.grid(row=1, column=2)
button_10.grid(row=4, column=0)
button_clear.grid(row=4, column=2)
button_add.grid(row=4, column=1)
button_sub.grid(row=5, column=0)
button_div.grid(row=5, column=1)
button_multiply.grid(row=5, column=2)
button_equal.grid(row=6, column=0, columnspan=3)
root.mainloop()
| true |
13a942b39ad83cf3ced8a46bd84f19bc0fc48950 | Python | hubert-wojtowicz/learn-python-syntax | /module-5/5-classes-vs-instance-methods.py | UTF-8 | 464 | 3.71875 | 4 | [] | no_license | class Point:
default_color = "red"
def __init__(self, x, y): # magic method __xxx__()
# self is refernce to current object
self.x = x
self.y = y
def draw(self):
print("({}, {})".format(self.x, self.y))
# class methods
@classmethod # decorator makes difference
def zero(cls): # cls is pure convention - can be used anythink
return cls(0, 0)
# Point.zero() # factory method
Point.zero().draw()
| true |
8f057eda44b2d6659524321fe8f9e3925778518f | Python | amirkhan1092/competitive-coding | /laser_tag.py | UTF-8 | 122 | 2.890625 | 3 | [] | no_license | # import itertools as ite
# Each team after the first needs 30 minutes; print 1 if all fit into
# `hr` hours, otherwise 0.
team, hr = map(int, input().split())
if (team-1)*30/60 <= hr:
    print(1)
else:
    print(0)
| true |
92a00f1aa4c460e441262c037612661901b19abf | Python | geekidharsh/ctci-solutions | /01-arrays-and-strings/stringbuilder.py | UTF-8 | 756 | 4.0625 | 4 | [] | no_license | "this is just a psuedo code"
'''
StringBuilder:
When concatenating a list of strings one by one, the running time is often high.
Why:
Assume there are n strings, each of length x. Upon each concatenation a new string is allocated
and both operands are copied over.
The first iteration requires copying x characters.
The second iteration requires copying 2x characters, and so on...
Total time: O(x + 2x + 3x + ... + nx) = O(x * n^2)
StringBuilder solves this problem by keeping a resizable array of all the strings, so
a full copy is made only when necessary.
String joinWords(String[] words) {
    StringBuilder sentence = new StringBuilder();
    for (String w : words) {
        sentence.append(w);
    }
    return sentence.toString();
}
| true |
da393edee6017f1ece1799fdff169215e0668fc1 | Python | vsofat/SoftDev | /Fall/25_restrio/app.py | UTF-8 | 1,874 | 2.65625 | 3 | [] | no_license | from flask import Flask, render_template
import json
from urllib.request import urlopen
app = Flask(__name__)
@app.route("/")
def root():
return render_template('index.html')
@app.route("/qod")
def quote():
url = urlopen("http://quotes.rest/qod.json")
response = url.read()
data = json.loads(response)
print(data['contents']['quotes'][0]['quote'], data['contents']['quotes'][0]['author'])
return render_template('qod.html', quote = data['contents']['quotes'][0]['quote'], author = data['contents']['quotes'][0]['author'])
@app.route("/bike")
def bike():
url = urlopen("http://api.citybik.es/v2/networks/smartbike-delhi-delhi")
response = url.read()
data = json.loads(response)
stations = data['network']['stations']
array = []
for i in stations:
array.append(i['name'])
#array.append("\n")
print(array)
print(data['network']['location']['city'], data['network']['name'])
return render_template('bike.html',
city = data['network']['location']['city'],
company = data['network']['name'],
stations = array
)
@app.route("/currency")
def curr():
url = urlopen("https://api.exchangerate-api.com/v4/latest/CLP")
response = url.read()
data = json.loads(response)
print(data['base'],data['rates']['USD'],data['rates']['EUR'],data['rates']['RUB'],data['rates']['GBP'])
return render_template('curr.html',
main = data['base'],
usd = data['rates']['USD'],
eur = data['rates']['EUR'],
rub = data['rates']['RUB'],
gbp = data['rates']['GBP'],
)
if __name__ == "__main__":
app.debug = True
app.run()
| true |
fec522ffde1ab2daefa7f1f1802d5669b0cb1fad | Python | mtyoumans/simple_icnn_github | /icnn.py | UTF-8 | 4,089 | 3.296875 | 3 | [] | no_license | """Provides a class implementing a prototype
Input Convex Neural Network.
Typical usage:
model = ICNN()
"""
from torch import nn
import torch
class ICNN(nn.Module):
    """ Creates a simple Multi-layer Input Convex Neural Network
    Input Convex Neural Networks (ICNNs) are neural networks
    that are convex with respect to the inputs. They are not
    convex with respect to the weights.
    This architecture is based on ideas from (Amos, Xu, Kolter, 2017):
    Amos, Brandon, Lei Xu, and J. Zico Kolter.
    "Input convex neural networks."
    International Conference on Machine Learning.
    PMLR, 2017.
    This a prototype implementation of that idea using skip connections
    and constraining weights to be non-negative (or negative in last
    layer to create a concave network with respect to inputs) using
    torch.clamp(). As for now, It is to be trained normally with ADAM
    stochastic gradient descent without any special concern to the weight
    space constraints, though it may be possible to create a better
    optimizer in the future.
    """
    def __init__(self):
        super(ICNN, self).__init__()
        # Flattens 28x28 inputs (e.g. MNIST images) into 784-dim vectors.
        self.flatten = nn.Flatten()
        self.first_hidden_layer = nn.Sequential(
            nn.Linear(28*28, 512),
            nn.ReLU()
        )
        #matrices and nonlinearities for 2nd layer
        # "prim" weights are made non-negative here at construction only;
        # nothing re-imposes the sign constraint during training (the
        # docstring notes training proceeds without weight-space concern).
        self.second_layer_linear_prim = nn.Linear(512,512)
        self.second_layer_linear_prim.weight.data = torch.abs(
            self.second_layer_linear_prim.weight.data)
        # Skip connections map the raw flattened input into each layer.
        self.second_layer_linear_skip = nn.Linear(28*28, 512)
        self.second_layer_act = nn.ReLU()
        #matrices and nonlinearities for 3rd layer
        self.third_layer_linear_prim = nn.Linear(512,512)
        self.third_layer_linear_prim.weight.data = torch.abs(
            self.third_layer_linear_prim.weight.data)
        self.third_layer_linear_skip = nn.Linear(28*28, 512)
        self.third_layer_act = nn.ReLU()
        #matrices and nonlinearities for 4th layer
        self.fourth_layer_linear_prim = nn.Linear(512,512)
        self.fourth_layer_linear_prim.weight.data = torch.abs(
            self.fourth_layer_linear_prim.weight.data)
        self.fourth_layer_linear_skip = nn.Linear(28*28, 512)
        self.fourth_layer_act = nn.ReLU()
        #matrices and nonlinearities for 5th layer
        self.fifth_layer_linear_prim = nn.Linear(512,512)
        self.fifth_layer_linear_prim.weight.data = torch.abs(
            self.fifth_layer_linear_prim.weight.data)
        self.fifth_layer_linear_skip = nn.Linear(28*28, 512)
        self.fifth_layer_act = nn.ReLU()
        #final Output layer
        # Output "prim" weights start non-positive (negated abs), per the
        # concave-output note in the class docstring.
        self.output_layer_linear_prim = nn.Linear(512, 10)
        self.output_layer_linear_prim.weight.data = -1*torch.abs( #check this
            self.output_layer_linear_prim.weight.data)
        self.output_layer_linear_skip = nn.Linear(28*28, 10)
    def forward(self, x):
        """Forward pass: five prim+skip layers, returning 10 logits per sample."""
        x = self.flatten(x)
        # The same flattened input feeds every skip connection.
        skip_x2 = x
        skip_x3 = x
        skip_x4 = x
        skip_x5 = x
        skip_x6 = x
        z1 = self.first_hidden_layer(x)
        z1 = self.second_layer_linear_prim(z1)
        # Pre-activations of each "prim" path are clamped to be non-negative
        # before the skip contribution is added.
        z1 = torch.clamp(z1, min = 0, max = None)
        y2 = self.second_layer_linear_skip(skip_x2)
        z2 = self.second_layer_act(z1 + y2)
        z2 = self.third_layer_linear_prim(z2)
        z2 = torch.clamp(z2, min = 0, max = None)
        y3 = self.third_layer_linear_skip(skip_x3)
        z3 = self.third_layer_act(z2 + y3)
        z3 = self.fourth_layer_linear_prim(z3)
        z3 = torch.clamp(z3, min = 0, max = None)
        y4 = self.fourth_layer_linear_skip(skip_x4)
        z4 = self.fourth_layer_act(z3 + y4)
        z4 = self.fifth_layer_linear_prim(z4)
        z4 = torch.clamp(z4, min = 0, max = None)
        y5 = self.fifth_layer_linear_skip(skip_x5)
        z5 = self.fifth_layer_act(z4 + y5)
        z5 = self.output_layer_linear_prim(z5)
        # Final prim pre-activation is clamped non-positive instead.
        z5 = torch.clamp(z5, min = None, max = 0)#check this
        y6 = self.output_layer_linear_skip(skip_x6)
        logits = z5 + y6
        return logits
| true |
8b2818651e12cccbef31746096877fb66916a9f9 | Python | yoshiscienceguy/IrvineUploadProgram | /src/MxPiDrive/Gifs.py | UTF-8 | 2,880 | 2.984375 | 3 | [] | no_license | import Tkinter as tk
import thread,time
GIFS = {}
Status = True
names = ["walking","ChickenDance"]#,"BreakDance","Dance","HipHop","Samba","Swing"]
doneLoading = False
NumberofGifs = len(GIFS)
CurrentGifNumber = 0
CurrentImage = None
button = None
root = None
class AnimatedGif(object):
    """ Holds every frame of a multi-frame GIF so it can be replayed. """
    def __init__(self, image_file_path):
        self.image_file_path = image_file_path
        self._frames = []
        self._load()

    def __len__(self):
        return len(self._frames)

    def __getitem__(self, frame_num):
        return self._frames[frame_num]

    def _load(self):
        """ Pull frames out of the GIF one index at a time until Tk runs out. """
        next_index = 0
        while True:
            try:
                frame = tk.PhotoImage(file=self.image_file_path,
                                      format="gif -index {}".format(next_index))
            except tk.TclError:
                break  # no frame at this index: the whole GIF has been read
            self._frames.append(frame)
            next_index += 1
def updatePicture(frame_num):
    """Show frame *frame_num* of CurrentImage and reschedule itself.

    Runs as a self-re-arming Tk after() callback: each call advances the
    frame, wrapping at the end, so one full cycle takes about one second.
    Setting the module-level Status flag to False stops the loop; the flag
    is reset to True on the way out.
    """
    if(doneLoading):
        global Status
        # Spread the whole animation over ~1 second.
        ms_delay = 1000 // len(CurrentImage)
        try:
            button.configure(image=CurrentImage[frame_num])
        except:
            # frame_num can be stale after switching GIFs; fall back to frame 0.
            button.configure(image=CurrentImage[0])
        frame_num += 1
        if(frame_num >= len(CurrentImage)):
            frame_num = 0
        if(Status == False):
            # Someone requested a stop (see nextAnimation); acknowledge and quit.
            Status = True
            return
        else:
            root.after(ms_delay, updatePicture, frame_num)
def startAnimation():
    # Kick off the self-rescheduling updatePicture() loop at frame 0.
    updatePicture(0)
def nextAnimation():
    """Button callback: advance to the next GIF (wrapping) and restart playback."""
    global CurrentImage, CurrentGifNumber,Status
    # NOTE(review): `name` is computed but never used — looks like leftover code.
    name =names[CurrentGifNumber]
    CurrentGifNumber += 1
    if(CurrentGifNumber >= NumberofGifs):
        CurrentGifNumber = 0
    CurrentImage = GIFS[names[CurrentGifNumber]]
    # Start drawing the newly selected GIF from its first frame ...
    updatePicture(0)
    # ... and ask the previously running update loop to stop itself.
    Status = False
Status = False
def GetGif():
    """Load every GIF listed in `names` from the ICONS/ folder into GIFS.

    Also publishes the first GIF as CurrentImage and flips doneLoading so
    updatePicture() starts rendering frames.
    """
    global doneLoading, CurrentImage, NumberofGifs,GIFS
    for name in names:
        image_file_path = "ICONS/"+name+".gif"
        ani_img = AnimatedGif(image_file_path)
        print(len(ani_img))  # frame count — useful while debugging assets
        GIFS[name] = ani_img
    print("done")
    doneLoading = True
    CurrentImage=GIFS[names[0]]
    NumberofGifs = len(GIFS)
def Start(mroot):
    """Build the animation button inside *mroot* and begin playing.

    mroot -- an existing Tk root window; the caller still owns the mainloop.
    """
    global button,root
    root = mroot
    # NOTE(review): `name` is unused here — presumably leftover from a
    # version that labelled the button; confirm before removing.
    name =names[CurrentGifNumber]
    button = tk.Button(root,relief = tk.FLAT,command = nextAnimation) # display first frame initially
    button.pack()
    GetGif()
    startAnimation()
##
##root = tk.Tk()
##root.title("Animation Demo")
##Start(root)
##root.mainloop()
##changeAnimation = Button(root, text="Next", command=nextAnimation)
##changeAnimation.pack()
##stop_animation = Button(root, text="stop animation", command=cancel_animation)
##stop_animation.pack()
| true |
8d1e72c4545ddbc18a1f3a7ffbe2e57635e47885 | Python | sschmeier/vcfcompile | /vcfcompile.py | UTF-8 | 9,784 | 2.578125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
NAME: vcfcompile.py
===================
DESCRIPTION
===========
Read vcf-files and compile a table of unique variants
and extract for each file the QD value of the SNPs.
Prints to standard out. Some stats go to standard error.
INSTALLATION
============
Nothing special. Uses only standard libs.
USAGE
=====
python vcfcompile.py *.vcf.gz
TODO
====
- Make use of cyvcf for speed.
VERSION HISTORY
===============
0.0.2 2019/01/10 Fixed error: _csv.Error: field larger than field limit (131072)
0.0.1 2018 Initial version.
LICENCE
=======
2018-2019, copyright Sebastian Schmeier
s.schmeier@gmail.com // https://www.sschmeier.com
template version: 2.0 (2018/12/19)
"""
import sys
import os
import os.path
import argparse
import csv
import gzip
import bz2
import zipfile
import time
import re
import operator
import logging
csv.field_size_limit(sys.maxsize)
__version__ = '0.0.2'
__date__ = '2019/01/10'
__email__ = 's.schmeier@gmail.com'
__author__ = 'Sebastian Schmeier'
# For color handling on the shell
try:
from colorama import init, Fore
# INIT color
# Initialise colours for multi-platform support.
init()
reset = Fore.RESET
colors = {'success': Fore.GREEN,
'error': Fore.RED,
'warning': Fore.YELLOW,
'info': ''}
except ImportError:
sys.stderr.write('colorama lib desirable. ' +
'Install with "conda install colorama".\n\n')
reset = ''
colors = {'success': '', 'error': '', 'warning': '', 'info': ''}
def alert(atype, text, log, repeat=False):
    """Write a timestamped, colourised status line to *log*.

    *repeat* ends the line with a carriage return instead of a newline so
    successive calls overwrite each other.  An 'error' alert terminates the
    process with exit code 1.
    """
    terminator = '\r' if repeat else '\n'
    stamp = time.strftime('%Y%m%d-%H:%M:%S')
    message = '{} [{}] {}{}'.format(stamp, atype.rjust(7), text, terminator)
    log.write('{}{}{}'.format(colors[atype], message, reset))
    if atype == 'error':
        sys.exit(1)
def success(text, log=sys.stderr):
    """Log a green success message."""
    alert('success', text, log)
def error(text, log=sys.stderr):
    """Log a red error message; alert() exits the process for 'error'."""
    alert('error', text, log)
def warning(text, log=sys.stderr):
    """Log a yellow warning message."""
    alert('warning', text, log)
def info(text, log=sys.stderr, repeat=False):
    """Log a plain info message; *repeat* asks for a carriage-return update."""
    # Bug fix: *repeat* was accepted but never forwarded to alert(), so
    # in-place (carriage-return) progress updates never happened.
    alert('info', text, log, repeat)
def parse_cmdline():
    """ Parse command-line args.

    Prints the help text and exits when called with no arguments.
    Returns the parsed ``(args, parser)`` pair so callers can reuse the
    parser (e.g. for error reporting).
    """
    # parse cmd-line ----------------------------------------------------------
    description = 'Read vcf-files and compile a table of unique' + \
                  ' variants and extract for each file the QD value' + \
                  ' of the SNPs. Prints to standard out. Some stats' + \
                  ' go to standard error.'
    version = 'version {}, date {}'.format(__version__, __date__)
    epilog = 'Copyright {} ({})'.format(__author__, __email__)
    parser = argparse.ArgumentParser(description=description, epilog=epilog)
    parser.add_argument('--version',
                        action='version',
                        version='{}'.format(version))
    # One or more vcf files; at least two are required later by main().
    parser.add_argument(
        'files',
        metavar='FILE',
        nargs='+',
        help='vcf-file.')
    parser.add_argument('--snpeff',
                        action="store_true",
                        default=False,
                        help='Extract SnpEff effects on genes. ' + \
                        'Requires that vcf is a result of a SnpEff run.')
    parser.add_argument('--snpeffType',
                        metavar='TYPE',
                        default=None,
                        help='Extract genes with this SnpEff effect (HIGH, MODERATE, LOW, MODIFIER). ' + \
                        'Ignore other genes. [default: all"]')
    parser.add_argument('--qual',
                        action="store_true",
                        default=False,
                        help='Extract QUAL instead of annotation values.')
    parser.add_argument('--ann',
                        metavar='TYPE',
                        default="QD",
                        help='Extract this value from the annotation line [default="QD"]. ' + \
                        'Adds a "-", if the value is not found and --warn is specified. ' + \
                        'Throws an error otherwise.')
    parser.add_argument('--warn',
                        action="store_true",
                        default=False,
                        help='Do not throw an exception if the value could not be extracted '+ \
                        ' from a vcf line. Instead only print warning to stderr.')
    # if no arguments supplied print help
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    args = parser.parse_args()
    return args, parser
def load_file(filename):
    """Open *filename* transparently: stdin, gzip, bz2, zip, or plain text.

    '-' or 'stdin' map to sys.stdin; the compression format is picked from
    the last filename extension.  Returns a file-like object (a ZipFile
    instance for .zip inputs).
    """
    if filename in ('-', 'stdin'):
        return sys.stdin
    suffix = filename.split('.')[-1]
    if suffix == 'gz':
        return gzip.open(filename, 'rt')
    if suffix == 'bz2':
        return bz2.open(filename, 'rt')
    if suffix == 'zip':
        return zipfile.ZipFile(filename)
    return open(filename)
def main():
""" The main funtion. """
#logger = logging.getLogger(__name__)
args, parser = parse_cmdline()
if len(args.files) == 1:
error("Script expects at least two files. EXIT.")
if not args.snpeffType:
reg_genes = re.compile("\|(HIGH|MODERATE|LOW|MODIFIER)\|(.+?)\|")
else:
reg_genes = re.compile("\|({})\|(.+?)\|".format(args.snpeffType))
reg_ann = re.compile(";{}=(.+?);".format(args.ann))
variants = {}
allvars = {}
basenames = []
for f in args.files:
try:
fileobj = load_file(f)
except IOError:
error('Could not load file "{}". EXIT.'.format(f))
basename = os.path.basename(f)
if basename not in variants:
variants[basename] = {}
basenames.append(basename)
# delimited file handler
csv_reader_obj = csv.reader(fileobj, delimiter="\t", quoting=csv.QUOTE_NONE)
i = 0
for a in csv_reader_obj:
i += 1
if a[0][0] == "#": # comment
continue
tVariant = tuple(a[0:5])
allvars[tVariant] = allvars.get(tVariant,0) + 1
if args.snpeff:
res_genes = reg_genes.findall(a[7])
# run through SNPeff?
if not res_genes:
sys.stderr.write("{}\n".format('\t'.join(a)))
error("Could not extract genes. " + \
"Was your vcf-file {} annotated " + \
"with SnpEff? EXIT.".format(f))
if args.snpeffType:
res_genes = ['{}'.format(t[1]) for t in list(set(res_genes))]
else:
res_genes = ['{}:{}'.format(t[1], t[0]) for t in list(set(res_genes))]
res_genes = list(set(res_genes))
res_genes.sort()
res_genes = ';'.join(res_genes)
else:
res_genes = "-"
if args.qual:
ann = a[5]
else:
ann = reg_ann.search(a[7])
if not ann:
outstr = 'Could not find "{}" value:\nFile: '.format(args.ann) + \
'"{}"\nLine ({}): {}'.format(f,i,'\t'.join(a))
if args.warn:
warning(outstr)
warning('Set value to for variant in file {} to "-".'.format(f))
ann = "-"
else:
error(outstr)
else:
ann = ann.group(1)
variants[basename][tVariant] = (ann, res_genes)
success("{}: {} variants found".format(basename, len(variants[basename])))
success("Number of unique variants: {}".format(len(allvars)))
header = "CHROM\tPOS\tID\tREF\tALT\tGENES\t{}".format('\t'.join(basenames))
allvars_sorted = sorted(allvars.items(), key=operator.itemgetter(1))
allvars_sorted.reverse()
outfileobj = sys.stdout
# For printing to stdout
# SIGPIPE is throwing exception when piping output to other tools
# like head. => http://docs.python.org/library/signal.html
# use a try - except clause to handle
try:
outfileobj.write("{}\n".format(header))
for vartuple in allvars_sorted:
var = vartuple[0]
fqds = []
genes = []
for f in basenames:
try:
qd, gene = variants[f][var]
genes.append(gene)
except KeyError:
qd = "-"
fqds.append(qd)
fqds = '\t'.join(fqds)
outfileobj.write("{}\t{}\t{}\t{}\t{}\t{}\t{}\n".format(var[0],
var[1],
var[2],
var[3],
var[4],
gene,
fqds))
# flush output here to force SIGPIPE to be triggered
# while inside this try block.
sys.stdout.flush()
except BrokenPipeError:
# Python flushes standard streams on exit; redirect remaining output
# to devnull to avoid another BrokenPipeError at shut-down
devnull = os.open(os.devnull, os.O_WRONLY)
os.dup2(devnull, sys.stdout.fileno())
sys.exit(1) # Python exits with error code 1 on EPIPE
# ------------------------------------------------------
outfileobj.close()
return
if __name__ == '__main__':
sys.exit(main())
| true |
4b6c133416e94a1f8df3096f270815597638405c | Python | hrkhrkhrk/Atom | /Begineers_Selection/ABC085C_Otoshidama.py | UTF-8 | 432 | 2.640625 | 3 | [] | no_license | N, Y=map(int,input().split())
a=10000  # value of a 10000-yen bill
b=5000   # value of a 5000-yen bill
c=1000   # value of a 1000-yen bill
x=[]  # will hold a valid (i, j, k) bill-count combination, if any
# Any total made from these bills is a multiple of 1000, so other
# totals are immediately unsolvable.
if Y%c==0:
    # Try every count i of 10000-yen bills that fits into Y ...
    for i in range(int(Y/a)+1):
        Y_a=Y-a*i  # amount left after the 10000-yen bills
        # ... and every count j of 5000-yen bills that fits into the rest.
        for j in range(int(Y_a/b)+1):
            Y_b=Y_a-b*j  # remainder to be covered by 1000-yen bills
            k=int(Y_b/c)  # number of 1000-yen bills needed
            if sum([i,j,k])==N:
                x=[i,j,k]  # found a combination using exactly N bills
                break
            else:
                continue
            # NOTE(review): this break is unreachable — both branches of the
            # if/else above transfer control first, so the outer i-loop never
            # short-circuits after a match (harmless, just extra iterations).
            break
    if x==[]:
        print(*[-1,-1,-1])  # no combination of exactly N bills sums to Y
    else:
        print(*x)
else:
    print(*[-1,-1,-1])
| true |
a9e61a2e4db31d0cabfc1a75094851c14a33e5b4 | Python | johankaito/fufuka | /microblog/flask/venv/lib/python2.7/site-packages/scipy/linalg/_decomp_qz.py | UTF-8 | 8,974 | 2.765625 | 3 | [
"Apache-2.0"
] | permissive | from __future__ import division, print_function, absolute_import
import warnings
import numpy as np
from numpy import asarray_chkfinite
from .misc import LinAlgError, _datacopied
from .lapack import get_lapack_funcs
from scipy._lib.six import callable
__all__ = ['qz']
_double_precision = ['i','l','d']
def _select_function(sort, typ):
if typ in ['F','D']:
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y: (np.real(x/y) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y: (np.real(x/y) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y: (abs(x/y) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y: (abs(x/y) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
elif typ in ['f','d']:
if callable(sort):
# assume the user knows what they're doing
sfunction = sort
elif sort == 'lhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) < 0.0)
elif sort == 'rhp':
sfunction = lambda x,y,z: (np.real((x+y*1j)/z) >= 0.0)
elif sort == 'iuc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) <= 1.0)
elif sort == 'ouc':
sfunction = lambda x,y,z: (abs((x+y*1j)/z) > 1.0)
else:
raise ValueError("sort parameter must be None, a callable, or "
"one of ('lhp','rhp','iuc','ouc')")
else: # to avoid an error later
raise ValueError("dtype %s not understood" % typ)
return sfunction
def qz(A, B, output='real', lwork=None, sort=None, overwrite_a=False,
       overwrite_b=False, check_finite=True):
    """
    QZ decomposition for generalized eigenvalues of a pair of matrices.
    The QZ, or generalized Schur, decomposition for a pair of N x N
    nonsymmetric matrices (A,B) is::
        (A,B) = (Q*AA*Z', Q*BB*Z')
    where AA, BB is in generalized Schur form if BB is upper-triangular
    with non-negative diagonal and AA is upper-triangular, or for real QZ
    decomposition (``output='real'``) block upper triangular with 1x1
    and 2x2 blocks. In this case, the 1x1 blocks correspond to real
    generalized eigenvalues and 2x2 blocks are 'standardized' by making
    the corresponding elements of BB have the form::
        [ a 0 ]
        [ 0 b ]
    and the pair of corresponding 2x2 blocks in AA and BB will have a complex
    conjugate pair of generalized eigenvalues. If (``output='complex'``) or
    A and B are complex matrices, Z' denotes the conjugate-transpose of Z.
    Q and Z are unitary matrices.
    Parameters
    ----------
    A : (N, N) array_like
        2d array to decompose
    B : (N, N) array_like
        2d array to decompose
    output : {'real', 'complex'}, optional
        Construct the real or complex QZ decomposition for real matrices.
        Default is 'real'.
    lwork : int, optional
        Work array size. If None or -1, it is automatically computed.
    sort : {None, callable, 'lhp', 'rhp', 'iuc', 'ouc'}, optional
        NOTE: THIS INPUT IS DISABLED FOR NOW, IT DOESN'T WORK WELL ON WINDOWS.
        Specifies whether the upper eigenvalues should be sorted. A callable
        may be passed that, given a eigenvalue, returns a boolean denoting
        whether the eigenvalue should be sorted to the top-left (True). For
        real matrix pairs, the sort function takes three real arguments
        (alphar, alphai, beta). The eigenvalue x = (alphar + alphai*1j)/beta.
        For complex matrix pairs or output='complex', the sort function
        takes two complex arguments (alpha, beta). The eigenvalue
        x = (alpha/beta).
        Alternatively, string parameters may be used:
            - 'lhp'   Left-hand plane (x.real < 0.0)
            - 'rhp'   Right-hand plane (x.real > 0.0)
            - 'iuc'   Inside the unit circle (x*x.conjugate() <= 1.0)
            - 'ouc'   Outside the unit circle (x*x.conjugate() > 1.0)
        Defaults to None (no sorting).
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    overwrite_b : bool, optional
        Whether to overwrite data in b (may improve performance)
    check_finite : bool, optional
        If true checks the elements of `A` and `B` are finite numbers. If
        false does no checking and passes matrix through to
        underlying algorithm.
    Returns
    -------
    AA : (N, N) ndarray
        Generalized Schur form of A.
    BB : (N, N) ndarray
        Generalized Schur form of B.
    Q : (N, N) ndarray
        The left Schur vectors.
    Z : (N, N) ndarray
        The right Schur vectors.
    sdim : int, optional
        If sorting was requested, a fifth return value will contain the
        number of eigenvalues for which the sort condition was True.
    Notes
    -----
    Q is transposed versus the equivalent function in Matlab.
    .. versionadded:: 0.11.0
    Examples
    --------
    >>> from scipy import linalg
    >>> np.random.seed(1234)
    >>> A = np.arange(9).reshape((3, 3))
    >>> B = np.random.randn(3, 3)
    >>> AA, BB, Q, Z = linalg.qz(A, B)
    >>> AA
    array([[-13.40928183,  -4.62471562,   1.09215523],
           [  0.        ,   0.        ,   1.22805978],
           [  0.        ,   0.        ,   0.31973817]])
    >>> BB
    array([[ 0.33362547, -1.37393632,  0.02179805],
           [ 0.        ,  1.68144922,  0.74683866],
           [ 0.        ,  0.        ,  0.9258294 ]])
    >>> Q
    array([[ 0.14134727, -0.97562773,  0.16784365],
           [ 0.49835904, -0.07636948, -0.86360059],
           [ 0.85537081,  0.20571399,  0.47541828]])
    >>> Z
    array([[-0.24900855, -0.51772687,  0.81850696],
           [-0.79813178,  0.58842606,  0.12938478],
           [-0.54861681, -0.6210585 , -0.55973739]])
    """
    if sort is not None:
        # Disabled due to segfaults on win32, see ticket 1717.
        raise ValueError("The 'sort' input of qz() has to be None (will "
                " change when this functionality is made more robust).")
    if output not in ['real','complex','r','c']:
        raise ValueError("argument must be 'real', or 'complex'")
    if check_finite:
        a1 = asarray_chkfinite(A)
        b1 = asarray_chkfinite(B)
    else:
        a1 = np.asarray(A)
        b1 = np.asarray(B)
    a_m, a_n = a1.shape
    b_m, b_n = b1.shape
    if not (a_m == a_n == b_m == b_n):
        raise ValueError("Array dimensions must be square and agree")
    # Promote real inputs to the matching complex dtype when a complex
    # decomposition was requested.
    typa = a1.dtype.char
    if output in ['complex', 'c'] and typa not in ['F','D']:
        if typa in _double_precision:
            a1 = a1.astype('D')
            typa = 'D'
        else:
            a1 = a1.astype('F')
            typa = 'F'
    typb = b1.dtype.char
    if output in ['complex', 'c'] and typb not in ['F','D']:
        if typb in _double_precision:
            b1 = b1.astype('D')
            typb = 'D'
        else:
            b1 = b1.astype('F')
            typb = 'F'
    overwrite_a = overwrite_a or (_datacopied(a1,A))
    overwrite_b = overwrite_b or (_datacopied(b1,B))
    gges, = get_lapack_funcs(('gges',), (a1,b1))
    if lwork is None or lwork == -1:
        # get optimal work array size (lwork=-1 is the LAPACK workspace query)
        result = gges(lambda x: None, a1, b1, lwork=-1)
        # Bug fix: was ``.astype(np.int)``; the ``np.int`` alias was removed
        # in NumPy 1.24, so convert via the builtin int instead.
        lwork = int(result[-2][0].real)
    if sort is None:
        sort_t = 0
        sfunction = lambda x: None
    else:
        sort_t = 1
        sfunction = _select_function(sort, typa)
    result = gges(sfunction, a1, b1, lwork=lwork, overwrite_a=overwrite_a,
                  overwrite_b=overwrite_b, sort_t=sort_t)
    info = result[-1]
    if info < 0:
        raise ValueError("Illegal value in argument %d of gges" % -info)
    elif info > 0 and info <= a_n:
        # Bug fix: the original ``"... %d ..." % info-1`` formatted first and
        # then tried to subtract 1 from the resulting *string*, raising
        # TypeError whenever this warning path was taken.
        warnings.warn("The QZ iteration failed. (a,b) are not in Schur "
                "form, but ALPHAR(j), ALPHAI(j), and BETA(j) should be correct "
                "for J=%d,...,N" % (info-1), UserWarning)
    elif info == a_n+1:
        raise LinAlgError("Something other than QZ iteration failed")
    elif info == a_n+2:
        raise LinAlgError("After reordering, roundoff changed values of some "
                "complex eigenvalues so that leading eigenvalues in the "
                "Generalized Schur form no longer satisfy sort=True. "
                "This could also be caused due to scaling.")
    elif info == a_n+3:
        raise LinAlgError("Reordering failed in <s,d,c,z>tgsen")
    # output for real
    # AA, BB, sdim, alphar, alphai, beta, vsl, vsr, work, info
    # output for complex
    # AA, BB, sdim, alphai, beta, vsl, vsr, work, info
    if sort_t == 0:
        return result[0], result[1], result[-4], result[-3]
    else:
        return result[0], result[1], result[-4], result[-3], result[2]
| true |
d65092db2b4fc5799d415ed68074150f959e5cff | Python | balintnem3th/balintnem3th | /week-04/day-3/count_letters_test.py | UTF-8 | 1,036 | 3.28125 | 3 | [] | no_license | import unittest
from count_letters import count_letters
class TestStringMethods(unittest.TestCase):
    """Unit tests for count_letters().

    Bug fix: every test method was named ``test_fibonacci_0``, so each
    later definition shadowed the previous one and only the final
    assertion was ever executed by unittest.  Unique, descriptive names
    restore all eight cases (and no longer misleadingly say "fibonacci").
    """
    def test_empty_string(self):
        self.assertEqual(count_letters(''), {}, 'not working')
    def test_no_argument(self):
        # presumably count_letters() defaults its argument to '' —
        # confirm against the implementation in count_letters.py
        self.assertEqual(count_letters(), {}, 'not working')
    def test_single_letter(self):
        self.assertEqual(count_letters('a'), {'a': 1}, 'not working')
    def test_repeated_letter(self):
        self.assertEqual(count_letters('aa'), {'a': 2}, 'not working')
    def test_two_distinct_letters(self):
        self.assertEqual(count_letters('aab'), {'a': 2, 'b': 1}, 'not working')
    def test_interleaved_letters(self):
        self.assertEqual(count_letters('aaba'), {'a': 3, 'b': 1}, 'not working')
    def test_longer_mix(self):
        self.assertEqual(count_letters('aababababa'), {'a': 6, 'b': 4}, 'not working')
    def test_four_letter_pattern(self):
        self.assertEqual(count_letters('abcdabcdabcd'), {'a': 3, 'b': 3, 'c': 3, 'd': 3}, 'not working')
if __name__ == '__main__':
unittest.main() | true |
6d7eb93bc7e38f01f117bb661fe4dc223ec70507 | Python | tomwright01/SLOAntsRegistration | /scripts/averageFrames.py | UTF-8 | 1,245 | 2.765625 | 3 | [] | no_license | import subprocess
import argparse
import logging
import os
def main(framelist,output,verbose,antsPath):
    """
    Create an average frame from frames in framelist.

    Builds and runs the ANTs ``AverageImages`` command:
        AverageImages 2 <output> 1 <frame1> <frame2> ...

    framelist -- iterable of frame image paths
    output    -- path of the averaged output image
    verbose   -- when true, echo the command to stdout as well
    antsPath  -- directory containing the AverageImages executable
    """
    logging.info('Averaging frames with command:')
    logging.info('==============================')
    avgimgPath=os.path.join(antsPath,'AverageImages')
    frameStr = ' '.join(framelist)
    cmd = '{0} 2 {1} 1 {2}'.format(avgimgPath,output,frameStr)
    logging.info(cmd)
    logging.info('==============================')
    if verbose:
        # Bug fix: this was a Python 2 ``print`` statement, a syntax error
        # under Python 3; the call form behaves the same on both versions.
        print("Called command:{0}".format(cmd))
    # NOTE(review): shell=True with interpolated paths is shell-injection
    # prone if any path contains shell metacharacters; kept as-is for
    # behaviour compatibility with the existing pipeline.
    subprocess.check_call(cmd,shell=True,executable='/bin/bash')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Uses ANT executable AverageImage to average frames together')
parser.add_argument('framlist',help="list of frames to include in average")
parser.add_argument('output',help="Path to the output image")
parser.add_argument('-v','--verbose',action="store_true")
parser.add_argument('--exePath',help='path to the AverageImage executable',
default='/home/tom/Documents/Projects/antsbin/bin/AverageImages')
args=parser.parse_args()
main(args.framelist,args.output,args.verbose,args.exePath)
| true |
fad07abf58873dd526dcd11e014d7a663999d586 | Python | Panda3D-public-projects-archive/pandacamp | /Handouts/src/1-4 Texturing/02-Customizing.py | UTF-8 | 452 | 2.515625 | 3 | [] | no_license | from Panda import *
# Take the panda texture in pictures/panda.jpg and edit it to include
# some text or a pictures. You can do this with any model - not just the panda!
# Change the file names of pandaInvert and pandaW to your own panda skins.
# Use Gimp to create a negative and add text to the other.
# Create three pandas using the original, invert and text textures.
panda(texture = "pandaW.jpg", hpr = HPR(time, time*1.2, time*time/5))
start() | true |
66d0178b921fe7ff2717fae149a9e85ecf50f378 | Python | Amaranese/SudokuenPython | /json_example.py | UTF-8 | 769 | 3.015625 | 3 | [] | no_license | import json
import requests
def main():
    """Demonstrate JSON round-tripping in memory, on disk, and over HTTP."""
    record = {
        'username': 'james',
        'active': True,
        'subscribers': 10,
        'order_total': 39.99,
        'order_ids': ['ABC123', 'QQQ422', 'LOL300'],
    }
    print(record)

    # object -> JSON string
    encoded = json.dumps(record)
    print(encoded)

    # JSON string -> object round-trips losslessly
    assert json.loads(encoded) == record

    # write the data to a file, then read it back
    with open('test_data.json', 'w') as f:
        json.dump(record, f)
    with open('test_data.json') as f:
        assert json.load(f) == record

    # fetch JSON from a web API
    r = requests.get('https://jsonplaceholder.typicode.com/users')
    print(type(r.json()))
main()
| true |
4121632babe9b3cd0896ce5f9e0bfdf100a5d7d4 | Python | 220vma/HW18 | /3.py | UTF-8 | 298 | 2.90625 | 3 | [] | no_license | import os
def find_files(dir):
    """Recursively print the path of every non-directory entry under *dir*."""
    for entry in dir_list(dir):
        if os.path.isdir(entry):
            # descend into subdirectories instead of printing them
            find_files(entry)
        else:
            print(entry)


def dir_list(dir):
    """Generator yielding the joined path of each immediate entry of *dir*."""
    for entry_name in os.listdir(dir):
        yield os.path.join(dir, entry_name)
find_files("C:\MSI") | true |
3b7fc5c9c9266d6cdf844a4806ad2999b724a44b | Python | ChuAn0428/Big-Data-Analytics---Machine-Learning-Classification-Methods | /votes.py | UTF-8 | 6,191 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
#################################
# Author: Chu-An Tsai
# 2/23/2020
#################################
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import LogisticRegression
from sklearn import tree
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report
# Load the UCI congressional voting records; every field is read as a string.
dataset = np.loadtxt("house-votes-84.data", delimiter=',', dtype=str)
newdataset = dataset.copy()
# Encode the votes numerically: 'y' -> 1, 'n' -> -1, anything else
# (missing '?') -> 0.  Column 0 holds the party label:
# republican -> 1, democrat -> 2.
for i in range(len(newdataset)):
    for j in range(1, len(newdataset.T)):
        if (newdataset[i][j] == 'y'):
            newdataset[i][j] = '1'
        elif (newdataset[i][j] == 'n'):
            newdataset[i][j] = '-1'
        else:
            newdataset[i][j] = '0'
    if newdataset[i][0] == 'republican':
        newdataset[i][0] = 1
    else:
        newdataset[i][0] = 2
newdataset = newdataset.astype(int)
# Features are the 16 vote columns, target is the party label.
x = newdataset[:,1:17].copy()
y = newdataset[:,0].copy()
x_train, x_test, y_train, y_test = train_test_split(x, y, train_size=0.7, random_state=0)
# NOTE(review): every confusion_matrix below is called as (pred, y_test),
# which swaps sklearn's documented (y_true, y_pred) order — so rows are
# *predicted* labels.  calculation() indexes [predicted][actual] to match.
# Decision Tree
dtree = tree.DecisionTreeClassifier(max_depth=5, min_samples_leaf=3).fit(x_train, y_train)
dtree_pred = dtree.predict(x_test)
con_dtree = confusion_matrix(dtree_pred, y_test)
acc_dtree = accuracy_score(y_test, dtree_pred)
# Naive Bayes
nb_gnb = GaussianNB()
nb_pred = nb_gnb.fit(x_train, y_train).predict(x_test)
con_nb = confusion_matrix(nb_pred, y_test)
acc_nb = accuracy_score(y_test, nb_pred)
# Logistic Regression
lr = LogisticRegression(random_state=0, max_iter=1000).fit(x_train, y_train)
lr_pred = lr.predict(x_test)
con_lr = confusion_matrix(lr_pred, y_test)
acc_lr = accuracy_score(y_test, lr_pred)
# KNN
knn = KNeighborsClassifier(n_neighbors=11).fit(x_train, y_train)
knn_pred = knn.predict(x_test)
con_knn = confusion_matrix(knn_pred, y_test)
acc_knn = accuracy_score(y_test, knn_pred)
def calculation(con_mat, y_true):
    """Derive per-class and averaged metrics from a 2x2 confusion matrix.

    *con_mat* is indexed [predicted][actual]; *y_true* only supplies the
    total sample count.  Returns twelve values rounded to 3 decimals:
    (accuracy, precision, recall, F1) for class 1, the same four for
    class 2, then the four macro averages.
    """
    tp1, fp1 = con_mat[0][0], con_mat[0][1]   # row 0: predicted class 1
    fp2, tp2 = con_mat[1][0], con_mat[1][1]   # row 1: predicted class 2
    predicted1 = tp1 + fp1
    predicted2 = fp2 + tp2
    actual1 = tp1 + fp2
    actual2 = fp1 + tp2
    n_classes = float(len(con_mat))

    # precision per class; the class-specific "accuracy" is defined
    # identically to precision in this report
    prec1 = tp1 / predicted1
    prec2 = tp2 / predicted2
    acc1, acc2 = prec1, prec2

    recall1 = tp1 / actual1
    recall2 = tp2 / actual2

    # F1 via the 2*TP / (actual + predicted) identity
    f1_1 = 2 * tp1 / (actual1 + predicted1)
    f1_2 = 2 * tp2 / (actual2 + predicted2)

    # macro averages; overall accuracy uses the total number of samples
    acc_avg = (tp1 + tp2) / float(len(y_true))
    prec_avg = (prec1 + prec2) / n_classes
    recall_avg = (recall1 + recall2) / n_classes
    f1_avg = (f1_1 + f1_2) / n_classes

    return (round(acc1, 3), round(prec1, 3), round(recall1, 3), round(f1_1, 3),
            round(acc2, 3), round(prec2, 3), round(recall2, 3), round(f1_2, 3),
            round(acc_avg, 3), round(prec_avg, 3), round(recall_avg, 3),
            round(f1_avg, 3))
acc1,prec1,recall1,fscore1,acc2,prec2,recall2,fscore2,acc_average,prec_average,recall_average,fscore_average = calculation(con_dtree, y_test)
print('\nIndicate class:')
print('Republican -> 1')
print('Democrat -> 2')
print("\n1. Decision Trees:")
print("Confusion Matrix:")
print(' Actual')
print(' 1 2')
print('predicted 1',con_dtree[0])
print(' 2',con_dtree[1])
a = [acc1,prec1,recall1,fscore1]
b = [acc2,prec2,recall2,fscore2]
d = [acc_average,prec_average,recall_average,fscore_average]
print('\nClassification Report:')
print('Class: accuracy | precision | recall | f1-score')
print(' 1 :',a)
print(' 2 :',b)
print(' Avg:',d)
print("Accuracy:", round(acc_dtree,3))
acc1,prec1,recall1,fscore1,acc2,prec2,recall2,fscore2,acc_average,prec_average,recall_average,fscore_average = calculation(con_nb, y_test)
print("\n2. Naive Bayes:")
print("Confusion Matrix:")
print(' Actual')
print(' 1 2')
print('predicted 1',con_nb[0])
print(' 2',con_nb[1])
a = [acc1,prec1,recall1,fscore1]
b = [acc2,prec2,recall2,fscore2]
d = [acc_average,prec_average,recall_average,fscore_average]
print('\nClassification Report:')
print('Class: accuracy | precision | recall | f1-score')
print(' 1 :',a)
print(' 2 :',b)
print(' Avg:',d)
print("Accuracy:", round(acc_nb,3))
acc1,prec1,recall1,fscore1,acc2,prec2,recall2,fscore2,acc_average,prec_average,recall_average,fscore_average = calculation(con_lr, y_test)
print("\n3. Logistic Regression:")
print("Confusion Matrix:")
print(' Actual')
print(' 1 2')
print('predicted 1',con_lr[0])
print(' 2',con_lr[1])
a = [acc1,prec1,recall1,fscore1]
b = [acc2,prec2,recall2,fscore2]
d = [acc_average,prec_average,recall_average,fscore_average]
print('\nClassification Report:')
print('Class: accuracy | precision | recall | f1-score')
print(' 1 :',a)
print(' 2 :',b)
print(' Avg:',d)
print("Accuracy:", round(acc_lr,3))
acc1,prec1,recall1,fscore1,acc2,prec2,recall2,fscore2,acc_average,prec_average,recall_average,fscore_average = calculation(con_knn, y_test)
print("\n4. KNN:")
print("Confusion Matrix:")
print(' Actual')
print(' 1 2')
print('predicted 1',con_knn[0])
print(' 2',con_knn[1])
a = [acc1,prec1,recall1,fscore1]
b = [acc2,prec2,recall2,fscore2]
d = [acc_average,prec_average,recall_average,fscore_average]
print('\nClassification Report:')
print('Class: accuracy | precision | recall | f1-score')
print(' 1 :',a)
print(' 2 :',b)
print(' Avg:',d)
print("Accuracy:", round(acc_knn,3))
| true |
4e47d76e5d3ed0c93696a91e46da726791de7dd1 | Python | dspani/elemental_fighters | /clientGUI.py | UTF-8 | 50,392 | 2.671875 | 3 | [] | no_license | # Team Sysadmins
# Version 0.6
# Date: 12/8/2020
# Jayden Stipek
# Duncan Spani
# Steve Foote
# Lucas Bradley
import socket
from threading import Thread
from tkinter import *
import pygame
from pygame.locals import *
import platform
import os
import sys
from subprocess import call
import queue
import random
# Text encoding used for all socket traffic between client and server.
FORMAT = 'utf-8'
# Fixed width (bytes) of the space-padded length header sent before each message.
BUFFER_SIZE = 8
# Used by Pygame thread right now
count = 0
"""
Class ClientGUI handles the client side logic for the game - including the
game lobby GUI and logic along with the Game thread and logic.
Connects and communicates with class ServerGUI. Must have connection in order
to launch Lobby and play the game.
"""
# Set GUI for client-side lobby
class ClientGUI:
# Setting up functionality
    def __init__(self):
        """Build the lobby window (hidden) plus the login dialog, then block
        in the Tk main loop. The lobby is revealed by loginConnect() once the
        user has signed in and the socket is connected."""
        # Set up Networking Base
        self.port = 5050
        self.host = "localhost"
        # remote server IP
        # self.host = "64.227.48.38"
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # 1-second timeout so the receive thread can poll and still notice shutdown
        self.sock.settimeout(1)
        # default name
        self.name = "Anonymous"
        # name of current active game
        self.activeGame = ""
        # name of games that can be joined
        self.availableGames = []
        # checks that the gameslist is populated with something
        self.gamesInList = False
        # checks if player is currently in a game
        self.inActiveGame = False
        # queues used by active game for receiving messages
        # (written by the receive thread, drained by the pygame thread)
        self.startGameQueue = queue.Queue()
        self.gameEndQueue = queue.Queue()
        self.gameActionQueue = queue.Queue()
        self.gameStatsQueue = queue.Queue()
        # root window - hidden until sign-in and connection
        self.window = Tk()
        self.window.protocol('WM_DELETE_WINDOW', self.onExit)
        self.window.title("Fight Game Lobby")
        self.window.configure(width = 500, height = 600)
        # Game List Box holds list of created games
        self.gameListLabel = Label(self.window, text = "Game List", pady = 5)
        self.gameListLabel.place(relwidth = 1)
        self.gameList = Listbox(self.window, font = ('Arial', 14, 'bold'), selectmode = SINGLE, width = 20, height = 2)
        self.gameList.place(relwidth = 1, relheight = .3, rely = .05)
        self.gameListScroll = Scrollbar(self.gameList)
        self.gameListScroll.pack(side = RIGHT, fill = Y)
        # Join Game Button joins available selected game
        self.joinBtn = Button(self.window, text = "Join Game", command = self.joinGame)
        self.joinBtn.place(relx = .44, rely = .36)
        # Create Game Button launch - launches the create game window
        self.createBtn = Button(self.window, text = "Create New Game", command = self.createWindow)
        self.createBtn.place(relx = .75, rely = .36)
        # Chat room area (read-only Text; receive() toggles state to append)
        self.gameListLabel = Label(self.window, text = "Chat Room")
        self.gameListLabel.place(relwidth = 1, rely = .43)
        self.chatRoomTxt = Text(self.window, font = ('System', 14, 'bold'), width = 20, height = 2)
        self.chatRoomTxt.place(relwidth = 1, relheight = .3, rely = .48)
        self.chatRoomTxt.config(state=DISABLED)
        self.chatScroll = Scrollbar(self.chatRoomTxt)
        self.chatScroll.pack(side = RIGHT, fill = Y)
        # Message bar for entering chat messages
        self.messageBar = Entry(self.window)
        self.messageBar.place(relwidth = .7, relx =.04, rely = .81)
        self.messageBar.focus()
        # Send chat Message Button
        self.sendBtn = Button(self.window, text = "Send Message", command = lambda: self.sendMessage(self.messageBar.get()))
        self.sendBtn.place(relx = .8, rely = .8)
        # Welcome label with Name (filled in after login)
        self.welcomeLabel = Label(self.window, text = "", font = ('Arial', 14, 'bold'))
        self.welcomeLabel.place(relwidth = 1, rely = .9)
        # Hide main lobby window until login
        self.window.withdraw()
        # Set up log-in window
        self.login = Toplevel()
        self.login.protocol('WM_DELETE_WINDOW', self.onExit)
        self.login.title("Welcome to the Fight Game")
        self.login.configure(width=300, height=100)
        self.userLoginMSG = Label(self.login, text = "Enter User Name to Connect", justify = CENTER)
        self.userLoginMSG.place(relx = .24, rely = .05)
        # Text input for user name
        self.userName = Entry(self.login)
        self.userName.place(relheight = .2, relwidth = .5, relx = .25, rely = .3)
        self.userName.focus()
        # Login Button - which will initialize connect
        self.loginBtn = Button(self.login, text = "Login", command = self.loginConnect)
        self.loginBtn.place(relx = .45, rely = .6)
        # blocks here until the application exits
        self.window.mainloop()
# Logging in and connecting to server
    def loginConnect(self):
        """Connect to the server using the entered user name, then swap the
        login dialog for the lobby window and start the receive thread."""
        # cap the name at 8 characters
        userName = self.userName.get()[0:8]
        # NOTE(review): the locally stored name is stripped but the name sent
        # below is not - presumably both should match; confirm with the
        # server's expectations before changing what is transmitted.
        self.name = userName.strip()
        # Make server connection
        self.sock.connect((self.host, self.port))
        # send username
        userName = userName.encode(FORMAT)
        self.sock.send(userName)
        self.welcomeLabel.config(text = "Welcome to Elemental Fighters " + str(self.name))
        # Remove login window
        self.login.destroy()
        # Reveal main window
        self.window.deiconify()
        # Set thread for receiving message from server
        receiveThread = Thread(target=self.receive)
        receiveThread.start()
# Create the window for a specific Game
    def createWindow(self):
        """Open the "create a new game" dialog: a name entry, a player-count
        dropdown (only 2 supported) and a button that submits both to
        createNewGame()."""
        # Create Game window
        self.createGame = Toplevel()
        self.createGame.title("Create a New Game")
        self.createGame.configure(width=300, height=100)
        # game name
        self.newgameNameLbl = Label(self.createGame, text = "Game name: ", justify = LEFT)
        self.newgameNameLbl.place(relx = .1, rely = .05)
        self.gameName = Entry(self.createGame)
        self.gameName.place(relheight = .2, relwidth = .3, relx = .1, rely = .25)
        self.gameName.focus()
        # Number of Players (dropdown offers only 2; default 2)
        self.newgameNumberLbl = Label(self.createGame, text = "Players: ", justify = RIGHT)
        self.newgameNumberLbl.place(relx = .7, rely = .05)
        self.numPlayers = IntVar(self.createGame)
        self.numPlayers.set(2)
        self.playerNumOption = OptionMenu(self.createGame, self.numPlayers, 2)
        self.playerNumOption.place(relx = .7, rely = .25)
        # create Game - passes game name and number of players to server
        self.createNewGameBtn = Button(self.createGame, text = "Create Game",
            command = lambda: self.createNewGame(self.gameName.get(), self.numPlayers.get()))
        self.createNewGameBtn.place(relx = .1, rely = .62)
# function called by Join button - used to spawn new thread for game
def gameWindow(self):
# launches gam GUI in new thread
gameThread = Thread(target=self.launchGameThread)
gameThread.start()
"""
All game GUI related logic is launched from this function
Game loop logic is handled by another function
Messages are received into this function through Queues
Messages are sent utilizing self.send...
"""
# Launches game in separate window
def launchGameThread(self):
# set current client to an active game
self.inActiveGame = True
# disables the join / create game buttons
self.disableButtons()
pygame.init()
# Sprite location directories
sp1 = "sprites/sp1/" # sprite 1
sp2 = "sprites/sp2/" # sprite 2
sp3 = "sprites/sp3/" # sprite 3
sp4 = "sprites/sp4/" # sprite 4
sp5 = "sprites/sp5/" # sprite 5
BACKGROUND = "sprites/bg.png" # background
# set main pygame window and size
colors = {"white" : (255,255,255), "red" : (255,40,40), "yellow" : (255,255,0), "green" : (0,255,0), "black" : (0,0,0), "blue" : (0,0,255)}
tile = 'tile'
end = '.png'
win = pygame.display.set_mode((500,500))
pygame.display.set_caption("Elemental Fighters")
pygame.event.set_blocked(pygame.MOUSEMOTION)
# player frames corresponding to move
Drax = {"Idle": ["000"],
"attack": ["016","017","010","004","005","006","007","008","009"],
"dodge": ["010","011"],
"block": ["017"],
"special": ["016","017","010","004","005","006","007","008","009"],
"death": ["016","015","013","012"],
"health": 13,
"damage": 2,
"speed": 3,
"magic": "fire"
}
Scorpio = {"Idle": ["000"],
"attack": ["000","001","002","003","004","005","006","007","008","009","010"],
"dodge": ["000","005","006","007","000"],
"block": ["009","010"],
"special": ["011","012","013","014","015","016","017","018"],
"death": [],
"health": 14,
"damage": 1,
"speed": 4,
"magic": "earth"
}
Xion = {"Idle": ["022"],
"attack": ["000","001","002","003","004","005","006","007","008","009","010","011","012","013","014","015","016","017","018","019","020","021","022","023","024"],
"dodge": ["000","001","002","003","011","012","013","014"],
"block": ["016","017","018","019","020","021","022","024"],
"special": ["000","001","002","003","004","005","006","007","008","009","010","011","012","013","014","015","016","017","018","019","020","021","022","023","024","016","017","018","019","020","021","022","024"],
"death": [],
"health": 13,
"damage": 1,
"speed": 5,
"magic": "water"
}
Abdul = {"Idle": ["022"],
"attack": ["001","002","003","004","005","006","007","008"],
"dodge": ["003","004","005","006","007","008"],
"block": ["010","021"],
"special": ["014","015","016","017","018"],
"death": ["011","008","012"],
"health": 12,
"damage": 3,
"speed": 2,
"magic": "water"
}
Link = {"Idle": ["000"],
"attack": ["005","006","007","008","009","010","011"],
"dodge": ["003"],
"block": ["005","006","007","008"],
"special": ["008","009","010","005","006","007","008","009","010","005","006","007"],
"death": [],
"health": 15,
"damage": 1,
"speed": 1,
"magic": "fire"
}
global fighters
fighters = {
"Drax": Drax,
"Abdul": Abdul,
"Link": Link,
"Xion": Xion,
"Scorpio": Scorpio
}
# load png of attack for char 1
attack1 = [pygame.image.load(sp1+'tile000.png'), pygame.image.load(sp1+'tile001.png'), pygame.image.load(sp1+'tile002.png'), pygame.image.load(sp1+'tile003.png'), pygame.image.load(sp1+'tile004.png'), pygame.image.load(sp1+'tile005.png'), pygame.image.load(sp1+'tile006.png'), pygame.image.load(sp1+'tile007.png'), pygame.image.load(sp1+'tile008.png'), pygame.image.load(sp1+'tile009.png'), pygame.image.load(sp1+'tile010.png'), pygame.image.load(sp1+'tile011.png'), pygame.image.load(sp1+'tile012.png'), pygame.image.load(sp1+'tile013.png'), pygame.image.load(sp1+'tile014.png'), pygame.image.load(sp1+'tile015.png'), pygame.image.load(sp1+'tile016.png'), pygame.image.load(sp1+'tile017.png'), pygame.image.load(sp1+'tile018.png'), pygame.image.load(sp1+'tile019.png'), pygame.image.load(sp1+'tile020.png'), pygame.image.load(sp1+'tile021.png'), pygame.image.load(sp1+'tile022.png'), pygame.image.load(sp1+'tile023.png'), pygame.image.load(sp1+'tile024.png'),]
# load png of attack for char 2
attack2 = [pygame.image.load(sp2+'tile000.png'), pygame.image.load(sp2+'tile001.png'), pygame.image.load(sp2+'tile002.png'), pygame.image.load(sp2+'tile002.png'), pygame.image.load(sp2+'tile004.png'), pygame.image.load(sp2+'tile005.png'), pygame.image.load(sp2+'tile006.png'), pygame.image.load(sp2+'tile007.png'), pygame.image.load(sp2+'tile008.png'), pygame.image.load(sp2+'tile009.png'), pygame.image.load(sp2+'tile010.png'), pygame.image.load(sp2+'tile011.png'), pygame.image.load(sp2+'tile012.png'), pygame.image.load(sp2+'tile013.png'), pygame.image.load(sp2+'tile014.png'), pygame.image.load(sp2+'tile015.png'), pygame.image.load(sp2+'tile016.png'), pygame.image.load(sp2+'tile017.png'), pygame.image.load(sp2+'tile018.png'), pygame.image.load(sp2+'tile019.png'), pygame.image.load(sp2+'tile020.png'), pygame.image.load(sp2+'tile021.png'), pygame.image.load(sp2+'tile022.png'), pygame.image.load(sp2+'tile023.png'), pygame.image.load(sp2+'tile024.png'),]
# load png of attack for char 3
attack3 = [pygame.image.load(sp3+'tile000.png'), pygame.image.load(sp3+'tile001.png'), pygame.image.load(sp3+'tile002.png'), pygame.image.load(sp3+'tile003.png'), pygame.image.load(sp3+'tile004.png'), pygame.image.load(sp3+'tile005.png'), pygame.image.load(sp3+'tile006.png'), pygame.image.load(sp3+'tile007.png'), pygame.image.load(sp3+'tile008.png'), pygame.image.load(sp3+'tile009.png'), pygame.image.load(sp3+'tile010.png'), pygame.image.load(sp3+'tile011.png'), pygame.image.load(sp3+'tile012.png'), pygame.image.load(sp3+'tile013.png'), pygame.image.load(sp3+'tile014.png'), pygame.image.load(sp3+'tile015.png'), pygame.image.load(sp3+'tile016.png'), pygame.image.load(sp3+'tile017.png'), pygame.image.load(sp3+'tile018.png'), pygame.image.load(sp3+'tile019.png'), pygame.image.load(sp3+'tile020.png'), pygame.image.load(sp3+'tile021.png'), pygame.image.load(sp3+'tile022.png'), pygame.image.load(sp3+'tile023.png'), pygame.image.load(sp3+'tile024.png'),]
# load png of attack for char 4
attack4 = [pygame.image.load(sp4+'tile000.png'), pygame.image.load(sp4+'tile001.png'), pygame.image.load(sp4+'tile002.png'), pygame.image.load(sp4+'tile003.png'), pygame.image.load(sp4+'tile004.png'), pygame.image.load(sp4+'tile005.png'), pygame.image.load(sp4+'tile006.png'), pygame.image.load(sp4+'tile007.png'), pygame.image.load(sp4+'tile008.png'), pygame.image.load(sp4+'tile009.png'), pygame.image.load(sp4+'tile010.png'), pygame.image.load(sp4+'tile011.png'), pygame.image.load(sp4+'tile012.png'), pygame.image.load(sp4+'tile013.png'), pygame.image.load(sp4+'tile014.png'), pygame.image.load(sp4+'tile015.png'), pygame.image.load(sp4+'tile016.png'), pygame.image.load(sp4+'tile017.png'), pygame.image.load(sp4+'tile018.png'), pygame.image.load(sp4+'tile019.png'), pygame.image.load(sp4+'tile020.png'), pygame.image.load(sp4+'tile021.png'), pygame.image.load(sp4+'tile022.png'), pygame.image.load(sp4+'tile023.png'), pygame.image.load(sp4+'tile024.png'),]
# load png of attack for char 5
attack5 = [pygame.image.load(sp5+'tile000.png'), pygame.image.load(sp5+'tile001.png'), pygame.image.load(sp5+'tile002.png'), pygame.image.load(sp5+'tile003.png'), pygame.image.load(sp5+'tile004.png'), pygame.image.load(sp5+'tile005.png'), pygame.image.load(sp5+'tile006.png'), pygame.image.load(sp5+'tile007.png'), pygame.image.load(sp5+'tile008.png'), pygame.image.load(sp5+'tile009.png'), pygame.image.load(sp5+'tile010.png'), pygame.image.load(sp5+'tile011.png'), pygame.image.load(sp5+'tile012.png'), pygame.image.load(sp5+'tile013.png'), pygame.image.load(sp5+'tile014.png'), pygame.image.load(sp5+'tile015.png'), pygame.image.load(sp5+'tile016.png'), pygame.image.load(sp5+'tile017.png'), pygame.image.load(sp5+'tile018.png'), pygame.image.load(sp5+'tile019.png'), pygame.image.load(sp5+'tile020.png'), pygame.image.load(sp5+'tile021.png'), pygame.image.load(sp5+'tile022.png'), pygame.image.load(sp5+'tile023.png'), pygame.image.load(sp5+'tile024.png'),]
char1 = pygame.image.load(sp1+'tile024.png')
char2 = pygame.image.load(sp2+'tile001.png')
char3 = pygame.image.load(sp3+'tile024.png')
char4 = pygame.image.load(sp4+'tile024.png')
char5 = pygame.image.load(sp5+'tile001.png')
background = pygame.image.load(BACKGROUND)
win.blit(background, (0, 0))
clock = pygame.time.Clock()
# starting pos for characters
p1_x = 000
p1_y = 400
p2_x = 400
p2_y = 400
width = 96
height = 96
MAX_HEALTH = 100
TEXT_COLOR = (20,20,20)
FONT = pygame.font.Font(None, 30)
ACTION_FONT = pygame.font.Font(None, 20)
# Flips the sprite for player two
def flip_sprite(sprite, character):
f_character = pygame.transform.flip(character, True, False)
new_sprite = []
for image in sprite:
new_sprite.append(pygame.transform.flip(image, True, False))
return new_sprite, f_character
# Shows the health of the players
def show_health(health1, health2):
p1_color = "green"
p2_color = "green"
if health1/p1_MAX_HEALTH > .50:
p1_color = "green"
elif 50 >= health1/p1_MAX_HEALTH > .25:
p1_color = "yellow"
elif health1/p1_MAX_HEALTH <= .25:
p1_color = "red"
if health2/p2_MAX_HEALTH > .50:
p2_color = "green"
elif 50 >= health2/p2_MAX_HEALTH > .25:
p2_color = "yellow"
elif health2/p2_MAX_HEALTH <= .25:
p2_color = "red"
pygame.draw.rect(win, colors["white"], pygame.Rect(0, 0, 200, 20))
pygame.draw.rect(win, colors["black"], pygame.Rect(2, 2, 196, 16))
pygame.draw.rect(win, colors[p1_color], pygame.Rect(2, 2, (health1/p1_MAX_HEALTH)*196, 16))
pygame.draw.rect(win, colors["white"], pygame.Rect(300, 0, 200, 20))
pygame.draw.rect(win, colors["black"], pygame.Rect(302, 2, 196, 16))
pygame.draw.rect(win, colors[p2_color], pygame.Rect(302, 2, (health2/p2_MAX_HEALTH)*196, 16))
pygame.display.flip()
# Selecting Character from the list of 5 different characters
        def character_select():
            # Draw the five idle fighters with their names and number keys,
            # then block on the event queue until the player presses 1-5.
            # Returns (mirrored frames, mirrored idle image, fighter name)
            # for the chosen fighter, and notifies the server of the pick.
            win.blit(background,(0,0))
            pos = [(0,50),(100,50),(200,50),(300,50),(400,50)] # x,y positions for character select
            characters = [char1, char2, char3, char4, char5] # idle characters
            names = ["Drax", "Scorpio", "Xion", "Abdul", "Link"] # character names
            for i in range(5):
                win.blit(characters[i],pos[i])
                pygame.display.update()
                # number key label below each fighter
                text = FONT.render(str(i + 1), True, TEXT_COLOR)
                textR = text.get_rect()
                textR.center = (pos[i][0] + 50, pos[i][1] + 110)
                # fighter name above each fighter
                name = FONT.render(names[i], True, TEXT_COLOR)
                nameR = name.get_rect()
                nameR.center = (pos[i][0] + 50, pos[i][1] - 10)
                win.blit(name, nameR)
                win.blit(text, textR)
            text = FONT.render("PRESS KEY TO SELECT FIGHTER", True, TEXT_COLOR)
            textR = text.get_rect()
            textR.center = (250, 250)
            win.blit(text, textR)
            pygame.display.update()
            # blocking key loop: a red rectangle highlights the chosen fighter
            while True:
                event = pygame.event.wait()
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_1:
                        pygame.draw.rect(win, colors["red"], pygame.Rect(0, 50, 100, 150), 3)
                        pygame.display.flip()
                        player = attack1
                        player_idle = char1
                        name = names[0]
                        break
                    elif event.key == pygame.K_2:
                        pygame.draw.rect(win, colors["red"], pygame.Rect(100, 50, 100, 150), 3)
                        pygame.display.flip()
                        player = attack2
                        player_idle = char2
                        name = names[1]
                        break
                    elif event.key == pygame.K_3:
                        pygame.draw.rect(win, colors["red"], pygame.Rect(200, 50, 100, 150), 3)
                        pygame.display.flip()
                        player = attack3
                        player_idle = char3
                        name = names[2]
                        break
                    elif event.key == pygame.K_4:
                        pygame.draw.rect(win, colors["red"], pygame.Rect(300, 50, 100, 150), 3)
                        pygame.display.flip()
                        player = attack4
                        player_idle = char4
                        name = names[3]
                        break
                    elif event.key == pygame.K_5:
                        pygame.draw.rect(win, colors["red"], pygame.Rect(400, 50, 100, 150), 3)
                        pygame.display.flip()
                        player = attack5
                        player_idle = char5
                        name = names[4]
                        break
            # send necessary information to server for starting the game
            self.sendStartGame(name)
            # mirror the sprites so our fighter faces the opponent
            new_player, new_player_idle = flip_sprite(player, player_idle)
            return new_player, new_player_idle, name
# What happens when you choose your attack
def attack(player, player2, name, animation):
if name == "Drax":
frames = Drax[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp1
for frame in frames:
win.blit(background, (0, 0))
win.blit(player2, (p2_x, p2_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(pygame.transform.flip(f, True, False), (p1_x, p1_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Scorpio":
frames = Scorpio[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp2
for frame in frames:
win.blit(background, (0, 0))
win.blit(player2, (p2_x, p2_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(pygame.transform.flip(f, True, False), (p1_x, p1_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Xion":
frames = Xion[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp3
for frame in frames:
win.blit(background, (0, 0))
win.blit(player2, (p2_x, p2_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(pygame.transform.flip(f, True, False), (p1_x, p1_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Abdul":
frames = Abdul[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp4
for frame in frames:
win.blit(background, (0, 0))
win.blit(player2, (p2_x, p2_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(pygame.transform.flip(f, True, False), (p1_x, p1_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Link":
frames = Link[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp5
for frame in frames:
win.blit(background, (0, 0))
win.blit(player2, (p2_x, p2_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(pygame.transform.flip(f, True, False), (p1_x, p1_y))
pygame.display.update()
pygame.time.wait(int(timing))
# What happens when you get the opponents attack from the server
def p2_attack(player2, player, name, animation):
# keep idle frame p1_x, p1_y
# keep animation frame p2_x, p2_y
if animation == "":
return
if name == "Drax":
frames = Drax[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp1
for frame in frames:
# idle
win.blit(background, (0, 0))
win.blit(player, (p1_x, p1_y))
show_health(player_health, player2_health)
pygame.display.update()
# animation
f = pygame.image.load(path + tile + frame + end)
win.blit(f, (p2_x, p2_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Scorpio":
frames = Scorpio[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp2
for frame in frames:
win.blit(background, (0, 0))
win.blit(player, (p1_x, p1_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(f, (p2_x, p2_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Xion":
frames = Xion[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp3
for frame in frames:
win.blit(background, (0, 0))
win.blit(player, (p1_x, p1_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(f, (p2_x, p2_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Abdul":
frames = Abdul[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp4
for frame in frames:
win.blit(background, (0, 0))
win.blit(player, (p1_x, p1_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(f, (p2_x, p2_y))
pygame.display.update()
pygame.time.wait(int(timing))
elif name == "Link":
frames = Link[animation]
if len(frames) != 0:
timing = (500 / len(frames))
else:
timing = 1000
path = sp5
for frame in frames:
win.blit(background, (0, 0))
win.blit(player, (p1_x, p1_y))
show_health(player_health, player2_health)
pygame.display.update()
f = pygame.image.load(path + tile + frame + end)
win.blit(f, (p2_x, p2_y))
pygame.display.update()
pygame.time.wait(int(timing))
# Displaying Wait
def display_wait():
name = FONT.render("Waiting for other player...", True, TEXT_COLOR)
nameR = name.get_rect()
nameR.center = (250, 250)
win.blit(name, nameR)
pygame.display.update()
# Displaying Lose
def display_lose():
win.blit(background,(0,0))
name = FONT.render("You Lose!", True, TEXT_COLOR)
nameR = name.get_rect()
nameR.center = (250, 250)
win.blit(name, nameR)
pygame.display.update()
# Displaying Win
def display_win():
win.blit(background,(0,0))
name = FONT.render("You Win!", True, TEXT_COLOR)
nameR = name.get_rect()
nameR.center = (250, 250)
win.blit(name, nameR)
pygame.display.update()
# Displaying Actions
def display_actions():
box_pos = [(20, 80, 70, 40), (120, 80, 70, 40), (20, 160, 70, 40), (120, 160, 70, 40), (65, 240, 80, 40)]
action_cen = [(55, 100), (155, 100), (55, 185), (155, 185), (105, 260)]
key_pos = [(80, 140), (180, 140), (80, 220), (180, 220), (95, 300)]
actions = ['Attack', 'Dodge', 'Block', 'Magic', 'Quit']
keys = ['Q', 'W', 'E', 'R', 'ENTER']
for i in range(5):
if actions[i] == "Quit":
pygame.draw.rect(win, colors["red"], pygame.Rect(box_pos[i]), 2)
name = FONT.render(actions[i], True, colors["red"])
nameR = name.get_rect()
nameR.center = action_cen[i]
key = FONT.render(keys[i], True, colors["red"])
keyR = name.get_rect()
keyR.center = key_pos[i]
win.blit(name, nameR)
win.blit(key, keyR)
else:
pygame.draw.rect(win, colors["black"], pygame.Rect(box_pos[i]), 2)
name = FONT.render(actions[i], True, TEXT_COLOR)
nameR = name.get_rect()
nameR.center = action_cen[i]
key = FONT.render(keys[i], True, TEXT_COLOR)
keyR = name.get_rect()
keyR.center = key_pos[i]
win.blit(name, nameR)
win.blit(key, keyR)
pygame.display.flip()
def display_magic(name,p1):
if p1:
coor = (p1_x, p1_y)
else:
coor = (p2_x, p2_y)
radius = [0,100,200,300,400,500,600,700]
color = "red"
if name == "Drax":
color = "red"
elif name == "Scorpio":
color = "green"
elif name == "Xion":
color = "blue"
elif name == "Abdul":
color = "blue"
elif name == "Link":
color = "red"
for i in range(8):
pygame.draw.circle(win, colors[color],coor, radius[i])
pygame.display.flip()
pygame.time.wait(100)
# Obtaining the players health from the dictonary
def get_player_health(name):
if name == "Drax":
return Drax["health"]
elif name == "Scorpio":
return Scorpio["health"]
elif name == "Xion":
return Xion["health"]
elif name == "Abdul":
return Abdul["health"]
elif name == "Link":
return Link["health"]
# Obtaining Opponent players health from the dictonary
def getOtherPlayerInfo(name):
if name == "Drax":
return attack1, char1
elif name == "Scorpio":
return attack2, char2
elif name == "Xion":
return attack3, char3
elif name == "Abdul":
return attack4, char4
elif name == "Link":
return attack5, char5
def display_draw():
win.blit(background, (0, 0))
name = FONT.render("Draw!", True, TEXT_COLOR)
nameR = name.get_rect()
nameR.center = (250, 250)
win.blit(name, nameR)
pygame.display.update()
def display_action_text(p1_name, players_move, p2_name, action):
p2text = FONT.render(p2_name+" used "+action, True, TEXT_COLOR)
p2textr = p2text.get_rect()
p2textr.center = (350, 250)
win.blit(p2text, p2textr)
p1text = FONT.render(p1_name+" used "+players_move, True, TEXT_COLOR)
p1textr = p1text.get_rect()
p1textr.center = (350, 200)
win.blit(p1text, p1textr)
pygame.display.update()
pygame.time.wait(500)
        # DEFAULT PLAYER ANIMATIONS AND NAME FOR FUNCTIONS
        p1_player, p1_player_idle, p1_name = character_select()
        # wait to receive message from other player
        # NOTE(review): this is a tight busy-wait on the queue (no sleep);
        # consider startGameQueue.get(timeout=...) if CPU use matters.
        waitingForOtherPlayerChoices = True
        while waitingForOtherPlayerChoices:
            if not self.startGameQueue.empty():
                # receive player 2 default information
                p2_name = self.startGameQueue.get()
                p2_player, p2_player_idle = getOtherPlayerInfo(p2_name)
                waitingForOtherPlayerChoices = False
        p1_MAX_HEALTH = get_player_health(p1_name)
        player_health = p1_MAX_HEALTH
        p2_MAX_HEALTH = get_player_health(p2_name)
        player2_health = p2_MAX_HEALTH # subject to change
        win.blit(background,(0,0))
        # send/recieve player choice
        # turn == True: it is our turn to pick a move; False: waiting on opponent
        run = True
        turn = True
        players_move = ""
        while run:
            for event in pygame.event.get():
                # end game tasks
                if event.type == QUIT:
                    # notify other player of quit
                    self.sendGameActions("QUIT")
                    self.exitGameTasks()
                    run = False
                    pygame.quit()
                    sys.exit()
            # show initial screen
            show_health(player_health, player2_health) # change to local and opponent health values
            win.blit(p1_player_idle, (p1_x, p1_y))
            win.blit(p2_player_idle, (p2_x, p2_y))
            display_actions()
            pygame.display.update()
            # make sure other player has not quit
            if not self.gameEndQueue.empty():
                # NOTE(review): argument order here does not match
                # p2_attack(player2, player, name, animation) - this passes
                # our frame list and the opponent's idle image, and plays
                # our own fighter's death frames; confirm intent.
                p2_attack(p1_player, p2_player_idle, p1_name, "death")
                display_win()
                pygame.time.wait(5000)
                self.exitGameTasks()
                run = False
                pygame.quit()
                sys.exit()
            # wait for message to be received (poll once a second)
            while self.gameActionQueue.empty() and turn == False:
                if not self.gameEndQueue.empty():
                    break
                pygame.time.wait(1000)
            # retrieve message regarding game actions
            if not self.gameActionQueue.empty() and turn == False:
                # setup custom event type
                # NOTE(review): posting then immediately polling assumes the
                # posted USEREVENT is the next event in the queue - another
                # pending event would be returned instead; confirm.
                eventTest = pygame.event.Event(pygame.USEREVENT, {"action": self.gameActionQueue.get()})
                pygame.event.post(eventTest)
                playerMove = pygame.event.poll()
                # display move on screen
                display_action_text(p1_name, players_move, p2_name, playerMove.action)
                pygame.time.wait(1000)
                # perform player 2 animations
                p2_attack(p2_player, p1_player_idle, p2_name, playerMove.action)
                # perform player 1 animations
                attack(p1_player, p2_player_idle, p1_name, players_move)
                # make sure proper magic animation performed
                if playerMove.action == "special":
                    display_magic(p2_name, False)
                if players_move == "special":
                    display_magic(p1_name, True)
                # calculate new payer health
                # (gameLoop is defined elsewhere in this file; it resolves
                # the pair of moves into damage dealt to us)
                player_health -= gameLoop(p1_name, p2_name, players_move, playerMove.action)
                # send new health to server for 2nd player
                self.sendGameStats(str(player_health))
                pygame.time.wait(2000)
                # receive player 2 new health
                if not self.gameStatsQueue.empty():
                    otherHealth = self.gameStatsQueue.get()
                    player2_health = float(otherHealth)
                # modify health bar with new health
                show_health(player_health, player2_health)
                turn = True
            if turn == True:
                # block until the player presses a key for this turn
                event = pygame.event.wait()
                # retrieve keypress and associate it with player move
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_q:
                        self.sendGameActions("attack")
                        players_move = "attack"
                        turn = False
                    elif event.key == pygame.K_w:
                        self.sendGameActions("dodge")
                        players_move = "dodge"
                        turn = False
                    elif event.key == pygame.K_e:
                        self.sendGameActions("block")
                        players_move = "block"
                        turn = False
                    elif event.key == pygame.K_r:
                        self.sendGameActions("special")
                        players_move = "special"
                        turn = False
                    elif event.key == pygame.K_RETURN:
                        # ENTER = forfeit: play our death, show defeat, quit
                        # handle death animations
                        attack(p1_player, p2_player_idle, p1_name, "death")
                        # show game lost message
                        display_lose()
                        self.sendGameActions("QUIT")
                        pygame.time.wait(2000)
                        self.exitGameTasks()
                        run = False
                        pygame.quit()
                        sys.exit()
            win.blit(background, (0, 0))
            pygame.display.update()
            # calculate who wins based on players health
            if player_health <= 0 and player2_health <= 0:
                attack(p1_player, p2_player_idle, p1_name, "death")
                p2_attack(p2_player, p1_player_idle, p2_name, "death")
                display_draw()
                pygame.time.wait(5000)
                self.sendGameActions("QUIT")
                self.exitGameTasks()
                run = False
                pygame.quit()
                sys.exit()
            elif player_health <= 0:
                attack(p1_player, p2_player_idle, p1_name, "death")
                display_lose()
                pygame.time.wait(5000)
                self.sendGameActions("QUIT")
                self.exitGameTasks()
                run = False
                pygame.quit()
                sys.exit()
            elif player2_health <= 0:
                display_win()
                pygame.time.wait(5000)
                self.sendGameActions("QUIT")
                self.exitGameTasks()
                run = False
                pygame.quit()
                sys.exit()
            clock.tick(30) # fps of game
# parse incoming messages for header info and message body
def messageParser(self, message):
if message:
print("Here is the message:\n " + message)
parsedMessage = message.split("\n")
return parsedMessage[0], parsedMessage[1]
return
"""
Check if game is available to join
If it is available add the name to the list
of available games
"""
def isGameAvailable(self, gameString):
parsedGame = gameString.split()
print(parsedGame)
if parsedGame:
if parsedGame[2] == "Waiting":
self.availableGames.append(parsedGame[0])
"""
Parse stringified game list received from server
Update displayed game list in the lobby
Send games to isGameAvailable to check if they can be joined
"""
def gameParser(self, message):
self.availableGames.clear()
self.gameList.delete(0, END)
self.joinBtn.config(state=DISABLED)
self.gamesInList = False
if message != "EMPTY":
self.gamesInList = True
if self.inActiveGame == False:
self.joinBtn.config(state=NORMAL)
parsedGames = message.split(";")
for game in parsedGames:
self.gameList.insert(END, game)
self.isGameAvailable(game)
"""
Receives messages from server and uses messageParser to
determine where to send the body of the message.
"""
def receive(self):
while True:
try:
msg = self.receiveMessages()
# parse for relevant info
header, message = self.messageParser(msg)
if message and header:
# chat messages
if header == "message":
# insert message into lobby chat
self.chatRoomTxt.config(state=NORMAL)
self.chatRoomTxt.insert(END, message + "\n")
self.chatRoomTxt.config(state=DISABLED)
self.chatRoomTxt.see(END)
# stringified game list
elif header == "newgame":
# insert new game info into game list
self.gameParser(message)
# starting game information for player 2
elif header == "sendstart":
self.startGameQueue.put(message)
# game health information
elif header == "gamestats":
self.gameStatsQueue.put(message)
# game actions
elif header == "gamecommand":
# insert game command into queue
if message == "QUIT":
self.gameEndQueue.put(message)
else:
self.gameActionQueue.put(message)
except socket.timeout:
continue
except IOError:
self.sock.close()
sys.exit(0)
# send chat message
def sendMessage(self, message):
if message:
self.messageBar.delete(0, END)
message = "message\n" + message
self.send(message)
# send starting game information
def sendStartGame(self, message):
message = "sendstart\n" + self.activeGame + " " + message
self.send(message)
# send game actions to server
def sendGameActions(self, message):
message = "gamecommand\n" + self.activeGame + " " + message
self.send(message)
# send game stats (AKA health)
def sendGameStats(self, message):
message = "gamestats\n" + self.activeGame + " " + message
self.send(message)
# send join game info to server
def sendJoinGame(self, message):
message = "joingame\n" + message
self.send(message)
# join a selected Game
def joinGame(self):
selectedGame = self.gameList.get(self.gameList.curselection())
gameParsed = selectedGame.strip().split()
if gameParsed:
# ensure game is available to accept players
if gameParsed[0] in self.availableGames:
self.activeGame = gameParsed[0]
self.sendJoinGame(self.activeGame)
self.gameWindow()
"""
All messages are sent with this function
Messages are pre-pended with header by other functions
and then sent with this send function
Sends the receive the size of the message first
"""
def send(self, message):
message = message.encode(FORMAT)
message_length = len(message)
send_length = str(message_length).encode(FORMAT)
send_length += b' ' * (BUFFER_SIZE - len(send_length))
self.sock.send(send_length)
self.sock.send(message)
"""
All received messages utilize this function though are handled
within the receive thread
This function checks the size of the message to receive first
"""
def receiveMessages(self):
message_length = self.sock.recv(BUFFER_SIZE).decode(FORMAT)
if message_length:
message_length = int(message_length)
message = self.sock.recv(message_length).decode(FORMAT)
return message
raise Exception("Received empty message")
"""
Send newly created game info to server
Set this new name to current active game
Launch a new game
"""
def createNewGame(self, game, num):
# destroy the create game window
self.createGame.destroy()
message = "newgame\n" + game + "\t" + str(num)
self.send(message)
self.activeGame = game
self.gameWindow()
# disable some functionality when in game
def disableButtons(self):
if self.inActiveGame == True:
self.joinBtn.config(state=DISABLED)
self.createBtn.config(state=DISABLED)
else:
self.createBtn.config(state=ACTIVE)
if self.gamesInList == True:
self.joinBtn.config(state=ACTIVE)
# exit game and clear all queues, activegame
def exitGameTasks(self):
self.activeGame = ""
self.inActiveGame = False
self.disableButtons()
self.gameActionQueue.queue.clear()
self.startGameQueue.queue.clear()
self.gameEndQueue.queue.clear()
    # set up exit behavior
    def onExit(self):
        """Close the socket to the server, then destroy the main Tk window."""
        self.sock.close()
        self.window.destroy()
"""
A function to calculate Magic damage to keep it clean in the loop
Magic Works as follows
fire does double damage to Earth and half to Water
Earth does double damage to Water and half to Fire
Water does double damage to Fire and half to Earth
"""
def calculateMagic(fighter, opponentFighter):
    """Return the magic damage *fighter* takes from *opponentFighter*'s attack.

    Both names are looked up in the module-level ``fighters`` table. Damage is
    doubled when the defender's element loses to the attacker's, halved when
    it beats it, and unchanged otherwise. Returns None for an unknown
    attacking element (matches the original code's fall-through).
    """
    # element -> element it deals double damage to / takes half damage against
    strong_against = {"fire": "earth", "earth": "water", "water": "fire"}
    weak_against = {"fire": "water", "earth": "fire", "water": "earth"}
    attack_magic = fighters[opponentFighter]["magic"]
    if attack_magic not in strong_against:
        # the original if/elif chain had no branch for other elements
        return None
    base_damage = fighters[opponentFighter]["damage"]
    defend_magic = fighters[fighter]["magic"]
    if defend_magic == strong_against[attack_magic]:
        return base_damage * 2
    if defend_magic == weak_against[attack_magic]:
        return base_damage / 2
    return base_damage
"""
This is where the calculations are done on how much
damage is taken from this client. Also takes in the
opponent's fighter and move so the damage can be
calculated in one place.
"""
def gameLoop(fighter, opponentFighter, move, opponentMove):
    """Return the damage this client takes for one exchange of moves.

    A successful dodge (50% chance: randint 6-10) avoids all damage; a block
    halves it. Magic damage is element-adjusted via calculateMagic. Any
    non-attack, non-special opponent move deals 0 damage.
    """
    # ------------------------------------Attack Logic----------------------------------------------------
    if(opponentMove == "attack"):
        if(move == "dodge"):
            if(random.randint(1,10) > 5):
                # successful dodge: falls through to the final `return 0`
                print("You Dodged Successfully - No Damage")
            else:
                print("Dodge Failed!")
                print("Opponent Attacked, you take " + str(fighters[opponentFighter]["damage"]) + " Damage\n")
                return fighters[opponentFighter]["damage"]
        elif(move == "block"):
            # blocking halves a plain attack
            print("You Blocked their attack!")
            print("You Take " + str(fighters[opponentFighter]["damage"]/2) + " Damage\n")
            return (fighters[opponentFighter]["damage"]/2)
        else:
            print("Opponent Attacked, you take " + str(fighters[opponentFighter]["damage"]) + " Damage\n")
            return fighters[opponentFighter]["damage"]
    # ------------------------------------Magic Logic----------------------------------------------------
    elif(opponentMove == "special"):
        print("You opponent used Magic")
        if(move == "dodge"):
            if(random.randint(1,10) > 5):
                print("You Dodged Successfully - No Damage")
                return 0
            else:
                print("Dodge Failed!")
        magicDamage = calculateMagic(fighter,opponentFighter) #Calculates damage
        if(move == "block"):
            # blocking also halves (element-adjusted) magic damage
            magicDamage /= 2
        print("Opponent used Magic you take " + str(magicDamage) + " Damage\n")
        return magicDamage
    # reached for unknown opponent moves and for a successful attack dodge
    return 0
# build the client GUI and hand control to the Tk event loop
app = ClientGUI()
mainloop()
658880d39e56b9de4d4bc1b60e3e16cacda23eb5 | Python | fibre-ether/amazon-clone-backend | /get_amzn_json.py | UTF-8 | 2,586 | 2.625 | 3 | [] | no_license | import sys
from bs4 import BeautifulSoup
import requests
import json
#print(sys.argv[1])
#rint(sys.argv[2])
# CLI arguments: search keyword and the maximum number of items to collect
ItemName=sys.argv[1]
maxitems=int(sys.argv[2])
# site root, used later to build absolute product links
url = "https://www.amazon.in"
headers = {"User-Agent": "Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:79.0) Gecko/20100101 Firefox/79.0"}
# NOTE(review): the proxy settings below are defined but never passed to
# requests in the code shown here -- confirm whether they are still needed.
http_proxy = "http://10.10.1.10:3128"
https_proxy = "https://10.10.1.11:1080"
ftp_proxy = "ftp://10.10.1.10:3128"
proxyDict = {
              "http" : http_proxy,
              "https" : https_proxy,
              "ftp" : ftp_proxy
            }
def getsoup(item, pagenum, timeout=30):
    """Fetch one Amazon search-result page and return it parsed with lxml.

    Args:
        item: search keyword(s).
        pagenum: result page number, as a string.
        timeout: request timeout in seconds (new parameter; without one a
            stalled connection would hang the scraper forever).
    """
    url = "https://www.amazon.in/s?k=" + item + "&page=" + pagenum
    # send the shared desktop User-Agent so Amazon serves the normal HTML page
    webpage = requests.get(url, headers=headers, timeout=timeout)
    soup = BeautifulSoup(webpage.content, "lxml")
    return soup
# parallel result columns, one entry per scraped product
name = []
rating = []
price = []
image = []
review_num = []
product_link = []
stop = False
item_name = ItemName
web_page = getsoup(item_name, "1")
try:
    # the pagination widget's last disabled entry holds the page count
    num_pages = int(web_page.find_all('ul', class_="a-pagination")[0].find_all('li', class_="a-disabled")[-1].text)
except:
    # no pagination widget: nothing to scrape (also covers blocked/empty pages)
    print("Nothing Found")
    sys.stdout.flush()
    exit()
page_num = 1
# NOTE(review): the loop starts at page 2, so page 1's items (fetched above)
# are never scraped -- confirm whether that is intentional.
for i in range(2, num_pages + 1):
    if stop:
        break
    web_page = getsoup(item_name, str(i))
    items = web_page.find_all('div', class_='s-result-item')
    for item in items:
        if len(price) > maxitems:
            stop = True
            break
        try:
            info = (item.find('span', class_="a-text-normal").text,
                    item.find('span', class_="a-icon-alt").text,
                    "Rs. " + item.find_all("span", class_="a-price")[0].span.text[1:],
                    item.find('img', class_='s-image')['src'],
                    item.find("div", class_="a-size-small").find("span", class_="a-size-base").text,
                    # bug fix: was `items[10]`, which gave EVERY product the
                    # link of result #10 -- use the current item instead
                    url + item.find_all('a', class_='a-link-normal s-no-outline')[0]["href"])
            name.append(info[0])
            rating.append(info[1])
            price.append(info[2])
            image.append(info[3])
            review_num.append(info[4])
            product_link.append(info[5])
        except:
            # product cards missing any field are skipped entirely
            pass
    page_num += 1
data = {'name': name, 'price': price, 'ratings': rating, 'image': image, 'review_num': review_num, 'product_link': product_link}
dataItems = json.dumps(data)
# df = pd.DataFrame(data=data)
# df.to_json(f"{item_name}_amazon.json", orient="split", compression="infer")
print(dataItems)
sys.stdout.flush()
138fff21c24fdc275c4910989020f4cfecf63a66 | Python | qamine-test/codewars | /kyu_7/the_first_non_repeated_character_in_string/test_first_non_repeated.py | UTF-8 | 2,690 | 3.40625 | 3 | [
"Unlicense",
"BSD-3-Clause"
] | permissive | # Created by Egor Kostan.
# GitHub: https://github.com/ikostan
# LinkedIn: https://www.linkedin.com/in/egor-kostan/
# FUNDAMENTALS ALGORITHMS STRINGS
import unittest
import allure
from utils.log_func import print_log
from kyu_7.the_first_non_repeated_character_in_string.first_non_repeated import first_non_repeated
@allure.epic('7 kyu')
@allure.parent_suite('Beginner')
@allure.suite("Data Structures")
@allure.sub_suite("Unit Tests")
@allure.feature("String")
@allure.story('The First Non Repeated Character In A String')
@allure.tag()
@allure.link(url='',
             name='Source/Kata')
class FirstNonRepeatedTestCase(unittest.TestCase):
    """
    Testing first_non_repeated function
    """
    def test_first_non_repeated(self):
        """
        Check first_non_repeated against short, long and all-repeated inputs.
        :return:
        """
        allure.dynamic.title("Testing first_non_repeated "
                             "function with various inputs")
        allure.dynamic.severity(allure.severity_level.NORMAL)
        allure.dynamic.description_html('<h3>Codewars badge:</h3>'
                                        '<img src="https://www.codewars.com/users/myFirstCode'
                                        '/badges/large">'
                                        '<h3>Test Description:</h3>'
                                        "<p></p>")
        with allure.step("Enter test string and verify the output"):
            # (input string, expected first non-repeated char; None when
            # every character repeats)
            test_data = [
                ("test", 'e'),
                ("teeter", 'r'),
                ("1122321235121222", '5'),
                ('1122321235121222dsfasddssdfa112232123sdfasdfasdf11'
                 '22321235121222dsfasddssdfa112232123sdfasdfasdf1122'
                 '321231122321235121222dsfasddssdfa112232123sdfasdfa'
                 'sdf1122321231122321235121222dsfasddssdfa112232123sd'
                 'fasdfasdf1122321231122321235121222dsfasddssdfa11223'
                 '2123sdfasdfasdf1122321231122321235121222dsfasddssdf'
                 'a112232123sdfasdfasdf112232123asddssdfa112232123sdfa'
                 'sdfasdf1122z321231122321235121222dsfasddssdf1122321'
                 '235121222dsfasddssdf1122321235121222dsfasddssdf11223'
                 '21235121222dsfasddssdf1122321235121222dsfasddssdf112'
                 'p2321235121222dsfasddssdf1122321235121222dsfasddssdf', 'z'),
                ('ogmhrsoqiklqfmhgnpjsrikmnlpfj', None),
                ('knioolrpnutskmqmhqtriipjjushl', None),
            ]
            for s, expected in test_data:
                print_log(s=s,
                          expected=expected)
                self.assertEqual(expected,
                                 first_non_repeated(s))
| true |
00a67a7edf1e0bf640678cbaf9c0f6a36e79295f | Python | steadylearner/Rust-Full-Stack | /desktop/pdf/main.py | UTF-8 | 259 | 3.03125 | 3 | [
"MIT"
] | permissive | import tkinter as tk
class Root(tk.Tk):
    """Top-level application window showing a single greeting label."""

    def __init__(self):
        super().__init__()
        greeting = tk.Label(self, text="Hello World", padx=5, pady=5)
        greeting.pack()
        self.label = greeting
# run the window only when executed directly, not on import
if __name__ == "__main__":
    root = Root()
    root.mainloop()
| true |
015e9614bb98bdd02c8a960504e3aa06bab28b7c | Python | heshibo1994/leetcode-python-2 | /395. 至少有K个重复字符的最长子串.py | UTF-8 | 876 | 3.890625 | 4 | [] | no_license | # 找到给定字符串(由小写字符组成)中的最长子串 T , 要求 T 中的每一字符出现次数都不少于 k 。输出 T 的长度。
#
# 示例 1:
#
# 输入:
# s = "aaabb", k = 3
#
# 输出:
# 3
#
# 最长子串为 "aaa" ,其中 'a' 重复了 3 次。
#
# 示例 2:
#
# 输入:
# s = "ababbc", k = 2
#
# 输出:
# 5
#
# 最长子串为 "ababb" ,其中 'a' 重复了 2 次, 'b' 重复了 3 次。
#
# 来源:力扣(LeetCode)
# 链接:https://leetcode-cn.com/problems/longest-substring-with-at-least-k-repeating-characters
class Solution(object):
    def longestSubstring(self, s, k):
        """Length of the longest substring whose every character appears >= k times.

        Divide and conquer: any character rarer than k can never be part of
        the answer, so split on it and recurse into the pieces.
        """
        if not s:
            return 0
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        rare = [ch for ch, cnt in counts.items() if cnt < k]
        if not rare:
            # every character is frequent enough: the whole string qualifies
            return len(s)
        return max(self.longestSubstring(piece, k) for piece in s.split(rare[0]))
# quick manual check: "ababb" (a appears 2x, b 3x) is the longest valid substring
s = Solution()
print(s.longestSubstring(s = "ababbc", k = 2))
| true |
70d3685d263ecd14f17d7ed45b6f7d3544e9d8ff | Python | ninetailskim/CheckInThreePigs | /PandaFace/testtext.py | UTF-8 | 839 | 2.875 | 3 | [] | no_license | from PIL import Image, ImageDraw, ImageFont
import cv2
import numpy as np
def addText(img, text, left, top, textColor=(0, 0, 0), textSize=50):
    """Draw text with a 1px black drop shadow onto an image; return a BGR ndarray.

    Accepts either a PIL image or an OpenCV (BGR) ndarray as input.
    """
    if isinstance(img, np.ndarray):  # OpenCV image: convert BGR -> PIL RGB
        img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    canvas = ImageDraw.Draw(img)
    font = ImageFont.truetype(
        "simsun.ttc", textSize, encoding="utf-8")
    # shadow first, one pixel down-right, then the colored text on top
    canvas.text((left + 1, top + 1), text, (0, 0, 0), font=font)
    canvas.text((left, top), text, textColor, font=font)
    return cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR)
# render a white glyph on a 50x50 black canvas and display it
img = np.zeros([50,50], dtype=np.uint8)
img = addText(img, "我", 0,0,textColor=(255,255,255))
cv2.imshow("t", img)
cv2.waitKey(0)
cv2.destroyAllWindows()
# len() counts characters, not bytes, so CJK strings report their glyph count
lin = "我爱你,亲爱的姑娘"
print(len(lin))
lin = "吔屎啦你!"
print(len(lin))
| true |
a3afc55e229633f495f997bf9dc6356d1342aab2 | Python | gokuSSJ95/Easy-Bookmark-Accessing | /AccessBookmarks.py | UTF-8 | 3,756 | 2.53125 | 3 | [] | no_license | import os
import json, re, webbrowser, socket, time, sys, codecs
def internet_on(host="8.8.8.8", port=53, timeout=3):
    """
    Host: 8.8.8.8 (google-public-dns-a.google.com)
    OpenPort: 53/tcp
    Service: domain (DNS/TCP)

    Return True when a TCP connection to host:port succeeds, else False.
    """
    try:
        socket.setdefaulttimeout(timeout)
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            sock.connect((host, port))
        finally:
            # bug fix: the original never closed the socket
            sock.close()
        return True
    # bug fix: the original caught the undefined name `exception`, which
    # raised NameError on every connection failure; OSError covers all
    # socket errors (ConnectionError, timeout, gaierror, ...)
    except OSError:
        return False
def childRet(dta):
    """Recursively flatten Chrome bookmark nodes into the global r_data list.

    Leaf nodes (having a "url") become search entries with lower-cased word
    lists; folder nodes lack "url" and are recursed into via "children".
    """
    for node in dta:
        try:
            r_entry = {"name" : node["name"],
                       "url" : node["url"],
                       "nameList" : re.sub("[^\w]", " ", node["name"].lower()).split(),
                       "urlList" : re.sub("[^\w]", " ", node["url"].lower()).split(),
                       }
            r_data.append(r_entry)
        except KeyError:
            # folder node: no "url" key -- recurse (bug fix: was a bare
            # `except:` that also swallowed unrelated errors)
            childRet(node["children"])
def authFunc(fd, q):
    """Open the URL of the q-th (1-based) search result in the browser.

    Re-prompts until a valid result number is entered. Rewritten from the
    original's self-recursion (which could exhaust the stack on repeated
    bad input) into a loop with identical behavior.
    """
    while not (1 <= q <= len(fd)):
        print("Invalid input.")
        q = int(input("Enter a valid result no. to be opened: "))
    webbrowser.open_new(fd[q - 1]["url"])
bookmark_id = 1
# flat list of all bookmark entries collected from the Chrome bookmarks file
r_data = []
user = "C:\\Users\\_UserName_\\AppData\\Local\\Google\\Chrome\\User Data\\Default\\Bookmarks" #Replace _UserName_ with your device's username.
input_file = codecs.open(user, encoding='utf-8')
# NOTE(review): input_file is never closed in this script -- consider `with`.
data = json.load(input_file)
# Flatten both bookmark roots ("bookmark_bar" and "other") into r_data.
# childRet performs exactly the per-node work the two original copy-pasted
# loops did: leaf bookmarks are appended as entries, folders are recursed
# into through their "children" list.
childRet(data["roots"]["bookmark_bar"]["children"])
childRet(data["roots"]["other"]["children"])
query = input("Enter search query: ")
fin_data = []
entry_num = 1
# show every bookmark whose name or URL contains the query as a whole word
for i in range(0,len(r_data)):
    if query.lower() in r_data[i]["nameList"] or query.lower() in r_data[i]["urlList"]:
        print("Result No."+str(entry_num))
        entry_num+=1
        print("Name:",r_data[i]["name"])
        print("URL:",r_data[i]["url"])
        fin_entry = {"name" : r_data[i]["name"],
                     "url" : r_data[i]["url"]
                     }
        fin_data.append(fin_entry)
# open the chosen result in the browser (authFunc re-prompts on bad numbers)
checkRes = int(input("Enter the result no. to be opened: "))
authFunc(fin_data,checkRes)
| true |
6c46c87cabdd6eec2e55bf4240c97721dbc09f36 | Python | oknelvapi/GitPython | /Coursera_online/week3/test3.3.5.py | UTF-8 | 901 | 4.09375 | 4 | [] | no_license | # Знайти другу літеру "f" в рядку
n = str(input())
f = n.find('f') # index (position) of the first 'f'
f2 = n[f+1::] # slice of the string starting just after the first 'f'
f2find = f2.find('f') # index of the second 'f' within that new slice
if f == -1: # the input contains no 'f' at all, so print:
    print(-2)
# How do we know the string holds only a single 'f'?
elif f2find == -1: # search the slice built after the first 'f'!
    print(-1)
else: # position (index) of the second 'f':
    print(f+f2find+1) # first 'f' index + slice index of the second + 1 (both counts start at 0)
| true |
50d410ed13f2d92b051c5a15ff04dec112a85ffd | Python | sula678/python-note | /machine_learning/coordinate.py | UTF-8 | 2,048 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
from sympy import *
import matplotlib.pyplot as plt
# Load the iris dataset (no header row) straight from the UCI repository
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data', header=None)
# First feature (sepal length) of the first 100 samples
X = df.iloc[0:100, 0].values
# bug fix: `print X` is Python-2-only syntax; the call form works in 2 and 3
print(X)
# Each of the first 100 rows' 5th column is the species (setosa or versicolor)
y = df.iloc[0:100, 4].values
y = np.where(y == 'Iris-setosa', -1, 1)
#x = np.array([1, 2, 2.5, 3])
#y = np.array([1.5, 3, 4.1, 2.5])
# NOTE(review): y holds -1/+1 class labels here, so the axis labels below
# ("petal/sepal length") do not match what is plotted -- confirm intent.
plt.scatter(X, y, color='red', marker='o', label='setosa')
plt.xlabel('test tpetal length')
plt.ylabel('test sepal length')
plt.legend(loc='upper left')
plt.show()
# Disabled scikit-learn linear-regression example, kept for reference only.
"""
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
# 讀取內建的糖尿病數據集
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
      % np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
         linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
"""
9b4bc5e7c6475e8594669bd633ee8f1fb0e19e32 | Python | tongbc/algorithm | /src/justForReal/bitwiseComplement.py | UTF-8 | 194 | 2.625 | 3 | [] | no_license | class Solution(object):
def bitwiseComplement(self, N):
"""
:type N: int
:rtype: int
"""
X = 1
while N > X: X = X * 2 + 1
return X - N | true |
05a988aff16d2e32a20874bf0c4f80fd6c732206 | Python | Imperoli/rockin_at_work_software | /robocup-at-work/mas_common_robotics/mcr_common/mcr_algorithms/common/src/mcr_algorithms/controller/pid_controller.py | UTF-8 | 1,521 | 2.875 | 3 | [] | no_license | #! /usr/bin/python
import math
import sys
class p_controller:
    """Plain proportional (P) controller."""

    def __init__(self, proportional_constant):
        # the gain must lie within [0.0, 1.0]; anything else aborts the process
        if proportional_constant > 1.0 or proportional_constant < 0.0:
            sys.exit('error, proportional constant should be less than 1.0 and larger than 0.0')
        self.proportional_constant = proportional_constant

    def control(self, set_value, current_value):
        """Step the controlled value toward set_value by Kp times the error."""
        error = set_value - current_value
        return current_value + (self.proportional_constant * error)
class pd_controller:
    """Proportional-derivative (PD) controller with a fixed-length error history."""

    def __init__(self, proportional_constant, derivative_constant, sampling_time):
        # both gains must lie within [0.0, 1.0]; anything else aborts the process
        if proportional_constant > 1.0 or proportional_constant < 0.0:
            sys.exit('error, proportional constant should be less than 1.0 and larger than 0.0')
        if derivative_constant > 1.0 or derivative_constant < 0.0:
            sys.exit('error, derivative constant should be less than 1.0 and larger than 0.0')
        self.sampling_time = sampling_time
        # sliding window of the last `sampling_time` errors, oldest first
        self.error_list = [0.0] * self.sampling_time
        self.proportional_constant = proportional_constant
        self.derivative_constant = derivative_constant

    def control(self, set_value, current_value):
        """Return the next control value from the P and D terms."""
        error = set_value - current_value
        # slide the error window forward by one sample
        self.error_list.pop(0)
        self.error_list.append(error)
        # difference between the previous stored error and the new one
        derivative_error = self.error_list[self.sampling_time - 2] - error
        return current_value + (self.proportional_constant * error) + (self.derivative_constant * derivative_error)
| true |
1a3ffe207815116d866d96ee51a2416ab2ce6116 | Python | facebookresearch/fairmotion | /fairmotion/tasks/motion_prediction/dataset.py | UTF-8 | 1,916 | 2.875 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import pickle
import torch
import torch.utils.data as data
from fairmotion.utils import constants
class Dataset(data.Dataset):
    """Motion-prediction dataset yielding normalized (source, target) pairs."""

    def __init__(self, dataset_path, device, mean=None, std=None):
        """Load pickled (src, tgt) sequences and the normalization statistics.

        Args:
            dataset_path: path to a pickle of (src_seqs, tgt_seqs) arrays.
            device: device string the tensors are moved to ('cpu' or 'cuda').
            mean, std: optional precomputed statistics; computed from the
                source sequences when either is missing.
        """
        # close the file deterministically (the original leaked the handle
        # opened inline inside pickle.load)
        with open(dataset_path, "rb") as f:
            self.src_seqs, self.tgt_seqs = pickle.load(f)
        if mean is None or std is None:
            self.mean = np.mean(self.src_seqs, axis=(0, 1))
            self.std = np.std(self.src_seqs, axis=(0, 1))
        else:
            self.mean = mean
            self.std = std
        self.num_total_seqs = len(self.src_seqs)
        self.device = device

    def __getitem__(self, index):
        """Returns one normalized data pair (source, target) as double tensors."""
        # EPSILON guards against division by zero for constant features
        src_seq = (self.src_seqs[index] - self.mean) / (
            self.std + constants.EPSILON
        )
        tgt_seq = (self.tgt_seqs[index] - self.mean) / (
            self.std + constants.EPSILON
        )
        src_seq = torch.Tensor(src_seq).to(device=self.device).double()
        tgt_seq = torch.Tensor(tgt_seq).to(device=self.device).double()
        return src_seq, tgt_seq

    def __len__(self):
        # number of (src, tgt) sequence pairs
        return self.num_total_seqs
def get_loader(
    dataset_path,
    batch_size=100,
    device="cuda",
    mean=None,
    std=None,
    shuffle=False,
):
    """Build a DataLoader over the pickled motion dataset.

    Args:
        dataset_path: path to pickled numpy dataset.
        batch_size: mini-batch size.
        device: 'cpu' or 'cuda' -- where each item's tensors are placed.
        mean, std: optional precomputed normalization statistics.
        shuffle: whether the loader reshuffles every epoch.

    Returns:
        A DataLoader yielding (src_seqs, tgt_seqs) batches.
    """
    motion_dataset = Dataset(dataset_path, device, mean, std)
    return data.DataLoader(
        dataset=motion_dataset, batch_size=batch_size, shuffle=shuffle,
    )
| true |
721e90900cfa3547f33eb1ead34cf2129ee03dac | Python | DDmitroIDD/homework | /homework_6/sixles.py | UTF-8 | 2,262 | 3.828125 | 4 | [] | no_license | # практика 1
# Создать словарь оценок предполагаемых студентов (Ключ - ФИ студента, значение - список оценок студентов).
# Найти самого успешного и самого отстающего по среднему баллу.
from names import get_full_name
from random import sample
from random import randint
students = {'Smirnov Semen': [45, 67, 35, 79], 'Ivanov Ivan': [54, 23, 87, 95],
'Dmytriev Dmytro': [36, 74, 89, 22], 'Petrov Petr': [90, 87, 66, 99]}
average_scores = {}
for i in students:
average_scores.setdefault(i, sum(students[i]) / len(students[i]))
print(*max(average_scores.items()), *min(average_scores.items()))
# практика 2
# Создать структуру данных для студентов из имен и фамилий, можно случайных.
# Придумать структуру данных, чтобы указывать, в какой группе учится студент (Группы Python, FrontEnd, FullStack, Java).
# Студент может учиться в нескольких группах. Затем вывести:
# студентов, которые учатся в двух и более группах
# студентов, которые не учатся на фронтенде
# студентов, которые изучают Python или Java
programmers = {}
number_of_students = 12
groups = ['Python', 'FrontEnd', 'FullStack', 'Java']
for i in range(number_of_students):
programmers.setdefault(get_full_name(), sample(groups, k=randint(1, 4)))
print('\n' + 'Студенты, которые учатся в двух и более группах' + '\n')
for j in programmers:
if len(programmers[j]) > 1:
print(j + ' => ', *programmers[j])
print('\n' + 'Студенты, которые не учатся на фронтенде' + '\n')
for x in programmers:
if 'FrontEnd' not in programmers[x]:
print(x + ' => ', *programmers[x])
print('\n' + 'Студенты, которые изучают Python или Java' + '\n')
for y in programmers:
if ('Java' or 'Python') in programmers[y]:
print(y + ' => ', *programmers[y])
| true |
36fd30e4f765c6e6db57a653476bb2133115cc31 | Python | TonyCmC/fuglePlotter | /FugleKLinePlotter.py | UTF-8 | 16,846 | 2.625 | 3 | [] | no_license | import configparser
import datetime
import json
import operator
import re
import requests
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# 導入蠟燭圖套件
import mpl_finance as mpf
# 專門做『技術分析』的套件
from talib import abstract
config = configparser.ConfigParser()
config.read('config.ini')
class FugleKLinePlotter:
    """Fetches intraday data for one stock from the Fugle API and draws a K-line chart."""
    # define the font attributes of title
    # NOTE: executed once at class-definition time; mutates global matplotlib state
    plt.rcParams['font.sans-serif'] = ['Microsoft JhengHei']
    plt.rcParams['axes.unicode_minus'] = False
    plt.rcParams['axes.titlesize'] = 20
    plt.style.use('dark_background')
    # base URL for all Fugle endpoints, taken from config.ini
    api_url = config['FUGLE']['API_URL']
    def __init__(self, stock_id, file_name):
        """Set up state for one stock symbol and immediately fetch its data.

        Args:
            stock_id: Fugle symbol id of the stock to plot.
            file_name: stored output file name (unused in the code shown here).
        """
        self.stock_id = stock_id
        self.stock_name = ''          # filled in by get_price_info_of_stock()
        self.file_name = file_name
        self.data = {}                # last parsed /chart response
        self.market_time = datetime.datetime.now()
        self.last_closed = 0.0        # reference (previous closing) price
        self.highest_price = 0.0      # daily limit-up price
        self.lowest_price = 0.0       # daily limit-down price
        self.is_stock = True          # False for emerging stocks / no reference price
        self.get_price_plot()
        self.get_price_info_of_stock()
def request_factory(self, api_url, params=''):
if params == '':
params = {
'symbolId': self.stock_id,
'apiToken': config['FUGLE']['TOKEN']
}
res = requests.get(api_url, params=params)
self.logger(res)
return res.text
def get_endpoint_of_url(self, url):
match = re.search(r'/(\w+)\?', url)
return match.group(1)
    def logger(self, res_obj):
        """Append the request URL and response body of res_obj to today's log file."""
        res = res_obj
        today_date = datetime.datetime.today().strftime('%Y-%m-%d')
        # NOTE(review): the template hard-codes "(unknown)" and never uses the
        # `filename` argument computed below -- confirm whether the endpoint
        # name was meant to appear in the log file name.
        with open('logs/{date}-fugle-(unknown).log'.format(date=today_date,
                                                           filename=self.get_endpoint_of_url(res.url)),
                  'a',
                  encoding='utf-8') as f:
            now_timestamp = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            f.write('=====================================\n')
            f.write("[{0}]".format(now_timestamp) + '\n')
            f.write("requests url: {0}".format(res.url) + '\n')
            f.write('response: \n' + res.text + '\n')
            f.write('=====================================' + '\n')
    def get_price_plot(self):
        """Fetch the /chart intraday series and return it as parallel OHLCV lists.

        Minutes missing from the feed are forward-filled with the previous
        tick's prices at zero volume so the result is one entry per minute.
        Volumes >= 1000 are converted from shares to lots. Also refreshes
        self.market_time from the response's lastUpdatedAt field.
        """
        api_for_stock = self.api_url + '/chart'
        res = self.request_factory(api_for_stock)
        self.data = json.loads(res)
        price_set = self.data.get('data').get('chart')
        if price_set == {}:
            # no trades yet: return an empty, correctly shaped structure
            arranged_dict = {
                "time": [],
                "open": [],
                "high": [],
                "low": [],
                "close": [],
                "volume": []
            }
            return arranged_dict
        self.market_time = self.isoformat_transfer(self.data.get('data').get('info').get('lastUpdatedAt'))
        time_series = list(price_set.keys())
        new_price_dict = {}
        for idx, e in enumerate(time_series):
            ticker_stack = datetime.timedelta(minutes=1)
            tmp_price_dict = {}
            content_stack = price_set.get(e)
            if 'volume' not in content_stack.keys():
                content_stack['volume'] = 0
            if content_stack['volume'] >= 1000:
                # convert shares to lots (1 lot = 1000 shares)
                content_stack['volume'] = int(content_stack['volume'] / 1000)
            if idx == 0:
                tmp_price_dict[self.isoformat_transfer(time_series[idx])] = content_stack
            else:
                # take the current tick's content
                content_stack = price_set.get(time_series[idx])
                # minutes skipped between this tick and the previous one
                ticker_minute_diff = (self.isoformat_to_datetime(time_series[idx]) - self.isoformat_to_datetime(
                    time_series[idx - 1]) - datetime.timedelta(minutes=1))
                # a non-zero gap means some one-minute tickers are missing
                if ticker_minute_diff != datetime.timedelta(minutes=0):
                    # number of missing one-minute tickers
                    missing_ticker = ticker_minute_diff / datetime.timedelta(minutes=1)
                    # fill each gap minute with the previous tick at zero volume
                    for tick in range(int(missing_ticker)):
                        previous_content_stack = price_set.get(time_series[idx - 1])
                        current_timestamp = (self.isoformat_to_datetime(time_series[idx - 1]) + ticker_stack).strftime(
                            '%Y-%m-%d %H:%M:%S')
                        ticker_stack += datetime.timedelta(minutes=1)
                        previous_content_stack_with_zero_vol = previous_content_stack.copy()
                        previous_content_stack_with_zero_vol['volume'] = 0
                        tmp_price_dict[current_timestamp] = previous_content_stack_with_zero_vol
                tmp_price_dict[self.isoformat_transfer(time_series[idx])] = content_stack
            new_price_dict.update(tmp_price_dict)
        arranged_dict = {
            "time": list(new_price_dict.keys()),
            "open": list(map(operator.itemgetter('open'), list(new_price_dict.values()))),
            "high": list(map(operator.itemgetter('high'), list(new_price_dict.values()))),
            "low": list(map(operator.itemgetter('low'), list(new_price_dict.values()))),
            "close": list(map(operator.itemgetter('close'), list(new_price_dict.values()))),
            "volume": list(map(operator.itemgetter('volume'), list(new_price_dict.values()))),
        }
        return arranged_dict
def get_best_five_quote(self, data=''):
if data == '':
api_for_stock = self.api_url + '/quote'
res = self.request_factory(api_for_stock)
data = json.loads(res)
arranged_dict = self.get_price_plot()
if len(arranged_dict['close']) != 0:
current_price_list = [x for x in arranged_dict['close'] if x is not None]
current_closed_price = current_price_list[len(current_price_list) - 1]
else:
current_closed_price = self.last_closed
if current_closed_price > self.last_closed:
stock_mark = '▲'
elif current_closed_price < self.last_closed:
stock_mark = '▼'
else:
stock_mark = '-'
current_volume_list = [x for x in arranged_dict['volume'] if x is not None]
title_diff = round(current_closed_price - self.last_closed, 2)
title_diff_percent = round(title_diff / self.last_closed * 100, 2)
title = '{name}({id}) {time}\n'.format(name=self.stock_name,
id=self.stock_id,
time=self.market_time)
sub_title = '{price} {mark}{diff} ({percent}%) 成交量: {volume}\n'.format(
volume=str(int(sum(current_volume_list))),
price=current_closed_price,
mark=stock_mark,
diff=title_diff,
percent=title_diff_percent)
order_list = data.get('data').get('quote').get('order')
best_asks = order_list.get('bestAsks')
best_bids = order_list.get('bestBids')
ordered_best_bids = sorted(best_bids, key=operator.itemgetter('price'), reverse=True)
result = title + sub_title + '-' * len(title) + '\n'
for idx, bid in enumerate(ordered_best_bids):
buyer = '{vol} @ {price:.2f}'.format(vol=str(bid.get('unit')), price=bid.get('price'))
seller = '{vol_ask} @ {price_ask:.2f}'.format(vol_ask=str(best_asks[idx].get('unit')),
price_ask=best_asks[idx].get('price'))
result += '{buyer:>15}\t|\t{seller:>15}\n'.format(buyer=buyer, seller=seller)
return result
if data == '':
api_for_stock = self.api_url + '/quote'
res = self.request_factory(api_for_stock)
data = json.loads(res)
arranged_dict = self.get_price_plot()
if len(arranged_dict['close']) != 0:
current_price_list = [x for x in arranged_dict['close'] if x is not None]
current_closed_price = current_price_list[len(current_price_list) - 1]
else:
current_closed_price = self.last_closed
if current_closed_price > self.last_closed:
stock_mark = '▲'
elif current_closed_price < self.last_closed:
stock_mark = '▼'
else:
stock_mark = '-'
current_volume_list = [x for x in arranged_dict['volume'] if x is not None]
title_diff = round(current_closed_price - self.last_closed, 2)
title_diff_percent = round(title_diff / self.last_closed * 100, 2)
title = ' {name}({id}) {time}\n'.format(name=self.stock_name,
id=self.stock_id,
time=self.market_time)
sub_title = ' {price} {mark}{diff} ({percent}%) 成交量: {volume}\n'.format(
volume=str(int(sum(current_volume_list))),
price=current_closed_price,
mark=stock_mark,
diff=title_diff,
percent=title_diff_percent)
order_list = data.get('data').get('quote').get('order')
best_asks = order_list.get('bestAsks')
best_bids = order_list.get('bestBids')
ordered_best_bids = sorted(best_bids, key=operator.itemgetter('price'), reverse=True)
result = title + sub_title + '-' * len(title) + '\n'
for idx, bid in enumerate(ordered_best_bids):
result += '{vol} @ {price}\t|\t{vol_ask} @ {price_ask}\n'.format(vol=str(bid.get('unit')).rjust(5),
price=str(bid.get('price')).ljust(5),
vol_ask=str(
best_asks[idx].get('unit')).rjust(5),
price_ask=str(
best_asks[idx].get('price')).ljust(5))
return result
    def get_price_info_of_stock(self):
        """Fetch /meta and cache the stock name, reference price and daily price limits."""
        api_for_stock = self.api_url + '/meta'
        res = self.request_factory(api_for_stock)
        data = json.loads(res)
        self.stock_name = data.get('data').get('meta').get('nameZhTw')
        self.last_closed = float(round(data.get('data').get('meta').get('priceReference'), 2))
        # NOTE(review): the outer round() has no ndigits, so the limit prices
        # are rounded to whole numbers even when the API supplies them --
        # confirm whether that is intentional. The `or` falls back to +/-10%
        # of the reference price when the API reports no limit.
        self.highest_price = float(round(data.get('data').get('meta').get('priceHighLimit') or
                                         round(float(self.last_closed) * 1.1, 2)))
        self.lowest_price = float(round(data.get('data').get('meta').get('priceLowLimit') or
                                        round(float(self.last_closed) * 0.9, 2)))
        print('self.highest_price: ', self.highest_price)
        print('self.lowest_price: ', self.lowest_price)
        # handle emerging stocks or stocks without a previous close
        # (typically on their first trading day)
        if 'volumePerUnit' not in data.get('data').get('meta').keys() or self.last_closed == 0:
            self.is_stock = False
def isoformat_to_datetime(self, datetime_string):
raw_datetime = datetime.datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S.%fZ")
raw_datetime += datetime.timedelta(hours=8)
return raw_datetime
def isoformat_transfer(self, datetime_string):
raw_datetime = datetime.datetime.strptime(datetime_string, "%Y-%m-%dT%H:%M:%S.%fZ")
raw_datetime += datetime.timedelta(hours=8)
parsed_datetime_string = raw_datetime.strftime('%Y-%m-%d %H:%M:%S')
return parsed_datetime_string
    def draw_plot(self):
        """Render the intraday chart (candlesticks + volume + 5/30 MA) and
        save it under images/ as both a dpi=100 and a default-resolution PNG.

        Reads intraday OHLCV from self.get_price_plot(); assumes a 270-slot
        trading session (09:00-13:30) -- TODO confirm against get_price_plot.
        """
        arranged_dict = self.get_price_plot()
        # print(arranged_dict)
        # First-day emerging-board listing: no last_closed info, so use the
        # opening price as the reference baseline.
        if self.last_closed == 0:
            self.last_closed = arranged_dict.get('open')[0]
        if self.is_stock is False:
            # Non-regular listings have no price limits; widen the y-range
            # to cover the actual intraday extremes.
            if self.highest_price < max(arranged_dict.get('close')):
                self.highest_price = max(arranged_dict.get('close'))
            if self.lowest_price > min(arranged_dict.get('close')):
                self.lowest_price = min(arranged_dict.get('close'))
        df = pd.DataFrame(arranged_dict)
        fig = plt.figure(figsize=(10, 8))
        # Create the two sub-axes with add_axes: price panel on top, volume below.
        ax = fig.add_axes([0.1, 0.3, 0.8, 0.6])
        ax2 = fig.add_axes([0.1, 0.1, 0.8, 0.2])
        # 270 minute-slots, hour ticks every 54 slots labelled 09..13.
        ax2.set_xticks(range(0, 270, 54))
        ax2.set_xticklabels(['09', '10', '11', '12', '13'])
        ax.set_ylim(round(self.lowest_price, 2), round(self.highest_price, 2))
        mpf.candlestick2_ohlc(ax, df['open'], df['high'], df['low'], df['close'],
                              width=1, colorup='r', colordown='springgreen', alpha=0.75)
        # Pad the volume series with zero rows so the volume panel always
        # spans the full 270-slot session.
        empty_arr = [0 for x in range(270 - len(df))]
        df2 = {
            'time': empty_arr,
            'open': empty_arr,
            'high': empty_arr,
            'low': empty_arr,
            'close': empty_arr,
            'volume': empty_arr
        }
        df2 = pd.DataFrame(df2)
        df3 = df.append(df2, ignore_index=True)
        mpf.volume_overlay(ax2, df3['open'], df3['close'], df3['volume'],
                           colorup='r', colordown='springgreen', width=1, alpha=0.8)
        # Moving-average lines.
        sma_5 = abstract.SMA(df, 5)
        sma_30 = abstract.SMA(df, 30)
        # Horizontal line at the reference (previous close / opening) price.
        ax.plot([0, 270], [self.last_closed, self.last_closed])
        # Annotate the intraday high and low.
        ymax = df['close'].max()
        xmax = df['close'].idxmax()
        ymin = df['close'].min()
        xmin = df['close'].idxmin()
        ax.annotate(str(ymax), xy=(xmax, ymax), xycoords='data',
                    xytext=(0, 15), textcoords='offset points', color='r',
                    bbox=dict(boxstyle='round,pad=0.2', fc='navy', alpha=0.3),
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.95',
                                    color='white'),
                    horizontalalignment='right', verticalalignment='bottom', fontsize=15)
        ax.annotate(str(ymin), xy=(xmin, ymin), xycoords='data',
                    xytext=(0, -25), textcoords='offset points', color='springgreen',
                    bbox=dict(boxstyle='round,pad=0.2', fc='navy', alpha=0.3),
                    arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0.95',
                                    color='white'),
                    horizontalalignment='right', verticalalignment='bottom', fontsize=15)
        # 5MA + 30MA
        ax.plot(sma_5, label='5MA')
        ax.plot(sma_30, label='30MA')
        # Latest traded price = last non-None close sample.
        current_price_list = [x for x in arranged_dict['close'] if x is not None]
        current_closed_price = current_price_list[len(current_price_list) - 1]
        if current_closed_price > self.last_closed:
            stock_color = 'r'
            stock_mark = '▲'
        elif current_closed_price < self.last_closed:
            stock_color = 'springgreen'
            stock_mark = '▼'
        else:
            stock_color = 'ivory'
            stock_mark = '-'
        current_volume_list = [x for x in arranged_dict['volume'] if x is not None]
        title_diff = round(current_closed_price - self.last_closed, 2)
        title_diff_percent = round(title_diff / self.last_closed * 100, 2)
        stock_info = '{name}({id})'.format(name=self.stock_name, id=self.stock_id)
        # Wider names (likely CJK characters) need less filler to right-align
        # the timestamp -- hand-tuned constants.
        if len(stock_info) > 10:
            space_fill = 83
        else:
            space_fill = 90
        title = '{stock_info: <{fill}}{time}\n'.format(fill=space_fill-len(str(self.market_time))-len(stock_info),
                                                       stock_info=stock_info,
                                                       time=self.market_time)
        price_info = '{price} {mark}{diff} ({percent}%)'.format(
            price=current_closed_price,
            mark=stock_mark,
            diff=title_diff,
            percent=title_diff_percent)
        sub_title = '{price_info:<90}成交量: {volume}'.format(
            price_info=price_info,
            volume=str(int(sum(current_volume_list))))
        plt.suptitle(sub_title,y=0.93, size='xx-large', color=stock_color)
        title_obj = ax.set_title(title, loc='Left', pad=0.5)
        plt.setp(title_obj, color='ivory') # set the title text color to ivory
        ax.legend(fontsize='x-large', loc=2)
        file_name = self.stock_id + '-' + self.file_name
        fig.savefig('images/lower_{file_name}.png'.format(file_name=file_name), dpi=100)
        fig.savefig('images/{file_name}.png'.format(file_name=file_name))
        plt.cla()
| true |
0960a60b1ae4497895e2bcb18c806df3d838d955 | Python | MacHu-GWU/HSH-toolbox | /HSH_unit_test/DBA.sqlite3_UT.py | UTF-8 | 1,552 | 2.859375 | 3 | [] | no_license | ##encoding=utf8
##version =py27
##author =sanhe
##date =2014-09-12
from __future__ import print_function
from HSH.DBA.hsh_sqlite3 import Database_schema, iterC, prt_all, stable_insertmany
import sqlite3
import datetime
def unit_test1():
    """Create a sample 'employee.db' with two tables and one row each, then
    print the schema discovered by Database_schema.

    On a repeat run the CREATE TABLE statements fail because the tables
    already exist; that is reported and the schema is still printed.
    """
    conn = None
    try:
        conn = sqlite3.connect("employee.db")
        c = conn.cursor()
        c.execute("CREATE TABLE people (id INTEGER PRIMARY KEY NOT NULL, name TEXT, enroll_date DATE);")
        c.execute("CREATE TABLE salary (id INTEGER PRIMARY KEY NOT NULL, hour_rate INTEGER);")
        c.execute("INSERT INTO people (id, name, enroll_date) VALUES (?, ?, ?)", (1, "Jack", datetime.date(2014,8,15) ) )
        c.execute("INSERT INTO salary (id, hour_rate) VALUES (?, ?)", (1, 25) )
        conn.commit()
    except sqlite3.Error:  # narrowed from a bare except that hid unrelated bugs
        # The original message named the wrong file ('records.db'); this DB is 'employee.db'.
        print("""Something Wrong, please delete 'employee.db' then proceed""")
    finally:
        if conn is not None:
            conn.close()  # the original leaked the connection
    db_schema = Database_schema("employee.db")
    print(db_schema)
    print(db_schema.people)
    print(db_schema.salary)
def unit_test2():
    """Test the functionality of stable_insertmany: one of the new rows
    (id=3) collides with a seeded primary key, presumably tolerated by
    stable_insertmany without aborting the batch -- then print the table.
    """
    conn = sqlite3.connect(":memory:")
    cursor = conn.cursor()
    cursor.execute("CREATE TABLE test (id INTEGER PRIMARY KEY NOT NULL, number INTEGER)")
    # Seed the table so a later insert collides on id=3.
    seed_rows = [(1, 10), (3, 10), (5, 10)]
    cursor.executemany("INSERT INTO test VALUES (?, ?)", seed_rows)
    overlapping_rows = [(2, 10), (3, 10), (4, 10)]
    stable_insertmany(conn, cursor, "INSERT INTO test VALUES (?, ?)", overlapping_rows)
    cursor.execute("SELECT * FROM test")
    prt_all(cursor)
# Run both demos when executed as a script.
if __name__ == "__main__":
    unit_test1()
    unit_test2()
| true |
f4e4d716222d0078735f98e84052cbbee5e18070 | Python | yuzi40277738/HAR | /har_paddle_v1.8/paddle_model.py | UTF-8 | 14,424 | 2.6875 | 3 | [] | no_license | #加载飞桨和相关类库
import paddle
import paddle.fluid as fluid
from paddle.fluid.dygraph import nn
import paddle.fluid.dygraph as dy
from paddle.fluid import layers
import numpy as np
import os
print(paddle.__version__)
class HarFcn(fluid.dygraph.Layer):
    """FCN-style HAR classifier: three Conv2D+BatchNorm+Dropout stages whose
    globally-averaged feature maps are concatenated and fed to a 2-layer
    classifier head over 5 activity classes.

    Input is assumed to be (batch, 1, 3, 150) -- 3 sensor axes x 150 samples,
    as used by the __main__ smoke tests below.
    """
    __name__ = 'harfcn'
    def __init__(self):
        super(HarFcn, self).__init__()
        # Stage 1: 1 -> 128 channels, 3x3 conv, same padding.
        self.cnn1 = dy.Sequential(
            dy.Conv2D(num_channels=1, num_filters=128, filter_size=3, stride=1, padding=1),
            dy.BatchNorm(num_channels=128),
            dy.Dropout(p=.2),
        )
        # Stages 2 and 3: 128 -> 128 channels.
        self.cnn2 = dy.Sequential(
            dy.Conv2D(num_channels=128, num_filters=128, filter_size=3, stride=1, padding=1),
            dy.BatchNorm(num_channels=128),
            dy.Dropout(p=.2),
        )
        self.cnn3 = dy.Sequential(
            dy.Conv2D(num_channels=128, num_filters=128, filter_size=3, stride=1, padding=1),
            dy.BatchNorm(num_channels=128),
            dy.Dropout(p=.2),
        )
        # Classifier head: 384 = 3 stages x 128 pooled channels.
        self.cls = dy.Sequential(
            dy.Linear(input_dim=384, output_dim=128),
            dy.Dropout(p=.2),
            dy.Linear(input_dim=128, output_dim=5),
        )
    # Forward pass of the network.
    def forward(self, x):
        x = self.cnn1(x)
        # pool_size (3,150) covers the whole 3x150 map, i.e. global average pooling.
        x1 = layers.pool2d(x, pool_size=(3,150), pool_type='avg')
        x = self.cnn2(x)
        x2 = layers.pool2d(x, pool_size=(3,150), pool_type='avg')
        x = self.cnn3(x)
        x3 = layers.pool2d(x, pool_size=(3,150), pool_type='avg')
        # print(x1.shape, x2.shape)
        # Concatenate the per-stage pooled features along channels, flatten.
        y = layers.concat([x1,x2,x3], axis=1)
        y = layers.reshape(y, shape=[y.shape[0],-1,])
        # print('y:', y.shape)
        # y = layers.concat([h,y], axis=1)
        # print(x.shape)
        y = self.cls(y)
        # Class probabilities, shape (batch, 5).
        y = layers.softmax(y, axis=1)
        return y
# Smoke test: push a random (batch=3, 1 channel, 3 axes, 150 samples) batch
# through the network in dygraph mode and print the output shape.
if __name__ == '__main__':
    with fluid.dygraph.guard():
        model = HarFcn()
        model.eval()
        x = np.random.rand(3,1,3,150).astype(np.float32)
        x = dy.to_variable(x)
        y = model(x)
        print(y.shape)
from paddle.fluid.dygraph import Linear, to_variable, TracedLayer
# https://www.paddlepaddle.org.cn/documentation/docs/zh/api_cn/dygraph_cn/TracedLayer_cn.html
# Trace the dygraph model into a static graph and export it as an
# inference model (for later conversion with paddle_lite_opt, see below).
if __name__ == '__main__':
    exe = fluid.Executor(fluid.CPUPlace())
    # Define the inference pass.
    with fluid.dygraph.guard():
        model = HarFcn()
        model.eval()
        # Save the dygraph model:
        # fluid.save_dygraph(model.state_dict(), 'lstmfcn')
        # Save the static-graph model.
        image = np.random.rand(1,1,3,150).astype(np.float32)
        image = fluid.dygraph.to_variable(image)
        # class paddle.fluid.dygraph.TracedLayer(program, parameters, feed_names, fetch_names)
        out_dygraph, static_layer = TracedLayer.trace(model, inputs=[image])
        # Run the traced static graph (uses an Executor internally).
        out_static_graph = static_layer([image])
        print(out_static_graph[0].shape) # (2, 10)
        # Export the traced graph as an inference model.
        static_layer.save_inference_model(dirname='lite')
        print("Saved")
# if __name__ == '__main__':
# exe = fluid.Executor(fluid.CPUPlace())
# # 定义预测过程
# with fluid.dygraph.guard():
# model = HarFcn()
# # 加载模型参数
# model_dict, _ = fluid.load_dygraph("mnist_demo")
# model.load_dict(model_dict)
# # 灌入数据
# model.eval()
# tensor_img = np.random.rand(1,1,3,150).astype(np.float32)
# result = model(fluid.dygraph.to_variable(tensor_img))
# # 预测输出取整,即为预测的数字,打印结果
# print("本次预测是:", result.numpy().shape)
# print("本次预测是:", result.numpy())
# # print(model.__dict__)
# # 保存模型
# fluid.save_dygraph(model.state_dict(), 'mnist_demo')
'''
转换模型
!pip install paddlelite
# https://paddle-lite.readthedocs.io/zh/latest/user_guides/model_optimize_tool.html
!paddle_lite_opt --print_model_ops=true --model_dir=fall --valid_targets=arm
!paddle_lite_opt \
--model_dir=./fall \
--optimize_out_type=naive_buffer \
--optimize_out=lstmfcn \
--valid_targets=arm \
--record_tailoring_info =true
WARNING: Logging before InitGoogleLogging() is written to STDERR
I0726 12:23:20.354004 602 cxx_api.cc:251] Load model from file.
I0726 12:23:20.377671 602 optimizer.h:202] == Running pass: lite_quant_dequant_fuse_pass
I0726 12:23:20.378165 602 optimizer.h:219] == Finished running: lite_quant_dequant_fuse_pass
I0726 12:23:20.378181 602 optimizer.h:202] == Running pass: weight_quantization_preprocess_pass
I0726 12:23:20.378237 602 optimizer.h:219] == Finished running: weight_quantization_preprocess_pass
I0726 12:23:20.378247 602 optimizer.h:202] == Running pass: lite_conv_elementwise_fuse_pass
I0726 12:23:20.378563 602 pattern_matcher.cc:108] detected 3 subgraph
I0726 12:23:20.378782 602 optimizer.h:219] == Finished running: lite_conv_elementwise_fuse_pass
I0726 12:23:20.378794 602 optimizer.h:202] == Running pass: lite_conv_bn_fuse_pass
I0726 12:23:20.379238 602 pattern_matcher.cc:108] detected 3 subgraph
I0726 12:23:20.380278 602 optimizer.h:219] == Finished running: lite_conv_bn_fuse_pass
I0726 12:23:20.380304 602 optimizer.h:202] == Running pass: lite_conv_elementwise_fuse_pass
I0726 12:23:20.380762 602 optimizer.h:219] == Finished running: lite_conv_elementwise_fuse_pass
I0726 12:23:20.380786 602 optimizer.h:202] == Running pass: lite_conv_activation_fuse_pass
I0726 12:23:20.381784 602 optimizer.h:219] == Finished running: lite_conv_activation_fuse_pass
I0726 12:23:20.381821 602 optimizer.h:202] == Running pass: lite_var_conv_2d_activation_fuse_pass
I0726 12:23:20.381834 602 optimizer.h:215] - Skip lite_var_conv_2d_activation_fuse_pass because the target or kernel does not match.
I0726 12:23:20.381844 602 optimizer.h:202] == Running pass: lite_fc_fuse_pass
I0726 12:23:20.382098 602 optimizer.h:219] == Finished running: lite_fc_fuse_pass
I0726 12:23:20.382135 602 optimizer.h:202] == Running pass: lite_shuffle_channel_fuse_pass
I0726 12:23:20.382396 602 optimizer.h:219] == Finished running: lite_shuffle_channel_fuse_pass
I0726 12:23:20.382416 602 optimizer.h:202] == Running pass: lite_transpose_softmax_transpose_fuse_pass
I0726 12:23:20.382529 602 optimizer.h:219] == Finished running: lite_transpose_softmax_transpose_fuse_pass
I0726 12:23:20.382542 602 optimizer.h:202] == Running pass: lite_interpolate_fuse_pass
I0726 12:23:20.382720 602 optimizer.h:219] == Finished running: lite_interpolate_fuse_pass
I0726 12:23:20.382735 602 optimizer.h:202] == Running pass: identity_scale_eliminate_pass
I0726 12:23:20.383025 602 pattern_matcher.cc:108] detected 1 subgraph
I0726 12:23:20.383107 602 optimizer.h:219] == Finished running: identity_scale_eliminate_pass
I0726 12:23:20.383137 602 optimizer.h:202] == Running pass: elementwise_mul_constant_eliminate_pass
I0726 12:23:20.383234 602 optimizer.h:219] == Finished running: elementwise_mul_constant_eliminate_pass
I0726 12:23:20.383249 602 optimizer.h:202] == Running pass: lite_sequence_pool_concat_fuse_pass
I0726 12:23:20.383257 602 optimizer.h:215] - Skip lite_sequence_pool_concat_fuse_pass because the target or kernel does not match.
I0726 12:23:20.383265 602 optimizer.h:202] == Running pass: __xpu__resnet_fuse_pass
I0726 12:23:20.383275 602 optimizer.h:215] - Skip __xpu__resnet_fuse_pass because the target or kernel does not match.
I0726 12:23:20.383281 602 optimizer.h:202] == Running pass: __xpu__multi_encoder_fuse_pass
I0726 12:23:20.383289 602 optimizer.h:215] - Skip __xpu__multi_encoder_fuse_pass because the target or kernel does not match.
I0726 12:23:20.383298 602 optimizer.h:202] == Running pass: __xpu__embedding_with_eltwise_add_fuse_pass
I0726 12:23:20.383306 602 optimizer.h:215] - Skip __xpu__embedding_with_eltwise_add_fuse_pass because the target or kernel does not match.
I0726 12:23:20.383314 602 optimizer.h:202] == Running pass: __xpu__fc_fuse_pass
I0726 12:23:20.383322 602 optimizer.h:215] - Skip __xpu__fc_fuse_pass because the target or kernel does not match.
I0726 12:23:20.383330 602 optimizer.h:202] == Running pass: identity_dropout_eliminate_pass
I0726 12:23:20.383339 602 optimizer.h:215] - Skip identity_dropout_eliminate_pass because the target or kernel does not match.
I0726 12:23:20.383347 602 optimizer.h:202] == Running pass: quantized_op_attributes_inference_pass
I0726 12:23:20.383355 602 optimizer.h:215] - Skip quantized_op_attributes_inference_pass because the target or kernel does not match.
I0726 12:23:20.383364 602 optimizer.h:202] == Running pass: npu_subgraph_pass
I0726 12:23:20.383373 602 optimizer.h:215] - Skip npu_subgraph_pass because the target or kernel does not match.
I0726 12:23:20.383379 602 optimizer.h:202] == Running pass: xpu_subgraph_pass
I0726 12:23:20.383388 602 optimizer.h:215] - Skip xpu_subgraph_pass because the target or kernel does not match.
I0726 12:23:20.383395 602 optimizer.h:202] == Running pass: bm_subgraph_pass
I0726 12:23:20.383404 602 optimizer.h:215] - Skip bm_subgraph_pass because the target or kernel does not match.
I0726 12:23:20.383420 602 optimizer.h:202] == Running pass: apu_subgraph_pass
I0726 12:23:20.383430 602 optimizer.h:215] - Skip apu_subgraph_pass because the target or kernel does not match.
I0726 12:23:20.383437 602 optimizer.h:202] == Running pass: rknpu_subgraph_pass
I0726 12:23:20.383445 602 optimizer.h:215] - Skip rknpu_subgraph_pass because the target or kernel does not match.
I0726 12:23:20.383453 602 optimizer.h:202] == Running pass: static_kernel_pick_pass
I0726 12:23:20.385087 602 optimizer.h:219] == Finished running: static_kernel_pick_pass
I0726 12:23:20.385113 602 optimizer.h:202] == Running pass: variable_place_inference_pass
I0726 12:23:20.385563 602 optimizer.h:219] == Finished running: variable_place_inference_pass
I0726 12:23:20.385584 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.385596 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.385601 602 optimizer.h:202] == Running pass: type_target_cast_pass
I0726 12:23:20.385733 602 optimizer.h:219] == Finished running: type_target_cast_pass
I0726 12:23:20.385746 602 optimizer.h:202] == Running pass: variable_place_inference_pass
I0726 12:23:20.386023 602 optimizer.h:219] == Finished running: variable_place_inference_pass
I0726 12:23:20.386041 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.386054 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.386063 602 optimizer.h:202] == Running pass: io_copy_kernel_pick_pass
I0726 12:23:20.386191 602 optimizer.h:219] == Finished running: io_copy_kernel_pick_pass
I0726 12:23:20.386207 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.386219 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.386229 602 optimizer.h:202] == Running pass: variable_place_inference_pass
I0726 12:23:20.386510 602 optimizer.h:219] == Finished running: variable_place_inference_pass
I0726 12:23:20.386528 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.386536 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.386543 602 optimizer.h:202] == Running pass: type_precision_cast_pass
I0726 12:23:20.386776 602 optimizer.h:219] == Finished running: type_precision_cast_pass
I0726 12:23:20.386788 602 optimizer.h:202] == Running pass: variable_place_inference_pass
I0726 12:23:20.386973 602 optimizer.h:219] == Finished running: variable_place_inference_pass
I0726 12:23:20.386983 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.386991 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.386996 602 optimizer.h:202] == Running pass: type_layout_cast_pass
I0726 12:23:20.387240 602 optimizer.h:219] == Finished running: type_layout_cast_pass
I0726 12:23:20.387253 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.387260 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.387267 602 optimizer.h:202] == Running pass: variable_place_inference_pass
I0726 12:23:20.387459 602 optimizer.h:219] == Finished running: variable_place_inference_pass
I0726 12:23:20.387470 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.387477 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.387482 602 optimizer.h:202] == Running pass: mlu_subgraph_pass
I0726 12:23:20.387492 602 optimizer.h:215] - Skip mlu_subgraph_pass because the target or kernel does not match.
I0726 12:23:20.387501 602 optimizer.h:202] == Running pass: runtime_context_assign_pass
I0726 12:23:20.387516 602 optimizer.h:219] == Finished running: runtime_context_assign_pass
I0726 12:23:20.387523 602 optimizer.h:202] == Running pass: argument_type_display_pass
I0726 12:23:20.387531 602 optimizer.h:219] == Finished running: argument_type_display_pass
I0726 12:23:20.387539 602 optimizer.h:202] == Running pass: mlu_postprocess_pass
I0726 12:23:20.387547 602 optimizer.h:215] - Skip mlu_postprocess_pass because the target or kernel does not match.
I0726 12:23:20.387553 602 optimizer.h:202] == Running pass: memory_optimize_pass
I0726 12:23:20.387697 602 memory_optimize_pass.cc:160] There are 1 types device var.
I0726 12:23:20.387774 602 memory_optimize_pass.cc:209] cluster: t_51
I0726 12:23:20.387785 602 memory_optimize_pass.cc:209] cluster: t_50
I0726 12:23:20.387791 602 memory_optimize_pass.cc:209] cluster: t_49
I0726 12:23:20.387796 602 memory_optimize_pass.cc:209] cluster: t_0
I0726 12:23:20.387802 602 memory_optimize_pass.cc:209] cluster: t_26
I0726 12:23:20.388633 602 optimizer.h:219] == Finished running: memory_optimize_pass
I0726 12:23:20.388697 602 generate_program_pass.h:37] insts.size 31
I0726 12:23:20.455926 602 model_parser.cc:588] Save naive buffer model in 'lstmfcn.nb' successfully
Save the optimized model into :lstmfcnsuccessfully
''' | true |
d7a17fc29390557def686dcdec1b346ade8cd853 | Python | logeswari-j/python | /90.py | UTF-8 | 107 | 3.296875 | 3 | [] | no_license | strg1=input()
# Collect the numeric characters of strg1 and print them as one string.
sarr16 = [ch for ch in strg1 if ch.isnumeric()]
print("".join(sarr16))
| true |
c93489d460599dc6754e7374ea04b3290753d213 | Python | bopopescu/projects-2 | /cs365/hw9/submit/tail.py | UTF-8 | 445 | 3.703125 | 4 | [] | no_license | # tail.py
# Mitchell Wade
# March 24, 2015
# This program prints the last n lines of a file. It
# takes two cmd line arguments which are the name of
# the file and the num of lines to print
import sys
if (len(sys.argv) != 3):
print "usage: python tail.py filename numlines"
sys.exit()
file = open(sys.argv[1])
n = int(sys.argv[2])
lines = list(file)
for line in file:
lines.append(line.strip())
for line in lines[-n:]:
print line
| true |
e8830782922f1fe075cfed356a41769f90838bd6 | Python | lusiferjr/Pandas | /project_2/temperature_anomalies.py | UTF-8 | 5,143 | 3.796875 | 4 | [] | no_license | import pandas as ps
# READ CSV
# -9999 is the dataset's no-data marker; map it to NaN on read.
data=ps.read_csv('book.csv',na_values=[-9999])
#FIRST
""" - The numerical values for rainfall and temperature read in as numbers
- The second row of the datafile should be skipped, but the text labels for the columns should be from the first row
- The no-data values should properly be converted to `NaN`"""
# Drop label 0 = the file's second row (units row); column names stay from row 1.
data=data.drop([0])
print(data)
#SECOND
"""- How many non-NaN values are there for `TAVG`?
- What about for `TMIN`?
- How many days total are covered by this data file?
- When is the first observation?
- When is the last?
- What was the average temperature of the whole data file (all years)?
- What was the `TMAX` temperature of the ``Summer 69`` (i.e. including months May, June, July, August of the year 1969)?"""
# Non-NaN counts = total length minus the null count.
temp=data['TAVG'].isnull().sum()
print('total not nan values in TAVG:',data['TAVG'].__len__()-temp)
temp=data['TMIN'].isnull().sum()
print('total not nan values in TMIN:',data['TMIN'].__len__()-temp)
print('total days in data',data['DATE'].__len__())
# After drop([0]) the index runs 1..N, so label N is the last row.
print('First observation:',data['DATE'][1],'Last observation:',data['DATE'][data['DATE'].__len__()])
# Replace the TAVG column (string-typed after read) with ints, then average.
df=data.dropna(subset=['TAVG'])
d=[]
for i in df['TAVG']:
    d.append(int(i))
df=df.drop(['TAVG'],axis=1)
df.insert(6,'TAVG',d)
print('average temp is ',df['TAVG'].mean())
d.clear()
# Same int conversion for TMAX.
df2=data.dropna(subset=['TMAX'])
d=list(df2['TMAX'].astype(int))
df2=df2.drop(['TMAX'],axis=1)
df2.insert(7,'TMAX',d)
print(df2['DATE'])
tmax=df2['TMAX'].values
d.clear()
# Collect TMAX for May-August 1969 ('YYYYMM' prefix of DATE) and take the max.
c=0
for i in df2['DATE']:
    if (i[:6]=='196905' or i[:6]=='196906' or i[:6]=='196907' or i[:6]=='196908'):
        d.append(tmax[c])
    c+=1
print('max value in TMAX in summer of 69',max(d))
#second
print("SECONDS")
"""1. Calculate the monthly average temperatures for the entire data file using the approach taught in the lecture.
2. Save the output to a new Pandas DataFrame called `monthlyData`.
3. Create a new column in the `monthlyData` DataFrame called `TempsC` that has the monthly temperatures in Celsius.
4. Upload the updated script to your repository for this week's exercise."""
#converting values in int
d=[]
data=data.dropna(subset=['TAVG'])
d=list(data['TAVG'].astype(int))
data=data.drop(['TAVG'],axis=1)
data.insert(7,'TAVG',d)
#converting in c
# Fahrenheit -> Celsius; convert() appends into the module-level list c as a side effect.
c=[]
def convert(a):
    c.append((a-32)*(5/9))
for i in data['TAVG']:
    convert(i)
data['TC']=c
#grouping
# Group key 'mn' = 'YYYYMM' prefix of DATE.
temp=[]
for i in data['DATE']:
    temp.append(i[:6])
data['mn']=temp
df=data.groupby('mn')
#doing monthly mean
mo=[]
me=[]
mf=[]
for i,j in df:
    mo.append(i)
    me.append(j['TC'].mean())
    mf.append(j['TAVG'].mean())
#creating dataframe
monthlyData=ps.DataFrame({
    'month':mo,
    'mean in c':me,
    'mean in f':mf
})
print(monthlyData)
#T#THIRD
# Part 3: build the 1952-1980 monthly reference temperatures (DataFrame `df`,
# one row per calendar month), then add a `diff` anomaly column to
# monthlyData: monthly mean (C) minus that calendar month's reference mean.
print("third")
import statistics as st  # used for st.mean below; the original file never imported it (NameError)
# Tag every row of monthlyData with its year ('YYYY' prefix of 'YYYYMM').
temp.clear()
for i in monthlyData['month']:
    temp.append(i[:4])
monthlyData['ye']=temp
# Keep only the reference period 1952-1980.
df=monthlyData.groupby('ye')
final_df=ps.DataFrame()
for i,j in df:
    if 1951 < int(i) < 1981:
        final_df=ps.concat([final_df,j])
# Tag the reference rows with their calendar month number (1-12).
mo.clear()
for i in final_df['month']:
    mo.append(int(i[4:]))
final_df['mn']=mo
final_df=final_df.groupby('mn')
# One reference mean per calendar month, labelled with the month name.
mo.clear()
me.clear()
mf.clear()
name=['','January','February','March','April','May','June','July','August','September','October','November','December']
for i,j in final_df:
    # BUG FIX: reset per month -- the original accumulated every previous
    # month's values into each mean, so only January was correct.
    mo.clear()
    for k in j['mean in c']:
        mo.append(k)
    me.append(st.mean(mo))
    mf.append(name[int(i)])
df=ps.DataFrame({
    'month':mf,
    'average':me
})
print(df)
# Anomaly for every month: its Celsius mean minus the reference mean for the
# same calendar month (reference rows are indexed 0..11 = month-1).
mo.clear()
mf.clear()
mf=monthlyData['mean in c'].values
me.clear()
c=0
for i in monthlyData['month']:
    mo.append(mf[c]-df.loc[int(i[4:])-1]['average'])
    c+=1
monthlyData['diff']=mo
monthlyData=monthlyData.drop('ye',axis=1)
print(monthlyData)
| true |
2e6f5a0664b9c4e139e9b5a0c9cb50e4fecda2ca | Python | WyattCast44/intro-to-programming | /week-7/2/commands/DrawShapeFile.py | UTF-8 | 517 | 2.765625 | 3 | [] | no_license | from src.shape_painter import *
class DrawShapeFile:
    """Command that prompts for a shape file and draws the shapes it contains."""

    signature = "draw:shapefile"
    description = "Allows you to specify a shape file and we will draw the shapes in that file"

    def __init__(self, application):
        # Keep the application so handle() can prompt the user for input.
        self.application = application

    def handle(self):
        """Ask the user for a shape-file path, parse it, and draw the shapes."""
        self.filename = self.application.input().ask(
            'What shapefile would you like to use?')
        # `with` guarantees the handle is closed; the original leaked it.
        with open(self.filename) as shapefile:
            shapes = getShapes(shapefile)
            drawShapes(shapes)
| true |
3dddb456a8f004d042e158b9803c2e347b420ab3 | Python | tpherndon/tornado_riak_blog | /riak_client.py | UTF-8 | 4,572 | 2.515625 | 3 | [] | no_license | import base64
import json
import random
import urllib
import urlparse

from tornado import httpclient
class RiakTornadoClient(object):
    """Non-blocking Riak HTTP client built on Tornado's AsyncHTTPClient.
    Every operation takes a `callback` which Tornado invokes with the raw
    HTTPResponse; no response parsing is done here.
    """
    def __init__(self, host='127.0.0.1', port=8098, prefix='riak', mapred_prefix='mapred', client_id=None):
        """Construct a new Tornado client for Riak. Copied from RiakHttpClient in large part."""
        self._host = host
        self._port = port
        self._prefix = prefix
        self._mapred_prefix = mapred_prefix
        self._client_id = client_id
        if not self._client_id:
            # No id supplied: generate a random one so the server can tell clients apart.
            self._client_id = 'py_%s' % base64.b64encode(
                str(random.randint(1, 1073741824)))
        self._client = httpclient.AsyncHTTPClient()
    def build_rest_url(self, bucket=None, key=None, path=None, query='', fragment=''):
        """Assemble the full http://host:port URL for a bucket/key pair or an
        explicit path. `query` may be a dict of query parameters; an explicit
        `path` overrides the bucket/key/prefix entirely.
        """
        if not bucket and not path:
            raise Exception("You need to supply either a bucket value or a path value.")
        if bucket:
            url = ''.join(('/', self._prefix))
            url = '/'.join((url, urllib.quote_plus(bucket.get_name())))
            if key:
                url = '/'.join((url, urllib.quote_plus(key)))
        if path:
            # If the user specifies a path, use that path and nothing else
            # Thus, ditch the bucket, key, prefix, etc.
            url = path
        if query:
            q_items = ['='.join((urllib.quote_plus(k), urllib.quote_plus(str(v)))) for k,v in query.items()]
            query = '&'.join(q_items)
        loc = ':'.join((self._host, str(self._port)))
        scheme = 'http'
        netloc = loc
        path = url
        url_parts = (scheme, netloc, path, query, fragment)
        return urlparse.urlunsplit(url_parts)
    def to_link_header(self, link):
        """Render a Riak link object as a `Link:` header fragment:
        </prefix/bucket/key>; riaktag="tag".
        """
        header = '/'.join(('<', self._prefix, urllib.quote_plus(link.get_bucket()),
                           urllib.quote_plus(link.get_key())))
        header = ''.join((header, '>; riaktag="', urllib.quote_plus(link.get_tag()), '"'))
        return header
    def ping(self, callback):
        """GET /ping -- liveness check."""
        url = self.build_rest_url(path='/ping')
        self._client.fetch(url, callback)
    def get(self, callback, robj, r=1, vtag=None):
        """Fetch an object with read-quorum `r`, optionally a specific sibling `vtag`."""
        query = {'r': r}
        if vtag:
            query['vtag'] = vtag
        url = self.build_rest_url(robj.get_bucket(), robj.get_key(), query=query)
        self._client.fetch(url, callback)
    def put(self, callback, robj, w=1, dw=1, return_body=True):
        """Store an object with write-quorum `w` / durable-write `dw`;
        sends vector clock and link headers when the object has them.
        """
        query = {'w': w, 'dw': dw}
        if return_body:
            query['returnbody'] = 'true'
        url = self.build_rest_url(robj.get_bucket(), robj.get_key(), query=query)
        headers = {'Accept': 'text/plain, */*; q=0.5',
                   'Content-Type': robj.get_content_type(),
                   'X-Riak-ClientId': self._client_id}
        if robj.vclock():
            headers['X-Riak-Vclock'] = robj.vclock()
        if robj.get_links():
            link_body = ', '.join([self.to_link_header(link) for link in robj.get_links()])
            headers['Link'] = link_body
        request = httpclient.HTTPRequest(url, 'PUT', headers, robj.get_data())
        self._client.fetch(request, callback)
    def delete(self, callback, robj, rw=1):
        """Delete an object with read-write quorum `rw`."""
        query = {'rw': rw}
        url = self.build_rest_url(robj.get_bucket(), robj.get_key(), query=query)
        request = httpclient.HTTPRequest(url, 'DELETE')
        self._client.fetch(request, callback)
    def get_keys(self, callback, bucket):
        """List a bucket's keys (plus props); debug-prints the URL."""
        query = {'props': 'true', 'keys': 'true'}
        url = self.build_rest_url(bucket, query=query)
        print "keys url: ", url
        self._client.fetch(url, callback)
    def get_bucket_props(self, callback, bucket):
        """Fetch a bucket's properties without its keys; debug-prints the URL."""
        query = {'props': 'true', 'keys': 'false'}
        url = self.build_rest_url(bucket, query=query)
        print "props url: ", url
        self._client.fetch(url, callback)
    def set_bucket_props(self, callback, bucket, props):
        """PUT a JSON {'props': ...} document to update bucket properties."""
        url = self.build_rest_url(bucket)
        headers = {'Content-Type': 'application/json'}
        content = json.dumps({'props': props})
        request = httpclient.HTTPRequest(url, 'PUT', headers=headers, body=content)
        self._client.fetch(request, callback)
    def mapred(self, callback, inputs, query, timeout=None):
        """Submit a map/reduce job {'inputs', 'query'[, 'timeout']} as JSON."""
        job = {'inputs': inputs, 'query': query}
        if timeout:
            job['timeout'] = timeout
        content = json.dumps(job)
        url = self.build_rest_url(path=''.join(('/', self._mapred_prefix)))
        request = httpclient.HTTPRequest(url, 'PUT', body=content)
        self._client.fetch(request, callback)
| true |
87393458aabdd33891a55865741201b8316c40a5 | Python | polaris79/mnsrf_ranking_suggestion | /ranking_baselines/DUET/model.py | UTF-8 | 5,688 | 2.546875 | 3 | [
"MIT"
] | permissive | ###############################################################################
# Author: Wasi Ahmad
# Project: https://www.microsoft.com/en-us/research/wp-content/uploads/2016/10/wwwfp0192-mitra.pdf
# Date Created: 7/23/2017
#
# File Description: This script implements the deep semantic similarity model.
###############################################################################
import torch
import torch.nn as nn
import torch.nn.functional as f
# verified from https://github.com/bmitra-msft/NDRM/blob/master/notebooks/Duet.ipynb
class DUET(nn.Module):
    """Learning to Match using Local and Distributed Representations of Text for Web Search."""
    def __init__(self, dictionary, args):
        """Constructor: builds the local and distributed sub-models; the
        distributed model is sized by the vocabulary length."""
        super(DUET, self).__init__()
        self.dictionary = dictionary
        self.config = args
        self.local_model = LocalModel(self.config)
        self.distributed_model = DistributedModel(self.config, len(self.dictionary))
    def forward(self, batch_queries, batch_docs):
        """
        Forward function of the duet model: sums the local and distributed
        match scores and returns their log-softmax over the candidate docs.
        :param batch_queries: query tensor (per-query term representation)
        :param batch_docs: candidate docs tensor [batch_size x num_rel_docs_per_query x ...]
        :return: log-softmax scores [batch_size x num_rel_docs_per_query]
        """
        local_score = self.local_model(batch_queries, batch_docs)
        distributed_score = self.distributed_model(batch_queries, batch_docs)
        total_score = local_score + distributed_score
        return f.log_softmax(total_score, 1)
class LocalModel(nn.Module):
    """Implementation of the local model: scores query/doc pairs from their
    exact-match interaction matrix (bmm of query and doc term tensors)."""
    def __init__(self, args):
        """Constructor. `args` must provide max_doc_length, nfilters,
        local_filter_size, dropout and max_query_length."""
        super(LocalModel, self).__init__()
        self.config = args
        self.conv1d = nn.Conv1d(self.config.max_doc_length, self.config.nfilters, self.config.local_filter_size)
        self.drop = nn.Dropout(self.config.dropout)
        self.fc1 = nn.Linear(self.config.max_query_length, 1)
        self.fc2 = nn.Linear(self.config.nfilters, self.config.nfilters)
        self.fc3 = nn.Linear(self.config.nfilters, 1)
    def forward(self, batch_queries, batch_clicks):
        """Return a score per (query, candidate doc) pair, shape
        [batch_size x num_docs]. batch_queries appears to be
        (batch, query_len, term_dim) and batch_clicks
        (batch, num_docs, doc_len, term_dim) -- TODO confirm with the loader.
        """
        output_size = batch_clicks.size()[:2]
        # Repeat each query once per candidate doc, then flatten batch x docs.
        batch_queries = batch_queries.unsqueeze(1).expand(batch_queries.size(0), batch_clicks.size(1),
                                                          *batch_queries.size()[1:])
        batch_queries = batch_queries.contiguous().view(-1, *batch_queries.size()[2:]).float()
        batch_clicks = batch_clicks.view(-1, *batch_clicks.size()[2:]).transpose(1, 2).float()
        # Query-doc interaction matrix via batched matrix multiply.
        interaction_feature = torch.bmm(batch_queries, batch_clicks).transpose(1, 2)
        convolved_feature = self.conv1d(interaction_feature)
        mapped_feature1 = f.tanh(self.fc1(convolved_feature.view(-1, convolved_feature.size(2)))).squeeze(1)
        mapped_feature1 = mapped_feature1.view(*convolved_feature.size()[:-1])
        mapped_feature2 = self.drop(f.tanh(self.fc2(mapped_feature1)))
        # Collapse filters to a single score and restore (batch, num_docs).
        score = f.tanh(self.fc3(mapped_feature2)).view(*output_size)
        return score
class DistributedModel(nn.Module):
    """Distributed (semantic match) component of the duet ranking model.

    Learns dense representations for the query and each candidate
    document with a shared vocabulary-channel convolution, and scores
    their element-wise (Hadamard) similarity.
    """

    def __init__(self, args, vocab_size):
        """Create the layers from the run configuration and vocab size."""
        super(DistributedModel, self).__init__()
        self.config = args
        cfg = args
        # Shared embedding convolution over vocabulary channels.
        self.conv1d = nn.Conv1d(vocab_size, cfg.nfilters, cfg.dist_filter_size)
        self.drop = nn.Dropout(cfg.dropout)
        self.fc1_query = nn.Linear(cfg.nfilters, cfg.nfilters)
        self.conv1d_doc = nn.Conv1d(cfg.nfilters, cfg.nfilters, 1)
        # NOTE(review): this input size equals the conv+pool output length
        # only when dist_filter_size == 3 — confirm against the config.
        self.fc2 = nn.Linear(cfg.max_doc_length - cfg.pool_size - 1, 1)
        self.fc3 = nn.Linear(cfg.nfilters, cfg.nfilters)
        self.fc4 = nn.Linear(cfg.nfilters, 1)

    def forward(self, batch_queries, batch_clicks):
        """Return a score per (query, doc) pair, shape [batch x num_docs]."""
        out_shape = batch_clicks.size()[:2]
        n_docs = out_shape[1]
        queries = batch_queries.transpose(1, 2).float()
        docs = batch_clicks.view(-1, *batch_clicks.size()[2:]).transpose(1, 2).float()
        # Shared convolution; queries are pooled to a single vector while
        # documents keep a windowed positional sequence.
        conv_q = self.conv1d(queries)
        conv_d = self.conv1d(docs)
        pooled_q = f.max_pool1d(conv_q, conv_q.size(2)).squeeze(2)
        pooled_d = f.max_pool1d(conv_d, self.config.pool_size, 1)
        query_vec = f.tanh(self.fc1_query(pooled_q))
        doc_rep = self.conv1d_doc(pooled_d)
        # Broadcast the query vector across candidate documents and doc
        # positions, then flatten (batch, doc) for the Hadamard product.
        expanded = query_vec.unsqueeze(2).expand(*query_vec.size(), doc_rep.size(2)).unsqueeze(1)
        expanded = expanded.expand(expanded.size(0), n_docs, *expanded.size()[2:])
        expanded = expanded.contiguous().view(-1, *expanded.size()[2:])
        sim = expanded * doc_rep
        # Collapse positions (fc2), then filters (fc3 -> dropout -> fc4).
        hidden = f.tanh(self.fc2(sim.view(-1, sim.size(2)))).squeeze(1)
        hidden = hidden.view(*sim.size()[:-1])
        hidden = self.drop(f.tanh(self.fc3(hidden)))
        return f.tanh(self.fc4(hidden)).view(*out_shape)
| true |