blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
ad3956873fe6685505139c1b791dcb7a7bed850d | Python | YuyuZha0/leetcode-python | /four_sum.py | UTF-8 | 1,328 | 3.046875 | 3 | [
"Apache-2.0"
class Solution(object):
    """LeetCode 18 (4Sum) via a recursive k-sum reduction on a sorted list."""

    @staticmethod
    def kSum(nums, k, start, target):
        """Return all unique ascending k-lists from sorted nums[start:] summing to target.

        :param nums: sorted list of ints
        :param k: tuple size (k >= 2)
        :param start: first index eligible for selection
        :param target: required sum
        """
        result = list()
        if k == 2:
            # Base case: classic two-pointer scan over the sorted tail.
            left, right = start, len(nums) - 1
            while left < right:
                # Skip duplicate left values so result pairs are unique.
                if left > start and nums[left - 1] == nums[left]:
                    left += 1
                    continue
                delta = nums[left] + nums[right] - target
                if delta == 0:
                    result.append([nums[left], nums[right]])
                    left += 1
                    right -= 1
                elif delta > 0:
                    right -= 1
                else:
                    left += 1
            return result
        for i in range(start, len(nums) - k + 1):
            # Skip duplicate anchors to avoid repeated tuples.
            if i > start and nums[i] == nums[i - 1]:
                continue
            sub_result = Solution.kSum(nums, k - 1, i + 1, target - nums[i])
            for sub in sub_result:
                # BUG FIX: list.insert takes (index, value); the original call
                # inserted the constant 0 at index nums[i] instead of
                # prepending nums[i] to the sub-result.
                sub.insert(0, nums[i])
                result.append(sub)
        return result

    def fourSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        nums.sort()
        return Solution.kSum(nums, 4, 0, target)
bcc1b8c73c888cbf16b40cb200752734afe64bfb | Python | JusAduc/mini-project-whats--for-dinner | /whatDoICallThis.py | UTF-8 | 3,541 | 3.90625 | 4 | [] | no_license | import random
def generator():
    """Prompt for a cuisine and print a randomly chosen restaurant.

    Offers one optional re-roll; unrecognised input restarts the prompt
    recursively.  Refactored: the five copy-pasted cuisine branches are
    collapsed into one table-driven path; prompts and behavior unchanged
    (except the "one of those" typo fix in the retry message).
    """
    # Cuisine -> candidate restaurants.
    menus = {
        "Fast Food": ["McDonald's", "Wendy's", "In-N-Out Burger"],
        "Fine Dining": ["Krust Krab", "Lindey's", "G. Michael's Bistro & Bar"],
        "Chinese": ["Ho-Toy", "Tiger + Lily", "Happy House"],
        "Fictional": ["Central Perk", "Bob's Burgers", "Paunch Burger"],
    }
    # "Random" draws from every category combined.
    menus["Random"] = [name for options in menus.values() for name in options]

    print("What type of food would you like?")
    result = input("Fast Food, Fine Dining, Chinese, Fictional, Random: ")
    if result in menus:
        restraunts = menus[result]
        # random.choices returns a one-element list; printing it keeps the
        # original output format (e.g. ["McDonald's"]).
        print(random.choices(restraunts))
        # Ask user if they want another pick from the same cuisine.
        print("Would you like a another try?")
        again = input("y / n ")
        if again == "y":
            # A second draw may repeat the first suggestion, as before.
            print(random.choices(restraunts))
        else:
            print("Enjoy your food!")
    else:
        # Typo fixed ("on of thoes" -> "one of those"); re-prompt.
        print("Please input one of those options")
        generator()
    # Used so you don't have to re-run the python file in the terminal.
    goAgain = input("Do it again (y)")
    # Divider to make the console easier to read (kind of).
    print("#############################################################################################################")
    if goAgain == "y":
        generator()
    else:
        print("Ok re-run the file")


generator()
| true |
4713c7413f06bf7ef780eabebff9eadac29489bc | Python | ghxm123/chaoticmap_git | /chaotic_maps.py | UTF-8 | 2,079 | 3.078125 | 3 | [] | no_license | import numpy as np
# def logistic_fun(state, r=2):
# x = state
# x_n = r * x * (1-x)
# return np.array([x_n])
# def henon_fun(state, a=1.4, b=0.3):
# x, y = state
# x_n = 1 - a*x**2 + y
# y_n = b*x
# return np.array([x_n,y_n])
def chua_fun(t, state, alpha=15.395, beta=28):
    """Right-hand side of Chua's circuit ODE.

    state = (x, y, z); returns np.array([dx, dy, dz]).
    See https://stackoverflow.com/questions/61127919 for the parameterisation.
    """
    # Piecewise-linear slopes of Chua's diode.
    R = -1.143
    C_2 = -0.714
    x, y, z = state
    # Three-segment piecewise-linear electrical response of the diode.
    diode = C_2*x + 0.5*(R-C_2)*(abs(x+1)-abs(x-1))
    return np.array([alpha*(y-x-diode), x - y + z, -beta * y])
def duffing_fun(t, state, alpha=-1, beta=1, delta=0.3, gamma=0.5, omega=1.2):
    """Forced Duffing oscillator right-hand side.

    state = (x, v); returns np.array([dx, dv]).
    Based on github.com/andyj1/chaotic-duffing-oscillator.
    """
    x, v = state
    # Damping + linear + cubic stiffness + periodic forcing.
    acceleration = -delta*v - alpha*x - beta*x**3 + gamma*np.cos(omega*t)
    return np.array([v, acceleration])
def lorenz_fun(t, state, sigma=10, beta=2.67, rho=28):
    """Classic Lorenz system; state = (x, y, z), returns np.array of derivatives."""
    x, y, z = state
    return np.array([
        sigma * (y - x),
        x * (rho - z) - y,
        x * y - beta * z,
    ])
def L96(t, x, N=5, F=8):
    """Lorenz 96 model with constant forcing"""
    # Each site couples to its two left neighbours and one right neighbour;
    # negative indices wrap around via Python indexing, (i+1) % N wraps the
    # right edge.  dtype=float matches the original np.zeros buffer.
    return np.array(
        [(x[(i + 1) % N] - x[i - 2]) * x[i - 1] - x[i] + F for i in range(N)],
        dtype=float)
# t = 0
# tf = 100
# h = 0.01
def rossler_fun(t, state, a=0.2, b=0.2, c=5.7):
    """Roessler attractor right-hand side; state = (x, y, z)."""
    x, y, z = state
    dxdt = - y - z
    dydt = x + a * y
    dzdt = b + z * (x - c)
    return np.array([dxdt, dydt, dzdt])
def vanderpol_fun(t, state, miu=1):
    """Van der Pol oscillator; state = (x, y), miu is the damping strength."""
    position, velocity = state
    return np.array([velocity, miu*(1 - position**2)*velocity - position])
| true |
def GCD(a, b):
    """Greatest common divisor of a and b (iterative Euclidean algorithm)."""
    while b:
        a, b = b, a % b
    return a
def LCM(a, b):
    """Least common multiple of a and b (Euclid's algorithm inlined)."""
    x, y = a, b
    while y:
        x, y = y, x % y
    return a * b // x
# Baekjoon 1773 (firecracker/clock ticks): count positions in [0, M] hit by
# at least one of the N given step sizes.  (GCD/LCM above are unused here.)
N,M = map(int,input().split())
# time[j] becomes 1 once any step size lands on position j.
time = [0 for i in range(M+1)]
# Deduplicate the N step sizes before marking multiples.
li = list(set([int(input()) for i in range(N)]))
for i in li:
    for j in range(0,M+1,i):
        time[j] = 1
# Every step marks position 0, so subtract it from the answer.
print(sum(time)-1)
e493c6781c785a45e66440e0393a871f114f87c4 | Python | RadkaValkova/SoftUni-Web-Developer | /Programming OOP Python/Exam 22082020/project/rooms/young_couple_with_children.py | UTF-8 | 760 | 3.421875 | 3 | [] | no_license | from project.appliances.fridge import Fridge
from project.appliances.laptop import Laptop
from project.appliances.tv import TV
from project.rooms.room import Room
class YoungCoupleWithChildren(Room):
    """Room occupied by a two-salary couple plus any number of children.

    Budget is the combined salaries; member count is the couple (2) plus
    one per entry in *children.
    """
    def __init__(self, family_name: str, salary_one: float, salary_two: float, *children):
        super().__init__(name=family_name, budget=salary_one+salary_two, members_count=2+len(children))
        # Fixed cost of the room itself.
        self.room_cost = 30
        self.children = list(children)
        # One TV/Fridge/Laptop set per member.  NOTE(review): the same three
        # appliance instances are repeated (list * n shares objects) -- looks
        # intentional for expense math, but verify.
        self.appliances = self.members_count * [TV(), Fridge(), Laptop()]
        # Total expenses from appliances and children; calculate_expenses is
        # presumably inherited from Room -- confirm against project.rooms.room.
        self.expenses = self.calculate_expenses(self.appliances, self.children)
#Calculate the expenses (appliances and children expenses).
# yc = YoungCoupleWithChildren('n', 2.5, 2.5, 's', 'm')
# print(yc.__dict__) | true |
d1bb1aeba54e7f283b86a1c4e2a3d86df8886712 | Python | ssddlscsdx/ANN-toy | /test.py | UTF-8 | 2,122 | 3.265625 | 3 | [] | no_license | import numpy as np
# sigmoid function # I thnk it's wrong, as the derivative is not like this
def nonlin(x, deriv=False):
    """Logistic sigmoid, or its gradient when deriv is True.

    NOTE(review): with deriv=True this computes x*(1-x), which is the sigmoid
    gradient only when x is already a sigmoid *output* (the usual backprop
    shortcut) -- which is how the script below calls it.
    """
    if deriv == True:
        return x * (1 - x)
    return 1 / (1 + np.exp(-x))
def sigmoid(x):
    """Standard logistic function 1 / (1 + e^-x)."""
    return 1.0 / (1.0 + np.exp(-x))
def sigmoid_deriv(x):
    """Gradient of the logistic function: s(x) * (1 - s(x))."""
    s = sigmoid(x)
    return s * (1 - s)
# NOTE: Python 2 script.  Performs a single gradient step of a one-layer
# network on a toy dataset, printing the "old" derivative trick
# (nonlin(l1, True)) next to the explicit sigmoid_deriv for comparison.
iter=1
# input dataset
X = np.array([[0, 0, 1],
              [0, 1, 1],
              [1, 0, 1],
              [1, 1, 1]])
# output dataset
y = np.array([[0, 0, 1, 1]]).T
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
weights = 2 * np.random.random((3, 1)) - 1
# NOTE(review): this is an *alias*, not a copy -- both += updates below
# mutate the same array, so the old/new comparison prints a doubly-updated
# weight vector.  Probably intended: weights.copy().
weights2=weights
# forward propagation
l0 = X
l1 = nonlin(np.dot(l0, weights))
# how much did we miss?
l1_error = y - l1
print 'Loop num: %d. The output are %0.5f, %0.5f, %0.5f, %0.5f' % (iter,l1[0], l1[1], l1[2], l1[3])
print 'Loop num: %d. The target are %0.5f, %0.5f, %0.5f, %0.5f' % (iter, y[0], y[1], y[2], y[3])
print 'Loop num: %d. The error are %0.5f, %0.5f, %0.5f, %0.5f' % (iter, l1_error[0], l1_error[1], l1_error[2], l1_error[3])
# multiply how much we missed by the
# slope of the sigmoid at the values in l1
l1_delta = l1_error * nonlin(l1, True)
# "old" derivative uses l1 (already a sigmoid output); "new" one re-applies
# sigmoid to l1, so the two columns are expected to differ.
old_der=nonlin(l1, True)
der=sigmoid_deriv(l1)
print 'The old derivative are %0.5f, %0.5f, %0.5f, %0.5f' %(old_der[0],old_der[1],old_der[2],old_der[3])
print 'The new derivatives are %0.5f, %0.5f, %0.5f, %0.5f' %(der[0],der[1],der[2],der[3])
productErrorDer=l1_error*der
print 'The old error products are %0.5f, %0.5f, %0.5f, %0.5f' % (l1_delta[0], l1_delta[1], l1_delta[2], l1_delta[3])
print 'The new error products are %0.5f, %0.5f, %0.5f, %0.5f' % (productErrorDer[0], productErrorDer[1], productErrorDer[2], productErrorDer[3])
# update weights
weights += np.dot(l0.T, l1_delta)
weights2 += np.dot(l0.T, productErrorDer)
print 'The old updated weights are %0.5f, %0.5f, %0.5f' % (weights[0], weights[1], weights[2])
print 'The new updated weights are %0.5f, %0.5f, %0.5f' % (weights2[0], weights2[1], weights2[2])
print '####################'
| true |
b181a1f90b028e0ae702c40dc4b7fbb088d6368a | Python | mewbak/llvm-codegen-py | /llvm2icode.py | UTF-8 | 2,147 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python
#
# This tried to convert LLVM IR to SDCC iCode
#
import sys
from llvm.core import *
import llvm
# Maps LLVM icmp predicates to iCode comparison operators (only == so far).
CMP_MAP = {ICMP_EQ: "=="}
def oper(opers):
    """Render each operand as its string form."""
    return list(map(str, opers))
# Parse the LLVM assembly file named on the command line into a Module.
with open(sys.argv[1]) as asm:
    mod = Module.from_assembly(asm)
# Counter used to name anonymous SSA temporaries (t1, t2, ...).
tmp_i = 1
def number_tmps(mod):
    """Assign synthetic names t<N> to unnamed non-void instruction results.

    Also prints each function header in iCode 'proc' form (Python 2 print
    statements).  Mutates the module in place.
    """
    global tmp_i
    for f in mod.functions:
        print `f`
        # Strip the trailing character of the function type string
        # (presumably a '*' pointer marker -- confirm against llvm-py), then
        # splice the word "function" between return type and argument list.
        f_type = str(f.type)[:-1]
        f_type = f_type.split(" ", 1)
        f_type = f_type[0] + " function " + f_type[1]
        print "proc _%s{%s}" % (f.name, f_type)
        for b in f.basic_blocks:
            for i in b.instructions:
                # Only unnamed, value-producing instructions get a t<N> name.
                if not i.name and i.type != Type.void():
                    i.name = "t%d" % tmp_i
                    tmp_i += 1
def arg(a):
    """Render one instruction operand in iCode spelling.

    Handles function arguments, global variables and integer constants; any
    other operand kind hits the deliberate 1/0 crash so unhandled cases
    surface immediately during development.
    """
    if isinstance(a, Argument):
        return "%s{%s}" % (a.name, a.type)
    if isinstance(a, GlobalVariable):
        # Globals: drop the trailing character of the type string
        # (presumably the pointer '*' -- confirm against llvm-py).
        return "%s{%s}" % (a.name, str(a.type)[:-1])
    if isinstance(a, ConstantInt):
        # z_ext_value: the constant's zero-extended integer value.
        return "%s{%s}" % (a.z_ext_value, a.type)
    # Intentional crash marker for operand kinds not yet supported.
    1/0
# NOTE: Python 2 script body.  Walks every instruction and prints a rough
# iCode rendering for the few opcodes handled so far (icmp, load).
number_tmps(mod)
# NOTE(review): 'lab' is assigned but never used below.
lab = 1
for f in mod.functions:
    for b in f.basic_blocks:
        print " _%s($) :" % b.name
        for i in b.instructions:
            print "#", i
            print "# name:", i.name, "type:", i.type, "op:", i.opcode_name, "operands:", i.operands
            if i.name:
                if i.opcode_name == "icmp":
                    print "%s{%s} = %s %s %s" % (i.name, i.type, arg(i.operands[0]), CMP_MAP[i.predicate], arg(i.operands[1]))
                elif i.opcode_name == "load":
                    a = i.operands[0]
                    if isinstance(a, GlobalVariable):
                        print "%s<nospill>{%s} := %s<addr>" % (i.name, i.type, arg(a))
                    else:
                        # Deliberate crash: loads from non-globals unhandled.
                        1/0
                # elif i.opcode_name == "add":
                else:
                    print "??? %s{%s}" % (i.name, i.type)
| true |
d1bae8f2f25aaab6a651116fd6cb253616551966 | Python | lukas-ke/lsp | /lua/lua_re.py | UTF-8 | 1,831 | 2.6875 | 3 | [] | no_license | # https://www.lua.org/manual/5.1/manual.html
import re
def match_require(prefix):
    """Match a (possibly unfinished) require("...") call inside *prefix*.

    Returns the re.Match object (group 2 is the text following the opening
    quote) or None when no require( call is present.
    """
    pattern = r'require(\(["|\'])(.*)'
    return re.search(pattern, prefix)
def in_require(prefix):
    """Return True when *prefix* ends inside a require("...") call.

    Simplified: re.search already returns None for "no match", so the
    original if/return-flag chain collapses to one expression with the
    same True/False result.
    """
    return match_require(prefix) is not None
# Lua 5.1 reserved words (manual section 2.1); print_token tags these IDs.
keywords = ["and",
            "break",
            "do",
            "else",
            "elseif",
            "end",
            "false",
            "for",
            "function",
            "if",
            "in",
            "local",
            "nil",
            "not",
            "or",
            "repeat",
            "return",
            "then",
            "true",
            "until",
            "while"]
# Lua operators/punctuation, pre-escaped for use in a regex alternation.
symbols = [
    r"\+",
    r"-",
    r"\*",
    r"\/",
    r"\%",
    r"\^",
    r"\#",
    r"\=\=",
    r"\~\=",
    r"\<\=",
    r"\>\=",
    r"\<",
    r"\>",
    r"\=",
    r"\(",
    r"\)",
    "{",
    "}",
    r"\[",
    r"\]",
    r"\;",
    r"\:",
    r"\,",
    r"\.",
    r"\.\.",
    r"\.\.\."]
# Names
LUA_ID = r'[a-zA-Z_]\w*'
# Whitespace, but not endline
LUA_SPACE = r'[ \t]+'
# Ordered token table: alternation tries entries in order, so COMMENT/ID win
# over SYMBOL, and MISMATCH is the single-character catch-all.
token_specification = [
    ('COMMENT', r'--.*?$'),
    ('ID', LUA_ID),
    ('ASSIGN', r'=(?!=)'),
    ('COMPARE', r'=='),
    ('INTEGER', r'[0-9]+'),
    ('SYMBOL', '|'.join(symbols)),
    ('NEWLINE', r'\n'),
    ('SKIP', LUA_SPACE),
    ('STR', r'".*?"'),
    ('MISMATCH', r'.')]
# Master tokenizer: one alternation with a named group per token kind.
TOKEN = re.compile(
    '|'.join(f'(?P<{p[0]}>{p[1]})'
             for p in token_specification),
    flags=re.DOTALL|re.MULTILINE)
# Matches the leading dash run of a Lua comment at line start.
COMMENT_PREFIX = re.compile("^(--*)", flags=re.DOTALL|re.MULTILINE)
def print_token(t):
    """Pretty-print one token as 'line:column ...', tagging Lua keywords."""
    location = f"{t.line}:{t.column}"
    if t.category in keywords:
        print(f"{location} Keyword {t.category}")
    else:
        print(f"{location} {t.category}: |{t.value}|")
| true |
f505f6e52092850d3094d709ed9cc78e3c7a0dc9 | Python | fdioguardi/UNLP_Deep_Learning | /Practica_1/normalizar_iris.py | UTF-8 | 415 | 3.265625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import pandas as pd
# Load the classic iris data set from the working directory.
iris = pd.read_csv('iris.csv')
# Drop the name column (strings break the arithmetic below).
del iris["name"]
# Column-wise standardization: zero mean, unit variance.
zscore = (iris - iris.mean()) / iris.std()
# Column-wise rescaling into [0, 1].
min_max = (iris - iris.min()) / (iris.max() - iris.min())
print("Normalizado con z-score \n", zscore)
print("Normalizado con min-max \n", min_max)
# One histogram per feature for each normalization.
zscore.hist()
min_max.hist()
plt.show()
| true |
623c1f13a96d1be9f3a2c6640fede6bbc9e2205a | Python | tjdgus3160/algorithm | /CodeTree/놀이기구 탑승.py | UTF-8 | 886 | 2.890625 | 3 | [] | no_license | import sys
# Rebind input to buffered stdin reads for speed on large judge inputs.
input=sys.stdin.readline
def check(x, y, i):
    """Count friends and empty seats among the 4 orthogonal neighbours of (x, y).

    Returns (friend_count, empty_count) for student i; friendship comes from
    the module-global dic, seating from board, grid size from n.
    """
    friends = empty = 0
    for nx, ny in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
        if not (0 <= nx < n and 0 <= ny < n):
            continue  # off the board
        occupant = board[ny][nx]
        if not occupant:
            empty += 1
        elif occupant in dic[i]:
            friends += 1
    return friends, empty
def on_board(i):
    """Seat student i on the best empty cell.

    Preference order: most adjacent friends, then most adjacent empty
    seats, then smallest row, then smallest column.
    """
    candidates = []
    for y in range(n):
        for x in range(n):
            if board[y][x]:
                continue  # seat already taken
            cnt, empty = check(x, y, i)
            candidates.append([cnt, empty, y, x])
    candidates.sort(key=lambda c: (-c[0], -c[1], c[2], c[3]))
    best = candidates[0]
    board[best[2]][best[3]] = i
n=int(input())
board=[[0]*n for _ in range(n)]
# dic maps each student id to the list of ids of their 4 friends.
dic={}
for _ in range(n*n):
    i,*tmp=map(int,input().split())
    dic[i]=tmp
    # Seat the student immediately, in input order.
    on_board(i)
# Satisfaction score keyed by number of adjacent friends after seating.
score={0:0,1:1,2:10,3:100,4:1000}
res=0
for y in range(n):
    for x in range(n):
        k,_=check(x,y,board[y][x])
        res+=score[k]
print(res)
a2d52a72e5ecc17f9e11c2aeba570d6a6e63225a | Python | AlgoRG/basic_100 | /basic_100_sunny/1042_10315670(AC).py | UTF-8 | 124 | 2.609375 | 3 | [] | no_license | import io,sys
# Force UTF-8 on stdout (needed on some judge environments).
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf8')
a,b=map(int,input().split())
# Floor-divide the two inputs.
print(a//b)
| true |
48a084a2b23ce41271d4727f0c1848c557f8e76c | Python | proprefenetre/bin | /condex | UTF-8 | 5,080 | 2.765625 | 3 | [
"MIT"
] | permissive | #!/usr/bin/python
# -*- coding: utf-8 -*-
from collections import ChainMap
from functools import partial
import re
import sys
def halp():
return 'usage: condex [EXPR | PP] --sources --help\n\n\tEXPR:\tbash conditional expression ' \
'or positional parameter (escape special characters)\n'
# Bash conditional-expression operators -> one-line explanations (man bash).
expressions = {
    '-a': 'true if file exists; "AND" between []',
    '-b': 'true if file exists and is a block special file',
    '-c': 'true if file exists and is a character special file',
    '-d': 'true if file exists and is a directory',
    '-e': 'true if file exists',
    '-f': 'true if file exists and is a regular file',
    '-g': 'true if file exists and its set-group-id bit is set',
    '-h': 'true if file exists and is a symbolic link',
    '-k': 'true if file exists and its sticky bit is set',
    '-p': 'true if file exists and is a name pipe (FIFO)',
    '-r': 'true if file exists and is readable',
    '-s': 'true if file exists and has a size greater than zero',
    '-t': 'true if file descriptor is open and refers to a terminal',
    '-u': 'true if file exists and its set-user-id bit is set',
    '-w': 'true if file exists and is writable',
    '-x': 'true if file exists and is executable',
    # NOTE(review): '-g' appears twice; this later entry silently overwrites
    # the one above (bash spells "owned by effective group id" as -G).
    '-g': 'true if file exists and is owned by the effective group id',
    '-L': 'true if file exists and is a symbolic link',
    '-N': 'true if file exists and has been modified since it was last read',
    '-O': 'true if file exists and is owned by the effective user id',
    '-S': 'true if file exists and is a socket',
    '-ef': '(infix) true if file1 and file2 refer to the same device and ' \
           'inode numbers',
    '-nt': '(infix) true if file1 is newer than file2), or if file1 exists ' \
           'and file2 does not',
    '-ot': '(infix) true if file1 is older than file2, or if file2 exists ' \
           'and file1 does not',
    '-o': 'true if shell option is enabled',
    '-v': 'true if shell variable is set',
    '-R': 'true if shell variable is set and is a name reference',
    '-z': 'true if the length of string is zero',
    '-n': 'true if the length of string is non-zero',
    '-eq': '(infix) equal to',
    '-ne': '(infix) not equal to',
    '-lt': '(infix) less than',
    '-gt': '(infix) greater than',
    '-ge': '(infix) greater than or equal',
}
# Bash positional parameters / special variables (tldp.org ABS guide).
pos_params = {
    '$0': 'Filename of script',
    '$1': 'Positional parameter 1',
    '${10}': 'Positional parameter #10',
    '$#': 'Number of positional parameters',
    '$*': 'All the positional parameters as a single word; quote for truth',
    '$@': 'All the positional parameters as separate strings',
    '${#*}': 'Number of positional parameters',
    '${#@}': 'Number of positional parameters',
    '$?': 'Return value',
    '$$': 'Process ID (PID) of script',
    '$-': 'Flags passed to script (using set)',
    '$_': 'Last argument of previous command',
    '$!': 'Process ID (PID) of last job run in background',
    'quoted': '$*',
    'arguments': 'quoted: $*; separate: $@',
    'number': '${#*}; ${#@}',
    'return': '$?',
}
# Redirection operators and idioms (bash-hackers redirection tutorial).
redirection = {
    '0': 'stdin',
    '1': 'stdout',
    '2': 'stderr',
    '3': 'new file descriptor',
    '4': 'new file descriptor',
    '>': '"command > file": redirect stdout to file descriptor',
    '2>': '"command 2> file": redirect stderr to file',
    '<': '"command < file": read from file instead of stdin',
    '|': '"command | command2": connect stdout and stdin of commands',
    '>|': '"command >| file": overwrite (existing) file',
    '<>': '"command <> file": command both reads and writes to/from file',
    '>&': '"command 2&>1": data written to 2 (stderr) will go ' \
          'to the destination of 1 (stdout)',
    'redirection': '"command >file 2>&1": redirect stdout and stderr to file',
    'n': '"exec 3< file": specify an alternative file descriptor for e.g. ' \
         '"read" to read from ("while read -u 3; do ...; done"). close 3 with ' \
         '"exec 3>&-',
    '<&-': 'close stdin',
    '2>&-': 'close stderr',
    '2>/dev/null': 'discard stderr',
    'syntax': '"[lh] [op] [rh]":\nlh is always a file descriptor ' \
              '(0, 1, 2, ..., n).\n op is one of <, >, >>, >| or <>\n' \
              'rh is the target of redirection, i.e. a file, an fd, or ' \
              '&-. don\'t include whitespace except within quotes. ' \
              '(nb. these rules are conventions, not dictated by bash). '
}
# Miscellaneous syntax markers.
syntax = {
    '$': 'identifies a variable, e.g. "$var"'
}
# Meta entries: --sources lists references, --help prints usage.
misc = {
    '--sources': 'conditional expressions: man bash\n\n' \
                 'positional parameters: http://www.tldp.org/ldp/abs/html\n\n' \
                 'redirection: http://wiki.bash-hackers.org/howto/redirection_tutorial',
    '--help': halp(),
}
if __name__ == "__main__":
    # Single lookup table; ChainMap searches left-to-right, so entries in
    # `expressions` shadow same-named keys in the later dicts.
    c = ChainMap(expressions, misc, pos_params, redirection, syntax)
    try:
        print('{}\n'.format(c[sys.argv[1]]))
    except KeyError as e:
        # Unknown token: report it and exit non-zero.
        raise SystemExit('{} not a valid Bash expression or positional'
                         ' parameter'.format(e))
    except IndexError:
        # No argument given: show usage.
        raise SystemExit(halp())
| true |
1a22a0e70052284ffbcf2bba4a4cfe04a3a8b008 | Python | AnanyaAppan/BMTC-route-duration-prediction | /adaBoost_21_17.py | UTF-8 | 1,090 | 2.6875 | 3 | [] | no_license | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostRegressor
from sklearn.datasets import make_regression
from sklearn.preprocessing import StandardScaler
from sklearn import metrics
# Pre-gridded BMTC GPS pings: one row per bus observation in grid cell (21, 17).
df = pd.read_csv('lalliTrial/grid_21_17.csv', names=["busId" , "latitude", "longitude", "angle", "speed", "timestamp", "time", "day"], nrows = 100000)
# Features: position/heading/time columns; target: instantaneous speed.
X = df.drop(columns=["busId", "speed", "timestamp"])
y = df["speed"].values
X_train, X_test, y_train, y_test = train_test_split(X,y,test_size=0.20, random_state=42)
# Standardize features; scaler fitted on the training split only.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
regressor = AdaBoostRegressor(n_estimators=10, random_state=42)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
# Standard regression metrics on the held-out split (score is R^2).
print('Mean Absolute Error:', metrics.mean_absolute_error(y_test, y_pred))
print('Mean Squared Error:', metrics.mean_squared_error(y_test, y_pred))
print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(y_test, y_pred)))
print ('score:', regressor.score(X_test, y_test))
| true |
37e451b826fb53ce563ebfe7c7975c9ec89a936b | Python | git-gagan/MywaysTrainee | /RoughExperiments/helloworldTK.py | UTF-8 | 4,839 | 3.125 | 3 | [] | no_license | """#import tkinter module
import tkinter as tk
#Create the GUI application main Window where we are gonna add all the visuals
window = tk.Tk()
#adding inputs/widgets
#widgets are like elements in HTML
inp = tk.Label(window, text = "Hello World!", font = ("Times New Roman", 50)) #label is the output to be shown on the window for single line definitions only
#Frames
frame1 = tk.Frame(window, height = 100, width = 100, cursor = "dot")
frame2 = tk.Frame(window, height = 100, width = 100, cursor = "dot")
l1 = tk.Label(frame1, text = "Mail", height = 1, width = 50)
l2 = tk.Label(frame1, text = "Password", height = 1, width = 50)
inp1 = tk.Entry(frame1, width = 10)
inp2 = tk.Entry(frame1, width = 10)
inp3 = tk.Entry(frame2, width = 10)
window.title("My first Tkinter program") #title
window.geometry("400x400") #size
window.config(bg = "Pink") #bg color
l3 = tk.Label(frame2, text = "Name", height = 1, width = 50)
bt = tk.Button(window, text = "SUBMIT", height =5, width=10, fg="red",font = ("Times New Roman", 15)) #button widget
#pack the input to show the object in the window
inp.pack()
frame1.pack(side = "top")
frame2.pack(side = "bottom")
l1.pack()
inp1.pack()
l2.pack()
inp2.pack()
bt.pack(pady = 50)
l3.pack()
inp3.pack()
#run the main event loop suggesting to display the window till manually closed..
window.mainloop()"""
from tkinter import *
from tkinter import messagebox
# Main application window.
window = Tk()
window.title("Custom Design")
window.geometry("500x500")
window.config(bg = "black")
#creating a menu
menu = Menu(window)
File = Menu(menu, tearoff = 0)
File.add_command(label = "New")
File.add_command(label = "Open")
File.add_command(label = "Save")
File.add_command(label = "Save as")
File.add_separator()
File.add_command(label = "Exit", command = window.quit)
Edit = Menu(menu, tearoff = 0)
Edit.add_command(label = "Draw")
Edit.add_command(label = "Select")
Edit.add_command(label = "Cut")
Edit.add_command(label = "Paste")
View = Menu(menu, tearoff = 0)
View.add_command(label = "FullScreen")
View.add_command(label = "Half Screen")
View.add_command(label = "Terminal")
View.add_command(label = "No view")
Go = Menu(menu)
# NOTE(review): "labe" relies on Tcl's option-name abbreviation matching
# "-label"; prefer spelling it out as label=.
Go.add_command(labe= "Execute")
Run = Menu(menu)
Run.add_command(labe= "Debug")
Run.add_command(labe= "Compile")
Help = Menu(menu)
Help.add_command(labe= "References")
Help.add_command(labe= "Wikipedia")
# Attach each sub-menu to the menu bar.
menu.add_cascade(label = "File", menu = File)
menu.add_cascade(label = "Edit", menu = Edit)
menu.add_cascade(label = "View", menu = View)
menu.add_cascade(label = "Go", menu = Go)
menu.add_cascade(label = "Run", menu = Run)
menu.add_cascade(label = "Help", menu = Help)
window.config(menu = menu)
# Message widget bound to `var`; updated by done() below.
var = StringVar()
disp = Message(window, textvariable = var, padx = 30, pady = 30)
def clicked():
    """LOGIN button callback: confirm via dialog, then show login widgets."""
    #message Box
    response = messagebox.askyesno("Confirmation","Do you want to login?")
    if response:
        messagebox.showinfo("LogIN","Logged IN")
        la = Label(window, text = "Logged in successfully!", font = ("comic sans ms",15))
        la.pack(fill = X, expand = 1)
        # Log-out button simply removes the success label again.
        bt = Button(text = "Log Out!", command = la.destroy)
        bt.pack(side = BOTTOM, fill = X)
    else:
        messagebox.showerror("Negative","You didn't logged in")
# LOGIN button wired to the clicked() callback above.
bt = Button(window, text = "LOGIN", font = ("comic sans ms",15), bg = "grey", fg = "black", activebackground = "black", activeforeground = "white", command = clicked)
# Entry text; OK copies it into the Message widget via done().
data = StringVar()
def done():
    """OK button callback: mirror the entry text into the display Message."""
    var.set((data.get()))
inp = Entry(window, textvariable = data ,width = 30)
but = Button(window, text = "OK!", command = done)
label1 = Label(text = "label1", bg="red")
label2 = Label(text = "label2", bg="blue")
label3 = Label(text = "label3", bg="green")
label4 = Label(text = "label4", bg="yellow")
#Canvas
c = Canvas(window, height = 300, width = 600)
c.create_line(0,0,600,300, fill = "black" , width = 5)
c.create_line(600,0,0,300, fill = "black" , width = 5)
c.create_rectangle(0,0,300,300, fill = "pink")
#checkbuttons
c1 = Checkbutton(window, text = "Working ?", padx= 15, pady= 15, relief = GROOVE, height = 10, width = 20)
c2 = Checkbutton(window, text = "Done ?", padx = 15, pady = 15, relief = GROOVE, height = 10, width = 20)
# Layout: pack order determines which edges the widgets claim first.
label1.pack(side = TOP, fill=X)
bt.pack(fill = X)
label2.pack(side = LEFT, fill=Y)
label3.pack(side = RIGHT, fill=Y)
label4.pack(side = BOTTOM, fill=X)
inp.pack(padx = 15, pady = 15)
but.pack()
c.pack(pady =20)
disp.pack(fill =X)
c1.pack(side = LEFT)
c2.pack(side = RIGHT)
#fill allows to take as much space as available and expand allows additional space to the widget
#For geometry management, either use place or grid or pack
window.mainloop()
b15ca0dd8d8ba04750ed6f0670c7821f02d3ef8e | Python | JCVANKER/anthonyLearnPython | /learning_Python/basis/部分知识点/枚举enumerate()/enumerate.py | UTF-8 | 544 | 4.59375 | 5 | [] | no_license | """
enumerate() 函数用于将一个可遍历的数据对象(如列表、元组或字符串)组合为一个索引
序列,同时列出数据和数据下标 -- (索引,值),一般用在 for 循环当中。
enumerate(iterable, [start=0])
iterable -- 可迭代对象。
start -- 下标起始位置。
"""
# Demonstrate enumerate on a list: yields (index, value) pairs.
my_list = ['Wuminfeng', '男', 19]
print(list(enumerate(my_list)))
print("----------------------------")
# Used inside a for loop: unpack the (index, value) pairs directly.
numbers = ['one', 'two', 'three']
for i, number in enumerate(numbers):
    print(str(i) + " " + number)
| true |
c61fea736dae6a1022a00d17e7c94c832fa27d8a | Python | sudnya/bert-word-embeddings | /scripts/select-clusters.py | UTF-8 | 3,840 | 3.015625 | 3 | [] | no_license |
import logging
from argparse import ArgumentParser
# Module-level logger shared by the parsing helpers below.
logger = logging.getLogger(__name__)
def main():
    """CLI entry point: parse arguments, set the log level, run the filter."""
    parser = ArgumentParser(description="A script for selecting specific set of clusters.")
    parser.add_argument("-v", "--verbose", default = False, action="store_true",
        help = "Set the log level to debug, printing out detailed messages during execution.")
    parser.add_argument("-i", "--input-path", default="",
        help = "The path to the input cluster file.")
    parser.add_argument("-o", "--output-path", default="",
        help = "The path to the output cluster file.")
    arguments = vars(parser.parse_args())

    # --verbose switches the root logger from INFO to DEBUG.
    level = logging.DEBUG if arguments["verbose"] else logging.INFO
    logging.basicConfig(level=level)

    selectClusters(arguments)
def selectClusters(arguments):
    """Read a cluster histogram file, drop stop-word-led clusters, and write
    the survivors ordered by descending cluster count.

    Relies on the seek-based peek helpers below (byte-seekable input file).
    """
    inputPath = arguments["input_path"]
    outputPath = arguments["output_path"]

    # clusters: id -> {word: count}; clusterCounts: id -> total count.
    clusters = {}
    clusterCounts = {}

    with open(inputPath) as histogramFile:
        while not isEndOfFile(histogramFile):
            if isCluster(histogramFile):
                cluster, count = parseCluster(histogramFile)
                clusterCounts[cluster] = count
                clusters[cluster] = {}
            else:
                # Word lines belong to the most recently seen cluster header.
                word, count = parseWord(histogramFile)
                clusters[cluster][word] = count

    with open(outputPath, 'w') as histogramFile:
        # Emit clusters by descending total count.
        for cluster, count in sorted(clusterCounts.items(), key=lambda x:x[1], reverse=True):
            words = clusters[cluster]
            if len(words) == 0:
                continue
            if excludeCluster(words):
                continue
            histogramFile.write("Cluster, " + str(cluster) + " (" + str(count) + ")\n")
            # Words within a cluster also sorted by descending count.
            for word, wordCount in sorted(words.items(), key=lambda x:x[1], reverse=True):
                histogramFile.write("    '" + word + "' " + str(wordCount) + "\n")
# Function words and punctuation: a cluster whose most frequent word is one
# of these is dropped from the output.
excludedSet = set([' ', 'the', 'I', '/', 'to', ',', '_', ':', 'so', ')', "'", 'are',
    'from', 'and', 'or', 'not', 'it', '(', 'a', 'of', '.', 'this', 'have', 'on', 'an', '\n',
    'would', 'will', 'do', 'but', 'that', 'like', '[', 'as'])
def excludeCluster(words):
    """True when the cluster's most frequent word is a stop word/punctuation."""
    # Ascending sort, take the last entry: the highest-count word (ties
    # resolved toward the later dict entry, matching the old reversed scan).
    mostFrequentWord = sorted(words.items(), key=lambda item: item[1])[-1][0]
    return mostFrequentWord in excludedSet
def isEndOfFile(openFile):
    """Return True when *openFile* is positioned at end of file.

    Peeks a single character and then seeks one position back relative to
    the current offset (requires a byte-seekable stream; note this rewinds
    by one even when nothing was read, mirroring the original protocol).
    """
    peeked = openFile.read(1)
    openFile.seek(-1, 1)
    return not peeked
def isCluster(openFile):
    """Return True when the text at the current offset begins 'Cluster'.

    Reads 7 characters ahead and restores the stream position afterwards.
    """
    lookahead = openFile.read(7)
    openFile.seek(-7, 1)
    return lookahead == "Cluster"
def parseCluster(openFile):
    """Parse a header line 'Cluster, <id> (<count>)' into (id, count) ints."""
    line = openFile.readline()
    # Drop the 'Cluster, ' prefix (9 characters).
    remainder = line[9:]
    logger.debug(str(("Remainder", remainder)))
    idText, countText = remainder.split(" ")
    return int(idText), int(countText.strip().strip("(").strip(")"))
def parseWord(openFile):
    """Parse a word line \"'<word>' <count>\" into (word, count).

    The word may itself contain newlines; readWordLine handles those.
    """
    line = readWordLine(openFile)
    logger.debug(str(("wordline", line)))
    # The word sits between the first and last single quote on the line.
    start = line.find("'")
    end = line.rfind("'")
    return line[start + 1:end], int(line[end + 1:].strip())
def readWordLine(openFile):
    """Read one logical word line, character by character.

    A line ends at a newline that falls *outside* a quoted word, so quoted
    words may span physical lines.  An opening quote starts the word; a
    closing quote only counts once at least one character has appeared
    since the opening quote (so an immediately repeated quote stays open).
    """
    insideWord = False
    sawWordChar = False
    text = ""
    while True:
        char = openFile.read(1)
        if not char:
            break  # EOF
        text += char
        if char == "\n" and not insideWord:
            break
        if char == "'":
            if not insideWord:
                insideWord = True
            elif sawWordChar:
                insideWord = False
            continue
        if insideWord:
            sawWordChar = True
    return text
################################################################################
## Guard Main
# Script entry point: only runs when executed directly, not on import.
if __name__ == "__main__":
    main()
################################################################################
| true |
4612148caf5df773efe09cf4aeb4ba539b7e8d49 | Python | Neckmus/itea_python_basics_3 | /iassirskyi_stanislav/06/game.py | UTF-8 | 2,718 | 3.796875 | 4 | [] | no_license | from random import randint, choice
from characters import CHARACTERS, ENEMIES
from game_objects import GAME_OBJECTS, Exit
from game_map import GameMap
def get_trapped(character):
    """Announce a trap and deal 5-25 random damage to *character*."""
    print('You got trapped')
    character.get_damaged(randint(5, 25))
def get_healed(character):
    """Announce a heal and restore 5-25 random hit points to *character*."""
    print('You got healed')
    character.get_healed(randint(5, 25))
def fight_with_enemy(character, enemy):
    """Run fight rounds until one side dies.

    Returns True when *character* survives (enemy died), False otherwise.
    The character attacks first each round; death is checked after each
    attack, character first.
    """
    while True:
        character.fight(enemy)
        if character.is_dead():
            print('You lost')
            return False
        if enemy.is_dead():
            return True
# Main game script: set up the character and map, then run the move loop.
print('Welcome to my game!')
name = input('Enter your name: ')
race = input('Choose race [Human, Orc, Elf]: ')
level = input('Difficulty [Hard, Medium, Easy]: ')
# Difficulty controls the trap/heal vs enemy mix; every map gets one Exit.
# NOTE(review): an unrecognised level leaves `objects` undefined and the
# script crashes at GameMap(...) below.
if level == 'Hard':
    objects = [choice(GAME_OBJECTS)() for i in range(3)]
    enemies = [choice(ENEMIES)() for i in range(6)]
    objects += enemies
    objects += [Exit()]
elif level == 'Medium':
    objects = [choice(GAME_OBJECTS)() for i in range(4)]
    enemies = [choice(ENEMIES)() for i in range(4)]
    objects += enemies
    objects += [Exit()]
elif level == 'Easy':
    objects = [choice(GAME_OBJECTS)() for i in range(6)]
    enemies = [choice(ENEMIES)() for i in range(2)]
    objects += enemies
    objects += [Exit()]
# Random spawn point inside a 5x5 area of the 6x6 map.
x, y = randint(0, 4), randint(0, 4)
char = CHARACTERS[race](name, x, y)
char.show_stats()
game_map = GameMap(6, 6, objects)
game_map.put_char(char, *char.get_coords())
game = True
while game == True:
    print(game_map)
    step = input('Where you want to move [up, down, left, right?] ')
    # NOTE(review): an unrecognised step reuses the previous `move`
    # (NameError on the very first turn).
    if step == 'up':
        move = (-1, 0)
    elif step == 'down':
        move = (1, 0)
    elif step == 'left':
        move = (0, -1)
    elif step == 'right':
        move = (0, 1)
    char_pos = char.get_coords()
    char.move(move)
    new_pos = char.get_coords()
    game_map.moving_char(char, *char_pos, *new_pos)
    # Resolve whatever object occupies the destination cell.
    for obj in objects:
        if obj.show_coords()[0] == new_pos:
            target = obj.show_coords()[1]
            if target == 'Trap':
                get_trapped(char)
                if char.is_dead():
                    print('You lose!')
                    game = False
            elif target == 'Heal':
                get_healed(char)
            elif target == 'Undead' or target == 'Murlock':
                print('Enemy!')
                fight_with_enemy(char, obj)
                if char.is_dead():
                    print('You lose!')
                    game = False
            elif target == 'Exit':
                print('You find Exit')
                game = False
print('Game_Over')
| true |
7b26d22f8f4a5d793cb3c291b4ae2b5a3b1b470c | Python | Atheros13/tccProjects | /tccProjects/app/convert564.py | UTF-8 | 4,776 | 2.84375 | 3 | [] | no_license | #-----------------------------------------------------------------------------#
## IMPORTS ##
import xlrd
import calendar
import datetime
import csv
#-----------------------------------------------------------------------------#
## CLASS ##
class Convert564():
    """Convert the Pacific Radiology 564 roster spreadsheet to a flat CSV.

    Instantiating the class does all the work: the workbook is read from its
    fixed network location, parsed into a per-region roster, and written out
    as ``564Roster.csv`` with columns REGION, ROLE, DATETIME, NAME.
    """
    def __init__(self, *args, **kwargs):
        # Parse first, then dump straight to disk.
        self.roster = self.open_workbook()
        self.create_csv()
    def open_workbook(self):
        """Read the roster workbook; return {region: [[datetime, staff, role], ...]}."""
        workbook = xlrd.open_workbook('G:/Customer Reporting/564 & 470 -Pacific Radiology/Latest Roster 564.xlsx')
        sheet = workbook.sheet_by_index(0)
        header_row = self.get_heading_row(sheet)
        regions = [
            'WHANGANUI', 'TARANAKI', 'NELSON', 'OAMARU & DUNSTAN', 'ALL OTHERS'
        ]
        # Region columns start at spreadsheet column 2, in the order above.
        return {
            region: self.extract_sheet_data(sheet, header_row, column)
            for column, region in enumerate(regions, start=2)
        }
    def get_heading_row(self, sheet):
        """Return the index of the row whose first cell is the 'Date - ' marker."""
        for row in range(sheet.nrows):
            if sheet.cell_value(row, 0) == 'Date - ':
                return row
    def extract_sheet_data(self, sheet, heading_row, column):
        """Collect [timestamp, staff, role] entries for a single region column."""
        entries = []
        current_date = None
        row = heading_row + 1
        # Deliberately scan one row past sheet.nrows; the resulting exception
        # from cell_value ends the loop at the bottom of the sheet.
        while row <= sheet.nrows:
            try:
                slot = sheet.cell_value(row, 1)
            except:
                break
            if slot == '12am-7am':
                # The '12am-7am' slot opens a new day; its date cell sits two rows down.
                current_date = datetime.datetime(*xlrd.xldate_as_tuple(sheet.cell_value(row + 2, 0), 0))
            role, stamp = self.convertTime(current_date, slot)
            entries.append([stamp, sheet.cell_value(row, column), role])
            row += 1
        return entries
    def convertTime(self, date, time):
        """Format the end of a slot label (e.g. '8am-5pm') as a timestamp string.

        Returns (role, 'dd/mm/yy HH:MM:00 AM/PM'); the role is always
        'Radiologist' for the 564 roster.
        """
        role = 'Radiologist'
        # The roster entry applies to the *end* of the slot, so keep only the
        # text after the dash (single-valued labels have no dash at all).
        try:
            token = time.replace(' ', '').split('-')[1]
        except:
            token = time
        token = token.replace('.', '')
        if 'am' in token:
            token = token.split('am')[0]
            if int(token) < 12:
                token = datetime.timedelta(hours=int(token))
            elif '30' in token:
                token = datetime.timedelta(hours=int(token.split('30')[0]), minutes=30)
        elif 'pm' in token:
            token = token.split('pm')[0]
            if int(token) < 12:
                token = datetime.timedelta(hours=int(token) + 12)
            elif '30' in token:
                token = datetime.timedelta(hours=int(token.split('30')[0]) + 12, minutes=30)
        else:
            # Unrecognised labels (e.g. 'midnight') map to one second to midnight.
            token = datetime.timedelta(hours=23, minutes=59, seconds=59)
        print(token)
        return role, (date + token).strftime("%d/%m/%y %I:%M:00 %p")
    def convertTime1(self, date, time):
        """Legacy slot-label converter kept for reference; not used by __init__."""
        role = 'Radiologist'
        token = time.replace(' ', '').split('-')[1]
        token = token.replace('.', '')
        # Known slot endings and their offsets from the day's midnight.
        fixed_offsets = {
            '7am': datetime.timedelta(hours=7),
            '8am': datetime.timedelta(hours=8),
            '830am': datetime.timedelta(hours=8, minutes=30),
            '330pm': datetime.timedelta(hours=15, minutes=30),
            '5pm': datetime.timedelta(hours=17),
            '9pm': datetime.timedelta(hours=21),
            '10pm': datetime.timedelta(hours=22),
            'midnight': datetime.timedelta(hours=23, minutes=59, seconds=59),
        }
        if token in fixed_offsets:
            offset = fixed_offsets[token]
        elif 'all' in token:
            # 'all ...' slots cover the whole period; the label doubles as the role.
            role = time
            if '10pm' in token:
                offset = datetime.timedelta(hours=22)
            else:
                offset = datetime.timedelta(days=1, hours=7)
        else:
            offset = datetime.timedelta(hours=23, minutes=59, seconds=59)
        dt_string = (date + offset).strftime("%d/%m/%y %I:%M:00 %p")
        return role, dt_string
    def create_csv(self):
        """Write self.roster to the Michael Import folder as a 4-column CSV."""
        # region service endtime name
        path = 'G:/Customer Reporting/564 & 470 -Pacific Radiology/Michael Import/564Roster.csv'
        # Best-effort truncation of any previous export; ignore failures
        # (e.g. the file is locked or the share is unavailable).
        try:
            open(path, 'w').close()
        except:
            pass
        with open(path, 'w', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=',')
            writer.writerow(['REGION', 'ROLE', 'DATETIME', 'NAME'])
            for region, entries in self.roster.items():
                for stamp, staff, role in entries:
                    writer.writerow([region, role, stamp, staff])
#-----------------------------------------------------------------------------#
Convert564()  # module entry point: parses the workbook and writes the CSV immediately on run
#-----------------------------------------------------------------------------# | true |
96c1af8913e5ddb0acd9765049acfff7235b8c31 | Python | iancze/TWA-3-orbit | /analysis/close/rv/model.py | UTF-8 | 5,159 | 2.5625 | 3 | [
"MIT"
] | permissive | import re
import astropy
import exoplanet as xo
import numpy as np
import pandas as pd
import pymc3 as pm
import theano
# load the exoplanet part
import theano.tensor as tt
from exoplanet.distributions import Angle
import src.data as d
from src.constants import *
with pm.Model() as model:
# parameters
# P, gamma, Ka, Kb, e, omegaA, T0
# Delta CfA - Keck
# Delta CfA - Feros
# Delta CfA - du Pont
# jitter for each instrument?
# Parameters
logKAa = pm.Uniform(
"logKAa", lower=0, upper=np.log(100), testval=np.log(25)
) # km/s
logKAb = pm.Uniform(
"logKAb", lower=0, upper=np.log(100), testval=np.log(25)
) # km/s
KAa = pm.Deterministic("KAa", tt.exp(logKAa))
KAb = pm.Deterministic("KAb", tt.exp(logKAb))
logP = pm.Uniform(
"logP", lower=np.log(20.0), upper=np.log(50.0), testval=np.log(34.87846)
) # days
P = pm.Deterministic("P", tt.exp(logP))
e = pm.Uniform("e", lower=0, upper=1, testval=0.62)
omega = Angle("omega", testval=80.5 * deg) # omega_Aa
gamma = pm.Uniform("gamma", lower=0, upper=20, testval=10.1)
# relative to jd0
t_periastron = pm.Uniform(
"tPeri", lower=1130.0, upper=1180.0, testval=1159.0
) # + 2400000 days
orbit = xo.orbits.KeplerianOrbit(
period=P, ecc=e, t_periastron=t_periastron, omega=omega
)
# since we have 4 instruments, we need to predict 4 different dataseries
def get_RVs(t1, t2, offset):
"""
Helper function for RVs. Closure should encapsulate current K1, K2 values, I hope.
Args:
orbit: exoplanet object
t1: times to query for star 1
t2 : times to query for star 2
offset: (km/s)
Returns:
(rv1, rv2) [km/s] evaluated at those times with offset applied
"""
rv1 = (
1e-3 * orbit.get_radial_velocity(t1, 1e3 * tt.exp(logKAa)) + gamma + offset
) # km/s
rv2 = (
1e-3 * orbit.get_radial_velocity(t2, -1e3 * tt.exp(logKAb)) + gamma + offset
) # km/s
return (rv1, rv2)
offset_keck = pm.Normal("offsetKeck", mu=0.0, sd=5.0) # km/s
offset_feros = pm.Normal("offsetFeros", mu=0.0, sd=5.0) # km/s
offset_dupont = pm.Normal("offsetDupont", mu=0.0, sd=5.0) # km/s
# expects m/s
# dates are the first entry in each tuple of (date, rv, err)
rv1_cfa, rv2_cfa = get_RVs(d.cfa1[0], d.cfa2[0], 0.0)
rv1_keck, rv2_keck = get_RVs(d.keck1[0], d.keck2[0], offset_keck)
rv1_feros, rv2_feros = get_RVs(d.feros1[0], d.feros2[0], offset_feros)
rv1_dupont, rv2_dupont = get_RVs(d.dupont1[0], d.dupont2[0], offset_dupont)
logjit_cfa = pm.Uniform(
"logjittercfa", lower=-5.0, upper=np.log(5), testval=np.log(1.0)
)
logjit_keck = pm.Uniform(
"logjitterkeck", lower=-5.0, upper=np.log(5), testval=np.log(1.0)
)
logjit_feros = pm.Uniform(
"logjitterferos", lower=-5.0, upper=np.log(5), testval=np.log(1.0)
)
logjit_dupont = pm.Uniform(
"logjitterdupont", lower=-5.0, upper=np.log(5), testval=np.log(1.0)
)
jit_cfa = pm.Deterministic("jitCfa", tt.exp(logjit_cfa))
jit_keck = pm.Deterministic("jitKeck", tt.exp(logjit_keck))
jit_feros = pm.Deterministic("jitFeros", tt.exp(logjit_feros))
jit_dupont = pm.Deterministic("jitDupont", tt.exp(logjit_dupont))
# get the total errors
def get_err(rv_err, logjitter):
return tt.sqrt(rv_err ** 2 + tt.exp(2 * logjitter))
# define the likelihoods
pm.Normal(
"cfaRV1Obs", mu=rv1_cfa, observed=d.cfa1[1], sd=get_err(d.cfa1[2], logjit_cfa)
)
pm.Normal(
"cfaRV2Obs", mu=rv2_cfa, observed=d.cfa2[1], sd=get_err(d.cfa2[2], logjit_cfa)
)
pm.Normal(
"keckRV1Obs",
mu=rv1_keck,
observed=d.keck1[1],
sd=get_err(d.keck1[2], logjit_keck),
)
pm.Normal(
"keckRV2Obs",
mu=rv2_keck,
observed=d.keck2[1],
sd=get_err(d.keck2[2], logjit_keck),
)
pm.Normal(
"ferosRV1Obs",
mu=rv1_feros,
observed=d.feros1[1],
sd=get_err(d.feros1[2], logjit_feros),
)
pm.Normal(
"ferosRV2Obs",
mu=rv2_feros,
observed=d.feros2[1],
sd=get_err(d.feros2[2], logjit_feros),
)
pm.Normal(
"dupontRV1Obs",
mu=rv1_dupont,
observed=d.dupont1[1],
sd=get_err(d.dupont1[2], logjit_dupont),
)
pm.Normal(
"dupontRV2Obs",
mu=rv2_dupont,
observed=d.dupont2[1],
sd=get_err(d.dupont2[2], logjit_dupont),
)
# iterate through the list of free_RVs in the model to get things like
# ['logKAa_interval__', etc...] then use a regex to strip away
# the transformations (in this case, _interval__ and _angle__)
# \S corresponds to any character that is not whitespace
# https://docs.python.org/3/library/re.html
sample_vars = [re.sub("_\S*__", "", var.name) for var in model.free_RVs]
all_vars = [
var.name
for var in model.unobserved_RVs
if ("_interval__" not in var.name) and ("_angle__" not in var.name)
]
| true |
b57c1633245b2172f11a58b1e2cac3187d8c2ea3 | Python | ma301gv/SentimentSemEval | /Prepocessing.py | UTF-8 | 2,597 | 2.828125 | 3 | [] | no_license | from DataReader import DataReader
from Normalization import Normalization
import re
import re as regex
import pandas as pd
import nltk
from collections import Counter
### Provides a pre-process for tweet messages.
### Replace emoticons, hash, mentions and urls for codes
### Correct long seguences of letters and punctuations
### Apply the Pattern part-of_speech tagger to the message
### Requires the Pattern library to work (http://www.clips.ua.ac.be/pages/pattern)
def remove_by_regex(tweets, regexp):
tweets.loc[:, "tweet"].replace(regexp, "", inplace=True)
return tweets
df_1 = DataReader('Data/tweeti-b.dist.tsv')
df1 = df_1.data_set
df_2 = DataReader('Data/downloaded.tsv')
df2 = df_2.data_set
df = pd.concat([df1, df2])
df = df.drop_duplicates(['tweet'])
df = remove_by_regex(df, regex.compile(r"http.?://[^\s]+[\s]?"))
df = remove_by_regex(df, regex.compile(r"@[^\s]+[\s]?"))
df = remove_by_regex(df, regex.compile(r"\s?[0-9]+\.?[0-9]*"))
for remove in map(lambda r: regex.compile(regex.escape(r)), [",", ":", "\"", "=", "&", ";", "%", "$",
"@", "%", "^", "*", "(", ")", "{", "}",
"[", "]", "|", "/", "\\", ">", "<", "-",
".", "'", "--", "---", "#"]):
df.loc[:, "tweet"].replace(remove, "", inplace=True)
tokenizer = nltk.word_tokenize
stemmer = nltk.PorterStemmer()
df["tokenized_tweet"] = "defaut value"
for index, row in df.iterrows():
row["tokenized_tweet"] = tokenizer(row["tweet"])
row["tweet"] = list(map(lambda str: stemmer.stem(str.lower()), tokenizer(row["tweet"])))
import os
if os.path.isfile("Data\wordlist.csv"):
word_df = pd.read_csv("Data\wordlist.csv")
word_df = word_df[word_df["occurrences"] > 3]
wordlist = list(word_df.loc[:, "word"])
words = Counter()
for index, row in df.iterrows():
words.update(row["tweet"])
stopwords=nltk.corpus.stopwords.words("english")
whitelist = ["n't", "not"]
for idx, stop_word in enumerate(stopwords):
if stop_word not in whitelist:
del words[stop_word]
word_df = pd.DataFrame(data={"word": [k for k, v in words.most_common() if 3 < v < 500],
"occurrences": [v for k, v in words.most_common() if 3 < v < 500]},
columns=["word", "occurrences"])
word_df.to_csv("Data\wordlist.csv", index_label="idx")
wordlist = [k for k, v in words.most_common() if 3 < v < 500]
print(words.most_common(5))
#print(df) | true |
9d8a177154585d2f13f2c050ad9391e3e628f944 | Python | malcolmreynolds/APGL | /exp/sandbox/Nystrom.py | UTF-8 | 4,209 | 2.96875 | 3 | [] | no_license | import numpy
import logging
import scipy.sparse
import scipy.sparse.linalg
import scipy.linalg
from apgl.util.Util import Util
class Nystrom(object):
    """
    Approximate eigendecompositions and reconstructions of large symmetric
    matrices using the Nystrom column-sampling method.
    """
    def __init__(self):
        pass
    @staticmethod
    def eig(X, n):
        """
        Find the eigenvalues and eigenvectors of an indefinite symmetric matrix X.

        :param X: The matrix to find the eigenvalues of.
        :type X: :class:`ndarray`
        :param n: If n is an int, then it is the number of columns to sample otherwise n is an array of column indices.
        """
        logging.warn("This method can result in large errors with indefinite matrices")
        # Sampled column set: random without replacement, or caller-supplied.
        if type(n) == int:
            sampleInds = numpy.sort(numpy.random.permutation(X.shape[0])[0:n])
        else:
            sampleInds = n
        otherInds = numpy.setdiff1d(numpy.arange(X.shape[0]), sampleInds)
        subRows = X[sampleInds, :]
        A = subRows[:, sampleInds]
        B = subRows[:, otherInds]
        # A^{-1/2} via a dense matrix square root; S is the corrected sample block.
        Am12 = numpy.linalg.inv(scipy.linalg.sqrtm(A))
        S = A + Am12.dot(B).dot(B.T).dot(Am12)
        lmbda, U = numpy.linalg.eig(S)
        # Extend the sampled eigenvectors to all rows, then re-orthogonalise.
        Ubar = numpy.r_[U, B.T.dot(U).dot(numpy.diag(1/lmbda))]
        Z = Ubar.dot(numpy.diag(lmbda**0.5))
        sigma, F = numpy.linalg.eig(Z.T.dot(Z))
        V = Z.dot(F).dot(numpy.diag(sigma**-0.5))
        return sigma, V
    @staticmethod
    def eigpsd(X, n):
        """
        Find the eigenvalues and eigenvectors of a positive semi-definite symmetric matrix.
        The input matrix X can be a numpy array or a scipy sparse matrix. In the case that
        n==X.shape[0] we convert to an ndarray.

        :param X: The matrix to find the eigenvalues of.
        :type X: :class:`ndarray`
        :param n: If n is an int, then it is the number of columns to sample otherwise n is an array of column indices.
        :return lmbda: The set of eigenvalues
        :return V: The matrix of eigenvectors as a ndarray
        """
        if type(n) == int:
            n = min(n, X.shape[0])
            sampleInds = numpy.sort(numpy.random.permutation(X.shape[0])[0:n])
        elif type(n) == numpy.ndarray:
            sampleInds = n
        else:
            raise ValueError("Invalid n value: " + str(n))
        otherInds = numpy.setdiff1d(numpy.arange(X.shape[0]), sampleInds)
        # Sampling every column degenerates to an exact dense eigendecomposition.
        if numpy.sort(sampleInds).shape[0] == X.shape[0] and (numpy.sort(sampleInds) == numpy.arange(X.shape[0])).all():
            if scipy.sparse.issparse(X):
                X = numpy.array(X.todense())
            return Util.safeEigh(X)
        subRows = X[sampleInds, :]
        A = subRows[:, sampleInds]
        B = subRows[:, otherInds]
        if scipy.sparse.issparse(X):
            A = numpy.array(A.todense())
            BB = numpy.array((B*B.T).todense())
        else:
            BB = B.dot(B.T)
        # A^{-1/2} via Util, which avoids the very slow scipy.linalg.sqrtm route.
        Am12 = Util.matrixPowerh(A, -0.5)
        S = A + Am12.dot(BB).dot(Am12)
        S = (S.T + S)/2
        lmbda, U = Util.safeEigh(S)
        # Inverse square roots of the eigenvalues; near-null ones are zeroed.
        tol = 10**-10
        lmbdaN = lmbda.copy()
        lmbdaN[numpy.abs(lmbda) < tol] = 0
        lmbdaN[numpy.abs(lmbda) > tol] = lmbdaN[numpy.abs(lmbda) > tol]**-0.5
        V = X[:, sampleInds].dot(Am12.dot(U)*lmbdaN)
        return lmbda, V
    @staticmethod
    def matrixApprox(X, n):
        """
        Compute the Nystrom matrix approximation B A^+ B^T of X, where A is the
        sampled principal submatrix and B the sampled columns.

        :param X: The matrix to approximate.
        :type X: :class:`ndarray`
        :param n: If n is an int, then it is the number of columns to sample otherwise n is an array of column indices.
        """
        if type(n) == int:
            sampleInds = numpy.sort(numpy.random.permutation(X.shape[0])[0:n])
        else:
            sampleInds = n
        A = X[sampleInds, :][:, sampleInds]
        B = X[:, sampleInds]
        if scipy.sparse.issparse(X):
            A = numpy.array(A.todense())
            Ainv = scipy.sparse.csr_matrix(numpy.linalg.pinv(A))
            XHat = B.dot(Ainv).dot(B.T)
        else:
            XHat = B.dot(numpy.linalg.pinv(A)).dot(B.T)
        return XHat
| true |
8443f7b9467e1b5e4285f579e71238e2a1c92ec1 | Python | fchamicapereira/projecteuler | /13.py | UTF-8 | 558 | 3 | 3 | [] | no_license | lines = [line.rstrip('\n') for line in open('13.dat')]
digitsWanted = 10
counter = 0
carry = 0
sum = 0
totalDigits = len(lines[0])
result = ""
print("total digits = " + str(totalDigits))
while totalDigits > counter:
sum = carry
for line in lines:
sum += int(line[totalDigits-1-counter])
digit = (sum % 10)
result = str(digit) + result
carry = (sum - digit)/10
counter += 1
if totalDigits-1-counter < 0:
result = str(carry) + result
break
print result[:]
print len(result)
print result[:digitsWanted] | true |
047b1bc7b6e64758d55908bd27b2ee6db12a4a5a | Python | jia-11/parcel_model | /activation.py | UTF-8 | 40,226 | 2.578125 | 3 | [] | no_license | """
.. module:: parcel
:synopsis: Collection of droplet activation routines
.. moduleauthor:: Daniel Rothenberg <darothen@mit.edu>
"""
import numpy as np
from numpy import min as nmin
from scipy.special import erfc, erf, erfinv
from micro import *
from constants import *
def act_fraction(Smax, T, rs, kappa, r_drys, Nis):
    """Equilibrium and kinetic activated fractions for one aerosol mode.

    Args:
        Smax: maximum supersaturation reached
        T: temperature (K)
        rs: wet radii of the population
        kappa: hygroscopicity parameter
        r_drys: dry radii of the population
        Nis: number concentration per size bin (ndarray)

    Returns:
        (eq_frac, kn_frac): the fraction activated by the equilibrium
        criterion (critical supersaturation exceeded) and by the kinetic
        criterion (critical radius exceeded).

    NOTE - This works for a *single mode*; to study the entire aerosol
    population activated in the parcel model, call once per mode.
    """
    crit_pairs = [kohler_crit(T, r_dry, kappa) for r_dry in r_drys]
    crit_radii = np.array([pair[0] for pair in crit_pairs])
    crit_supersats = np.array([pair[1] for pair in crit_pairs])
    # Equilibrium: the parcel's peak supersaturation exceeds the bin's critical
    # value. Kinetic: the droplet actually outgrew its critical radius.
    eq_mask = crit_supersats <= Smax
    kn_mask = crit_radii <= rs
    N_tot = np.sum(Nis)
    return np.sum(Nis[eq_mask])/N_tot, np.sum(Nis[kn_mask])/N_tot
def ming2006(V, T, P, aerosol):
    """Ming et al. (2006) droplet activation scheme (single mode).

    Bisects on the saturation ratio until the condensation rate balances the
    supersaturation production term alpha*V/gamma.

    :param V: updraft velocity (m/s)
    :param T: temperature (K)
    :param P: pressure (Pa)
    :param aerosol: a single aerosol mode exposing .Nis, .r_drys, .kappa
        and .distribution
    :return: (Smax, None) -- the converged maximum supersaturation.

    NOTE - right now, the variable names correspond to the FORTRAN
    implementation of the routine.
    NOTE(review): RpDry, Dp0, Activity, f_stre, f_strf, DropletNum and ActDp
    are computed but never used in the returned result.
    """
    Num = aerosol.Nis*1e-6
    RpDry = aerosol.distribution.mu*1e-6
    kappa = aerosol.kappa
    ## pre-algorithm
    ## subroutine Kohler()... calculate things from Kohler theory, particularly critical
    ## radii and supersaturations for each bin
    r_crits, s_crits = zip(*[kohler_crit(T, r_dry, kappa) for r_dry in aerosol.r_drys])
    ## subroutine CalcAlphaGamma
    alpha = (g*Mw*L)/(Cp*R*(T**2)) - (g*Ma)/(R*T)
    gamma = (R*T)/(es(T-273.15)*Mw) + (Mw*(L**2))/(Cp*Ma*T*P)
    ## re-name variables as in Ming scheme (diameters in microns, saturation ratios)
    Dpc = 2.*np.array(r_crits)*1e6
    Dp0 = r_crits/np.sqrt(3.)
    Sc = np.array(s_crits)+1.0
    DryDp = aerosol.r_drys*2.
    ## Begin algorithm -- bisect on the saturation ratio in [Smax1, Smax2]
    Smax1 = 1.0
    Smax2 = 1.1
    iter_count = 1
    while (Smax2 - Smax1) > 1e-7:
        #print "\t", iter_count, Smax1, Smax2
        Smax = 0.5*(Smax2 + Smax1)
        #print "---", Smax-1.0
        ## subroutine Grow()
        ## subroutine CalcG()
        # TODO: implement size-dependent effects on Dv, ka, using Dpc
        #G_a = (rho_w*R*T)/(es(T-273.15)*Dv_T(T)*Mw)
        G_a = (rho_w*R*T)/(es(T-273.15)*dv(T, (Dpc*1e-6)/2.)*Mw)
        #G_b = (L*rho_w*((L*Mw/(R*T))-1))/(ka_T(T)*T)
        G_b = (L*rho_w*((L*Mw/(R*T))-1))/(ka(T, 1.007e3, (Dpc*1e-6)/2.)*T)
        G = 1./(G_a + G_b) # multiply by four since we're doing diameter this time
        # Grow only the bins whose critical saturation ratio is exceeded.
        Smax_large = (Smax > Sc) # if(Smax>Sc(count1,count2))
        WetDp = np.zeros_like(Dpc)
        #WetDp[Smax_large] = np.sqrt(Dpc[Smax_large]**2. + 1e12*(G[Smax_large]/(alpha*V))*((Smax-.0)**2.4 - (Sc[Smax_large]-.0)**2.4))
        WetDp[Smax_large] = 1e6*np.sqrt((Dpc[Smax_large]*1e-6)**2. + (G[Smax_large]/(alpha*V))*((Smax-.0)**2.4 - (Sc[Smax_large]-.0)**2.4))
        #print Dpc
        #print WetDp/DryDp
        #print WetDp
        ## subroutine Activity()
        def Activity(dry, wet, dens, molar_weight):
            temp1 = (dry**3)*dens/molar_weight
            temp2 = ((wet**3) - (dry**3))*1e3/0.018
            act = temp2/(temp1+temp2)*np.exp(0.66/T/wet)
            #print dry[0], wet[0], dens, molar_weight, act[0]
            return act
        # Is this just the Kohler curve?
        Act = np.ones_like(WetDp)
        WetDp_large = (WetDp > 1e-5) # if(WetDp(i,j)>1e-5)
        Act[WetDp_large] = Seq(WetDp[WetDp_large]*1e-6, DryDp[WetDp_large], T, kappa) + 1.0
        #Act[WetDp_large] = Activity(DryDp[WetDp_large]*1e6, WetDp[WetDp_large], 1.7418e3, 0.132)
        #print Act
        ## subroutine Conden()
        ## subroutine CalcG()
        # TODO: implement size-dependent effects on Dv, ka, using WetDp
        #G_a = (rho_w*R*T)/(es(T-273.15)*Dv_T(T)*Mw)
        G_a = (rho_w*R*T)/(es(T-273.15)*dv(T, (WetDp*1e-6)/2.)*Mw)
        #G_b = (L*rho_w*((L*Mw/(R*T))-1))/(ka_T(T)*T)
        G_b = (L*rho_w*((L*Mw/(R*T))-1))/(ka(T, 1.3e3, (WetDp*1e-6)/2.)*T)
        G = 1./(G_a + G_b) # multiply by four since we're doing diameter this time
        WetDp_large = (WetDp > Dpc) # (WetDp(count1,count2)>Dpc(count1,count2))
        #WetDp_large = (WetDp > 0)
        f_stre = lambda x: "%12.12e" % x
        f_strf = lambda x: "%1.12f" % x
        #for i, a in enumerate(Act):
        #    if WetDp[i] > Dpc[i]:
        #        print "  ",i+1,  Act[i], f_stre(Smax-Act[i])
        # Total condensation rate over the activated bins at this trial Smax.
        CondenRate = np.sum((np.pi/2.)*1e3*G[WetDp_large]*(WetDp[WetDp_large]*1e-6)*Num[WetDp_large]*1e6*
                            (Smax-Act[WetDp_large]))
        #print iter_count, "%r %r %r" % (Smax, CondenRate, alpha*V/gamma)
        DropletNum = np.sum(Num[WetDp_large])
        ActDp = 0.0
        for i in xrange(1, len(WetDp)):
            if (WetDp[i] > Dpc[i]) and (WetDp[i-1] < Dpc[i]):
                ActDp = DryDp[i]
        ## Iteration logic: shrink the bracket toward the balance point.
        if CondenRate < (alpha*V/gamma):
            Smax1 = Smax*1.0
        else:
            Smax2 = Smax*1.0
        iter_count += 1
    # Convert the converged saturation ratio back to a supersaturation.
    Smax = Smax-1.0
    return Smax, None
def arg2000(V, T, P, aerosols):
    """Abdul-Razzak & Ghan (2000) activation parameterization.

    Originally from Abdul-Razzak 1998 w/ Ma; uses the kappa formulation for
    particle hygroscopicity.

    Args:
        V: updraft velocity (m/s)
        T: temperature (K)
        P: pressure (Pa)
        aerosols: aerosol modes, each with a lognormal .distribution
            (N, mu, sigma) and a .kappa hygroscopicity

    Returns:
        (Smax, act_fracs): the maximum parcel supersaturation and the
        activated number fraction of each mode.
    """
    # Thermodynamic coefficients for the supersaturation tendency equation.
    alpha = (g*Mw*L)/(Cp*R*(T**2)) - (g*Ma)/(R*T)
    gamma = (R*T)/(es(T-273.15)*Mw) + (Mw*(L**2))/(Cp*Ma*T*P)
    # Condensational growth coefficient (continuum-regime diffusivity and
    # thermal conductivity).
    G_a = (rho_w*R*T)/(es(T-273.15)*dv_cont(T, P)*Mw)
    G_b = (L*rho_w*((L*Mw/(R*T))-1))/(ka_cont(T)*T)
    G = 1./(G_a + G_b)
    crit_supersats = []
    mode_terms = []
    for mode in aerosols:
        ln_sigma = np.log(mode.distribution.sigma)
        geo_mean = mode.distribution.mu*1e-6
        number = mode.distribution.N*1e6
        # Lognormal shape factors of the mode (ARG 2000).
        fi = 0.5*np.exp(2.5*(ln_sigma**2))
        gi = 1.0 + 0.25*ln_sigma
        A = (2.*sigma_w(T)*Mw)/(rho_w*R*T)
        _, Smi2 = kohler_crit(T, geo_mean, mode.kappa)
        zeta = (2./3.)*A*(np.sqrt(alpha*V/G))
        etai = ((alpha*V/G)**(3./2.))/(number*gamma*rho_w*2.*np.pi)
        Spa = fi*((zeta/etai)**(1.5))
        Spb = gi*(((Smi2**2)/(etai + 3.*zeta))**(0.75))
        crit_supersats.append(Smi2)
        mode_terms.append((1./(Smi2**2))*(Spa + Spb))
    Smax = 1./np.sqrt(np.sum(mode_terms))
    # Activated fraction of each lognormal mode at Smax.
    act_fracs = []
    for Smi, mode in zip(crit_supersats, aerosols):
        ui = 2.*np.log(Smi/Smax)/(3.*np.sqrt(2.)*np.log(mode.distribution.sigma))
        N_act = 0.5*mode.distribution.N*erfc(ui)
        act_fracs.append(N_act/mode.distribution.N)
    return Smax, act_fracs
def fn2005(V, T, P, aerosols, tol=1e-6, max_iters=100):
    """
    NS 2003 algorithm + FN 2005 corrections for diffusive growth rate.

    Bisects for the maximum parcel supersaturation Smax, then diagnoses the
    activated number fraction of each lognormal mode.

    :param V: updraft velocity (m/s)
    :param T: temperature (K)
    :param P: pressure (Pa)
    :param aerosols: modes with .distribution (N, mu, sigma) and .kappa
    :param tol: relative bisection tolerance on Smax
    :param max_iters: maximum bisection steps
    :return: (Smax, act_fracs)
    """
    #aer = aerosols[0]
    A = (4.*Mw*sigma_w(T))/(R*T*rho_w)
    ## Compute rho_air by assuming air is saturated at given T, P
    # Petty (3.41)
    qsat = 0.622*(es(T-273.15)/P)
    Tv = T*(1.0 + 0.61*qsat)
    rho_air = P/Rd/Tv # air density
    #print "rho_air", rho_air
    # Size-averaged diffusivity over [Dp_low, Dp_big] (the FN 2005 correction).
    Dp_big = 5e-6
    Dp_low = np.min([0.207683*(ac**-0.33048), 5.0])*1e-5
    Dp_B = 2.*dv_cont(T, P)*np.sqrt(2*np.pi*Mw/R/T)/ac
    Dp_diff = Dp_big - Dp_low
    Dv_ave = (dv_cont(T, P)/Dp_diff)*(Dp_diff - Dp_B*np.log((Dp_big + Dp_B)/(Dp_low+Dp_B)))
    G_a = (rho_w*R*T)/(es(T-273.15)*Dv_ave*Mw)
    G_b = (L*rho_w)*(L*Mw/R/T - 1.0)/(ka_cont(T)*T)
    G = 4./(G_a + G_b)
    alpha = (g*Mw*L)/(Cp*R*(T**2)) - (g*Ma)/(R*T)
    gamma = (P*Ma)/(Mw*es(T-273.15)) + (Mw*L*L)/(Cp*R*T*T)
    ## Compute sgi (critical supersaturation) of each mode
    sgis = []
    for aerosol in aerosols:
        _, sgi = kohler_crit(T, aerosol.distribution.mu*1e-6, aerosol.kappa)
        sgis.append(sgi)
    #print "--"*20
    def S_integral(Smax):
        # Condensation-balance residual at a trial Smax; its root is the
        # parcel's maximum supersaturation.
        delta = Smax**4 - (16./9.)*(A*A*alpha*V/G)
        #delta = 1.0 - (16./9.)*alpha*V*(1./G)*((A/(Smax**2))**2)
        if delta > 0:
            sp_sm_sq = 0.5*(1.0 + np.sqrt(delta))
            sp_sm = np.sqrt(sp_sm_sq)
        else:
            arg = (2e7*A/3.)*Smax**(-0.3824)
            sp_sm = np.min([arg, 1.0])
        Spart = sp_sm*Smax
        #print "Spart", Smax, Spart, delta
        I1s, I2s = 0.0, 0.0
        for aerosol, sgi in zip(aerosols, sgis):
            log_sig = np.log(aerosol.distribution.sigma)
            Ni = aerosol.distribution.N*1e6
            upart = 2.*np.log(sgi/Spart)/(3.*np.sqrt(2)*log_sig)
            umax = 2.*np.log(sgi/Smax)/(3.*np.sqrt(2)*log_sig)
            def I1(Smax):
                A1 = (Ni/2.)*((G/alpha/V)**0.5)
                A2 = Smax
                C1 = erfc(upart)
                C2 = 0.5*((sgi/Smax)**2)
                C3a = np.exp(9.*(log_sig**2)/2.)
                C3b = erfc(upart + 3.*log_sig/np.sqrt(2.))
                return A1*A2*(C1 - C2*C3a*C3b)
            def I2(Smax):
                A1 = A*Ni/3./sgi
                A2 = np.exp(9.*(log_sig**2.)/8.)
                C1 = erf(upart - 3.*log_sig/(2.*np.sqrt(2.)))
                C2 = erf(umax - 3.*log_sig/(2.*np.sqrt(2.)))
                return A1*A2*(C1 - C2)
            # NOTE(review): beta is loop-invariant but rebound each pass and
            # read by the return below; with an empty `aerosols` it is never
            # bound and the return raises NameError.
            beta = 0.5*np.pi*gamma*rho_w*G/alpha/V#/rho_air
            #beta = 0.5*np.pi*gamma*rho_w/bet2_par/alpha/V/rho_air
            #print "++", Smax, I1(Smax), I2(Smax)
            I1s += I1(Smax)
            I2s += I2(Smax)
        return Smax*beta*(I1s + I2s) - 1.0
    # Bracket the root between a tiny and a very large supersaturation.
    x1 = 1e-5
    y1 = S_integral(x1)
    x2 = 1.0
    y2 = S_integral(x2)
    #print (x1, y1), (x2, y2)
    #print "BISECTION"
    #print "--"*20
    for i in xrange(max_iters):
        ## Bisection
        #y1, y2 = S_integral(x1), S_integral(x2)
        x3 = 0.5*(x1+x2)
        y3 = S_integral(x3)
        #print "--", x3, y3, "--"
        # Keep the half-interval whose endpoints straddle the sign change.
        if np.sign(y1)*np.sign(y3) <= 0.:
            x2 = x3
            y2 = y3
        else:
            x1 = x3
            y1 = y3
        if np.abs(x2-x1) < tol*x1: break
        #print i, (x1, y1), (x2, y2)
    ## Converged ; return the bracket midpoint as Smax
    x3 = 0.5*(x1 + x2)
    Smax = x3
    #print "Smax = %f (%f)" % (Smax, 0.0)
    act_fracs = []
    for aerosol, sgi in zip(aerosols, sgis):
        ui = 2.*np.log(sgi/Smax)/(3.*np.sqrt(2.)*np.log(aerosol.distribution.sigma))
        N_act = 0.5*aerosol.distribution.N*erfc(ui)
        act_fracs.append(N_act/aerosol.distribution.N)
    return Smax, act_fracs
def ns2003(V, T, P, aerosols, tol=1e-6, max_iters=100):
    """Sketch implementation of Nenes and Seinfeld (2003) parameterization.

    Bisects for the maximum parcel supersaturation, then diagnoses the
    activated fraction of each lognormal mode.

    :param V: updraft velocity (m/s)
    :param T: temperature (K)
    :param P: pressure (Pa)
    :param aerosols: modes with .distribution (N, mu, sigma) and .kappa
    :param tol: relative bisection tolerance on Smax
    :param max_iters: maximum bisection steps
    :return: (Smax, act_fracs)

    NOTE(review): Python 2 (`print` statements, `xrange`). `nmd_par`, `vhfi`,
    `crit2`, `ssplt1` and `ssplt` are assigned but never used afterwards.
    """
    nmd_par = len(aerosols) # number of modes
    vhfi = 3.0 # van't hoff factor (ions/molecule)
    ## Setup constants
    akoh_par = 4.0*Mw*sigma_w(T)/R/T/rho_w
    ## Compute rho_air by assuming air is saturated at given T, P
    # Petty (3.41)
    qsat = 0.622*(es(T-273.15)/P)
    Tv = T*(1.0 + 0.61*qsat)
    rho_air = P/Rd/Tv # air density
    alpha = g*Mw*L/Cp/R/T/T - g*Ma/R/T
    gamma = P*Ma/es(T-273.15)/Mw + Mw*L*L/Cp/R/T/T
    bet2_par = R*T*rho_w/es(T-273.15)/Dv/Mw/4. + L*rho_w/4./Ka/T*(L*Mw/R/T - 1.0)
    beta = 0.5*np.pi*gamma*rho_w/bet2_par/alpha/V/rho_air
    cf1 = 0.5*(((1/bet2_par)/(alpha*V))**0.5)
    cf2 = akoh_par/3.0
    # Critical supersaturation of each mode.
    sgis = []
    for aerosol in aerosols:
        _, sgi = kohler_crit(T, aerosol.distribution.mu*1e-6, aerosol.kappa)
        sgis.append(sgi)
    def sintegral(spar):
        # Returns (droplet number, growth integral, curvature integral) at a
        # trial supersaturation `spar`.
        ## descriminant criterion
        descr = 1.0 - (16./9.)*alpha*V*bet2_par*((akoh_par/(spar**2))**2)
        if descr <= 0.0:
            crit2 = True
            ratio = (2e7/3.)*akoh_par*spar**(-0.3824)
            if ratio > 1.0: ratio = 1.0
            ssplt2 = spar*ratio
        else:
            crit2 = False
            ssplt1 = 0.5*(1.0 - np.sqrt(descr)) # min root of both
            ssplt2 = 0.5*(1.0 + np.sqrt(descr)) # max root of both
            ssplt1 = np.sqrt(ssplt1)*spar # multiply ratios with smax
            ssplt2 = np.sqrt(ssplt2)*spar
        ssplt = ssplt2 # store ssplit in common
        summ, summat, summa = 0, 0, 0
        sqtwo = np.sqrt(2.0)
        for aerosol, sgi in zip(aerosols, sgis):
            sg_par = sgi
            tpi = aerosol.distribution.N*1e6
            dlgsg = np.log(aerosol.distribution.sigma)
            dlgsp = np.log(sg_par/spar)
            orism1 = 2.0*np.log(sg_par/ssplt2)/(3.*sqtwo*dlgsg)
            orism2 = orism1 - 3.0*dlgsg/(2.0*sqtwo)
            orism3 = 2.0*dlgsp/(3.0*sqtwo*dlgsg) - 3.0*dlgsg/(2.0*sqtwo)
            orism4 = orism1 + 3.0*dlgsg/sqtwo
            orism5 = 2.0*dlgsp/(3*sqtwo*dlgsg)
            ekth = np.exp((9./2.)*dlgsg*dlgsg)
            integ1 = tpi*spar*((1-erf(orism1)) - 0.5*((sg_par/spar)**2)*ekth*(1.0-erf(orism4)))
            integ2 = (np.exp((9./8.)*dlgsg*dlgsg)*tpi/sg_par)*(erf(orism2) - erf(orism3))
            nd = (tpi/2.)*(1.0 - erf(orism5))
            summ += integ1
            summat += integ2
            summa += nd
        return summa, summ, summat
    ## Initial values for bisection
    x1 = 1e-5 # min cloud supersaturation
    ndrpl, sinteg1, sinteg2 = sintegral(x1)
    print ndrpl, sinteg1, sinteg2
    y1 = (sinteg1*cf1 + sinteg2*cf2)*beta*x1 - 1.0
    x2 = 1.0 # max cloud supersaturation
    ndrpl, sinteg1, sinteg2 = sintegral(x2)
    print ndrpl, sinteg1, sinteg2
    y2 = (sinteg1*cf1 + sinteg2*cf2)*beta*x2 - 1.0
    print (x1, y1), (x2, y2)
    print "BISECTION"
    print "--"*20
    ## Perform bisection
    for i in xrange(max_iters):
        x3 = 0.5*(x1 + x2)
        # NOTE(review): the third result is unpacked into `sinteg3`, yet the
        # *stale* `sinteg2` (from the x2 evaluation above) is used in y3 --
        # almost certainly a typo-bug; confirm against fn2005 before fixing.
        ndrpl, sinteg1, sinteg3 = sintegral(x3)
        y3 = (sinteg1*cf1 + sinteg2*cf2)*beta*x3 - 1.0
        if np.sign(y1)*np.sign(y3) <= 0.:
            y2 = y3
            x2 = x3
        else:
            y1 = y3
            x1 = x3
        if np.abs(x2-x1) <= tol*x1: break
        print i, (x1, y1), (x2, y2)
    ## Converged ; return
    x3 = 0.5*(x1 + x2)
    # NOTE(review): same stale-sinteg2 issue as in the loop above; y3 is also
    # unused from here on.
    ndrpl, sinteg1, sinteg3 = sintegral(x3)
    y3 = (sinteg1*cf1 + sinteg2*cf2)*beta*x3 - 1.0
    Smax = x3
    print "Smax = %f (%f)" % (Smax, 0.0)
    act_fracs = []
    for aerosol, sgi in zip(aerosols, sgis):
        ui = 2.*np.log(sgi/Smax)/(3.*np.sqrt(2.)*np.log(aerosol.distribution.sigma))
        N_act = 0.5*aerosol.distribution.N*erfc(ui)
        act_fracs.append(N_act/aerosol.distribution.N)
    return Smax, act_fracs
### PCE Parameterization
def lognorm_to_norm(x, mu, sigma):
    """Standardize a lognormal variate.

    Maps *x*, drawn from the lognormal distribution with log-space parameters
    *mu* and *sigma*, onto the standard normal distribution (mean 0, std 1).
    """
    centered = np.log(x) - mu
    return centered/sigma
def uni_to_norm(x, a, b):
    """Map a value from the uniform distribution [a, b] to the standard normal.

    Uses the inverse-CDF (probit) transform: the uniform CDF of *x* is pushed
    through the inverse error function.
    """
    uniform_cdf = 2.*(x - a)/(b - a) - 1.0
    return np.sqrt(2.)*erfinv(uniform_cdf)
def pce_param(V, T, P, aerosols):
    """Polynomial-chaos-expansion emulation of Smax (via _pce_fit).

    Evaluates the PCE fit once per mode, keeps the smallest Smax if it falls
    in [0, 0.5], and diagnoses per-mode activated fractions from lognormal
    statistics. Returns (0., [0., ...]) when the fit is out of that range.
    NOTE(review): Python 2 (`print` statement below).
    """
    Smaxes = []
    for aerosol in aerosols:
        N = aerosol.distribution.N
        mu = aerosol.distribution.mu
        sigma = aerosol.distribution.sigma
        kappa = aerosol.kappa
        Smax = _pce_fit(N, mu, sigma, kappa, V, T, P)
        Smaxes.append(Smax)
        print "PCE with", N, mu, sigma, kappa, V, T, P, Smax
    # Use the most restrictive (smallest) per-mode estimate; out-of-range
    # values mean the emulator is extrapolating, so report no activation.
    min_smax = nmin(Smaxes)
    if 0. <= min_smax <= 0.5:
        Smax = min_smax
    else:
        return 0., [0.]*len(aerosols)
    ## Compute scrit of each mode
    scrits = []
    for aerosol in aerosols:
        _, scrit = kohler_crit(T, aerosol.distribution.mu*1e-6, aerosol.kappa)
        scrits.append(scrit)
    act_fracs = []
    for aerosol, scrit in zip(aerosols, scrits):
        ui = 2.*np.log(scrit/Smax)/(3.*np.sqrt(2.)*np.log(aerosol.distribution.sigma))
        N_act = 0.5*aerosol.distribution.N*erfc(ui)
        act_fracs.append(N_act/aerosol.distribution.N)
    return Smax, act_fracs
def _pce_fit(N, mu, sigma, kappa, V, T, P):
    """Evaluate the fitted 6th-order polynomial chaos expansion for Smax.

    The physical inputs are first mapped onto the standard-normal space in
    which the expansion was fitted: ``N`` through its lognormal prior,
    all other variables through their uniform prior bounds.  The large
    polynomial below is machine-generated output of the PCE fitting
    procedure and should not be edited by hand.
    """
    ## P in Pa
    # Prior ranges used when the expansion was fitted; inputs outside these
    # bounds mean the surrogate is extrapolating.
    dist_bounds = {
        'mu': [0.01, 0.25],
        #'N': [100., 10000.],
        'kappa': [0.1, 1.2],
        'sigma': [1.2, 3.0],
        'V': [0., 4.0],
        'T': [235., 310.],
        'P': [50000., 105000.],
    }
    # Lognormal prior parameters (log-space mean, std) for N.
    dist_params = {
        'N': [ 7.5, 1.95 ],
    }
    # Project each physical input onto the standard normal variable used
    # during the fit.
    N = lognorm_to_norm(N, *dist_params['N'])
    mu = uni_to_norm(mu, *dist_bounds['mu'])
    kappa = uni_to_norm(kappa, *dist_bounds['kappa'])
    sigma = uni_to_norm(sigma, *dist_bounds['sigma'])
    V = uni_to_norm(V, *dist_bounds['V'])
    T = uni_to_norm(T, *dist_bounds['T'])
    P = uni_to_norm(P, *dist_bounds['P'])
    # Machine-generated polynomial expansion -- do not modify by hand.
    Smax = 6.4584111537e-6*N**6 - \
        2.5994976288e-5*N**5 - \
        1.7065251097e-7*N**4*P**2 + \
        1.3741352226e-5*N**4*P + \
        2.8567989557e-5*N**4*T**2 - \
        7.4876643038e-5*N**4*T - \
        2.0388391982e-6*N**4*V**2 + \
        4.3054466907e-5*N**4*V + \
        3.6504788687e-6*N**4*kappa**2 + \
        8.7165631487e-7*N**4*kappa + \
        1.6542743001e-5*N**4*mu**2 + \
        4.8195946039e-6*N**4*mu + \
        3.9282682647e-6*N**4*sigma**2 + \
        1.1137326431e-5*N**4*sigma + \
        2.795758112227e-5*N**4 + \
        1.5947545697e-6*N**3*P**2 - \
        6.9358311166e-5*N**3*P - \
        0.00014252420422*N**3*T**2 + \
        0.00039466661884*N**3*T + \
        2.15368184e-5*N**3*V**2 - \
        0.00025279065671*N**3*V + \
        4.6142483833e-6*N**3*kappa**2 - \
        2.5055687574e-5*N**3*kappa - \
        3.0424806654e-6*N**3*mu**2 - \
        4.5156027497e-5*N**3*mu - \
        1.780917608e-6*N**3*sigma**2 - \
        2.516400813e-5*N**3*sigma - \
        0.0003567127574296*N**3 + \
        5.9696014699e-7*N**2*P**4 - \
        1.3472490172e-5*N**2*P**3 - \
        1.0610551852e-6*N**2*P**2*T**2 + \
        2.0181530448e-6*N**2*P**2*T + \
        2.5327194907e-7*N**2*P**2*V**2 - \
        1.4006527233e-6*N**2*P**2*V + \
        5.4851851852e-7*N**2*P**2*kappa**2 - \
        1.320380981e-6*N**2*P**2*kappa + \
        1.7644666667e-7*N**2*P**2*mu**2 - \
        2.7894950894e-7*N**2*P**2*mu + \
        1.8201189815e-7*N**2*P**2*sigma**2 - \
        5.0510811394e-7*N**2*P**2*sigma - \
        6.88818634103e-6*N**2*P**2 + \
        5.0207581099e-5*N**2*P*T**2 - \
        0.00013814911722*N**2*P*T - \
        6.2792651121e-6*N**2*P*V**2 + \
        7.2980075931e-5*N**2*P*V - \
        3.7856114614e-6*N**2*P*kappa**2 + \
        1.2860228333e-5*N**2*P*kappa - \
        1.5691902399e-6*N**2*P*mu**2 + \
        8.2376491667e-6*N**2*P*mu - \
        1.3435745045e-6*N**2*P*sigma**2 + \
        6.0282465278e-6*N**2*P*sigma + \
        0.0001877522259389*N**2*P - \
        4.0442507595e-5*N**2*T**4 + \
        5.6586533058e-5*N**2*T**3 - \
        8.9548419306e-6*N**2*T**2*V**2 + \
        0.00014183762216*N**2*T**2*V + \
        1.7477041667e-7*N**2*T**2*kappa**2 - \
        2.2336680774e-5*N**2*T**2*kappa - \
        3.9516949861e-5*N**2*T**2*mu**2 - \
        1.428384236e-5*N**2*T**2*mu - \
        8.1085041667e-6*N**2*T**2*sigma**2 - \
        4.4004842538e-5*N**2*T**2*sigma + \
        0.00038258884934483*N**2*T**2 + \
        2.9970384599e-5*N**2*T*V**2 - \
        0.00041049796829*N**2*T*V + \
        6.5092115599e-6*N**2*T*kappa**2 + \
        3.0809800694e-5*N**2*T*kappa + \
        9.9551207477e-5*N**2*T*mu**2 + \
        1.0952167639e-5*N**2*T*mu + \
        2.1329980047e-5*N**2*T*sigma**2 + \
        8.81912525e-5*N**2*T*sigma - \
        0.0008911162845737*N**2*T + \
        6.9026802931e-6*N**2*V**4 - \
        4.7531336217e-5*N**2*V**3 + \
        2.5832318241e-6*N**2*V**2*kappa**2 - \
        1.2472907784e-6*N**2*V**2*kappa + \
        1.1149875079e-5*N**2*V**2*mu**2 - \
        2.9708960501e-6*N**2*V**2*mu + \
        3.2880035648e-7*N**2*V**2*sigma**2 + \
        7.9685785603e-6*N**2*V**2*sigma - \
        8.857197689645e-5*N**2*V**2 - \
        1.3905780926e-5*N**2*V*kappa**2 + \
        2.6425726833e-5*N**2*V*kappa - \
        4.4290453362e-5*N**2*V*mu**2 + \
        3.4602470958e-5*N**2*V*mu - \
        9.497372933e-6*N**2*V*sigma**2 - \
        8.4509070972e-6*N**2*V*sigma + \
        0.0007493009795633*N**2*V + \
        2.8884698866e-6*N**2*kappa**4 + \
        1.349739092e-6*N**2*kappa**3 + \
        1.7550156389e-5*N**2*kappa**2*mu**2 - \
        1.9786638902e-6*N**2*kappa**2*mu + \
        5.529520787e-6*N**2*kappa**2*sigma**2 + \
        1.2209966835e-5*N**2*kappa**2*sigma - \
        5.448370112109e-5*N**2*kappa**2 - \
        4.359847391e-5*N**2*kappa*mu**2 - \
        2.2737228056e-5*N**2*kappa*mu - \
        9.990113266e-6*N**2*kappa*sigma**2 - \
        5.9185131528e-5*N**2*kappa*sigma + \
        9.22206763018e-6*N**2*kappa + \
        3.0424263183e-5*N**2*mu**4 - \
        1.9098455668e-5*N**2*mu**3 + \
        8.54937625e-6*N**2*mu**2*sigma**2 - \
        4.684071842e-6*N**2*mu**2*sigma - \
        0.00035110649314667*N**2*mu**2 - \
        3.6261121147e-6*N**2*mu*sigma**2 - \
        9.2769369028e-5*N**2*mu*sigma + \
        0.00011212992202954*N**2*mu + \
        5.0891009441e-6*N**2*sigma**4 + \
        3.5893477645e-6*N**2*sigma**3 - \
        7.197212424173e-5*N**2*sigma**2 - \
        0.00011060069230486*N**2*sigma + \
        0.00151313669719111*N**2 - \
        1.1284287469e-6*N*P**4 + \
        3.0704412322e-5*N*P**3 + \
        1.6278653855e-6*N*P**2*T**2 - \
        3.3672619444e-6*N*P**2*T - \
        6.2110532065e-8*N*P**2*V**2 + \
        3.2172427639e-6*N*P**2*V - \
        2.8443321387e-7*N*P**2*kappa**2 + \
        7.4341916667e-7*N*P**2*kappa - \
        7.2252756038e-7*N*P**2*mu**2 + \
        8.4614527778e-7*N*P**2*mu - \
        1.2720654237e-7*N*P**2*sigma**2 + \
        2.7419097222e-7*N*P**2*sigma + \
        8.764064001385e-6*N*P**2 - \
        9.5804124167e-5*N*P*T**2 + \
        0.00027775604478*N*P*T + \
        1.2225588236e-5*N*P*V**2 - \
        0.0001716045343*N*P*V + \
        1.8806313889e-6*N*P*kappa**2 - \
        1.2701263287e-6*N*P*kappa + \
        1.3923449444e-5*N*P*mu**2 - \
        1.4186857698e-6*N*P*mu + \
        3.9634190278e-6*N*P*sigma**2 + \
        9.947051115e-6*N*P*sigma - \
        0.0003223815596977*N*P + \
        7.5931315992e-5*N*T**4 - \
        0.00011373913927*N*T**3 + \
        1.5275617964e-5*N*T**2*V**2 - \
        0.00027033311532*N*T**2*V - \
        9.5487976257e-6*N*T**2*kappa**2 + \
        6.1326942361e-5*N*T**2*kappa + \
        1.1264628419e-5*N*T**2*mu**2 + \
        0.00013788716208*N*T**2*mu + \
        8.4656605352e-6*N*T**2*sigma**2 + \
        9.7041865833e-5*N*T**2*sigma - \
        0.00046604057151*N*T**2 - \
        5.2633321153e-5*N*T*V**2 + \
        0.00082461082486*N*T*V + \
        1.9930343472e-5*N*T*kappa**2 - \
        0.00014021885993*N*T*kappa - \
        3.2740759028e-5*N*T*mu**2 - \
        0.00033633604715*N*T*mu - \
        2.7467490833e-5*N*T*sigma**2 - \
        0.0002267701814*N*T*sigma + \
        0.0010623078872764*N*T - \
        1.158262868e-5*N*V**4 + \
        0.00010282581987*N*V**3 + \
        2.0236346649e-7*N*V**2*kappa**2 - \
        7.0861126667e-6*N*V**2*kappa - \
        1.8571179464e-7*N*V**2*mu**2 - \
        2.9025647069e-5*N*V**2*mu - \
        2.8250510694e-6*N*V**2*sigma**2 - \
        1.0873061236e-5*N*V**2*sigma + \
        9.5035330545615e-5*N*V**2 - \
        4.7114408333e-7*N*V*kappa**2 + \
        3.5583995899e-5*N*V*kappa + \
        4.3931099764e-5*N*V*mu**2 + \
        9.4893199047e-5*N*V*mu + \
        1.9266056153e-5*N*V*sigma**2 + \
        8.172457216e-5*N*V*sigma - \
        0.00114623544365757*N*V + \
        2.5465455757e-6*N*kappa**4 - \
        1.1844938245e-5*N*kappa**3 - \
        1.2548361851e-5*N*kappa**2*mu**2 - \
        6.6498102778e-6*N*kappa**2*mu - \
        2.6413318548e-6*N*kappa**2*sigma**2 - \
        1.9743217083e-5*N*kappa**2*sigma - \
        5.237116508322e-5*N*kappa**2 + \
        2.2628180833e-5*N*kappa*mu**2 + \
        6.4409460169e-5*N*kappa*mu + \
        2.1659551389e-6*N*kappa*sigma**2 + \
        8.2294682962e-5*N*kappa*sigma + \
        0.00031141975514413*N*kappa - \
        1.1565474947e-5*N*mu**4 - \
        2.1450508636e-5*N*mu**3 + \
        6.1585477702e-6*N*mu**2*sigma**2 - \
        8.815663375e-5*N*mu**2*sigma + \
        8.443778184842e-5*N*mu**2 - \
        3.4406999306e-5*N*mu*sigma**2 + \
        0.00027943018423*N*mu*sigma + \
        0.00073132224303402*N*mu - \
        8.4378328798e-6*N*sigma**4 - \
        2.0928942447e-5*N*sigma**3 + \
        9.361717372097e-5*N*sigma**2 + \
        0.00042563590086478*N*sigma - \
        0.00207631579223133*N - \
        1.9577562243e-8*P**6 + \
        9.8981784049e-7*P**5 - \
        5.8363352597e-8*P**4*T**2 + \
        2.6457614122e-8*P**4*T + \
        9.0459993866e-8*P**4*V**2 + \
        2.1439092975e-7*P**4*V + \
        1.7814328446e-7*P**4*kappa**2 - \
        4.1686622901e-7*P**4*kappa + \
        5.3644855238e-8*P**4*mu**2 - \
        2.0156224591e-7*P**4*mu + \
        5.8210558734e-8*P**4*sigma**2 - \
        1.8962978248e-7*P**4*sigma + \
        4.46780827354e-7*P**4 - \
        1.1972281072e-6*P**3*T**2 + \
        5.3768532472e-6*P**3*T + \
        2.2417961995e-7*P**3*V**2 - \
        6.4936735747e-6*P**3*V - \
        7.4042040112e-7*P**3*kappa**2 + \
        3.1988643743e-6*P**3*kappa - \
        1.1174867493e-7*P**3*mu**2 + \
        4.9097886778e-6*P**3*mu + \
        2.5563905537e-7*P**3*sigma**2 + \
        3.0112545186e-6*P**3*sigma - \
        2.528028422697e-5*P**3 - \
        6.2461608542e-8*P**2*T**4 + \
        7.2160816962e-9*P**2*T**3 - \
        3.9231712963e-8*P**2*T**2*V**2 + \
        1.2045330835e-7*P**2*T**2*V - \
        1.2065046296e-7*P**2*T**2*kappa**2 + \
        5.5655764074e-8*P**2*T**2*kappa - \
        7.1769768519e-7*P**2*T**2*mu**2 + \
        9.214390015e-7*P**2*T**2*mu - \
        1.1352175926e-7*P**2*T**2*sigma**2 - \
        7.2727690784e-8*P**2*T**2*sigma + \
        9.20245283507e-7*P**2*T**2 + \
        8.0179117696e-8*P**2*T*V**2 + \
        4.235625e-8*P**2*T*V + \
        2.258923022e-7*P**2*T*kappa**2 - \
        4.446875e-7*P**2*T*kappa + \
        1.319233337e-6*P**2*T*mu**2 - \
        2.0462986111e-6*P**2*T*mu + \
        8.8850197051e-8*P**2*T*sigma**2 + \
        3.2751388889e-8*P**2*T*sigma - \
        3.083990086676e-7*P**2*T + \
        1.3373206908e-7*P**2*V**4 + \
        9.0363593389e-8*P**2*V**3 + \
        2.4463467593e-7*P**2*V**2*kappa**2 - \
        3.2486785978e-7*P**2*V**2*kappa + \
        2.7125416667e-7*P**2*V**2*mu**2 - \
        7.8996752457e-8*P**2*V**2*mu + \
        2.3869884259e-7*P**2*V**2*sigma**2 - \
        3.9030882886e-8*P**2*V**2*sigma - \
        1.676552162853e-6*P**2*V**2 - \
        3.2961961287e-7*P**2*V*kappa**2 + \
        1.0572459722e-6*P**2*V*kappa + \
        5.8846025249e-7*P**2*V*mu**2 - \
        1.2420694444e-7*P**2*V*mu + \
        1.1084042637e-7*P**2*V*sigma**2 + \
        5.9708680556e-7*P**2*V*sigma - \
        2.731802676607e-6*P**2*V + \
        1.6946466336e-7*P**2*kappa**4 - \
        1.697455568e-7*P**2*kappa**3 + \
        4.3087824074e-7*P**2*kappa**2*mu**2 - \
        2.6231749106e-8*P**2*kappa**2*mu + \
        4.1886990741e-7*P**2*kappa**2*sigma**2 - \
        2.1180014438e-7*P**2*kappa**2*sigma - \
        2.68356980142e-6*P**2*kappa**2 - \
        1.1445592205e-6*P**2*kappa*mu**2 + \
        2.838125e-7*P**2*kappa*mu - \
        7.6035506889e-7*P**2*kappa*sigma**2 + \
        3.2736111111e-9*P**2*kappa*sigma + \
        5.198700314056e-6*P**2*kappa + \
        2.570161515e-7*P**2*mu**4 - \
        4.2080255264e-7*P**2*mu**3 + \
        1.7831666667e-7*P**2*mu**2*sigma**2 - \
        1.3384887703e-6*P**2*mu**2*sigma - \
        2.364220102728e-6*P**2*mu**2 + \
        8.8640907579e-8*P**2*mu*sigma**2 + \
        1.2368444444e-6*P**2*mu*sigma + \
        4.855707265804e-6*P**2*mu + \
        9.059626753e-8*P**2*sigma**4 - \
        1.5254956604e-7*P**2*sigma**3 - \
        1.126043894964e-6*P**2*sigma**2 + \
        2.8315063203e-6*P**2*sigma - \
        2.778055205468e-6*P**2 - \
        2.1400747816e-6*P*T**4 + \
        4.9258105417e-6*P*T**3 + \
        5.6327936107e-7*P*T**2*V**2 + \
        9.3250965278e-6*P*T**2*V + \
        3.2710316757e-6*P*T**2*kappa**2 - \
        8.2942513889e-6*P*T**2*kappa + \
        7.9343747988e-6*P*T**2*mu**2 - \
        1.9181326389e-5*P*T**2*mu + \
        1.5642303199e-6*P*T**2*sigma**2 - \
        7.5503763889e-6*P*T**2*sigma + \
        2.871260752173e-5*P*T**2 + \
        1.4432569444e-7*P*T*V**2 - \
        3.6214883811e-5*P*T*V - \
        7.5203763889e-6*P*T*kappa**2 + \
        2.3241170134e-5*P*T*kappa - \
        1.7993693056e-5*P*T*mu**2 + \
        5.5177810267e-5*P*T*mu - \
        1.9631597222e-6*P*T*sigma**2 + \
        2.2474684728e-5*P*T*sigma - \
        0.00010697897078404*P*T + \
        4.3918577044e-7*P*V**4 - \
        6.5398867255e-6*P*V**3 - \
        1.6642529664e-6*P*V**2*kappa**2 + \
        4.5294820833e-6*P*V**2*kappa - \
        8.3413225416e-7*P*V**2*mu**2 + \
        3.3331961111e-6*P*V**2*mu - \
        5.4879252019e-7*P*V**2*sigma**2 + \
        2.5988245833e-6*P*V**2*sigma - \
        6.06167138571e-6*P*V**2 + \
        1.76840125e-6*P*V*kappa**2 - \
        1.2335091147e-5*P*V*kappa - \
        7.172135e-6*P*V*mu**2 - \
        1.4280271047e-5*P*V*mu - \
        1.7709023611e-6*P*V*sigma**2 - \
        1.5558439144e-5*P*V*sigma + \
        0.0001341572007329*P*V - \
        1.3847755834e-6*P*kappa**4 + \
        2.9663988051e-6*P*kappa**3 - \
        6.701873906e-7*P*kappa**2*mu**2 + \
        1.2602319444e-6*P*kappa**2*mu - \
        1.6684083648e-6*P*kappa**2*sigma**2 + \
        4.8915402778e-6*P*kappa**2*sigma + \
        1.830572029266e-5*P*kappa**2 + \
        7.2758208333e-6*P*kappa*mu**2 - \
        4.4294673496e-6*P*kappa*mu + \
        5.7967319444e-6*P*kappa*sigma**2 - \
        7.5561654674e-6*P*kappa*sigma - \
        6.41037679493e-5*P*kappa + \
        1.1303131108e-7*P*mu**4 + \
        4.477378242e-6*P*mu**3 - \
        8.1756005619e-8*P*mu**2*sigma**2 + \
        1.8019847222e-5*P*mu**2*sigma + \
        3.591958513889e-6*P*mu**2 + \
        3.5776194444e-6*P*mu*sigma**2 - \
        2.1832827595e-5*P*mu*sigma - \
        0.000104836086236*P*mu + \
        4.4303746065e-7*P*sigma**4 + \
        3.5105365953e-6*P*sigma**3 - \
        5.903007579301e-6*P*sigma**2 - \
        6.46457674367e-5*P*sigma + \
        0.000248152385516871*P + \
        9.3486231047e-7*T**6 - \
        1.8168892496e-6*T**5 - \
        1.2017117971e-7*T**4*V**2 - \
        6.0623117829e-6*T**4*V - \
        2.2138037142e-6*T**4*kappa**2 + \
        5.3907485972e-6*T**4*kappa - \
        1.5012731379e-5*T**4*mu**2 + \
        2.9320254519e-5*T**4*mu - \
        9.9591672455e-7*T**4*sigma**2 + \
        4.5407528726e-6*T**4*sigma - \
        2.1701625406048e-5*T**4 - \
        8.86080478e-8*T**3*V**2 + \
        1.4786332237e-5*T**3*V + \
        3.9370511477e-6*T**3*kappa**2 - \
        1.1123904654e-5*T**3*kappa + \
        1.9629299957e-5*T**3*mu**2 - \
        4.3941049344e-5*T**3*mu + \
        1.1089594981e-6*T**3*sigma**2 - \
        9.8896573442e-6*T**3*sigma + \
        4.92291899313038e-5*T**3 - \
        1.8208011851e-7*T**2*V**4 - \
        3.3830247075e-6*T**2*V**3 + \
        1.5875634259e-7*T**2*V**2*kappa**2 - \
        8.2594310191e-7*T**2*V**2*kappa - \
        7.0372356481e-6*T**2*V**2*mu**2 + \
        1.1314601854e-5*T**2*V**2*mu + \
        2.6109148148e-7*T**2*V**2*sigma**2 - \
        1.4817938429e-6*T**2*V**2*sigma + \
        2.584581978913e-6*T**2*V**2 + \
        8.1820206167e-6*T**2*V*kappa**2 - \
        2.152989125e-5*T**2*V*kappa + \
        3.5087114657e-5*T**2*V*mu**2 - \
        7.62298625e-5*T**2*V*mu + \
        3.7441553065e-6*T**2*V*sigma**2 - \
        1.9480860556e-5*T**2*V*sigma + \
        8.078026266135e-5*T**2*V - \
        1.4079168102e-6*T**2*kappa**4 + \
        8.494577264e-7*T**2*kappa**3 - \
        1.9940069444e-6*T**2*kappa**2*mu**2 - \
        3.7981316228e-6*T**2*kappa**2*mu + \
        1.2927453704e-7*T**2*kappa**2*sigma**2 - \
        5.9931564037e-6*T**2*kappa**2*sigma + \
        2.918584228186e-5*T**2*kappa**2 - \
        5.4461049955e-7*T**2*kappa*mu**2 + \
        1.7234859722e-5*T**2*kappa*mu - \
        1.9437451044e-6*T**2*kappa*sigma**2 + \
        1.7031693056e-5*T**2*kappa*sigma - \
        6.0924394269614e-5*T**2*kappa - \
        1.3344139356e-5*T**2*mu**4 + \
        6.9440170146e-6*T**2*mu**3 - \
        6.7769083333e-6*T**2*mu**2*sigma**2 + \
        3.2599345224e-6*T**2*mu**2*sigma + \
        0.00022442210764479*T**2*mu**2 + \
        7.6319402846e-6*T**2*mu*sigma**2 + \
        7.2359722222e-6*T**2*mu*sigma - \
        0.0003491716663951*T**2*mu - \
        1.1631111786e-6*T**2*sigma**4 + \
        2.6724248621e-6*T**2*sigma**3 + \
        1.649502809964e-5*T**2*sigma**2 - \
        6.1536219106916e-5*T**2*sigma + \
        0.000140143705596224*T**2 + \
        2.7736623456e-7*T*V**4 + \
        1.5654708427e-5*T*V**3 + \
        2.1280663429e-6*T*V**2*kappa**2 - \
        3.8522365278e-6*T*V**2*kappa + \
        2.1368239286e-5*T*V**2*mu**2 - \
        3.6494785e-5*T*V**2*mu + \
        2.4667129876e-8*T*V**2*sigma**2 + \
        8.2984694444e-7*T*V**2*sigma - \
        1.16151114743199e-6*T*V**2 - \
        2.4386513472e-5*T*V*kappa**2 + \
        7.2626637568e-5*T*V*kappa - \
        8.6385334444e-5*T*V*mu**2 + \
        0.00022158505878*T*V*mu - \
        6.7549275e-6*T*V*sigma**2 + \
        6.7690826574e-5*T*V*sigma - \
        0.000319030815886*T*V + \
        5.7579921563e-6*T*kappa**4 - \
        5.9128382699e-6*T*kappa**3 + \
        5.8599504703e-6*T*kappa**2*mu**2 + \
        1.0919901389e-5*T*kappa**2*mu + \
        2.3570428983e-6*T*kappa**2*sigma**2 + \
        1.1143115278e-5*T*kappa**2*sigma - \
        9.05515382865e-5*T*kappa**2 - \
        5.1905069444e-6*T*kappa*mu**2 - \
        4.8460056021e-5*T*kappa*mu - \
        1.9954263889e-6*T*kappa*sigma**2 - \
        4.0364821013e-5*T*kappa*sigma + \
        0.0002112574003288*T*kappa + \
        3.6831785874e-5*T*mu**4 - \
        2.2679927293e-5*T*mu**3 + \
        1.3165213945e-5*T*mu**2*sigma**2 - \
        1.5411466667e-5*T*mu**2*sigma - \
        0.0005305662075903*T*mu**2 - \
        1.4521058333e-5*T*mu*sigma**2 - \
        2.36356423e-5*T*mu*sigma + \
        0.0008959089906671*T*mu + \
        3.1144363024e-6*T*sigma**4 - \
        1.1900713105e-5*T*sigma**3 - \
        3.6444491206927e-5*T*sigma**2 + \
        0.000221944115643271*T*sigma - \
        0.000541037097804642*T + \
        1.3872452626e-7*V**6 + \
        2.6280928855e-6*V**5 + \
        1.4116924747e-6*V**4*kappa**2 - \
        2.6532439544e-6*V**4*kappa + \
        4.0826945223e-6*V**4*mu**2 - \
        6.7305962741e-6*V**4*mu + \
        3.2377252949e-7*V**4*sigma**2 - \
        2.3853637883e-7*V**4*sigma - \
        2.12902319506e-6*V**4 - \
        3.4146924781e-6*V**3*kappa**2 + \
        1.1308634888e-5*V**3*kappa - \
        4.3203910556e-6*V**3*mu**2 + \
        2.0522142146e-5*V**3*mu - \
        1.1130033126e-7*V**3*sigma**2 + \
        9.6641202763e-6*V**3*sigma - \
        6.9565263098929e-5*V**3 + \
        9.124821437e-7*V**2*kappa**4 - \
        2.374870717e-7*V**2*kappa**3 + \
        2.0551150926e-6*V**2*kappa**2*mu**2 + \
        2.1737964134e-6*V**2*kappa**2*mu + \
        1.5400502778e-6*V**2*kappa**2*sigma**2 + \
        2.1762389258e-6*V**2*kappa**2*sigma - \
        1.908307985662e-5*V**2*kappa**2 - \
        2.2644466603e-6*V**2*kappa*mu**2 - \
        7.2844616667e-6*V**2*kappa*mu - \
        1.327008481e-6*V**2*kappa*sigma**2 - \
        7.3647294444e-6*V**2*kappa*sigma + \
        3.083805354799e-5*V**2*kappa + \
        6.376029485e-6*V**2*mu**4 - \
        3.4468156835e-6*V**2*mu**3 + \
        1.9027688889e-6*V**2*mu**2*sigma**2 - \
        1.9749133587e-6*V**2*mu**2*sigma - \
        8.992990807087e-5*V**2*mu**2 - \
        1.2826181036e-6*V**2*mu*sigma**2 - \
        8.6458333333e-7*V**2*mu*sigma + \
        0.000119137833700857*V**2*mu + \
        3.1330206897e-7*V**2*sigma**4 - \
        5.9611039059e-7*V**2*sigma**3 - \
        5.61848663091e-6*V**2*sigma**2 + \
        1.0076975521136e-5*V**2*sigma - \
        2.21975659993502e-6*V**2 - \
        5.5542905873e-6*V*kappa**4 + \
        8.2797491074e-6*V*kappa**3 - \
        5.5680226085e-6*V*kappa**2*mu**2 - \
        1.0037508333e-6*V*kappa**2*mu - \
        5.6383374563e-6*V*kappa**2*sigma**2 + \
        4.7589241667e-6*V*kappa**2*sigma + \
        8.232677423507e-5*V*kappa**2 + \
        2.0663045e-5*V*kappa*mu**2 + \
        7.3770892156e-6*V*kappa*mu + \
        1.2777554444e-5*V*kappa*sigma**2 + \
        4.4264848543e-6*V*kappa*sigma - \
        0.0002273892174654*V*kappa - \
        1.7755825532e-5*V*mu**4 + \
        2.2503273728e-5*V*mu**3 - \
        7.4071030901e-6*V*mu**2*sigma**2 + \
        5.0431555833e-5*V*mu**2*sigma + \
        0.00023575988653791*V*mu**2 + \
        1.6386676389e-5*V*mu*sigma**2 - \
        4.6608524981e-5*V*mu*sigma - \
        0.00059482989619126*V*mu - \
        4.6406148944e-7*V*sigma**4 + \
        1.2492365373e-5*V*sigma**3 + \
        6.64618520895e-6*V*sigma**2 - \
        0.00023138236574996*V*sigma + \
        0.000737904956621858*V + \
        8.9855895986e-7*kappa**6 + \
        2.5470308117e-9*kappa**5 + \
        1.2059968463e-6*kappa**4*mu**2 + \
        3.1793778702e-6*kappa**4*mu + \
        1.0401467472e-6*kappa**4*sigma**2 + \
        2.1906938947e-6*kappa**4*sigma - \
        2.433005812556e-5*kappa**4 - \
        4.7304299279e-7*kappa**3*mu**2 - \
        6.5083289391e-6*kappa**3*mu - \
        1.4235522045e-7*kappa**3*sigma**2 - \
        5.8027556768e-6*kappa**3*sigma + \
        1.4243394357223e-5*kappa**3 + \
        3.9738486622e-6*kappa**2*mu**4 - \
        2.1223005863e-6*kappa**2*mu**3 + \
        1.0183000926e-5*kappa**2*mu**2*sigma**2 - \
        1.2959512062e-5*kappa**2*mu**2*sigma - \
        3.411711272594e-5*kappa**2*mu**2 - \
        1.0317703172e-5*kappa**2*mu*sigma**2 + \
        1.7773383333e-5*kappa**2*mu*sigma - \
        3.0801900036594e-5*kappa**2*mu + \
        1.5939164141e-6*kappa**2*sigma**4 + \
        1.3527431493e-6*kappa**2*sigma**3 - \
        2.054649657405e-5*kappa**2*sigma**2 - \
        2.925616248782e-5*kappa**2*sigma + \
        0.00020667647366024*kappa**2 - \
        7.1129482287e-6*kappa*mu**4 + \
        2.347842395e-7*kappa*mu**3 - \
        2.3796144071e-5*kappa*mu**2*sigma**2 + \
        1.8951372222e-5*kappa*mu**2*sigma + \
        4.588667312192e-5*kappa*mu**2 + \
        2.7744291667e-5*kappa*mu*sigma**2 - \
        4.024641369e-5*kappa*mu*sigma + \
        0.0001409512704825*kappa*mu - \
        2.3923579072e-6*kappa*sigma**4 - \
        6.8064892728e-6*kappa*sigma**3 + \
        2.465996737684e-5*kappa*sigma**2 + \
        0.000123292481750089*kappa*sigma - \
        0.000440486811020821*kappa + \
        4.9302956951e-6*mu**6 + \
        2.366766686e-6*mu**5 + \
        8.8283463137e-6*mu**4*sigma**2 - \
        1.3070610726e-5*mu**4*sigma - \
        0.0001299209556219*mu**4 - \
        1.7778517447e-5*mu**3*sigma**2 + \
        2.0156346188e-5*mu**3*sigma + \
        7.22110904344e-6*mu**3 + \
        2.7912531806e-7*mu**2*sigma**4 - \
        1.6060547415e-6*mu**2*sigma**3 - \
        4.639438308883e-5*mu**2*sigma**2 + \
        5.69529527941e-5*mu**2*sigma + \
        0.00107865381644979*mu**2 + \
        1.6967924396e-6*mu*sigma**4 - \
        8.6474054636e-6*mu*sigma**3 + \
        5.0242308290501e-5*mu*sigma**2 + \
        0.00011747955359353*mu*sigma - \
        0.00160943569318205*mu + \
        7.81942578e-7*sigma**6 - \
        2.6973326406e-6*sigma**5 - \
        1.688778013916e-5*sigma**4 + \
        6.520497638123e-5*sigma**3 + \
        9.1407247080762e-5*sigma**2 - \
        0.00056464306868082*sigma + \
        0.00111457799998979
    return Smax
| true |
8251e1637e64603e631ea837187ff7d77581333e | Python | pkilli/INF5620 | /assignment2/Neumann_discr.py | UTF-8 | 8,665 | 2.921875 | 3 | [] | no_license | """
1D wave equation with Dirichlet or Neumann conditions
and variable wave velocity::
u, x, t, cpu = solver(I, V, f, c, U_0, U_L, L, dt, C, T,
user_action=None, version='scalar',
stability_safety_factor=1.0)
Solve the wave equation u_tt = (c**2*u_x)_x + f(x,t) on (0,L) with
u=U_0 or du/dn=0 on x=0, and u=u_L or du/dn=0
on x = L. If U_0 or U_L equals None, the du/dn=0 condition
is used, otherwise U_0(t) and/or U_L(t) are used for Dirichlet cond.
Initial conditions: u=I(x), u_t=V(x).
T is the stop time for the simulation.
dt is the desired time step.
C is the Courant number (=max(c)*dt/dx).
stability_safety_factor enters the stability criterion:
C <= stability_safety_factor (<=1).
I, f, U_0, U_L, and c are functions: I(x), f(x,t), U_0(t),
U_L(t), c(x).
U_0 and U_L can also be 0, or None, where None implies
du/dn=0 boundary condition. f and V can also be 0 or None
(equivalent to 0). c can be a number or a function c(x).
user_action is a function of (u, x, t, n) where the calling code
can add visualization, error computations, data analysis,
store solutions, etc.
"""
import sympy as sy
import numpy as np
import time
def solver(I, V, f, c, U_0, U_L, L, dt, C, T, version, stab_factor, user_action=None):
    """Solve u_tt=(c^2*u_x)_x + f on (0,L)x(0,T].

    Parameters
    ----------
    I, V, f : callable, 0 or None
        Initial condition I(x), initial velocity V(x), source term f(x,t);
        None/0 means identically zero.
    c : number or callable c(x)
        (Variable) wave velocity.
    U_0, U_L : callable, 0 or None
        Dirichlet boundary values at x=0 / x=L; None selects the Neumann
        condition du/dn=0 at that end.
    L, dt, C, T : float
        Domain length, time step, Courant number (=max(c)*dt/dx), stop time.
    version : str
        'scalar' or 'vectorized' inner update.
    stab_factor : float
        Stability safety factor (<=1) entering the mesh construction.
    user_action : callable, optional
        Called as user_action(u, x, t, n) at every time level; a truthy
        return value aborts the time loop.

    Returns
    -------
    float
        Elapsed wall-clock seconds for the computation.  (NOTE(review):
        the module docstring advertises ``u, x, t, cpu``, but only the
        timing is returned; kept as-is for caller compatibility.)
    """
    Nt = int(round(T/dt))
    t = np.linspace(0, Nt*dt, Nt+1)    # Mesh points in time

    # Find max(c) using a fake mesh and adapt dx to C and dt
    if isinstance(c, (float, int)):
        c_max = c
    elif callable(c):
        c_max = max([c(x_) for x_ in np.linspace(0, L, 101)])
    dx = dt*c_max/(stab_factor*C)
    Nx = int(round(L/dx))
    x = np.linspace(0, L, Nx+1)        # Mesh points in space

    # Treat c(x) as array
    if isinstance(c, (float, int)):
        c = np.zeros(x.shape) + c
    elif callable(c):
        # Call c(x) and fill array c
        c_ = np.zeros(x.shape)
        for i in range(Nx+1):
            c_[i] = c(x[i])
        c = c_

    q = c**2
    C2 = (dt/dx)**2; dt2 = dt*dt       # Help variables in the scheme

    # Wrap user-given f, I, V, U_0, U_L if None or 0
    if f is None or f == 0:
        f = (lambda x, t: 0) if version == 'scalar' else \
            lambda x, t: np.zeros(x.shape)
    if I is None or I == 0:
        I = (lambda x: 0) if version == 'scalar' else \
            lambda x: np.zeros(x.shape)
    if V is None or V == 0:
        V = (lambda x: 0) if version == 'scalar' else \
            lambda x: np.zeros(x.shape)
    if U_0 is not None:
        if isinstance(U_0, (float, int)) and U_0 == 0:
            U_0 = lambda t: 0
    if U_L is not None:
        if isinstance(U_L, (float, int)) and U_L == 0:
            U_L = lambda t: 0

    u = np.zeros(Nx+1)     # Solution array at new time level
    u_1 = np.zeros(Nx+1)   # Solution at 1 time level back
    u_2 = np.zeros(Nx+1)   # Solution at 2 time levels back

    # BUG FIX: the original used time.clock() (removed in Python 3.8) and
    # computed t0 - <end time>, i.e. a negative duration.  Use wall-clock
    # time and subtract in the right order.
    t0 = time.time()

    Ix = range(0, Nx+1)
    It = range(0, Nt+1)

    # Load initial condition into u_1
    for i in range(0, Nx+1):
        u_1[i] = I(x[i])

    if user_action is not None:
        user_action(u_1, x, t, 0)

    # Special formula for the first step (uses the initial velocity V)
    for i in Ix[1:-1]:
        u[i] = u_1[i] + dt*V(x[i]) + \
               0.5*C2*(0.5*(q[i] + q[i+1])*(u_1[i+1] - u_1[i]) - \
                       0.5*(q[i] + q[i-1])*(u_1[i] - u_1[i-1])) + \
               0.5*dt2*f(x[i], t[0])

    i = Ix[0]
    if U_0 is None:
        # Set boundary values (x=0: i-1 -> i+1 since u[i-1]=u[i+1]
        # when du/dn = 0, on x=L: i+1 -> i-1 since u[i+1]=u[i-1])
        ip1 = i+1
        im1 = ip1   # i-1 -> i+1
        u[i] = u_1[i] + dt*V(x[i]) + \
               0.5*C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
                       0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
               0.5*dt2*f(x[i], t[0])
    else:
        u[i] = U_0(dt)

    i = Ix[-1]
    if U_L is None:
        im1 = i-1
        ip1 = im1   # i+1 -> i-1
        u[i] = u_1[i] + dt*V(x[i]) + \
               0.5*C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
                       0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
               0.5*dt2*f(x[i], t[0])
    else:
        u[i] = U_L(dt)

    if user_action is not None:
        user_action(u, x, t, 1)

    # Update data structures for next step (cheap pointer rotation instead
    # of copying the arrays)
    u_2, u_1, u = u_1, u, u_2

    for n in It[1:-1]:
        # Update all inner points
        if version == 'scalar':
            for i in Ix[1:-1]:
                u[i] = - u_2[i] + 2*u_1[i] + \
                       C2*(0.5*(q[i] + q[i+1])*(u_1[i+1] - u_1[i]) - \
                           0.5*(q[i] + q[i-1])*(u_1[i] - u_1[i-1])) + \
                       dt2*f(x[i], t[n])
        elif version == 'vectorized':
            u[1:-1] = - u_2[1:-1] + 2*u_1[1:-1] + \
                      C2*(0.5*(q[1:-1] + q[2:])*(u_1[2:] - u_1[1:-1]) -
                          0.5*(q[1:-1] + q[:-2])*(u_1[1:-1] - u_1[:-2])) + \
                      dt2*f(x[1:-1], t[n])
        else:
            raise ValueError('version=%s' % version)

        # Insert boundary conditions
        i = Ix[0]
        if U_0 is None:
            # x=0: i-1 -> i+1 since u[i-1]=u[i+1] when du/dn=0
            ip1 = i+1
            im1 = ip1
            u[i] = - u_2[i] + 2*u_1[i] + \
                   C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
                       0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
                   dt2*f(x[i], t[n])
        else:
            u[i] = U_0(t[n+1])

        i = Ix[-1]
        if U_L is None:
            # x=L: i+1 -> i-1 since u[i+1]=u[i-1] when du/dn=0
            im1 = i-1
            ip1 = im1
            u[i] = - u_2[i] + 2*u_1[i] + \
                   C2*(0.5*(q[i] + q[ip1])*(u_1[ip1] - u_1[i]) - \
                       0.5*(q[i] + q[im1])*(u_1[i] - u_1[im1])) + \
                   dt2*f(x[i], t[n])
        else:
            u[i] = U_L(t[n+1])

        if user_action is not None:
            if user_action(u, x, t, n+1):
                break

        # Update data structures for next step
        u_2, u_1, u = u_1, u, u_2

    # Important to correct the mathematically wrong u=u_2 above
    # before returning u
    u = u_1
    cpu_time = time.time() - t0   # BUG FIX: was `t0 - time.clock()` (always negative)
    return cpu_time
def test_convergence_rate(L,w,q,u, u_exact):
    """Estimate observed convergence rates of ``solver`` for a sequence of
    halved time steps, manufacturing the source term with sympy so that *u*
    is an exact solution of the wave equation with wave-speed profile ``q``.
    """
    x,t,w,L = sy.symbols("x t w L")
    #Find source term: f
    #Find u_tt
    u_tt = sy.diff(u,t,t)
    #Find q*u_x, first u_x
    u_x = sy.diff(u,x)
    q_u_x = q*u_x
    q_u_xx = sy.diff(q_u_x,x)
    # Residual of the PDE: choosing f = u_tt - (q*u_x)_x makes u exact.
    f = u_tt - q_u_xx
    # Turn the symbolic expressions into plain callables for the solver.
    f = sy.lambdify((x,t),f)
    u = sy.lambdify((x,t),u)
    q = sy.lambdify((x),q)
    # NOTE(review): the symbolic L and w above (and the function arguments)
    # are shadowed by these hard-coded numbers -- the caller's values are
    # ignored.
    L=1
    w=1
    c = lambda x : np.sqrt(q(x))
    # None selects the Neumann condition du/dn = 0 at both ends.
    U_0 = None
    U_L = None
    V = None
    I = lambda x : u(x,0)
    C = 0.89
    dt = 0.1
    T = 2
    stab_factor = 1.0
    # Successively halved time steps for the rate estimate.
    dt_values = [dt*2**(-i) for i in range(5)]
    E_values = []
    def plot(u,x,t,n):
        """user_action function for solver."""
        import matplotlib.pyplot as plt
        plt.plot(x, u, 'r-')
        plt.draw()
        time.sleep(2) if t[n] == 0 else time.sleep(0.2)
    class Action:
        """Store last solution."""
        def __call__(self, u, x, t, n):
            if n == len(t)-1:
                self.u = u.copy()
                self.x = x.copy()
                self.t = t[n]
    action = Action()
    for _dt in dt_values:
        # NOTE(review): solver returns the elapsed time, not dx; the value
        # is only referenced by the commented-out error formula below.
        dx = solver(I,V,f,c,U_0,U_L,L,_dt,C,T,"scalar",stab_factor,user_action=action)
        u_num = action.u
        #E = np.sqrt(dx*sum(u_exact(action.x, action.t)-u_num)**2)
        E = np.absolute(u_exact(action.x, action.t)-u_num).max() #sup norm
        E_values.append(E)
    def convergence_rate(E, h):
        # r_i = log(E_{i-1}/E_i) / log(h_{i-1}/h_i), rounded to 2 decimals.
        m = len(dt_values)
        r = [np.log(E[i-1]/E[i])/np.log(h[i-1]/h[i]) for i in range(1,m, 1)]
        r = [round(r_,2) for r_ in r]
        return r
    # One extra run purely for visualization of the coarsest solution.
    solver(I,V,f,c,U_0,U_L,L,dt,C,T,"scalar",stab_factor,user_action=plot)
    return convergence_rate(E_values, dt_values)
if __name__ == "__main__":
    # NOTE: this driver uses Python 2 print statements; the module is not
    # runnable under Python 3 as-is.
    print "Task a:"
    x,t,w,L = sy.symbols("x t w L")
    # Domain length and angular frequency of the exact standing wave.
    L = 1
    w = 1
    # Exact solution u(x,t) = cos(pi*x/L)*cos(w*t) used for error measurement.
    u_exact = lambda x,t: np.cos(np.pi*x/float(L))*np.cos(w*t)
    # Task a: quartic wave-speed profile q(x) = 1 + (x - L/2)**4.
    # NOTE(review): with L = 1 (an int) under Python 2, L/2 is integer
    # division and evaluates to 0, so this is actually 1 + x**4 -- confirm
    # intent.
    q_a = 1+(x-(L)/2)**4
    u = sy.cos(sy.pi*x/float(L))*sy.cos(w*t)
    r1 = test_convergence_rate(L,w,q_a,u,u_exact)
    print r1
    print "----------"
    print "Task b"
    # Task b: cosine wave-speed profile q(x) = 1 + cos(pi*x/L).
    q_b = 1 + sy.cos(sy.pi*x/L)
    r2 = test_convergence_rate(L,w,q_b,u,u_exact)
    print r2
| true |
670de7ad501fad8dad4f7653568b606db7dff413 | Python | Yang-chen205/badou-Turing | /113-吕一萌-南京/第二周/my_bilinear_interpolation.py | UTF-8 | 1,419 | 3.09375 | 3 | [] | no_license | import numpy as np
import cv2
def my_imread(path) -> np.ndarray:
    """Load the image file at *path* via OpenCV (BGR channel order).

    NOTE(review): cv2.imread returns None (not an ndarray) when the file
    cannot be read -- callers should check before indexing the result.
    """
    return cv2.imread(path)
# Upscale "lenna.png" by 1.5x with hand-written bilinear interpolation.
img = my_imread("lenna.png")
print(img)
ori_h, ori_w, channel = img.shape[:3]
# Upscaling factor: destination size = source size * scale.
scale = 3 / 2
new_h, new_w = int(ori_h * scale), int(ori_w * scale)
img_new = np.zeros((new_h, new_w, channel), img.dtype)
for i in range(channel):  # BUG FIX: was range(3), ignoring the real channel count
    for dst_y in range(new_h):
        for dst_x in range(new_w):
            # Inverse mapping with geometric-center alignment.
            # BUG FIX: the original multiplied by `scale` instead of
            # dividing, mapping destination pixels past the source bounds
            # (IndexError for any dst beyond ori/scale).  Clamp at 0 so the
            # floor below never yields a negative (wrapping) index.
            src_x = max((dst_x + 0.5) / scale - 0.5, 0)
            src_y = max((dst_y + 0.5) / scale - 0.5, 0)
            # The four neighbouring source pixels, clamped to the borders.
            src_x0 = int(np.floor(src_x))
            src_x1 = min(src_x0 + 1, ori_w - 1)
            src_y0 = int(np.floor(src_y))
            src_y1 = min(src_y0 + 1, ori_h - 1)
            # Interpolate along x on both rows, then along y.
            temp0 = (src_x1 - src_x) * img[src_y0, src_x0, i] + (src_x - src_x0) * img[src_y0, src_x1, i]
            temp1 = (src_x1 - src_x) * img[src_y1, src_x0, i] + (src_x - src_x0) * img[src_y1, src_x1, i]
            img_new[dst_y, dst_x, i] = int((src_y1 - src_y) * temp0 + (src_y - src_y0) * temp1)
print("new:")
print(img_new)
cv2.imwrite("lenna_bilinear_interpoaton.png", img_new)
d0da34241817029ac7047a54632c7e2a81d2a4b2 | Python | JGornas/SmsGate | /send_sms.py | UTF-8 | 1,933 | 2.640625 | 3 | [
"MIT"
] | permissive | import os
import argparse
import logging
from twilio.rest import Client
from twilio.base.exceptions import TwilioRestException, TwilioException
class Logger:
    """Attach a timestamped file handler to the root logger on construction."""

    def __init__(self, level=logging.INFO, filename="smsgate.log", mode="a", encoding="utf-8"):
        file_handler = logging.FileHandler(filename, mode, encoding)
        file_handler.setFormatter(
            logging.Formatter("%(asctime)s:%(levelname)s:%(message)s"))
        self.root_logger = logging.getLogger()
        self.root_logger.setLevel(level)
        self.root_logger.addHandler(file_handler)
class SmsSender(Logger):
    """Thin wrapper around the Twilio REST client for sending SMS messages."""

    def __init__(self, account_sid=os.getenv("ACCOUNT_SID"), auth_token=os.getenv("AUTH_TOKEN")):
        # NOTE: the credential defaults are read from the environment once,
        # when the class definition is executed.
        super().__init__()
        self.client = Client(account_sid, auth_token)

    def send_sms(self, text, sender_number, receiver_number):
        """Send *text* from *sender_number* to *receiver_number*, then print
        the resulting message's update timestamp and SID."""
        message = self.client.messages.create(
            body=text, from_=sender_number, to=receiver_number)
        print(message.date_updated, message.sid)
def parse_arguments():
    """Parse CLI arguments: a required message body plus optional sender and
    receiver phone numbers, defaulting to the corresponding environment
    variables when the flags are omitted."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("message", help="message content")
    arg_parser.add_argument(
        "-s", "--sender",
        type=int,
        default=os.getenv("SENDER_NUMBER"),
        help="custom sender number")
    arg_parser.add_argument(
        "-r", "--receiver",
        type=int,
        default=os.getenv("RECEIVER_NUMBER"),
        help="custom receiver number")
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = parse_arguments()
    try:
        # Credentials are taken from the environment (ACCOUNT_SID/AUTH_TOKEN)
        # via SmsSender's default arguments.
        s = SmsSender()
        s.send_sms(args.message, args.sender, args.receiver)
        print(f"'{args.message}' - Message sent successfully from +{args.sender} to +{args.receiver}.")
    except TwilioRestException:
        # Raised by the REST API call itself (e.g. a rejected phone number).
        print("Unable to send message. Invalid phone number.")
    except TwilioException:
        # Base Twilio error; must come after the more specific subclass
        # above, otherwise it would shadow it.
        print("Unable to send message. Invalid credentials.")
6a6e3efc712f659fd93cf93e68e655b635441d34 | Python | poojitha9-jpg/data-preprocessing | /preprocessing (3).py | UTF-8 | 1,187 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env python
# coding: utf-8
# In[63]:
# Import library
import pandas as pd #Data manipulation
import numpy as np #Data manipulation
import matplotlib.pyplot as plt # Visualization
import seaborn as sns #Visualization
# NOTE(review): absolute, user-specific Windows path -- this only works on
# the original author's machine; consider a relative path or CLI argument.
data=pd.read_csv('C:/Users/M V N POOJITHA/Downloads/credit_train.csv') #loading dataset
# In[73]:
def preprocessing():
    """Report shape, dtypes and summary statistics of the module-level
    ``data`` frame, then drop every row containing a missing value.

    Prints the diagnostics, leaves ``data`` itself untouched, and returns
    the cleaned copy (previously the cleaned frame was computed and
    reported but discarded).

    Returns
    -------
    pandas.DataFrame
        ``data`` with all rows containing at least one NA removed.
    """
    print("numbers of rows and columns:",data.shape)
    print(data.index)
    print(data.info())
    print(data.describe())
    print(data.dtypes)
    print(type(data))
    # Drop every row with at least one missing value (non-destructive copy).
    new_data = data.dropna(axis = 0, how ='any')
    # comparing sizes of data frames
    print("Old data frame length:", len(data), "\nNew data frame length:",
       len(new_data), "\nNumber of rows with at least 1 NA value: ",
       (len(data)-len(new_data)))
    print("missing values:",data.isnull().sum())
    print("remove missing values:",new_data.isnull().sum())
    # Return the cleaned frame so callers can actually use the result.
    return new_data
# NOTE(review): the name is a typo for "categorical"; kept as-is because the
# script below calls it by this spelling.
def catergorical():
    '''Return every column of the module-level ``data`` frame except the
    last one, as a raw numpy array (rows x features).'''
    X=data.iloc[:,:-1].values
    return X
# In[74]:
# Print the dataset diagnostics and drop rows with missing values.
preprocessing()
# In[66]:
# Extract the feature columns (all but the last) as a numpy array; the
# return value is discarded here.
catergorical()
| true |
962b6f829825d7d8d172776c76822996945e6cf5 | Python | lindsaywan/AllGeneratorFunction | /adrienne_potential_fit.py | UTF-8 | 3,816 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 23 00:22:47 2015
@author: Lindsay
This module fits the potential data by Adrienne Dubin from Merkel cell
potential recordings.
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
def func_exp(params, x):
    """Exponential model ``a * exp(b * x) + c`` with ``params = (a, b, c)``."""
    amplitude, rate, offset = params
    return amplitude * np.exp(rate * x) + offset
def get_sse(params, func, x, y):
    """Sum of squared residuals between ``func(params, x)`` and the
    observations *y* (objective for the curve fits below)."""
    predicted = func(params, x)
    residuals = predicted - y
    return (residuals ** 2).sum()
def multi_curve_fit(x, y, x0, bounds):
    """Two-pass robust exponential fit over the columns of *y*.

    Pass 1 fits every trace (column of *y*) starting from the initial guess
    *x0* and takes the per-parameter median.  Pass 2 refits every trace from
    that median guess and returns the median of the refined parameters.
    """
    def _fit_all(guess):
        # SLSQP fit of func_exp to each column of y; one parameter row per trace.
        return np.array([
            minimize(get_sse, guess, args=(func_exp, x, trace),
                     method='SLSQP', bounds=bounds).x
            for trace in y.T
        ])

    robust_guess = np.median(_fit_all(x0), axis=0)
    return np.median(_fit_all(robust_guess), axis=0)
if __name__ == '__main__':
# Merkel cell voltage traces
traces = np.loadtxt('./Adrienne recordings data/Organized data/'
'Merkel cell potential/Vol_614D 028 mechano in CC.csv',
delimiter=',')
stimul = np.loadtxt('./Adrienne recordings data/Organized data/'
'Merkel cell potential/'
'Disp_614D 028 mechano in CC.csv', delimiter=',')
# %% Examine the data
t = traces[:, 0]
v = traces[:, 1:]
s = stimul[:, 1:]
fit_num = 0
mc_pot_rcd = v[:, fit_num]
fig, axs = plt.subplots()
volt_plot = axs.plot(t, v, c='0')
volt_plot = axs.plot(t, v[:, fit_num], c='0')
fig2, axs2 = plt.subplots()
stimul_plot = axs2.plot(t, s, c='0.7')
stimul_plot = axs2.plot(t, s[:, fit_num], c='0')
# %% Set parameters and fit the curves
fs = 5e-5 # in sec, sampling frequency
# starting points
# fit_start = np.array([0.05, 0.05, 0.07, 0.07, 0.05, 0.046, 0.044]) # 621A 004
fit_start = np.array([0.05, 0.05, 0.06, 0.056, 0.05, 0.058, 0.048]) # 614D 028
# fit_start = np.array([0.05, 0.05, 0.06, 0.06, 0.048, 0.06, 0.05, 0.056, 0.061]) # 614C 018
fit_end = 0.13 # in sec
fit_start_index = (fit_start/fs).astype(int)
fit_end_index = round(fit_end/fs)
# Merkel cell voltage x0
x0 = np.array((10, -10, -60))
# Neuron current x0
bounds = ((0, None), (None, 0), (-91, 0))
# %% Curve fit in main cell
fit_params_list = []
for i, trace in enumerate(traces.T[1:]):
time_interval = t[fit_start_index[i]:fit_end_index] - fit_start[i]
voltage = trace[fit_start_index[i]:fit_end_index]
result = minimize(get_sse, x0, args=(func_exp, time_interval, voltage),
method='SLSQP', bounds=bounds)
fit_params_list.append(result.x)
fit_params_array = np.array(fit_params_list)
# Plot multiple fit curves
fig3, axs3 = plt.subplots()
plot_from = 0
for j in range(plot_from, v.shape[1]-1):
a,b,c = fit_params_array[j, :]
time_interval = t[fit_start_index[j]:fit_end_index] - fit_start[j]
fit_trace = a*np.exp(time_interval*b)+c
fit_plot = axs3.plot(t, v[:, j], c='0.7')
fit_plot = axs3.plot(time_interval+fit_start[j], fit_trace, c='k')
axs3.set_xticks(np.arange(min(t), max(t), 0.1))
axs3.set_yticks(np.arange(-60, 20, 20))
fig3.tight_layout()
# np.savetxt('voltage_fit.csv', fit_trace, delimiter=',')
# np.savetxt('voltage_fit_time.csv', time_interval+fit_start[j], delimiter=',')
| true |
621c3efb76eaad178d036585cb1cd133ab3a6bf6 | Python | wyxct/spider | /the_new_york_time/part_1.py | UTF-8 | 1,605 | 2.59375 | 3 | [] | no_license | import requests
import json
import re
import csv
from selenium import webdriver
from bs4 import BeautifulSoup
from lxml import etree
driver=webdriver.Chrome()
def get_form(url):
    """Scrape the NYT democratic-polls candidate table at *url* into data.csv.

    Writes a header row first, clicks the table's rollup cell via Selenium
    (presumably to expand the full candidate list -- confirm against the live
    page), then appends one CSV row per candidate row found.
    Relies on the module-level ``driver`` (Chrome) and the ``write`` helper.
    """
    # Header row for the output CSV.
    data=['Qualified for the November debate','NATIONAL POLLING AVERAGE','INDIVIDUAL CONTRIBUTIONS','WEEKLY NEWS COVERAGE']
    write(data)
    driver.get(url)
    # The rollup <td> inside tr.g-cand-rollup is clicked through JS.
    elem=driver.find_element_by_css_selector('#democratic-polls > div.g-graphic.g-graphic-freebird > div.g-item.g-overview > div.g-item.g-graphic.g-candidate-overview > div > div.g-candidates-table-outer.g-table-outer > table > tbody > tr.g-cand-rollup > td')
    driver.execute_script('arguments[0].click()', elem)
    html = driver.page_source
    bs=BeautifulSoup(html,'html.parser')
    bs1=bs.find('div',class_='g-item g-overview').find('tbody')
    rows=bs1.find_all('tr')
    for row in rows:
        try:
            data=[]
            cols=row.find_all('td')
            # Column 0 holds the candidate name span; columns 1-3 the metrics.
            data.append(cols[0].find('span',class_='g-desktop').get_text())
            data.append(cols[1].find('span',class_='g-contents').get_text())
            data.append(cols[2].find('span',class_='g-contents').get_text())
            data.append(cols[3].find('span',class_='g-contents').get_text())
            write(data)
        except:
            # NOTE(review): bare except stops at the first row that does not
            # match the expected structure and hides any real error.
            break
def write(data):
    """Append *data* as one CSV row to data.csv in the current directory.

    Fixed: the original opened the file and never closed it, leaking a file
    handle per call and leaving flushing to the garbage collector.  The
    ``newline=''`` argument is required by the csv module on text files.
    """
    with open('data.csv', 'a', newline='') as out_file:
        csv.writer(out_file, dialect='excel').writerow(data)
def main():
    """Script entry point: scrape the NYT poll page into data.csv."""
    target = "https://www.nytimes.com/interactive/2020/us/elections/democratic-polls.html"
    get_form(target)
if __name__ == '__main__':
main() | true |
64e9320bb57e4b2966483fea8506b350795cf858 | Python | ricardo7al/compresion-datos-python | /compressString.py | UTF-8 | 268 | 3.015625 | 3 | [] | no_license |
# Demo: compress a short, highly repetitive byte string with zlib and
# compare the in-memory sizes before and after.
text = b'zyx zyx zyx zyx zyx zyx zyx zyx zyx'

import sys, zlib
print ("Text:", text)
print ("Size in bytes:", sys.getsizeof(text))  # getsizeof includes the bytes-object header, not just the payload
print ()
compressed = zlib.compress(text)
print("Text compressed:", compressed)
print("size in bytes:", sys.getsizeof(compressed))
| true |
7548497331367e566e67572787f5d2b734edfe43 | Python | marikoll/ProgrammingExercise_NP | /Exercise_1.py | UTF-8 | 1,166 | 3 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Dec 6 21:13:18 2020
@author: maritkollstuen
"""
import requests
import pandas as pd
import json
# Import JSON data (the API's 'array' variant returns the header row as the
# first element of the list).
url = 'https://api.npolar.no/marine/biology/sample/?q=&fields=expedition,utc_date,programs,conveyance&limit=all&format=json&variant=array'
data = requests.get(url).json()
column_names = data.pop(0)

# Convert to Pandas DataFrame
df = pd.DataFrame(data, columns = column_names)
df['utc_date'] = pd.to_datetime(df['utc_date'])
df = df.sort_values(by=['expedition', 'utc_date'])

# Extract unique expeditions and their start- and end date.  Because rows are
# sorted by date within each expedition, 'first'/'last' give start/end dates.
# Assumed the first program and vessel are the same throughout the expetition
df_to_json = df.groupby('expedition')['utc_date'].agg(['first',
                                     'last']).rename(columns={'first':'start_date',
                                     'last':'end_date'})
df_to_json['programs'] = df.groupby('expedition')['programs'].agg('first')
df_to_json['conveyance'] = df.groupby('expedition')['conveyance'].agg('first')

# Convert back to JSON and pretty-print one object per expedition.
result = df_to_json.to_json(orient = "index")
parsed = json.loads(result)
print(json.dumps(parsed, indent = 4))
4e1530cb5f0db579bc0f96bd6a87145026132996 | Python | j-lindsey/learningPython | /main.py | UTF-8 | 6,120 | 4.4375 | 4 | [] | no_license | name = input('What is your name? ')
print('hellooooo ' + name)
#Fundamental Data typesT
#int - integer
print(2 + 4)
print(type(2 + 4))
#float - float point
print(type(9.9 + 1.1))
print(2**3) #2 to power of 3
print(3 // 4) #rounds value to integer
print(5 % 4) #remainder
#math functions
print(round(3.1)) #rounds number
print(abs(-20)) #returns absolute value
#operator precedence
print((5 + 4) * 10 / 2) #guess 45
print(((5 + 4) * 10) / 2) # guess 45
print((5 + 4) * (10 / 2)) #guess 45
print(5 + (4 * 10) / 2) #guess 25
print(5 + 4 * 10 // 2) # 25
print(bin(5)) #bin returns binary value
print(int('0b101', 2))
counter = 0
counter += 1
counter += 1
counter += 1
counter += 1
counter -= 1
counter *= 2
#Before you click RUN, guess what the counter variable holds in memory!
print(counter) # guess 6
#bool - true/false
#str - string
#formatted strings
name = "joelle"
age = 3
print(f'hi {name}, You are {age} years old')
print('hi {}, You are {} years old'.format(name, age))
print('hi {1}, You are {0} years old'.format(name, age))
print('hi {new_name}, You are {age} years old'.format(new_name='sally',
age=100))
num = '01234567'
print(num[::-1]) #can be used to reverse a string
quote = 'to be or not to be'
print(quote.upper())
print(quote.capitalize())
print(quote.find('be')) #finds index of first occurance of text
print(quote.replace('be', 'me')) #does not change the original string
print(quote)
#type conversions
birth_year = input('what year were you born? ')
age = 2021 - int(birth_year)
print(f'your age is {age}')
username = input('What is your username? ')
password = input('Please enter your password ')
sec_password = '*' * len(password)
print(
f'{username}, your password {sec_password} is {len(password)} letters long'
)
#list -
li = [1, 2, 3, 4, 5]
li2 = [1, 2, 'a', True] # can have multiple variable types
amazon_cart = ['notebooks', 'sunglasses', 'toys', 'grapes']
amazon_cart[0] = 'laptop'
print(
amazon_cart[0:3]) #list slicing creates new list does not change original
print(amazon_cart)
# using this list:
basket = ["Banana", ["Apples", ["Oranges"], "Blueberries"]]
# access "Oranges" and print it:
# You will find the answer if you scroll down to the bottom, but attempt it yourself first!
print(basket[1][1][0])
#list methods
#adding
basket1 = [1, 2, 3, 4, 5]
new_list = basket1.append(100) #append changes list in place
new_list = basket1
print(basket1)
print(new_list)
basket1.insert(4, 100) #modifies array in place
print(basket1)
basket1.extend([100, 101])
print(basket1)
#removing
basket1.pop() #removes last item
print(basket1)
basket1.pop(0) #removes item at 0 index
print(basket1)
basket1.remove(4) #removes the number 4 in the list
print(basket1)
#basket1.clear()
print(basket1) #removes all items in the new_list
print(basket1.index(2))
print(basket1.count(100))
basket1.sort() #modifies existing array
print(basket1)
sorted(basket1) #creates new array
basket1.reverse()
print(basket1)
print(basket1[::-1]) #reverses list in new instance
print(list(range(1, 100)))
sentence = '!'
new_sentence = sentence.join(['hi', 'my', 'name', 'is', 'jojo'])
print(new_sentence)
#list unpacking
a, b, c, *other, d = [1, 2, 3, 4, 5, 6, 7, 8, 9]
print(a)
print(b)
print(c)
print(other)
#dictionary (hash table, objects)
dictionary = {'a': 1, 'b': 2}
print(dictionary['b'])
user = {'basket': [1, 2, 3], 'greet': 'hello'}
print(user.get('age'))
print(user.get('age',
55)) #if age doesnt exist won't get erro when running code.
print('basket' in user)
print('basket' in user.keys())
print(user.items())
#fix this code so that it prints a sorted list of all of our friends (alphabetical). Scroll to see answer
friends = ['Simon', 'Patty', 'Joy', 'Carrie', 'Amira', 'Chu']
new_friend = ['Stanley']
friends.append(new_friend[0])
friends.sort()
print(friends)
#Scroll down to see the answers!
#1 Create a user profile for your new game. This user profile will be stored in a dictionary with keys: 'age', 'username', 'weapons', 'is_active' and 'clan'
userprofile = {
'age': 25,
'username': 'joelle',
'weapons': ['axe'],
'is_active': True,
'clan': '1'
}
#2 iterate and print all the keys in the above user.
print(userprofile.keys())
#3 Add a new weapon to your user
userprofile['weapons'].append('knife')
print(userprofile)
#4 Add a new key to include 'is_banned'. Set it to false
userprofile['is_banned'] = False #can use user.update({'is_banned': False})
print(userprofile)
#5 Ban the user by setting the previous key to True
userprofile['is_banned'] = True
print(userprofile)
#6 create a new user2 my copying the previous user and update the age value and username value.
user2 = userprofile.copy()
print(user2)
user2['username'] = 'John'
user2['age'] = 50
print(user2)
print(userprofile)
#tuple are immutable
#benefit is can't be changed, good for communicating things that aren't modified
#faster than lists
my_tuple = (1, 2, 3, 4, 5)
print(my_tuple[2])
print(5 in my_tuple)
new_tuple = my_tuple[1:2]
print(new_tuple)
x, y, x, *other = (1, 2, 3, 4, 5)
print(my_tuple.count(2))
print(my_tuple.index(3))
print(len(my_tuple))
# set: unordered collection of unique, hashable elements
my_set = {1, 2, 3, 4, 5, 5}
my_list = [1, 2, 3, 4, 5, 5]
my_set.add(100)
my_set.add(2)
print(my_set)
print(set(my_list)) # converts list to set of unique values
print(1 in my_set) #checks if 1 exists in my_set
your_set = {4,5,6,7,8,9,10}
print(my_set.difference(your_set)) #sets out the difference doesnt modify set
print(my_set.discard(5)) #modifies by removing value; returns None
print(my_set)
print(my_set.difference_update(your_set)) #modifies set in place; returns None
print(my_set)
print(my_set.intersection(your_set)) #intersection between two sets
print(my_set.isdisjoint(your_set)) #True when the sets share no elements
print(my_set.union(your_set)) #adds sets together without duplicates
my_set = {4,5}
print(my_set.issubset(your_set))
print(my_set.issuperset(your_set))
# Fixed typo: 'isupserset' raised AttributeError at runtime.
print(your_set.issuperset(my_set))
#Classes - customized types
#Specialized Data Types - packages and modules from libraries
| true |
105111fa1821e10e016c247c24d9a9f5ff269e25 | Python | CexBomb/Master_Data_Science_Repo | /Spark/TF-IDF.py | UTF-8 | 2,133 | 2.84375 | 3 | [] | no_license | paragraphs = sc.newAPIHadoopFile('data/shakespeare.txt', "org.apache.hadoop.mapreduce.lib.input.TextInputFormat","org.apache.hadoop.io.LongWritable", "org.apache.hadoop.io.Text",conf={"textinputformat.record.delimiter": '\n\n'}).map(lambda l:l[1])
# Me cargo todo lo que no sea una letra o numero
cleanParagraphs = paragraphs.map(lambda paragraph: re.sub('[^a-zA-Z0-9 ]','',paragraph.lower().strip()))
# Quito los parrafos vacíos
cleanParagraphs = cleanParagraphs.map(lambda paragraph: re.sub('[ ]+',' ',paragraph)).filter(lambda l: l!='')
cleanParagraphs.toDebugString() #Para ver el "linaje". Todo el camino que recorre el RDD. Se llama DAG. Esto permite recuperarlo en caso de caida
cleanParagraphs.getStorageLevel() #Muestra los niveles de almacenaje del RDD
# Ejercicio: contar el número de palabras de cada parrafo
cleanParagraphs.map(lambda x: len(x.split(' '))).take(5)
# Hacer histograma de lo anterior
tmp = cleanParagraphs.map(lambda x: len(x.split(' '))).map(lambda num: (num,1))
tmp = tmp.reduceByKey(lambda x,y: x+y)
#Sacar la frecuencia de cada palabra
import numpy as np
numpy.histogram
# TF - IDF
# Encontramos la fracuencia de las palabras
from pyspark.mllib.feature import HashingTF
wordInDoc = cleanParagraphs.flatMap(lambda p: p.split(' ')).distinct().cache()
hashingTF = HashingTF(wordInDoc.count())
tf = hashingTF.transform(cleanParagraphs)
hashingTF.indexO
# IDF
# Le da un peso a cada palabra
from pyspark.mllib.feature import IDF
idf = IDF(minDocFreq=2).fit(tf) #El número mínimo de palabras en los documentos para ser tenidas en cuenta
tfidf = idf.transform(tf)
tfidf = tfidf.zipWithIndex()
tfidf = tfidf.map(lambda (doc_tfidf,index): (index,doc_tfidf)).cache()
def change_SparseVector(vec):
    """Convert a SparseVector-like object (``.indices`` / ``.values``) into a
    plain {index: value} dict."""
    return dict(zip(vec.indices, vec.values))
def devuelve_query(query, dicD):
    """Return the total TF-IDF weight in *dicD* for the term ids in *query*.

    Term ids missing from the document's dict contribute 0.  Fixed: the
    original shadowed the built-in ``sum`` with its local accumulator.
    """
    return sum(dicD[q] for q in query if q in dicD)
# NOTE(review): these lambdas use Python 2 tuple-parameter unpacking
# ("lambda (i, v): ..."), which is a SyntaxError under Python 3.
tfidf = tfidf.map(lambda (i,v): (i, change_SparseVector(v)) )
query = [1245,10978]
# Per-document total TF-IDF weight of the query terms.  This is a lazy Spark
# transformation; nothing forces evaluation and the result is not assigned.
tfidf.map(lambda (idD,dicD): (idD,devuelve_query(query,dicD)))
| true |
8c27610c5596f34b4670d9a9028480c5a683e221 | Python | launchany/workshop-labs | /mq-2/emit_log.py | UTF-8 | 578 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
import pika
import sys
#
# A simple script to publish a message to a RabbitMQ topic
#
# Based on the script found at: https://www.rabbitmq.com/tutorials/tutorial-three-python.html
#
# Connect to the broker reachable under the hostname 'rabbitmq'
# (e.g. a docker-compose service name).
connection = pika.BlockingConnection(
    pika.ConnectionParameters(host='rabbitmq'))
channel = connection.channel()
# Fanout exchange: every queue bound to it receives a copy of each message.
channel.exchange_declare(exchange='labs-mq-2', exchange_type='fanout')
# Message text comes from the command line, with a default fallback.
message = ' '.join(sys.argv[1:]) or "info: Hello World!"
channel.basic_publish(exchange='labs-mq-2', routing_key='', body=message)
print(" [x] Sent %r" % message)
connection.close()
| true |
6e664ae54f2703ea5ace040ff94c34d1fb218945 | Python | dylanmcg22/Scripts | /functions to return names and first letter of name.py | UTF-8 | 468 | 4.625 | 5 | [] | no_license | #this function will take and return the
#first letter of the name
def get_initial(name):
    """Return the first character of *name* upper-cased ('' for empty input)."""
    return name[:1].upper()
# Ask for someone's name and return the initials
first_name = input('Enter your first name: ')
middle_name = input('Enter your middle name: ')
last_name = input('Enter your last name: ')
print('Yours initials are: ' \
+ get_initial(first_name) \
+ get_initial(middle_name) \
+ get_initial(last_name)) | true |
2733e184cc451ce9e274cf656884e2b644cd2f57 | Python | ricardodani/vaclabtest | /nqueens.py | UTF-8 | 2,442 | 3.8125 | 4 | [] | no_license | # -*- coding: utf-8 -*-
# Author: Ricardo Dani (https://github.com/ricardodani)
import sys
def new_board(n):
    """Return an empty n x n board: 'XxY' position keys mapped to '-'."""
    return {"%dx%d" % (cell % n, cell // n): '-' for cell in range(n * n)}
def can_attack_test(position, queens):
    """Return True when a queen at *position* threatens any queen in *queens*.

    Positions are 'RxC' strings.  Two queens attack each other when they
    share a row, share a column, or sit on a common diagonal
    (|row difference| == |column difference|).
    """
    row, col = position.split('x')
    for placed in queens:
        placed_row, placed_col = placed.split('x')
        if row == placed_row or col == placed_col:
            return True
        if abs(int(row) - int(placed_row)) == abs(int(col) - int(placed_col)):
            return True
    return False
def print_board(board, n):
    """Pretty-print an n x n board dict ('XxY' -> piece) as ASCII rows.

    ``None`` cells render as a blank; any other value ('-' or 'Q') is shown
    verbatim.  Fixed: the original used the Python 2 ``print`` statement,
    which is a SyntaxError under Python 3.
    """
    def _cell(value):
        # None means "no marker"; keep a space so the columns stay aligned.
        return ' ' if value is None else value
    print(''.join(
        '| %s %s' % (_cell(board["%dx%d" % (x, y)]),
                     '|\n' if y % n == n - 1 else '')
        for x in range(n)
        for y in range(n)
    ))
def nqueens(n):
    """Greedy N-Queens search with one attempt per starting column.

    For each starting column c, place a queen at '0xc', then sweep the
    remaining squares in sorted key order, placing a queen on every square
    not attacked by the queens placed so far.  Attempts that end with n
    queens are reported and drawn via print_board.

    NOTE(review): this greedy sweep is not exhaustive backtracking, so it
    can miss solutions and does not count every arrangement.  Behaviour is
    kept; only the Python 2 ``print`` statements and the
    ``keys(); sort()`` list idiom were converted to run under Python 3.
    """
    print("Solving N-Queens problem with n == %d" % n)
    # one attempt per column
    results = []
    for attempt in range(n):
        board = new_board(n)
        queen = '0x%d' % attempt
        queens = [queen]
        board[queen] = 'Q'
        # Deterministic sweep order (Python 2 used keys() + in-place sort).
        for position in sorted(board.keys()):
            if not can_attack_test(position, queens):
                board[position] = 'Q'
                queens.append(position)
        if len(queens) == n:
            results.append((board, queens))
    if results:
        print("{} results found: \n{}".format(len(results), [x[1] for x in results]))
        for result in results:
            print_board(result[0], n)
    else:
        print("No results found")
if __name__ == '__main__':
    # Board size comes from argv[1] when given (clamped to >= 1), else 4.
    default = 4  # min 1
    try:
        n = max(int(sys.argv[1]), 1) if len(sys.argv) == 2 else default
    except ValueError:
        # Fixed: Python 2 print statement -> Python 3 function call.
        print("Invalid argument.")
    else:
        nqueens(n)
| true |
e8c2c5c0b5b59f2b3c70c98929f80936c7d4c5be | Python | mollinaca/ac | /code/abc/164/c.py | UTF-8 | 132 | 2.953125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Count the number of distinct strings among n input lines.
n = int(input())
s = []
for _ in range(n):
    s.append(input())
print (len(set(s)))  # set() removes duplicates; its size is the answer
| true |
c5b96d97f3c8916ba0e5d938f7a94fb1a87002f1 | Python | stevenlundy/pu-algos-1 | /2-stacks-queues/linkedlist.py | UTF-8 | 950 | 3.6875 | 4 | [] | no_license | class Linkedlist:
    def __init__(self):
        # An empty list: both ends point at nothing.
        self.head = None  # first node, or None when the list is empty
        self.tail = None  # last node, or None when the list is empty
    def __iter__(self):
        """Start a traversal from the head and return the list as its own iterator.

        NOTE(review): the cursor lives on the list itself (``self.current``),
        so only one traversal can be active at a time, and stepping relies on
        the Python-2-style ``next()`` method below.
        """
        self.current = self.head
        return self
def next(self):
if self.current == None:
raise StopIteration
else:
value = self.current.value
self.current = self.current.next
return value
def remove_head(self):
if self.head:
item = self.head
if self.head == self.tail:
self.head = None
self.tail = None
else:
self.head = self.head.next
return item.value
def add_to_head(self, value):
item = Node(value)
if self.head:
item.next = self.head
else:
self.tail = item
self.head = item
def add_to_tail(self, value):
item = Node(value)
if self.tail:
self.tail.next = item
else:
self.head = item
self.tail = item
class Node:
    # Singly-linked node: holds one payload value and a pointer to its successor.
    def __init__(self, value):
        self.value = value
        self.next = None  # set when the node is linked into a list
| true |
8afe58a4f73c8670891a6f0bcd7e7f83afcddf53 | Python | mfaria724/CI2691-lab-algoritmos-1 | /Laboratorio 05/Soluciones/PreLaboratorio/Prelab5ejercicio1r.py | UTF-8 | 1,376 | 4.15625 | 4 | [] | no_license | #
# Prelab5ejercicio1r.py
#
# DESCRIPCION: Ejercicio del Prelaboratorio 2 modificado con acciones que verifican las aserciones.
# El programa para calcular las raices del polinomio AX^2 + BX + C. Versión robusta
#
# AUTOR: Kevin Mena y Rosseline Rodriguez
# Variables:
# A: entero // ENTRADA: Primer coeficiente
# B: entero // ENTRADA: Segundo coeficiente
# C: entero // ENTRADA: Tercer coeficiente
# x1: float // SALIDA: Primera raiz
# x2: float // SALIDA: Segunda raiz
import sys
# Initial values:
x1 = 0.0
x2 = 0.0

# Keep prompting until the precondition holds (A != 0 and non-negative
# discriminant), so the real-root formula below is always applicable.
while True:
    A = int(input("Indique el primer coeficiente: "))
    B = int(input("Indique el segundo coeficiente: "))
    C = int(input("Indique el tercer coeficiente: "))
    # Precondition (assert-based, per the exercise statement):
    try:
        assert(A != 0 and 4 * A * C <= B * B)
        break
    except:
        # NOTE(review): a bare except also swallows KeyboardInterrupt etc.;
        # catching AssertionError alone would be safer.
        print("La precondicion no se cumple: primer coeficiente nulo o discriminante negativo ")
        print("Vuelva a intentar")

# Computation: quadratic formula for A*x^2 + B*x + C = 0.
x1 = (-B + (B*B - 4*A*C)**0.5) / (2*A)
x2 = (-B - (B*B - 4*A*C)**0.5) / (2*A)

# Postcondition: both roots must satisfy the polynomial exactly.
# NOTE(review): exact float equality to 0.0 can fail for valid inputs due to
# rounding; math.isclose would be more robust.
try:
    assert(
        (A * x1 * x1 + B * x1 + C == 0.0) and
        (A * x2 * x2 + B * x2 + C == 0.0)
    )
except:
    print("Error en los calculos no se cumple la postcondicion ")
    print("El programa terminara")
    sys.exit()

# Output:
print("Las raices son: ", x1, " y ", x2)
| true |
a2c87889beeba2df356edbfbfd409a14d4da4a6a | Python | zodang/python_practice | /high_challenge/심화문제 10.1.py | UTF-8 | 214 | 3.4375 | 3 | [] | no_license | import numpy as np
# np.arange(1, 21) is equivalent to np.array(range(1, 21))  (typo 'arrange' fixed)
num_arr = np.arange(1,21)
print(num_arr)
print(num_arr[::-1])  # reversed view via a negative-step slice
print("num_arr 내의 모든 원소의 합:",sum(num_arr))  # sum of all elements (label printed in Korean)
print(num_arr.reshape(5,4))  # the 20 elements viewed as a 5x4 matrix
| true |
25f21cd59f220788d61ba4fa3f1c09451e1671d1 | Python | NeuroDataDesign/brainlit | /tests/archive/test_preprocess.py | UTF-8 | 11,183 | 2.609375 | 3 | [
"Apache-2.0"
] | permissive | # import pytest
# import numpy as np
# from brainlit import preprocessing
# from numpy.testing import (
# assert_equal,
# assert_allclose,
# assert_array_equal,
# assert_almost_equal,
# assert_array_almost_equal,
# )
# def test_center():
# img = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
# centered_image = np.array([[-4, -3, -2], [-1, 0, 1], [2, 3, 4]])
# assert_array_equal(preprocessing.center(img), centered_image)
# def test_contrast_normalize():
# img = np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]])
# expected = np.array([[0, 2.12132034, 0], [0, 0, 0], [0, -2.12132034, 0]])
# assert_almost_equal(preprocessing.contrast_normalize(img), expected)
# def test_pad_undopad_transform():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(50, 50))
# window_size = np.array([5, 5])
# step_size = np.array([2, 2])
# padded, pad_size = preprocessing.window_pad(img, window_size, step_size)
# new_img = preprocessing.undo_pad(padded, pad_size)
# assert_array_equal(img, new_img)
# def test_pad_undopad_transform_3D():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(50, 50, 50))
# window_size = np.array([5, 5, 5])
# step_size = np.array([2, 2, 2])
# padded, pad_size = preprocessing.window_pad(img, window_size, step_size)
# new_img = preprocessing.undo_pad(padded, pad_size)
# assert_array_equal(img, new_img)
# def test_image_vector_transform():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(10, 10))
# window_size = np.array([3, 3])
# step_size = np.array([1, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# new_image = preprocessing.imagize_vector(vector, img.shape, window_size, step_size)
# assert_array_equal(img, new_image)
# img = np.random.randint(0, 256, size=(20, 20))
# window_size = np.array([3, 3])
# step_size = np.array([1, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# new_image = preprocessing.imagize_vector(vector, img.shape, window_size, step_size)
# assert_array_equal(img, new_image)
# def test_image_vector_transform_3D():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(10, 10, 10))
# window_size = np.array([3, 3, 3])
# step_size = np.array([1, 1, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# new_image = preprocessing.imagize_vector(vector, img.shape, window_size, step_size)
# assert_array_equal(img, new_image)
# img = np.random.randint(0, 256, size=(20, 20, 20))
# window_size = np.array([3, 3, 3])
# step_size = np.array([1, 1, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# new_image = preprocessing.imagize_vector(vector, img.shape, window_size, step_size)
# assert_array_equal(img, new_image)
# def test_window_pad_2D():
# # Trivial example
# img = np.zeros([50, 50])
# window_size = np.array([3, 3])
# step_size = np.array([1, 1])
# [img_padded, padding] = preprocessing.window_pad(img, window_size, step_size)
# assert img_padded.shape == (52, 52)
# assert_array_equal(padding, np.array([[1, 1], [1, 1]]))
# img = np.zeros([50, 50])
# window_size = np.array([5, 5])
# step_size = np.array([1, 1])
# [img_padded, padding] = preprocessing.window_pad(img, window_size, step_size)
# assert img_padded.shape == (54, 54)
# assert_array_equal(padding, np.array([[2, 2], [2, 2]]))
# img = np.zeros([50, 50])
# window_size = np.array([5, 5])
# step_size = np.array([1, 4])
# [img_padded, padding] = preprocessing.window_pad(img, window_size, step_size)
# assert img_padded.shape == (54, 52)
# assert_array_equal(padding, np.array([[2, 2], [2, 0]]))
# img = np.zeros([50, 50])
# window_size = np.array([3])
# step_size = np.array([1, 1])
# with pytest.raises(ValueError):
# preprocessing.window_pad(img, window_size, step_size)
# window_size = np.array([3, 3, 3])
# with pytest.raises(ValueError):
# preprocessing.window_pad(img, window_size, step_size)
# window_size = np.array([[3, 3], [3, 3]])
# with pytest.raises(ValueError):
# preprocessing.window_pad(img, window_size, step_size)
# window_size = np.array([3, 3])
# step_size = np.array([1])
# with pytest.raises(ValueError):
# preprocessing.window_pad(img, window_size, step_size)
# step_size = np.array([1, 1, 1])
# with pytest.raises(ValueError):
# preprocessing.window_pad(img, window_size, step_size)
# step_size = np.array([[1, 1], [1, 1]])
# with pytest.raises(ValueError):
# preprocessing.window_pad(img, window_size, step_size)
# def test_window_pad_3D():
# img = np.zeros([50, 50, 50])
# window_size = np.array([3, 3, 3])
# step_size = np.array([1, 1, 1])
# [img_padded, padding] = preprocessing.window_pad(img, window_size, step_size)
# assert img_padded.shape == (52, 52, 52)
# assert_array_equal(padding, np.array([[1, 1], [1, 1], [1, 1]]))
# img = np.zeros([50, 50, 50])
# window_size = np.array([5, 5, 5])
# step_size = np.array([1, 1, 1])
# [img_padded, padding] = preprocessing.window_pad(img, window_size, step_size)
# assert img_padded.shape == (54, 54, 54)
# assert_array_equal(padding, np.array([[2, 2], [2, 2], [2, 2]]))
# img = np.zeros([50, 50, 50])
# window_size = np.array([5, 5, 5])
# step_size = np.array([1, 4, 1])
# [img_padded, padding] = preprocessing.window_pad(img, window_size, step_size)
# assert img_padded.shape == (54, 52, 54)
# assert_array_equal(padding, np.array([[2, 2], [2, 0], [2, 2]]))
# def test_vectorize_image():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(10, 10))
# window_size = np.array([3, 3])
# step_size = np.array([1, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# assert_array_equal(vector[:, 0].flatten(), img[0:3, 0:3].flatten())
# assert_array_equal(vector[:, 5].flatten(), img[0:3, 5:8].flatten())
# assert_array_equal(vector[:, 8].flatten(), img[1:4, 0:3].flatten())
# img = np.random.randint(0, 256, size=(10, 10))
# window_size = np.array([3, 3])
# step_size = np.array([2, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# assert_array_equal(vector[:, 0].flatten(), img[0:3, 0:3].flatten())
# assert_array_equal(vector[:, 5].flatten(), img[0:3, 5:8].flatten())
# assert_array_equal(vector[:, 8].flatten(), img[2:5, 0:3].flatten())
# img = np.zeros([50, 50])
# window_size = np.array([3])
# step_size = np.array([1, 1])
# with pytest.raises(ValueError):
# preprocessing.vectorize_img(img, window_size, step_size)
# window_size = np.array([3, 3, 3])
# with pytest.raises(ValueError):
# preprocessing.vectorize_img(img, window_size, step_size)
# window_size = np.array([[3, 3], [3, 3]])
# with pytest.raises(ValueError):
# preprocessing.vectorize_img(img, window_size, step_size)
# window_size = np.array([3, 3])
# step_size = np.array([1])
# with pytest.raises(ValueError):
# preprocessing.vectorize_img(img, window_size, step_size)
# step_size = np.array([1, 1, 1])
# with pytest.raises(ValueError):
# preprocessing.vectorize_img(img, window_size, step_size)
# step_size = np.array([[1, 1], [1, 1]])
# with pytest.raises(ValueError):
# preprocessing.vectorize_img(img, window_size, step_size)
# def test_vectorize_image_3D():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(10, 10, 10))
# window_size = np.array([3, 3, 3])
# step_size = np.array([1, 1, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# assert_array_equal(vector[:, 0].flatten(), img[0:3, 0:3, 0:3].flatten())
# assert_array_equal(vector[:, 5].flatten(), img[0:3, 0:3, 5:8].flatten())
# assert_array_equal(vector[:, 8].flatten(), img[0:3, 1:4, 0:3].flatten())
# img = np.random.randint(0, 256, size=(10, 10, 10))
# window_size = np.array([3, 3, 3])
# step_size = np.array([1, 2, 1])
# vector = preprocessing.vectorize_img(img, window_size, step_size)
# assert_array_equal(vector[:, 0].flatten(), img[0:3, 0:3, 0:3].flatten())
# assert_array_equal(vector[:, 5].flatten(), img[0:3, 0:3, 5:8].flatten())
# assert_array_equal(vector[:, 8].flatten(), img[0:3, 2:5, 0:3].flatten())
# def test_undo_pad():
# np.random.seed(6)
# img = np.random.randint(0, 256, size=(10, 10))
# padding = np.array([2, 2])
# with pytest.raises(ValueError):
# preprocessing.undo_pad(img, padding)
# padding = np.array([[2, 2], [2, 2], [2, 2]])
# with pytest.raises(ValueError):
# preprocessing.undo_pad(img, padding)
# def test_imagize_vector():
# img = np.zeros([50, 50])
# orig_shape = np.array([50, 50])
# window_size = np.array([3])
# step_size = np.array([1, 1])
# with pytest.raises(ValueError):
# preprocessing.imagize_vector(img, orig_shape, window_size, step_size)
# window_size = np.array([3, 3, 3])
# with pytest.raises(ValueError):
# preprocessing.imagize_vector(img, orig_shape, window_size, step_size)
# window_size = np.array([[3, 3], [3, 3]])
# with pytest.raises(ValueError):
# preprocessing.imagize_vector(img, orig_shape, window_size, step_size)
# window_size = np.array([3, 3])
# step_size = np.array([1])
# with pytest.raises(ValueError):
# preprocessing.imagize_vector(img, orig_shape, window_size, step_size)
# step_size = np.array([1, 1, 1])
# with pytest.raises(ValueError):
# preprocessing.imagize_vector(img, orig_shape, window_size, step_size)
# step_size = np.array([[1, 1], [1, 1]])
# with pytest.raises(ValueError):
# preprocessing.imagize_vector(img, orig_shape, window_size, step_size)
# def test_whiten():
# img = np.zeros([50, 50])
# window_size = np.array([3])
# step_size = np.array([1, 1])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size)
# window_size = np.array([3, 3, 3])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size)
# window_size = np.array([[3, 3], [3, 3]])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size)
# window_size = np.array([3, 3])
# step_size = np.array([1])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size)
# step_size = np.array([1, 1, 1])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size)
# step_size = np.array([[1, 1], [1, 1]])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size)
# window_size = np.array([3, 3])
# step_size = np.array([1, 1])
# with pytest.raises(ValueError):
# preprocessing.whiten(img, window_size, step_size, type="as")
| true |
02305ac525acb3fcdbdc2367227f9715e19ed9e5 | Python | manansheel1991/Python-Challenges-Solved | /C5 - Save a dictionary.py | UTF-8 | 411 | 2.953125 | 3 | [] | no_license | import pickle
def SaveDictionary(dictionary, OutPath):
    """Pickle *dictionary* to the file at *OutPath* (binary mode)."""
    with open(OutPath, 'wb') as handle:
        pickle.dump(dictionary, handle)
def LoadDictionary(InPath):
    """Unpickle and return the object stored at *InPath* (binary mode)."""
    with open(InPath, 'rb') as handle:
        return pickle.load(handle)
# Demo: round-trip a sample dict through the helpers above.
# Fixed: the original pickled to an absolute per-user Windows path but then
# loaded the bare name 'test_dict', so save and load targeted different files.
dict1 = {'Manan': 30, 'Barkha': 25}
SaveDictionary(dict1, 'test_dict')
LoadDictionary('test_dict')
| true |
5ae5557120b1301b068c0e75cd74186e1b128e97 | Python | michaelc32592/march_madness | /intro_data_analysis.py | UTF-8 | 19,341 | 2.53125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 9 18:06:37 2018
@author: micha
"""
import pandas as pd
import psycopg2 as pg2
from sqlalchemy import create_engine
import matplotlib.pyplot as plt
# NOTE(review): database credentials are hard-coded below; move them to
# environment variables or a config file before sharing this script.
conn = pg2.connect(database = 'March_Madness' , user = 'postgres', password = 'crump83')
engine = create_engine('postgresql://postgres:crump83@localhost:5432/March_Madness')
cur = conn.cursor()
# Load each tournament/season table into its own DataFrame for exploration.
df = pd.read_sql('SELECT * FROM "cities" ', conn)
df2 = pd.read_sql('SELECT * FROM "Conferences" ', conn)
df3 = pd.read_sql('SELECT * FROM "ConferenceTourneyGames" ', conn)
df4 = pd.read_sql('SELECT * FROM "NCAATourneyCompactResults" ', conn)
df5 = pd.read_sql('SELECT * FROM "NCAATourneyDetailedResults" ',conn)
df6 = pd.read_sql('SELECT * FROM "NCAATourneySeedRoundSlots" ', conn)
df7 = pd.read_sql('SELECT * FROM "NCAATourneySeeds" ', conn)
df8 = pd.read_sql('SELECT * FROM "NCAATOurneySlots" ', conn)
df9 = pd.read_sql('SELECT * FROM "RegularSeasonCompactResults" ', conn)
df10 = pd.read_sql('SELECT * FROM "RegularSeasonDetailedResults" ',conn)
df11 = pd.read_sql('SELECT * FROM "Seasons" ', conn)
df12 = pd.read_sql('SELECT * FROM "SecondaryTourneyCompactResults"',conn)
df13 = pd.read_sql('SELECT * FROM "SecondaryTourneyTeams" ', conn)
df14 = pd.read_sql('SELECT * FROM "TeamCoaches" ', conn)
df15 = pd.read_sql('SELECT * FROM "Teams" ', conn)
#df2 = pd.read_sql('SELECT * FROM "WC_Two" ', conn)
#df3 = pd.read_sql('SELECT * FROM "WC_Three" ', conn)
#df4 = pd.read_sql('SELECT * FROM "WC_Four" ', conn)
#figure out who won the most
#2017 analysis
#regular season success
# Top 10 teams by regular-season win count in 2017 (query defined, not run here).
sql = r'''
SELECT "WTeamID",COUNT("WTeamID") FROM "RegularSeasonCompactResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
ORDER BY COUNT("WTeamID") DESC
LIMIT 10;
'''
#Winning team's seed in 2017
sql5 = '''
SELECT "NCAATourneySeeds"."Seed","NCAATourneyDetailedResults"."WTeamID", "NCAATourneyDetailedResults"."Season" FROM "NCAATourneySeeds"
INNER JOIN "NCAATourneyDetailedResults" ON "NCAATourneySeeds"."TeamID" = "NCAATourneyDetailedResults"."WTeamID" AND
"NCAATourneySeeds"."Season" = "NCAATourneyDetailedResults"."Season"
WHERE "NCAATourneyDetailedResults"."Season" = 2017
'''
# seed2: TeamID -> tournament seed string for 2017 (used again near the end).
sql70 = '''
SELECT "TeamID", "Seed" FROM "NCAATourneySeeds"
WHERE "Season" = 2017
'''
seed2 = pd.read_sql(sql70,conn)
seed2 = seed2[['TeamID','Seed']]
sql50 = '''
SELECT "Seed", "TeamID" FROM "NCAATourneySeeds"
WHERE "Season" = 2017
'''
seed_l = pd.read_sql(sql50,conn)
#Losing Team Seed in 2017
#Average Team Stats (add rebounds, turnovers (both winner and loser), etc)
#WTO are offensive players
#winning team points scored Group By sort by desc
sql6 = '''
SELECT "WTeamID",AVG("WScore") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
ORDER BY AVG("WScore") DESC
'''
points_scored_win = pd.read_sql(sql6, conn)
#winning team points allowed glroup by sort by desc
sql7 = '''
SELECT "WTeamID",AVG("LScore") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
ORDER BY AVG("LScore") DESC
'''
points_scored_loss = pd.read_sql(sql7, conn)
#winning team rebounds
sql20 = '''
SELECT "WTeamID",AVG("WOR"+"WDR") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
ORDER BY AVG("WOR"+"WDR") DESC
'''
w_rebounds = pd.read_sql(sql20,conn)
#do overall points (both winning and losing)
sql25 = '''
SELECT "WTeamID", SUM("WScore"),COUNT("WScore")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
'''
total_wpts = pd.read_sql(sql25,conn)
total_wpts['TeamID'] = total_wpts['WTeamID']
#losing sum/count
sql26 = '''
SELECT "LTeamID", SUM("LScore"),COUNT("LScore")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
'''
total_lpts = pd.read_sql(sql26, conn)
total_lpts['TeamID'] = total_lpts['LTeamID']
# Season points per game: combine points scored in wins and in losses.
# (sum_x/count_x come from the wins query, sum_y/count_y from the losses.)
total_avg_pts = pd.merge(total_wpts, total_lpts, how = 'inner', on = 'TeamID')
total_avg_pts['total_pts'] = total_avg_pts['sum_x'] + total_avg_pts['sum_y']
total_avg_pts['total_games'] = total_avg_pts['count_x'] + total_avg_pts['count_y']
total_avg_pts['ppg'] = total_avg_pts['total_pts']/total_avg_pts['total_games']
total_avg_pts['Average Pts'] = total_avg_pts['ppg']
total_avg_pts = total_avg_pts[['TeamID','Average Pts']]
#Points allowed
sql27 = '''
SELECT "WTeamID", SUM("LScore"),COUNT("LScore")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
'''
points_allowed_w = pd.read_sql(sql27,conn)
points_allowed_w['TeamID'] = points_allowed_w['WTeamID']
sql28 = '''
SELECT "LTeamID", SUM("WScore"),COUNT("WScore")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
'''
points_allowed_l = pd.read_sql(sql28,conn)
points_allowed_l['TeamID'] = points_allowed_l['LTeamID']
# Season points allowed per game, built the same way as Average Pts above.
total_avg_pa = pd.merge(points_allowed_w,points_allowed_l, how = 'inner', on = 'TeamID')
total_avg_pa['total_pts'] = total_avg_pa['sum_x'] + total_avg_pa['sum_y']
total_avg_pa['total_games'] = total_avg_pa['count_x'] + total_avg_pa['count_y']
total_avg_pa['Average PA'] = total_avg_pa['total_pts']/total_avg_pa['total_games']
total_avg_pa = total_avg_pa[['TeamID','Average PA']]
#join winning and losing sum columns and counts
#divide out
#ORB
# Offensive rebounds grabbed per game (wins + losses combined).
sql29 = '''
SELECT "WTeamID", SUM("WOR"),COUNT("WOR")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
'''
o_rbs_w = pd.read_sql(sql29, conn)
o_rbs_w['TeamID'] = o_rbs_w['WTeamID']
sql30 = '''
SELECT "LTeamID", SUM("LOR"),COUNT("LOR")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
'''
o_rbs_l = pd.read_sql(sql30, conn)
o_rbs_l['TeamID'] = o_rbs_l['LTeamID']
o_rbs_for = pd.merge(o_rbs_w,o_rbs_l, how = 'inner', on = 'TeamID')
o_rbs_for['total_rbs'] = o_rbs_for['sum_x'] + o_rbs_for['sum_y']
o_rbs_for['total_games'] = o_rbs_for['count_x'] + o_rbs_for['count_y']
o_rbs_for['orpg'] = o_rbs_for['total_rbs']/o_rbs_for['total_games']
o_rbs_for = o_rbs_for[['TeamID','orpg']]
#offensive rebounds allowed
sql31 = '''
SELECT "WTeamID", SUM("LOR"),COUNT("LOR")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
'''
o_rbs_a_w = pd.read_sql(sql31, conn)
o_rbs_a_w['TeamID'] = o_rbs_a_w['WTeamID']
sql32 = '''
SELECT "LTeamID", SUM("WOR"),COUNT("WOR")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
'''
o_rbs_a_l = pd.read_sql(sql32,conn)
o_rbs_a_l['TeamID'] = o_rbs_a_l['LTeamID']
o_rbs_against = pd.merge(o_rbs_a_w,o_rbs_a_l, how = 'inner', on = 'TeamID')
o_rbs_against['total_rb_a'] = o_rbs_against['sum_x'] + o_rbs_against['sum_y']
o_rbs_against['total_games'] = o_rbs_against['count_x'] + o_rbs_against['count_y']
o_rbs_against['orapg'] = o_rbs_against['total_rb_a']/o_rbs_against['total_games']
o_rbs_against= o_rbs_against[['TeamID','orapg']]
#Do check-look at points for/points against games
#Turnovers
#turnovers made
# "made" here = turnovers forced on the opponent (LTO in wins, WTO in losses).
sql33 = '''
SELECT "WTeamID", SUM("LTO"),COUNT("LTO")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
'''
to_w = pd.read_sql(sql33, conn)
to_w['TeamID'] =to_w['WTeamID']
sql34 = '''
SELECT "LTeamID", SUM("WTO"),COUNT("WTO")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
'''
to_l = pd.read_sql(sql34, conn)
to_l['TeamID'] = to_l['LTeamID']
to_made = pd.merge(to_w,to_l, how = 'inner', on = 'TeamID')
to_made['total_tos_made'] = to_made['sum_x'] + to_made['sum_y']
to_made['total_games'] = to_made['count_x'] + to_made['count_y']
to_made['avg_to_made'] = to_made['total_tos_made']/to_made['total_games']
to_made = to_made[['TeamID','avg_to_made']]
#turnovers against
# "against" = the team's own turnovers committed per game.
sql35 = '''
SELECT "WTeamID", SUM("WTO"),COUNT("WTO")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
'''
to_against_w = pd.read_sql(sql35, conn)
to_against_w['TeamID'] =to_against_w['WTeamID']
sql36 = '''
SELECT "LTeamID", SUM("LTO"),COUNT("LTO")
FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
'''
to_against_l = pd.read_sql(sql36, conn)
to_against_l['TeamID'] = to_against_l['LTeamID']
to_against = pd.merge(to_against_w,to_against_l, how = 'inner', on = 'TeamID')
to_against['total_tos_against'] = to_against['sum_x'] + to_against['sum_y']
to_against['total_games'] = to_against['count_x'] + to_against['count_y']
to_against['avg_to_allowed'] = to_against['total_tos_against']/to_against['total_games']
to_against = to_against[['TeamID','avg_to_allowed']]
#Home vs Away
#winning team home vs losing team home
# WLoc is recorded from the WINNER's perspective: 'H' = winner at home,
# 'A' = winner away (so the loser was at home), 'N' = neutral court.
sql8 = '''
SELECT "WTeamID", COUNT("WLoc") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017 AND "WLoc" = 'H'
GROUP BY "WTeamID"
ORDER BY COUNT("WLoc") DESC;
'''
sql9 = '''
SELECT "WTeamID", COUNT("WLoc") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017 AND "WLoc" = 'N'
GROUP BY "WTeamID"
ORDER BY COUNT("WLoc") DESC
;
'''
sql10 = '''
SELECT "WTeamID", COUNT("WLoc") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017 AND "WLoc" = 'A'
GROUP BY "WTeamID"
ORDER BY COUNT("WLoc") DESC
;
'''
sql60='''
SELECT "LTeamID", COUNT("WLoc") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017 AND "WLoc" = 'A'
GROUP BY "LTeamID"
ORDER BY COUNT("WLoc") DESC
;
'''
sql61 = '''
SELECT "LTeamID", COUNT("WLoc") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017 AND "WLoc" = 'H'
GROUP BY "LTeamID"
ORDER BY COUNT("WLoc") DESC
'''
home_wins = pd.read_sql(sql8, conn)
home_losses = pd.read_sql(sql60, conn)
neutral_wins = pd.read_sql(sql9, conn)
neutral_wins.columns = ['WTeamID','Neutral']
away_wins = pd.read_sql(sql10, conn)
away_losses = pd.read_sql(sql61, conn)
# Home Record = home wins / (home wins + home losses).
home_record = pd.merge(home_wins, home_losses, left_on = "WTeamID", right_on = "LTeamID")
home_record['Home Record'] = home_record['count_x']/(home_record['count_x'] + home_record['count_y'])
home_record['TeamID'] = home_record['WTeamID']
home_record = home_record[['TeamID','Home Record']]
away_record = pd.merge(away_wins, away_losses, left_on = "WTeamID", right_on = "LTeamID")
away_record['Away Record'] = away_record['count_x']/(away_record['count_x'] + away_record['count_y'])
away_record['TeamID'] = away_record['WTeamID']
away_record= away_record[['TeamID','Away Record']]
#do home record, away record
#build a function looking at location, only include the one that applies (ie away record if team is away)
#overall record
sql11 = '''
SELECT "WTeamID", COUNT("WTeamID") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "WTeamID"
ORDER BY COUNT("WTeamID") DESC
;
'''
wins = pd.read_sql(sql11, conn)
wins.columns = ['TeamID','Wins']
sql12 = '''
SELECT "LTeamID", COUNT("LTeamID") FROM "RegularSeasonDetailedResults"
WHERE "Season" = 2017
GROUP BY "LTeamID"
ORDER BY COUNT("LTeamID") DESC
;
'''
losses = pd.read_sql(sql12, conn)
losses.columns = ['TeamID','Losses']
record = pd.merge(wins,losses, on = "TeamID")
record["overall_win_pct"] = record["Wins"]/(record["Wins"] + record["Losses"])
record = record[['TeamID','overall_win_pct']]
#record.to_sql("2017 Record",engine)
#strength of schedule (ie record of losing teams if they won and vice versa)
# Strength of schedule: average season win pct of the opponents a team beat
# (WSOS) / lost to (LSOS). Relies on the "2017 Record" table written by the
# (commented-out) record.to_sql call above.
sql14 = r'''
SELECT "WTeamID",AVG("win_pct") FROM (
SELECT "RegularSeasonDetailedResults"."Season", "RegularSeasonDetailedResults"."WTeamID", "2017 Record"."TeamID","2017 Record"."win_pct"
FROM "RegularSeasonDetailedResults"
INNER JOIN "2017 Record" ON "RegularSeasonDetailedResults"."LTeamID"="2017 Record"."TeamID"
WHERE "Season" = 2017) AS foo
GROUP BY "WTeamID"
ORDER BY AVG("win_pct") DESC
'''
W_strength_of_schedule = pd.read_sql(sql14, conn)
W_strength_of_schedule['TeamID'] = W_strength_of_schedule['WTeamID']
W_strength_of_schedule['WSOS'] = W_strength_of_schedule['avg']
W_strength_of_schedule = W_strength_of_schedule[['TeamID','WSOS']]
sql15 = r'''
SELECT "LTeamID",AVG("win_pct") FROM (
SELECT "RegularSeasonDetailedResults"."Season", "RegularSeasonDetailedResults"."LTeamID", "2017 Record"."TeamID","2017 Record"."win_pct"
FROM "RegularSeasonDetailedResults"
INNER JOIN "2017 Record" ON "RegularSeasonDetailedResults"."WTeamID"="2017 Record"."TeamID"
WHERE "Season" = 2017) AS foo
GROUP BY "LTeamID"
ORDER BY AVG("win_pct") DESC
'''
L_strength_of_schedule = pd.read_sql(sql15, conn)
L_strength_of_schedule['LSOS'] = L_strength_of_schedule['avg']
L_strength_of_schedule['TeamID'] = L_strength_of_schedule['LTeamID']
L_strength_of_schedule = L_strength_of_schedule[['TeamID','LSOS']]
#Coach Analysis
#Total Tourney Wins/Losses for Coaches
# All-time tournament win pct per coach (not restricted to 2017).
sql16 = r'''
SELECT "TeamCoaches"."CoachName",COUNT("NCAATourneyDetailedResults"."WTeamID")
FROM "TeamCoaches"
INNER JOIN "NCAATourneyDetailedResults" ON "TeamCoaches"."TeamID" = "NCAATourneyDetailedResults"."WTeamID" AND "TeamCoaches"."Season" = "NCAATourneyDetailedResults"."Season"
GROUP BY "CoachName"
ORDER BY "count" DESC
'''
coach_wins = pd.read_sql(sql16, conn)
sql17 = r'''
SELECT "TeamCoaches"."CoachName",COUNT("NCAATourneyDetailedResults"."LTeamID")
FROM "TeamCoaches"
INNER JOIN "NCAATourneyDetailedResults" ON "TeamCoaches"."TeamID" = "NCAATourneyDetailedResults"."LTeamID" AND "TeamCoaches"."Season" = "NCAATourneyDetailedResults"."Season"
GROUP BY "CoachName"
ORDER BY "count" DESC
'''
coach_losses = pd.read_sql(sql17, conn)
coach_record = pd.merge(coach_wins,coach_losses, on = "CoachName")
coach_record["coach_win_pct"] = coach_record["count_x"]/(coach_record["count_x"]+ coach_record["count_y"])
coach_record = coach_record[['CoachName','coach_win_pct']]
sql71 = r'''
SELECT * FROM "TeamCoaches"
WHERE "Season" = 2017
'''
team_coach = pd.read_sql(sql71, conn)
team_coach_record = pd.merge(coach_record, team_coach, on = "CoachName")
team_coach_record = team_coach_record[['TeamID','coach_win_pct']]
#play by play
#efficiency
#reliability on one player
#winning clutch made fts
#MAKE SURE TO CHANGE TO 17 ONCE DOWNLOADED
# "Clutch" = events after 2200 elapsed seconds by the eventual winner.
sql22 = '''
SELECT "WTeamID",COUNT("EventType") FROM "PlaybyPlayEvents17"
WHERE "ElapsedSeconds" > 2200 AND "WTeamID" = "EventTeamID" AND "EventType" = 'made1_free'
GROUP BY "WTeamID"
ORDER BY COUNT("EventType") DESC
'''
clutch_ft_made = pd.read_sql(sql22, conn)
sql23 = '''
SELECT "WTeamID",COUNT("EventType") FROM "PlaybyPlayEvents17"
WHERE "ElapsedSeconds" > 2200 AND "WTeamID" = "EventTeamID" AND "EventType" = 'miss1_free'
GROUP BY "WTeamID"
ORDER BY COUNT("EventType") DESC
'''
clutch_ft_miss = pd.read_sql(sql23, conn)
clutch_ft = pd.merge(clutch_ft_made,clutch_ft_miss, on = "WTeamID")
clutch_ft["clutch_FtPct"] = clutch_ft['count_x']/(clutch_ft['count_x']+ clutch_ft['count_y'])
clutch_ft['TeamID'] = clutch_ft['WTeamID']
clutch_ft = clutch_ft[['TeamID','clutch_FtPct']]
#Look at momentum as well (weight later wins more)
#Conference Record
sql40 = '''
SELECT "WTeamID",COUNT("WTeamID") FROM "ConferenceTourneyGames"
WHERE "Season" = 2017
GROUP BY "WTeamID"
ORDER BY COUNT("WTeamID") DESC
'''
conference_wins = pd.read_sql(sql40, conn)
#conference_wins['TeamID'] = conference_wins['WTeamID']
sql41 = '''
SELECT "LTeamID",COUNT("LTeamID") FROM "ConferenceTourneyGames"
WHERE "Season" = 2017
GROUP BY "LTeamID"
ORDER BY COUNT("LTeamID") DESC
'''
conference_losses = pd.read_sql(sql41, conn)
#conference_losses['TeamID'] = conference_losses["LTeamID"]
# Left merge keeps undefeated conference-tourney teams; their missing loss
# count is filled with 0 before computing the win pct.
conference_record = pd.merge(conference_wins,conference_losses,how = 'left', left_on = "WTeamID", right_on = "LTeamID")
conference_record.fillna(0, inplace = True)
conference_record['conf_win_pct'] = conference_record['count_x']/(conference_record['count_x'] + conference_record['count_y'])
conference_record['TeamID'] = conference_record['WTeamID']
conference_record=conference_record[['TeamID','conf_win_pct']]
#combine inputs
#total_avg_pts - total points they got on average TeamID, total_avg_pts['ppg']
#points_scored_win-points scored in a win
#total_avg_pa - total average points given up TeamID, total_avg_pa['ppg']
#home court- home court advantage WTeamID, home_court['ratio']
#W_strength_of_schedule - strength of schedules of wins
#L_strength_of_schedule - strength of schedules of losses
#coach record- success of coaches
#clutch_ft - last 2 minutes while winning field goals made
#o_rbs_for- average offensive rebounds for
#o_rbs_against-average offensive rebounds against
#to_made = turnover done (ie while on defense)
#to_against = when they turn it over
#conference record-how they did in the conference tourney
#seed_2: seed of 2017 tourney "TeamID", seel_d["Seed"]
#put everything together
#join everything by 'TeamID'
overall_df = pd.merge(total_avg_pts,total_avg_pa, how = 'left',on = 'TeamID').merge(home_record, how = 'left',on = 'TeamID').merge(away_record,how = 'left',on = 'TeamID').merge(
    W_strength_of_schedule,how = 'left',on = 'TeamID').merge(L_strength_of_schedule, how = 'left',on = 'TeamID').merge(team_coach_record,how = 'left',on = 'TeamID').merge(
    clutch_ft, how = 'left',on = 'TeamID').merge(o_rbs_for, how = 'left',on = 'TeamID').merge(o_rbs_against, how = 'left',on = 'TeamID').merge(to_made, how = 'left',on = 'TeamID').merge(
    to_against, how = 'left',on = 'TeamID').merge(conference_record,how = 'left', on = 'TeamID').merge(seed2,how = 'left', on = 'TeamID')
#fillna
overall_df["conf_win_pct"].fillna(0, inplace = True)
overall_df["coach_win_pct"].fillna(overall_df["coach_win_pct"].mean(), inplace = True)
overall_df["Home Record"].fillna(overall_df["Home Record"].mean(), inplace = True)
overall_df["Away Record"].fillna(overall_df["Away Record"].mean(), inplace = True)
# Drop the region letter from the seed string (e.g. 'W01' -> '01').
overall_df['Seed'] = overall_df['Seed'].str[1:]
# NOTE(review): `.isna` without parentheses is a bound method, so this
# comparison is always False and the astype(int) line never runs — the
# condition was presumably meant to test for missing seeds; confirm intent.
if overall_df['Seed'].isna == False:
    overall_df['Seed'] = overall_df['Seed'].astype(int)
#add in location once results are known
#calculate differences
#do half with 'LTeamID' first and randomize so output isn't always 1
#Set Up Tourney Schedule
#do first four Games
sql201 = '''
SELECT * FROM "NCAATOurneySlots" WHERE "Season" = 2017
'''
schedule = pd.read_sql(sql201,conn)
# '%%a' escapes the SQL LIKE wildcard '%' for the DB-API driver; matches
# play-in ("first four") slot codes ending in 'a'.
sql202 = r'''
SELECT * FROM "NCAATOurneySlots" WHERE "Season" = 2017 AND "StrongSeed" LIKE '%%a'
'''
#figure out what to do with this
cur.execute(sql202, conn)
# Attach the actual TeamIDs to each bracket slot's strong/weak seed codes.
strong_seed = pd.merge(seed2, schedule, left_on = 'Seed', right_on = 'StrongSeed')
strong_seed['High Seed'] = strong_seed['TeamID']
weak_seed = pd.merge(seed2, schedule, left_on = 'Seed', right_on = 'WeakSeed')
weak_seed['Low Seed'] = weak_seed['TeamID']
seeds = pd.merge(strong_seed, weak_seed, on = 'index')
seeds = seeds[['High Seed','StrongSeed_y','Low Seed','WeakSeed_y']]
sql203 = r'''
SELECT "WTeamID","LTeamID" FROM "NCAATourneyCompactResults"
WHERE "Season" = 2017
'''
#add in something about season
winner = pd.read_sql(sql203, conn)
winner_check = pd.read_sql(sql203,conn)
#randomize data
# Randomly swap winner/loser into Team1/Team2 so the Output label (1 =
# Team1 won, 0 = Team2 won) is not constant in the training frame.
import random
a,b = winner.shape
winner['Rand'] = random.random()
for i in range(a):
    winner.iloc[i,b] = random.random()
winner['Team1'] = 3
winner['Team2'] = 4
winner['Output'] = 2
c = b+1
d = b+2
e = b+3
for j in range(a):
    if winner.iloc[j,b] < 0.5:
        winner.iloc[j,c] = winner.iloc[j,0]
        winner.iloc[j,d] = winner.iloc[j,1]
        winner.iloc[j,e] = 1
    if winner.iloc[j,b] >= .5:
        winner.iloc[j,c] = winner.iloc[j,1]
        winner.iloc[j,d] = winner.iloc[j,0]
        winner.iloc[j,e] = 0
winner2 = winner[['Team1','Team2','Output']]
# Join team features onto both sides of each matchup.
final_df_left = pd.merge(overall_df,winner2, left_on = 'TeamID',right_on = 'Team1')
final_df = pd.merge(final_df_left, overall_df, left_on = 'Team2', right_on = 'TeamID')
#pull out seed#
| true |
070e902f6d37fab208d27eb7a217968d26ac7ae1 | Python | ashwinvis/advent-of-code | /2019/src/advent19/sol02.py | UTF-8 | 869 | 3.03125 | 3 | [
"Apache-2.0"
] | permissive | from itertools import product
import numpy as np
from .day02 import day02
def run_intcode(intcode):
    """Coerce *intcode* to an int32 array, execute it in place with the
    day02 interpreter, and return the final memory state."""
    program = np.asarray(intcode, dtype=np.int32)
    day02.run_intcode(program)
    return program
if __name__ == "__main__":
    intcode = np.loadtxt("input/02.txt", dtype=int, delimiter=',')
    # restore the gravity assist program (your puzzle input) to the "1202
    # program alarm" state
    intcode[1] = 12
    intcode[2] = 2
    print(run_intcode(intcode))
    # Part 2: brute-force every (noun, verb) pair in [0, 99]^2 until the
    # program leaves the target value 19690720 at address 0.
    intcode = np.loadtxt("input/02.txt", dtype=int, delimiter=',')
    for noun, verb in product(range(100), range(100)):
        intcode[1] = noun
        intcode[2] = verb
        # np.copy: the interpreter mutates memory in place, so each attempt
        # gets a fresh copy of the program.
        output = run_intcode(np.copy(intcode))[0]
        if output == 19690720:
            print("noun, verb =", noun, verb)
            print("answer for part 2 =", 100 * noun + verb)
            break
| true |
5b6458b3e6462a7aeda6175a9e3e69d4a85b5df2 | Python | benshulman/noisemap | /data-processing/noisescore-scrape/noise_score_box.py | UTF-8 | 799 | 2.796875 | 3 | [] | no_license | '''
Trim NoiseTube observations to keep only those within a bounding box
Some samples were way out in the middle of nowhere, I'll discard those.
I chose a bounding box by examining observations on
the map in eda-nt-clean.ipynb.
'''
import pandas as pd
noise = pd.read_csv('/Users/Ben/Dropbox/Insight/noisescore/noise-score-clean.csv')
print(
    'N obs unboxed: ' +
    str(len(noise))
)
# Bounding box as [[south, west], [north, east]] lat/lng corners, chosen by
# inspecting the observation map in eda-nt-clean.ipynb.
bounds = [[42.23, -71.20], [42.419, -70.95]]
# Keep only observations strictly inside the box; drop the stale CSV index
# column and renumber rows.
noise_boxed = noise[(
    # bottom
    (noise.lat > bounds[0][0]) &
    # top
    (noise.lat < bounds[1][0]) &
    # left
    (noise.lng > bounds[0][1]) &
    # right
    (noise.lng < bounds[1][1])
)].reset_index(drop = True).drop('Unnamed: 0', axis = 1)
print(
    'N obs boxed: ' +
    str(len(noise_boxed))
)
noise_boxed.to_csv('/Users/Ben/Dropbox/Insight/noisescore/noise-score-boxed.csv') | true |
d8e3a33e152027c1f5f59eb92d8dff15ab9aef4e | Python | questoph/vocab-trainer | /tasks/test_stats.py | UTF-8 | 1,780 | 3.0625 | 3 | [
"MIT"
] | permissive | #-*- coding: UTF-8 -*-
from __main__ import *
from tasks.process_list import *
from operator import itemgetter
# Word list for the language chosen in the main menu; input_list and
# `language` come from the star-imports above.
word_list = input_list(language)
if len(word_list) > 0:
runs_total = max([int(item['test_count']) for item in word_list])
correct_answers = sum([int(item['correct_count']) for item in word_list])
wrong_answers = sum([int(item['wrong_count']) for item in word_list])
top_correct = sorted(word_list, key=itemgetter('correct_count'), reverse=True)
top10_correct = ', '.join(map(str, [item["word"] for item in top_correct[:10]]))
top_wrong = sorted(word_list, key=itemgetter('wrong_count'), reverse=True)
top10_wrong = ', '.join(map(str, [item["word"] for item in top_wrong[:10]]))
if runs_total > 0:
print("Here is your test statistics for {}." .format(language))
print("\n-----")
print("Number of words in list: {}" .format(len(word_list)))
print("- Number of test runs total:{}" .format(runs_total))
print("- Number of correct answers: {}" .format(correct_answers))
print("- Number of wrong answers: {}" .format(wrong_answers))
print("\n-----")
print("- Top 10 correct words: {}" .format(top10_correct))
print("- Top 10 wrong words: {}" .format(top10_wrong))
print("\n-----")
input("That's all for now. Hit enter to return to the main menu. ")
os.system('clear')
elif runs_total == 0:
print("There are no stats available for {} yet. You need to train some words first." .format(language))
print("\n-----")
input("That's all for now. Hit enter to return to the main menu. ")
os.system('clear')
elif len(word_list) == 0:
print("There are no words available for {} yet. You need to enter some first." .format(language))
print("\n-----")
input("That's all for now. Hit enter to return to the main menu. ")
os.system('clear') | true |
179882840f2adbdccf7fb187bba656f4a7663c44 | Python | dockerizeme/dockerizeme | /hard-gists/1693769/snippet.py | UTF-8 | 2,846 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
from __future__ import division
import numpy as np
import matplotlib.pyplot as plt
from scipy import optimize
from numpy import newaxis, r_, c_, mat, e
from numpy.linalg import *
def plotData(X, y):
    """Scatter-plot the first two feature columns of X: positives (y == 1)
    as black '+' markers, negatives (y == 0) as red-filled circles."""
    #pos = (y.ravel() == 1).nonzero()
    #neg = (y.ravel() == 0).nonzero()
    # nonzero() on the (m, 1) label array yields (row_idx, col_idx); [:1]
    # keeps only the row indices for fancy-indexing into X.
    pos = (y == 1).nonzero()[:1]
    neg = (y == 0).nonzero()[:1]
    plt.plot(X[pos, 0].T, X[pos, 1].T, 'k+', markeredgewidth=2, markersize=7)
    plt.plot(X[neg, 0].T, X[neg, 1].T, 'ko', markerfacecolor='r', markersize=7)
def sigmoid(z):
    """Element-wise logistic function g(z) = 1 / (1 + e**-z).

    Generalized: the original required an np.matrix argument (it accessed
    ``z.A``); this version accepts any array-like (matrix, ndarray, list,
    scalar) while still returning a plain ndarray for matrix input, exactly
    as before.
    """
    g = 1. / (1 + np.exp(-np.asarray(z, dtype=float)))
    return g
def costFunction(theta, X, y):
    """Unregularized logistic-regression cost J(theta).

    theta : 1-D parameter vector (length n+1).
    X     : (m, n+1) design matrix (np.matrix, first column of ones).
    y     : (m, 1) ndarray of 0/1 labels.
    Returns the scalar cross-entropy cost averaged over the m examples.
    The analytic gradient is left commented out because scipy's fmin (used
    in __main__ below) only needs the cost value.
    """
    m = X.shape[0]
    # Hypothesis h(x) = sigmoid(X @ theta); c_[theta] reshapes theta to (n+1, 1).
    predictions = sigmoid(X * c_[theta])
    J = 1./m * (-y.T.dot(np.log(predictions)) - (1-y).T.dot(np.log(1 - predictions)))
    #grad = 1./m * X.T * (predictions - y)
    return J[0][0]##, grad.A
def predict(theta, X):
    """Boolean predictions for each row of X: True where the fitted
    hypothesis sigmoid(X @ theta) is at least 0.5 (class 1)."""
    return sigmoid(X * c_[theta]) >= 0.5
def plotDecisionBoundary(theta, X, y):
    """Plot the data together with the fitted linear decision boundary
    theta' x = 0.

    Only the linear case (design matrix with <= 3 columns) is handled; the
    polynomial branch of the original exercise was never ported (else: pass).
    """
    plotData(X[:, 1:3], y)
    if X.shape[1] <= 3:
        # Two x1 endpoints, slightly outside the observed data range.
        plot_x = r_[X[:,2].min()-2, X[:,2].max()+2]
        # Boundary theta0 + theta1*x1 + theta2*x2 = 0, solved for x2.
        plot_y = (-1./theta[2]) * (theta[1]*plot_x + theta[0])
        plt.plot(plot_x, plot_y)
        plt.legend(['Admitted', 'Not admitted', 'Decision Boundary'])
        plt.axis([30, 100, 30, 100])
    else:
        pass
if __name__ == '__main__':
    # Logistic-regression exercise (Python 2): two exam scores -> admission.
    data = np.loadtxt('ex2data1.txt', delimiter=',')
    X = mat(c_[data[:, :2]])
    y = c_[data[:, 2]]
    # ============= Part 1: Plotting
    print 'Plotting data with + indicating (y = 1) examples and o ' \
        'indicating (y = 0) examples.'
    plotData(X, y)
    plt.ylabel('Exam 1 score')
    plt.xlabel('Exam 2 score')
    plt.legend(['Admitted', 'Not admitted'])
    plt.show()
    raw_input('Press any key to continue\n')
    # ============= Part 2: Compute cost and gradient
    m, n = X.shape
    # Prepend the intercept column of ones.
    X = c_[np.ones(m), X]
    initial_theta = np.zeros(n+1)
    cost, grad = costFunction(initial_theta, X, y), None
    print 'Cost at initial theta (zeros): %f' % cost
    print 'Gradient at initial theta (zeros):\n%s' % grad
    raw_input('Press any key to continue\n')
    # ============= Part 3: Optimizing using fminunc
    # scipy's Nelder-Mead fmin stands in for MATLAB's fminunc.
    options = {'full_output': True, 'maxiter': 400}
    theta, cost, _, _, _ = \
        optimize.fmin(lambda t: costFunction(t, X, y), initial_theta, **options)
    print 'Cost at theta found by fminunc: %f' % cost
    print 'theta: %s' % theta
    plotDecisionBoundary(theta, X, y)
    plt.show()
    raw_input('Press any key to continue\n')
    # ============== Part 4: Predict and Accuracies
    prob = sigmoid(mat('1 45 85') * c_[theta])
    print 'For a student with scores 45 and 85, we predict an admission ' \
        'probability of %f' % prob
    p = predict(theta, X)
    print 'Train Accuracy:', (p == y).mean() * 100
    raw_input('Press any key to continue\n')
| true |
070dd087ac83a0a2c0b487017fcdc2be9d12c67e | Python | Calvin-alis/hillel_hometask | /main.py | UTF-8 | 7,534 | 2.84375 | 3 | [] | no_license |
import os
import sqlite3
from utilits import generate_password as gp
from utilits import open_file
from utilits import create_fake_email as cfe
from utilits import normalize_and_calculate as nac
from utilits import spacemarin_count as spacemarin
# imports for homework 3
from utilits import create_fake_name
from utilits import create_fake_phone
from utilits import check_name
from utilits import check_number
from flask import Flask, request
# NOTE(review): `datetime` and `os` appear unused in this module — confirm
# before removing.
from datetime import datetime
app = Flask(__name__)
print('Git test')
@app.route('/hello/')
def hello_world():
    """Smoke-test endpoint: always responds with a fixed greeting."""
    return 'Hello, World!'
@app.route('/test/')
def test_func() -> str:
    """Return a hard-coded sample name (simple endpoint used for testing)."""
    return 'Alex'
@app.route('/generate-password/')
def generate_password():
    """Return a random password; length comes from ?password-len=<int>
    (defaults to 10 when missing or non-numeric)."""
    # validate password-len from client
    password_len = request.args.get('password-len')
    if not password_len:
        password_len = 10
    else:
        if password_len.isdigit():
            password_len = int(password_len)
            # 10 .. 100
            # NOTE(review): the 10..100 range hinted above is not actually
            # enforced — confirm whether out-of-range lengths should be
            # clamped or rejected.
        else:
            password_len = 10
            # return 'Invalid parameter password-len. Should be int.'
    password = gp(password_len)
    return f'{password}'
# endpoint that serves the project's dependency list
# the file reading itself lives in the open_file helper
@app.route('/requirements/')
def requirements() -> str:
    """Return the contents of requirments.txt, or 'Empty file' when blank."""
    # NOTE(review): absolute local path — this breaks on any other machine.
    files = open_file('/Users/alksandr/first_in_class/homework_hillel/homework_second/requirments.txt')
    return f'{files}\n' if len(files) > 0 else 'Empty file'
# endpoint that generates random fake users (e-mail addresses)
@app.route('/generate-users/')
def generate_users():
    """Return fake e-mails; count comes from ?user-generate=<int>.

    Counts above 1000 are capped at 999; non-positive counts fall back to
    the helper's default.
    """
    # Everything works via curl; a malformed manual request used to trip the
    # handler, so conversion failures are reported back to the client.
    try:
        count_of_gen_users = int(request.args.get('user-generate'))
    except (TypeError, ValueError):
        # TypeError: parameter missing (None); ValueError: not an integer.
        # (Was a bare `except:`, which would also swallow SystemExit etc.)
        return 'Error type'
    # count_of_gen_users is already an int — no need to re-convert it in
    # every comparison as the original did.
    if count_of_gen_users > 0 and count_of_gen_users < 1000:
        result = cfe(count_of_gen_users)
    elif count_of_gen_users > 1000:
        result = cfe(999)
    elif count_of_gen_users <= 0:
        result = cfe()
    else:
        # Only reachable for exactly 1000 (kept to preserve behaviour).
        return 'example@gmail.com'
    return f'{result }\n'
# endpoint that computes the average weight and height
@app.route('/mean/')
def calculate_mean():
    """Return the normalized mean statistics computed from the bundled CSV."""
    path = nac('hw (2) (1).csv')
    return f'{path}\n'
# endpoint that reports how many cosmonauts are in space right now
@app.route('/space/')
def calculate_spacemen():
    """Report the current number of people in space via spacemarin_count."""
    return 'Космодесантников к космасе на данный момент: ' + str(int(spacemarin()))
#@app.route('/generate-password2/')
#def generate_password2():
# import random
# import string
# choices = string.ascii_letters + string.digits + '#$%^'
# result = ''
# for _ in range(10):
# result += random.choice(choices)
# return f'{result}\n'
@app.route('/emails/create/')
def create_email():
    """Insert one email row; params: ?contactName=...&Email=...

    Example: http://127.0.0.1:5000/emails/create/?contactName=Alex&Email=awdaw@mail.com
    """
    import sqlite3
    con = sqlite3.connect('homework_three.db')
    contact_name = request.args['contactName']
    email_value = request.args['Email']
    cur = con.cursor()
    # Use sqlite3 '?' placeholders instead of f-string interpolation: the
    # original query was vulnerable to SQL injection via query parameters.
    sql_query = '''
    INSERT INTO emails (contactName, emailValue)
    VALUES (?, ?);
    '''
    cur.execute(sql_query, (contact_name, email_value))
    con.commit()
    con.close()
    return 'create_email'
@app.route('/emails/read/')
def update_email():
    """Return all rows of the emails table, stringified.

    NOTE(review): despite its name this handler performs the READ of the
    CRUD set — the route '/emails/read/' is the accurate description.
    """
    import sqlite3
    con = sqlite3.connect('homework_three.db')
    cur = con.cursor()
    sql_query = f'''
    SELECT * FROM emails;
    '''
    cur.execute(sql_query)
    result = cur.fetchall()
    con.close()
    return str(result)
@app.route('/emails/update/')
def delete_email():
    """Rename the contact that owns a given email.

    Params: ?contactName=<new name>&Email=<existing email>.
    (Despite its name this handler performs the UPDATE of the CRUD set.)
    """
    import sqlite3
    contact_name = request.args['contactName']
    email_value = request.args['Email']
    con = sqlite3.connect('homework_three.db')
    cur = con.cursor()
    # '?' placeholders instead of f-string interpolation (SQL-injection fix).
    sql_query = '''
    UPDATE emails
    SET contactName = ?
    WHERE emailValue = ?;
    '''
    cur.execute(sql_query, (contact_name, email_value))
    con.commit()
    con.close()
    return 'update_email'
# Homework 3
# implements CRUD — the data-manipulation layer of this app
# added and improved the tables
# added extra validation checks
@app.route('/phones/create/')
def create_phones():
    """Insert one phone row; params ?contactName=...&phoneValue=...
    (generated fake values are used when a parameter is absent)."""
    import sqlite3
    connect = sqlite3.connect('homework_three.db')
    # Default to generated fake data so a bad/missing request parameter
    # cannot break the insert.
    contact_name = request.args.get('contactName', default= create_fake_name())
    phone_value = request.args.get('phoneValue', default= create_fake_phone())
    cur = connect.cursor()
    # '?' placeholders instead of f-string interpolation (SQL-injection
    # fix); the check_* helpers still normalize the values first.
    sql_query_param = '''
    INSERT INTO phones (contactName, phoneValue)
    VALUES (?, ?);
    '''
    cur.execute(sql_query_param, (check_name(contact_name), check_number(phone_value)))
    connect.commit()
    connect.close()
    return 'create phones'
@app.route('/phones/read/')
def read_phones_info():
    """Return every row of the phones table as a stringified list."""
    connection = sqlite3.connect('homework_three.db')
    cursor = connection.cursor()
    sql_params = '''
    SELECT * FROM phones
    '''
    cursor.execute(sql_params)
    rows = cursor.fetchall()
    connection.close()
    return str(rows)
@app.route('/phones/update/')
def update_info():
    """Rename the contact that owns a phone number.

    Params: ?ContactName=<new name>&phoneNumber=<existing number>.
    """
    connect = sqlite3.connect('homework_three.db')
    cur = connect.cursor()
    name = request.args['ContactName']
    phone_number = request.args['phoneNumber']
    # '?' placeholders instead of f-string interpolation (SQL-injection fix).
    sql_param = '''
    UPDATE phones
    SET contactName = ?
    WHERE phoneValue = ?;
    '''
    cur.execute(sql_param, (name, phone_number))
    connect.commit()
    connect.close()
    return 'update_info'
# idea in progress: building a tool to cascade-update all the keys
@app.route('/phones/update-key/')
def update_key():
    """Work in progress: intended to renumber the phones primary keys.

    NOTE(review): the statement below is not valid SQLite ("CASCADE" and
    "REPLACE AUTOINCREMENT" cannot be used this way), so calling this route
    will raise sqlite3.OperationalError — confirm the intended behaviour
    before relying on it.
    """
    connect = sqlite3.connect('homework_three.db')
    cur = connect.cursor()
    sql_param = '''
    UPDATE phones CASCADE
    SET ID = REPLACE AUTOINCREMENT ;
    '''
    cur.execute(sql_param)
    connect.commit()
    connect.close()
    return 'update_key'
@app.route('/phones/delete/')
def delete_info():
    """Delete every phone row belonging to ?ContactName=<name>."""
    connect = sqlite3.connect('homework_three.db')
    cur = connect.cursor()
    name = request.args['ContactName']
    # '?' placeholder instead of f-string interpolation (SQL-injection fix).
    sql_query = '''
    DELETE FROM phones
    WHERE contactName = ?;
    '''
    cur.execute(sql_query, (name,))
    connect.commit()
    connect.close()
    return 'delete_phones'
if __name__ == '__main__':
    # 0.0.0.0 makes the dev server reachable from other hosts on the network.
    app.run(host='0.0.0.0')
"""
http://google.com:443/search/?name=hillel&city=Dnepr
1. Protocol
http:// - protocol (https)
ftp:// - file transfer protocol
smtp:// - simple mail transfer protocol
ws:// (wss)
2. Domain (IPv4, IPv6)
google.com, facebook.com, lms.hillel.com
developer.mozilla.org -> 99.86.4.33 (DNS)
0-255.0-255.0-255.0-255
192.172.0.1
# WRONG
192.172.0
192.172.0.1.2
256.192.1.1
localhost -> 127.0.0.1
3. Port
http - 80
https - 443
smtp - 22
5000+
0 - 65535
4. Path
/generate-password/ -> generate_password()
/search/ -> make_search()
5. Query parameters
? - sep
name=hillel&city=Dnepr -
{'name': 'hillel', 'city': 'Dnepr'}
"""
| true |
6565d0fbf0937f62a29c64c29aa520233651a16f | Python | milindmalshe/Graph-Cconvolutional-Neural-Network-Polymer-Structure-Prediction | /save_structure.py | UTF-8 | 2,068 | 2.609375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import sys
import random
# Command-line arguments: input structure filename, functional-group option
# (0-3, selects the atom string appended to the name) and the functional-group
# id recorded in the bookkeeping file.
file_to_read = str(sys.argv[1])
option_fun = sys.argv[2]
fun_id = sys.argv[3]
def gen_filename(file_to_read, opt_fun, fun_id):
    """Build the output filename for a modified structure and log the insertion.

    Inserts an atom tag ('C', 'Np', 'Ns', 'O', or 'None' for an unknown
    option) plus a random 0-999 id before the trailing 'epo' suffix, then
    records the insertion via maintain_data().
    """
    file_old = file_to_read
    # BUG FIX: polymer was previously only assigned inside the endswith()
    # branch, so names without the 'epo' suffix raised NameError below.
    polymer = ''
    if file_to_read.endswith('epo'):
        polymer = file_to_read[-4:]
        file_to_read = file_to_read[:-4]
    # Map the numeric option to its atom string (dict replaces the if-chain).
    opt_str = {0: 'C', 1: 'Np', 2: 'Ns', 3: 'O'}.get(int(opt_fun), 'None')
    gen_rand = str(random.randint(0, 999))
    str_out = file_to_read + opt_str + gen_rand + polymer
    maintain_data(file_to_read=file_old, gen_rand=gen_rand, fun_id=fun_id)
    return str_out
def maintain_data(file_to_read, gen_rand, fun_id):
    """Append a bookkeeping row for this insertion to '<polymer>info.txt'.

    Each row has width 10: slot 0 holds the latest random id, slots 1..9 hold
    the functional-group ids inserted so far (so at most 9 insertions fit per
    row, despite the original comment saying 10).
    NOTE(review): there is no bound check on `count`, so an overflowing row
    would index past slot 9; and `count_nonzero` assumes ids are non-zero —
    confirm both invariants hold at the call sites.
    """
    polymer_name = file_to_read[-4:]
    data_filename = polymer_name + 'info' + '.txt'
    if file_to_read.startswith('data.3rr'):
        # Fresh structure file: start a brand-new row.
        new_array = np.zeros((1, 10))
        new_array[0, 0] = int(gen_rand)
        new_array[0, 1] = int(fun_id)
        with open(data_filename, 'ab') as f:
            np.savetxt(f, new_array)
    else:
        Z = np.loadtxt(data_filename)  # load previously recorded rows
        if len(Z.flatten()) == 10:
            # loadtxt returns 1-D for a single row; normalise to 2-D.
            Z = Z[None, :]
        count = np.count_nonzero(Z[-1, :])
        # Copy the last row, refresh the random id and append the new fun_id
        # in the first free slot.
        last_array = Z[-1, :]
        new_array = last_array.copy()
        new_array[0] = int(gen_rand)
        new_array[count] = int(fun_id)
        new_array = new_array[None, :]
        with open(data_filename, 'ab') as f:
            np.savetxt(f, new_array)
    return None
if __name__ == "__main__":
    str_out = gen_filename(file_to_read=file_to_read, opt_fun=option_fun, fun_id=fun_id)
    # Parenthesized call works under both Python 2 and 3
    # (was the Python-2-only statement form `print str_out`).
    print(str_out)
| true |
d280de2da95ee0c83230958d67cd71cdfdd62bc2 | Python | JmanJ/Chat-bot | /Bot_Module/DataStore/MyObject.py | UTF-8 | 211 | 2.921875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
class MyObject():
    """Simple attribute store with put/get accessors.

    The `importance` argument of put() is accepted for interface
    compatibility but is currently unused.
    """
    def __init__(self):
        pass

    def put(self, name, value, importance=0):
        """Store `value` under attribute `name` on this instance."""
        # BUG FIX: setattr/getattr need the target object as first argument;
        # the original called setattr(name, value) and raised TypeError.
        setattr(self, name, value)

    def get(self, name):
        """Return the value previously stored under `name` (AttributeError if absent)."""
        return getattr(self, name)
dcceb45e6fbd93fc9b87f53b10d8045471fc282b | Python | eto-ne-gang/eto-ne-itertools | /eto_ne_itertools.py | UTF-8 | 5,617 | 4.375 | 4 | [
"MIT"
] | permissive | """A module with alternatives of some functions from itertools module.
Functions
---------
count - an infinite arithmetic sequence
cycle - an infinite cycle over the iterable object
repeat - repetition of a value
product - cartesian product of input iterables
combinations - combinations of certain length with unique elements
combinations_with_replacement - combinations of certain length with
unique elements that might contain duplicates
permutations - permutations of certain length with unique elements
"""
from typing import Generator, Iterable
def count(start: int = 0, step: int = 1) -> Generator:
    """Yield the endless arithmetic progression start, start+step, start+2*step, ...

    :param start: first value yielded
    :param step: difference between consecutive values
    :return: infinite generator of integers
    """
    value = start
    while True:
        yield value
        value = value + step
def cycle(iterable: Iterable) -> Generator:
    """Yield items from *iterable* endlessly, restarting from the beginning.

    Generalized: the original required an indexable sequence (it used
    ``iterable[num]`` and ``len``); this version caches the items on the
    first pass so any iterable — including one-shot iterators — works.
    An empty iterable simply produces an empty generator.

    :param iterable: iterable object to cycle over
    :return: infinite generator over its items
    """
    saved = []
    # First pass: yield while remembering each item.
    for item in iterable:
        yield item
        saved.append(item)
    # Subsequent passes: replay the cached items forever.
    while saved:
        for item in saved:
            yield item
def repeat(val, times: int = None) -> Generator:
    """Yield *val* repeatedly.

    Generalized (backward-compatibly) to honour the documented "number of
    repetitions": with the default ``times=None`` the value is repeated
    forever, exactly as before; otherwise it is yielded ``times`` times.

    :param val: the value to repeat
    :param times: how many repetitions, or None for an infinite stream
    :return: generator of repeated values
    """
    if times is None:
        while True:
            yield val
    else:
        for _ in range(times):
            yield val
def product(*iterables: Iterable) -> Generator:
    """Yield the cartesian product of the given iterables as tuples.

    BUG FIX: the original recursion re-iterated ``iterables[1:]`` once per
    element of the first iterable, so one-shot iterators (generators) were
    exhausted after the first pass and later combinations were silently
    dropped.  Each input is now materialized exactly once up front.

    :param iterables: iterable objects
    :return: generator of product tuples (a single empty tuple when called
        with no arguments, matching itertools.product)
    """
    pools = [tuple(pool) for pool in iterables]

    def _expand(index):
        # Recursively build the tail product for pools[index:].
        if index == len(pools):
            yield ()
        else:
            for item in pools[index]:
                for rest in _expand(index + 1):
                    yield (item,) + rest

    yield from _expand(0)
def combinations(r: int, n: int) -> Generator:
    """Yield, in lexicographic order, every length-``r`` combination of the
    integers ``0 .. n-1`` (each combination has strictly increasing, unique
    elements).

    :param r: length of each combination
    :param n: number of integers to choose from
    :return: generator of combination tuples (empty when r > n)
    """
    if r > n:
        return
    # Start from the lexicographically smallest selection: (0, 1, ..., r-1).
    selection = list(range(r))
    yield tuple(selection)
    while True:
        # Locate the rightmost position that has not reached its maximum
        # possible value (position i can be at most i + n - r).
        pivot = next(
            (i for i in reversed(range(r)) if selection[i] != i + n - r),
            None,
        )
        if pivot is None:
            # Every position is maxed out: all combinations were produced.
            return
        selection[pivot] += 1
        # Reset everything right of the pivot to the smallest legal values.
        for i in range(pivot + 1, r):
            selection[i] = selection[i - 1] + 1
        yield tuple(selection)
def combinations_with_replacement(r: int, n: int) -> Generator:
    """Yield, in lexicographic order, every length-``r`` multiset combination
    (duplicates allowed) of the integers ``0 .. n-1``.

    BUG FIX: the original guarded ``if r > n: return``, but with replacement
    ``r`` may legitimately exceed ``n`` (e.g. r=3, n=2 has four results).
    The only impossible request is a non-empty combination drawn from an
    empty value range.

    :param r: length of each combination
    :param n: number of integers to choose from
    :return: generator of combination tuples
    """
    if n == 0 and r > 0:
        return
    # Lexicographically smallest combination: all zeros.
    nums = [0] * r
    yield tuple(nums)
    while True:
        # Rightmost position that can still be incremented (value < n-1).
        curr_idx = None
        for idx in reversed(range(r)):
            if nums[idx] != n - 1:
                curr_idx = idx
                break
        if curr_idx is None:
            return
        nums[curr_idx] += 1
        # With replacement, positions to the right restart at the new value.
        for idx in range(curr_idx + 1, r):
            nums[idx] = nums[curr_idx]
        yield tuple(nums)
def permutations(iterable: Iterable, length: int = None) -> Generator:
    """Recursively generate the k-permutations of *iterable* in the order the
    elements appear, yielding one tuple per permutation.

    If *length* is None it defaults to the number of elements.
    BUG FIX: ``length == 0`` now yields the single empty permutation ``()``
    (matching itertools.permutations) instead of yielding nothing.

    :param iterable: iterable to permute (materialized once, so one-shot
        iterators also work)
    :param length: length of each permutation
    :return: generator of permutation tuples
    """
    items = list(iterable)
    if length is None:
        length = len(items)
    if length == 0:
        yield ()
        return
    if length == 1:
        for elem in items:
            yield (elem,)
        return
    for i, elem in enumerate(items):
        # Permute the remaining elements after taking items[i] first.
        rest = items[:i] + items[i + 1:]
        for tail in permutations(rest, length - 1):
            yield (elem,) + tail
| true |
91246ab476555225bd98d9d8755c8ec5f93b05fa | Python | firelighted/swyne | /swyne/layout.py | UTF-8 | 16,857 | 2.796875 | 3 | [] | no_license |
from .node import *
import math
from pyglet.gl import *
######### Contains various useful nodes for layout
# BackgroundNode
# ListenerNode
# HintedLayoutNode
# PaddingLayoutNode
# ForceMinLayoutNode
# RowsLayoutNode
# ColsLayoutNode
# ScrollLayoutNode
# BlocksLayoutNode
###################################################
class BackgroundNode(AbstractNode):
    """Node that fills its layout rectangle with a solid RGBA color before
    drawing its children."""

    def __init__(self):
        super().__init__()
        self.color = (255, 255, 255, 255)  # RGBA, each channel 0-255
        self.pos = Vector2(0, 0)
        self.dims = Vector2(0, 0)

    def serialize(self):
        # BUG FIX: previously returned list(self._color) — an attribute that
        # is never assigned anywhere (both __init__ and deserialize set
        # self.color) — so serialization always raised AttributeError.
        return list(self.color)

    def deserialize(self, data):
        self.color = tuple(data)

    def draw(self):
        if self.color[3] != 255:
            # Translucent fill: enable standard alpha blending.
            glEnable(GL_BLEND)
            # Consistency fix: use the star-imported constant directly, like
            # GL_SRC_ALPHA beside it (was gl.GL_ONE_MINUS_SRC_ALPHA, relying
            # on a name `gl` that is not imported in this module).
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
        glColor4f(self.color[0] / 255, self.color[1] / 255, self.color[2] / 255, self.color[3] / 255)
        glRectf(self.pos.x, self.pos.y, self.pos.x + self.dims.x, self.pos.y + self.dims.y)
        default_draw(self)

    def set_layout(self, pos, dims):
        default_set_layout(self, pos, dims)
        self.pos = pos
        self.dims = dims

    def layout_hints(self):
        return default_layout_hints(self)
#################################################
# a node that can keep a listener up to date
class ListenerNode(AbstractNode):
    """Node that drives a generator-based event listener.

    The listener is a generator; each of its yields names the event (a
    string), events (a list of strings), or None (= any event) it wants to
    receive next, and dispatch() resumes it when a matching event fires.
    """
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
        self._listener = None       # the running generator, or None when finished
        self._listening_for = []    # event name / list of names / None (any)
    @property
    def listener(self): return self._listener
    @listener.setter
    def listener(self, func, *args, **kwargs):
        # NOTE(review): property setters only ever receive a single value, so
        # *args/**kwargs here are always empty — confirm the intended API.
        self._listener = None
        gen = func(*args,**kwargs)
        if isinstance(gen, types.GeneratorType):
            try:
                # Prime the generator; its first yield names the event(s)
                # it wants to receive.
                event = next(gen)
                self._listener = gen
                self._listening_for = event
                # send an on_layout event on init so the listener can know
                self.dispatch("on_layout",self.pos.x,self.pos.y,self.dims.x,self.dims.y)
            except StopIteration:
                pass
    def dispatch(self, event_name,*args):
        """Resume the listener if it subscribed to event_name, then forward
        the event (except on_draw/on_layout) to children with a dispatch()."""
        event = self._listening_for
        good = isinstance(event,str) and event_name == event
        good = good or isinstance(event,list) and event_name in event
        good = good or (event is None)
        if good:
            try:
                # The send() return value is the next subscription.
                next_event = self._listener.send((event_name,*args))
                self._listening_for = next_event
            except StopIteration:
                # Listener finished: stop resuming it.
                self._listener = None
                self._listening_for = []
        # tell your children
        if event_name not in ["on_draw", "on_layout"]:
            for child in self.children_with_attr("dispatch"):
                child.dispatch(event_name,*args)
    def draw(self):
        self.dispatch("on_draw")
        default_draw(self)
    def set_layout(self,pos,dims):
        default_set_layout(self,pos,dims)
        self.pos = pos
        self.dims = dims
        self.dispatch("on_layout",pos.x,pos.y,dims.x,dims.y)
    def layout_hints(self):
        return default_layout_hints(self)
###################################################
# a node where you can specify mindims and maxdims
# type "*" to inherit the propertyy from the children
class HintedLayoutNode(AbstractNode):
    """Node whose min/max dimensions can be set explicitly.

    Each of mindims/maxdims' components may be a number or the string "*",
    meaning "inherit this component from the children's layout hints".
    """
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
        self.mindims = Vector2("*","*")
        self.maxdims = Vector2("*","*")
    def serialize(self):
        # Serialized as [min x, max x, min y, max y].
        return [self.mindims.x,self.maxdims.x,self.mindims.y,self.maxdims.y]
    def deserialize(self,data):
        # Accepts: a single value (applied to everything), a [w, h] pair
        # (min == max per axis), or the full [min x, max x, min y, max y].
        if isinstance(data,int):
            self.mindims.x = data
            self.maxdims.x = data
            self.mindims.y = data
            self.maxdims.y = data
        else:
            assert(isinstance(data,list))
            if len(data) == 2:
                self.mindims.x = data[0]
                self.maxdims.x = data[0]
                self.mindims.y = data[1]
                self.maxdims.y = data[1]
            elif len(data) == 4:
                self.mindims.x = data[0]
                self.maxdims.x = data[1]
                self.mindims.y = data[2]
                self.maxdims.y = data[3]
            else:
                raise ValueError("Bad data for layout hints: "+str(data))
    def set_layout(self,pos,dims):
        default_set_layout(self,pos,dims)
        self.pos = pos
        self.dims = dims
    def layout_hints(self):
        # "*" components fall through to the children's hints.
        def inherit(v,w):
            if v == "*": return w
            return v
        c_mindims, c_maxdims = default_layout_hints(self)
        mindims = Vector2(inherit(self.mindims.x, c_mindims.x), inherit(self.mindims.y, c_mindims.y))
        maxdims = Vector2(inherit(self.maxdims.x, c_maxdims.x), inherit(self.maxdims.y, c_maxdims.y))
        return mindims,maxdims
# padding can be a positive number or "*"
# if "*", then the max_width is infinite in that direction
# and padding fills in the rest of the space
# padding can be a positive number or "*"
# if "*", then the max_width is infinite in that direction
# and padding fills in the rest of the space
class PaddingLayoutNode(AbstractNode):
    """Node that surrounds its content with fixed or elastic ("*") padding.

    A numeric side reserves that many pixels; a "*" side absorbs whatever
    space is left over (two "*" sides on the same axis center the content).
    """
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
        self.padding = {"top":"*", "bottom":"*", "left":"*", "right":"*"}
    def serialize(self):
        return self.padding
    def deserialize(self,data):
        # Accepts a dict, a single value for all four sides, a [vertical,
        # horizontal] pair, or the 4-element HTML order [top, right, bottom, left].
        if isinstance(data,dict):
            self.padding = data
        elif isinstance(data,int) or isinstance(data,str):
            self.padding = {"top":data, "bottom":data, "left":data, "right":data}
        else:
            assert(isinstance(data,list))
            if len(data) == 2:
                self.padding = {"top":data[0], "bottom":data[0], "left":data[1], "right":data[1]}
            elif len(data) == 4:
                # top right bottom left. HTML convention.
                self.padding = {"top":data[0], "right":data[1], "bottom":data[2], "left":data[3]}
            else:
                raise ValueError("Bad data for padding: "+str(data))
    def set_layout(self,pos,dims):
        self.pos = pos
        self.dims = dims
        min_dims, max_dims = default_layout_hints(self)
        # Horizontal axis: subtract fixed padding, then let "*" sides absorb
        # any room the content cannot use (content capped at its max width).
        w = dims.x
        x = 0
        if self.padding["left"] != "*":
            w -= self.padding["left"]
            x = self.padding["left"]
        if self.padding["right"] != "*":
            w -= self.padding["right"]
        if max_dims.x < w:
            if self.padding["left"] == "*" and self.padding["right"] == "*":
                # Elastic on both sides: center the content.
                x += int((w-max_dims.x)/2)
            elif self.padding["left"] == "*":
                # Elastic left only: push the content to the right edge.
                x += w-max_dims.x
            if self.padding["left"] == "*" or self.padding["right"] == "*":
                w = max_dims.x
        # Vertical axis, mirroring the horizontal logic (y grows upward,
        # so "bottom" plays the role "left" played above).
        h = dims.y
        y = 0
        if self.padding["bottom"] != "*":
            h -= self.padding["bottom"]
            y = self.padding["bottom"]
        if self.padding["top"] != "*":
            h -= self.padding["top"]
        if max_dims.y < h:
            if self.padding["bottom"] == "*" and self.padding["top"] == "*":
                y += int((h-max_dims.y)/2)
            elif self.padding["bottom"] == "*":
                y += h-max_dims.y
            if self.padding["bottom"] == "*" or self.padding["top"] == "*":
                h = max_dims.y
        default_set_layout(self, pos+Vector2(x,y), Vector2(w,h))
    def layout_hints(self):
        min_dims, max_dims = default_layout_hints(self)
        # An elastic side makes this node stretchable along that axis.
        if self.padding["left"] == "*" or self.padding["right"] == "*":
            max_dims.x = float('inf')
        if self.padding["top"] == "*" or self.padding["bottom"] == "*":
            max_dims.y = float('inf')
        # Fixed padding adds to both the minimum and maximum footprint;
        # "*" contributes nothing here.
        def de_star(x):
            if x == "*": return 0
            return x
        extraw = de_star(self.padding["left"]) + de_star(self.padding["right"])
        extrah = de_star(self.padding["top"]) + de_star(self.padding["bottom"])
        extra = Vector2(extraw,extrah)
        return min_dims + extra, max_dims+extra
class ForceMinLayoutNode(AbstractNode):
    """Node that clamps its children's maximum size down to their minimum
    along the axes named in which_dims ("X", "Y" or "XY")."""
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0, 0)
        self.dims = Vector2(0, 0)
        self.which_dims = "XY"
    def serialize(self):
        return self.which_dims
    def deserialize(self, data):
        self.which_dims = data
    def set_layout(self, pos, dims):
        self.pos = pos
        self.dims = dims
        default_set_layout(self, pos, dims)
    def layout_hints(self):
        child_min, child_max = default_layout_hints(self)
        # For each forced axis, report the minimum as the maximum too,
        # pinning the node at its smallest workable size.
        forced_x = child_min.x if "X" in self.which_dims else child_max.x
        forced_y = child_min.y if "Y" in self.which_dims else child_max.y
        return child_min, Vector2(forced_x, forced_y)
####################################################
def _distribute_layout(mins,maxs,length):
n = len(mins)
lengths = [mins[i] for i in range(n)]
length -= sum(lengths)
if length == 0: return lengths
diffs = set([maxs[i] - mins[i] for i in range(n)])
while True:
if len(diffs) == 0: break
size = min(diffs)
diffs.remove(size)
if size == 0: continue
num = len([i for i in range(len(mins)) if maxs[i]-lengths[i] >= size])
if num*size < length:
for i in range(n):
if maxs[i]-lengths[i] >= size: lengths[i] += size
length -= num*size
else:
to_add = math.floor(length/num)
for i in range(n):
if maxs[i]-lengths[i] >= size: lengths[i] += to_add
break
return lengths
class RowsLayoutNode(AbstractNode):
    """Container that stacks its children in vertical rows.

    Children are laid out in reverse list order from the bottom up (y grows
    upward), so the first child ends at the top.
    """
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
    def set_layout(self,pos,dims):
        self.pos = pos
        self.dims = dims
        mins = []
        maxs = []
        widths = []
        # Reversed so the first child gets the topmost row.
        children = list(reversed(self.children_with_attr("set_layout")))
        for child in children:
            if hasattr(child, "layout_hints"):
                child_min_dims, child_max_dims = child.layout_hints()
            else:
                # NOTE(review): this fallback passes `self`, not `child`, to
                # default_layout_hints — confirm that is intentional.
                child_min_dims, child_max_dims = default_layout_hints(self)
            mins.append(child_min_dims.y)
            maxs.append(child_max_dims.y)
            widths.append(min(dims.x,child_max_dims.x))
        # Share the vertical space among the rows.
        heights = _distribute_layout(mins,maxs,dims.y)
        height = 0
        for i in range(len(children)):
            children[i].set_layout(Vector2(pos.x,pos.y+height),Vector2(widths[i],heights[i]))
            height += heights[i]
    def layout_hints(self):
        # Width: at least the widest child's minimum; capped by the smallest
        # satisfiable child maximum.  Height: sums of the children's hints.
        min_dims = Vector2(0,0)
        max_dims = Vector2(float('inf'),0)
        maxs = []
        for child in self.children_with_attr("layout_hints"):
            child_min_dims, child_max_dims = child.layout_hints()
            if child_min_dims.x > min_dims.x: min_dims.x = child_min_dims.x
            maxs.append(child_max_dims.x)
            min_dims.y += child_min_dims.y
            max_dims.y += child_max_dims.y
        for maxx in maxs:
            if maxx < max_dims.x and maxx >= min_dims.x:
                max_dims.x = maxx
        return min_dims, max_dims
class ColsLayoutNode(AbstractNode):
    """Container that arranges its children in horizontal columns,
    left to right in list order (the x-axis mirror of RowsLayoutNode)."""
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
    def set_layout(self,pos,dims):
        self.pos = pos
        self.dims = dims
        mins = []
        maxs = []
        heights = []
        children = self.children_with_attr("set_layout")
        for child in children:
            # NOTE(review): unlike RowsLayoutNode there is no hasattr guard
            # here, so a child with set_layout but no layout_hints would
            # raise AttributeError — confirm whether that can occur.
            child_min_dims, child_max_dims = child.layout_hints()
            mins.append(child_min_dims.x)
            maxs.append(child_max_dims.x)
            heights.append(min(dims.y,child_max_dims.y))
        # Share the horizontal space among the columns.
        widths = _distribute_layout(mins,maxs,dims.x)
        width = 0
        for i in range(len(children)):
            children[i].set_layout(Vector2(pos.x+width,pos.y),Vector2(widths[i],heights[i]))
            width += widths[i]
    def layout_hints(self):
        # Height: at least the tallest child's minimum, capped by the
        # smallest satisfiable child maximum.  Width: sums of child hints.
        min_dims = Vector2(0,0)
        max_dims = Vector2(0,float('inf'))
        maxs = []
        for child in self.children_with_attr("layout_hints"):
            child_min_dims, child_max_dims = child.layout_hints()
            if child_min_dims.y > min_dims.y: min_dims.y = child_min_dims.y
            maxs.append(child_max_dims.y)
            min_dims.x += child_min_dims.x
            max_dims.x += child_max_dims.x
        for maxy in maxs:
            if maxy < max_dims.y and maxy >= min_dims.y:
                max_dims.y = maxy
        return min_dims, max_dims
############################ Scrolling
# assumes node has pos,dims,translate properties
# assumes node has pos,dims,translate properties
def draw_with_stencil(node):
    """Draw `node`'s children clipped to the node's rectangle, shifted by
    its scroll translation.  Uses the GL stencil buffer as the clip mask."""
    glEnable(GL_STENCIL_TEST)
    glClearStencil(0)
    glClear(GL_STENCIL_BUFFER_BIT)
    # Pass 1: write the node's rectangle into the stencil buffer only
    # (color and depth writes disabled).
    glColorMask(GL_FALSE, GL_FALSE, GL_FALSE, GL_FALSE)
    glDepthMask(GL_FALSE)
    glStencilMask(0xFF)
    glStencilFunc(GL_ALWAYS, 0xFF, 0xFF)
    glStencilOp(GL_REPLACE, GL_REPLACE, GL_REPLACE)
    glColor4f(1.0,1.0,1.0,1.0)
    glRectf(node.pos.x,node.pos.y,node.pos.x+node.dims.x,node.pos.y+node.dims.y)
    # Pass 2: re-enable color/depth writes and draw only where the stencil
    # was set, i.e. inside the rectangle.
    glColorMask(GL_TRUE, GL_TRUE, GL_TRUE, GL_TRUE)
    glDepthMask(GL_TRUE)
    glStencilFunc(GL_EQUAL, 0xFF, 0xFF)
    glStencilOp(GL_KEEP, GL_KEEP, GL_KEEP)
    glPushMatrix()
    # Apply the scroll offset (x negated; y positive scroll moves content up).
    glTranslatef(-node.translate.x, node.translate.y,0)
    default_draw(node)
    glPopMatrix()
    glDisable(GL_STENCIL_TEST)
class ScrollLayoutNode(AbstractNode):
    """Container that lets oversized content scroll along the axes named
    in which_dims, clipping the drawing to its own rectangle."""
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
        self.which_dims = "XY"          # scrollable axes: "X", "Y" or "XY"
        self.translate = Vector2(0,0)   # current scroll offset
        self.max_translate = Vector2(0,0)  # content overflow = scroll range
    def serialize(self):
        return self.which_dims
    def deserialize(self,data):
        self.which_dims = data
    def draw(self):
        # Children are drawn translated and clipped via the stencil buffer.
        draw_with_stencil(self)
    def set_layout(self,pos,dims):
        self.pos = pos
        self.dims = dims
        c_min_dims, c_max_dims = default_layout_hints(self)
        c_pos = Vector2(pos.x,pos.y)
        c_dims = Vector2(dims.x,dims.y)
        # On scrollable axes the content keeps its natural size; the scroll
        # range is however much it overflows this node's rectangle.
        if "X" in self.which_dims:
            if c_max_dims.x == float('inf'):
                c_dims.x = max(c_min_dims.x, dims.x)
            else:
                c_dims.x = c_max_dims.x
            self.max_translate.x = max(0, c_dims.x-dims.x)
        if "Y" in self.which_dims:
            if c_max_dims.y == float('inf'):
                c_dims.y = max(c_min_dims.y, dims.y)
            else:
                c_dims.y = c_max_dims.y
            # Anchor the content to this node's top edge.
            c_pos.y += dims.y - c_dims.y # regardless of if dimsy < dims.y
            self.max_translate.y = max(0, c_dims.y-dims.y)
        # Clamp the current scroll offset to the new valid range.
        self.translate.x = min(self.translate.x, self.max_translate.x)
        self.translate.y = min(self.translate.y, self.max_translate.y)
        default_set_layout(self,c_pos,c_dims)
    def layout_hints(self):
        # Scrollable axes are effectively unconstrained (tiny minimum,
        # infinite maximum); fixed axes pass the children's hints through.
        c_min_dims, c_max_dims = default_layout_hints(self)
        min_dims = Vector2(10,10)
        max_dims = Vector2(float('inf'),float('inf'))
        if "X" not in self.which_dims:
            min_dims.x, max_dims.x = c_min_dims.x, c_max_dims.x
        if "Y" not in self.which_dims:
            min_dims.y, max_dims.y = c_min_dims.y, c_max_dims.y
        return min_dims, max_dims
class BlocksLayoutNode(AbstractNode):
    """Container that flows its children left-to-right, wrapping to a new
    line when a child would overflow the width (word-wrap style), with
    vertical scrolling for overflow."""
    def __init__(self):
        super().__init__()
        self.pos = Vector2(0,0)
        self.dims = Vector2(0,0)
        self.translate = Vector2(0,0)      # current vertical scroll offset
        self.max_translate = Vector2(0,0)  # vertical overflow = scroll range
    def draw(self):
        draw_with_stencil(self)
    def set_layout(self,pos,dims):
        self.pos = pos
        self.dims = dims
        # Cursor within the current line; y starts at the top edge.
        line_x = 0
        line_y = dims.y
        line_h = 0
        children = self.children_with_attr("set_layout")
        for child in children:
            # Children are placed at their minimum size.
            cdims, _ = child.layout_hints()
            if cdims.x + line_x > dims.x:
                # Child does not fit on this line: wrap to the next one.
                line_x = 0
                line_y -= line_h
                line_h = cdims.y
            else:
                # Line height grows to the tallest child on the line.
                if line_h < cdims.y: line_h = cdims.y
            child.set_layout(pos+Vector2(line_x, line_y-cdims.y),cdims)
            line_x += cdims.x
        # How far the content extends below the bottom edge.
        self.max_translate.y = max(0, -line_y+line_h)
        self.translate.y = min(self.translate.y, self.max_translate.y)
    def layout_hints(self):
        # Minimum: large enough for the single biggest child; maximum:
        # unbounded (content wraps and scrolls as needed).
        min_w = 0
        min_h = 0
        children = self.children_with_attr("layout_hints")
        for child in children:
            cdims, _ = child.layout_hints()
            if cdims.x > min_w: min_w = cdims.x
            if cdims.y > min_h: min_h = cdims.y
        min_dims = Vector2(min_w,min_h)
        max_dims = Vector2(float('inf'),float('inf'))
        return min_dims, max_dims
| true |
42859c64b457e4fba0cbc0dc092fc59b47f593f3 | Python | sherlock270/Graphs | /projects/graph/src/graph.py | UTF-8 | 2,636 | 3.640625 | 4 | [] | no_license | import random
class Graph:
    """Represent a graph as a dictionary of vertices mapping labels to edges."""

    def __init__(self):
        self.vertices = dict()

    def add_vertex(self, label):
        """Create and register a Vertex for the given label."""
        self.vertices[label] = Vertex(label)

    def show_graph(self):
        """Return the label -> Vertex mapping."""
        return self.vertices

    def add_edge(self, vertex, destination):
        """Add a directed edge from the vertex labelled `vertex` to the
        vertex labelled `destination`."""
        vert = self.vertices[vertex]
        vert.edges.add(Edge(destination))

    def dft(self, node):
        """Depth-first traversal from `node`; returns Vertex objects in
        visit order."""
        visited = []
        seen = set()  # labels, for O(1) membership (was an O(n) list scan)
        stack = [node]
        while stack:
            vert = stack.pop(0)
            if vert.label not in seen:
                seen.add(vert.label)
                visited.append(vert)
                for edge in vert.edges:
                    stack.insert(0, self.vertices[edge.destination])
        return visited

    def dfs(self, node, target):
        """Depth-first search; True iff a vertex labelled `target` is
        reachable from `node`."""
        seen = set()
        stack = [node]
        while stack:
            vert = stack.pop(0)
            if vert.label not in seen:
                seen.add(vert.label)
                if vert.label == target:
                    return True
                for edge in vert.edges:
                    stack.insert(0, self.vertices[edge.destination])
        return False

    def bft(self, start_node):
        """Breadth-first traversal; returns labels in visit order."""
        queue = [start_node]
        visited = []
        seen = set()
        while queue:
            vert = queue.pop()
            if vert.label not in seen:
                seen.add(vert.label)
                visited.append(vert.label)
                for edge in vert.edges:
                    queue.insert(0, self.vertices[edge.destination])
        return visited

    def bfs(self, start_node, target):
        """Breadth-first search; True iff a vertex labelled `target` is
        reachable from `start_node`.  (Leftover debug print removed.)"""
        queue = [start_node]
        seen = set()
        while queue:
            vert = queue.pop()
            if vert.label not in seen:
                seen.add(vert.label)
                if target == vert.label:
                    return True
                for edge in vert.edges:
                    queue.insert(0, self.vertices[edge.destination])
        return False
class Vertex:
    """A graph vertex: a label, a set of outgoing edges, and 2D plot
    coordinates (randomized in [-5, 5) when not supplied)."""
    def __init__(self, label, x=None, y=None):
        self.label = label
        self.edges = set()
        self.x = random.random() * 10 - 5 if x is None else x
        self.y = random.random() * 10 - 5 if y is None else y
class Edge:
    """A directed edge; stores the destination vertex's label, which is
    resolved through Graph.vertices during traversal."""
    def __init__(self, destination):
        self.destination = destination
| true |
f850001900fed389ba622405510987c693cc334a | Python | oshuakbaev/pp2 | /TSIS2/ip-address.py | UTF-8 | 178 | 3.21875 | 3 | [] | no_license | addr = list(input())
# Defang the IP address: replace every "." with "[.]".
# (The original mutated `addr` while iterating over it and re-searched the
# list with index() per dot; str.replace does the same job in one pass.)
addr2 = ''.join(addr).replace(".", "[.]")
print(addr2)
| true |
d493c7b70db9f55cddc00db1c04c6671f73109cc | Python | ptanguy/ROS_Package | /src/nao_xaal/scripts/kinect.py | UTF-8 | 957 | 2.796875 | 3 | [] | no_license | #!/usr/bin/python
import rospy
import tf
import geometry_msgs.msg
class Kinect:
    """Watches Kinect skeleton transforms (via ROS tf) to detect a fall."""
    def __init__(self):
        self.listener = tf.TransformListener()
        self.rate = rospy.Rate(0.5)  # poll at 0.5 Hz
        # Vertical neck-to-hip separation; 1 is a "standing" sentinel value.
        self.distance = 1
    def fallDetection(self):
        """Block until the tracked person's neck drops near hip height.

        Polls the /neck_1 and /left_hip_1 frames relative to
        /openni_depth_frame and treats a separation below 0.15 as a fall
        (units per the tf frames — presumably metres; TODO confirm).
        Always returns True once the threshold is crossed.
        """
        self.resetDistance()
        while self.distance > 0.15:
            print "distance h of neck and hip ", self.distance
            try:
                # Component [2] of each translation is compared — presumably
                # the vertical axis of the depth frame; confirm convention.
                (trans1, rot1) = self.listener.lookupTransform('/openni_depth_frame', '/neck_1', rospy.Time(0))
                (trans2, rot2) = self.listener.lookupTransform('/openni_depth_frame', '/left_hip_1', rospy.Time(0))
                self.distance = abs(trans1[2]-trans2[2])
            except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
                # Transform not available yet: retry immediately (no sleep).
                continue
            self.rate.sleep()
        rospy.loginfo("Fall detected!")
        return True
    def resetDistance(self):
        # Restore the standing sentinel before a new detection cycle.
        self.distance = 1
| true |
062569bc24a2a5570b9faf63ddea5e356dc18ded | Python | iyline-sigey/PREDICTIVE-ANALYSIS-ON-REMOTE-LEARNING | /model.py | UTF-8 | 2,611 | 2.796875 | 3 | [
"MIT"
] | permissive | import streamlit as st
import numpy as np
import pandas as pd
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential, load_model
from keras.layers import Dense, Embedding, LSTM, Bidirectional,Flatten,Dropout
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
from keras import regularizers
from keras import layers
import re
import pickle
# Load the cleaned tweet dataset (columns used below: clean_tweet, sentiment).
df=pd.read_csv('remote_clean.csv')
vocabulary_size = 10000
max_words = 5000
max_len = 200
#Neural Networks
st.header('Neural Networks')
X=df.clean_tweet.values
y=df.sentiment.values
# Encode the sentiment labels as integers.
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
y=le.fit_transform(y)
# Split the data into train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
# create the tokenizer that comes with Keras.
# NOTE(review): this tokenizer is never saved, yet predict() below loads
# 'tokenizer.pickle' — confirm that file is produced elsewhere and matches.
tokenizer = Tokenizer(num_words=vocabulary_size)
tokenizer.fit_on_texts(X_train)
# Convert the texts to padded integer sequences of length max_len.
X_train_seq = tokenizer.texts_to_sequences(X_train)
X_val_seq = tokenizer.texts_to_sequences(X_test)
X_train_seq_padded = pad_sequences(X_train_seq, maxlen=200)
X_val_seq_padded = pad_sequences(X_val_seq, maxlen=200)
# Build the model: Embedding -> BiLSTM -> sigmoid output.
# NOTE(review): the Embedding input dim is max_words (5000) while the
# tokenizer allows indices up to vocabulary_size (10000) — confirm indices
# cannot exceed the embedding size.
model = Sequential()
model.add(layers.Embedding(max_words, 40, input_length=max_len))
model.add(layers.Bidirectional(layers.LSTM(20,dropout=0.6)))
model.add(layers.Dense(1,activation='sigmoid'))
# Compile with rmsprop and binary cross-entropy (binary sentiment target).
model.compile(optimizer='rmsprop',loss='binary_crossentropy', metrics=['accuracy'])
# Train and persist the model for predict() below.
history = model.fit(X_train_seq_padded, y_train, epochs=10,validation_data=(X_val_seq_padded, y_test))
model.save('movie_sent.h5')
@st.cache
def predict(message):
    """Score a tweet with the saved sentiment model.

    :param message: raw tweet text
    :return: model output in [0, 1], interpreted as P(positive)
    """
    model = load_model('movie_sent.h5')
    with open('tokenizer.pickle', 'rb') as handle:
        tokenizer = pickle.load(handle)
    x_1 = tokenizer.texts_to_sequences([message])
    # BUG FIX: pad to the same length the network was trained with
    # (max_len = 200 in the training code above); padding to 500 fed the
    # model a different input length than its Embedding layer expects.
    x_1 = pad_sequences(x_1, maxlen=200)
    prediction = model.predict(x_1)[0][0]
    return prediction
# Streamlit UI: collect a tweet, score it, and show a verdict.
message = st.text_area("Enter Tweet,Type Here ..")
if st.button("Analyze"):
    with st.spinner("Analyzing the tweet …"):
        prediction=predict(message)
        # > 0.6 positive, < 0.4 negative, in between is inconclusive.
        if prediction >0.6:
            st.success("Positive review with {:.2f} confidence".format(prediction))
            st.balloons()
        elif prediction <0.40:
            st.error("Negative review with {:.2f} confidence".format(1-prediction))
        else:
            st.warning("Not sure! Try to add some more words")
f9c71c0d67ffac8284eec89fb46fd6ee0b924a61 | Python | wjdtjf1234/assignment01 | /assignment03.py | UTF-8 | 6,119 | 3 | 3 | [] | no_license | import numpy as np
import matplotlib.pyplot as plt
import random
import math
def generatePointCluster(xoff, yoff):
    """Create 50 random points near (xoff, yoff).

    Returns a (50, 3) float array of rows [x, y, label]: each coordinate is
    the offset plus a uniform draw from [0, 1), and labels cycle 1, 2, 3.
    """
    points = np.zeros((50, 3))
    for k in range(50):
        points[k, 0] = xoff + random.random()
        points[k, 1] = yoff + random.random()
        points[k, 2] = (k % 3) + 1
    return points
def computeCentroid(p1, p2, p3):
    """Return the centroids [c1, c2, c3] of the points labelled 1, 2 and 3.

    Each of p1, p2, p3 is a (50, 3) array of rows [x, y, label].

    BUG FIX: the original divided every coordinate sum by the constant 50,
    which equals the per-label count only by coincidence at initialisation;
    after points are re-assigned the counts change, so the true mean must
    divide by the actual number of points carrying each label.  A label
    with no points gets a neutral [0.0, 0.0] centroid (no division by zero).
    """
    pts = np.vstack((p1[:50], p2[:50], p3[:50]))
    centroids = []
    for label in (1, 2, 3):
        mask = pts[:, 2] == label
        if mask.any():
            centroids.append([float(pts[mask, 0].mean()),
                              float(pts[mask, 1].mean())])
        else:
            centroids.append([0.0, 0.0])
    return centroids
# Generate three clusters around (0,0), (2,2) and (-2,2) and scatter-plot
# the points colored by their initial label, plus the three centroids.
p1= generatePointCluster(0.,0.)
p2= generatePointCluster(2.,2.)
p3= generatePointCluster(-2.,2.)
fig, ax= plt.subplots()
c= computeCentroid(p1,p2,p3)
# Labels 1/2/3 are drawn red/blue/green respectively.
for k in range(50):
    if(p1[k,2]==1):
        ax.scatter(p1[k,0],p1[k,1],color="red")
    elif(p1[k,2]==2):
        ax.scatter(p1[k,0],p1[k,1],color="blue")
    elif(p1[k,2]==3):
        ax.scatter(p1[k,0],p1[k,1],color="green")
    if(p2[k,2]==1):
        ax.scatter(p2[k,0],p2[k,1],color="red")
    elif(p2[k,2]==2):
        ax.scatter(p2[k,0],p2[k,1],color="blue")
    elif(p2[k,2]==3):
        ax.scatter(p2[k,0],p2[k,1],color="green")
    if(p3[k,2]==1):
        ax.scatter(p3[k,0],p3[k,1],color="red")
    elif(p3[k,2]==2):
        ax.scatter(p3[k,0],p3[k,1],color="blue")
    elif(p3[k,2]==3):
        ax.scatter(p3[k,0],p3[k,1],color="green")
# Centroids of labels 1/2/3 in orange/indigo/purple.
ax.scatter(c[0][0],c[0][1],color="orange")
ax.scatter(c[1][0],c[1][1],color="indigo")
ax.scatter(c[2][0],c[2][1],color="purple")
def computeDistance(d1, d2, d3, d1_x, d1_y, d2_x, d2_y, d3_x, d3_y, p1, p2, p3, c):
    """Reassign each of the 150 points to its nearest centroid.

    p1, p2, p3 are (50, 3) arrays of rows [x, y, label]; c is the list of
    three centroids [[x, y], ...].  Labels (1, 2 or 3) are rewritten in
    place and the three arrays are returned.

    The first nine parameters are kept only for call-site compatibility:
    the original overwrote them immediately, so they are ignored here.
    The triple-duplicated per-array body is collapsed into one loop; ties
    keep the original preference order (centroid 1, then 2, then 3), which
    list.index(min(...)) reproduces by returning the first minimum.
    """
    for pts in (p1, p2, p3):
        for k in range(50):
            dists = [math.sqrt((c[j][0] - pts[k][0]) ** 2 +
                               (c[j][1] - pts[k][1]) ** 2)
                     for j in range(3)]
            pts[k][2] = dists.index(min(dists)) + 1
    return p1, p2, p3
def computeEnergy(c_t, c):
    """Return the summed squared displacement between old centroids `c_t`
    and new centroids `c` (each a list of three [x, y] pairs).

    BUG FIX: the original subtracted in place, silently mutating the
    caller's `c_t`; this version is side-effect free.
    """
    return sum((c_t[i][0] - c[i][0]) ** 2 + (c_t[i][1] - c[i][1]) ** 2
               for i in range(3))
# Run 6 iterations of k-means: plot the current assignment, reassign points
# to the nearest centroid, recompute centroids, and print the "energy"
# (squared centroid displacement) per iteration.
p1= generatePointCluster(0.,0.)
p2= generatePointCluster(2.,2.)
p3= generatePointCluster(-2.,2.)
# The *_t names alias the same arrays; computeDistance mutates them in place.
p1_t=p1
p2_t=p2
p3_t=p3
c=computeCentroid(p1_t,p2_t,p3_t)
c_t=c
for x in range(6):
    fig, ax2= plt.subplots()
    # Scatter the points colored by their current label (1/2/3 -> red/blue/green).
    for k in range(50):
        if(p1[k,2]==1):
            ax2.scatter(p1[k,0],p1[k,1],color="red")
        elif(p1[k,2]==2):
            ax2.scatter(p1[k,0],p1[k,1],color="blue")
        elif(p1[k,2]==3):
            ax2.scatter(p1[k,0],p1[k,1],color="green")
        if(p2[k,2]==1):
            ax2.scatter(p2[k,0],p2[k,1],color="red")
        elif(p2[k,2]==2):
            ax2.scatter(p2[k,0],p2[k,1],color="blue")
        elif(p2[k,2]==3):
            ax2.scatter(p2[k,0],p2[k,1],color="green")
        if(p3[k,2]==1):
            ax2.scatter(p3[k,0],p3[k,1],color="red")
        elif(p3[k,2]==2):
            ax2.scatter(p3[k,0],p3[k,1],color="blue")
        elif(p3[k,2]==3):
            ax2.scatter(p3[k,0],p3[k,1],color="green")
    ax2.scatter(c[0][0],c[0][1],color="orange")
    ax2.scatter(c[1][0],c[1][1],color="indigo")
    ax2.scatter(c[2][0],c[2][1],color="black")
    # Assignment step, then update step; c_t keeps the previous centroids
    # so the energy (convergence measure) can be computed.
    p1_t,p2_t,p3_t= computeDistance(0,0,0,0,0,0,0,0,0,p1_t,p2_t,p3_t,c)
    c_t=c
    c=computeCentroid(p1_t,p2_t,p3_t)
    d= computeEnergy(c_t,c)
    print("Energy for iteration #%d : %f"%(x+1,d))
    print("c1=%f %f, c2=%f %f, c3= %f %f"%(c[0][0],c[0][1],c[1][0],c[1][1],c[2][0],c[2][1]))
    print("_____________________________________________________")
849178688940145728aeee36018e03b1f28dfef8 | Python | juergenmeinecke/EMET1001 | /pyplots/source_files/oneoverx.py | UTF-8 | 295 | 3.203125 | 3 | [] | no_license | from pylab import *
# Plot f(x) = 1/x on [-10, -0.1] and [0.1, 10]; the two branches are drawn
# separately so no line is drawn across the discontinuity at x = 0.
xneg = np.arange(-10,-0.1,0.01)
xpos = np.arange(0.1,10,0.01)
plot(xneg, xneg**(-1), color='crimson', linewidth=2.0)
plot(xpos, xpos**(-1), color='crimson', linewidth=2.0)
grid(True)
title('Illustration:- The function $1/x$ has different one-sided limits at zero')
show()
| true |
7c0b484d93f0ced51d4d4ae7fbcde49ec41e0137 | Python | smihir/bayesian-classifiers | /evaluate.py | UTF-8 | 1,096 | 2.953125 | 3 | [] | no_license | from __future__ import division
import matplotlib.pyplot as plt
from naivebayes import NaiveBayes
from tan import Tan
import numpy as np
import sys
def evaluate_tan():
    """Evaluate the TAN classifier (train file in argv[1], test file in
    argv[2]) and plot its learning curve via process()."""
    t = Tan(sys.argv[1], evaluate = True)
    out = t.evaluate(sys.argv[2])
    process(out, 'TAN')
def evaluate_naivebayes():
    """Evaluate the Naive Bayes classifier (train file in argv[1], test file
    in argv[2]) and plot its learning curve via process()."""
    nb = NaiveBayes(sys.argv[1], evaluate = True)
    out = nb.evaluate(sys.argv[2])
    process(out, 'Naive Bayes')
def process(out, classifier):
    """Build an accuracy-vs-training-size figure for one classifier.

    :param out: list of runs; each run is a list of tuples whose first
        three fields are (num_correct, test_set_size, train_set_size)
    :param classifier: classifier name used in the plot title
    """
    x = list()
    y = list()
    for run in out:
        test_data_size = run[0][1]
        train_data_size = run[0][2]
        # Average the correct-count over all repetitions of this run, then
        # normalize by the test-set size to get an accuracy in [0, 1].
        # (Removed the unused list comprehension `c = [d[0] for d in run]`.)
        avg_correct = sum(d[0] for d in run) / len(run)
        x.append(train_data_size)
        y.append(avg_correct / test_data_size)
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title('Accuracy vs. Training Data Size for ' + classifier)
    ax.set_xlabel('Training Data Size')
    ax.set_ylabel('Accuracy')
    ax.plot(x, y, 'ro')   # data points
    ax.plot(x, y, c='b')  # connecting line
def plot():
plt.show()
if __name__ == '__main__':
evaluate_tan()
evaluate_naivebayes()
plot()
| true |
c700dd4fac54e8d37a393680f77d2dcc74df9964 | Python | saeedghx68/simple-twitter | /models/user.py | UTF-8 | 516 | 2.671875 | 3 | [] | no_license | from base.base_model import BaseClass
from app import db
class User(BaseClass):
__tablename__ = 'users'
username = db.Column(db.String(32), unique=True, nullable=False)
password = db.Column(db.String(256), nullable=False)
full_name = db.Column(db.String(128), nullable=False)
def as_json(self):
return {
"user_id": self.id,
"username": self.username,
"full_name": self.full_name,
}
def __str__(self):
return f'{self.username}'
| true |
248b220d24a604cd35f77f7d445becbb936870dd | Python | alessandrofd/PythonCookbook | /chapter02/Recipe2_11.py | UTF-8 | 1,902 | 4.5625 | 5 | [] | no_license | __author__ = 'Alessandro'
# The strip() method can be used to strip characters from the beginning or end of a string. lstrip() and rstrip()
# perform stripping from the left or right side, respectively. By default, these methods strip whitespace, but other
# characters can be given.
# Whitespace stripping
s = ' hello world \n'
print(s.strip())
print(s.lstrip())
print(s.rstrip())
# Character stripping
t = '---------hello=========='
print(t.lstrip('-'))
print(t.strip('-='))
# DISCUSSION
# The various strip() methods are commonly used when reading and cleaning up data for later processing. For example, you
# can use them to get rid of whitespace, remove quotations, and other tasks.
# Be aware that stripping does not apply to any text in the middle of the string. For example:
s = ' hello world \n'
s = s.strip()
print(s)
# If you needed to do something to the inner space, you would need to use another technique, such as using the replace()
# method or a regular expression substitution. For example:
print(s.replace(' ', ''))
import re
print(re.sub('\s+', ' ', s))
# It is often the case that you want to combine string stripping operations with some other kind of iterative processing
# such as reading lines of data from a file. If so, this is one area where a generator expression can be useful. For
# example:
# with open(filename) as f:
# lines = (line.strip() for line in f)
# for line in lines;
# ...
# Here, the expression lines = (line.strip() for line in f) acts as a kind of data transform. It's efficient because it
# doesn't read the data into any kind fo temporary list first. It just creates an iterator where all of the lines
# produced have the stripping operation applied to them.
# For even more advanced stripping, you might want to turn to the translate() method. See the next recipe on sanitizing
# strings for further details. | true |
9a5f9c2becbe481c5e12ff475f69fb2144104ae7 | Python | BeLinKang/DngAdmin | /app/models.py | UTF-8 | 35,797 | 2.65625 | 3 | [] | no_license | from django.db import models
import random#随机模块
#——————————一键生成后缀规范——————————————————
# 静态框表名_id后缀(生成静态框,不可修改,验证规则=是否为数字,不能重复,不能为空值) 数据库类型==models.IntegerField_整形数字
# 文本框表名_str后缀(生成文本框,验证规则=填写不能为空) 数据库类型==models.CharField_字符串类型
# 禁用文本框表名_stop后缀(禁止填写,禁止修改) 数据库类型==models.CharField_字符串类型
# 密码框表名_psd后缀(禁用文本框,验证规则=密码必须6到12位,且不能出现空格,存时候会默认转MD5) 数据库类型==models.CharField_字符串类型
# 手机表名_phone后缀(生成文本框,验证规则=是否为手机号) 数据库类型==models.CharField_字符串类型
# 邮箱框表名_email后缀(生成文本框,验证规则=是否为邮箱) 数据库类型==models.CharField_字符串类型
# 身份证框表名_entity后缀(生成文本框,验证规则=18位数字身份证,不支持字母身份证) 数据库类型==models.CharField_字符串
# 数字框表名_int后缀(生成数字框,验证规则=只能输入非负整数,做大输入1个亿) 数据库类型==models.IntegerField整形数字
# 下拉框表名_xiala后缀(生成下拉框,验证规则=默认下拉值,default默认值必须写) 数据库类型==models.CharField_字符串 添加好选择元组 choices=(('nan','男'),('nv','女')),default='男'
# 选择框表名_xuanze后缀(生成选择框,验证规则=默认选择值,default默认值必须写) 数据库类型==models.CharField_字符串 添加好选择元组 choices=(('nan','男'),('nv','女')),default='男'
# 竖单选框表名_shudanxuan后缀(生成竖单选框,验证规则=默认选择值,default默认值必须写) 数据库类型==models.CharField_字符串 添加好选择元组 choices=(('nan','男'),('nv','女')),default='男'
# 横单选框表名_hengdanxuan后缀(生成横单选框,验证规则=默认选择值,default默认值必须写) 数据库类型==models.CharField_字符串 添加好选择元组 choices=(('nan','男'),('nv','女')),default='男'
# 开关框表名_bool后缀(生成开关框) 数据库类型==models.BooleanField_布尔真假类型
# 日期框表名_years后缀(生成日期框,验证规则=是否为时间) 数据库类型==DateTimeField 时间类型 格式=日期,(2099-12-28 00:00:00)
# 日期时间框表名_datetime后缀(生成日期+时间框,验证规则=是否为时间) 数据库类型==DateTimeField 时间类型 格式=日期+时间,(2099-12-28 23:59:59)
# 富文本框表名_text后缀(生成超大文本框,验证规则=填写不能为空,字数限制1万以内) 数据库类型==models.TextField_富文本
# 自动创建时间create_time 完整默认字段名称(请规范写,不然会前端要求填写创建时间)
# 自动更新时间update_time 完整默认字段名称(请规范写,不然会前端要求填写更新时间)
#————————————————字段属性说明——————————————
# verbose_name=字段备注
# blank=是否为必填项blank=False 等于必填,如果 blank=True,表示可以不填
# max_length=字符串的最大值,默认设置255
# unique=True=如果为True, 数值不能重复,这个字段在表中必须有唯一值
# default=默认填写值
# choices=元组选择 例子:models.CharField(max_length=255,choices=(('male','男'),('female','女')),default='male',verbose_name='性别')
# DatetimeField、DateField、TimeField这个三个时间字段独有
#——————————注意事项——————————————————
# 注意事项:(一) 不要用自增id,来作为用户ID或者业务id,不少新手都会这种方法,会使得业务与id生成强耦合,导致id生成算法难以升级,未来分布式数据库,和分表都会麻烦(如果准备分布式ID主键建议采用UUID,有条件采用雪花算法),
# 注意事项:(二) 不要修改已经建立的数据库字段,会带来未知风险,建议对字段新增,不要删除修改系统已经存在的数据库字段,
# 注意事项:(三) 创建字段名称记得带类型后缀,方便前台识别,生成对应表单输入样式
# 注意事项:(四) 你不确定未来会迁移什么类型数据库,为了保证通用,尽量全部小写,慎用驼峰命名法,数据库高手忽略此条
class user(models.Model):#前台会员表
uid_int = models.IntegerField(blank=False, verbose_name='会员ID')#会员ID, 设置不能为空
username_str = models.CharField(max_length=255, unique=True, blank=False, verbose_name='会员账号') #会员账号, unique不能重复,不许为空
password_str = models.CharField(max_length=255, blank=False, verbose_name='会员密码') #会员密码MD5加密,不许为空
name_str = models.CharField(max_length=255, blank=True, verbose_name='昵称') #会员昵称
gender_str = models.CharField(max_length=255, blank=True, verbose_name='性别') #性别,默认空
introduce_str = models.CharField(max_length=255, blank=True, verbose_name='个人简介') #个人简介,默认空
emall_str = models.CharField(max_length=255, blank=True, verbose_name='邮箱') #会员邮箱
mobile_str = models.CharField(max_length=255, blank=True, verbose_name='手机号') #手机号接收短信等
group_int = models.IntegerField(default=2, verbose_name='用户组')#填写用户组ID
rank_str = models.CharField(max_length=255, blank=True, verbose_name='等级')
gm_bool = models.BooleanField(default=False, verbose_name='前台管理') # 账号超级管理员开关,False=不是超级管理 True=是超级管理员
money_int = models.IntegerField(default=0, verbose_name='余额')#余额,默认值为0,不支持小数点
totalmoney_int = models.IntegerField(default=0, verbose_name='累计充值') # 默认值为0,不支持小数点
totalspend_int = models.IntegerField(default=0, verbose_name='累计消费') # 默认值为0,不支持小数点
integral_int = models.IntegerField(default=0, verbose_name='积分')#积分,默认值为0
spread_int = models.IntegerField(default=0, verbose_name='推广注册') # 默认值为0,不支持小数点
ip_str = models.CharField(max_length=255, blank=True, verbose_name='登录IP') #登录ip地址
shebei_str = models.CharField(max_length=255, blank=True, verbose_name='登录设备') #登录后台设备
cookie_str = models.CharField(max_length=255, blank=True, verbose_name='cookie') # 后台客户cookie
token_str = models.CharField(max_length=255, blank=True, verbose_name='token密钥') #后台客户token密钥,预留加密授权登录用
days_int = models.IntegerField(default=0, verbose_name='登录天数')#登录天数
pwderror_int = models.IntegerField(default=0, verbose_name='密错次数')#密码错误次数
frozen_bool = models.BooleanField(default=True, verbose_name='登录开关') #账号限制登录,False=没有禁止 True=账号禁止
frozentime_str = models.CharField(max_length=255, blank=True, verbose_name='冻结时间') #冻结
vipime_time = models.DateTimeField(blank=True, default='2099-12-28 23:59:59', verbose_name='登录时限') # 登录有效期,开通一年有效期,半年有效期会员账号用
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') #后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')#最后一次登录时间
class dnguser(models.Model):#后台会员表
uid_int = models.IntegerField(blank=False, verbose_name='会员ID')#会员ID, 设置不能为空
username_str = models.CharField(max_length=255, unique=True, blank=False, verbose_name='会员账号') #会员账号, unique不能重复,不许为空
password_str = models.CharField(max_length=255, blank=False, verbose_name='会员密码') #会员密码MD5加密,不许为空
name_str = models.CharField(max_length=255, blank=True, verbose_name='昵称') #会员昵称
gender_str = models.CharField(max_length=255, blank=True, verbose_name='性别') #性别,默认空
introduce_str = models.CharField(max_length=255, blank=True, verbose_name='个人简介') #个人简介,默认空
emall_str = models.CharField(max_length=255, blank=True, verbose_name='邮箱') #会员邮箱
mobile_str = models.CharField(max_length=255, blank=True, verbose_name='手机号') #手机号接收短信等
group_int = models.IntegerField(default=2, verbose_name='用户组')#填写用户组ID
rank_str = models.CharField(max_length=255, blank=True, verbose_name='等级')
gm_bool = models.BooleanField(default=False, verbose_name='超级管理') # 账号超级管理员开关,False=不是超级管理 True=是超级管理员
money_int = models.IntegerField(default=0, verbose_name='余额')#余额,默认值为0,不支持小数点
totalmoney_int = models.IntegerField(default=0, verbose_name='累计充值') # 默认值为0,不支持小数点
totalspend_int = models.IntegerField(default=0, verbose_name='累计消费') # 默认值为0,不支持小数点
integral_int = models.IntegerField(default=0, verbose_name='积分')#积分,默认值为0
spread_int = models.IntegerField(default=0, verbose_name='推广注册')#默认值为0,不支持小数点
ip_str = models.CharField(max_length=255, blank=True, verbose_name='登录IP') #登录ip地址
shebei_str = models.CharField(max_length=255, blank=True, verbose_name='登录设备') #登录后台设备
cookie_str = models.CharField(max_length=255, blank=True, verbose_name='cookie') # 后台客户cookie
token_str = models.CharField(max_length=255, blank=True, verbose_name='token密钥') #后台客户token密钥,预留加密授权登录用
days_int = models.IntegerField(default=0, verbose_name='登录天数')#登录天数
pwderror_int = models.IntegerField(default=0, verbose_name='密错次数')#密码错误次数
frozen_bool = models.BooleanField(default=True, verbose_name='登陆开关') #账号限制登录,False=没有禁止 True=账号禁止
frozentime_str = models.CharField(max_length=255, blank=True, verbose_name='冻结时间') #冻结
vipime_time = models.DateTimeField(blank=True, default='2099-12-28 23:59:59', verbose_name='登录时限') # 登录有效期,开通一年有效期,半年有效期会员账号用
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') #后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')#最后一次登录时间
class usergroup(models.Model): #前台会员组表
gid_int = models.IntegerField(blank=False, unique=True, verbose_name='用户组id')
gname_str = models.CharField(max_length=255, unique=True, blank=False, verbose_name='用户组名称')
uperior_int = models.IntegerField(default=0, verbose_name='上级用户组') #0没有上级,填写菜单ID
integral_int = models.IntegerField(default=0, verbose_name='积分阈值')
money_int = models.IntegerField(default=0, verbose_name='余额阈值')
totalmoney_int = models.IntegerField(default=0, verbose_name='充值阈值')
totalspend_int = models.IntegerField(default=0, verbose_name='消费阈值')
spread_int = models.IntegerField(default=0, verbose_name='推广阈值')
added_int = models.IntegerField(default=0, verbose_name='每日新增')
look_int = models.IntegerField(default=0, verbose_name='每日查看')
space_int = models.IntegerField(default=0, verbose_name='每日上传')
download_int = models.IntegerField(default=0, verbose_name='每日下载')
trial_bool = models.BooleanField(default=False, verbose_name='自动过审')
upload_bool = models.BooleanField(default=False, verbose_name='上传权限')
download_bool = models.BooleanField(default=False, verbose_name='下载权限')
menu_text = models.TextField(blank=True, verbose_name='菜单权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
added_text = models.TextField(blank=True, verbose_name='新增权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
delete_text = models.TextField(blank=True, verbose_name='删除权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
update_text = models.TextField(blank=True, verbose_name='修改权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
see_text = models.TextField(blank=True, verbose_name='查看权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|,PS:拥有菜单权限,就默认可以查看菜单,此查看权限是,方便设置此菜单下一些查看带星号手机之类的权限
class dngusergroup(models.Model): #后台用户组表
gid_int = models.IntegerField(blank=False, unique=True, verbose_name='用户组id')
gname_str = models.CharField(max_length=255, unique=True, blank=False, verbose_name='用户组名称')
uperior_int = models.IntegerField(default=0, verbose_name='上级用户组') #0没有上级,填写菜单ID
integral_int = models.IntegerField(default=0, verbose_name='积分阈值')
money_int = models.IntegerField(default=0, verbose_name='余额阈值')
totalmoney_int = models.IntegerField(default=0, verbose_name='充值阈值')
totalspend_int = models.IntegerField(default=0, verbose_name='消费阈值')
spread_int = models.IntegerField(default=0, verbose_name='推广阈值')
added_int = models.IntegerField(default=0, verbose_name='每日新增')
look_int = models.IntegerField(default=0, verbose_name='每日查看')
space_int = models.IntegerField(default=0, verbose_name='每日上传')
download_int = models.IntegerField(default=0, verbose_name='每日下载')
trial_bool = models.BooleanField(default=False, verbose_name='自动过审')
upload_bool = models.BooleanField(default=False, verbose_name='上传权限')
download_bool = models.BooleanField(default=False, verbose_name='下载权限')
menu_text = models.TextField(blank=True, verbose_name='菜单权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
added_text = models.TextField(blank=True, verbose_name='新增权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
delete_text = models.TextField(blank=True, verbose_name='删除权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
update_text = models.TextField(blank=True, verbose_name='修改权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|
see_text = models.TextField(blank=True, verbose_name='查看权限')#填写对应菜单ID, 格式:|菜单1||菜单2||菜单3|,PS:拥有菜单权限,就默认可以查看菜单,此查看权限是,方便设置此菜单下一些查看带星号手机之类的权限
class route(models.Model): #前台菜单表
uid_int = models.IntegerField(blank=False, unique=True, verbose_name='菜单id')
name_str = models.CharField(max_length=255, unique=True, blank=False, verbose_name='菜单名称')
url_str = models.CharField(max_length=255, blank=True, verbose_name='菜单URL')
icon_str = models.CharField(max_length=255, blank=True, default='fa fa-desktop', verbose_name='菜单图标')
model_str = models.CharField(max_length=255, blank=True, default='cover', verbose_name='菜单模型')# list=数据列表页面, form=表单提交页面 ,cover=无属性封面 ,url = 单独链接菜单,none = 空白页
superior_int = models.IntegerField(default=0, verbose_name='上级菜单')
sort_int = models.IntegerField(default=0, verbose_name='菜单排序')
integral_int = models.IntegerField(default=0, verbose_name='积分门槛')
money_int = models.IntegerField(default=0, verbose_name='余额门槛')
totalmoney_int = models.IntegerField(default=0, verbose_name='充值门槛')
totalspend_int = models.IntegerField(default=0, verbose_name='消费门槛')
spread_int = models.IntegerField(default=0, verbose_name='推广门槛')
display_bool = models.BooleanField(default=True, verbose_name='菜单显示')
prove_bool = models.BooleanField(default=True, verbose_name='权限验证')
seotirle_str = models.CharField(max_length=255, blank=True, verbose_name='SEO标题')
keywords_str = models.CharField(max_length=255, blank=True, verbose_name='SEO关键词')
description_str = models.CharField(max_length=255, blank=True, verbose_name='SEO描述')
class dngroute(models.Model): # 后台菜单表
uid_int = models.IntegerField(blank=False, unique=True, verbose_name='菜单id')
name_str = models.CharField(max_length=255, unique=True, blank=False, verbose_name='菜单名称')
url_str = models.CharField(max_length=255, blank=True, verbose_name='菜单URL')
icon_str = models.CharField(max_length=255, blank=True, default='fa fa-desktop', verbose_name='菜单图标')
model_str = models.CharField(max_length=255, blank=True, default='cover', verbose_name='菜单模型')# list=数据列表页面, form=表单提交页面 ,cover=无属性封面 ,url = 单独链接菜单,none = 空白页
superior_int = models.IntegerField(default=0, verbose_name='上级菜单')
sort_int = models.IntegerField(default=0, verbose_name='菜单排序')
integral_int = models.IntegerField(default=0, verbose_name='积分门槛')
money_int = models.IntegerField(default=0, verbose_name='余额门槛')
totalmoney_int = models.IntegerField(default=0, verbose_name='充值门槛')
totalspend_int = models.IntegerField(default=0, verbose_name='消费门槛')
spread_int = models.IntegerField(default=0, verbose_name='推广门槛')
display_bool = models.BooleanField(default=True, verbose_name='菜单显示')
prove_bool = models.BooleanField(default=True, verbose_name='权限验证')
seotirle_str = models.CharField(max_length=255, blank=True, verbose_name='SEO标题')
keywords_str = models.CharField(max_length=255, blank=True, verbose_name='SEO关键词')
description_str = models.CharField(max_length=255, blank=True, verbose_name='SEO描述')
class red(models.Model): #前台日志
uid_int = models.IntegerField(blank=False, verbose_name='会员id') # 所属会员的ID
title_str = models.CharField(max_length=255, blank=True, verbose_name='访问标题')
url_str = models.CharField(max_length=255, blank=True, verbose_name='访问网址')
shebei_str = models.CharField(max_length=255, blank=True, verbose_name='登录设备')
ip_str = models.CharField(max_length=255, blank=True, verbose_name='登录IP')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
class dngred(models.Model): #后台日志
uid_int = models.IntegerField(blank=False, verbose_name='会员id') # 所属会员的ID
title_str = models.CharField(max_length=255, blank=True, verbose_name='访问标题')
url_str = models.CharField(max_length=255, blank=True, verbose_name='访问网址')
shebei_str = models.CharField(max_length=255, blank=True, verbose_name='登录设备')
ip_str = models.CharField(max_length=255, blank=True, verbose_name='登录IP')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')
class htmlsetup(models.Model): #前台设置
title_str = models.CharField(max_length=255, blank=False, default='DngAdmin后台系统-为极速开发而生!', verbose_name='首页SEO标题')
logotitle_str = models.CharField(max_length=255, blank=False, default='DNG系统', verbose_name='品牌名称')
keywords_str = models.CharField(max_length=255, blank=True, default='DngAdmin后台系统', verbose_name='META关键词')
description_str = models.CharField(max_length=255, blank=True, default='DngAdmin后台系统1.0-基于python和Django原生开发,为极速开发而生!', verbose_name='META描述')
file_str = models.CharField(max_length=255, blank=True, verbose_name='备案号') #备案号
statistics_text = models.TextField(blank=True, verbose_name='统计代码')#统计代码
register_bool = models.BooleanField(default=True, verbose_name='注册开关')
http_bool = models.BooleanField(default=True, verbose_name='网站开关')
inwidth_int = models.IntegerField(default=120, verbose_name='最小表宽')
wide_int = models.IntegerField(default=800, verbose_name='弹窗宽度')
high_int = models.IntegerField(default=600, verbose_name='弹窗高度')
limit_int = models.IntegerField(default=20, verbose_name='默认条数')
toolbar_bool = models.BooleanField(default=True, verbose_name='头工具栏')
skinline_str = models.CharField(max_length=255, blank=True, verbose_name='表格边线')
skinsize_str = models.CharField(max_length=255, blank=True, default='sm',verbose_name='表格缩放')
page_bool = models.BooleanField(default=True, verbose_name='底部分页')
exports_str = models.CharField(max_length=255, blank=True,default='exports', verbose_name='导出表格')
print_str = models.CharField(max_length=255, blank=True, default='print', verbose_name='打印表格')
search_bool = models.BooleanField(default=True, verbose_name='搜索表格')
class setup(models.Model): #后台设置
setupname_str = models.CharField(max_length=255, blank=False, default='DngAdmin后台系统', verbose_name='系统名称') #系统名称, 不许为空
domain_str = models.CharField(max_length=255, blank=False, verbose_name='系统域名') #系统域名, 不许为空
file_str = models.CharField(max_length=255, blank=True, verbose_name='备案号') #备案号
edition_str = models.CharField(max_length=255, blank=True, default='DngAdmin版本1.0', verbose_name='版本号') #版本号
statistics_text = models.TextField(blank=True, verbose_name='统计代码')#统计代码
inwidth_int = models.IntegerField(default=160, verbose_name='最小表宽')
wide_int = models.IntegerField(default=800, verbose_name='弹窗宽度')
high_int = models.IntegerField(default=600, verbose_name='弹窗高度')
limit_int = models.IntegerField(default=20, verbose_name='默认条数')
toolbar_bool = models.BooleanField(default=True, verbose_name='头工具栏')
skinline_str = models.CharField(max_length=255, blank=True, verbose_name='表格边线')
skinsize_str = models.CharField(max_length=255, blank=True, default='sm',verbose_name='表格缩放')
page_bool = models.BooleanField(default=True, verbose_name='底部分页')
exports_str = models.CharField(max_length=255, blank=True,default='exports', verbose_name='导出表格')
print_str = models.CharField(max_length=255, blank=True, default='print', verbose_name='打印表格')
search_bool = models.BooleanField(default=True, verbose_name='搜索表格')
class protect(models.Model): #前台安全
uid_int = models.IntegerField(blank=False, unique=True, verbose_name='安全ID') # 安全策略的ID, 设置不能为空,不可重复
entrance_str = models.CharField(max_length=255, blank=True, verbose_name='安全入口') #后台安全入口
prescription_int = models.IntegerField(blank=True, default=86400, verbose_name='Cookies时效') #Cookies时效, 单位毫秒,默认24小时
salt_str = models.CharField(max_length=255, blank=True, verbose_name='加密盐') #解析COOKIE的加密盐
apipsd_str = models.CharField(max_length=255, blank=True, verbose_name='Api密码') # 解析COOKIE的加密盐
tokenpsd_str = models.CharField(max_length=255, blank=True, verbose_name='Token密钥') # 解析COOKIE的加密盐
requests_int = models.IntegerField(blank=False, default=10, verbose_name='密错次数') #防暴力破解,超过次数限制登录
psdreq_int = models.IntegerField(blank=False, default=24, verbose_name='冻结时间') # 密码错误后冻结,单位小时
graphic_bool = models.BooleanField(default=True, verbose_name='图码验证') # 图形验证码开关,False=关闭 True=开启
station_bool = models.BooleanField(default=False, verbose_name='邮件验证') # 跨站POST开关,False=关闭 True=开启
sms_bool = models.BooleanField(default=False, verbose_name='短信验证') #短信验证开关,False=不开 True=开启
useragent_str = models.CharField(max_length=255, blank=True, verbose_name='允许设备') #允许useragent设备,分割线|分割
area_str = models.CharField(max_length=255, blank=True, verbose_name='允许地区') #允许登录得地区,分割线|分割
tongshi_bool = models.BooleanField(default=False, verbose_name='同时在线') #同时在线开关,False=不允许同时 True=允许同时
iptxt_text = models.TextField(blank=True, verbose_name='禁止IP')#富文本超大字符串, |符号分割
class security(models.Model): #后台安全
uid_int = models.IntegerField(blank=False, unique=True, verbose_name='安全ID') # 安全策略的ID, 设置不能为空,不可重复
entrance_str = models.CharField(max_length=255, blank=True, verbose_name='安全入口') #后台安全入口
prescription_int = models.IntegerField(blank=True, default=86400, verbose_name='Cookies时效') #Cookies时效, 单位毫秒,默认24小时
salt_str = models.CharField(max_length=255, blank=True, verbose_name='加密盐') #解析COOKIE的加密盐
apipsd_str = models.CharField(max_length=255, blank=True, verbose_name='Api密码') # 解析COOKIE的加密盐
tokenpsd_str = models.CharField(max_length=255, blank=True, verbose_name='Token密钥') # 解析COOKIE的加密盐
requests_int = models.IntegerField(blank=False, default=10, verbose_name='密错次数') #防暴力破解,超过次数限制登录
psdreq_int = models.IntegerField(blank=False, default=24, verbose_name='冻结时间') # 密码错误后冻结,单位小时
graphic_bool = models.BooleanField(default=True, verbose_name='图码验证') # 图形验证码开关,False=关闭 True=开启
station_bool = models.BooleanField(default=False, verbose_name='邮件验证') # 跨站POST开关,False=关闭 True=开启
sms_bool = models.BooleanField(default=False, verbose_name='短信验证') #短信验证开关,False=不开 True=开启
useragent_str = models.CharField(max_length=255, blank=True, verbose_name='允许设备') #允许useragent设备,分割线|分割
area_str = models.CharField(max_length=255, blank=True, verbose_name='允许地区') #允许登录得地区,分割线|分割
tongshi_bool = models.BooleanField(default=False, verbose_name='同时在线') #同时在线开关,False=不允许同时 True=允许同时
iptxt_text = models.TextField(blank=True, verbose_name='禁止IP')#富文本超大字符串, |符号分割
class mail(models.Model): #邮件设置
mail_id = models.IntegerField(blank=False, unique=True, verbose_name='邮件ID')
type_str = models.CharField(max_length=255, blank=True, default='POP3/SMTP',verbose_name='邮件发送方式')
host_str = models.CharField(max_length=255, blank=True, default='smtp.qq.com', verbose_name='SMTP服务器')
port_str = models.CharField(max_length=255, blank=True, default='587',verbose_name='SMTP端口')
pass_str = models.CharField(max_length=255, blank=True, verbose_name='SMTP授权码')
from_str = models.CharField(max_length=255, blank=True, verbose_name='发件人邮箱')
requests_int = models.IntegerField(blank=False, default=30, verbose_name='用户每日邮件上限')
youxiao_int = models.IntegerField(blank=False, default=180, verbose_name='有效时间:秒')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') # 后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间') # 最后一次登录时间
class sms(models.Model): #短信设置
mail_id = models.IntegerField(blank=False, unique=True, verbose_name='短信ID')
ali_shudanxuan = models.CharField(max_length=255, choices=(('阿里市场-国阳网','阿里市场-国阳网'),('阿里市场-聚美智数','阿里市场-聚美智数'),('阿里市场-鼎信科技','阿里市场-鼎信科技'),('阿里市场-云智信','阿里市场-云智信'),('阿里市场-深智科技','阿里市场-深智科技'),('自定义短信模块','自定义短信模块')),default='阿里市场-国阳网', verbose_name='短信供应商',)
appcode_str = models.CharField(max_length=255, blank=True, verbose_name='阿里AppCode')
requests_int = models.IntegerField(blank=False, default=20, verbose_name='用户每日短信上限')
youxiao_int = models.IntegerField(blank=False, default=180, verbose_name='有效时间:秒')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') # 后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间') # 最后一次登录时间
class pluguser(models.Model): #插件设置
plug_id = models.IntegerField(blank=False, unique=True, verbose_name='用户ID')
plugname_stop = models.CharField(max_length=255, blank=True,verbose_name='DNG账号')
pluggroup_stop = models.CharField(max_length=255, blank=True,verbose_name='用户组')
mobile_stop = models.CharField(max_length=255, blank=True, verbose_name='手机号') # 手机号接收短信等
money_stop = models.IntegerField(default=0, verbose_name='余额') # 余额,默认值为0,不支持小数点
integral_stop = models.IntegerField(default=0, verbose_name='积分') # 积分,默认值为0,不支持小数点
spread_stop = models.IntegerField(default=0, verbose_name='推广') # 推广,默认值为0,不支持小数点
appcode_stop = models.CharField(max_length=255, blank=True, verbose_name='AppCode密钥')
cookie_stop = models.CharField(max_length=255, blank=True, verbose_name='Cookie密钥')
token_stop = models.CharField(max_length=255, blank=True, verbose_name='Token密钥')
lockcode_stop = models.CharField(max_length=255, blank=True, verbose_name='机器码')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') #后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')#最后一次登录时间
class formdemo(models.Model): #表单组件演示
demoid_id = models.IntegerField(blank=False, unique=True, verbose_name='表单ID')
wenben_str = models.CharField(max_length=255, blank=True, verbose_name='文本框')
jinyong_stop = models.CharField(max_length=255, blank=True, default='新手用户组',verbose_name='禁用框')
mima_psd = models.CharField(max_length=255, blank=True, verbose_name='密码框')
shouji_phone = models.CharField(max_length=255, blank=True, verbose_name='手机框')
youjian_email = models.CharField(max_length=255, blank=True, verbose_name='邮件框')
shenfen_entity = models.CharField(max_length=255, blank=True, verbose_name='身份证框')
shuzi_int = models.IntegerField(blank=True, default=0, verbose_name='数字框')
xuanze_xiala = models.CharField(max_length=255, choices=(('下拉选项 01','下拉选项 01'),('下拉选项 02','下拉选项 02'),('下拉选项 03','下拉选项 03'),('下拉选项 04','下拉选项 04')),default='下拉选项 01', verbose_name='下拉框',)
xuanze_xuanze = models.CharField(max_length=255, choices=(('选择选项 01','选择选项 01'),('选择选项 02','选择选项 02'),('选择选项 03','选择选项 03'),('选择选项 04','选择选项 04')),default='选择选项 01', verbose_name='选择框', )
shu_shudanxuan = models.CharField(max_length=255, choices=(('竖单选项 01','竖单选项 01'),('竖单选项 02','竖单选项 02'),('竖单选项 03','竖单选项 03'),('竖单选项 04','竖单选项 04')),default='竖单选项 01', verbose_name='竖单选框', )
heng_hengdanxuan = models.CharField(max_length=255, choices=(('横单选项 01','横单选项 01'),('横单选项 02','横单选项 02'),('横单选项 03','横单选项 03'),('横单选项 04','横单选项 04')),default='横单选项 01', verbose_name='横单选框', )
kaiguan_bool = models.BooleanField(default=False, verbose_name='启动开关')
riqi_years = models.DateTimeField(blank=True, default='2021-06-01', verbose_name='日期框')
datetime_datetime = models.DateTimeField(blank=True, default='2099-12-28 23:59:59', verbose_name='日期时间框')
fuwenben_text = models.TextField(blank=True,verbose_name='富文本框')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') #后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间')#最后一次登录时间
class shenbao(models.Model): #故障申报演示
sb_id = models.IntegerField(blank=False, unique=True, verbose_name='申报ID')
name_str = models.CharField(max_length=255, blank=True, verbose_name='申报人')
yonghuzu_stop = models.CharField(max_length=255, blank=True, default='维护组', verbose_name='申报组')
shouji_phone = models.CharField(max_length=255, blank=True, verbose_name='手机')
youjian_email = models.CharField(max_length=255, blank=True, verbose_name='邮件')
xuanze_xiala = models.CharField(max_length=255, choices=(('营销部', '营销部'), ('技术部', '技术部'), ('售后部', '售后部'), ('后勤部', '后勤部')), default='营销部',verbose_name='故障部门', )
xuanze_xuanze = models.CharField(max_length=255, choices=(('路由器', '路由器'), ('交换机', '交换机'), ('电脑', '电脑'), ('打印机', '打印机')), default='路由器',verbose_name='故障设备', )
shu_shudanxuan = models.CharField(max_length=255, choices=(('李老师', '李老师'), ('罗老师', '罗老师'), ('金老师', '金老师'), ('宋老师', '宋老师')), default='李老师',verbose_name='故障联络人', )
heng_hengdanxuan = models.CharField(max_length=255, choices=(('网络故障', '网络故障'), ('电力故障', '电力故障'), ('通信故障', '通信故障'), ('显示故障', '显示故障')), default='网络故障',verbose_name='故障项目', )
kaiguan_bool = models.BooleanField(default=False, verbose_name='联系开关')
riqi_years = models.DateTimeField(blank=True, default='2021-06-01', verbose_name='故障日期')
fuwenben_text = models.TextField(blank=True, verbose_name='故障详细描述')
create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间') # 后台注册时间
update_time = models.DateTimeField(auto_now=True, verbose_name='更新时间') # 最后一次登录时间
| true |
67ca656d9a91c28a25dca98145360925f103b641 | Python | Vaileung/coobook_test | /三、数字日期和时间/3.6 复数的数学运算.py | UTF-8 | 446 | 3.609375 | 4 | [] | no_license | a = complex(2, 4)
b = 3 - 5j
print(a)
print(b)
print(a.real)
print(a.imag)
print(a.conjugate())
print('001'.center(50, '='))
print(a + b)
print(a * b)
print(a / b)
print(abs(a))
print('002'.center(50, '='))
import cmath
print(cmath.sin(a))
print(cmath.cos(a))
print(cmath.exp(a))
print('003'.center(50, '='))
import numpy as np
a = np.array([2 + 3j, 4 + 5j, 6 - 7j, 8 + 9j])
print(a)
print(a + 2)
print(np.sin(a))
print(cmath.sqrt(-1)) | true |
0ce92a8213b0d252cd1df45a0bb4d5e3ca8028fc | Python | diksha12p/DSA_Practice_Problems | /Find the Town Judge.py | UTF-8 | 491 | 2.984375 | 3 | [] | no_license | class Solution:
def findJudge(self, N: int, trust) -> int:
candidates = [False for _ in range(N)]
for entry in trust:
candidates[entry[0] - 1] = True
for i,x in enumerate(candidates):
if not x:
return i+1
return -1
# judge = [i for i, x in enumerate(candidates) if not x else -1][0]
# return judge
sol = Solution()
N = 4
trust = [[1,3],[1,4],[2,3],[2,4],[4,3]]
print(sol.findJudge(N, trust))
| true |
cf4bc2e866515a3013bf0e67fabe725b579d3839 | Python | borin98/Projetos-De-Programa-o-No-Atom | /Projetos Em Phyton/herança.py | UTF-8 | 870 | 3.328125 | 3 | [] | no_license | class Pais ( ) :
def __init__ ( self, sobrenome, cor_dos_olhos ) :
self.sobrenome = sobrenome
self.cor_dos_olhos = cor_dos_olhos
def informacao ( self ) :
print ( "Último Nome : "+self.sobrenome )
print ( "Cor Dos Olhos : "+self.cor_dos_olhos )
class Crianca ( Pais ) :
def __init__ ( self, sobrenome, cor_dos_olhos, numero_brinquedos ) :
Pais.__init__ ( self, sobrenome, cor_dos_olhos )
self.numero_brinquedos = numero_brinquedos
def informacao ( self ) :
print ( "Último Nome : "+self.sobrenome )
print ( "Cor Dos Ólhos : "+self.cor_dos_olhos )
print ( "Número de brinquedos : "+str ( self.numero_brinquedos ) )
pessoa_adulta = Pais ( "Silva", "azul" )
pessoa_adulta.informacao ( )
pessoa_crianca = Crianca ( "Silva", "castanho", 14 )
pessoa_crianca.informacao ( )
| true |
2c40b2e10318088e1dd9519f02312c4d6e62cc4d | Python | devmohit-live/Scrapers | /amazon.py | UTF-8 | 830 | 2.625 | 3 | [] | no_license | import requests as rqs,os
from bs4 import BeautifulSoup as soup
user_agent = {'User-agent': 'Mozilla/5.0'}
http_response= rqs.get("https://www.amazon.in/s?k=fossil+watches",headers=user_agent)
http_response_text=http_response.text
soup_object=soup(http_response_text,"lxml")
i=0
os.mkdir('amazon_img')
for a in soup_object.find_all("div", {"class":"a-section a-spacing-medium"}):
#print(a.prettify())
i=i+1
try:
name=a.img["alt"]
print(name)
image=a.img["src"]
print(image)
price=a.find("span",{"class":"a-price-whole"})
print("₹"+price.text+"\n")
byte=rqs.get(image).content
with open("amazon_img/"+str(i)+".jpg","wb+") as f:
f.write(byte)
except Exception as e:
print("Not Found.",e)
| true |
b3e76c34cee279620723eab9c92a61be7192ee6b | Python | jordovolk/Programming-Examples | /p5.py | UTF-8 | 878 | 3.96875 | 4 | [] | no_license | # Programmer: Jordan Volk
# Date Written: October 13, 2015
# Program Name: P5.py
# Company Name: HTC-CCIS1505
#1
#1 Prompt for the full name and report its character count (Python 2: raw_input / print statement).
strWholeName = raw_input("Enter your first and last name: ")
print "Hi", strWholeName + ", your name has", len(strWholeName), "characters in it"
print
#2 Tuple of month names.
# NOTE(review): "Novemeber" is misspelled, so the membership test in #5 will
# never match a correctly spelled "November".
tupMon = ("January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "Novemeber", "December",)
#3 Print the three-letter abbreviation of every month.
for month in tupMon:
    print month[:3]
print
#4 Print abbreviations only for months starting with "J".
for month in tupMon:
    if "J" in month[0]:
        print month[:3]
#5 Case-insensitive month lookup (input is title-cased before the test).
print
strMonth = raw_input("Enter a month of the year ")
strMonth = strMonth.title()
if strMonth in tupMon:
    print "Month found"
else:
    print "Month not found"
print
#6 Print the user's name ten times with a loop counter.
strName = raw_input("Enter your name ")
for strNumber in range(1, 11, 1):
    print strName, "loop counter = ", strNumber
print
#7 Print the odd numbers below 20.
for strOdd in range(1,20,2):
    print strOdd
| true |
c27593e5d26325ff33ac4c5776d7aa9971945cc3 | Python | khmahmud101/Data-Structure-Algorithm | /stack.py | UTF-8 | 631 | 4.03125 | 4 | [] | no_license | li = []
# Demo: a plain Python list already behaves as a stack —
# append() pushes onto the end, pop() removes from the end.
li.append(1)
print(li)
li.append(2)
print(li)
li.append(3)
print(li)
li.pop()
print(li)
li.pop()
li.pop()
print(li)
# Guard against popping an empty list (pop() on [] raises IndexError).
if li != []:
    li.pop()
class Stack:
    """A minimal LIFO stack backed by a Python list (top == end of list)."""
    def __init__(self):
        self.items = []
    def push(self,item):
        """Place item on top of the stack and echo the current contents."""
        self.items.append(item)
        print("push item",self.items)
    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.items.pop()
    def is_empty(self):
        """Return True when the stack holds no items."""
        return self.items == []
if __name__ == "__main__":
    # Demo: push three items, then pop until the stack is empty.
    s = Stack()
    s.push(1)
    s.push(2)
    s.push(3)
    while not s.is_empty():
        item = s.pop()
        print("pop item:",item)
| true |
df3d9b9b7ad474e34b29601fdf628d4a24c37e44 | Python | ana-romero/mega2021-kenzo-sage | /kenzo_interfaces.py | UTF-8 | 1,661 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
r"""
Check for Kenzo
"""
from sage.libs.ecl import ecl_eval
from . import Feature, FeatureTestResult
class Kenzo(Feature):
    r"""
    A :class:`sage.features.Feature` describing the presence of ``Kenzo``.
    EXAMPLES::
        sage: from sage.features.kenzo import Kenzo
        sage: Kenzo().is_present()  # optional - kenzo
        FeatureTestResult('Kenzo', True)
    """
    def __init__(self):
        r"""
        TESTS::
            sage: from sage.features.kenzo import Kenzo
            sage: isinstance(Kenzo(), Kenzo)
            True
        """
        Feature.__init__(self, name="Kenzo", spkg="kenzo",
                         url="https://github.com/miguelmarco/kenzo/")
    def _is_present(self):
        r"""
        Check whether Kenzo is installed and works.
        EXAMPLES::
            sage: from sage.features.kenzo import Kenzo
            sage: Kenzo()._is_present() # optional - kenzo
            FeatureTestResult('Kenzo', True)
        """
        # Redirection of ECL and Maxima stdout to /dev/null
        # This is also done in the Maxima library, but we
        # also do it here for redundancy.
        # NOTE: the ecl_eval calls below are order-dependent — they mutate
        # global ECL stream state before attempting the load.
        ecl_eval(r"""(defparameter *dev-null* (make-two-way-stream
        (make-concatenated-stream) (make-broadcast-stream)))""")
        ecl_eval("(setf original-standard-output *standard-output*)")
        ecl_eval("(setf *standard-output* *dev-null*)")
        # Actually loading the Kenzo system is itself the availability test.
        try:
            ecl_eval("(require :kenzo)")
        except RuntimeError:
            return FeatureTestResult(self, False, reason="Unable to make ECL require kenzo")
        return FeatureTestResult(self, True)
| true |
9be652bb1a66750aa04310804f7b27ecf9b7557d | Python | xiawen0731/drawer | /ui/frame_operation.py | UTF-8 | 1,657 | 2.75 | 3 | [] | no_license | # encoding=utf8
import Tkinter as tk
import conf
import choose
class OperationFrame(tk.Frame):
    """Tk frame driving a "lucky draw": cycles through candidates until stopped.

    The space bar (or the start/stop buttons) toggles the drawing loop;
    each stopped result is removed from the pool and shown in the app's
    result frame.
    """
    def __init__(self, parent, app):
        tk.Frame.__init__(self, parent, bg=conf.BG)
        self.parent = parent
        self.app = app
        # Candidate pool; presumably a set of (id, name)-like tuples — see
        # the '%s - %s' formatting in choose() — TODO confirm in choose module.
        self.emp_list = choose.load_list()
        self.choosing = False
        # Space bar toggles the draw from anywhere in the application.
        self.bind_all("<space>", self.key)
        self.render()
    def key(self, event):
        # Toggle handler for the space key.
        if self.choosing == False:
            self.start_choosing()
        else:
            self.stop_choosing()
    def render(self):
        # Build the UI: result label on the left, start/stop buttons on the right.
        self.pack(fill=tk.X)
        self.result_str = tk.StringVar()
        result_label = tk.Label(self, textvariable=self.result_str,
                                justify=tk.CENTER, width=20)
        result_label.pack(side=tk.LEFT)
        end_btn = tk.Button(self, text=u'结束', width=10,
                            command=self.stop_choosing)
        end_btn.pack(side=tk.RIGHT)
        start_btn = tk.Button(self, text=u'开始', width=10,
                              command=self.start_choosing)
        start_btn.pack(side=tk.RIGHT)
    def choose(self):
        # One tick of the draw loop: pick a candidate, show it, re-schedule.
        if not self.choosing:
            return
        self.result = choose.get_one(self.emp_list)
        self.result_str.set('%s - %s' % self.result)
        self.app.master.after(conf.CHOOSING_INTERVAL, self.choose)
    def start_choosing(self):
        self.choosing = True
        self.choose()
    def stop_choosing(self):
        # Freeze the current result, remove it from the pool and record it.
        # NOTE(review): nothing guards against the pool becoming empty —
        # a later choose.get_one on an empty set may fail; confirm.
        if self.choosing == False:
            return
        self.choosing = False
        self.emp_list = self.emp_list - set([self.result])
        self.app.result_frame.add_one(self.result_str.get())
| true |
e65d32870edd9a042d921a866136d773f718cf38 | Python | kudashevvn/python | /practic_6_10_3.py | UTF-8 | 454 | 3.203125 | 3 | [] | no_license | class Clients:
    def __init__(self, client_name, client_balance):
        self.client_name = client_name
        # NOTE(review): attribute name 'client_babance' is misspelled but is
        # used consistently throughout the class; renaming would break callers.
        self.client_babance = client_balance
def set_balance(self, client_balance):
if client_balance > 0 and isinstance(client_balance, int):
self.client_babance = client_balance
    def get_about_client(self):
        # Human-readable summary (Russian user-facing text, kept verbatim).
        return str(f'Клиент {self.client_name}. Баланс: {self.client_babance} руб.')
4779d735ce890e152b798a1674e778aa6d9a9900 | Python | danieljhegeman/blackjack | /Hand.py | UTF-8 | 489 | 3.5625 | 4 | [] | no_license | class Hand():
def __init__(self, cards=None):
if not cards:
cards = []
self.cards = cards
def addCard(self, card):
self.cards.append(card)
def isBusted(self):
return self.score() > 21
def score(self):
aceCount = 0
total = 0
for card in self.cards:
if card == 1:
aceCount += 1
total += card
while aceCount > 0:
if total + 9 <= 21:
aceCount -= 1
total += 9
else:
break
return total
| true |
a133f9070dc1160acf64b0eb89635a3faa04ac0a | Python | Grey2k/jb.academy.python.tik-tak-toe | /Problems/Poster artist/main.py | UTF-8 | 38 | 2.71875 | 3 | [] | no_license | title = input().upper()
print(title)
| true |
e25d4988416b772794a5cc08bd893a8afe7127d0 | Python | moontree/leetcode | /version1/1207_Unique_Number_of_Occurrences.py | UTF-8 | 1,534 | 3.8125 | 4 | [] | no_license | """
Given an array of integers arr,
write a function that returns true if and only if the number of occurrences of each value in the array is unique.
Example 1:
Input:
arr = [1,2,2,1,1,3]
Output:
true
Explanation:
The value 1 has 3 occurrences, 2 has 2 and 3 has 1. No two values have the same number of occurrences.
Example 2:
Input:
arr = [1,2]
Output:
false
Example 3:
Input:
arr = [-3,0,1,-3,1,1,1,-3,10,0]
Output:
true
Constraints:
1 <= arr.length <= 1000
-1000 <= arr[i] <= 1000
"""
class Solution(object):
    def uniqueOccurrences(self, arr):
        """Return True iff no two distinct values in arr occur the same
        number of times (LeetCode 1207).

        :type arr: List[int]
        :rtype: bool
        """
        from collections import Counter  # local import keeps module-level deps unchanged
        counts = Counter(arr)
        # Occurrence counts are pairwise distinct iff collapsing them into a
        # set loses no elements.
        return len(counts) == len(set(counts.values()))
examples = [
    {
        "input": {
            "arr": [1, 2, 2, 1, 1, 3],
        },
        "output": True
    }, {
        "input": {
            "arr": [1, 2],
        },
        "output": False
    }, {
        "input": {
            "arr": [-3, 0, 1, -3, 1, 1, 1, -3, 10, 0],
        },
        "output": True
    }
]
import time
if __name__ == '__main__':
    solution = Solution()
    # Run every public method of Solution against each example and report the
    # result, whether it matched, and the elapsed wall-clock time.
    for n in dir(solution):
        if not n.startswith('__'):
            func = getattr(solution, n)
            print(func)
            for example in examples:
                # Fix: the original used Python-2 print statements here, which
                # made the whole file a SyntaxError under Python 3.
                print('----------')
                start = time.time()
                v = func(**example['input'])
                end = time.time()
                print(v, v == example['output'], end - start)
| true |
857a47e18dc96e28e04a08f09cbc429214bbe2f3 | Python | devaljain1998/networkx-ezdxfplay | /Algorithms/MText/functions.py | UTF-8 | 9,776 | 2.921875 | 3 | [] | no_license | import sys
import ezdxf
import os
import pprint
import math
import ezdxf
import json
from ezdxf.math import Vector
from pillarplus.math import find_distance, get_angle_between_two_points, directed_points_on_line
# Module-level setup: input/output DXF paths used by all helpers below.
file_path = 'Algorithms/MText/input/'
input_file = 'chamber.dxf'
output_file_path = 'Algorithms/MText/output/'
input_file_name = input_file.split('.')[0]
output_file = 'chamber_mtext.dxf'
# Reading the DXF file
try:
    dwg = ezdxf.readfile(file_path + input_file)
except IOError:
    print(f'Not a DXF file or a generic I/O error.')
    sys.exit(1)
except ezdxf.DXFStructureError:
    print(f'Invalid or corrupted DXF file.')
    sys.exit(2)
# Adding a new layer:
dwg.layers.new('TextLayer')
dwg.layers.new('PipingLayer')
# Global modelspace handle — the annotation helpers below draw into it.
msp = dwg.modelspace()
print(f'DXF File read success from {file_path}.')
# Reading the identification JSON:
json_file_path = 'Algorithms/MText/identification.json'
try:
    with open(json_file_path) as json_file:
        identification_json = json.load(json_file)
except Exception as e:
    print(f'Failed to load identification due to: {e}.')
    sys.exit(1)
# DXF MTEXT attachment-point codes (maps ezdxf constant names to their values).
MTEXT_ATTACHMENT_POINTS = {
    "MTEXT_TOP_LEFT": 1,
    "MTEXT_TOP_CENTER": 2,
    "MTEXT_TOP_RIGHT": 3,
    "MTEXT_MIDDLE_LEFT": 4,
    "MTEXT_MIDDLE_CENTER": 5,
    "MTEXT_MIDDLE_RIGHT": 6,
    "MTEXT_BOTTOM_LEFT": 7,
    "MTEXT_BOTTOM_CENTER": 8,
    "MTEXT_BOTTOM_RIGHT": 9,
}
def add_text_to_chamber(entity, params):
    """This function adds text to a chamber.
    Params is really useful here as it will be consisting of the outer boundries.

    Draws a slanted leader line from the chamber centre towards the nearest
    outer boundary corner, continues with a horizontal segment, and places a
    multi-line MTEXT label describing the chamber at the end of it.
    Args:
        entity (dict): chamber entity; must provide 'location', 'type',
            'finish_floor_level', 'invert_level' and 'chamber_depth'.
        params (dict): The params dict of PillarPlus (outer-boundary extents).
    """
    print(f'Inside text to function: {entity}')
    # Get the centre point of the chamber
    centre_point = entity["location"]
    # Find in which direction we need to draw the line from the outer
    # 1. Get the 4 corners:
    min_x, min_y = params["PP-OUTER minx"], params["PP-OUTER miny"]
    max_x, max_y = params["PP-OUTER maxx"], params["PP-OUTER maxy"]
    # 2. Now find the direction by check where is the centre-point closest
    if find_distance((min_x, 0), (centre_point[0], 0)) <= find_distance((centre_point[0], 0), (max_x, 0)):
        dir_x = min_x
    else:
        dir_x = max_x
    if find_distance((0, min_y), (0, centre_point[1])) <= find_distance((0, centre_point[1]), (0, max_y)):
        dir_y = min_y
    else:
        dir_y = max_y
    # Stretch distance in the direction of x and y:
    angle: float = get_angle_between_two_points(
        (dir_x, 0, 0), (0, dir_y, 0)) / 2
    # Draw in line in the direction of angle:
    slant_line_length = 300
    slant_line = directed_points_on_line(
        centre_point, angle, slant_line_length)
    msp.add_line(centre_point, slant_line[0], dxfattribs={
        'layer': 'TextLayer'})
    # Drawing straight line:
    straight_line_length = 500
    angle: float = 0
    straight_line = directed_points_on_line(
        slant_line[0], angle, straight_line_length)
    msp.add_line(slant_line[0], straight_line[0],
                 dxfattribs={'layer': 'TextLayer'})
    # Types of chambers:
    # gully trap chamber
    # inspection chamber
    # rainwater chamber
    if entity['type'] == 'gully trap chamber':
        size = '1\'.0"X1\'0"'
        text = f"""
        F.GL: {entity['finish_floor_level']}
        I.LVL: {entity['invert_level']}
        DEPTH: {entity['chamber_depth']}
        {entity['type'].upper()}
        SIZE: {size}
        """
        # MTEXT Formatting
        mtext = msp.add_mtext("", dxfattribs={'layer': 'TextLayer'})
        mtext += text
        mtext.dxf.char_height = 50
        point = list(straight_line[0])
        # Increasing the Y coordinate for proper positioning
        point[1] += 300
        mtext.set_location(point, None, MTEXT_ATTACHMENT_POINTS["MTEXT_TOP_CENTER"])
        # Setting border for the text:
        #mtext.dxf.box_fill_scale = 5
        print('Box Fill Scale: ', mtext.dxf.box_fill_scale)
        print('width', mtext.dxf.width)
    elif entity['type'] == 'inspection chamber':
        # NOTE(review): unlike the gully branch, these branches anchor the
        # text at straight_line[1] with no vertical offset — confirm intended.
        size = '1\'.6"X1\'6"'
        text = f"""
        F.GL: {entity['finish_floor_level']}
        I.LVL: {entity['invert_level']}
        DEPTH: {entity['chamber_depth']}
        {entity['type'].upper()}
        SIZE: {size}
        """
        mtext = msp.add_mtext(text, dxfattribs={'layer': 'TextLayer'})
        mtext.set_location(straight_line[1])
    elif entity['type'] == 'rainwater chamber':
        size = '1\'.0"X1\'0"'
        text = f"""
        F.GL: {entity['finish_floor_level']}
        I.LVL: {entity['invert_level']}
        DEPTH: {entity['chamber_depth']}
        {entity['type'].upper()}
        SIZE: {size}
        """
        mtext = msp.add_mtext(text, dxfattribs={'layer': 'TextLayer'})
        mtext.set_location(straight_line[1])
    else:
        raise ValueError(
            'Only chambers with types: ("gully trap chamber", "inspection chamber", "rainwater chamber") are allowed.')
    # Saving the file:
    try:
        dwg.saveas(output_file_path + output_file)
    except Exception as e:
        print(f'Failed to save the file due to the following exception: {e}')
        sys.exit(1)
    print(
        f'Successfully added slant_line: {slant_line} and straight_line: {straight_line}')
def add_text_to_piping(text: str, location: tuple, distance: float, rotation: float):
    """This function adds text to piping at certain 'distance' from the location provided with the rotation.
    The function accepts the text of the format: "///$$".
    This function then changes all the '/' and '$' into '\n' (Newline Char).
    Args:
        text (str): The text which is needed to be inserted. The text will be of the format "///$$"
        location (tuple): The location from which needs to be considered.
        distance (float): The distance from the location after which the text needed to be printed.
        rotation (float): Rotation (radians) at which the text needs to be printed on the dxf file.
    """
    # Replacing all the '/' and '$' with NEWLINE Char.
    text = text.replace('/', '\n')
    text = text.replace('$', '\n')
    # Finding the point where text should be placed.
    line = directed_points_on_line(location, rotation, distance)
    point_on_which_text_is_to_be_placed = line[0]
    # Placing the point at the location
    mtext = msp.add_mtext(text, dxfattribs = {'layer' : 'PipingText', 'style': 'OpenSans'})
    # Setting the location
    mtext.set_location(point_on_which_text_is_to_be_placed)
    # Char font size:
    mtext.dxf.char_height = 1
    # Bug fix: the original referenced an undefined name `angle_in_degree`
    # here, which raised NameError at runtime.
    print(f'Success in adding mtext at the location: {point_on_which_text_is_to_be_placed} and angle: {math.degrees(rotation)}.')
    try:
        dwg.saveas(output_file_path + output_file)
    except Exception as e:
        print(f'Failed to save the file due to the following exception: {e}')
        sys.exit(1)
def add_text_on_wall(point: tuple, text: str, wall):
    """This function adds text on the wall.

    Draws a slanted leader from `point` (bisecting the direction towards the
    nearest wall corner and the wall's opposite in-angle), a horizontal
    segment, and an MTEXT label at its end.
    Args:
        point (tuple): A list of point.
        text (str): The text which is needed to be added.
        wall (entity): Wall is an entity; must provide 'corners' and 'in_angle'.
    """
    # Get corners of the wall
    corners = wall['corners']
    # Check the point is closed to which corner
    closest_corner = corners[0] if find_distance(point, corners[0]) <= find_distance(point, corners[1]) else corners[1]
    # Get the in-angle and get opposite angle for it.
    # NOTE(review): assumes wall['in_angle'] is expressed in degrees, since it
    # is averaged below with math.degrees(angle) — confirm against producer.
    in_angle = wall['in_angle']
    opposite_in_angle = in_angle - 180
    # Stretch distance in the direction of x and y:
    vector = Vector(closest_corner[0] - point[0], closest_corner[1] - point[1])
    angle = vector.angle
    angle = math.degrees(angle)
    # In degree
    angle_for_slant_line = (angle + opposite_in_angle) / 2
    # Draw in line in the direction of angle:
    slant_line_length = 300
    slant_line = directed_points_on_line(
        point, math.radians(angle_for_slant_line), slant_line_length)
    msp.add_line(point, slant_line[0], dxfattribs={
        'layer': 'TextLayer'})
    # Drawing straight line:
    straight_line_length = 500
    angle: float = 0 if closest_corner[0] > 0 else math.pi
    straight_line = directed_points_on_line(
        slant_line[0], angle, straight_line_length)
    msp.add_line(slant_line[0], straight_line[0],
                 dxfattribs={'layer': 'TextLayer'})
    mtext = msp.add_mtext(text, dxfattribs={'layer': 'TextLayer'})
    mtext.dxf.char_height = 50
    point = list(straight_line[0])
    # Increasing the Y coordinate for proper positioning
    # NOTE(review): the -250/+60 offsets are empirically tuned magic numbers.
    point[0] -= 250
    # point[0] += 270
    point[1] += 60
    mtext.set_location(point, None, MTEXT_ATTACHMENT_POINTS["MTEXT_TOP_CENTER"])
    print('width', mtext.dxf.width)
    print(f'Success in adding mtext at the location: {point} and angle: {opposite_in_angle}.')
    try:
        dwg.saveas(output_file_path + output_file)
    except Exception as e:
        print(f'Failed to save the file due to the following exception: {e}')
        sys.exit(1)
# DRIVER: add_text_to_chamber
# entities = identification_json['entities']
# params = identification_json["params"]
# # Calling function by hardcoding:
# add_text_to_chamber(entities[67], params)
# DRIVER: add_text_to_piping
# add_text_to_piping("H/el/lo P/il$lar$Plus!", (0, 0), 10, math.pi / 4)
#DRIVER: add_text_to_wall
# Annotate the midpoint of the first wall's two corner points.
walls = identification_json['walls']
wall = walls[0]
corners = wall['corners']
point = ((corners[0][0] + corners[1][0]) / 2, (corners[0][1] + corners[1][1]) / 2, (corners[0][2] + corners[1][2]) / 2)
add_text_on_wall(point, "Hello PillarPlus!", wall)
| true |
05b7dd3ca71ece8860ff9f75c8671b06f3ba702f | Python | Luolingwei/LeetCode | /Math/Q168_Excel Sheet Column Title.py | UTF-8 | 272 | 3.25 | 3 | [] | no_license | class Solution:
def convertToTitle(self, n):
ans=''
dic=ord('A')
while n>0:
n,reminder=divmod(n-1,26)
ans+=chr(dic+reminder)
return ans[::-1]
# Smoke test: 701 -> 'ZY'.
a=Solution()
print(a.convertToTitle(701))
print(a.convertToTitle(28)) | true |
74b3913b595e224431248158a0473f8c7ac63a06 | Python | fpsawicki/02504-Computer-Vision | /slam/matrix.py | UTF-8 | 2,913 | 3.0625 | 3 | [] | no_license | import numpy as np
import cv2
def choose_points(src_pts, dst_pts, choices):
    """Randomly sample `choices` (src, dst) correspondences without replacement.

    Raises when more samples are requested than points available.
    Returns a numpy array of the paired points.
    """
    n_points = src_pts.shape[0]
    if choices > n_points:
        raise Exception(f'Invalid number of choices, max: {n_points}')
    picked = np.random.choice(n_points, size=choices, replace=False)
    # normalize points ?  (left open, as in the original implementation)
    return np.array([(src_pts[i], dst_pts[i]) for i in picked])
def find_projection_matrix(camera_matrix, src_pts, dst_pts, rot1, rot2, trans, choices=10):
    """ Tries to find unique solution for projection matrix
    camera_matrix: numpy array of calibrated camera (we assume that both cameras have the same matrix)
    src_pts: camera_1 feature points
    dst_pts: camera_2 feature points
    rot1: rotation_1 from essential matrix decomposition
    rot2: rotation_2 from essential matrix decomposition
    trans: translation from essential matrix decomposition
    choices: how many random source/destination points to use for finding projection matrix
    returns: dictionary with projection matrices for 2 cameras, translation vector and rotation_translation for 2nd camera
    """
    # creates projection matrix for the reference (first) camera
    # (identity rotation, zero translation: P1 = K [I | 0])
    rt_mat_orig = np.hstack((np.identity(3), np.zeros(3)[np.newaxis].T))
    projection_mat_orig = np.dot(camera_matrix, rt_mat_orig)
    solutions = []
    points = choose_points(src_pts, dst_pts, choices)
    # The four (R, t) candidates from essential-matrix decomposition;
    # cheirality (points in front of both cameras) selects the valid one.
    combinations = [(rot1, trans), (rot1, -trans), (rot2, trans), (rot2, -trans)]
    for rot, t in combinations:
        # creates projection matrix for the second camera
        # NOTE(review): assumes t has shape (3, 1) so hstack yields 3x4 — confirm.
        rt_mat_2nd = np.hstack((rot, t))
        projection_mat_2nd = np.dot(camera_matrix, rt_mat_2nd)
        pts_3d = cv2.triangulatePoints(
            projection_mat_orig, projection_mat_2nd, points[:, 0], points[:, 1]
        )
        # De-homogenize the triangulated points.
        pts_3d = pts_3d / pts_3d[3]
        if np.any(pts_3d[2, :] < 0):
            continue  # invalid solution, point is behind the camera
        solutions.append({
            'pro_mat_1st': projection_mat_orig,
            'pro_mat_2nd': projection_mat_2nd,
            't_vec': t,
            'rt_mat': rt_mat_2nd
        })
    # If the sampled points cannot disambiguate the candidates, retry with one
    # more sample until unique (or every point has been used).
    if len(solutions) > 1:
        choices += 1
        if choices > src_pts.shape[0]:
            raise Exception('Couldnt find unique solution to point triangulation')
        return find_projection_matrix(
            camera_matrix, src_pts, dst_pts, rot1, rot2, trans, choices=choices
        )
    if not solutions:
        raise Exception('Couldnt find any solution to point triangulation')
    return solutions[0]
def calc_fundamental_matrix(camera_matrix, essential_matrix):
    """Return the fundamental matrix F = K^-T * E * K^-1, scaled so F[2, 2] == 1."""
    k_inv_t = np.linalg.inv(camera_matrix.T)
    k_inv = np.linalg.inv(camera_matrix)
    fundamental = k_inv_t @ essential_matrix @ k_inv
    # Normalize by the bottom-right entry (the conventional scale fixing).
    return fundamental / fundamental[-1, -1]
e763628e69166c7d4a91691dfc9bdedece78cb10 | Python | gadididi/ex_5-machine-learning | /ex_5.py | UTF-8 | 3,190 | 2.703125 | 3 | [] | no_license | import sys
import torch
import numpy as np
import torchvision
from torch.utils import data
import torch.nn.functional as F
import matplotlib.pyplot as plt
from cnn import Net
from gcommand_dataset import GCommandLoader
# Global training configuration: pick GPU when available and build the model.
cuda = torch.cuda.is_available()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print(device)
EPOCHS = 30
# Global model instance shared by train/test/prediction below.
best_model = Net()
best_model.to(device)
USAGE = "ex_5.py <path_to_train> <path_to_valid> <path_to_test>"
def train(train_loader):
    """Run one training epoch of the global `best_model` over `train_loader`.

    Moves each batch to the global `device`, computes NLL loss on the squeezed
    model output and takes one optimizer step per batch.  Assumes the model
    object exposes its optimizer as `best_model.optimizer`.
    (The unused `losses` accumulator from the original was removed.)
    """
    best_model.train()
    # getting the training set
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        output_train = best_model(data)
        loss = F.nll_loss(output_train.squeeze(), target)
        best_model.optimizer.zero_grad()
        loss.backward()
        best_model.optimizer.step()
def number_of_correct(pred, target):
    """Count how many (squeezed) predictions match the target labels."""
    matches = pred.squeeze() == target
    return matches.sum().item()
def get_likely_index(tensor):
    """Return the index of the most likely label for each batch element."""
    return torch.argmax(tensor, dim=-1)
def test(val_loader):
    """Evaluate the global `best_model` on `val_loader` and print its accuracy."""
    best_model.eval()
    with torch.no_grad():
        correct = 0
        for data, target in val_loader:
            data = data.to(device)
            target = target.to(device)
            output = best_model(data)
            pred = get_likely_index(output)
            correct += number_of_correct(pred, target)
        print(
            f"\n\tAccuracy: {correct}/{len(val_loader.dataset)} ({100. * correct / len(val_loader.dataset):.2f}%)\n")
def prediction(test_loader, classes):
    """Predict a class for every sample in `test_loader` and write "test_y".

    Each output line has the form "<id>.wav, <class name>", sorted by the
    numeric id parsed from the sample's file path.
    """
    best_model.eval()
    i = 0
    predicts_list = []
    with torch.no_grad():
        for image, labels in test_loader:
            image, labels = image.to(device), labels.to(device)
            output = best_model(image)
            predicted = output.data.max(1, keepdim=True)[1].item()
            # NOTE(review): assumes the dataset path has at least five '/'
            # components with the numeric id at index 4 — fragile; confirm.
            data_ = int(test_loader.dataset.spects[i][0].split("/")[4].split('.')[0])
            predicts_list.append((data_, predicted))
            i += 1
    predicts_list = sorted(predicts_list)
    f = open("test_y", "w")
    for e in predicts_list:
        line = str(e[0]) + ".wav, " + classes[e[1]] + '\n'
        f.write(line)
    f.close()
def run_model(train_loader, val_loader, test_loader=None):
    """Train for EPOCHS epochs, validating each epoch; optionally predict on test data."""
    for e in range(1, EPOCHS + 1):
        print("epoch number: ", e)
        train(train_loader)
        test(val_loader)
    # When a test loader is given, write predictions using the class names
    # learned from the training set.
    if test_loader is not None:
        classes = train_loader.dataset.classes
        prediction(test_loader, classes)
def main():
    """Entry point: expects train/valid/test dataset paths on the command line."""
    if len(sys.argv) < 4:
        print(USAGE)
        exit(1)
    train_set = GCommandLoader(sys.argv[1])
    val_set = GCommandLoader(sys.argv[2])
    test_set = GCommandLoader(sys.argv[3])
    train_loader = torch.utils.data.DataLoader(
        train_set, batch_size=64, shuffle=True,
        pin_memory=True)
    val_loader = torch.utils.data.DataLoader(
        val_set, batch_size=64, shuffle=True,
        pin_memory=True)
    # Test loader keeps default batch size 1 so ids map 1:1 to samples.
    test_loader = torch.utils.data.DataLoader(test_set)
    run_model(train_loader, val_loader, test_loader)
if __name__ == '__main__':
    main()
| true |
c6a3d8f69f45c5b80115639060ddaceeb6019106 | Python | slowsheepsheep/basic-python-tutorial | /dev/__init__.py | UTF-8 | 1,409 | 4.78125 | 5 | [] | no_license |

if __name__ == "__main__":
    # String type
    cnStr = "空行与代码缩进不同,空行并不是Python语法的一部分。书写时不插入空行,\n" \
            "Python解释器运行也不会出错。但是空行的作用在于分隔两段不同功能或含义的代码,\n" \
            "便于日后代码的维护或重构。"
    print(cnStr)
    # '=' means assignment; '==' is the equality test
    num1 = 2
    num2 = 3
    print(num1 == num2)
    # Python uses indentation for block structure; only statements inside the
    # indented block depend on the condition's result
    if num2 > 2:
        print(num1)
    if num1 > 2:
        print(num2)
    # This statement is outside the ifs above, so it always executes
    print(num1+num2)
    # Lists
    numList = [100,99,88]
    print(numList)
    # range(30) covers [0, 30); a list can in principle hold many elements
    for i in range(30):
        numList.append(i)
    # Lists have many methods, e.g. reverse(), which reverses the list in place
    numList.reverse()
    print("翻转后的列表numList内容是:",numList) # this prints the reversed order compared to the print above
    # strList = ["hi","今天晚上有空吗","我请你吃饭"]
    # len() works like a ruler: it measures the length of many data types
    print("numList's length is:",len(numList))
    aStr = "dshadhlashdl"
    print(len(aStr))
a81aa19f5ba7190b174c80713e65f384f6b4bfe8 | Python | OscarZeng/Data_Structure_and_Algo | /LeetCode/Unique Binary Search Trees II.py | UTF-8 | 1,232 | 3.609375 | 4 | [] | no_license | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def generateTrees(self, n: int) -> List[TreeNode]:
        """Return every structurally unique BST storing values 1..n (LeetCode 95)."""
        if n == 0:
            return []
        def generateUniqueTrees(left, right):
            # An empty range contributes a single None child, which completes
            # the leaves of the tree being assembled above.
            if left > right:
                return [None]
            ans = []
            # Try every value i in [left, right] as the root.
            for i in range(left, right+1):
                # Generate all the possible left subtrees
                leftAns = generateUniqueTrees(left, i-1)
                # Generate all the possible right subtrees
                # (fix: this statement was garbled across two lines in the
                # original, which made the file a SyntaxError)
                rightAns = generateUniqueTrees(i+1, right)
                for l in leftAns:
                    for r in rightAns:
                        root = TreeNode(i)
                        root.left = l
                        root.right = r
                        ans.append(root)
            return ans
        return generateUniqueTrees(1,n)
e92bedfd8205aa22824fb61e56bd84eab4410c30 | Python | indraneelray/leetcode30daysMay | /21CountSquareSubmatrices.py | UTF-8 | 736 | 2.6875 | 3 | [] | no_license | class Solution:
def countSquares(self, matrix: List[List[int]]) -> int:
dp = [ [ 0 for i in range(0,len(matrix[0])+1) ] for j in range(0,len(matrix)+1) ]
# for i in range(0, len(matrix)+1):
# for j in range(0, len(matrix[0])+1):
# dp[i][j]=0
total = 0
#print (dp)
for i in range(1,len(matrix)+1):
for j in range (1,len(matrix[0])+1):
#print(matrix[i-1][j-1])
if matrix[i-1][j-1]==0:
dp[i][j] =0
else:
dp[i][j] = min(dp[i-1][j], dp[i][j-1], dp[i-1][j-1])+1
total += dp[i][j]
print (dp)
return total | true |
7cbbfcc813cfc8c14c82fe0eb89968ec8303dbf0 | Python | trannamtrung1st/FQCS-Research | /FQCS.ColorDetection/FQCS_lib/FQCS/tf2_yolov4/csp_darknet53.py | UTF-8 | 4,201 | 3.0625 | 3 | [
"MIT"
] | permissive | """Implements YOLOv4 backbone layer: CSPDarknet53"""
import tensorflow as tf
from .layers import conv_bn
def residual_block(inputs, num_blocks):
    """Chain `num_blocks` residual units over a 4D (N, H, W, C) tensor.

    Each unit applies a 1x1 conv then a 3x3 conv (both BN + mish) and adds
    the result back onto the unit's input; the channel count is preserved.
    Args:
        inputs (tf.Tensor): 4D (N,H,W,C) input tensor
        num_blocks (int): Number of residual blocks
    Returns:
        tf.Tensor: 4D (N,H,W,C) output Tensor
    """
    channels = inputs.shape[-1]
    x = inputs
    for _ in range(num_blocks):
        shortcut = x
        inner = conv_bn(shortcut, channels, kernel_size=1, strides=1, activation="mish")
        inner = conv_bn(inner, channels, kernel_size=3, strides=1, activation="mish")
        x = inner + shortcut
    return x
def csp_block(inputs, filters, num_blocks):
    """
    Create a CSPBlock which applies the following scheme to the input (N, H, W, C):
        - the first part (N, H, W, C // 2) goes into a series of residual connection
        - the second part is directly concatenated to the output of the previous operation
    Args:
        inputs (tf.Tensor): 4D (N,H,W,C) input tensor
        filters (int): Number of filters to use
        num_blocks (int): Number of residual blocks to apply
    Returns:
        tf.Tensor: 4D (N,H/2,W/2,filters) output tensor
    """
    half_filters = filters // 2
    # Downsample H and W by 2 before splitting into the two CSP paths.
    x = conv_bn(
        inputs,
        filters=filters,
        kernel_size=3,
        strides=2,
        zero_pad=True,
        padding="valid",
        activation="mish",
    )
    # Bypass ("route") branch: half the channels skip the residual stack.
    route = conv_bn(x,
                    filters=half_filters,
                    kernel_size=1,
                    strides=1,
                    activation="mish")
    # Residual branch: the other half goes through num_blocks residual units.
    x = conv_bn(x,
                filters=half_filters,
                kernel_size=1,
                strides=1,
                activation="mish")
    x = residual_block(x, num_blocks=num_blocks)
    x = conv_bn(x,
                filters=half_filters,
                kernel_size=1,
                strides=1,
                activation="mish")
    # Merge both halves and mix them with a final 1x1 conv.
    x = tf.keras.layers.Concatenate()([x, route])
    x = conv_bn(x,
                filters=filters,
                kernel_size=1,
                strides=1,
                activation="mish")
    return x
def csp_darknet53(input_shape):
    """
    CSPDarknet53 implementation based on AlexeyAB/darknet config
    https://github.com/AlexeyAB/darknet/blob/master/cfg/yolov4.cfg

    Args:
        input_shape: (H, W, C) shape of the network input (batch excluded).
    Returns:
        tf.keras.Model mapping the input to the three feature maps used by
        the YOLOv4 head (strides 8, 16 and 32).
    """
    inputs = tf.keras.Input(shape=input_shape)
    # First downsampling: L29 -> L103
    x = conv_bn(inputs,
                filters=32,
                kernel_size=3,
                strides=1,
                activation="mish")
    # This block could be expressed as a CSPBlock with modification of num_filters in the middle
    # For readability purpose, we chose to keep the CSPBlock as simple as possible and have a little redondancy
    x = conv_bn(
        x,
        filters=64,
        kernel_size=3,
        strides=2,
        zero_pad=True,
        padding="valid",
        activation="mish",
    )
    route = conv_bn(x, filters=64, kernel_size=1, strides=1, activation="mish")
    shortcut = conv_bn(x,
                       filters=64,
                       kernel_size=1,
                       strides=1,
                       activation="mish")
    x = conv_bn(shortcut,
                filters=32,
                kernel_size=1,
                strides=1,
                activation="mish")
    x = conv_bn(x, filters=64, kernel_size=3, strides=1, activation="mish")
    # Residual add, then the CSP-style merge with the route branch.
    x = x + shortcut
    x = conv_bn(x, filters=64, kernel_size=1, strides=1, activation="mish")
    x = tf.keras.layers.Concatenate()([x, route])
    x = conv_bn(x, filters=64, kernel_size=1, strides=1, activation="mish")
    # Second downsampling: L105 -> L191
    x = csp_block(x, filters=128, num_blocks=2)
    # Third downsampling: L193 -> L400
    output_1 = csp_block(x, filters=256, num_blocks=8)
    # Fourth downsampling: L402 -> L614
    output_2 = csp_block(output_1, filters=512, num_blocks=8)
    # Fifth downsampling: L616 -> L744
    output_3 = csp_block(output_2, filters=1024, num_blocks=4)
    return tf.keras.Model(inputs, [output_1, output_2, output_3],
                          name="CSPDarknet53")
| true |
3669bb385ed5ba87e61175e12973a9da73de67c8 | Python | turnaround0/UpliftModeling | /experiment/measure.py | UTF-8 | 5,467 | 3.234375 | 3 | [] | no_license | import pandas as pd
import matplotlib.pyplot as plt
def performance(pr_y1_t1, pr_y1_t0, y, t, groups=10):
    """
    1. Split the total customers into the given number of groups
    2. Calculate the statistics of each segment
    Args:
        pr_y1_t1: the series (list) of the customer's expected return
        pr_y1_t0: the expected return when a customer is not treated
        y: the observed return of customers
        t: whether each customer is treated or not
        groups: the number of groups (segments). Should be 5, 10, or 20
    Return:
        DataFrame:
            columns:
                'n_y1_t1': the number of treated responders
                'n_y1_t0': the number of not treated responders
                'r_y1_t1': the average return of treated customers
                'r_y1_t0': the average return of not treated customers
                'n_t1': the number of treated customers
                'n_t0': the number of not treated customers
                'uplift': the average uplift (the average treatment effect)
            rows: the index of groups
    """
    ### check valid arguments
    if groups not in [5, 10, 20]:
        raise Exception("uplift: groups must be either 5, 10 or 20")
    ### check for NAs.
    if pr_y1_t1.isnull().values.any():
        raise Exception("uplift: NA not permitted in pr_y1_t1")
    if pr_y1_t0.isnull().values.any():
        raise Exception("uplift: NA not permitted in pr_y1_t0")
    if y.isnull().values.any():
        raise Exception("uplift: NA not permitted in y")
    if t.isnull().values.any():
        raise Exception("uplift: NA not permitted in t")
    ### check valid values for y and t
    # if set(y) != {0, 1}:
    #     raise Exception("uplift: y must be either 0 or 1")
    if set(t) != {0, 1}:
        raise Exception("uplift: t must be either 0 or 1")
    ### check length of arguments
    if not (len(pr_y1_t1) == len(pr_y1_t0) == len(y) == len(t)):
        raise Exception("uplift: arguments pr_y1_t1, pr_y1_t0, y and t must all have the same length")
    ### define dif_pred
    dif_pred = pr_y1_t1 - pr_y1_t0
    ### Make index same
    # NOTE(review): this mutates the caller's `y` and `t` Series in place.
    y.index = dif_pred.index
    t.index = dif_pred.index
    mm = pd.DataFrame({
        'dif_pred': dif_pred,
        'y': y,
        't': t,
        'dif_pred_r': dif_pred.rank(ascending=False, method='first')
    })
    # Rank customers by predicted uplift (descending) and cut into quantile bins.
    mm_groupby = mm.groupby(pd.qcut(mm['dif_pred_r'], groups, labels=range(1, groups + 1), duplicates='drop'))
    # Per-bin responder counts for treated / control.
    n_y1_t1 = mm_groupby.apply(lambda r: r[r['t'] == 1]['y'].sum())
    n_y1_t0 = mm_groupby.apply(lambda r: r[r['t'] == 0]['y'].sum())
    n_t1 = mm_groupby['t'].sum()
    n_t0 = mm_groupby['t'].count() - n_t1
    df = pd.DataFrame({
        'n_t1': n_t1,
        'n_t0': n_t0,
        'n_y1_t1': n_y1_t1,
        'n_y1_t0': n_y1_t0,
        'r_y1_t1': n_y1_t1 / n_t1,
        'r_y1_t0': n_y1_t0 / n_t0,
    })
    fillna_columns = ['n_y1_t1', 'n_y1_t0', 'n_t1', 'n_t0']
    df[fillna_columns] = df[fillna_columns].fillna(0)
    df.index.name = 'groups'
    # Uplift per bin = treated response rate minus control response rate.
    df['uplift'] = df['r_y1_t1'] - df['r_y1_t0']
    df['uplift'] = round(df['uplift'], 6)
    return df
def qini(perf, plotit=True):
    """Compute Qini statistics from a `performance(...)` DataFrame.

    Returns a dict with the Qini coefficient, the model and random
    incremental-gain curves, their cumulative areas, and the Qini values
    at the top 30% / 10% of the population.  When plotit is True, the two
    gain curves are also plotted with matplotlib.
    """
    nrow = len(perf)
    # Calculating the incremental gains.
    # - First, the cumulative sum of the treated and the control groups are
    #   calculated with respect to the total population in each group at the
    #   specified decile
    # - Afterwards we calculate the percentage of the total amount of people
    #   (both treatment and control) are present in each decile
    cumul_y1_t1 = (perf['n_y1_t1'].cumsum() / perf['n_t1'].cumsum()).fillna(0)
    cumul_y1_t0 = (perf['n_y1_t0'].cumsum() / perf['n_t0'].cumsum()).fillna(0)
    deciles = [i / nrow for i in range(1, nrow + 1)]
    ### Model Incremental gains
    inc_gains = (cumul_y1_t1 - cumul_y1_t0) * deciles
    inc_gains = [0.0] + list(inc_gains)
    ### Overall incremental gains
    overall_inc_gain = sum(perf['n_y1_t1']) / sum(perf['n_t1']) \
                       - sum(perf['n_y1_t0']) / sum(perf['n_t0'])
    ### Random incremental gains
    random_inc_gains = [i * overall_inc_gain / nrow for i in range(nrow + 1)]
    ### Compute area under the model incremental gains (uplift) curve
    # (trapezoid rule over the (x, y) curve; auuc_list keeps the running area)
    x = [0] + deciles
    y = list(inc_gains)
    auuc = 0
    auuc_rand = 0
    auuc_list = [auuc]
    for i in range(1, len(x)):
        auuc += 0.5 * (x[i] - x[i - 1]) * (y[i] + y[i - 1])
        auuc_list.append(auuc)
    ### Compute area under the random incremental gains curve
    y_rand = random_inc_gains
    auuc_rand_list = [auuc_rand]
    for i in range(1, len(x)):
        auuc_rand += 0.5 * (x[i] - x[i - 1]) * (y_rand[i] + y_rand[i - 1])
        auuc_rand_list.append(auuc_rand)
    ### Compute the difference between the areas (Qini coefficient)
    Qini = auuc - auuc_rand
    ### Plot incremental gains curve
    if plotit:
        x_axis = x
        plt.plot(x_axis, inc_gains)
        plt.plot(x_axis, random_inc_gains)
        plt.show()
    ### Qini 30%, Qini 10%
    n_30p = int(nrow * 3 / 10)
    n_10p = int(nrow / 10)
    qini_30p = auuc_list[n_30p] - auuc_rand_list[n_30p]
    qini_10p = auuc_list[n_10p] - auuc_rand_list[n_10p]
    res = {
        'qini': Qini,
        'inc_gains': inc_gains,
        'random_inc_gains': random_inc_gains,
        'auuc_list': auuc_list,
        'auuc_rand_list': auuc_rand_list,
        'qini_30p': qini_30p,
        'qini_10p': qini_10p,
    }
    return res
| true |
54823aab40ee729e39a1a2e85caf5919b0820a27 | Python | seidels/structured-phylodynamic-models | /simulation/sc/generate_tip_times.py | UTF-8 | 413 | 3.109375 | 3 | [
"Apache-2.0"
] | permissive | import numpy as np
import sys
# command line argument can provide seed, otw set seed to 1
if len(sys.argv) < 2:
np.random.seed(1)
else:
np.random.seed(int(sys.argv[1]))
# generate 100 random tip times between 0-10.
tipDates=np.random.uniform(0, 10, 101)
with open('randomTipTimes.txt', 'w') as text_file:
for i in range(1,101):
text_file.write(str(i) + '\t' + str(tipDates[i]) + '\n')
| true |
3c5bd7bfea7a8294825eadad457c7c1d38abdd78 | Python | Subham-sarkar/Build-Hub | /mypro.py | UTF-8 | 981 | 2.75 | 3 | [] | no_license | import os
# Map of lower-case file-name extensions to human-readable language names.
lookup = {
    'c':'C',
    'py':'Python',
    'java':'Java',
    'pl':'Perl',
    'cpp':'C++',
    'net':'.NET',
    'txt':'Text',
    'js':'java Script',
    'html':'Html',
    'css':'CSS',
    'php':'PHP',
    'm':'Objective C',
    'cs':'C#',
    'vb':'Visual Basic'
}
def fext(s):
    """Return the display language name for filename ``s`` by its extension.

    Unknown extensions are returned verbatim.  Unlike the original
    ``name, ext = s.split('.')`` unpacking -- which raised ValueError for
    names with zero or more than one dot -- this splits on the last dot
    only, so ``'archive.tar.gz' -> 'gz'`` and a dotless name is returned
    unchanged.
    """
    ext = s.rsplit('.', 1)[-1]
    # dict.get replaces the bare try/except that swallowed every error type.
    return lookup.get(ext, ext)
def mypro(author):
    """Return ``[filename, author, language]`` rows for every entry in
    ``files/Name.txt`` whose recorded author equals ``author``.

    Each catalogue line is expected to look like ``filename|author``.
    """
    matches = []
    # Open the catalogue via a relative path instead of os.chdir()-ing in
    # and out: the original leaked a changed working directory (and an
    # open file handle) whenever an exception escaped the loop.
    with open(os.path.join('files', 'Name.txt')) as catalogue:
        for line in catalogue:
            key, file_author = line.split('|')
            file_author = file_author.strip()
            if author == file_author:
                # [filename, author, language derived from the extension]
                matches.append([key, author, fext(key)])
    return matches
| true |
9192af5897ba8eb25522bcf8fa4d34fd9b40c235 | Python | subhamraj5/Adcuratio_Assignment | /Main.py | UTF-8 | 2,118 | 2.8125 | 3 | [] | no_license | from RbasApp.ActionType import Action
from RbasApp.Resources import Resource
from RbasApp.Roles import Role
from RbasApp.Users import User
import warnings
warnings.filterwarnings("ignore")
class Main:
    """Factory and access-check helpers for the RBAC demo objects."""

    def create_resource(self, r_id, r_name):
        """Build a Resource with the given id and name."""
        return Resource(r_id, r_name)

    def create_user(self, u_id, u_pwd, u_name, u_age):
        """Build a User with the given credentials and profile data."""
        return User(u_id, u_pwd, u_name, u_age)

    def create_role(self, r_id, r_name):
        """Build a Role with the given id and name."""
        return Role(r_id, r_name)

    def create_action_type(self, a_id, a_name):
        """Build an Action with the given id and name."""
        return Action(a_id, a_name)

    def check_user_access(self, user):
        """Print the permissions implied by the user's role name."""
        role_name = user.get_user_role().get_role_name()
        name = user.get_user_name()
        if role_name == "admin":
            print(name + " has Access to all resources with READ, WRITE and DELETE permission")
        elif role_name == "viewer":
            print(name + " has Access to all resources with READ only permission")
        else:
            # Any non-admin, non-viewer role (e.g. "member") falls through
            # to write-only access.
            print(name + " has Access to all resources with WRITE only permission")
driver=Main()
# Creating roles.
# NOTE(review): all three roles are created with id 1 -- looks like a
# copy-paste slip; confirm whether the ids should differ.
admin=driver.create_role(1, "admin")
member=driver.create_role(1, "member")
viewer=driver.create_role(1, "viewer")
# Creating resources.  NOTE(review): R1..R5 are built but never linked to
# any user below -- access checks rely only on role names.
R1=driver.create_resource(1,"R1")
R2=driver.create_resource(2,"R2")
R3=driver.create_resource(3,"R3")
R4=driver.create_resource(4,"R4")
R5=driver.create_resource(5,"R5")
# Creating action types.
read=driver.create_action_type(1,"Read")
write=driver.create_action_type(2,"Write")
delete=driver.create_action_type(3,"Delete")
rwd=driver.create_action_type(4,"RWD")
# Creating users (all share id 1 and password "pwd").
u1=driver.create_user(1, "pwd","Raj",26)
u2=driver.create_user(1, "pwd","Rohan",25)
u3=driver.create_user(1, "pwd","Shruti",24)
u4=driver.create_user(1, "pwd","Tuba",25)
# Assign roles and action types.  NOTE(review): u4 is never given a role
# and is never checked below -- confirm intent.
u1.set_user_role(admin)
u1.set_user_action_type(rwd)
u2.set_user_role(member)
u2.set_user_action_type(write)
u3.set_user_role(viewer)
u3.set_user_action_type(read)
# Print the access summary for each configured user.
driver.check_user_access(u1)
driver.check_user_access(u2)
driver.check_user_access(u3)
| true |
c43ffb8db8ccd951cbc1897d6466ea4a762703c4 | Python | joemarshall/grovepi-emulator | /components/groveultrasonic.py | UTF-8 | 1,712 | 2.640625 | 3 | [
"LicenseRef-scancode-public-domain"
] | permissive |
import grovepi
from gpe_utils.tkimports import *
from . import propgrid
class GroveUltrasonic:
    """Emulator widget for a Grove ultrasonic ranger on digital pin D<pin>.

    Offers a compact slider view (initSmall) and a property-grid detail
    view (initPropertyPage); both feed the simulated distance into the
    grovepi emulation layer.
    """
    def __init__(self,inputNum):
        # Digital pin number this simulated sensor is attached to.
        self.pin=inputNum
        # Tk variable backing the slider widget.
        self.value=tk.IntVar()
        grovepi.digValues[self.pin]=2 # tell grovepi that we are an ultrasonic ranger
    def title(self):
        """Human-readable widget title, e.g. 'D4: Grove Ultrasonic Ranger'."""
        return "D%d: Grove Ultrasonic Ranger"%self.pin
    @classmethod
    def classDescription(cls):
        """Short description shown when picking a component type."""
        return "Grove Ultrasonic Ranger"
    def initSmall(self,parent):
        """Build the compact view: a title label plus a 0-400 slider."""
        self.label=ttk.Label(parent,text=self.title())
        self.label.grid()
        self.slider=ttk.Scale(parent,from_=0,to=400,orient=tk.HORIZONTAL,command=self.OnSliderChange,variable=self.value)
        self.slider.grid()
    def initPropertyPage(self,parent):
        """Build the detail view: a property grid with a 'Distance (cm)' field."""
        self.propGrid=propgrid.PropertyGrid(parent,title=self.title())
        self.valueProperty=propgrid.IntProperty("Distance (cm)",value=0)
        self.propGrid.Append( self.valueProperty )
        self.propGrid.SetCallback(self.OnPropGridChange)
        self.propGrid.pack(fill=tk.X)
    def OnPropGridChange(self,property,value):
        # Property-grid edit callback; only the distance field is relevant.
        if property=="Distance (cm)":
            self.setValue(value)
    def OnSliderChange(self,event):
        # Slider drag callback; push the Tk variable's value through setValue.
        self.setValue(self.value.get())
    def setValue(self,value):
        """Clamp ``value`` to [0, 400] and propagate it to both views and grovepi."""
        if value>400: value=400
        if value<0:value=0
        self.valueProperty.SetValue(value)
        self.value.set(value)
        # Stored with a +2 offset, matching the sentinel 2 written in
        # __init__ -- presumably 0/1 are reserved; confirm against grovepi.
        grovepi.digValues[self.pin]=value+2
    def getCSVCode(self):
        """Snippet descriptors consumed by the CSV-logging code generator."""
        return {"imports":["sensors"],"pin_mappings":["\"ultrasonic%d\":%d"%(self.pin,self.pin)],"reader":"sensors.ultrasonic%d.get_level()"%self.pin,"variable":"ultrasonic%d"%self.pin}
| true |
0ebf40ce933b348e8b510a07d0fe6f20d17d6a37 | Python | romrell4/470-AI | /Reversi/Board.py | UTF-8 | 4,994 | 3.265625 | 3 | [] | no_license | from Square import Square
import Enums
from Enums import Color, Direction
class Board:
    """Square Reversi/Othello board made of linked Square cells.

    Every cell stores a static positional value (corners high,
    corner-adjacent squares negative, edges moderate) used for weighted
    scoring, and holds references to its up-to-8 neighbours via Direction
    links.  The four centre cells are seeded with the standard opening
    position.  NOTE: this module targets Python 2 (it uses ``unicode`` and
    relies on integer ``/`` division).
    """
    def __init__(self, size, config):
        # config selects which character set Color.chr uses when rendering.
        self.config = config
        self.size = size
        self.max = size - 1
        # Sum of every square's positional value.  NOTE(review): written
        # here but never read inside this class -- confirm external users.
        self.weight = 0
        self.grid = []
        # First pass: create the squares with their positional values.
        for i in range(size):
            self.grid.append([])
            for j in range(size):
                value = self.getSquareValue(i, j)
                self.weight += value
                self.grid[i].append(Square(i, j, value))
        # Second pass: wire up the 8-way neighbour links, omitting links
        # that would fall off the board.
        for i in range(size):
            for j in range(size):
                if j > 0:
                    self.grid[i][j].neighbors[Direction.N] = self.grid[i][j - 1]
                if j < self.max:
                    self.grid[i][j].neighbors[Direction.S] = self.grid[i][j + 1]
                if i > 0:
                    self.grid[i][j].neighbors[Direction.W] = self.grid[i - 1][j]
                if i < self.max:
                    self.grid[i][j].neighbors[Direction.E] = self.grid[i + 1][j]
                if i > 0 and j > 0:
                    self.grid[i][j].neighbors[Direction.NW] = self.grid[i - 1][j - 1]
                if i < self.max and j > 0:
                    self.grid[i][j].neighbors[Direction.NE] = self.grid[i + 1][j - 1]
                if i > 0 and j < self.max:
                    self.grid[i][j].neighbors[Direction.SW] = self.grid[i - 1][j + 1]
                if i < self.max and j < self.max:
                    self.grid[i][j].neighbors[Direction.SE] = self.grid[i + 1][j + 1]
        # Standard opening: two white and two black pieces in the centre
        # 2x2 block (integer division under Python 2).
        m2 = size / 2
        m1 = m2 - 1
        self.grid[m1][m1].piece = Color.WHITE
        self.grid[m2][m2].piece = Color.WHITE
        self.grid[m1][m2].piece = Color.BLACK
        self.grid[m2][m1].piece = Color.BLACK
    def getPlayableSquares(self, color):
        """Return the list of squares where ``color`` may legally play."""
        # Legality itself is decided by Square.isPlayable.
        playableSquares = []
        for i in range(self.size):
            for j in range(self.size):
                if self.grid[i][j].isPlayable(color):
                    playableSquares.append(self.grid[i][j])
        return playableSquares
    def getConfig(self, x, y, color):
        """Return a new Board equal to this one with ``color`` played at (x, y).

        A fresh board is built (so neighbour links are rebuilt), the piece
        layout is copied across, and the move is applied to the copy.
        """
        board = Board(self.size, self.config)
        for i in range(self.size):
            for j in range(self.size):
                board.grid[i][j].piece = self.grid[i][j].piece
        board.play(x, y, color)
        return board
    def getScore(self, weighted):
        """Return per-colour totals indexed by piece value.

        With ``weighted`` True, positional square values are summed per
        owner; otherwise pieces are counted.  Index 0 accumulates the
        empty squares -- presumably Color reserves 0 for "no piece";
        confirm against Enums.
        """
        score = [0, 0, 0]
        if weighted:
            for i in range(self.size):
                for j in range(self.size):
                    square = self.grid[i][j]
                    score[square.piece] += square.value
        else:
            for i in range(self.size):
                for j in range(self.size):
                    score[self.grid[i][j].piece] += 1
        return score
    def getSquareValue(self, x, y):
        """Static positional value of square (x, y).

        Corners: size*2; edge squares adjacent to a corner: -size/4;
        other edge squares: size/2 - 1; X-squares (diagonal neighbours of
        corners): -2; the rest of the second ring: size/4; interior: 1.
        """
        if (x == 0 and y == 0) or \
           (x == 0 and y == self.max) or \
           (x == self.max and y == 0) or \
           (x == self.max and y == self.max):
            return self.size * 2
        elif (x == 0 and y == 1) or \
             (x == 1 and y == 0) or \
             (x == self.max - 1 and y == 0) or \
             (x == self.max and y == 1) or \
             (x == 0 and y == self.max - 1) or \
             (x == 1 and y == self.max) or \
             (x == self.max - 1 and y == self.max) or \
             (x == self.max and y == self.max - 1):
            return -self.size / 4
        elif x == 0 or y == 0 or x == self.max or y == self.max:
            return self.size / 2 - 1
        elif (x == 1 and y == 1) or \
             (x == self.max - 1 and y == 1) or \
             (x == 1 and y == self.max - 1) or \
             (x == self.max - 1 and y == self.max - 1):
            return -2
        elif x == 1 or y == 1 or x == self.max - 1 or y == self.max - 1:
            return self.size / 4
        else:
            return 1
    def play(self, x, y, color):
        """Play ``color`` at (x, y); delegated to Square.play (which
        presumably performs the flipping -- see Square)."""
        self.grid[x][y].play(color)
    def __str__(self):
        # Python 2: encode the unicode rendering for byte-string contexts.
        return unicode(self).encode('utf-8')
    def __unicode__(self):
        """Render the board with box-drawing borders, column letters,
        row numbers, and an unweighted score line at the bottom."""
        result = "\n  "
        for j in range(self.size):
            result += Enums.getAlpha(j) + " "
        result += " \n"
        result += u'\u2554\u2550'
        for j in range(self.size):
            result += u'\u2550\u2550'
        result += u'\u2557'
        result += "\n"
        for j in range(self.size):
            result += u'\u2551' + " "
            for i in range(self.size):
                result += Color.chr[self.config][self.grid[i][j].piece] + " "
            result += u'\u2551' + " " + str(j+1) + "\n"
        result += u'\u255a\u2550'
        for j in range(self.size):
            result += u'\u2550\u2550'
        result += u'\u255d'
        score = self.getScore(False)
        result += "\n" + Color.str[Color.BLACK] + ": " + str(score[Color.BLACK])
        result += "  " + Color.str[Color.WHITE] + ": " + str(score[Color.WHITE])
        return result + "\n"
| true |
3cb09d15f93bbb93d5a930c425a1a36ea77276bd | Python | tomahawk-player/tomahawk-contrib | /latestxspf/latestxspf.py | UTF-8 | 3,074 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf8 -*-
#
# This script creates an XSPF playlist containing the
# latest additions to your local music collection.
#
# ----------------------------------------------------------------------------
# "THE BEER-WARE LICENSE" (Revision 42):
# <mo@liberejo.de> wrote this file. As long as you retain this notice you
# can do whatever you want with this stuff. If we meet some day, and you think
# this stuff is worth it, you can buy me a beer in return - Remo Giermann.
# ----------------------------------------------------------------------------
#
# author: Remo Giermann <mo@liberejo.de>
# created: 2011/03/30
#
import sys
import time
import urllib
import tagpy
import xspfgenerator
from find import *
class TagReader(object):
    """
    Reads tags from filenames and saves it to a list of dictionaries.
    """
    def __init__(self):
        # Accumulated track dictionaries, one per file read so far.
        self.__dicts = []
    def read(self, filename):
        """
        Reads tag info from 'filename' and saves a dictionary with artist, title
        and album strings.
        """
        tag = tagpy.FileRef(filename).tag()
        d = {}
        # Location is a percent-encoded file:// URI (the form XSPF expects).
        d.update(location = 'file://'+urllib.quote(filename))
        # Fall back to placeholder strings when a tag field is empty.
        d.update(artist = tag.artist or "Unknown Artist")
        d.update(title = tag.title or "Unknown Title")
        d.update(album = tag.album or '')
        self.__dicts.append(d)
    def tags(self):
        """
        Returns all tags read so far in a list of dicts
        """
        return self.__dicts
    def __len__(self):
        # Number of files successfully read.
        return len(self.__dicts)
def latesttracks(directory, days):
    """
    Finds the latest additions to 'directory' (within the last 'days')
    and returns an XSPF playlist.
    """
    tags = TagReader()
    # Start of the window, formatted together with today for the title.
    then = time.time() - (days * 24 * 3600)
    date = time.strftime("%D", time.localtime(then))
    now = time.strftime("%D")
    creator = "LatestXSPF"
    title = "New tracks from {date} till {now}".format(date=date, now=now)
    # find() (see the local `find` module) presumably walks the tree and
    # invokes tags.read on every matching audio file -- confirm there.
    find(days=days, dir=directory, exts=[".mp3", ".flac", ".ogg"], hook=tags.read)
    print >> sys.stderr, len(tags), 'music files found'
    xspf = xspfgenerator.SimpleXSPFGenerator(title, creator)
    xspf.addTracks(tags.tags())
    return xspf
if __name__ == "__main__":
    import sys
    import argparse
    from datetime import datetime
    parser = argparse.ArgumentParser(description='Create playlist of latest additions.')
    parser.add_argument('directory', help='directory to look for music (./)', nargs='?', default='./')
    # -d and -s are alternative ways to choose the time window, hence the
    # mutually exclusive group.  Explicit dest= names are required: argparse
    # stores single-letter options as args.d / args.s by default, so the
    # args.days / args.since accesses below raised AttributeError.
    group = parser.add_mutually_exclusive_group()
    group.add_argument('-d', dest='days', metavar='DAYS', help='find new music from the last DAYS (14)', type=int, default=14)
    group.add_argument('-s', dest='since', metavar='M/D/YY', help='find new music since this date')
    parser.add_argument('-o', dest='outfile', metavar='FILE', help='optional output file name (stdout)', type=argparse.FileType('w'), default=sys.stdout)
    args = parser.parse_args()
    if args.since is not None:
        # Convert the absolute start date into a day count relative to now.
        now = datetime.now()
        try:
            since = datetime.strptime(args.since, '%m/%d/%y')
        except ValueError:
            print >> sys.stderr, 'date must be in M/D/YY format'
            sys.exit()
        args.days = (now-since).days
    print >> sys.stderr, args.days, 'days'
    print >> args.outfile, latesttracks(args.directory, args.days)
| true |
d187d1269cb27188fe5f94b72ea9c28b2f224557 | Python | forybh/Algorithm_py | /1319.py | UTF-8 | 920 | 2.9375 | 3 | [] | no_license | def solution():
N = int(input())
find = int(input())
x_dir = [0,1,0,-1]
y_dir = [-1,0,1,0]
x_cur = N//2
y_cur = N//2
cur_dir = 0
answer = [[0]*N for _ in range(N)]
count = 0
length = 1
cur_N = 1
while True:
for _ in range(length):
if cur_N > N**2:
break
answer[y_cur][x_cur] = cur_N
cur_N += 1
y_cur += y_dir[cur_dir]
x_cur += x_dir[cur_dir]
if cur_N > N**2:
break
count += 1
if count == 2:
count = 0
length += 1
cur_dir += 1
cur_dir %= 4
for i, l in enumerate(answer):
print(" ".join(str(x) for x in l))
if find in l:
find_index = (i, l.index(find))
print(" ".join(str(f+1) for f in find_index))
solution()
| true |
5a2e4c2a224849d58e733dadfc25a2ba7a564ce7 | Python | mikeKravt/study_progects | /Загадай число.py | UTF-8 | 2,071 | 4.15625 | 4 | [] | no_license | #Программа "Загадай число" \ MikeKravt 24/02/2016
#Пользователь загадует натуральное число от 1 до 100, а ПК отгадует
#В конце выводится отгаданое число и количество попыток
import random
print ("\n\t\t\tДобро пожаловать в ИГРУ!")
print ("\n\nЗагадайте любое целое число от 1 до 100. Если Вы готовы, напишите ДА")
#Алгоритм для определения готовности пользователя (загадал число или нет)
quest = input ("\nНачинаем? Ваш ответ: ")
while quest != "":
if quest.lower() == "нет":
print ("\n\tНу сколько еще ждать?")
quest = input ("\nНачинаем? Ваш ответ: ")
else:
print ("Тогда начнем...")
break
#Алгоритм присвоения случайного значения. Вывод первого варианта.
number = random.randint (1, 100)
print (number)
answ = str(input("Я угадал? Ваш ответ: "))
min = 1
max = 100
if answ.lower() == "да":
print ("Ура! Я угадал с первого раза. Я Суперкомп")
#Основной цикл
tries = 1
while answ != "да":
answ2 = str(input("Больше или меньше? Ваш ответ: "))
if answ2.lower() == "больше":
min = number
number = random.randint (number, max)
print (number)
answ = str(input("Я угадал? "))
elif answ2.lower() == "меньше":
max = number
number = random.randint (min, number)
print (number)
answ = str(input("Я угадал? "))
tries +=1
#Вывод заключительного результата
print ("\t\t\tУра! Я угадал. Это число:", number)
print ("\n\t\t\tИ угадал я всего с", tries,"попытки.")
| true |
5645cfa7d22e44fc60d264dede164629b6ab3572 | Python | fgaurat/ghspython | /write_file.py | UTF-8 | 261 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
with open("the_file.txt","w") as f:
f.write("Toto\n")
f.write("Toto 1\n")
f.write("Toto 2\n")
f.write("Toto 3\n")
f.write("Toto 4\n")
with open("the_file.txt","r") as f:
lines = f.readlines()
print(lines) | true |
b3c802667cf9620de2c012dba2fc8c9729b47f8a | Python | Akuli/math-derivations | /linalg_utils.py | UTF-8 | 4,538 | 3.125 | 3 | [
"MIT"
] | permissive | import itertools
def _stringify(num, parens=False):
string = str(abs(num))
if "/" in string:
string = r"\frac{%s}{%s}" % (abs(num).numerator, abs(num).denominator)
if num < 0:
string = "-" + string
if num < 0 and parens:
string = fr"\left( {string} \right)"
return string
class MatrixWithRowOperations:
    """Builds an aligned LaTeX trace of elementary row operations.

    Each row is rendered in a colour that changes whenever the row is
    modified, so readers can follow which row each operation touched.
    An optional column ``separator`` renders the matrix as two side-by-side
    bmatrix blocks (e.g. for augmented matrices).
    """
    def __init__(self, rows, *, separator=None, prefix="", transformed_symbol=r"\to"):
        # Cycle of colour-wrapping callables; each wraps a string in a
        # LaTeX colour macro.
        self._color_iter = itertools.cycle([
            r'\red{%s}'.__mod__,
            r'\blue{%s}'.__mod__,
            r'\magenta{%s}'.__mod__,
            r'\green{%s}'.__mod__,
            r'\darkyellow{%s}'.__mod__,
        ])
        # Defensive copy: mutating row operations must not touch the input.
        self._rows = [list(row) for row in rows]
        self._separator = separator
        self.prefix = prefix
        # Colour currently assigned to each row.
        self._current_colors = [next(self._color_iter) for row in self._rows]
        # Arrow between matrix states; alignment differs with a separator.
        if self._separator is None:
            self._aligned_arrow = "&" + transformed_symbol
        else:
            self._aligned_arrow = transformed_symbol + "&~~~"
        self.clear_output()
    def clear_output(self):
        """Reset the output buffer to just the current matrix state."""
        self._output = []
        if self._separator is not None:
            self._output.append(r"&~~~")
        self._append_current_state_to_output()
        if self._separator is not None:
            self._output.append(r"\\")
    def _pick_color(self):
        """Return the next colour not currently assigned to any row."""
        # Goals:
        # - Use all available colors
        # - Avoid choosing colors that have been recently used
        for color in self._color_iter:
            if color not in self._current_colors:
                return color
    def _append_current_state_to_output(self):
        """Emit the current matrix as one (or, with a separator, two) bmatrix blocks."""
        if self._separator is None:
            # row[:]
            slices = [slice(None)]
        else:
            # row[:sep], row[sep:]
            slices = [slice(None, self._separator), slice(self._separator, None)]
        for s_index, s in enumerate(slices):
            self._output.append(self.prefix + r"\begin{bmatrix}")
            for y, (color, row) in enumerate(zip(self._current_colors, self._rows)):
                line = " " * 4 + " & ".join(color(_stringify(v)) for v in row[s])
                if y < len(self._rows): # FIXME: always true
                    line += r" \\"
                self._output.append(line)
            self._output.append(r"\end{bmatrix}")
            if s_index != len(slices) - 1:
                self._output.append(r"\qquad")
    # rows[index] *= by
    def multiply_row(self, index, by):
        """Scale row ``index`` by ``by`` and append the annotated step."""
        assert index >= 0
        assert by != 0
        self._output.append(self._aligned_arrow)
        # Recolour the modified row so the change is visible.
        old_color = self._current_colors[index]
        new_color = self._pick_color()
        self._rows[index] = [by*v for v in self._rows[index]]
        self._current_colors[index] = new_color
        self._append_current_state_to_output()
        self._output.append(r"\quad")
        self._output.append(new_color(r"\text{new %s}" % self._row_name(index)))
        self._output.append(r"= %s \cdot " % _stringify(by, parens=True))
        self._output.append(old_color(r"\text{old %s}" % self._row_name(index)))
        self._output.append(r'\\')
    def _row_name(self, i):
        """Human-friendly name of row ``i`` ("top"/"middle"/"bottom" for
        small matrices, "row N" for larger ones)."""
        if len(self._rows) == 2:
            return ["top", "bottom"][i]
        if len(self._rows) == 3:
            return ["top", "middle", "bottom"][i]
        if len(self._rows) > 3:
            return f"row {i+1}"
        raise NotImplementedError
    # rows[dest] += scalar*rows[src]
    def add_multiple(self, src, dest, scalar):
        """Add ``scalar`` times row ``src`` to row ``dest`` and append the
        annotated step."""
        assert src != dest
        self._output.append(self._aligned_arrow)
        self._rows[dest] = [
            d + scalar * s for d, s in zip(self._rows[dest], self._rows[src])
        ]
        # Recolour the destination row; the source keeps its colour.
        old_color = self._current_colors[dest]
        new_color = self._pick_color()
        self._current_colors[dest] = new_color
        self._append_current_state_to_output()
        self._output.append(r"\quad")
        self._output.append(new_color(r"\text{new %s}" % self._row_name(dest)))
        self._output.append("=")
        self._output.append(old_color(r"\text{old %s}" % self._row_name(dest)))
        self._output.append(r"+ %s \cdot " % _stringify(scalar, parens=True))
        self._output.append(self._current_colors[src](r"\text{%s}" % self._row_name(src)))
        self._output.append(r'\\')
    def get_output(self, separator=None):
        """Return the accumulated trace wrapped in an align environment.

        NOTE(review): operations append r'\\' (two backslash characters)
        but this compares against '\\' (a single backslash), so the
        trailing row break is likely never popped -- confirm.
        """
        output = self._output.copy()
        if output[-1] == '\\':
            output.pop()
        return r"\begin{align}" + "\n" + "\n".join(output) + "\n" + r"\end{align}"
| true |
3efd1d698f6a38966fc7abf2485bd242333d13a6 | Python | Asmith9555/Nuclear_Physics_Project | /Nuke_Phys_UNT_Exp/Data/Overlapping_area_per_angle_script.py | UTF-8 | 1,429 | 3.3125 | 3 | [] | no_license | import numpy as np
#Individual functions written to calculate for multiple angles at once.
def R_list(L,thetas):
    """Centre separations on the detector plane: L*tan(theta) per angle."""
    return [L * np.tan(angle) for angle in thetas]
def Phi_list_1(r_1,r_2,r_list):
    """Half-angle subtended by the overlap in circle 1, via the law of
    cosines, for every centre separation R in r_list."""
    angles = []
    for R in r_list:
        angles.append(np.arccos((R**2 + r_1**2 - r_2**2)/(2*r_1*R)))
    return angles
def Phi_list_2(r_1,r_2,r_list):
    """Half-angle subtended by the overlap in circle 2, via the law of
    cosines, for every centre separation R in r_list."""
    angles = []
    for R in r_list:
        angles.append(np.arccos((R**2 + r_2**2 - r_1**2)/(2*r_2*R)))
    return angles
def Area_list(r_1,r_2,phi_1_list,phi_2_list):
    """Total overlap (lens) area per angle: the sum of one circular
    segment from each circle, r^2 * (phi - sin(2*phi)/2)."""
    def segment(radius, phi):
        # Area of the circular segment with half-angle phi.
        return (radius**2)*(phi -(0.5*np.sin(2*phi)))
    return [segment(r_1, p1) + segment(r_2, p2)
            for p1, p2 in zip(phi_1_list, phi_2_list)]
############# Actual Calculation of the Over-Lapping Areas ###############
# Beam angles in radians, from 0 up to pi/15 (= 12 degrees) in assorted
# steps.  NOTE(review): theta = 0 gives R = 0, which divides by zero
# inside Phi_list_1/2 (numpy emits a warning and yields nan) -- confirm.
thetas = [0,np.pi/180,np.pi/90,np.pi/60,np.pi/45,np.pi/36,
          np.pi/30,np.pi/27.5,np.pi/18,np.pi/15]
# Detector radii (units as used in the experiment -- presumably cm; confirm).
r_1 = 4
r_2 = 4.25
# Centre separations at the two flight distances L = 15 and L = 30.
r_list_15 = R_list(15,thetas)
r_list_30 = R_list(30,thetas)
# Overlap half-angles in each circle, for both distances.
phi_1_list_15 = Phi_list_1(r_1,r_2,r_list_15)
phi_1_list_30 = Phi_list_1(r_1,r_2,r_list_30)
phi_2_list_15 = Phi_list_2(r_1,r_2,r_list_15)
phi_2_list_30 = Phi_list_2(r_1,r_2,r_list_30)
# Overlapping areas per angle for each distance.
area_list_15 = Area_list(r_1,r_2,phi_1_list_15,phi_2_list_15)
area_list_30 = Area_list(r_1,r_2,phi_1_list_30,phi_2_list_30)
print(area_list_15)
print(area_list_30)
| true |
06cef092fa6843353da8c9173fae357d20837fea | Python | DaniloFreireHP/Covid19PWA | /src/service/service.py | UTF-8 | 331 | 2.578125 | 3 | [] | no_license | import requests
import json
def getInfoEstados():
    """Fetch per-state data from the COVID portal endpoint.

    Returns ``(parsed_json, message)`` on HTTP success and
    ``(False, message)`` when the status code is >= 300.
    NOTE(review): both messages talk about saving "profissionais" even
    though this endpoint returns state/region data -- they look
    copy-pasted from another function; confirm the intended wording.
    """
    r = requests.get("https://xx9p7hp1p7.execute-api.us-east-1.amazonaws.com/prod/PortalEstadoRegiao")
    if r.status_code >= 300:
        return False, "Não foi possível salvar profissisionais"
    else:
        return r.json(), "Profissionais salvos"
# Module-level smoke call: performs the HTTP request on import.
print(getInfoEstados())
5b131c993a7dfd81d449b582d498a4b35d8b5420 | Python | isabellabvo/Design-de-Software | /Números primos.py | UTF-8 | 842 | 4.34375 | 4 | [] | no_license | #---------ENUNCIADO---------#
'''
Escreva uma função que recebe um número e verifica se é ou não um número primo.
Para fazer essa verificação, calcule o resto da divisão do número por 2 e depois por todos os números ímpares até o número recebido.
Se o resto de uma dessas divisões for igual a zero, o número não é primo.
Observe que 0 e 1 não são primos e que 2 é o único número primo que é par (adaptado do Ex. 5.23 livro do Nilo Ney).
Sua função deve retornar True ou False.
Observação: este exercício vai te ajudar nos exercícios 32, 33, 34, 51 e 75 ;)
O nome da sua função deve ser eh_primo.
'''
#----------CÓDIGO-----------#
def eh_primo(num):
    """Return True when ``num`` is a prime number, False otherwise.

    Covers the edge cases spelled out in the exercise (0 and 1 are not
    prime; 2 is the only even prime) and additionally rejects negative
    numbers, which the original accidentally reported as prime because
    its ``while i < num`` loop never ran for values below 2.  Trial
    division only needs odd divisors up to sqrt(num), unlike the
    original's full scan up to num.
    """
    if num < 2:
        # 0, 1 and negatives are not prime.
        return False
    if num == 2:
        return True
    if num % 2 == 0:
        return False
    divisor = 3
    # Any composite num has a factor <= sqrt(num).
    while divisor * divisor <= num:
        if num % divisor == 0:
            return False
        divisor += 2
    return True
| true |
bcbb00c82b9c295c8e12cdcdd220b2fa5b55f7bd | Python | hi2gage/csci127 | /Labs/Test.py | UTF-8 | 119 | 2.984375 | 3 | [] | no_license | def second(x, y, z):
print(x)
print(y)
print(x)
def main():
list = [1, 3, 4]
second(list)
main()
| true |
606aae966b9579e9faf623e4255014bd50dc3f36 | Python | mrmh2/scaleca | /scaleca/cas/ca_life.py | UTF-8 | 3,705 | 2.796875 | 3 | [] | no_license | """CA engine"""
import random
import numpy as np
import pickle
import scipy.misc
from ca_base import CABase
class CA(CABase):
def __init__(self, max_row, max_col):
self.max_row = max_row
self.max_col = max_col
self.array = np.zeros((max_row, max_col), dtype=np.uint8)
def __setitem__(self, key, value):
self.array[key] = value
def nn(self, ri, ci):
ln = [-1, 0, 1]
h8 = [(r, c) for r in ln for c in ln]
h8.remove((0, 0))
nn = sum([self.array[ri+r, ci+c] for r, c in h8])
return nn
def fill_random(self):
xdim, ydim = self.array.shape
for x in range(0, xdim):
for y in range(0, ydim):
self.array[x, y] = random.randint(0, 1)
def sparse_rep(self):
"""Sparse representation of internal array"""
return zip(*np.where(self.array==1))
def inflate_rep(sparse_rep):
print sparse_rep
def save_state(self, filename):
with open(filename, 'wb') as f:
pickle.dump(self.array.shape, f)
pickle.dump(self.sparse_rep(), f)
def save_as_png(self, filename):
xdim, ydim = self.array.shape
outarray = np.zeros((xdim, ydim, 3), dtype=np.uint8)
on = np.where(self.array == 1)
#outarray[zip(*on)] = (255, 255, 255)
scipy.misc.imsave(filename, outarray)
def load_state(self, filename):
with open(filename, 'rb') as f:
shape = pickle.load(f)
new_array = np.zeros(shape, dtype=np.uint8)
sparse_rep = pickle.load(f)
new_array[zip(*sparse_rep)] = 1
self.array = new_array
def update_vote(self):
ln = [-1, 0, 1]
h8 = [(r, c) for r in ln for c in ln]
h8.remove((0, 0))
next_state = np.zeros((self.max_row, self.max_col), np.uint8)
update_rule_l = {s: 0 for s in range(0, 10)}
update_rule_l.update({s: 1 for s in range(5, 10)})
update_rule_l[4] = 1
update_rule_l[5] = 0
# Copies for wrap boundary conditions
self.array[0,:] = self.array[self.max_row-2, :]
self.array[self.max_row-1,:] = self.array[1,:]
self.array[:,0] = self.array[:, self.max_col-2]
self.array[:, self.max_col-1] = self.array[:, 1]
all_cells = [(r, c) for r in range(1, self.max_row-1)
for c in range(1, self.max_col-1)]
for ar, ac in all_cells:
nn = sum([self.array[ar+r, ac+c] for r, c in h8])
next_state[ar, ac] = update_rule_l[self.array[ar, ac] + nn]
self.array = next_state
def update(self):
ln = [-1, 0, 1]
h8 = [(r, c) for r in ln for c in ln]
h8.remove((0, 0))
max_x, max_y = self.array.shape
next_state = np.zeros((max_x, max_y), np.uint8)
update_rule_l = {s: 0 for s in range(0, 9)}
update_rule_l[2] = 1
update_rule_l[3] = 1
# Copy for wrap boundary conditions
all_cells = [(r, c) for r in range(1, max_y-1)
for c in range(1, max_x-1)]
for ar, ac in all_cells:
nn = sum([self.array[ar+r, ac+c] for r, c in h8])
#print nn
if self.array[ar, ac] == 1:
next_state[ar, ac] = update_rule_l[nn]
else:
next_state[ar, ac] = 1 if nn == 3 else 0
self.array = next_state
#print h8
# print self.array
# print 'nn', self.nn(10, 10)
# ar, ac = 10, 10
# for r, c in h8:
# print ar+r, ac+c, self.array[ar+r, ac+c]
# print [self.array[ar+r, ac+c] for r, c in h8]
| true |
4affb62c2921394b186ef0b896b99d21eec77bf5 | Python | brandon-ha/CS175_NLP | /TranslationWeeb/src/encDecoderLSTM.py | UTF-8 | 1,671 | 2.796875 | 3 | [] | no_license | from __future__ import unicode_literals, print_function, division
import torch
import torch.nn as nn
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class EncoderRNN(nn.Module):
    """Two-layer unidirectional LSTM encoder over embedded token ids.

    forward() consumes a (seq_len, batch) LongTensor of source-vocabulary
    token ids and returns the LSTM's final (hidden_state, cell_state) pair.
    """

    def __init__(self, input_size, embedding_size, hidden_size):
        super().__init__()
        # Submodule construction order kept stable: embedding, LSTM, dropout.
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.lstm = nn.LSTM(embedding_size, hidden_size=hidden_size, num_layers=2, bidirectional=False)
        self.dropout = nn.Dropout(0.5)

    def forward(self, input):
        # Embed and regularise the tokens, then run the recurrent stack.
        # Only the final states are needed downstream, so the per-step
        # outputs are discarded.
        dropped = self.dropout(self.embedding(input))
        _, (hidden_state, cell_state) = self.lstm(dropped)
        return hidden_state, cell_state
class DecoderRNN(nn.Module):
    """Two-layer LSTM decoder producing vocabulary logits one step at a time.

    forward() takes a (batch,) LongTensor of the previous tokens plus the
    current (hidden_state, cell_state), and returns (batch, output_size)
    logits together with the updated states.
    """

    def __init__(self, input_size, embedding_size, hidden_size, output_size):
        super().__init__()
        # Submodule construction order kept stable: dropout, embedding,
        # LSTM, linear head.
        self.dropout = nn.Dropout(0.5)
        self.embedding = nn.Embedding(input_size, embedding_size)
        self.LSTM = nn.LSTM(embedding_size, hidden_size, num_layers=2)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, input, hidden_state, cell_state):
        # Add the length-1 sequence dimension expected by the LSTM.
        step = input.unsqueeze(0)
        embedded = self.dropout(self.embedding(step))
        outputs, (hidden_state, cell_state) = self.LSTM(embedded, (hidden_state, cell_state))
        # Project to vocabulary logits and drop the sequence dimension.
        predictions = self.fc(outputs).squeeze(0)
        return predictions, hidden_state, cell_state
8889b882c49012c121f5fc74660c3e336db165ce | Python | jpborsi/pizza-hashcode | /src/pizza_hashcode/algorithms/precalculated.py | UTF-8 | 782 | 2.671875 | 3 | [] | no_license | '''
@author: john.borsi
'''
from pizza_hashcode.core.cut import Cut
from pizza_hashcode.core.solution import Solution
from pizza_hashcode.algorithms.solver import Solver
class PrecalculatedSolution(Solver):
    """Solver that replays a solution previously written to a text file
    instead of computing one.

    File format: the first line holds the expected number of cuts; every
    subsequent line holds one cut as space-separated integers, passed as
    the positional arguments of Cut.
    """
    def __init__(self, filename):
        self.solution = Solution()
        with open(filename) as f:
            first_line = True
            for line in f:
                if first_line:
                    # Header line: declared cut count, used only for the
                    # sanity check below.
                    first_line = False
                    self.expected_cuts = int(line.strip('\n'))
                    continue
                self.solution.add_cut(Cut(*[int(x) for x in line.strip('\n').split(' ')]))
        # Sanity check: the file must contain exactly as many cuts as its
        # header declares.  NOTE(review): assert is stripped under -O.
        assert self.expected_cuts == self.solution.num_cuts()
    def get_solution(self, problem):
        # The problem instance is ignored; the canned solution is returned
        # as-is.
        return self.solution