blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
bb1eef089198b7a750e1ab4c41f0190564a23ea8 | Python | enstulen/ITGK | /Øving 9/Oppgave 4.py | UTF-8 | 735 | 3.46875 | 3 | [] | no_license | __author__ = 'Morten Stulen'
def number_of_lines(filename):
    """Return the number of lines in the text file *filename*.

    Uses a context manager so the handle is closed even if reading
    raises (the original leaked it on error), and streams the file
    instead of materialising every line in memory.
    """
    with open(filename, "r") as handle:
        return sum(1 for _ in handle)
# Demo: report the line count of the sample data file.
print(number_of_lines("nummer.txt"))
def number_frequency(filename):
    """Return a dict mapping each stripped line of *filename* to its count.

    Uses collections.Counter for a single O(n) pass instead of calling
    list.count() once per distinct value (O(n*k)), and closes the file
    via a context manager.
    """
    from collections import Counter
    with open(filename, "r") as handle:
        values = [line.strip() for line in handle]
    return dict(Counter(values))
# Demo: print the raw frequency dict, then a "value: count" line per entry.
print(number_frequency("nummer.txt"))
countDict = number_frequency("nummer.txt")
for key, value in countDict.items():
    print(str(key) + ": " + str(value))
# for i in countDict:
#     print(str(i) + ": " + str(countDict[i]))
| true |
4acc08e93e72e3d2920207011d7ff297c7f8dac9 | Python | Aasthaengg/IBMdataset | /Python_codes/p03244/s465405697.py | UTF-8 | 2,236 | 2.75 | 3 | [] | no_license | import sys
from sys import exit
from collections import deque
from bisect import bisect_left, bisect_right, insort_left, insort_right #func(リスト,値)
from heapq import heapify, heappop, heappush
from itertools import product, permutations, combinations, combinations_with_replacement
from functools import reduce
from math import sin, cos, tan, asin, acos, atan, degrees, radians
sys.setrecursionlimit(10**6)  # allow deep recursion (DFS on large inputs)
INF = 10**20   # effectively-infinite sentinel
eps = 1.0e-20  # float comparison tolerance
MOD = 10**9+7  # common contest modulus (prime)
def lcm(x, y):
    """Least common multiple of x and y.

    Bug fix: the file imports trig helpers from math but never imported
    gcd, so calling this raised NameError; import it locally.
    """
    from math import gcd
    return x * y // gcd(x, y)
def lgcd(l):
    """GCD of every element of the iterable *l*.

    Bug fix: math.gcd was never imported at module level; import it
    locally so the reduce actually works.
    """
    from math import gcd
    return reduce(gcd, l)
def llcm(l):
    """LCM of every element of *l*.

    Made self-contained: the module-level lcm helper depended on an
    un-imported gcd, so fold with a local lcm built on math.gcd.
    """
    from math import gcd
    return reduce(lambda x, y: x * y // gcd(x, y), l)
def powmod(n, i, mod):
    """pow(n, i) modulo *mod*; a negative i is mapped to the exponent
    mod-1+i, i.e. the modular inverse power (valid for prime mod,
    by Fermat's little theorem)."""
    exponent = i if i >= 0 else mod - 1 + i
    return pow(n, exponent, mod)
def div2(x):
    """Number of binary digits of x (index of highest set bit + 1)."""
    return int.bit_length(x)
def div10(x):
    """Number of decimal digits of x, with div10(0) == 0."""
    digits = len(str(x))
    if x == 0:
        digits -= 1
    return digits
def perm(n, mod=None):
    """Return n!, reduced modulo *mod* after every multiplication when
    mod is given (keeps intermediates small for contest-sized n).

    Idiom fix: `mod != None` replaced by the identity test.
    """
    ans = 1
    for i in range(1, n + 1):
        ans *= i
        if mod is not None:
            ans %= mod
    return ans
def intput():
    """Read a single integer from standard input."""
    line = input()
    return int(line)
def mint():
    """Read one line of whitespace-separated integers; return a map iterator."""
    tokens = input().split()
    return map(int, tokens)
def lint():
    """Read one line of whitespace-separated integers into a list."""
    return [int(tok) for tok in input().split()]
def ilint():
    """Read an integer N from one line, then a list of integers from the next."""
    n = int(input())
    values = [int(tok) for tok in input().split()]
    return n, values
def judge(x, l=['Yes', 'No']):
    """Print l[0] for a truthy *x*, otherwise l[1] (AtCoder-style verdict)."""
    if x:
        print(l[0])
    else:
        print(l[1])
def lprint(l, sep='\n'):
    """Print every element of *l*, each terminated by *sep* (default newline)."""
    for item in l:
        print(item, end=sep)
def ston(c, c0='a'):
    """Offset of character *c* from the base character *c0* (e.g. 'c' -> 2)."""
    return ord(c) - ord(c0)
def ntos(x, c0='a'):
    """Inverse of ston: the character at offset *x* from *c0*."""
    return chr(ord(c0) + x)
class counter(dict):
    """dict subclass used as a simple frequency counter.

    Bug fix: the original did super().__init__(args), passing the *args
    tuple itself to dict(), which raises for any non-empty argument
    (e.g. counter({'a': 1})); forward the arguments instead.
    """

    def __init__(self, *args):
        super().__init__(*args)

    def add(self, x, d=1):
        """Increase the count of key *x* by *d* (default 1)."""
        self.setdefault(x, 0)
        self[x] += d
class comb():
    """Lazily-extended row of binomial coefficients C(n, k) for a fixed n.

    When *mod* is given, values are kept reduced and the recurrence uses
    modular inverses via the module-level powmod helper (prime mod only).
    """
    def __init__(self, n, mod=None):
        self.l = [1]        # cache: l[k] == C(n, k), grown on demand
        self.n = n
        self.mod = mod
    def get(self, k):
        """Return C(n, k), extending the cache up to k if needed."""
        l, n, mod = self.l, self.n, self.mod
        # Exploit symmetry C(n, k) == C(n, n-k) so the cache never grows past n//2.
        k = n-k if k>n//2 else k
        while len(l)<=k:
            i = len(l)
            # Recurrence C(n, i) = C(n, i-1) * (n+1-i) / i, exact // without mod,
            # multiplication by the modular inverse of i with mod.
            l.append(l[i-1]*(n+1-i)//i if mod==None else (l[i-1]*(n+1-i)*powmod(i,-1,mod))%mod)
        return l[k]
# Solution script (AtCoder p03244): make the sequence /\-alternating, i.e.
# all even indices hold one value and all odd indices a different value,
# with the minimum number of element rewrites.
N, V = ilint()
odd = counter()
even = counter()
# Tally values separately for even and odd positions.
for i in range(N):
    if i%2==0:
        even.add(V[i])
    else:
        odd.add(V[i])
# (value, count) pairs, with a (0, 0) sentinel so index [1] always exists.
E = [(k,even[k]) for k in even]+[(0,0)]
O = [(k,odd[k]) for k in odd]+[(0,0)]
E.sort(reverse=True, key=lambda x:x[1])
O.sort(reverse=True, key=lambda x:x[1])
if E[0][0]!=O[0][0]:
    # Most common even value differs from most common odd value: keep both.
    print(N-E[0][1]-O[0][1])
else:
    # They clash; demote the runner-up on one of the two sides.
    print(min(N-E[1][1]-O[0][1],N-E[0][1]-O[1][1]))
b761ed4e299aeee65740acfd254a50b69bd03916 | Python | eubinecto/examples | /skipgram/numpy/utils/initialisation.py | UTF-8 | 1,389 | 3.3125 | 3 | [] | no_license | import numpy as np
# (functional) implementation of skip-gram model.
def initialize_wrd_emb(vocab_size: int, emb_size: int) -> np.ndarray:
    """Draw the (vocab_size, emb_size) word-embedding matrix.

    vocab_size: vocabulary size of the training corpus.
    emb_size: embedding dimensionality (e.g. 100/200/300); larger means
    slower training but usually better vectors.
    Entries are standard-normal samples scaled by 0.01 so optimisation
    starts from small weights.
    """
    scale = 0.01
    WRD_EMB = scale * np.random.randn(vocab_size, emb_size)
    return WRD_EMB
def initialize_dense(input_size: int, output_size: int) -> np.ndarray:
    """Draw the (output_size, input_size) projection ("dense") matrix.

    No bias term is used, which keeps the layer a purely linear map.
    Entries are standard-normal samples scaled by 0.01.
    """
    W = np.random.randn(output_size, input_size)
    return W * 0.01
def initialize_parameters(vocab_size, emb_size) -> dict:
    """Build and bundle every trainable parameter of the skip-gram model."""
    return {
        'WRD_EMB': initialize_wrd_emb(vocab_size, emb_size),
        'W': initialize_dense(emb_size, vocab_size),
    }
| true |
class Solution:
    """LeetCode 78 -- enumerate all subsets of a list of distinct ints."""

    def subsets(self, nums: List[int]) -> List[List[int]]:
        """Return every subset of *nums*, starting with the empty set."""
        self.res = [[]]
        self.nums = nums
        for i in range(len(nums)):
            self.dfs(i, [])
        return self.res

    def dfs(self, x, tmp):
        """Append the subset tmp + [nums[x]] and extend it recursively.

        Building a fresh list per call replaces the original's
        copy.deepcopy (which also relied on an un-imported `copy`
        module); for flat int lists the result is identical and cheaper.
        """
        if x < len(self.nums):
            cur = tmp + [self.nums[x]]
            self.res.append(cur)
            for k in range(x + 1, len(self.nums)):
                self.dfs(k, cur)
294065cbd26f109d77244afd77990f4debf9faed | Python | pratikv06/Python-Crash-Course | /6_dictionaries/5_nesting_dictionary.py | UTF-8 | 1,254 | 3.578125 | 4 | [] | no_license | print(">> Storing dictionary in a list")
# Example 1: a list whose elements are dictionaries.
alien_0 = {
    'color': 'green',
    'point': 5,
}
alien_1 = {
    'color': 'red',
    'point': 10,
}
alien_2 = {
    'color': 'yellow',
    'point': 15,
}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
    print(alien)
# Example 2: a dictionary whose value is a list.
print(">> Storing list in dictionary")
pizzas = {
    'crust': 'thick',
    'toppings': ['mushrooms', 'extra cheese'],
}
print("Order Summary:")
print("You ordered a "+ pizzas['crust'].title()+ "-crust pizza with the following topping")
for topping in pizzas['toppings']:
    print("\t"+ topping)
# Example 3: dictionaries nested inside a dictionary, keyed by user id.
print("\n>> Storing dictionary in dictionary")
users = {
    'coolalien': {
        'firstname': 'nilesh',
        'lastname': 'yadav',
        'city': 'pune',
        'company': 'hdfc',
    },
    'gpool': {
        'firstname': 'gaurav',
        'lastname': 'sharma',
        'city': 'banglore',
        'company': 'axis',
    },
}
# Iterate (key, value) pairs: userid is the outer key, userinfo the inner dict.
for userid, userinfo in users.items():
    print("User Id: "+ userid)
    fullname = userinfo['firstname'] +" "+ userinfo['lastname']
    location = userinfo['city']
    companyname = userinfo['company']
    print("\tFullname : "+ fullname.title())
    print("\tComapny Name : "+ companyname.title())
    print("\tLocation :"+ location.title())
a7788f4c78349d68c33c4f2345c27f5b5c2c6f13 | Python | chtran/easy_rl | /utils/cem_optimizer.py | UTF-8 | 944 | 3.09375 | 3 | [] | no_license | import numpy as np
class CEMOptimizer:
    """Cross-Entropy Method optimiser over an n_in-dimensional input.

    Repeatedly samples from a multivariate Gaussian, keeps the top
    n_elites samples by fn value, and refits mean/variance to them.
    NOTE(review): the `distribution` parameter is accepted but never
    used -- sampling is always Gaussian.
    """
    def __init__(self, fn, n_in, n_iters=2, n_samples=64, n_elites=6, distribution='Gaussian'):
        self.fn = fn                # objective to maximise, fn(x) -> scalar
        self.n_in = n_in            # dimensionality of the search space
        self.n_iters = n_iters
        self.n_samples = n_samples
        self.n_elites = n_elites
        self.mean = np.zeros(self.n_in)
        self.var = np.diag(np.ones(self.n_in))   # diagonal covariance
    def optimize(self):
        """Run n_iters CEM rounds; return the best (x, fn(x)) of the last round."""
        for i in range(self.n_iters):
            x = np.random.multivariate_normal(self.mean, self.var, size=self.n_samples)
            results = []
            for j in range(x.shape[0]):
                results.append((x[j,:], self.fn(x[j, :])))
            # Highest objective value first.
            sorted_results = sorted(results, key=lambda tup: tup[1], reverse=True)
            elites = [tup[0] for tup in sorted_results[:self.n_elites]]
            self.mean = np.mean(elites, axis=0)
            # ddof=1: sample (unbiased) variance of the elite set.
            self.var = np.diag(np.var(elites, axis=0, ddof=1))
        return sorted_results[0]
2dd407b0ffb4473551bb24e210780325c10e985f | Python | gotechnica/mobile-backened | /lambda_functions/register_new_user.py | UTF-8 | 1,246 | 2.828125 | 3 | [] | no_license | import boto3
import json
'''
This lambda takes a json describing the user and checks if a duplicate one already exists based on the email.
It returns status code 200 if the table does not contain duplicates and returns status code 400 if such a user exists with the same information.
'''
def lambda_handler(event, context):
    """Register a new user in the Technica-Data DynamoDB table.

    Rejects the request (by raising) when a user with the same email
    already exists; otherwise writes the full user record and returns a
    200 response whose body is the raw put_item response.
    """
    # Given user data save in dynamoDB
    client = boto3.client('dynamodb')
    # Duplicate check: email is the table's primary key.
    response = client.get_item(TableName='Technica-Data',Key={'email':{'S':event['email']}})
    if 'Item' in response:
        # NOTE(review): a generic Exception maps to a Lambda error, not a 400.
        raise Exception('Invalid input: user already exists.')
    # DynamoDB attribute-value format: 'S' = string, 'BOOL' = boolean,
    # 'SS' = string set.
    response = client.put_item(TableName='Technica-Data',Item={
        'email':{'S':event['email']},
        'first_name':{'S':event['first_name']},
        'last_name':{'S':event['last_name']},
        'minor_status':{'BOOL':event['minor_status']},
        'organizer':{'BOOL':event['organizer']},
        'other':{'S':event['other']},
        'phone':{'S':event['phone']},
        'shirt_size':{'S':event['shirt_size']},
        'university':{'S':event['university']},
        'dietary_restrictions':{'SS':event['dietary_restrictions']}})
    return {"statusCode": 200, \
        "headers": {"Content-Type": "application/json"}, \
        "body": response}
| true |
042c41982eae61cda1c0fa7ddcba423d1ecc8550 | Python | OishinSmith/Python-exercises | /2017-02-09/ca117/smitho25/password_12.py | UTF-8 | 689 | 3.3125 | 3 | [] | no_license | import sys
import string
# For each stdin line (a candidate password), print how many of the four
# character classes it contains: digits, uppercase, punctuation, lowercase.
lines = sys.stdin.readlines()
for sentence in lines:
    seen = []     # character classes already counted for this line
    count = 0
    # Iterate the line character by character ("word" is a single char).
    for word in sentence:
        if string.digits not in seen and word in string.digits:
            count = count + 1
            seen.append(string.digits)
        elif string.ascii_uppercase not in seen and word in string.ascii_uppercase:
            count = count + 1
            seen.append(string.ascii_uppercase)
        elif string.punctuation not in seen and word in string.punctuation:
            count = count + 1
            seen.append(string.punctuation)
        elif string.ascii_lowercase not in seen and word in string.ascii_lowercase:
            count = count + 1
            seen.append(string.ascii_lowercase)
    print(count)
622c4366645fe28074b764930a62f8e61b2104cc | Python | 91xcode/jShellscript | /bin/template/src/jptjieba/l3_pseg_带标签.py | UTF-8 | 277 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python3
# coding: utf-8
import jieba
import jieba.posseg as pseg
##################################################################
# Segment the Chinese sentence and print each token with its POS tag.
result = pseg.cut("我爱中国, 我爱家乡, 我爱亲人")
for w in result: print(w.word, "/", w.flag, ", ", end=' ') # with POS tags
| true |
7a388e8440d15b82e82de716743572b825d7192e | Python | raymond-devries/algo-trading | /algo_trading/tests/data_flow/test_data_flow.py | UTF-8 | 1,123 | 2.890625 | 3 | [] | no_license | import pandas as pd
import pytest
from random import randint, seed, uniform
from algo_trading.data_flow import data_flow
def get_fake_df(*args):
    """Deterministic fake OHLCV frame (fixed seed) used to stub out
    PolygonBackTestDataFlow.get_dataframe in the tests below."""
    columns = ["volume", "open", "close", "high", "low"]
    frame = pd.DataFrame(columns=columns)
    seed(35)
    rows = 20
    # Same draw order as before: volume first, then the price columns.
    frame["volume"] = [randint(1000, 100000) for _ in range(rows)]
    for price_col in ("open", "close", "high", "low"):
        frame[price_col] = [uniform(10, 20) for _ in range(rows)]
    return frame
@pytest.fixture
def patch_get_dataframe(monkeypatch):
    """Replace the real (network-backed) get_dataframe with the fake one."""
    monkeypatch.setattr(data_flow.PolygonBackTestDataFlow, "get_dataframe", get_fake_df)
def test_polygon_backtest_get_next(patch_get_dataframe):
    """The data generator must yield growing prefixes: 6 rows, then 7, ...,
    ending with the full 20-row frame."""
    instance = data_flow.PolygonBackTestDataFlow("TICKER", 1, "minute", "2020-1-1", "2020-2-1", 6)
    df = instance.get_dataframe()
    generator = instance.get_data_generator()
    pd.testing.assert_frame_equal(next(generator), df[:6])
    pd.testing.assert_frame_equal(next(generator), df[:7])
    pd.testing.assert_frame_equal(list(generator)[-1], df)
| true |
6d94171dad05ac0a8e89b3e6c9532c8889c3ca48 | Python | jotd666/amiga68ktools | /compilation_maker/extras/pathTableUtil.py | UTF-8 | 8,019 | 2.75 | 3 | [] | no_license |
import sys
import struct
from collections import deque
# Command-line interface: pathTableUtil.py (print|uppercase) isoFile
# "uppercase" rewrites the image in place, so it reopens read/write.
if len(sys.argv) == 3 and sys.argv[1] in ("print", "uppercase"):
    operation = sys.argv[1]
    isoFile = open(sys.argv[2], "rb")
    if "uppercase" == operation:
        isoFile = open(sys.argv[2], "rb+")
else:
    raise SystemExit("Usage: " + sys.argv[0].split('/')[-1] + " operation (print/uppercase) isoFile")
sectorSize = 2048
# Volume descriptors start at sector 16 of an ISO 9660 image.
isoFile.seek(sectorSize * 0x10)
class DirectoryEntry:
    """One ISO 9660 directory record, parsed from raw bytes.

    Only the big-endian halves of the dual-endian fields are read
    (the format string skips the little-endian copies with 'x' pads).
    """
    def __init__(self, data):
        self.headerLength = 33   # fixed part of a directory record
        self.recordLen, self.extRecordLen, self.extentLoc, self.extentDataLen, self.timestamp, self.flags, self.unitFlags, self.gapSize, self.volSeqNum, self.fileIdLen = struct.unpack(">BB4xI4xI7sBBB2xHB", data[:self.headerLength])
        self.data = data[:self.recordLen]
        # File identifier immediately follows the fixed header.
        self.fileId = self.data[self.headerLength:self.headerLength + self.fileIdLen]
    def isEmpty(self):
        """A zero record length marks padding at the end of a block."""
        return 0 == self.recordLen
    def __repr__(self):
        # NOTE(review): self.fileId is bytes under Python 3; joining it with
        # str pieces would raise -- confirm intended Python version.
        return ",".join([self.fileId, str(self.recordLen)])
class PrimaryVolumeDescriptor:
    """Fields of interest from an ISO 9660 primary volume descriptor:
    logical block size, path-table size/locations, and the root entry."""
    def __init__(self, volumeDescriptorData):
        # Big-endian block size, path table size and MSB path table location.
        self.logicalBlockSize, self.pathTableSize, self.pathTableLocMSB = struct.unpack(">2xH4xI8xI", volumeDescriptorData[128:128 + 4 + 8 + 8 + 4])
        # Little-endian path table location lives at offset 140.
        self.pathTableLocLSB = struct.unpack("<I", volumeDescriptorData[140:140 + 4])[0]
        # Root directory record (34 bytes) starts at offset 156.
        self.rootDirEntry = DirectoryEntry(volumeDescriptorData[156:156 + 34])
def getPrimaryVolumeDescriptor(isoFile):
    """Scan volume descriptors from the current file position.

    Returns the first primary volume descriptor (type code 1), or None
    if the terminator descriptor (type code 255) is reached first.
    """
    terminatorCode = 255
    primaryVolumeDescriptorCode = 1
    while True:
        volumeDescriptorData = isoFile.read(sectorSize)
        volumeDescriptorCode = struct.unpack("B", volumeDescriptorData[0:1])[0]
        if volumeDescriptorCode == terminatorCode:
            return None
        elif volumeDescriptorCode == primaryVolumeDescriptorCode:
            return PrimaryVolumeDescriptor(volumeDescriptorData)
class PathTableEntry:
    """One ISO 9660 path-table record plus tree links (parent/children)
    that are attached afterwards by PathTable."""
    def __init__(self, entryDataStart, littleEndian, position):
        self.littleEndian = littleEndian
        self.position = position      # byte offset inside the path table
        self.headerLength = 8
        nameLen, self.extentLen, self.extentLoc, self.parentNum = struct.unpack(self.getHeaderStruct(), entryDataStart[:self.headerLength])
        self.name = entryDataStart[self.headerLength:self.headerLength + nameLen]
        self.children = []
    def __repr__(self):
        return self.name + "'," + ",".join((str(self.parentNum), str(self.position), str(self.getSize())))
    def getHeaderStruct(self):
        """struct format for the fixed header, with the right byte order."""
        headerStruct = "BBIH"
        if self.littleEndian:
            return "<" + headerStruct
        else:
            return ">" + headerStruct
    def getSize(self):
        """Record size: header + name, padded to an even byte count."""
        nameLen = len(self.name)
        return self.headerLength + nameLen + nameLen % 2
    def getRangeString(self):
        """Human-readable 'start-end' byte range of this record."""
        start = self.position
        end = start + self.getSize() - 1
        return "{0:05d}-{1:05d}".format(start, end)
    def isRoot(self):
        # The root will point to itself
        return self == self.parent
    def getAsData(self):
        """Serialise the record back to bytes (inverse of __init__)."""
        nameLen = len(self.name)
        completeStruct = self.getHeaderStruct() + str(nameLen) + "s" + str(nameLen % 2) + "x"
        data = struct.pack(completeStruct, nameLen, self.extentLen, self.extentLoc, self.parentNum, self.name)
        return data
    def getParents(self):
        """Ancestors from just below the root down to this entry's parent."""
        parents = []
        currParent = self.parent
        while not currParent.isRoot():
            parents.append(currParent)
            currParent = currParent.parent
        parents.reverse()
        return parents
def breadthFirstWalker(rootNode):
    """Yield rootNode and then every descendant, level by level (BFS)."""
    frontier = [rootNode]
    idx = 0
    while idx < len(frontier):
        node = frontier[idx]
        idx += 1
        frontier.extend(node.children)
        yield node
class PathTable:
    """A whole ISO 9660 path table: parsing, tree linking, uppercasing,
    re-sorting and re-serialisation.

    NOTE(review): parsing reads the module-level `descriptor` for the
    table size rather than len(pathTableData).
    """
    def __init__(self, pathTableData, littleEndian):
        self.littleEndian = littleEndian
        self.entries = []
        headerLength = 8
        currentPos = 0
        while currentPos < descriptor.pathTableSize:
            entry = PathTableEntry(pathTableData[currentPos:], self.littleEndian, currentPos)
            self.entries.append(entry)
            currentPos = currentPos + entry.getSize()
        # Setup real parent links, which will survive a list sort
        for entry in self.entries:
            # parentNum is 1-based, entries[] is 0-based.
            entry.parent = self.entries[entry.parentNum - 1]
            if entry != entry.parent: # Avoid the root being its own child also, makes it harder to walk the graph :)
                entry.parent.children.append(entry)
    def getRootEntry(self):
        return self.entries[0]
    def getNonRootEntries(self):
        return self.entries[1:]
    def upperCaseEntries(self):
        """Uppercase every directory name in place."""
        for entry in self.entries:
            entry.name = entry.name.upper()
    def updateParentNums(self):
        """Recompute each child's 1-based parent index after a reorder."""
        for i, entry in enumerate(self.entries):
            for child in entry.children:
                child.parentNum = i + 1
    def sortEntries(self):
        """Sort siblings by name and flatten the tree back to the
        breadth-first order the ISO 9660 spec requires."""
        for entry in self.entries:
            entry.children.sort(key=lambda e: e.name)
        self.entries = [e for e in breadthFirstWalker(self.getRootEntry())]
        self.updateParentNums()
    def getEntriesAsData(self):
        """Serialise every entry back into one bytes blob."""
        data = b""
        for entry in self.entries:
            data += entry.getAsData()
        return data
    def printEntries(self):
        """Print each entry's byte range, depth and full path."""
        for entry in self.entries:
            pathElements = [e.name for e in entry.getParents() + [entry]]
            print(entry.getRangeString() + "(" + str(len(pathElements)) + "): " + '/'.join(x.decode("latin-1") for x in pathElements))
# Locate the primary volume descriptor (module-level state used below).
descriptor = getPrimaryVolumeDescriptor(isoFile)
print("PathTable size: {}".format(descriptor.pathTableSize))
def sortDirEntriesUppercased(descriptor, pathTableEntry):
    """Rewrite one directory extent with its entries sorted by
    uppercased name (version suffix ';N' stripped for the comparison).

    The '.' and '..' records keep their mandatory first two slots, and
    records are never split across logical block boundaries (zero
    padding is inserted instead), as ISO 9660 requires.
    """
    isoFile.seek(pathTableEntry.extentLoc * descriptor.logicalBlockSize)
    extentData = isoFile.read(descriptor.logicalBlockSize)
    dirEntry = DirectoryEntry(extentData)
    # Pull in the rest of the extent if it spans more than one block.
    extentData += isoFile.read(max(0, dirEntry.extentDataLen - descriptor.logicalBlockSize))
    currentPos = dirEntry.recordLen
    parentDirEntry = DirectoryEntry(extentData[currentPos:])
    currentPos += parentDirEntry.recordLen
    childDirEntries = []
    while currentPos < dirEntry.extentDataLen - 33:
        childDirEntry = DirectoryEntry(extentData[currentPos:])
        currentPos += childDirEntry.recordLen
        if childDirEntry.isEmpty():
            # Zero record length = block padding; jump to the next block.
            spaceLeftInBlock = descriptor.logicalBlockSize - (currentPos % descriptor.logicalBlockSize)
            currentPos += spaceLeftInBlock
            continue
        childDirEntries.append(childDirEntry)
    # Write back: '.', '..', then the children in case-folded name order.
    isoFile.seek(pathTableEntry.extentLoc * descriptor.logicalBlockSize)
    currentPos = 0
    for dirEntry in [dirEntry, parentDirEntry] + sorted(childDirEntries, key=lambda e: e.fileId.rsplit(b";",1)[0].upper()):
        spaceLeftInBlock = descriptor.logicalBlockSize - (currentPos % descriptor.logicalBlockSize)
        if len(dirEntry.data) > spaceLeftInBlock:
            # Record would straddle a block boundary: pad to the next block.
            isoFile.write(b'\0' * spaceLeftInBlock)
            currentPos += spaceLeftInBlock
        isoFile.write(dirEntry.data)
        currentPos += len(dirEntry.data)
    # Zero-fill the remainder of the final block.
    spaceLeftInBlock = descriptor.logicalBlockSize - (currentPos % descriptor.logicalBlockSize)
    isoFile.write(b'\0' * spaceLeftInBlock)
# Big endian path table is what is used on the CD32
isoFile.seek(descriptor.pathTableLocMSB * descriptor.logicalBlockSize)
pathTableMSB = PathTable(isoFile.read(descriptor.pathTableSize), False)
# Also process the little endian path table for completeness sake
isoFile.seek(descriptor.pathTableLocLSB * descriptor.logicalBlockSize)
pathTableLSB = PathTable(isoFile.read(descriptor.pathTableSize), True)
# Test comparison
#isoFile.seek(descriptor.pathTableLocMSB * descriptor.logicalBlockSize)
#pathTableMSBData = isoFile.read(descriptor.pathTableSize)
#testDataMSB = pathTableMSB.getEntriesAsData()
#print "TestDataMSBLength:", len(testDataMSB)
#print "MatchMSB:", pathTableMSBData == testDataMSB
if "uppercase" == operation:
    # Rewrite both path tables with uppercased, re-sorted names, then fix
    # every directory extent so its records match the new ordering.
    pathTableMSB.upperCaseEntries()
    pathTableMSB.sortEntries()
    isoFile.seek(descriptor.pathTableLocMSB * descriptor.logicalBlockSize)
    isoFile.write(pathTableMSB.getEntriesAsData())
    print("Uppercased and resorted MSB path table!")
    pathTableLSB.upperCaseEntries()
    pathTableLSB.sortEntries()
    isoFile.seek(descriptor.pathTableLocLSB * descriptor.logicalBlockSize)
    isoFile.write(pathTableLSB.getEntriesAsData())
    print("Uppercased and resorted LSB path table!")
    for entry in pathTableMSB.entries:
        sortDirEntriesUppercased(descriptor, entry)
    print("Sorted directory entries in uppercased name order!")
    isoFile.close()
if "print" == operation:
    pathTableMSB.printEntries()
| true |
7251f1f7988a0adaa1bb95309d86d70f10fc6ada | Python | RenShuhuai-Andy/my-tools | /python_script/visualization/tensorboardx.py | UTF-8 | 607 | 2.59375 | 3 | [] | no_license | # reference: https://zhuanlan.zhihu.com/p/36946874
# https://zhuanlan.zhihu.com/p/37022051
# https://pytorch.apachecn.org/docs/1.2/intermediate/tensorboard_tutorial.html
from tensorboardX import SummaryWriter
# Snippet meant to live inside a training loop: `epoch`, `i`,
# `train_loader`, `loss`, `loss_1`, `loss_2` and `model` must already be
# defined by the surrounding code -- this file does not define them.
writer = SummaryWriter('./log')
total_steps = epoch * len(train_loader) + i
# draw scalar(s)
# single line
writer.add_scalar('loss', loss, total_steps)
# multi lines in one figure
writer.add_scalars('losses', {'loss_1': loss_1, 'loss_2': loss_2}, total_steps)
# draw histogram
for pi, (name, param) in enumerate(model.named_parameters()):
    writer.add_histogram(name, param, 0)
writer.close()
40d821191f0d6512fbc8f8627ea83210417cde14 | Python | forestyaser/Risk_Control | /src/main/data/simple_data_clean.py | UTF-8 | 3,976 | 2.6875 | 3 | [] | no_license | import numpy
import pandas
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import DecisionTreeClassifier
from xgboost import XGBClassifier
from main.algo.CustomizedAdaBoostClassifier import CustomizedAdaBoostClassifier
from main.data.DataCleaner import DataCleaner
RAW_DATA_FILE_PATH = '/var/qindom/riskcontrol/data/risk_all_label_data.csv'
TEST_DATA_FILE_PATH = '/var/qindom/riskcontrol/data/jan_data.csv'
def new_profit_cal(pp, pf, fp, ff):
    """Profit delta of lending only to predicted-good applicants.

    pp/pf/fp/ff are confusion-matrix counts (actual good/bad x predicted
    good/bad, where labels 0/-1 mean "good").  Constants: 10000
    applicants, 300 revenue per instalment (x4 instalments), 1200 loss
    per defaulting applicant.
    """
    POPULATION = 10000
    total = pp + pf + fp + ff
    baseline_good_rate = (pp + pf) / total
    filtered_good_rate = pp / (pp + fp)
    accepted_fraction = (pp + fp) / total
    baseline_profit = (POPULATION * baseline_good_rate * 300 * 4
                       - 300 * POPULATION
                       - 1200 * POPULATION * (1 - baseline_good_rate))
    filtered_profit = (POPULATION * filtered_good_rate * 300 * 4
                       - 300 * POPULATION / accepted_fraction
                       - 1200 * POPULATION * (1 - filtered_good_rate))
    return filtered_profit - baseline_profit
def customize_acc(y_true, y_pred):
    """Confusion counts + profit delta for binary risk labels.

    Labels 0 or -1 mean "good", anything else "bad".  Returns
    (profit_delta, pp, pf, fp, ff).

    Bug fix: the original only *printed* on None/length-mismatch input
    and then fell through to new_profit_cal(0, 0, 0, 0), which raises
    ZeroDivisionError; raise ValueError explicitly instead.
    """
    if y_true is None or y_pred is None:
        raise ValueError('y_true and y_pred must not be None')
    if len(y_pred) != len(y_true):
        raise ValueError('length mismatch: %d vs %d' % (len(y_pred), len(y_true)))
    good = (0, -1)
    count_p_p = count_p_f = count_f_p = count_f_f = 0
    for truth, pred in zip(y_true, y_pred):
        if truth in good:
            if pred in good:
                count_p_p += 1
            else:
                count_p_f += 1
        else:
            if pred in good:
                count_f_p += 1
            else:
                count_f_f += 1
    return new_profit_cal(count_p_p, count_p_f, count_f_p, count_f_f), count_p_p, count_p_f, count_f_p, count_f_f
def customize_y(y):
    """Map {0, 1} labels to {-1, +1}.

    Bug fix: the original wrote through numpy.asarray(y), which aliases
    (and therefore mutates) *y* whenever it is already an ndarray; copy
    first and use one vectorised expression instead of a Python loop.
    """
    z = numpy.array(y)   # numpy.array always copies, unlike asarray
    return z * 2 - 1
def ensemble(results):
    """Strict-majority vote across classifier outputs.

    results: list of equal-length 0/1 prediction lists.  Position i of
    the returned list is 1 when more than half of the classifiers voted
    1 at position i, else 0.
    """
    votes_needed = len(results) / 2
    combined = []
    for pos in range(len(results[0])):
        tally = sum(result[pos] for result in results)
        combined.append(1 if tally > votes_needed else 0)
    return combined
# Train three classifiers on cleaned risk data, then sweep a probability
# threshold and report/export the ensembled predictions per threshold.
data_cleaner = DataCleaner()
df_limited_features = data_cleaner.generate_mapper_and_cleanend_training_data(RAW_DATA_FILE_PATH)
df_limited_test_features = data_cleaner.clean_predict_data_path(TEST_DATA_FILE_PATH)
# Target column is Chinese for "good/bad (1 = bad)".
y_test = df_limited_test_features['好/坏(1:坏)'].values
df_limited_test_features.drop(columns=['好/坏(1:坏)'], inplace=True)
X_test = df_limited_test_features.values.astype(int)
temp_ref = df_limited_features
y = temp_ref['好/坏(1:坏)'].values
temp_ref.drop(columns=['好/坏(1:坏)'], inplace=True)
X = temp_ref.values.astype(int)
# ada = CustomizedAdaBoostClassifier(n_estimators=100)
# ada.fit(X, y)
# result0_tmp = ada.predict(X_test)
d_tree = DecisionTreeClassifier(max_depth=8)
d_tree.fit(X, y)
result1 = d_tree.predict_proba(X_test)
G = GradientBoostingClassifier(max_depth=6, n_estimators=150)
G.fit(X, y)
result2 = G.predict_proba(X_test)
xg = XGBClassifier(max_depth=8, n_estimators=100)
xg.fit(X, y)
result3 = xg.predict_proba(X_test)
# Sweep the class-0 ("good") probability threshold from 0.1 to 0.95.
threshold = 0.1
threshold_dict = {}
while threshold < 0.95:
    print('===========\nthreshold: ', threshold)
    # Predict "good" (0) when P(class 0) exceeds the threshold.
    result1_tmp = list(map(lambda x: 0 if x[0] > threshold else 1, result1))
    result2_tmp = list(map(lambda x: 0 if x[0] > threshold else 1, result2))
    result3_tmp = list(map(lambda x: 0 if x[0] > threshold else 1, result3))
    final_result_list = [result1_tmp, result2_tmp, result3_tmp]
    train_profit, tpp, opf, ofp, off = customize_acc(y_test, ensemble(final_result_list))
    print(threshold, train_profit, tpp, opf, ofp, off, tpp / ofp, (tpp + ofp) / (tpp + opf + ofp + off))
    final_df = pandas.DataFrame({'predict_y': ensemble(final_result_list)})
    final_df.to_csv(str(threshold) + '_jan_pred_result.csv',index=None)
    threshold = threshold + 0.05
| true |
8ec411b1f60f09ab7269abc312adf1a546a65df7 | Python | gloomyfish1998/dl_learning_notes | /tutorial_01.py | UTF-8 | 1,607 | 3.15625 | 3 | [] | no_license | import tensorflow as tf;
# TensorFlow 1.x basics: build a static graph of constant ops, then run
# them through a Session (tf.Session was removed in TF 2.x).
# Addition
node1 = tf.constant(3.3, dtype=tf.float32)
node2 = tf.constant(4.8, dtype=tf.float32)
result1 = tf.add(node1, node2)
# Matrix multiplication (1x2 by 2x1)
node3 = tf.constant([[3.2, 3.8]], dtype=tf.float32)
node4 = tf.constant([[4.5], [5.5]], dtype=tf.float32)
result2 = tf.matmul(node3, node4)
# Element-wise division
node5 = tf.constant([3.2, 3.8], dtype=tf.float32)
node6 = tf.constant([4.5, 5.5], dtype=tf.float32)
result3 = tf.divide(node5, node6)
# Element-wise subtraction
node7 = tf.constant([15, 3], dtype=tf.float32)
node8 = tf.constant([4, 5], dtype=tf.float32)
result4 = tf.subtract(node7, node8)
# Mixed operations: (node9 * node10 - node11) + 3
node9 = tf.constant([12, 14], dtype=tf.float32)
node10 = tf.constant([8, 10], dtype=tf.float32)
node11 = tf.constant([3, 5], dtype=tf.float32)
m1 = tf.multiply(node9, node10)
m2 = tf.subtract(m1, node11)
m3 = tf.add(m2, 3)
sess = tf.Session()
print("\n")
print(sess.run([node1, node2]))
print("result : ", sess.run(result1))
print("\n")
print(sess.run([node3, node4]))
print("result : ", sess.run(result2))
print("\n")
print(sess.run([node5, node6]))
print("result : ", sess.run(result3))
print("\n")
print(sess.run([node7, node8]))
print("result : ", sess.run(result4))
print("\n")
print("result : ", sess.run(m1))
print("result : ", sess.run(m2))
print("result : ", sess.run(m3))
# Evaluate a linear model W*x + b with x fed via a placeholder
W = tf.Variable([.3], dtype=tf.float32)
b = tf.Variable([-.3], dtype=tf.float32)
x = tf.placeholder(dtype=tf.float32)
line_model = W*x+b
init = tf.global_variables_initializer();
sess.run(init)
print("\n");
print("line model \n")
print(sess.run(line_model, {x:[1, 2, 3, 4]}))
| true |
b353667ef905619df98e4b44f38b9cdced221436 | Python | jocelynewalker/data-management | /mapperReducer.py | UTF-8 | 429 | 2.625 | 3 | [] | no_license | from mrjob.job import MRJob
class MRJobname(MRJob):
    """Word-count MapReduce job: emit (lowercased word, 1) per token in
    the mapper, sum the counts per word in the reducer."""

    def mapper(self, key, line):
        cleaned = line.strip(' ?.!,:()')
        for token in cleaned.split():
            yield token.lower(), 1

    def reducer(self, word, occurrences):
        yield word, sum(occurrences)


if __name__ == '__main__':
    MRJobname.run()
| true |
40f72a483353325077df64c09c2df8ffd5358b9d | Python | GabrielAmare/Models37 | /models37/attributes/Parse.py | UTF-8 | 4,050 | 3.390625 | 3 | [
"MIT"
] | permissive | from datetime import date, datetime
import re
class Parse:
    """
    Cast possibly wrongly-typed values (bool/int/float/str/date/datetime)
    to a desired type.

    Each to_* method returns the converted value or raises TypeError when
    no safe conversion exists; Parse.to() wraps them and falls back to
    returning the input unchanged on TypeError.  String parsing is driven
    by the compiled regexes below, and branch order matters (e.g. the
    datetime regex is tried before the date regex where both could match).
    """
    regex_int = re.compile(r"^-?[0-9]+$")
    regex_float = re.compile(r"^-?([0-9]+\.[0-9]*|\.[0-9]+)$")
    # An integer written as a float, e.g. "5." or "5.000".
    regex_int_float = re.compile(r"^-?[0-9]+\.0*$")
    regex_date = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}$")
    regex_datetime = re.compile(r"^[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}(\.[0-9]{6})?$")

    @classmethod
    def to_bool(cls, value) -> bool:
        """Cast to bool; only the exact strings "True"/"False" are accepted."""
        type_ = type(value)
        if type_ is bool:
            return value
        if type_ in (int, float):
            return bool(value)
        if type_ is str:
            if value == "True":
                return True
            if value == "False":
                return False
        raise TypeError

    @classmethod
    def to_int(cls, value) -> int:
        """Cast to int; strings may be plain integers or integral floats."""
        type_ = type(value)
        if type_ is int:
            return value
        if type_ in (bool, float):
            return int(value)
        if type_ is str:
            if cls.regex_int.match(value):
                return int(value)
            if cls.regex_int_float.match(value):
                return int(float(value))
        raise TypeError

    @classmethod
    def to_float(cls, value) -> float:
        """Cast to float; "inf"/"-inf" strings are handled explicitly."""
        type_ = type(value)
        if type_ is float:
            return value
        if type_ in (bool, int):
            return float(value)
        if type_ is str:
            if value == "inf":
                return float("inf")
            if value == "-inf":
                return float("-inf")
            if cls.regex_float.match(value):
                return float(value)
            if cls.regex_int.match(value):
                return float(int(value))
        raise TypeError

    @staticmethod
    def to_str(value) -> str:
        """Cast to str; dates/datetimes become their ISO representation."""
        type_ = type(value)
        if type_ is str:
            return value
        if type_ in (bool, int, float):
            return str(value)
        if type_ in (date, datetime):
            return value.isoformat()
        raise TypeError

    @classmethod
    def to_date(cls, value) -> date:
        """Cast to date from date, datetime or an ISO-formatted string."""
        type_ = type(value)
        if type_ is date:
            return value
        if type_ is datetime:
            # WARNING : this operation leads to a data loss when (hours, minutes, seconds or milliseconds)
            return value.date()
        if type_ is str:
            if cls.regex_date.match(value):
                return date.fromisoformat(value)
            if cls.regex_datetime.match(value):
                return datetime.fromisoformat(value).date()
        raise TypeError

    @classmethod
    def to_datetime(cls, value) -> datetime:
        """Cast to datetime from datetime, date or an ISO-formatted string."""
        type_ = type(value)
        if type_ is datetime:
            return value
        if type_ is date:
            # WARNING : this operation assumes that a date datetime equivalent is the start of the day
            return datetime(value.year, value.month, value.day)
        if type_ is str:
            if cls.regex_datetime.match(value):
                return datetime.fromisoformat(value)
            if cls.regex_date.match(value):
                value_ = date.fromisoformat(value)
                return datetime(value_.year, value_.month, value_.day)
        raise TypeError

    @classmethod
    def to(cls, value, type_):
        """Dispatch to the right to_* method (or the type's __cast__ hook);
        on TypeError, return *value* unchanged instead of raising."""
        try:
            if type_ is bool:
                return cls.to_bool(value)
            elif type_ is int:
                return cls.to_int(value)
            elif type_ is float:
                return cls.to_float(value)
            elif type_ is str:
                return cls.to_str(value)
            elif type_ is date:
                return cls.to_date(value)
            elif type_ is datetime:
                return cls.to_datetime(value)
            elif hasattr(type_, "__cast__"):
                return type_.__cast__(value)
            else:
                raise TypeError
        except TypeError:
            return value
| true |
fd1d1a917f3c25d992ad4aa38c772f86c07ed2b8 | Python | DTPsykko/Work | /Python Docs/Generator Performance.py | UTF-8 | 488 | 3.484375 | 3 | [] | no_license | from time import time
def performance(func):
    """Decorator that reports how long a call to *func* took.

    Bug fix: time() measures seconds, but the original printed the value
    labelled as "ms"; report it as seconds.  functools.wraps preserves
    the wrapped function's name/docstring.
    """
    from functools import wraps

    @wraps(func)
    def wrapper(*args, **kwargs):
        t1 = time()
        result = func(*args, **kwargs)
        t2 = time()
        print(f'it took {round(t2 - t1, 4)} s')
        return result
    return wrapper
# Demo: compare iterating range() lazily vs. materialising it as a list
# (the list version pays allocation cost up front).
@performance
def long_time():
    print('1')
    for i in range(10000000):
        i*5
@performance
def long_time2():
    print('2')
    for i in list(range(10000000)):
        i*5
long_time()
long_time2()
| true |
e90871be3cc1689ac4d669a01352a725ce406a2d | Python | TangMartin/CS50x-Introduction-to-Computer-Science | /pset6/cash.py | UTF-8 | 1,068 | 3.65625 | 4 | [] | no_license | from cs50 import get_float
def main():
    # Prompt for change owed and print the minimum coin count.
    print(f"{numberofcoins()}")
def numberofcoins():
    """Prompt until a non-negative dollar amount is entered, then return
    the minimum number of US coins (25/10/5/1 cents) making that change.

    Bug fixes: money * 100 drifts in binary floating point
    (e.g. 4.20 -> 420.00000000000006), which can drop pennies; round
    once to integer cents and use integer arithmetic throughout.  Also
    returns an int instead of a float like 18.0.
    """
    money = -1
    while money < 0:
        money = get_float("Change owed: ")
    cents = round(money * 100)
    coins = 0
    # Greedy is optimal for US coin denominations.
    for denom in (25, 10, 5, 1):
        coins += cents // denom
        cents %= denom
    return coins
# Guard the entry point so importing this module does not prompt for input.
if __name__ == "__main__":
    main()
c544b9b1def57b0c03e681a3e03cf3edb19b00e9 | Python | ParasGarg/MongoDB-M101P-Homework-Solutions | /Homework-Week-3-Schema_Design/Homework-3.3/Homework_3.3_Solution.py | UTF-8 | 1,051 | 2.734375 | 3 | [] | no_license | # working and sample code for understanding of 'add_comment' function
# for exact code refer userDOA.py file
import pymongo

# Connect to the local MongoDB instance and pick the 'blog' database /
# 'posts' collection.
conn = pymongo.MongoClient("mongodb://localhost")
db = conn.blog
posts = db.posts

# The comment document passed to the add_comment function.
comment = {'author': "Mongo", 'body': "Hello Mongodb!"}
query = {'permalink': "my_blog"}

# Fetch the post the comment is to be added to, append the new comment to
# its list, and write the updated list back.
post_find = posts.find_one(query)
comment_list = post_find['comments']
comment_list.append(comment)
update = {'$set': {'comments': comment_list}}
posts.update_one(query, update, upsert=True)

# Re-read to confirm the update.  Bug fixes from the original sample:
# `self.posts` referenced a non-existent `self` at module level, and
# `print post_find` was Python-2 syntax (a SyntaxError under Python 3).
post_find = posts.find_one(query)
print(post_find)  # use return rather than print inside the real DAO
5788dcd0af9b683281e4ddc52d8975994d94671c | Python | vidhisharma1212/oops | /ch_10_oops/06_constructor.py | UTF-8 | 778 | 3.96875 | 4 | [] | no_license | class Employee:
company= 'Google'
def __init__(self, name, salary, subunit):
self.name= name
self.salary= salary
self.subunit= subunit
print("Employee is created ! ")
def getDetails(self):
print(f"The name of the employee is {self.name}")
print(f"The salary of the employee is {self.salary}")
print(f"The subunit of the employee is {self.subunit}")
def getSalary(self):
print(f"The salary of this person working in {self.company} is {self.salary}")
    @staticmethod
    def greet():
        """Static method: needs neither instance nor class state."""
        print("Good Morning Mam")
# Demo: construct an Employee and show its details.
vid= Employee('vidhi','100','YouTube')
# vid=Employee() this throws an error of missing 3 arguments
# Bug fix: `vid= Employee(pass, pass ,pass )` was a SyntaxError -- `pass` is a
# statement, not an expression, so it can never be used as an argument.
# Real placeholder values must be passed instead:
vid= Employee(None, None, None)
vid.getDetails()
print(vid.salary)
3561359fabcb93139dd4aa0313a8bb729e5f6543 | Python | vsikarwar/Algorithms | /TreeDS/AncestorsOfNode.py | UTF-8 | 235 | 2.703125 | 3 | [] | no_license | '''
'''
def ancestors(node, key):
    """Print all ancestors of the node whose key equals `key`.

    Returns True when `key` is found in the subtree rooted at `node`; each
    ancestor's key is printed on the way back up the recursion.  Returns a
    falsy value (False) for an empty subtree or when the key is absent.
    """
    if node is None:
        return False
    if node.key == key:
        return True
    # Bug fix: the recursive calls were missing the `key` argument, which
    # raised a TypeError on any tree deeper than the root.
    if ancestors(node.left, key) or ancestors(node.right, key):
        print(node.key)
        return True
    return False
5a9e42a61452998dd1df7c66707f7a0ddef68f97 | Python | GuanJinNL/Some-programming-assignment | /Yak.py | UTF-8 | 1,361 | 2.875 | 3 | [] | no_license | import numpy as np
class yak:
    """Plain record type for one yak; Stockstatus reads `.age` and `.sex`."""
    # Class-level defaults, normally overwritten per instance.
    name = ''
    age = 0   # age in years (Stockstatus advances it by 0.01 per simulated day)
    sex = ''  # 'f' marks a female; any other value is treated as male
def Stockstatus(Herd, T):
    """Simulate the herd for up to T days; return (milk, wol, age, age_lastshave).

    milk -- total milk from living females (50 - 3 * age per yak per day)
    wol  -- number of shavings performed (alive adults, every 8 + age days)
    age  -- ages after the simulation; each simulated day adds 0.01
    age_lastshave -- each yak's age at its most recent shaving (0 if never)

    A yak is alive while age < 10 and adult from age >= 1; once every yak
    has died the totals accumulated so far are returned early.
    """
    milk = wol = 0; N = len(Herd)
    age = np.array([]); sexes = np.array([])
    # Bug fix: the original wrote `nextshave = age_lastshave = np.zeros(N)`,
    # binding BOTH names to the SAME array, so the recorded shave age was
    # immediately clobbered by the next-shave countdown (and vice versa).
    nextshave = np.zeros(N)       # remaining days until each yak's next shaving
    age_lastshave = np.zeros(N)
    for yak in Herd:
        age = np.append(age, float(yak.age))
        sexes = np.append(sexes, yak.sex)
    female = np.where(sexes == 'f')[0]
    for i in range(T):
        alive = np.where(age < 10)[0]  # indices of the yaks still alive
        if len(alive) == 0:
            return (milk, wol, age, age_lastshave)
        alivefemale = np.intersect1d(alive, female)
        for j in alivefemale:
            milk = milk + 50 - age[j] * 3
        adult = np.where(age >= 1)[0]
        aliveadult = np.intersect1d(alive, adult)
        if len(aliveadult) > 0 and nextshave[aliveadult].min() <= 0:
            mindex = np.where(nextshave == nextshave[aliveadult].min())[0]
            shave = np.intersect1d(mindex, aliveadult)  # alive adults due for a shave
            wol = wol + len(shave)
            age_lastshave[shave] = age[shave]
            nextshave[shave] = 8 + age[shave]
        else:
            nextshave[aliveadult] -= 1
        age = age + 0.01
    return (milk, wol, age, age_lastshave)
| true |
f6e7bd83454266934abb8ad5bcfe17d0a1e2b707 | Python | nekonora/University | /Optics/Laser3_3.py | UTF-8 | 2,308 | 2.90625 | 3 | [] | no_license | import numpy as np
# L (vertical bands)

# Variables
# ---------

# mm, slit-to-screen distance
L = [780.0, 700.0, 630.0, 560.0, 490.0, 420.0, 380.0, 340.0, 300.0, 220.0, 150.0]
# mm, diameter of the dark band (halved here to get the half-width)
d = np.array([19.7, 16.9, 15.7, 14.2, 12.0, 10.5, 9.1, 8.3, 7.7, 6.0, 4.2]) / 2.0

# Calculations
# ------------

ran = 11
# 10e-7 == 1e-6, so lam is 632.8e-6 mm -- presumably the 632.8 nm He-Ne
# laser wavelength expressed in mm; TODO confirm against the lab setup.
lam = 632.8 * (10e-7)

# Diffraction angle of each measurement: theta = atan((d/2) / L).
theta = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
for i in range(ran) :
    theta[i] = np.arctan2((d[i] / 2), L[i])

# Aperture size from the minimum condition: D = 2*lam / sin(theta).
D = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
for i in range(ran) :
    D[i] = ((2.0 * lam) / np.sin(theta[i]))

# Print
# -----

print("- - - - Fenditura orizzontale (primo minimo) - - - -\n")
print("L (mm): {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}\n".format(*L))
print("x (mm): {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}\n".format(*d))
# NOTE(review): the first theta placeholder uses 4 decimals while the rest use
# 2 -- looks unintentional, but the output format is preserved here.
print("theta (rad): {:.4f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}\n".format(*theta))
print("l (mm): {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n".format(*D))

# H (horizontal bands)

# Variables
# ---------

# mm, slit-to-screen distance
op_L = [780.0, 700.0, 630.0, 560.0, 490.0, 420.0, 380.0, 340.0, 300.0, 220.0, 150.0]
# mm, diameter of the dark band
op_d = np.array([16.3, 14.7, 13.1, 12.1, 10.8, 9.0, 8.1, 7.3, 6.3, 5.0, 3.3]) / 2

# Calculations (same formulas as above, for the second minimum)
# ------------

op_theta = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
for i in range(ran) :
    op_theta[i] = np.arctan2((op_d[i] / 2), op_L[i])

op_D = [0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0,0.0]
for i in range(ran) :
    op_D[i] = ((2.0 * lam) / np.sin(op_theta[i]))

# Print
# -----

print("- - - - Fenditrura orizzontale (secondo minimo) - - - -\n")
print("L (mm): {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}\n".format(*op_L))
print("x (mm): {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}, {:.2f}\n".format(*op_d))
print("theta (rad): {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n".format(*op_theta))
print("h (mm): {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}, {:.4f}\n".format(*op_D))
| true |
0ea56ec180a80eb97f016d4b76bf056a7af41622 | Python | 17605272633/ETMS | /ETMS/ETMS/apps/attendance/views.py | UTF-8 | 10,734 | 2.53125 | 3 | [
"MIT"
] | permissive | from django.shortcuts import render
from rest_framework.generics import GenericAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from attendance.models import student_attendance_table, teacher_attendance_table
from attendance.serializers import StudentAttendanceSerializer, TeacherAttendanceSerializer
from lesson.models import lesson_table
from lesson.serializers import LessonSerializer
from users.models import teacher_table, student_table
from users.serializers import TeacherSerializer, StudentSerializer
class AttendanceGetView(GenericAPIView):
    """
    Attendance management: look up one user's attendance records by user id.
    """
    def post(self, request):
        """
        Return the attendance records for the user given in the request body.
        Route:   POST attendance/user_attendance/
        Request: user_id = ?  userkind = ?  ("1" = teacher, "2" = student)
        """
        # Extract the request parameters.
        attendance = request.data
        userkind = attendance.getlist("userkind")[0]
        user_id = attendance.getlist("user_id")[0]
        # Teacher branch.
        if userkind == "1":
            try:
                user_attendance = teacher_attendance_table.objects.filter(atuser_id=user_id)
            except:
                return Response({"error": "查询失败"})
            # Serialize the query set.
            tea_attendance_serializer = TeacherAttendanceSerializer(user_attendance, many=True)
            tea_attendance_dict = tea_attendance_serializer.data
            attendance_list = []
            for dict1 in tea_attendance_dict:
                id = dict1["id"]
                time = dict1["attime"]
                status = dict1["atstatus"]
                lesson_id = dict1["alesson"]
                user_id = dict1["atuser"]
                # Map the numeric status code to its display label
                # (1 full attendance, 2 late, 3 left early, 4 absent, 5 on leave).
                if status == 1:
                    status = "全勤"
                elif status == 2:
                    status = "迟到"
                elif status == 3:
                    status = "早退"
                elif status == 4:
                    status = "缺勤"
                elif status == 5:
                    status = "请假"
                # Look up the teacher's display name.
                teacher = teacher_table.objects.filter(tid=user_id)
                teacher_data = TeacherSerializer(teacher, many=True)
                teacher_name = teacher_data.data[0]["tname"]
                # Look up the lesson name.
                lesson = lesson_table.objects.filter(lid=lesson_id)
                lesson_data = LessonSerializer(lesson, many=True)
                lesson_name = lesson_data.data[0]["lname"]
                data = {
                    "id": id,
                    "name": teacher_name,
                    "time": time,
                    "lesson_name": lesson_name,
                    "status": status,
                }
                attendance_list.append(data)
            return Response(attendance_list)
        # Student branch: same shape as above but with the student tables.
        if userkind == "2":
            try:
                user_attendance = student_attendance_table.objects.filter(asuser_id=user_id)
            except:
                return Response({"error": "查询失败"})
            # Serialize the query set.
            stu_attendance_serializer = StudentAttendanceSerializer(user_attendance, many=True)
            stu_attendance_dict = stu_attendance_serializer.data
            attendance_list = []
            for dict1 in stu_attendance_dict:
                id = dict1["id"]
                time = dict1["astime"]
                status = dict1["asstatus"]
                lesson_id = dict1["alesson"]
                user_id = dict1["asuser"]
                # Same numeric status code -> label mapping as the teacher branch.
                if status == 1:
                    status = "全勤"
                elif status == 2:
                    status = "迟到"
                elif status == 3:
                    status = "早退"
                elif status == 4:
                    status = "缺勤"
                elif status == 5:
                    status = "请假"
                # Look up the student's display name.
                student = student_table.objects.filter(sid=user_id)
                student_data = StudentSerializer(student, many=True)
                student_name = student_data.data[0]["sname"]
                # Look up the lesson name.
                lesson = lesson_table.objects.filter(lid=lesson_id)
                lesson_data = LessonSerializer(lesson, many=True)
                lesson_name = lesson_data.data[0]["lname"]
                data = {
                    "id": id,
                    "name": student_name,
                    "time": time,
                    "lesson_name": lesson_name,
                    "status": status,
                }
                attendance_list.append(data)
            return Response(attendance_list)
    # def delete(self, request, student_id):
    #     """删除"""
    #     try:
    #         stu_attendance = student_attendance_table.objects.get(asuser_id=student_id)
    #     except:
    #         return Response({"error": "查询错误"})
    #
    #     # 删除
    #     stu_attendance.delete()
    #
    #     # 响应
    #     return Response(status=204)
class AttendanceUploadView(APIView):
    """
    Partially update one attendance record's status.
    Route: POST attendance/up_attendance/
    """
    def post(self, request):
        # Pull the record id, user kind and new status out of the body.
        payload = request.data
        record_id = payload.getlist("id")[0]
        userkind = payload.getlist("userkind")[0]
        new_status = payload.getlist("status")[0]
        if userkind == "1":
            # Teacher attendance record.
            try:
                record = teacher_attendance_table.objects.get(id=record_id)
            except:
                return Response({"error": "查询错误"})
            record.atstatus = new_status
            print(new_status)
            record.save()
            return Response({"message": "ok"}, status=201)
        if userkind == "2":
            # Student attendance record.
            try:
                record = student_attendance_table.objects.get(id=record_id)
            except:
                return Response({"error": "查询错误"})
            record.asstatus = new_status
            record.save()
            return Response({"message": "ok"}, status=201)
class AttendanceCreateView(APIView):
    """
    Create a new attendance record.
    Route: POST attendance/attendance/
    """
    def post(self, request):
        """Create an attendance row for a teacher or a student."""
        # Extract the request parameters.
        attendance = request.data
        userkind = attendance.getlist("userkind")[0]
        # Teacher branch.
        if userkind == "1":
            tea_lesson_id = attendance.getlist("lesson_id")[0]
            tea_user_id = attendance.getlist("user_id")[0]
            # Insert the new row; atstatus=1 means full attendance by default.
            teacher_attendance_table.objects.create(
                atstatus=1,
                alesson_id=tea_lesson_id,
                atuser_id=tea_user_id
            )
            return Response({
                "message": "ok"
            })
        # Student branch.
        if userkind == "2":
            stu_lesson_id = attendance.getlist("lesson_id")[0]
            stu_user_id = attendance.getlist("user_id")[0]
            # Insert the new row; asstatus=1 means full attendance by default.
            student_attendance_table.objects.create(
                asstatus=1,
                alesson_id=stu_lesson_id,
                asuser_id=stu_user_id
            )
            return Response({
                "message": "ok"
            })
class TeacherAttendanceGetView(GenericAPIView):
    """
    Teacher attendance management: look up / delete records by staff id.
    """
    def get(self, request, teacher_id):
        """
        Return the raw serialized attendance records for one teacher.
        Route: GET attendance/tea_attendance/(?P<teacher_id>\d+)/
        """
        try:
            tea_attendance = teacher_attendance_table.objects.filter(atuser_id=teacher_id)
        except:
            return Response({"error": "查询失败"})
        # Serialize the query set and return it unmodified.
        tea_attendance_serializer = TeacherAttendanceSerializer(tea_attendance, many=True)
        tea_attendance_dict = tea_attendance_serializer.data
        return Response(tea_attendance_dict)
    # def patch(self, request, teacher_id):
    #     """局部修改"""
    #     try:
    #         tea_attendance = teacher_attendance_table.objects.get(atuser_id=teacher_id)
    #     except:
    #         return Response({"error": "查询错误"})
    #
    #     # 接收
    #     tea_attendance_dict = request.data
    #
    #     # print(stu_attendance)
    #
    #     # 验证
    #     tea_attendance_serilizer = TeacherAttendanceSerializer(tea_attendance, data=tea_attendance_dict, partial=True)
    #     if not tea_attendance_serilizer.is_valid():
    #         return Response(tea_attendance_serilizer.errors)
    #
    #     # 保存 update
    #     tea_attendance = tea_attendance_serilizer.save()
    #
    #     # 响应
    #     tea_attendance_serilizer = TeacherAttendanceSerializer(tea_attendance)
    #     tea_attendance_dict = tea_attendance_serilizer.data
    #     return Response(tea_attendance_dict, status=201)
    def delete(self, request, teacher_id):
        """Delete one teacher attendance record by staff id (204 on success).

        NOTE(review): `.get()` raises (and the bare except returns the error
        response) when zero or multiple rows match -- confirm that is intended.
        """
        try:
            tea_attendance = teacher_attendance_table.objects.get(atuser_id=teacher_id)
        except:
            return Response({"error": "查询错误"})
        # Delete the row.
        tea_attendance.delete()
        # Respond with 204 No Content.
        return Response(status=204)
#
# class TeacherAttendancePostView(GenericAPIView):
# """
# 创建教师考勤信息
# 路由: POST attendance/tea_attendance/
# """
#
# def post(self, request):
# """创建考勤信息"""
# # 获取请求参数
# tea_attendance = request.data
#
# # atid = tea_attendance.getlist("atid")[0]
# # attime = tea_attendance.getlist("attime")[0]
# atstatus = tea_attendance.getlist("atstatus")[0]
# alesson_id = tea_attendance.getlist("alesson_id")[0]
# # atuser_id = tea_attendance.getlist("atuser_id")[0]
#
# # 获取该课老师id
# lesson_info = lesson_table.objects.filter(lid=alesson_id)
# lesson_serilizer = LessonSerializer(lesson_info, many=True)
# lesson_list = lesson_serilizer.data
# atuser_id = lesson_list[0]["lteacher"]
#
# # 将获取的数据上在数据库创建
# teacher_attendance_table.objects.create(
# # atid=atid,
# atstatus=atstatus,
# alesson_id=alesson_id,
# atuser_id=atuser_id
# )
#
# return Response({
# # "atid": atid,
# "atstatus": atstatus,
# "alesson_id": alesson_id,
# "atuser_id": atuser_id
# })
| true |
260cc871ddc3181742f67d5a83083d289d17e39a | Python | ChesterBu/python | /OOP/OOP4.py | UTF-8 | 327 | 3.703125 | 4 | [] | no_license | #继承先找自己有没有,没有在找父类的
class Dad:
    """Parent class used to demonstrate attribute lookup through inheritance."""

    money = 100  # class attribute, inherited unless a subclass overrides it

    def __init__(self, who):
        """Announce construction, then remember the given name."""
        print('爸爸')
        self.name = who

    def hit_son(self):
        """Inherited method; prints a fixed message."""
        print('打')
class Son(Dad):
    # Overrides only the class attribute `money`; the constructor and
    # hit_son() are inherited from Dad.
    money = 1000
    pass
s = Son('alex')     # Dad.__init__ runs (prints '爸爸') and stores the name
print(s.name)       # instance attribute set by the inherited constructor
print(s.money)      # found on Son (1000), shadowing Dad.money
s.hit_son()         # method inherited from Dad
print(s.__dict__)   # the instance dict holds only 'name'
1233baaa05b1cc5489a67a1bdb5e5b8e5ffaf447 | Python | LucasSloan/speedchallenge | /extract_optical_flow.py | UTF-8 | 3,152 | 2.5625 | 3 | [] | no_license | import cv2
import tensorflow as tf
import numpy as np
def _bytes_feature(value):
    """Wrap one bytes value in a tf.train.Feature for TFRecord serialization."""
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _float_feature(value):
    """Wrap one float value in a tf.train.Feature for TFRecord serialization."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))
def write_records(examples, path):
    """Write the given serialized tf.train.Example strings to a TFRecord file.

    Bug fix: the writer is now closed when done.  The original never called
    close(), so buffered records could be left unflushed on disk.
    """
    writer = tf.python_io.TFRecordWriter(path)
    try:
        for e in examples:
            writer.write(e)
    finally:
        writer.close()
# --- Build (optical-flow image, speed) examples from the training video ---
cam = cv2.VideoCapture("data\\train.mp4")
speeds = open("data\\train.txt", 'r').readlines()  # one speed label per frame

current_frame = 0
examples = []

ret, first_frame = cam.read()
# Converts frame to grayscale because we only need the luminance channel for detecting edges - less computationally expensive
prev_gray = cv2.cvtColor(first_frame, cv2.COLOR_BGR2GRAY)
# Creates an image filled with zero intensities with the same dimensions as the frame
mask = np.zeros_like(first_frame)
# Sets image saturation to maximum
mask[..., 1] = 255

while(True):
    ret, frame = cam.read()
    # NOTE(review): this indexes speeds[] before checking `ret`; if the video
    # has exactly as many frames as labels this raises IndexError at the end.
    speed = speeds[current_frame]
    if ret:
        print("Creating {}, speed {}".format(current_frame, speed))
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
        # Bug fix: advance the reference frame.  The original never updated
        # prev_gray, so every flow field was computed against frame 0 instead
        # of the previous frame (contradicting the comment in the else branch
        # about "flow for frame n with frames n and n+1").
        prev_gray = gray
        magnitude, angle = cv2.cartToPolar(flow[..., 0], flow[..., 1])
        # Encode flow direction as hue and flow magnitude as value (HSV).
        mask[..., 0] = angle * 180 / np.pi / 2
        mask[..., 2] = cv2.normalize(magnitude, None, 0, 255, cv2.NORM_MINMAX)
        rgb = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)

        ret, jpg = cv2.imencode(".jpg", rgb)
        if ret:
            example = tf.train.Example(features=tf.train.Features(feature={
                'image_raw': _bytes_feature(jpg.tostring()),
                'label': _float_feature(float(speed))
            }))
            examples.append(example.SerializeToString())
        else:
            break
        current_frame += 1
    else:
        print("creating the last frame")
        # we're calculating the flow for frame n with frames n and n+1, so we have to repeat the last frame
        rgb = cv2.cvtColor(mask, cv2.COLOR_HSV2BGR)
        ret, jpg = cv2.imencode(".jpg", rgb)
        if ret:
            print("appending the last frame")
            example = tf.train.Example(features=tf.train.Features(feature={
                'image_raw': _bytes_feature(jpg.tostring()),
                'label': _float_feature(float(speed))
            }))
            examples.append(example.SerializeToString())
        break

cam.release()
cv2.destroyAllWindows()

# Temporal split: the first 16320 examples train, the remainder validate.
temporal_train_examples = examples[:16320]
temporal_validation_examples = examples[16320:]
write_records(temporal_train_examples, "D:\\speedchallenge\\optical_flows\\temporal\\train.tfrecords")
write_records(temporal_validation_examples, "D:\\speedchallenge\\optical_flows\\temporal\\validation.tfrecords")

# Random split over the same examples.
random_examples = np.random.permutation(examples)
random_train_examples = random_examples[:16320]
random_validation_examples = random_examples[16320:]
write_records(random_train_examples, "D:\\speedchallenge\\optical_flows\\random\\train.tfrecords")
write_records(random_validation_examples, "D:\\speedchallenge\\optical_flows\\random\\validation.tfrecords")
| true |
79d0043d6d024c2b1b2850a9853ac26b3cfa2ad7 | Python | wan-catherine/Leetcode | /test/test_1144_decrease_elements_to_make_array_zigzag.py | UTF-8 | 461 | 2.984375 | 3 | [] | no_license | from unittest import TestCase
from problems.N1144_Decrease_Elements_To_Make_Array_Zigzag import Solution
class TestSolution(TestCase):
    """Tests for LeetCode 1144: minimum decreases to make the array a zigzag."""
    def test_movesToMakeZigzag(self):
        # Strictly increasing input needs two decreases.
        self.assertEqual(2, Solution().movesToMakeZigzag([1,2,3]))
    def test_movesToMakeZigzag_1(self):
        self.assertEqual(4, Solution().movesToMakeZigzag([9,6,1,6,2]))
    def test_movesToMakeZigzag_2(self):
        # Already a zigzag: zero moves.
        self.assertEqual(0, Solution().movesToMakeZigzag([2,1,2]))
| true |
901a264ac61bf188278add6a70909e910c40cc0d | Python | iammanish17/AOC2020 | /14/Part2.py | UTF-8 | 936 | 2.546875 | 3 | [
"MIT"
] | permissive | s = open('input.txt','r').read()
# AoC 2020 day 14 part 2: version-2 "memory address decoder" emulation.
line = [k for k in s.split("\n")]
mask = line[0].split(" ")[2]
di = {}  # memory: decoded address -> value
for i in line[1:]:
    if "mask" in i:
        # A "mask = ..." line replaces the active bitmask.
        mask = i.split(" ")[2]
        continue
    # Parse "mem[<index>] = <value>".
    index = i.split("[")[1].split("]")[0]
    value = int(i.split(" ")[2])
    bi = bin(int(index))[2:]
    bi = "0"*(36 - len(bi)) + bi  # zero-pad the address to 36 bits
    x = ['X']*36
    oof = []  # bit positions that float ('X' in the mask)
    # NOTE: the loop variable `i` below shadows the outer line variable;
    # safe only because the line has been fully parsed above.
    for i in range(36):
        if mask[i] == '0':
            x[i] = bi[i]   # '0' keeps the original address bit
        elif mask[i] == '1':
            x[i] = '1'     # '1' forces the bit to 1
        else:
            oof += [i]     # 'X' floats over both 0 and 1
    values = []
    if oof:
        # Enumerate all 2**len(oof) combinations of the floating bits.
        for i in range(2**len(oof)):
            xx = list(x)
            bi = bin(i)[2:]
            bi = "0"*(len(oof)-len(bi))+bi
            for j in range(len(oof)):
                xx[oof[j]] = bi[j]
            values += [int("".join(xx),2)]
    else:
        values += [int("".join(x), 2)]
    # Write the value to every decoded address.
    for v in values:
        di[v] = value
print(sum(di[k] for k in di))
| true |
099b9b00d45aa7608da2430481bc915bd02ef2c7 | Python | neilmarshall/gym-log | /app/models/gym_record.py | UTF-8 | 654 | 2.84375 | 3 | [] | no_license | from app import db
class GymRecord(db.Model):
"""Object relational model of gym records"""
__tablename__ = "gym_records"
record_id = db.Column(db.Integer, primary_key=True)
session_id = db.Column(db.Integer, db.ForeignKey('sessions.session_id'), nullable=False)
exercise_id = db.Column(db.Integer, db.ForeignKey('exercises.exercise_id'), nullable=False)
reps = db.Column(db.Integer, nullable=False)
weight = db.Column(db.Float, nullable=False)
def __repr__(self):
return f"Exercise(session_id='{self.session_id}', exercise_id={self.exercise_id}, " + \
f"reps={self.reps}, weight={self.weight})"
| true |
c6c5e9b910efaa7c136341c0879701742b7f7bcb | Python | lbbruno/Python | /Exercicios_1/ex034.py | UTF-8 | 174 | 3.9375 | 4 | [] | no_license | salario = float(input('Informe o salário: '))
# A 10% raise applies to salaries above 1250; otherwise the larger 15% raise.
print('Aumento de 10%: {:.2f}'.format(salario * 1.10) if salario > 1250 else 'Aumento de 15% {:.2f}: '.format(salario * 1.15))
| true |
204268ab1b2540e7ae2f3368c17a7f1f12cfc03c | Python | huyenpham2995/python2sre | /week4/reverseInteger/reverseInteger.py | UTF-8 | 707 | 4.25 | 4 | [] | no_license | def reverseInteger(num):
if num == "":
return num
# a flag to see if string has comma(s)
hasComma = False
reversedNum = ""
# travese the string in reverse order
for char in num[::-1]:
if char == ",":
hasComma = True
else:
reversedNum += char
# convert it into an int to eliminate the 0(s), then back to string
reversedNum = str(int(reversedNum))
# add back the comma(s)
if hasComma and len(reversedNum) > 3:
commaPos = len(reversedNum) - 3
while commaPos > 0:
reversedNum = reversedNum[:commaPos] + "," + reversedNum[commaPos:]
commaPos -= 3
return reversedNum
| true |
bbf773cb474ba3b0a4d048d44f3b9d688b1456ae | Python | mmrosek/dataScience | /genAlgo/arxiv/dna_data_v1.py | UTF-8 | 13,015 | 2.71875 | 3 | [] | no_license | import string
import numpy as np
import math
import pandas as pd
from sklearn.linear_model import LinearRegression, ElasticNet
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, RandomizedSearchCV, GridSearchCV
from scipy.stats import pearsonr
from scipy.stats import randint as sp_randint
from sklearn.metrics import r2_score
import pdb
def new_char():
    """Return one character drawn uniformly at random from the allowed alphabet.

    The alphabet is `string.printable` minus its last ten characters, with a
    single space re-added at the end.
    """
    alphabet = string.printable[:-10] + " "
    return alphabet[np.random.randint(len(alphabet))]
def mse(arr1, arr2):
    """Return the mean squared error between two equal-length arrays.

    Bug fix: the original returned the element-wise squared differences
    divided by the length (an array), not the scalar mean the name implies.
    """
    return ((arr1 - arr2) ** 2).sum() / arr1.shape[0]
class DNA:
    """One genetic-algorithm individual: a choice of datasets, preprocessing
    steps and models, plus a fitness score computed by ensembling the models'
    predictions on held-out data."""
    def __init__(self, data, preproc_algos, models, mutant = False, verbose = False):
        """Randomly allocate genes from the candidate data/preproc/model lists.

        With mutant=True the gene lists are left empty (used by crossover to
        build a child that is filled in afterwards).
        """
        self.genes = {}
        self.genes["data"] = []
        self.genes["preproc"] = []
        self.genes["models"] = []
        self.fitness = 0
        self.verbose = verbose
        if not mutant:
            # Allocating genes --> data, preproc and models are lists of strings
            print(f"data: {data}")
            # Each candidate is kept with probability 0.99, dropped (None) otherwise.
            for idx in data:
                if np.random.random() > 0.01: self.genes["data"].append(idx)
                else: self.genes["data"].append(None)
            # Ensuring each DNA instance has at least one dataset
            if len(self.genes["data"]) == 0:
                idx = np.random.randint(0, len(data))
                self.genes["data"].append(data[idx])
            print(f"self.genes['data']: {self.genes['data']}")
            for p in preproc_algos:
                if np.random.random() > 0.01: self.genes["preproc"].append(p)
                else: self.genes["preproc"].append(None)
            for m in models:
                if np.random.random() > 0.01: self.genes["models"].append(m)
                else: self.genes["models"].append(None)
            # Ensuring each DNA instance has at least one model
            if len(self.genes["models"]) == 0:
                idx = np.random.randint(0, len(models))
                self.genes["models"].append(models[idx])
    def crossover(self, partner, midpt_bool):
        """Combine this individual's genes with `partner`'s into a child.

        midpt_bool=False: fitness-weighted per-gene selection (each gene comes
        from self with probability self_prob).  midpt_bool=True: single
        midpoint split over self.genes -- NOTE(review): that branch indexes
        the genes dict with integers, which looks broken; confirm before use.
        """
        child = DNA( None, None, None, mutant=True)
        if not midpt_bool:
            total_fitness = self.fitness + partner.fitness
            # THIS WAS NEW
            # self_prob = prob of taking one of own genes in crossover
            # Weighting self_prob based on fitness, capping at max_self_prob
            max_self_prob = 0.8
            if total_fitness == 0: self_prob = 0.5
            else: self_prob = min( max_self_prob, max( (1-max_self_prob) , self.fitness / max(total_fitness, 1e-4) ) )
            if self.verbose:
                print(f"self.fitness: {self.fitness}")
                print(f"partner.fitness: {partner.fitness}")
                print(f"self_prob: {self_prob}")
            for i in range(len(self.genes['data'])):
                val = np.random.random()
                if self.verbose: print(f"val: {val}")
                if val < self_prob:
                    if self.verbose: print("self gene")
                    child.genes['data'].append(self.genes['data'][i])
                else: child.genes['data'].append(partner.genes['data'][i])
            for i in range(len(self.genes['models'])):
                val = np.random.random()
                if self.verbose: print(f"val: {val}")
                if val < self_prob:
                    if self.verbose: print("self gene")
                    child.genes['models'].append(self.genes['models'][i])
                else: child.genes['models'].append(partner.genes['models'][i])
        else:
            midpt = min(max(2, np.random.randint(len(self.genes))), len(self.genes)-2)
            for i in range(len(self.genes)):
                if (i > midpt): child.genes[i] = self.genes[i]
                else: child.genes[i] = partner.genes[i]
        return child
    # NEED TO UPDATE !!!!
    def mutate(self, mut_rate):
        '''Based on a mutation probability, picks a new random character'''
        # Currently a no-op: the mutation body is commented out below.
        for i in range(len(self.genes['data'])):
            pass
            # if (np.random.random() < mut_rate):
            #     self.genes['data'] = new_char()
    def calc_fitness(self, df_dict, tgt):
        """Train every selected model on the selected data and set self.fitness.

        df_dict maps dataset keys to feature arrays (concatenated column-wise);
        tgt is the regression target aligned with the rows.
        """
        self.genes['preds'] = []
        # Perform preprocessing if desired
        # for df in self.genes['data']:
        #     if 'preproc_algos' in self.genes.keys(): pass
        #     else: continue
        # Concatenating subsets into full df
        df_keys = [df_idx for df_idx in self.genes['data'] if df_idx is not None]
        df_tuple = tuple([df_dict[key] for key in df_keys])
        df = np.concatenate( df_tuple , axis=1)
        # full_df = pd.concat([df_dict[key] for key in df_keys], axis=1)
        X_tr, X_te, y_tr, y_te = self.split_train_test(df,tgt)
        del df
        for model in self.genes['models']:
            if model is not None:
                test_preds = self.train_mod_and_predict(model, X_tr, y_tr, X_te)
                self.genes['preds'].append(test_preds)
                # NOTE(review): dropping into the debugger on ANY scoring error
                # is a development aid; it will hang unattended runs.
                try: print(f"\nR2 for test_preds: {r2_score(y_te, test_preds)}")
                except: pdb.set_trace()
                print(f"\n test_preds head: {test_preds[:5]}")
        # Ensembling and final fitness calculation
        if len(self.genes['preds']) == 0: self.fitness = 0
        else: self.fitness = self.ensemble_and_score(self.genes['preds'], y_te)
    def split_train_test(self, df, tgt, rand_state = 2, test_float = 0.2):
        """Deterministic train/test split (fixed random_state by default)."""
        X_tr, X_te, y_tr, y_te = train_test_split(df, tgt, test_size=test_float, random_state=rand_state)
        return X_tr, X_te, y_tr, y_te
    def train_mod_and_predict(self, mod, X_tr, y_tr, X_te, num_folds = 5, n_iter = 10):
        """Fit the model named by `mod` ('rf' or 'lr') and predict on X_te.

        NOTE(review): any other `mod` value falls through and returns None.
        """
        if mod == 'rf':
            # Random forest tuned by randomized search over a modest grid.
            est = RandomForestRegressor(criterion='mse')
            params = {'max_depth': sp_randint(1,12),
                      'min_samples_leaf': sp_randint(1,50),
                      'n_estimators': sp_randint(1,30),
                      'max_features': sp_randint(X_tr.shape[1]*0.3, X_tr.shape[1])}
            rs = RandomizedSearchCV(est, param_distributions=params,
                                    n_jobs=24, n_iter=n_iter, cv=num_folds)
            print("\nPerforming randomized search")
            rs.fit(X_tr, y_tr)
            print("Best score: %0.3f" % rs.best_score_)
            print("Best parameters set:")
            best_parameters = rs.best_estimator_.get_params()
            for param_name in sorted(params.keys()):
                print("\t%s: %r" % (param_name, best_parameters[param_name]))
            preds = rs.predict(X_te)
            return preds
        elif mod == 'lr':
            print("Linear regression")
            lr = LinearRegression()
            lr.fit(X_tr, y_tr)
            preds = lr.predict(X_te)
            return preds
    def ensemble_and_score(self, pred_list, y_te, ens_perc = 0.7):
        '''NEED TO SPLIT PREDS TO LEARN WEIGHTS ON TOP HALF AND EVAL ON BOTTOM HALF of test set'''
        ### Processing pred_list ###
        pred_array = np.array(pred_list)
        print(f"\npred_array.shape: {pred_array.shape}")
        # print(f"pred_array: {pred_array}")
        # print(f"\nScore on og_preds: {r2_score(eval_labels, eval_preds[:,0])}")
        # Ensuring pred_array has column dimension if only one set of preds
        # NOTE(review): `reshape` is not in-place, so this branch has no
        # effect; the 1-D case likely still misbehaves -- confirm.
        if pred_array.ndim == 1: pred_array.reshape(-1,1)
        # Transposing so pred_array will have samples as rows and predictions by each model as different col
        else: pred_array = pred_array.T
        print(f"\npred_array head: {pred_array[:5,:]}")
        if pred_array.shape[0] != y_te.shape[0]: raise Exception("Different number of predictions and ground truths.")
        ###############################
        print(f"pred_array.shape post-processing: {pred_array.shape}")
        print(f"\ny_te[:5]: {y_te[:5]}")
        # Split model predictions into an ensemble-fitting part and an
        # evaluation part (ens_perc of rows fit the ensemble weights).
        ens_preds, eval_preds, ens_labels, eval_labels = self.split_train_test(pred_array, y_te, test_float = 1-ens_perc)
        # num_ens_samples = int(pred_array.shape[0] * ens_perc)
        # # Model predictions and labels used to learn ensemble weights
        # ens_preds = pred_array[ : num_ens_samples, :]
        # ens_labels = y_te[ : num_ens_samples].reshape(-1,1)
        # # Model predictions and labels used for evaluation
        # eval_preds = pred_array[ num_ens_samples : , :]
        # eval_labels = y_te[ num_ens_samples : ].reshape(-1,1)
        # print(f"\n ens_preds first col: {ens_preds[:,0]}")
        # print(f"\n eval_preds first col: {eval_preds[:,0]}")
        # print(f"\nens_labels: {ens_labels}")
        # print(f"\neval_labels: {eval_labels}")
        print(f"ens_preds.shape: {ens_preds.shape}")
        print(f"eval_preds.shape: {eval_preds.shape}")
        print(f"ens_labels.shape: {ens_labels.shape}")
        print(f"eval_labels.shape: {eval_labels.shape}")
        # ### Ensembling ###
        # score = -10000
        # for wt in [0.1, 0.3, 0.5, 0.7, 0.9]:
        #     wt_score = r2_score(eval_labels, (eval_preds[:,0]*wt + eval_preds[:,1]*(1-wt)))
        #     if wt_score > score:
        #         final_wt = wt
        #         score = wt_score
        # final_wt_score = r2_score(eval_labels, (eval_preds[:,0]*final_wt + eval_preds[:,1]*(1-final_wt)))
        # print(f"\nScore from simple weighting: {final_wt_score}\n")
        # print(f"final_wt: {final_wt}")
        # Linear stacking: fit per-model weights on the ensemble split.
        lr = LinearRegression()
        lr.fit(ens_preds, ens_labels)
        ens_eval_preds = lr.predict(eval_preds)
        # Elastic-net stacking computed for comparison (printed, not scored).
        el_net = self.elastic_net_ensemble(ens_preds, ens_labels)
        print(f"\nel_net coefficients: {el_net.coef_}")
        el_net_ens_eval_preds = el_net.predict(eval_preds)
        ###################
        if self.verbose:
            print(f"\nScore on el_net eval: {r2_score(eval_labels, el_net_ens_eval_preds)}")
            print(f"\nScore on training/ensemble samples: {lr.score(ens_preds, ens_labels)}")
            print(f"\nScore on lr.score(eval_preds, eval_labels): {lr.score(eval_preds, eval_labels)}")
            print(f"\nScore on averaging eval samples: {r2_score(eval_labels,np.mean(eval_preds, axis=1))}")
            print(f"\nScore on first col eval_preds: {r2_score(eval_labels, eval_preds[:,0])}")
            print(f"\nScore on full og_preds first col: {r2_score(y_te, pred_array[:,0])}")
            print(f"\nScore on avg og_preds: {r2_score(y_te, np.mean(pred_array, axis=1))}")
            print(f"\nLR coefficients: {lr.coef_}")
            print(f"LR intercept: {lr.intercept_}")
            print(f"ens eval preds shape: {ens_eval_preds.shape}")
            print(f"\neval_preds[:5, :]: {eval_preds[:5, :]}")
            print(f"\nens_eval_preds[:5]: {ens_eval_preds[:5]}")
            print(f"eval_labels[:5]: {eval_labels[:5]}")
        # Ensuring ens_eval_preds has column dimension if only one set of preds
        if ens_eval_preds.ndim == 1: ens_eval_preds.reshape(-1,1)
        if ens_eval_preds.shape != eval_labels.shape:
            raise Exception("Shape of preds is not the same as the shape of labels")
        print(f"ens_eval_preds.shape: {ens_eval_preds.shape}")
        print(f"eval_labels.shape: {eval_labels.shape}")
        # Final fitness: R^2 of the stacked predictions on the held-out part.
        score = r2_score(eval_labels, ens_eval_preds)
        print(f"\nScore: {score}\n")
        return score
    def elastic_net_ensemble(self, X_train, y_train):
        """Grid-search an ElasticNet stacker and return the best estimator."""
        el = ElasticNet(normalize=True, max_iter=10000)
        parameters = {
            'alpha': (0.2, 0.5, 1, 5),
            'l1_ratio': (0.5, 0.7, 0.9, 1)
        }
        # find the best parameters for both the feature extraction and classifier
        gs = GridSearchCV(el, parameters, scoring = 'r2', n_jobs=16, cv = 10)
        print("\nPerforming grid search")
        gs.fit(X_train, y_train)
        print("Best score: %0.3f" % gs.best_score_)
        best_parameters = gs.best_estimator_.get_params()
        for param_name in sorted(parameters.keys()):
            print("\t%s: %r" % (param_name, best_parameters[param_name]))
        return gs.best_estimator_
    def get_genes(self):
        """Return the individual's genes under human-readable keys."""
        return {'Data':self.genes['data'], 'Preprocessing':self.genes['preproc'], 'Models':self.genes['models']}
### Needed for importation of DNA class ###
if __name__ == "__main__":
    # Module is import-only; nothing runs when executed directly.
    pass
| true |
a4ed240e31093732fffc15d36de161781b1c54c5 | Python | yakitori55/supportPiMotor | /rc_http/oled.py | UTF-8 | 4,505 | 2.890625 | 3 | [
"MIT"
] | permissive | from queue import Queue
import threading
import time
from systems import SystemsData
# Imports the necessary libraries...
import socket
import fcntl
import struct
import board
import digitalio
from PIL import Image, ImageDraw, ImageFont
import adafruit_ssd1306
import sys
from icecream import ic
# OLED設定
DISP_WIDTH = 128
DISP_HEIGHT = 64
DEVICE_ADDR = 0x3C
PATH_FONT = "./ipaexm.ttf"
class OledThread(threading.Thread):
    """
    OLED management thread.

    Consumes display requests from its queue, e.g.
        {"type": "oled", "time": "3000", "disp": "ip"}
    where disp is "ip" (show the IP address screen) or "clear".
    """
    def __init__(self):
        """Initialise the SSD1306 display over I2C, load fonts, clear screen."""
        ic()
        threading.Thread.__init__(self)
        self.stop_event = threading.Event()
        self.setDaemon(True)
        self._rcv_que = Queue()
        self._sysdat = SystemsData()
        # Setting some variables for our reset pin etc.
        RESET_PIN = digitalio.DigitalInOut(board.D4)
        TEXT = ""  # NOTE(review): this local is never used here
        # Very important... This lets py-gaugette 'know' what pins to use in order to reset the display
        i2c = board.I2C()
        self._oled = adafruit_ssd1306.SSD1306_I2C(DISP_WIDTH, DISP_HEIGHT, i2c, addr=DEVICE_ADDR, reset=RESET_PIN)
        # Pre-load the font at the sizes used by the display routines.
        self._font10 = ImageFont.truetype(PATH_FONT, 10)
        self._font12 = ImageFont.truetype(PATH_FONT, 12)
        self._font14 = ImageFont.truetype(PATH_FONT, 14)
        self._font16 = ImageFont.truetype(PATH_FONT, 16)
        self._font18 = ImageFont.truetype(PATH_FONT, 18)
        # Clear display.
        self._oled.fill(0)
        self._oled.show()
        return
    def stop(self):
        """Signal the thread to stop and blank the display."""
        ic()
        self.stop_event.set()
        # cleanup
        self._oled.fill(0)
        self._oled.show()
        return
    def run(self):
        """Consume queue items forever; dispatch "oled" items to _recvice.

        NOTE(review): the loop never checks stop_event, so stop() only blanks
        the screen; the daemon flag is what lets the process exit.
        """
        ic()
        while True:
            item = self.rcv_que.get()
            ic(sys._getframe().f_code.co_filename, sys._getframe().f_code.co_name, item)
            if "oled" not in item["type"]:
                print("[oled_th]", "error : type")
                continue
            self._recvice(item)
        return
    @property
    def rcv_que(self):
        """Queue that producers put display-request dicts onto."""
        return self._rcv_que
    def _recvice(self, item):
        """Handle one request dict: show the IP screen or clear the display."""
        ic()
        # NOTE(review): val_time is computed but never used -- presumably a
        # display duration in seconds; confirm intended behaviour.
        val_time = int(item["time"]) / 1000
        val_disp = item["disp"]
        def display_ip():
            # Render an intro line and the device's IP address on the OLED.
            ic()
            def get_ip_address(ifname):
                # SIOCGIFADDR ioctl: query the IPv4 address of an interface.
                s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
                return socket.inet_ntoa(
                    fcntl.ioctl(
                        s.fileno(),
                        0x8915,  # SIOCGIFADDR
                        struct.pack("256s", str.encode(ifname[:15])),
                    )[20:24]
                )
            # This sets TEXT equal to whatever your IP address is, or isn't
            try:
                TEXT = get_ip_address("wlan0")  # WiFi address of WiFi adapter. NOT ETHERNET
            except IOError:
                try:
                    TEXT = get_ip_address("eth0")  # WiFi address of Ethernet cable. NOT ADAPTER
                except IOError:
                    TEXT = "NO INTERNET!"
            # Clear display.
            self._oled.fill(0)
            self._oled.show()
            # Create blank image for drawing.
            image = Image.new("1", (self._oled.width, self._oled.height))
            draw = ImageDraw.Draw(image)
            # Draw the text
            intro = "カムロボです。"
            ip = "IPアドレス:"
            draw.text((0, 46), TEXT, font=self._font14, fill=255)
            draw.text((0, 0), intro, font=self._font18, fill=255)
            draw.text((0, 30), ip, font=self._font14, fill=255)
            # Display image
            self._oled.image(image)
            self._oled.show()
            return
        def display_clear():
            # Blank the whole display.
            self._oled.fill(0)
            self._oled.show()
            return
        if "ip" in val_disp:
            display_ip()
        else:
            # Clear display.
            display_clear()
        return
def main():
    """Smoke test: show the IP screen for 10 s, then clear and stop."""
    import time
    oled_th = OledThread()
    oled_th.start()
    q = oled_th.rcv_que
    q.put({"type": "oled", "time": "3000", "disp":"ip"})
    time.sleep(10)
    q.put({"type": "oled", "time": "3000", "disp":"clear"})
    time.sleep(1)
    oled_th.stop()
    return
if __name__ == "__main__":
    # Run the hardware smoke test only when executed directly.
    main()
e17e858e3935891c9d3792861eea678dfaa05e84 | Python | johnhany97/CCI-solutions | /chapter1/3-URLify.py | UTF-8 | 927 | 3.859375 | 4 | [] | no_license | import unittest
# This solution simply strips s from white spaces on both sides
# then replaces any existing spaces with %20
def urlify(s, l):
    """Trim surrounding whitespace, then URL-encode each internal space as %20.

    The `l` ("true length") parameter from the CtCI prompt is accepted but
    not needed by this string-based approach.
    """
    trimmed = s.strip()
    return trimmed.replace(' ', '%20')
# This solution splits s into multiple arrays which are identified
# by the spaces in between then joins them together using '%20'
def urlify_2(s, l):
    """Split the trimmed string on single spaces and re-join with '%20'.

    Consecutive spaces produce empty split segments, so runs of N spaces
    become N '%20' separators. ``l`` is intentionally unused (CtCI signature).
    """
    trimmed = s.strip()
    segments = trimmed.split(' ')
    return '%20'.join(segments)
class Test(unittest.TestCase):
    """Runs both URLify implementations against the same fixtures."""

    # (input, expected, true-length) triples; the length mirrors the classic
    # CtCI signature and is ignored by both implementations.
    values = [
        ('Mr John Smith   ', 'Mr%20John%20Smith', 13),
        ('Test  Test', 'Test%20%20Test', 10),
        (' 123  ', '123', 3)
    ]

    def test_urlify(self):
        for [test, expected, length] in self.values:
            self.assertEqual(urlify(test, length), expected)

    def test_urlify_2(self):
        for [test, expected, length] in self.values:
            self.assertEqual(urlify_2(test, length), expected)
if __name__ == "__main__":
unittest.main()
| true |
8043f55e7da489b4c8fcdd89ebb322175012fa53 | Python | GersonSales/AA | /30199-Lista_8-AA_basico-2016.2/P4-Removing_Letters.py | UTF-8 | 468 | 3.046875 | 3 | [] | no_license | #https://www.urionlinejudge.com.br/judge/en/problems/view/1556
def powerset(s):
    """Return all 2**len(s) subsequences of ``s`` as lists, in bitmask order.

    Bit ``j`` of the mask decides whether ``s[j]`` is included, so the empty
    list comes first and the full sequence last.
    """
    n = len(s)
    return [[s[j] for j in range(n) if mask & (1 << j)]
            for mask in range(1 << n)]
# Python 2 script: read a string and print every distinct non-empty
# subsequence (letter-removal result) in sorted order.
string = raw_input()
combinationss = list(map(''.join, powerset(string)))
result = set()  # de-duplicates subsequences that arise from different masks
for comb in combinationss:
    if (comb != ""):  # drop the empty subsequence
        result.add(comb)
result = list(result)
result.sort()
for comb in result:
    print comb
| true |
76e59e6841eff3cc85d3da2a30ae0cd334efe086 | Python | SnowyThinker/word2vec-demo | /wiki/test/test_jieba.py | UTF-8 | 8,978 | 2.8125 | 3 | [] | no_license | # coding=utf-8
from collections import Counter
import jieba
from gensim.models import Word2Vec
text = '''
美国宪法全文及修正案之完全中文版 ②
我们合众国人民,为建立更完善的联邦,树立正义,保障国内安宁,提供共同防务,促进公共福利,并使我们自己和后代得享自由的幸福,特为美利坚合众国制定本宪法。
第 一 条 第一款 本宪法授予的全部立法权,属于由参议院和众议院组成的合众国国会。
第二款 众议院由各州人民每两年选举产生的众议员组成。每个州的选举人须具备该州州议会人数最多一院选举人所必需的资格。
凡年龄不满二十五岁,成为合众国公民不满七年,在一州当选时不是该州居民者,不得担任众议员。
[众议员名额和直接税税额,在本联邦可包括的各州中,按照各自人口比例进行分配。各州人口数,按自由人总数加上所有其他人口的五分之三予以确定。
自由人总数包括必须服一定年限劳役的人,但不包括未被征税的印第安人。]
人口的实际统计在合众国国会第一次会议后三年内和此后每十年内,依法律规定的方式进行。每三万人选出的众议员人数不得超过一名,
但每州至少须有一名众议员;在进行上述人口统计以前,新罕布什尔州有权选出三名,马萨诸塞州八名,罗得岛州和普罗维登斯种植地一名,
康涅狄格州五名,纽约州六名,新泽西州四名,宾夕法尼亚州八名,特拉华州一名,马里兰州六名,弗吉尼亚州十名,北卡罗来纳州五名,
南卡罗来纳州五名,佐治亚州三名。
任何一州代表出现缺额时,该州行政当局应发布选举令,以填补此项缺额。 众议院选举本院议长和其他官员,并独自拥有弹劾权。
第三款 合众国参议院由[每州州议会选举的]两名参议员组成,任期六年;每名参议员有一票表决权。
参议员在第一次选举后集会时,立即分为人数尽可能相等的三个组。第一组参议员席位在第二年年终空出,第二组参议员席位在第四年年终空出,
第三组参议员席位在第六年年终空出,以便三分之一的参议员得每二年改选一次。[在任何一州州议会休会期间,如因辞职或其他原因而出现缺额时,
该州行政长官在州议会下次集会填补此项缺额前,得任命临时参议员。] 凡年龄不满三十岁,成为合众国公民不满九年,在一州当选时不是该州居民者,
不得担任参议员。 合众国副总统任参议院议长,但除非参议员投票时赞成票和反对票相等,无表决权。 参议院选举本院其他官员,
并在副总统缺席或行使合众国总统职权时,选举一名临时议长。 参议院独自拥有审判一切弹劾案的权力。为此目的而开庭时,
全体参议员须宣誓或作代誓宣言。合众国总统受审时,最高法院首席大法官主持审判。无论何人,非经出席参议员三分之二的同意,不得被定罪。
弹劾案的判决,不得超出免职和剥夺担任和享有合众国属下有荣誉、有责任或有薪金的任何职务的资格。
但被定罪的人,仍可依法起诉、审判、判决和惩罚。 第四款 举行参议员和众议员选举的时间、地点和方式,在每个州由该州议会规定。
但除选举参议员的地点外,国会得随时以法律制定或改变这类规定。
国会每年至少开会一次,除非国会以法律另订日期外,此会议在(十二月第一个星期一]举行。
第五款 每院是本院议员的选举、选举结果报告和资格的裁判者。每院议员过半数,即构成议事的法定人数;
但不足法定人数时,得逐日休会,并有权按每院规定的方式和罚则,强迫缺席议员出席会议。
每院得规定本院议事规则,惩罚本院议员扰乱秩序的行为,并经三之二议员的同意开除议员。
每院应有本院会议记录,并不时予以公布,但它认为需要保密的部分除外。
每院议员对于任何问题的赞成票和反对票,在出席议员五分之一的请求下,应载入会议记录。
在国会开会期间,任何一院,未经另一院同意,不得休会三日以上,也不得到非两院开会的任何地方休会。
第六款 参议员和众议员应得到服务的报酬,此项报酬由法律确定并由合众国国库支付。
他们除犯叛国罪、重罪和妨害治安罪外,在一切情况下都享有在出席各自议院会议期间和往返于各自议院途中不受逮捕的特权。
他们不得因在各自议院发表的演说或辩论而在任何其他地方受到质问。
参议员或众议员在当选任期内,不得被任命担任在此期间设置或增薪的合众国管辖下的任何文官职务。
凡在合众国属下任职者,在继续任职期间不得担任任何一院议员。
第七款 所有征税议案应首先在众议院提出,但参议院得像对其他议案一样,提出或同意修正案。
众议院和参议院通过的每一议案,在成为法律前须送交合众国总统。
总统如批准该议案,即应签署;如不批准,则应将该议案同其反对意见退回最初提出该议案的议院。
该院应特此项反对见详细载入本院会议记录并进行复议。
如经复议后,该院三分之二议员同意通过该议案,该议案连同反对意见应一起送交另一议院,并同样由该院进行复议,
如经该院三分之二议员赞同,该议案即成为法律。但在所有这类情况下,两院表决都由赞成票和反对票决定;
对该议案投赞成票和反对票的议员姓名应分别载入每一议院会议记录。
如任何议案在送交总统后十天内(星期日除外)未经总统退回,该议案如同总统已签署一样,即成为法律,
除非因国会休会而使该议案不能退回,在此种情况下,该议案不能成为法律。
凡须由参议院和众议院一致同意的每项命令、决议或表决(关于休会问题除外),
须送交合众国总统,该项命令、决议或表决在生效前,须由总统批准,如总统不批准,
则按照关于议案所规定的规则和限制,由参议院和众议院三分之二议员重新通过。
第八款 国会有权: 规定和征收直接税、进口税、捐税和其他税,以偿付国债、提供合众国共同防务和公共福利,
但一切进口税、捐税和其他税应全国统一; 以合众国的信用借款;
管制同外国的、各州之间的和同印第安部落的商业;
制定合众国全国统一的归化条例和破产法;
铸造货币,厘定本国货币和外国货币的价值,并确定度量衡的标准; 规定有关伪造合众国证券和通用货币的罚则;
设立邮政局和修建邮政道路; 保障著作家和发明家对各自著作和发明在限定期限内的专有权利,以促进科学和工艺的进步;
设立低于最高法院的法院; 界定和惩罚在公海上所犯的海盗罪和重罪以及违反国际法的犯罪行为;
宣战,颁发掳获敌船许可状,制定关于陆上和水上捕获的条例; 招募陆军和供给军需,但此项用途的拨款期限不得超过两年;
建立和维持一支海军; 制定治理和管理陆海军的条例; 规定征召民兵,以执行联邦法律、镇压叛乱和击退入侵;
规定民兵的组织、装备和训练,规定用来为合众国服役的那些民兵的管理,但民兵军官的任命和按国会规定的条例训练民兵的权力,由各州保留;
对于由某些州让与合众国、经国会接受而成为合众国政府所在地的地区(不得超过十平方英
'''
# Tokenise the sample text with jieba and count tokens longer than one
# character (skipping newlines), then print the 100 most frequent.
cuted = jieba.cut(text)
counter = Counter()
for x in cuted:
    if len(x) > 1 and x != '\n':
        counter[x] += 1

for(k, v) in counter.most_common(100):
    print('%s %d' % (k, v))
# Toy English corpus from the gensim tutorial.
# NOTE(review): defined but not used by the Word2Vec calls below.
sentences = [
    ['human', 'interface', 'computer'],
    ['survey', 'user', 'computer', 'system', 'response', 'time'],
    ['eps', 'user', 'interface', 'system'],
    ['system', 'human', 'system', 'eps'],
    ['user', 'response', 'time'],
    ['trees'],
    ['graph', 'trees'],
    ['graph', 'minors', 'trees'],
    ['graph', 'minors', 'survey']
]
class CorpusYielder(object):
    """Stream a corpus file lazily, yielding one jieba-tokenised line at a time.

    Being re-iterable makes it usable for gensim's multi-pass training.
    """
    def __init__(self,path):
        # Path to a UTF-8 text file, one sentence per line.
        self.path=path

    def __iter__(self):
        for line in open(self.path, 'r', encoding='utf-8'):
            # jieba.cut returns a generator; materialise tokens per line.
            yield list(jieba.cut(line))
# Train a Word2Vec model on the streamed corpus, then query similarity.
sentenceIterator = CorpusYielder('../doc/us_institution.txt')
# for str in sentenceIterator:
#     print(str)
model = Word2Vec(sentenceIterator, min_count=1)  # keep every token
model.train(sentenceIterator, total_words=500, epochs=10)
# Words similar to the first list, dissimilar to the second.
rs = model.most_similar(['法律'], ['本院'])
print(rs)
a798b21c61351cf1784cd0dd4f4cc6c2277e00eb | Python | nguyenhai31096/nguyentronghai-fundamentals-c4e29 | /Session01/homework/variablename.py | UTF-8 | 350 | 3.390625 | 3 | [] | no_license | # 1, How to check a variable’s type?
# > use type()
# 2. In what cases, you will get SyntaxError from the compiler telling you that some of your variables have invalid names?
# Can you give 3 different examples of invalid names?
# >If you give a variable an illegal name, you get a syntax error, for example
a = 01
a = $a
a = class
| true |
ed79d7e14eb8ce9e8f22681774ad1ade1dbfd480 | Python | epjoey/work | /app.py | UTF-8 | 1,178 | 2.609375 | 3 | [] | no_license | from flask import Flask
from flask.ext.sqlalchemy import SQLAlchemy
import datetime
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///work.db'
db = SQLAlchemy(app)
class Shift(db.Model):
    """A single work shift: clock-in time, optional clock-out, and a project."""
    __tablename__ = 'shift'
    id = db.Column('id', db.Integer, primary_key=True)
    # Stamped automatically at insert time (UTC).
    time_in = db.Column(db.DateTime, default=datetime.datetime.utcnow, nullable=False)
    # NULL while the shift is still open.
    time_out = db.Column(db.DateTime)
    project = db.Column(db.String(80))
def clockin(project):
    """Open a new shift, closing any shift still in progress.

    If the most recent shift was left open, its ``time_out`` is stamped now
    and its project is reused when *project* is falsy. Commits the session.
    """
    last_shift = Shift.query.order_by(Shift.id.desc()).first()
    if last_shift and not last_shift.time_out:
        # Close the dangling shift before opening a new one.
        last_shift.time_out = datetime.datetime.utcnow()
        if not project:
            # Carry the project over from the shift being closed.
            project = last_shift.project
    new_shift = Shift(project=project)
    db.session.add(new_shift)
    db.session.commit()
def history():
    """Return every recorded shift."""
    return Shift.query.all()
def clockout():
last_shift = Shift.query.order_by(Shift.id.desc()).first()
if not last_shift:
print "You have not started a shift yet"
if last_shift.time_out:
print "You already clocked out or you forgot to clockin"
last_shift.time_out = datetime.datetime.utcnow()
db.session.commit()
pass | true |
a3ed3e6fa52bd65d3881ff65d198c7563f5820b9 | Python | Xnsam/assignments | /largest_3.py | UTF-8 | 246 | 3.75 | 4 | [] | no_license | """Find the largest amongst 3."""
# Sample values; the largest one is reported below.
a = 3
b = 4
c = 2

# Flattened comparison chain (guard clauses instead of nested ifs).
if a > b and a > c:
    print(a, ' is largest')
elif b > c:
    print(b, ' is largest')
else:
    print(c, ' is largest')
| true |
e26066922bc27cd69297b6fff4cd1e11607bd0cd | Python | KieceDonc/L2_Info3A_Projet | /primitives.py | UTF-8 | 2,316 | 2.75 | 3 | [] | no_license | from dag import *
def boule(tup1, r):
    """Implicit polynomial of a sphere centred at ``tup1`` with radius ``r``.

    Zero set: (x-cx)^2 + (y-cy)^2 + (z-cz)^2 - r^2 = 0.
    """
    (cx,cy,cz) = tup1
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return (x-Nb(cx))*(x-Nb(cx)) + (y-Nb(cy))*(y-Nb(cy)) + (z-Nb(cz))*(z-Nb(cz)) - Nb(r*r)
def tore( r, R):
    """Implicit polynomial of a torus: tube radius ``r``, centre radius ``R``.

    (x^2+y^2+z^2+R^2-r^2)^2 - 4R^2(x^2+z^2); the 4R^2(x^2+z^2) term places
    the torus's axis along y.
    """
    x=Var("x")
    y=Var("y")
    z=Var("z")
    tmp=x*x+y*y+z*z+Nb(R*R-r*r)
    return tmp*tmp- Nb(4.*R*R)*(x*x+z*z)
def steiner2():
    """Implicit quartic of a Steiner-type surface (variant 2)."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return (x * x * y * y - x * x * z * z + y * y * z * z - x * y * z)
def steiner4():
    """Implicit quartic of a Steiner-type surface (variant 4)."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return y * y - Nb( 2.) * x * y * y - x * z * z + x * x * y * y + x * x * z * z - z * z * z * z
def hyperboloide_2nappes():
    """Implicit polynomial of a two-sheet hyperboloid (axis z), negated form."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return Nb(0.) - (z * z - (x * x + y * y + Nb(0.1)))
def hyperboloide_1nappe():
    """Implicit polynomial of a one-sheet hyperboloid (axis z), negated form."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return Nb(0.)-(z * z - (x * x + y * y - Nb(0.1)))
def roman():
    """Implicit quartic of the Roman (Steiner) surface:
    x^2 y^2 + x^2 z^2 + y^2 z^2 - 2xyz."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return ( x * x * y * y + x * x * z * z + y * y * z * z - Nb(2.) * x * y * z)
# https://lejournal.cnrs.fr/sites/default/files/styles/diaporama/public/assets/images/hauser_2.jpg?itok=sbbtGztR
def solitude():
    """Implicit polynomial of the 'Solitude' surface (see CNRS link above)."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return (x*x*y*z+x*y*y+y*y*y+y*y*y*z-x*x*z*z)
# https://lejournal.cnrs.fr/sites/default/files/styles/diaporama/public/assets/images/hauser_3.jpg?itok=c7zwNoRW
def miau():
    """Implicit polynomial of the 'Miau' surface (see CNRS link above)."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return (x*x*y*z+x*x*z*z+Nb(2)*y*y*z+Nb(3)*y*y*y)
# https://imaginary.org/fr/gallery/herwig-hauser-classic
def zitrus():
    """Implicit polynomial of the 'Zitrus' (lemon) surface (see IMAGINARY link)."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return (x*x+z*z-y*y*y*(Nb(1)-y)*(Nb(1)-y)*(Nb(1)-y))
# https://imaginary.org/fr/node/2221
def saturne():
    """Implicit polynomial of the 'Saturn' surface (see IMAGINARY link above)."""
    x=Var("x")
    y=Var("y")
    z=Var("z")
    return ((Nb(0.2)*x*x+Nb(0.4)*y*y+z*z+Nb(0.12))*(Nb(0.2)*x*x+Nb(0.4)*y*y+z*z+Nb(0.12))-Nb(0.5)*(Nb(0.2)*x*x+Nb(0.4)*y*y))*(Nb(0.4)*x*x+Nb(0.6)*y*y+Nb(0.6)*z*z-Nb(0.1))
# https://imaginary.org/fr/gallery/oliver-labs
def sextiqueDeBarth(r):
    """Implicit sextic of a Barth-type surface with parameter ``r``.

    P6 is the degree-6 product term; K is the unit-sphere polynomial used as
    the correction term, blended with weight alpha = 0.4*(2r+1).
    """
    x=Var("x")
    y=Var("y")
    z=Var("z")
    P6 = (Nb(r)*Nb(r)*x*x-y*y)*(Nb(r)*Nb(r)*y*y-z*z)*(Nb(r)*Nb(r)*z*z-x*x)
    alpha = (Nb(2)*Nb(r)+Nb(1))*(Nb(4e-1))
    K = x*x+y*y+z*z-Nb(1)
    return P6-alpha*K
# https://imaginary.org/fr/node/888
def weirdHeart():
    """Implicit polynomial y^2 + z^3 - 1 (see IMAGINARY link above).

    Independent of x, so the zero set is a cylinder along the x-axis.
    """
    y=Var("y")
    z=Var("z")
    return y*y+z*z*z-Nb(1)
b9c82d99092c03616036bbd689942e964a184a58 | Python | carvalhe/KinoTracker | /Kino_Main.py | UTF-8 | 400 | 2.65625 | 3 | [] | no_license | from flask import Flask, jsonify, request
import requests
import config
def movieApi():
    """Prompt for a movie title, query the OMDb API for it, and print the URL.

    Fix: the title is now passed via ``params`` so requests URL-encodes it;
    the original concatenated '?t=' + raw input, producing malformed query
    strings for titles containing spaces, '&' or '#'.
    """
    title = input('please give a movie to pull info for: ')
    source = requests.get('http://www.omdbapi.com/',
                          params={'t': title, 'apikey': config.key})
    print(source.url)
if __name__ == "__main__":
movieApi() | true |
6d680427110d403d41b93ea385e8d275bc538999 | Python | ianagbip1oti/aura | /core/service/karma_service.py | UTF-8 | 3,984 | 2.515625 | 3 | [
"MIT"
] | permissive | from core.datasource import DataSource
from core.model.member import KarmaMember, Member
# karma database service class, perform operations on the configured mongodb.
from util.config import config, profile
class KarmaService:
    """MongoDB-backed persistence for per-member, per-message karma records."""

    def __init__(self):
        # 'karma' collection of the configured database.
        self._karma = DataSource(config['database']['host'], config['database']['port'],
                                 config['database']['username'], config['database']['password'],
                                 config['database']['name']).db.karma
        # Reusable query templates, mutated before each call.
        # NOTE(review): sharing these mutable dicts across calls is not thread-safe.
        self._filter_query = dict(guild_id="", member_id="")
        self._channel_query = dict(guild_id="", member_id="", channel_id="", message_id="")
        self._increase_karma = {"$inc": {'karma': 1}}
        self._decrease_karma = {"$inc": {'karma': -1}}

    # update or insert karma member if not exist on first karma
    # check on inc if inc or dec query should be applied.
    def upsert_karma_member(self, member: KarmaMember, inc: bool) -> None:
        """Record one karma for the member's message (inc=True) or remove
        that message's karma record (inc=False)."""
        self._channel_query['guild_id'] = member.guild_id
        self._channel_query['member_id'] = member.member_id
        self._channel_query['channel_id'] = member.channel_id
        self._channel_query['message_id'] = member.message_id
        if inc:
            self._karma.update_one(filter=self._channel_query, update=self._increase_karma,
                                   upsert=True)
        else:
            # Decrement is implemented as deleting the per-message record.
            self._karma.delete_one(filter=self._channel_query)

    # remove all karma, regardless of channel
    def delete_all_karma(self, guild_id: str, member_id: str) -> None:
        """Delete every karma record of one member within one guild."""
        filter_member = dict(guild_id=guild_id, member_id=member_id)
        self._karma.delete_many(filter=filter_member)

    # aggregate overall karma of a member
    def aggregate_member_by_karma(self, member: KarmaMember) -> int:
        """Sum all karma of the member; returns None when no records exist."""
        self._filter_query['guild_id'] = member.guild_id
        self._filter_query['member_id'] = member.member_id
        pipeline = [{"$unwind": "$karma"}, {"$match": self._filter_query},
                    {"$group": {"_id": {"member_id": "$member_id"}, "karma": {"$sum": "$karma"}}}]
        doc_cursor = self._karma.aggregate(pipeline)
        for doc in doc_cursor:
            # A single group is produced; return its total immediately.
            return doc['karma']

    def aggregate_member_by_channels(self, member: KarmaMember):
        """Return a cursor of per-channel karma totals for the member,
        sorted highest-first and limited to profile()['channels'] entries."""
        self._filter_query['guild_id'] = member.guild_id
        self._filter_query['member_id'] = member.member_id
        pipeline = [{"$unwind": "$karma"}, {"$match": self._filter_query},
                    {"$group": {"_id": {"member_id": "$member_id", "channel_id": "$channel_id"},
                                "karma": {"$sum": "$karma"}}}, {"$limit": profile()['channels']},
                    {"$sort": {"karma": -1}}]
        doc_cursor = self._karma.aggregate(pipeline)
        return doc_cursor
class BlockerService:
    """MongoDB-backed blacklist of members excluded from karma."""

    def __init__(self):
        # 'blacklist' collection of the configured database.
        self._blacklist = DataSource(config['database']['host'], config['database']['port'],
                                     config['database']['username'], config['database']['password'],
                                     config['database']['name']).db.blacklist
        # Reusable query template, mutated before each call (not thread-safe).
        self._filter_query = dict(guild_id="", member_id="")

    def blacklist(self, member: Member):
        """Add the member to the blacklist (idempotent upsert)."""
        self._filter_query['guild_id'] = member.guild_id
        self._filter_query['member_id'] = member.member_id
        self._blacklist.update_one(filter=self._filter_query, update={'$set': {
            'guild_id': '{}'.format(member.guild_id),
            'member_id': '{}'.format(member.member_id)
        }}, upsert=True)

    def whitelist(self, member: Member):
        """Remove the member from the blacklist."""
        self._filter_query['guild_id'] = member.guild_id
        self._filter_query['member_id'] = member.member_id
        self._blacklist.delete_one(filter=self._filter_query)

    def find_member(self, member: Member):
        """Return the member's blacklist document, or None if not listed."""
        self._filter_query['guild_id'] = member.guild_id
        self._filter_query['member_id'] = member.member_id
        return self._blacklist.find_one(filter=self._filter_query)
| true |
6adf0fb886587f77ae6c6b33f3af7e510c0a3eff | Python | MarRoar/Python-code | /07-module/01-module.py | UTF-8 | 491 | 3.421875 | 3 | [] | no_license | '''
什么是模块?
只要以 .py 为后缀的文件,都可以被称为模块
模块中可以包含什么东西
1、变量
2、函数
3、class 面向对象(类 -》 对象)
4、可执行代码
使用模块的好处?
管理方便,易维护
降低复杂度
'''
PI = 3.14
def get_area(r):
    """Area of a circle of radius ``r``, using the module-level PI constant.

    :param r: circle radius
    :return: PI * r^2
    """
    return PI * r * r
class Student:
    # Placeholder illustrating that a module can export classes.
    pass
print(PI) | true |
28e0a768adfc2e99f99416e8918fbef2bdca5895 | Python | LourdesOshiroIgarashi/algorithms-and-programming-1-ufms | /Lists/Listas e Repetição - AVA/Cauê/06.py | UTF-8 | 186 | 3.375 | 3 | [
"MIT"
] | permissive | par = []
impar = []
num = list(map(int,input().split(' ')))
for i in num:
if i % 2 == 0:
par.append(i)
else:
impar.append(i)
print(num)
print(par)
print(impar)
| true |
4b324912d32ea64253766cb2b8ab7ed5f15e464b | Python | rabiazaka/project | /min.py | UTF-8 | 163 | 3.09375 | 3 | [] | no_license | import math
def function_power():
    """Prompt for a base and an exponent, then print base**exponent and |base|.

    Uses the ``**`` operator instead of calling ``a.__pow__(b)`` directly.
    """
    a = int(input("Enter any no: "))
    b = int(input("Enter value for power: "))
    print(a ** b)
    print(abs(a))
| true |
d95b59df573edf7179cc8b9db5c832216990e6f8 | Python | EugenePY/tensor-work | /sandbox/model/mlp_hook.py | UTF-8 | 4,127 | 2.75 | 3 | [
"BSD-3-Clause"
] | permissive | """
Letting RNN and MLP Layers support ContextSpace input
"""
from pylearn2.utils.track_version import MetaLibVersion
from pylearn2.utils import wraps
from pylearn2.space import VectorSpace
from pylearn2.sandbox.rnn.space import SequenceDataSpace, SequenceSpace
from space import ContextSpace
class AttentionWrapper(MetaLibVersion):
    """Metaclass that lets MLP/RNN layers accept ContextSpace input.

    For every classmethod named ``<name>_wrapper`` defined here, the metaclass
    replaces the layer's ``<name>`` method with the wrapped version, which
    reshapes ContextSpace data into SequenceDataSpace form around the call.
    """

    def __new__(cls, name, bases, dct):
        # Names to wrap: strip the '_wrapper' suffix from our classmethods.
        wrappers = [attr[:-8] for attr in cls.__dict__.keys()
                    if attr.endswith('_wrapper')]
        for wrapper in wrappers:
            if wrapper not in dct:
                # Method not defined on the class itself: find it on a base.
                for base in bases:
                    method = getattr(base, wrapper, None)
                    if method is not None:
                        break
            else:
                method = dct[wrapper]
            # Install the wrapped method in place of the original.
            dct[wrapper] = getattr(cls, wrapper + '_wrapper')(name, method)
        # Per-class bookkeeping flags used by the wrappers below.
        dct['seq2seq_friendly'] = False
        dct['_requires_reshape'] = False
        dct['_requires_unmask'] = False
        dct['_input_space_before_reshape'] = None
        return type.__new__(cls, name, bases, dct)

    @classmethod
    def set_input_space_wrapper(cls, name, set_input_space):
        """Wrap set_input_space: swap a ContextSpace for an equivalent
        SequenceDataSpace and remember the original for get_output_space."""
        @wraps(set_input_space)
        def outer(self, input_space):
            if not self.seq2seq_friendly:
                if isinstance(input_space, ContextSpace):
                    self._requires_reshape = True
                    self._input_space_before_reshape = input_space
                    input_space = SequenceDataSpace(
                        VectorSpace(dim=input_space.dim))
                    self.output_space = SequenceDataSpace(
                        VectorSpace(dim=self.dim))
                if isinstance(input_space, (SequenceSpace, SequenceDataSpace)):
                    pass
                else:
                    raise TypeError("Current Seq2Seq LSTM do not support "
                                    "none-context space. Got " +
                                    str(input_space))
            return set_input_space(self, input_space)
        return outer

    @classmethod
    def fprop_wrapper(cls, name, fprop):
        """Wrap fprop: transpose axes 0/1 of the input before the call and
        transpose the output back, when a ContextSpace reshape is active."""
        @wraps(fprop)
        def outer(self, state_below, return_all=False):
            if self._requires_reshape:
                # NOTE(review): ndim is computed but never used below.
                if isinstance(state_below, tuple):
                    ndim = state_below[0].ndim
                    reshape_size = state_below[0].shape
                else:
                    ndim = state_below.ndim
                    reshape_size = state_below.shape
                # Swap the first two axes for the sequence-major fprop.
                inp_shape = (reshape_size[1], reshape_size[0], reshape_size[2])
                output = fprop(self, state_below.reshape(inp_shape),
                               return_all=return_all)
                # Swap back so callers see the original axis order.
                output_shape = output.shape
                output = output.reshape((output_shape[1], output_shape[0],
                                         output_shape[2]))
                self.output_space.validate(output)
                return output
            else:
                return fprop(self, state_below, return_all=return_all)
        return outer

    @classmethod
    def get_output_space_wrapper(cls, name, get_output_space):
        """
        Same thing as set_input_space_wrapper.

        Parameters
        ----------
        get_output_space : method
            The get_output_space method to be wrapped
        """
        @wraps(get_output_space)
        def outer(self):
            if (not self.seq2seq_friendly and self._requires_reshape and
                    not isinstance(get_output_space(self), ContextSpace)):
                if isinstance(self._input_space_before_reshape, ContextSpace):
                    # Report a ContextSpace matching the pre-reshape input.
                    return ContextSpace(dim=get_output_space(self).dim,
                                        num_annotation=self._input_space_before_reshape.num_annotation)
                # NOTE(review): falls through returning None when the reshaped
                # input was not a ContextSpace -- confirm this is intended.
            else:
                return get_output_space(self)
        return outer
if __name__ == "__main__":
    # Smoke test (Python 2): attaching the metaclass should install the
    # wrapped fprop on the derived LSTM class.
    from pylearn2.sandbox.rnn.models.rnn import LSTM

    class LSTM_CONTEXT(LSTM): __metaclass__ = AttentionWrapper
    print LSTM_CONTEXT.fprop
| true |
2147fdf754b6b22cd7eb4d4e7ebb2b4dfa97fa25 | Python | kaicarver/cybsec | /arpscanner.py | UTF-8 | 573 | 2.578125 | 3 | [] | no_license | #!/usr/bin/python3
import signal
from kamene.all import *
def keyboardInterruptHandler(signal, frame):
    # SIGINT handler: report the signal number and terminate the scan.
    # NOTE(review): the parameter `signal` shadows the imported module.
    print("KeyboardInterrupt (ID: {}). Au revoir !".format(signal))
    exit(0)
signal.signal(signal.SIGINT, keyboardInterruptHandler)
# Sweep the 192.168.99.0/24 network: one broadcast ARP who-has per address.
for i in range(1, 255):
    ip = "192.168.99." + str(i)
    arpRequest = Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=ip)
    # Send and wait up to 100 ms for a single reply.
    arpResponse = srp1(arpRequest, timeout=.1, verbose=False)
    print(i, end=" ", flush=True)  # progress indicator
    if arpResponse:
        # A reply means the host is up; report its IP and MAC.
        print("\nIP: " + arpResponse.psrc + " MAC: " + arpResponse.hwsrc)
| true |
fe2c580e25bbdaf6b0064eb57d3be79fa8a732b6 | Python | xinbeiliu/coding-problems | /test_reverse_a_string.py | UTF-8 | 314 | 3.046875 | 3 | [] | no_license | import unittest
import reverse_a_string
class TestReverseString(unittest.TestCase):
    """Checks reverse_a_string.reverse_str on a palindromic-looking fixture
    whose casing makes direction visible."""

    def test_reverse_string(self):
        s = ["H","a","n","n","a","h"]
        result = reverse_a_string.reverse_str(s)
        self.assertEqual(result, ["h","a","n","n","a","H"])
if __name__ == '__main__':
unittest.main() | true |
e8bf3ec53e17a1166a5aa6cb4cb4bc9e2d17e7c5 | Python | Little-Captain/py | /Lean Python/mod1.py | UTF-8 | 180 | 2.8125 | 3 | [] | no_license | def hello():
print('hello')
writtenby = 'Paul'
class greeting():
    """Time-of-day greeting printer (demo class for the module exercise)."""

    def morning(self):
        print('Good Morning')

    def evening(self):
        print('Good Evening')
5c181eecbcb9efd545a62345328d41dd5e7b36ba | Python | info9117/BlueGarden_Project | /models/item.py | UTF-8 | 696 | 2.90625 | 3 | [] | no_license | from shared import db
class Item(db.Model):
    """An order line: a produce at a unit price and quantity, with a cached total."""
    __tablename__ = 'items'
    id = db.Column('id', db.Integer, primary_key=True)
    price = db.Column('price', db.Float)  # unit price
    produce_id = db.Column('produce_id', db.Integer, db.ForeignKey('produces.id'))
    produce = db.relationship('Produce', foreign_keys=[produce_id])
    total = db.Column('total', db.Float)  # price * amount, computed at creation
    amount = db.Column('amount', db.Integer)

    def __init__(self, price, produce_id, amount):
        self.price = price
        self.produce_id = produce_id
        self.amount = amount
        # Derive and store the line total from the values just assigned.
        self.calculate_total(self.price, self.amount)

    def calculate_total(self, price, amount):
        # Stores the result on self.total rather than returning it.
        self.total = price * float(amount)
| true |
7f84f20638133040206253061bf564c5d4630354 | Python | clint07/CHIP-8-Py | /tests/test_chip8.py | UTF-8 | 1,808 | 3.34375 | 3 | [] | no_license | import unittest
from chip8.chip8 import Chip8
class TestChip8(unittest.TestCase):
    """Opcode-level unit tests for the Chip8 CPU core."""

    def test_ret(self):
        """
        Instruction 00EE: Return from a subroutine.
        Pops the address off the stack into the program counter and
        decrements the stack pointer.
        """
        chip8 = Chip8()
        chip8.program_counter = 0xFF
        chip8.stack[chip8.stack_pointer] = 512
        chip8.stack_pointer += 1
        chip8.ret()
        # Expected PC is popped address + 2 (the instruction step).
        self.assertEqual(chip8.program_counter, 514)
        self.assertEqual(chip8.stack_pointer, 0)

    def test_jump(self):
        """
        Instruction 1nnn: Jump to location nnn.
        Sets the program counter to nnn.
        """
        chip8 = Chip8()
        chip8.jump(512)
        self.assertEqual(chip8.program_counter, 512)

    def test_call(self):
        """
        Instruction 2nnn: Call subroutine at nnn.
        Increments the stack pointer, pushes the current PC, and sets PC to nnn.
        """
        chip8 = Chip8()
        # NOTE(review): sets `chip8.PC` although the assertions read
        # program_counter/stack -- confirm which attribute call() pushes.
        chip8.PC = 512
        chip8.call(777)
        self.assertEqual(chip8.stack_pointer, 1)
        self.assertEqual(chip8.stack[0], 512)
        self.assertEqual(chip8.program_counter, 777)

    def test_skip_if(self):
        """
        Instruction 3xkk: Skip next instruction if Vx = kk.
        Compares register Vx to kk; if equal, increments the PC by 2.
        """
        chip8 = Chip8()
        chip8.registers[0x0] = 7
        chip8.skip_if(0x0, 7)
        self.assertEqual(chip8.program_counter, 516)
        chip8.skip_if(0x0, 9)
        # Expected values presumably include the ordinary per-instruction
        # PC advance even when the comparison fails -- confirm in Chip8.
        self.assertEqual(chip8.program_counter, 518)
| true |
9e6474944827670b4a170db373faec09017083f8 | Python | Camilo0319/SketchSeleniumPhyton | /Functions/Funciones.py | UTF-8 | 4,163 | 2.921875 | 3 | [] | no_license | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
import time
from selenium.webdriver.common.action_chains import ActionChains
class Funciones():
    """Selenium helper wrapper: driver lifecycle, clicks, verification,
    waits, window control and keyboard interaction.

    Bug fix: esperaExplicitaXpath now passes a single (By, value) locator
    tuple to EC.element_to_be_clickable, as the expected_conditions API
    requires; the original passed two positional arguments.
    """

    # ---- environment URLs ----
    GooglePage = ""

    # ---- driver lifecycle ----
    def login(self):
        """Start a Firefox session and open the configured page."""
        self.driver = webdriver.Firefox(executable_path=r"C:\Drivers\geckodriver.exe")
        self.driver.get(self.GooglePage)
        time.sleep(5)

    def cerrarAplicacion(self):
        """Close the current browser window."""
        self.driver.close()

    # ---- clicking ----
    def clicElementoID(self, elemento):
        self.driver.find_element_by_id(elemento).click()

    def clicElementoXpath(self, elemento):
        self.driver.find_element_by_xpath(elemento).click()

    def clicElementoCSS(self, elemento):
        # NOTE(review): despite the name, this locates by class name.
        self.driver.find_element_by_class_name(elemento).click()

    # ---- element verification ----
    def verificarTextosXpath(self, elemento):
        """Print the text of the element found by XPath."""
        elemento = self.driver.find_element_by_xpath(elemento)
        print(elemento.text)

    def verificarElementoSeVisualiza(self, elemento):
        self.driver.find_element_by_xpath(elemento).is_enabled()

    def verificarElementoSeVisualizaClase(self, elemento):
        self.driver.find_element_by_class_name(elemento).is_enabled()

    # ---- mouse hover ----
    def desplazarRatonporxPath(self, elemento):
        """Move the mouse over the element found by XPath."""
        element_hover = self.driver.find_element_by_xpath(elemento)
        ActionChains(self.driver).move_to_element(element_hover).perform()

    # ---- waits ----
    def esperaImplicita(self, tiempodeespera):
        """Set the driver-wide implicit wait (seconds)."""
        self.driver.implicitly_wait(tiempodeespera)

    def esperaExplicitaXpath(self, elemento):
        """Wait up to 20 s until the XPath-located element is clickable."""
        wait = WebDriverWait(self.driver, 20)
        wait.until(EC.element_to_be_clickable((By.XPATH, elemento)))

    # ---- window control ----
    def redimensionarPantalla(self, ancho, alto):
        self.driver.set_window_size(ancho, alto)

    # ---- browser interaction ----
    def refrescarPagina(self):
        self.driver.refresh()

    # ---- keyboard interaction ----
    def tabularTeclado(self, element, accion):
        """Send a navigation key (TAB or right arrow) to the XPath element."""
        if accion == "TAB":
            self.driver.find_element_by_xpath(element).send_keys(Keys.TAB)
        elif accion == "FLECHADERECHA":
            self.driver.find_element_by_xpath(element).send_keys(Keys.ARROW_RIGHT)

    def escribirTexto(self, elementoSeleccionado, texto):
        """Type `texto` into the element found by class name."""
        elemento = self.driver.find_element_by_class_name(elementoSeleccionado)
        elemento.send_keys(texto)
03341b7daeda5d2bb841ab5969e9f1d2f49f46ac | Python | cgddrd/CS39440-major-project | /final/technical_work/primary_experiments/python/tse/tests/regression/test_regression_tse_imageutils.py | UTF-8 | 6,579 | 2.828125 | 3 | [] | no_license | from unittest import TestCase
from nose.tools import *
from tse.tse_imageutils import TSEImageUtils
from tse.tse_geometry import TSEGeometry
from tse.tse_datautils import TSEDataUtils
import numpy as np
__author__ = 'connorgoddard'
class TestRegressionTSEImageUtils(TestCase):
    """Regression tests pinning previously fixed bugs in TSEImageUtils
    (GitHub issues #94 and #96)."""

    # Refs: Fix #94 - https://github.com/cgddrd/CS39440-major-project/issues/94
    def test_calc_ed_template_match_score_scaled_fix_94(self):
        # Create a sample test image that is empty.
        # NOTE(review): stored on self although only this test uses it.
        self._original_image = np.zeros((400, 400, 3), dtype=np.uint8)

        # Calculate the scale factor (MAKING SURE TO SUBTRACT '1' from the max height/width to account for array index out of bounds issue)
        scale_factor_width = TSEGeometry.calc_measure_scale_factor(200, (400 - 1))

        # Calculate the scaled indices to identify the pixels in the larger image that we will want to make GREEN to provide evidence for the test succeeding.
        original_image_scaled_indices = np.rint((np.arange(0, 200) * scale_factor_width)).astype(int)

        rows_cols_cartesian_product = np.hsplit(TSEDataUtils.calc_cartesian_product([original_image_scaled_indices, original_image_scaled_indices]), 2)

        rows_to_extract = rows_cols_cartesian_product[0].astype(int)
        cols_to_extract = rows_cols_cartesian_product[1].astype(int)

        # We now want to set each fo the pixels THAT WE EXPECT TO BE EXTRACTED BY THE TEST to GREEN to show that the test has passed.
        self._original_image[rows_to_extract, cols_to_extract] = [0, 200, 0]

        # Once we have performed the pixel extraction, we expect that all of the pixels returned will be GREEN (based ont he setup above)
        matching_image = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)
        non_matching_image = np.full((200, 200, 3), [200, 0, 0], dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled(matching_image, self._original_image), 0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled(non_matching_image, self._original_image) > 0)

    # Refs: Fix #96 - https://github.com/cgddrd/CS39440-major-project/issues/96
    def test_calc_ed_template_match_score_scaled_slow_fix_96(self):
        # One solid-colour target image per channel, plus an empty one.
        original_image_1 = np.zeros((400, 400, 3), dtype=np.uint8)
        original_image_2 = np.full((400, 400, 3), [200, 0, 0], dtype=np.uint8)
        original_image_3 = np.full((400, 400, 3), [0, 200, 0], dtype=np.uint8)
        original_image_4 = np.full((400, 400, 3), [0, 0, 200], dtype=np.uint8)

        # Notice template patch is half the size of the original. We can therefore scale it up.
        matching_image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        matching_image_2 = np.full((200, 200, 3), [200, 0, 0], dtype=np.uint8)
        matching_image_3 = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)
        matching_image_4 = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        non_matching_image = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_slow(matching_image_1, original_image_1), 0)
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_slow(matching_image_2, original_image_2), 0)
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_slow(matching_image_3, original_image_3), 0)
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_slow(matching_image_4, original_image_4), 0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled_slow(non_matching_image, original_image_1) > 0)
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled_slow(non_matching_image, original_image_2) > 0)
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled_slow(non_matching_image, original_image_3) > 0)

        # As the "non-matching" image has the same pixel value as "original_image_4", we WOULD EXPECT A MATCH.
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_slow(non_matching_image, original_image_4), 0)

    # Refs: Fix #96 - https://github.com/cgddrd/CS39440-major-project/issues/96
    def test_calc_ed_template_match_score_scaled_compiled_slow_fix_96(self):
        # Mirrors the previous test for the compiled implementation.
        original_image_1 = np.zeros((400, 400, 3), dtype=np.uint8)
        original_image_2 = np.full((400, 400, 3), [200, 0, 0], dtype=np.uint8)
        original_image_3 = np.full((400, 400, 3), [0, 200, 0], dtype=np.uint8)
        original_image_4 = np.full((400, 400, 3), [0, 0, 200], dtype=np.uint8)

        # Notice template patch is half the size of the original. We can therefore scale it up.
        matching_image_1 = np.zeros((200, 200, 3), dtype=np.uint8)
        matching_image_2 = np.full((200, 200, 3), [200, 0, 0], dtype=np.uint8)
        matching_image_3 = np.full((200, 200, 3), [0, 200, 0], dtype=np.uint8)
        matching_image_4 = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        non_matching_image = np.full((200, 200, 3), [0, 0, 200], dtype=np.uint8)

        # Check that for perfectly matching images, we get a score of exactly 0.
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(matching_image_1, original_image_1), 0)
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(matching_image_2, original_image_2), 0)
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(matching_image_3, original_image_3), 0)
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(matching_image_4, original_image_4), 0)

        # Check that for non-matching images, we get a score > 0.
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(non_matching_image, original_image_1) > 0)
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(non_matching_image, original_image_2) > 0)
        assert_true(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(non_matching_image, original_image_3) > 0)

        # As the "non-matching" image has the same pixel value as "original_image_4", we WOULD EXPECT A MATCH.
        assert_equal(TSEImageUtils.calc_ed_template_match_score_scaled_compiled_slow(non_matching_image, original_image_4), 0)
6398797f5ce6930c2ba83458618b0d151ad12ce2 | Python | a3X3k/Competitive-programing-hacktoberfest-2021 | /CodeWars/Pete, the baker.py | UTF-8 | 984 | 4.03125 | 4 | [
"Unlicense"
] | permissive | '''
5 kyu Pete, the baker
https://www.codewars.com/kata/525c65e51bf619685c000059/solutions/train/python
Pete likes to bake some cakes. He has some recipes and ingredients. Unfortunately he is not good in maths. Can you help him to find out, how many cakes he could bake considering his recipes?
Write a function cakes(), which takes the recipe (object) and the available ingredients (also an object) and returns the maximum number of cakes Pete can bake (integer). For simplicity there are no units for the amounts (e.g. 1 lb of flour or 200 g of sugar are simply 1 or 200). Ingredients that are not present in the objects, can be considered as 0.
Examples:
# must return 2
cakes({flour: 500, sugar: 200, eggs: 1}, {flour: 1200, sugar: 1200, eggs: 5, milk: 200})
# must return 0
cakes({apples: 3, flour: 300, sugar: 150, milk: 100, oil: 100}, {sugar: 500, flour: 2000, milk: 2000})
'''
def cakes(recipe, available):
    """Return how many whole cakes can be baked from the available ingredients.

    Each ingredient caps production at available // required; an ingredient
    missing from `available` counts as 0, which makes the answer 0.
    """
    # Generator expression instead of building a throwaway list for min().
    return min(available.get(ingredient, 0) // recipe[ingredient] for ingredient in recipe)
94d8eda6d338bc2a2dfda4de1db740f30ca0a9f2 | Python | hexinyu1900/LearnPython | /demo-project/011_字符串的查找与替换.py | UTF-8 | 944 | 3.828125 | 4 | [] | no_license | str="hello,python,python"
# Demo: searching and replacing inside strings.
# NOTE(review): the module-level variable defined just above is named `str`,
# shadowing the builtin - kept as-is here, but worth renaming.
# -- Predicates --
# string.startswith(s): True if the string starts with s
# print(str.startswith('h'))
# print(str.startswith('python'))
# string.endswith(s): True if the string ends with s
# print(str.endswith('n'))
# print(str.endswith('java'))
# Use case: finding files that end in .py or .html
# print(str.find('e'))                    # found: returns the index
# print(str.find('hello'))                # returns the index of the leftmost match
# print(str.find('python', 8))            # start searching from index 8 onwards
# print(str.find('python', 8, len(str)))  # search only within the given range
# print(str.find('java'))                 # not found: returns -1
# # print(str.index('java', 0, len(str))) # unlike find(), index() raises an error
# -- Replacement --
print(str.replace('python','go')) # count not given: every occurrence is replaced
print(str.replace('python', 'go', 1)) # count=1: only the first occurrence is replaced
| true |
b2d03bbbde7b04586300ccf0b19f217569e34b46 | Python | gani89/Automating_the_Boring_Stuff | /Exceptions.py | UTF-8 | 667 | 3.640625 | 4 | [] | no_license |
def boxPrint(symbol, width, height):
    """Print a hollow `width` x `height` box drawn with `symbol`.

    Raises Exception when `symbol` is not exactly one character or when
    either dimension is below 2 (the box needs a complete border).
    """
    if len(symbol) != 1:
        raise Exception("Symbol needs to be a string of length 1")
    if width < 2 or height < 2:
        raise Exception('Width and Height must be greater or equal than 2')
    border = symbol * width
    middle = symbol + ' ' * (width - 2) + symbol
    # Assemble all rows first, then emit them in a single print call;
    # the resulting stdout is identical to printing line by line.
    rows = [border] + [middle] * (height - 2) + [border]
    print('\n'.join(rows))
# Demo: draw a box, then show how to log a traceback to a file.
boxPrint('*', 10, 5)  # boxPrint prints the box itself and returns None
import traceback
try:
    raise Exception('This is the error message')
except Exception:
    # Narrowed from a bare `except:` so Ctrl-C/SystemExit still propagate;
    # `with` guarantees the log file is closed even if the write fails.
    with open('errorInfo.txt', 'a') as errorFile:
        errorFile.write(traceback.format_exc())
    print('The traceback info was written to errorInfo.txt')
2db5fcae375edf66b17c4bd08dc10443908e5301 | Python | meetchandan/cadence-python | /cadence/tests/test_exception_handling.py | UTF-8 | 1,152 | 2.59375 | 3 | [
"MIT"
] | permissive | import json
import traceback
import tblib
from cadence.exception_handling import serialize_exception, deserialize_exception, THIS_SOURCE, ExternalException
class TestException(Exception):
    """Exception type used by the serialization round-trip tests."""


def a():
    """Top of a four-deep call chain that always ends in TestException."""
    return b()


def b():
    return c()


def c():
    return d()


def d():
    # Deepest frame: raising here gives the tests a multi-frame traceback.
    raise TestException("here")
def test_serialize_deserialize_exception():
    """Round-trip: serialize a raised TestException, then rebuild it."""
    try:
        a()
    except TestException as err:
        caught = err
    details = serialize_exception(caught)
    details_dict = json.loads(details)
    assert details_dict["class"] == "cadence.tests.test_exception_handling.TestException"
    assert details_dict["args"] == ["here"]
    assert details_dict["traceback"]
    assert details_dict["source"] == "cadence-python"
    rebuilt = deserialize_exception(details)
    assert type(rebuilt) == TestException
    assert repr(rebuilt) == repr(caught)
    assert rebuilt.__traceback__
def test_deserialize_unknown_exception():
    """Details naming a class we cannot import become an ExternalException."""
    payload = {"class": "java.lang.Exception"}
    restored = deserialize_exception(json.dumps(payload))
    assert isinstance(restored, ExternalException)
    assert restored.details == payload
| true |
0c99ef5eb8cda4756ffbff77bb96244a6c05ecb0 | Python | ChanwO-o/peg-solitaire | /psboard.py | UTF-8 | 3,798 | 3.65625 | 4 | [] | no_license | '''
Created on Oct 28, 2018
@author: cmins
'''
# Every coordinate is a (row, col) pair: the first value is the row, the second the column
import psexceptions
import psgamestate
class PSBoard:
    """7x7 cross-shaped peg-solitaire board.

    Cell values: -1 = blocked corner cell, 0 = empty hole, 1 = peg.
    All coordinates are (row, col), 0-indexed.
    """

    def __init__(self):
        self._board = self.getNewBoard(7, 7)
        # Dimensions are fixed at 7 for now; kept as attributes so a future
        # board-resizing feature only has to change this constructor.
        self.numOfCols = 7
        self.numOfRows = 7

    def getNewBoard(self, rows: int, cols: int) -> [[int]]:
        ''' Creates a new game board with specified rows and columns '''
        board = []
        # Side length of each blocked corner square (2 on the classic 7x7 board).
        boundindex = (rows - 3) / 2
        for r in range(rows):
            row = []
            for c in range(cols):
                # A cell lying in both an edge-row band and an edge-column
                # band belongs to a cut-out corner of the cross shape.
                if r < boundindex or r > (rows - boundindex - 1):
                    if c < boundindex or c > (cols - boundindex - 1):
                        row.append(-1)
                        continue
                row.append(1)  # playable cell, starts holding a peg
            board.append(row)
        board[int(rows / 2)][int(cols / 2)] = 0  # center hole starts empty
        return board

    def getBoard(self) -> [[int]]:
        ''' Returns the board '''
        return self._board

    def get(self, row: int, col: int) -> int:
        ''' Returns value of peg at coordinate (-1, 0 or 1) '''
        if self.isOutOfBounds(row, col):
            raise psexceptions.PSOutOfBoundsException()
        return self._board[row][col]

    def addPeg(self, row: int, col: int) -> None:
        ''' Places a peg at the coordinate (no bounds check). '''
        self._board[row][col] = 1

    def removePeg(self, row: int, col: int) -> None:
        ''' Empties the hole at the coordinate (no bounds check). '''
        self._board[row][col] = 0

    def getRows(self) -> int:
        ''' Returns number of rows of board '''
        # Returned from the stored attribute rather than len(self._board)
        # in preparation for resizable boards.
        return self.numOfRows

    def getCols(self) -> int:
        ''' Returns number of cols of board '''
        return self.numOfCols

    def calcPegMiddle(self, fromRow: int, fromCol: int, toRow: int, toCol: int) -> ():
        ''' Returns the coordinate next to (fromRow, fromCol) in the direction
        of (toRow, toCol) - i.e. the peg jumped over on a straight-line move.
        Returns None for diagonal or zero-length moves.
        TODO: raise an exception for invalid (diagonal) moves instead. '''
        if fromRow - toRow > 0 and fromCol - toCol == 0:
            return (fromRow - 1, fromCol)
        elif fromRow - toRow < 0 and fromCol - toCol == 0:
            return (fromRow + 1, fromCol)
        elif fromCol - toCol > 0 and fromRow - toRow == 0:
            return (fromRow, fromCol - 1)
        elif fromCol - toCol < 0 and fromRow - toRow == 0:
            return (fromRow, fromCol + 1)
        else:
            return None  # was `pass`; made the None return explicit

    def isDiagonal(self, fromcol: int, fromrow: int, tocol: int, torow: int) -> bool:
        ''' Returns False when the move changes BOTH row and column (i.e. a
        diagonal move), True for straight-line moves - note the sense is
        inverted relative to the method name. '''
        if (fromcol - tocol) != 0 and (fromrow - torow) != 0:
            return False
        return True

    def isOutOfBounds(self, row: int, col: int) -> bool:
        ''' Checks if location is outside the board grid '''
        # Fixed off-by-one: valid indices are 0 .. rows-1 / 0 .. cols-1, so the
        # upper-bound comparison must be >= (previously `>`, which let
        # row == getRows() through and made get() fail with an IndexError
        # instead of PSOutOfBoundsException).
        if row < 0 or row >= self.getRows():
            return True
        if col < 0 or col >= self.getCols():
            return True
        # TODO: also treat the cut-out corner cells (-1) as out of bounds
        return False

    def printBoard(self) -> None:
        ''' Display the board on the console ('x' marks the corner cells). '''
        for r in range(self.getRows()):
            for c in range(self.getCols()):
                if self.get(r, c) == -1:
                    print('x', end=' ')
                else:
                    print(self.get(r, c), end=' ')
            print('\n')
48302f0829eafe07d972c6c348b49c32c83be878 | Python | CVanchieri/CS-Unit3-IterativeSorting | /src/searching/searching.py | UTF-8 | 2,298 | 4.71875 | 5 | [] | no_license | '''
Iterative:
A program is called recursive when an entity calls itself. A program is call iterative when there is a loop (or repetition).
'''
# Write a linear search approach
'''
Linear Search:
A simple approach is to do linear search, i.e
Start from the leftmost element of arr[] and one by one compare x with each element of arr[]
1. If x matches with an element, return the index.
2. If x doesn’t match with any of elements, return -1.
'''
def linear_search(arr, target): # function to implement linear search
for i in range(len(arr)): # for the i value in the range of the length of the array
if arr[i] == target: # if the arr index value for i is equal to the target
return i # return the value
return -1 # return not found
# Write an iterative implementation of Binary Search
'''
# Binary Search:
This search algorithm takes advantage of a collection of elements that is already sorted by ignoring
half of the elements after just one comparison.
1. Compare x with the middle element.
2. If x matches with the middle element, we return the mid index.
3. Else if x is greater than the mid element, then x can only lie in the right (greater) half subarray
after the mid element. Then we apply the algorithm again for the right half.
4. Else if x is smaller, the target x must lie in the left (lower) half. So we apply the algorithm for
the left half.
'''
def binary_search(arr, target): # function to implement binary search
start = 0 # set the star to 0
stop = len(arr) - 1 # set the stop to the length of the arr minus 1
while start <= stop: # while the start value is less than the stop value
midpoint = start + (stop - start)//2 # set the midpoint to start value and (stop value minus start value) divided by 2
midpoint_val = arr[midpoint] # set the midpoint to the arr midpoint
if midpoint_val == target: # if the midpoint_val is equal to the target
return midpoint # return the midpoint value
elif target <midpoint_val: # else if the target is less than the midpoint_val
stop = midpoint - 1 # set stop to the midpoint minus 1
else: # else
start = midpoint + 1 # set the start value to the midpoint plus 1
return -1 # not found
| true |
1260fe8110828e6ba6dd9eb2ee9c8b68fab81607 | Python | wi7a1ian/python-lab | /NewStuffInPy3.py | UTF-8 | 1,128 | 3.234375 | 3 | [] | no_license | a, *rest, b = range(10)
with open("Python/NewStart/Basics.py", encoding='utf-8') as f:
first, *_, last = f.readlines()
def sum(a, b, *, biteme=False):
if biteme:
pass
else:
return a + b
#sum(1, 2, 3) # TypeError: sum() takes 2 positional arguments but 3 were given
try:
try:
raise Exception("Yo")
except Exception as e:
raise Exception("Chain preserved") from e
except Exception as e:
print(e)
def ListIter():
""" Instead of
for i in gen():
yield i
"""
yield from range(10)
ListIter()
from pathlib import Path
directory = Path("Python/NewStart")
filepath = directory / "Basics.py"
print(filepath.exists())
# https://docs.python.org/3/library/asyncio-task.html
import asyncio
async def ping_server(ip):
print("Pinging {0}".format(ip))
@asyncio.coroutine # same as above
def load_file(path):
pass
async def ping_local():
return await ping_server('192.168.1.1')
# Blocking call which returns when the ping_local() coroutine is done
loop = asyncio.get_event_loop()
loop.run_until_complete(ping_local())
loop.close() | true |
7e20671b0c07ba670b5fd56d248fda080d48aa70 | Python | niklasf/jerry | /dialogs/DialogWithListView.py | UTF-8 | 1,789 | 2.65625 | 3 | [] | no_license | from PyQt4.QtGui import *
from PyQt4.QtCore import *
class DialogWithListView(QDialog):
def __init__(self, moveList, parent=None):
super(DialogWithListView, self).__init__(parent)
self.setWindowTitle("Next Move")
self.resize(20, 40)
self.selected_idx = 0
self.listWidget = QListWidget()
buttonBox = QDialogButtonBox(QDialogButtonBox.Ok| QDialogButtonBox.Cancel)
self.okButton = QPushButton("&OK")
cancelButton = QPushButton("Cancel")
buttonLayout = QHBoxLayout()
buttonLayout.addStretch()
buttonLayout.addWidget(self.okButton)
buttonLayout.addWidget(cancelButton)
layout = QGridLayout()
layout.addWidget(self.listWidget,0,1)
layout.addWidget(buttonBox, 3, 0, 1, 3)
self.setLayout(layout)
self.listWidget.addItems(moveList)
self.listWidget.item(0).setSelected(True)
self.connect(buttonBox, SIGNAL("accepted()"),
self, SLOT("accept()"))
self.connect(buttonBox, SIGNAL("rejected()"),
self, SLOT("reject()"))
self.connect(self,SIGNAL("rightclick()"), SLOT("accept()") )
self.connect(self,SIGNAL("leftclick()"), SLOT("reject()") )
self.listWidget.itemDoubleClicked.connect(self.accept)
self.listWidget.currentItemChanged.connect(self.on_item_changed)
def on_item_changed(self):
self.selected_idx = self.listWidget.currentRow()
def keyPressEvent(self, event):
key = event.key()
if key == Qt.Key_Left or key == Qt.Key_Escape:
self.emit(SIGNAL("leftclick()"))
elif key == Qt.Key_Right or key == Qt.Key_Return:
self.emit(SIGNAL("rightclick()"))
| true |
822c00023d0a0103897d5a61fdda9d67c7dc4b5f | Python | CUCEI20B/distancia-euclidiana-ErickJoestar | /admin.py | UTF-8 | 344 | 2.921875 | 3 | [] | no_license | from particula import Particula
class Admin:
def __init__(self):
self.__particulas = []
def agregar_inicio(pt: Particula):
self.particulas.append(pt)
def agregar_final(pt: Particula):
self.particulas.append(pt)
def mostrar():
for particula in self.__particulas:
print(particula)
| true |
818df47cb66c3e978d1f8ea717643381ee4546fd | Python | robdelacruz/boneyard | /witness/test_widgets.py | UTF-8 | 3,093 | 2.609375 | 3 | [
"MIT"
] | permissive | import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk, Pango, Gdk, GLib
import datetime
import ui
import conv
class MainWin(Gtk.Window):
width = 300
height = int(width * 3/2)
def __init__(self):
super().__init__(border_width=0, title="ui test")
self.set_size_request(MainWin.width, MainWin.height)
grid1 = Gtk.Grid()
lbl = Gtk.Label("Date Entry")
de1 = DateEntry()
de2 = DateEntry()
de2.set_isodt("2019-01-02")
grid1.attach(lbl, 0,0, 1,1)
grid1.attach(de1.widget(), 0,1, 1,1)
grid1.attach(de2.widget(), 0,2, 1,1)
grid2 = Gtk.Grid()
lbl = Gtk.Label("Form 2")
chk = Gtk.CheckButton("Check 1")
grid2.attach(lbl, 0,0, 1,1)
grid2.attach(chk, 0,1, 1,1)
grid3 = Gtk.Grid()
lbl = Gtk.Label("Form 3")
grid3.attach(lbl, 0,0, 1,2)
grid3.set_hexpand(True)
stack = Gtk.Stack()
stack.add_titled(grid1, "pane1", "Journal")
stack.add_titled(grid2, "pane2", "Topics")
stack.add_titled(grid3, "pane3", "Utility")
ss = Gtk.StackSwitcher()
ss.set_stack(stack)
ss.set_halign(Gtk.Align.CENTER)
grid = Gtk.Grid()
grid.attach(ss, 0,0, 1,1)
grid.attach(ui.frame(stack), 0,1, 1,1)
self.add(grid)
self.connect("destroy", Gtk.main_quit)
self.show_all()
class DateEntry():
def __init__(self):
entry = Gtk.Entry()
entry.set_icon_from_icon_name(Gtk.EntryIconPosition.SECONDARY, "x-office-calendar")
popover = Gtk.Popover()
cal = Gtk.Calendar()
popover.add(cal)
popover.set_position(Gtk.PositionType.BOTTOM)
popover.set_relative_to(entry)
def on_icon_clicked(entry, *args):
date = conv.isodt_to_date(entry.get_text())
if not date:
date = datetime.datetime.now()
cal.props.year = date.year
cal.props.month = date.month-1
cal.props.day = date.day
popover.show_all()
popover.popup()
entry.connect("icon-press", on_icon_clicked)
def on_sel_day(cal):
if cal.is_visible():
(year, month, day) = cal.get_date()
month += 1
entry.set_text(conv.dateparts_to_isodt(year, month, day))
cal.connect("day-selected", on_sel_day)
def on_sel_day_dblclick(cal):
popover.popdown()
cal.connect("day-selected-double-click", on_sel_day_dblclick)
self.entry = entry
def widget(self):
return self.entry
def set_date(self, date):
self.entry.set_text(conv.date_to_isodt())
def get_date(self):
date = conv.isodt_to_date(self.entry.get_text())
if not date:
date = datetime.datetime.now()
return date
def set_isodt(self, isodt):
self.entry.set_text(isodt)
def get_isodt(self):
return self.entry.get_text()
if __name__ == "__main__":
w = MainWin()
Gtk.main()
| true |
5194a95cb6533751700d0431134d9b790c0367db | Python | akankshajagwani/Python_Final | /Codes_Practice/prog_18_bullet_submit_1.py | UTF-8 | 3,775 | 2.75 | 3 | [] | no_license | # https://www.hackerrank.com/challenges/a-super-hero
import time
ticks_1 = time.time()
def ConvertToInt(str):
try:
str = int(str)
return str
except:
print "Non-integer input", str
exit()
def CheckRange(num, minValue, maxValue):
if num < minValue or num > maxValue:
print "Entry is not in range",num,".It should be >= %d and <= %d" %(minValue,maxValue)
exit()
filePath = raw_input('File Path:')
if len(filePath) <= 0:
filePath = "bullet_input.txt"
fhandle = open(filePath)
fhand_write = open("bullet_output.txt",'w')
line = fhandle.readline().strip()
T = line
# T = raw_input('')
T = ConvertToInt(T)
CheckRange(T,1,100)
MinBullets = []
for t in range(1,T+1):
line = fhandle.readline().strip()
NM = line
# NM = raw_input('')
lst_1 = NM.split()
if len(lst_1) is 2:
N = ConvertToInt(lst_1[0])
CheckRange(N,1,100)
M = ConvertToInt(lst_1[1])
CheckRange(M,1,500000)
else:
print "N and M input wrong"
exit()
levels = range(1,N+1)
enemies = range(0,M)
P = {}
for n in levels:
line = fhandle.readline().strip()
P[n] = line
# print len(line)
# P[n] = raw_input('')
P[n] = P[n].split()
# print P[n]
# print len(P[n]) is M
# C = len(P[n])
# if C is M:
# print "entering"
indx = 0
for i in P[n]:
i = ConvertToInt(i)
CheckRange(i,1,1000)
P[n][indx] =i
indx = indx+1
# else:
# print "Invalid input, M:",M,"N: ",N,"len:",len(P[n]), type(M),type(len(P[n]))
# exit()
B = {}
for n in levels:
line = fhandle.readline().strip()
B[n] = line
# B[n] = raw_input('')
B[n] = B[n].split()
# if len(B[n]) is M:
indx = 0
for i in B[n]:
i = ConvertToInt(i)
CheckRange(i,1,1000)
B[n][indx] =i
indx = indx+1
# print B[1]
# else:
# print "Invalid input, M:",M,"N: ",N
# exit()
# import numpy
# DP = numpy.zeros(shape=(N,M), dtype=int)
DP = {}
DP[1] = []
for i in enemies:
# DP[0][i]=P[1][i]
# print i
DP[1].append(P[1][i])
for i in range(2,N+1):
DP[i] = []
# fhand_write.write("DP[i-1]"+ str( DP[i-1])+"\n")
# fhand_write.write( "B[i-1]"+str( B[i-1])+"\n")
# rem = []
# for kk in enemies:
# rem.append((B[i-1][kk],DP[i-1][kk]))
# fhand_write.write( "B[i-1],DP[i-1]"+str( rem)+"\n")
# rem.sort()
# fhand_write.write( "sorted"+str( rem)+"\n")
# rem1 = []
# for j in enemies:
# rem1.append((P[i][j],j))
# rem1.sort()
# fhand_write.write( "sorted P[i]"+str( rem1)+"\n")
for j in enemies:
bullet = []
for k in enemies:
if B[i-1][k] >= P[i][j] :
# bullet.append(DP[i-2][k])
bullet.append(DP[i-1][k])
else :
# bullet.append(DP[i-2][k]+P[i][j]-B[i-1][k])
bullet.append(DP[i-1][k]+P[i][j]-B[i-1][k])
# DP[i-1][j]=min(bullet)
DP[i].append(min(bullet))
# fhand_write.write( "DP: "+str(DP[i])+str( min(DP[i]))+"\n")
# fhand_write.write( str(min(DP[N-1]))+"\n")
fhand_write.write( str(min(DP[N]))+"\n")
# print (min(DP[N-1]))
# print (min(DP[N]))
fhand_write.flush()
fhand_write.close()
fhandle.close()
ticks_2 = time.time()
print "time: ",ticks_2-ticks_1 | true |
ed7641be6b11a91b370e364d453d4e476835d24c | Python | sunnyyeti/Leetcode-solutions | /2218 Maximum Value of K Coins From Piles.py | UTF-8 | 2,533 | 3.625 | 4 | [] | no_license | # <!-- There are n piles of coins on a table. Each pile consists of a positive number of coins of assorted denominations.
# In one move, you can choose any coin on top of any pile, remove it, and add it to your wallet.
# Given a list piles, where piles[i] is a list of integers denoting the composition of the ith pile from top to bottom, and a positive integer k, return the maximum total value of coins you can have in your wallet if you choose exactly k coins optimally.
# Example 1:
# Input: piles = [[1,100,3],[7,8,9]], k = 2
# Output: 101
# Explanation:
# The above diagram shows the different ways we can choose k coins.
# The maximum total we can obtain is 101.
# Example 2:
# Input: piles = [[100],[100],[100],[100],[100],[100],[1,1,1,1,1,1,700]], k = 7
# Output: 706
# Explanation:
# The maximum total can be obtained if we choose all coins from the last pile.
# Constraints:
# n == piles.length
# 1 <= n <= 1000
# 1 <= piles[i][j] <= 105
# 1 <= k <= sum(piles[i].length) <= 2000 -->
from functools import cache
class Solution:
def maxValueOfCoins(self, piles: List[List[int]], k: int) -> int:
def get_prefix_sum(pile):
prefix_sum = [0]
for i in range(0,len(pile)):
prefix_sum.append(prefix_sum[-1]+pile[i])
return prefix_sum
prefix_sum_piles = [get_prefix_sum(pile) for pile in piles]
#print(prefix_sum_piles)
prefix_sum_total = [0]*len(prefix_sum_piles)
prev = 0
for i in reversed(range(len(prefix_sum_piles))):
prefix_sum_total[i] = prefix_sum_piles[i][-1] + prev
prev = prefix_sum_total[i]
#print(prefix_sum_total)
length_to_end = [0]*len(piles)
prev = 0
for i in reversed(range(len(piles))):
length_to_end[i] = len(piles[i])+prev
prev = length_to_end[i]
#print(length_to_end)
@cache
def max_coins_from_index(index,k):
if index >= len(piles):
return 0
if k==0:
return 0
if length_to_end[index] <= k:
return prefix_sum_total[index]
max_coins = float("-inf")
for cur_k in range(min(len(piles[index])+1,k+1)):
chose_coins = prefix_sum_piles[index][cur_k]
max_coins = max(max_coins,chose_coins+max_coins_from_index(index+1,k-cur_k))
#print(index,k,max_coins)
return max_coins
return max_coins_from_index(0,k)
| true |
610116d131009a31747a81a87da7e80a2ad00803 | Python | shankarkrishnamurthy/problem-solving | /maximize-palindrome-length-from-subsequences.py | UTF-8 | 615 | 2.578125 | 3 | [] | no_license | class Solution:
def longestPalindrome(self, w1, w2):
s, n, res = w1 + w2, len(w1), 0
dp=[[-1]*len(s) for i in range(len(s))]
def lps(i,j):
nonlocal res
if i > j: return 0
if dp[i][j] != -1: return dp[i][j]
if i == j:
dp[i][j] = 1
return 1
if s[i] == s[j]:
sv = lps(i+1, j-1) + 2
if i < n and j >= n: res = max(res, sv)
else: sv = max(lps(i+1,j), lps(i,j-1))
dp[i][j] = sv
return sv
lps(0, len(s)-1)
return res
| true |
b388f306664dbf13566f6c2eeec5fbc4f868065c | Python | zengljnwpu/yaspc | /optimization/peephole.py | UTF-8 | 6,415 | 2.71875 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
"""
Created : 2017/8/7
Author: hellolzc axiqia
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from optimization import instruction
from optimization import function_optimizer
DEBUG = False
def set_debug_print(debug_print):
"""set DEBUG
"""
global DEBUG
DEBUG = debug_print
class ControlFlowOptimizer(function_optimizer.FunctionOptimizer):
"""ControlFlowOptimizer class
"""
def __init__(self):
super(ControlFlowOptimizer, self).__init__()
def __get_used_labels(self):
''' 遍历所有的指令查找跳转的Label一览 '''
inst_list = self.data_unit.get_inst_list()
used_labels = set()
for inst in inst_list:
if isinstance(inst, instruction.CJumpInst):
used_labels.add(inst.thenlabel)
used_labels.add(inst.elselabel)
elif isinstance(inst, instruction.JumpInst):
used_labels.add(inst.label)
return used_labels
def remove_unused_label(self):
"""remove unused label"""
inst_list = self.data_unit.get_inst_list()
# remove a JumpInst if its target is followed label
new_inst_list = []
for inst_no, inst in enumerate(inst_list):
'''
如果当前指令是无条件跳转指令并且下一条指令刚好是跳转的目的地指令(Label),可忽略掉该无条件跳转指令
'''
if isinstance(inst, instruction.JumpInst) and ((inst_no + 1) < len(inst_list)) and \
isinstance(inst, instruction.LabelInst) and inst.label == inst_list[inst_no + 1].labelname:
continue
''' 其它指令则保留 '''
new_inst_list.append(inst)
inst_list = new_inst_list
# 遍历所有的指令查找跳转的Label一览
used_label_set = self.__get_used_labels()
# remove unused label
new_inst_list = []
for inst in inst_list:
# 若当前指令为Label指令,并且该Label没有被使用,则忽略之
if isinstance(inst, instruction.LabelInst) and \
not inst.labelname in used_label_set:
continue
new_inst_list.append(inst)
return new_inst_list
@classmethod
def __replace_preblock_target(cls, block, label, new_succ):
'''
遍历其前序基本块,修改其后继为new_succ
查看前序基本块的最后指令,
若最后的指令为无条件跳转指令,则跳转的Label修改为当前基本块中指令的Label
若最后的指令为有条件跳转指令,则更改其中为当前指令Label的分支Label
'''
for preblock in block.preBasicBlock:
preinst = preblock.instList[-1]
if isinstance(preinst, instruction.JumpInst):
preinst.label = label
# TODO:
elif isinstance(preinst, instruction.CJumpInst):
for pre_succ in preblock.succBasicBlock:
if pre_succ[0] == block:
if pre_succ[1] == "thenlabel":
preinst.thenlabel = label
else:
preinst.elselabel = label
@classmethod
def __control_flow_optimization(cls, block_list):
"""控制流优化
"""
for block in block_list[::-1]:
"""
find unconditional jump instruction and has been labeled
"""
if len(block.instList) == 1 and isinstance(block.instList[0], instruction.JumpInst):
''' 当前基本块中只有一条指令并且该指令为无条件跳转指令
'''
''' 取当前无条件跳转指令的转向Label和唯一后继 '''
label = block.instList[0].label
unique_succ = block.get_succ_unique_block()
'''
不妨设当前基本块为B2,前驱为B1(可能有多个,记为B1',B1''),后继为B3(只有一个)
1. 遍历其前序基本块,查看前序基本块的最后指令
*若B1最后的指令为无条件跳转指令,则跳转的Label修改为当前基本块B2中指令的Label
*若B1最后的指令为有条件跳转指令,则更改其中为当前指令Label的分支Label
2. 修改前序基本块的后继,将出现的B2改为B3,接着:
*在B2中的前驱中删掉B1
*在B3的前驱中加上B1
这样,B1到达B2的弧被改成B1到B3
处理了当前基本块B2所有前驱后,B2变得不可达,调用dead_code_elimination删除它
'''
cls.__replace_preblock_target(block, label, unique_succ)
# TODO:
def control_flow_optimization(self):
"""控制流优化
注意:做完控制流优化后基本块需要重新构建
TODO:控制流优化同时使用了instList和blockList,instList和blockList这两个不应该同时存在的,需要重写控制流优化
"""
pass
def dead_code_elimination(self):
"""删除到达不了的基本块
返回更新过的block_list
"""
block_list = self.data_unit.get_block_list()
# 循环直到找不到死节点为止
loop_change_flag = True
while loop_change_flag:
loop_change_flag = False
new_block_list = []
for block in block_list:
if block.blockNum != 0 and block.blockNum != -1:
if len(block.preBasicBlock) == 0:
# 到达不了的节点不加入new_block_list中,且如果死节点有后继,删除后继基本块的相关信息
loop_change_flag = True
for succ_block, _ in block.succBasicBlock:
succ_block.preBasicBlock.remove(block)
if DEBUG:
print("delete block %d"%block.blockNum)
continue
new_block_list.append(block)
block_list = new_block_list
#return block_list
self.data_unit.set_block_list(block_list)
| true |
89598ea91be7181b778ca7b01eb0c93af9216478 | Python | Jasper-Dong/Decision-Tree | /test1.py | UTF-8 | 1,694 | 3.484375 | 3 | [] | no_license | from CreateDT.ID3 import createID3Tree
from CreateDT.C4_5 import createC4_5Tree
from CreateDT.CART import createCARTTree
from CreateDT.PlotDT import createPlot
import matplotlib.pyplot as plt
# 读取数据集文件
def loadDataSet(fileName):
"""
:param fileName:数据集文件
:return:数据集
"""
file = open(fileName) # 打开数据集文件
line = file.readline() # 读取每行所有元素
dataSet = [] # 数据集初始化
while line:
data = line.strip('\n').split(',') # 按照','划分数据,并剔除回车符
dataSet.append(data) # 将每行数据放到数据集
line = file.readline()
file.close()
return dataSet
# 构造原始数据集和属性集合
originalDataSet = loadDataSet('DataSet/watermelon.txt')
labels = originalDataSet[0]
dataSet = originalDataSet[1:]
def showDT(dataSet, labels):
"""
:param dataSet:数据集
:param labels:属性标签
"""
# ID3算法生成分类决策树
ID3Tree = createID3Tree(list(dataSet), list(labels))
print('The ID3 Decision Tree is', ID3Tree)
# C4.5算法生成分类决策树
C4_5Tree = createC4_5Tree(list(dataSet), list(labels))
print('The C4.5 Decision Tree is', C4_5Tree)
# CART算法生成分类决策树
CARTTree = createCARTTree(list(dataSet), list(labels))
print('The CART Decision Tree is', CARTTree)
# 显示各个决策树
createPlot(ID3Tree, 'ID3 Decision Tree')
createPlot(C4_5Tree, 'C4.5 Decision Tree')
createPlot(CARTTree, 'CART Decision Tree')
plt.show() # 显示决策树
showDT(dataSet, labels)
| true |
bda16ebaf7d5ffcca69792a792a6b5d6cff61bde | Python | howieWong/pythonStudy | /LearnPython/day2/conclude.py | UTF-8 | 193 | 2.8125 | 3 | [] | no_license | print("abc"+"==" +"def")
a=set("abcc")
print(a)
b=set("cde")
print(a&b)
print(a-b)
list=["2","3","4"]
json={"name":"howie","age":"30"};
print(json["name"])
json["job"]="developer"
print(json)
| true |
aba51ca311ea462ea14a5f55190192fcb0fa25d3 | Python | Hexexe/Da-Python-2020 | /part04-e15_last_week/src/last_week.py | UTF-8 | 705 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
import pandas as pd
import numpy as np
def last_week():
a = pd.read_csv("src/UK-top40-1964-1-2.tsv", sep="\t", converters={"LW": lambda x: np.int64(x) if x not in ["New", "Re"] else np.nan})
b = a[~a["LW"].isna()]
b.sort_values(by=["LW"], inplace=True)
b["WoC"] -= 1
b["Peak Pos"].where((b["Peak Pos"] != b["Pos"]) | (b["Peak Pos"] == b["LW"]), np.nan, inplace=True)
b.index = b["LW"].rename()
b = b.reindex(range(1, a.shape[0]+1))
b["Pos"], b["LW"] = b.index, np.nan
return b
def main():
df = last_week()
print("Shape: {}, {}".format(*df.shape))
print("dtypes:", df.dtypes)
print(df)
if __name__ == "__main__":
main()
| true |
2eb288b804011cec61d7ee864c3455596ebe4e70 | Python | k0nsta/tceh-python | /course4/imports_example/module_one.py | UTF-8 | 614 | 2.84375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
# This wont work together:
# from __future__ import absolute_import
# import module_two
# from imports_example import module_two # explicit relative import!
# from imports_example import module_two
import module_two
def test_imports():
# without absolute_import (possible, but not right):
# from sub_package import get_info
# from sub_package.sub import help
# with absolute_import:
from imports_example.sub_package import get_info
from imports_example.sub_package.sub import help
help()
# module_two.do_work()
| true |
cf8cb9d94bcb1e002627d674eba938d9d0eb4ad6 | Python | songszw/python | /python小栗子/t80.py | UTF-8 | 402 | 3.0625 | 3 | [] | no_license | import os
all_files = os.listdir(os.curdir)
type_dict = dict()
for each in all_files:
if os.path.isdir(each):
type_dict.setdefault('文件夹',0)
type_dict['文件夹']+=1
else:
ext = os.path.splitext(each)[1]
type_dict.setdefault(ext,0)
type_dict[ext]+=1
for each in type_dict.keys():
print('该文件夹下面共有类型为【%s】的文件%d 个'%(each,type_dict[each]))
| true |
7fdcb7272462646bee81fab01ff8b077309d7d0d | Python | SherlockHua1995/Pedestrian-Trajectory-Clustering | /utils/common.py | UTF-8 | 246 | 2.859375 | 3 | [] | no_license | """
2018.02.03
@author: Hao Xue
"""
# from math import *
import math
PIXELS_IN_METER = 3.33
def euclidDist(p1, p2):
assert (len(p1) == len(p2))
return math.sqrt(sum([((p1[i] - p2[i]) / PIXELS_IN_METER) ** 2 for i in range(len(p1))]))
| true |
3453f125e8d3c1f389bef9e6482a4ea67d5776d9 | Python | s160785/opencv | /Shape_Estimator.py | UTF-8 | 2,122 | 2.796875 | 3 | [] | no_license | #Importing Modules
import cv2
import numpy as np
from joining_images import stackImages
def getContoours(img, imgContour):
contours, hierarcy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)
print(f"Number of counters:{len(contours)}")
for cnt in contours:
area = cv2.contourArea(cnt)
print(f'\narea={area}')
if area > 0:
peri = cv2.arcLength(cnt, True)
# print(peri)
approx = cv2.approxPolyDP(cnt, 0.02 * peri, True)
cv2.drawContours(imgContour, cnt, -1, (255, 0, 0), 3)
print(len(approx))
objCor = len(approx)
x, y, w, h = cv2.boundingRect(approx)
if objCor == 3: objectType = "Tri"
elif objCor == 4:
aspratio = w/float(h)
if aspratio > 0.95 and aspratio < 1.05: objectType ="Square"
else: objectType = "Rect"
elif objCor == 8:
objectType = "Circle"
elif objCor == 6:
objectType = "Hex"
else:
objectType = None
#cv2.rectangle(imgContour, (x, y), (x + w, y + h), (0, 155, 0), 2)
cv2.putText(imgContour,f"{objCor}{objectType}",
(x + int(w / 2) - 10, y + int(h / 2) - 10), cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 0, 0), 2)
#main
path = "resources/shapes_small.jpg"
img = cv2.imread(path)
imgContour = img.copy()
imgContour2 = img.copy()
imgBlur = cv2.bilateralFilter(img, d=7, sigmaColor=75, sigmaSpace=75)
imgGray = cv2.cvtColor(imgBlur, cv2.COLOR_BGR2GRAY)
imgthresh = cv2.adaptiveThreshold(imgGray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
cv2.THRESH_BINARY,11,2)
#Result
#cv2.resize(imgthresh,(img.shape[1],img.shape[0]))
imgCanny = cv2.Canny(imgBlur, 50, 50)
getContoours(imgCanny,imgContour)
getContoours(imgthresh,imgContour2)
imgBlank = np.zeros_like(img)
imgStack = stackImages(0.6, ([img, imgthresh, imgGray],
[imgCanny, imgContour, imgContour2]))
cv2.imshow("Stack", imgStack)
cv2.waitKey(0)
| true |
4e53a630899bd08654d57a6180f85aaf4deb47f9 | Python | Alex-Mathai-98/Sarcasm-Detection-in-Product-Reviews-Using-Deep-Learning | /Sentiment/Model/sentiment.py | UTF-8 | 5,115 | 3.140625 | 3 | [] | no_license | import numpy as np
from nltk import sent_tokenize
import json, requests
# java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port 9000 -timeout 15000000
class StanfordCoreNLP:
"""
Modified from https://github.com/smilli/py-corenlp
"""
def __init__(self, server_url):
# TODO: Error handling? More checking on the url?
if server_url[-1] == '/':
server_url = server_url[:-1]
self.server_url = server_url
def annotate(self, text, properties=None):
assert isinstance(text, str)
if properties is None:
properties = {}
else:
assert isinstance(properties, dict)
# Checks that the Stanford CoreNLP server is started.
try:
requests.get(self.server_url)
except requests.exceptions.ConnectionError:
raise Exception('Check whether you have started the CoreNLP server e.g.\n'
'$ cd <path_to_core_nlp_folder>/stanford-corenlp-full-2016-10-31/ \n'
'$ java -mx4g -cp "*" edu.stanford.nlp.pipeline.StanfordCoreNLPServer -port <port> -timeout <timeout_in_ms>')
data = text.encode()
r = requests.post(
self.server_url, params={
'properties': str(properties)
}, data=data, headers={'Connection': 'close'})
output = r.text
if ('outputFormat' in properties
and properties['outputFormat'] == 'json'):
try:
output = json.loads(output, encoding='utf-8', strict=True)
except:
pass
return output
class sentiment_classifier() :
    """Sentence- and paragraph-level sentiment scoring via a CoreNLP server.

    Sentiment values (CoreNLP convention): 0 = very negative, 1 = negative,
    2 = neutral, 3 = positive, 4 = very positive.
    """
    def __init__ (self,text) :
        # Default text analysed by paragraph_sentiment() when no argument is given.
        self.text = text
    def sentiment_analysis_on_sentence(self,sentence):
        """Annotate *sentence* with CoreNLP and return the per-sentence JSON list."""
        # The StanfordCoreNLP server is running on http://127.0.0.1:9000
        nlp = StanfordCoreNLP('http://127.0.0.1:9000')
        # Json response of all the annotations
        output = nlp.annotate(sentence, properties={
            "annotators": "tokenize,ssplit,parse,sentiment",
            "outputFormat": "json",
            # Setting enforceRequirements to skip some annotators and make the process faster
            "enforceRequirements": "false"
        })
        # 'sentences' is a list of dicts, one per sentence CoreNLP detected,
        # each carrying 'sentiment' and 'sentimentValue' fields.
        assert isinstance(output['sentences'], list)
        return output['sentences']
    def sentence_sentiment(self,sentence):
        """Return a 1xN int64 numpy array of sentiment values for *sentence*."""
        # checking if the sentence is of type string
        assert isinstance(sentence, str)
        # getting the json ouput of the different sentences. Type "List"
        result = self.sentiment_analysis_on_sentence(sentence)
        num_of_sentences = len(result)
        sentiment_vec = np.zeros((1,num_of_sentences), dtype = "int64" )
        for i in range(0,num_of_sentences):
            sentiment_vec[0,i] = ( int(result[i]['sentimentValue']) )
        return sentiment_vec
    def paragraph_sentiment(self,text=None):
        """Return one (possibly averaged) sentiment score per tokenized sentence.

        Bug fix: the *text* argument used to be ignored in favour of
        self.text; it is now honoured, falling back to self.text when
        omitted (backward compatible with existing callers).
        """
        if text is None:
            text = self.text
        sents = sent_tokenize(text)
        final_vector = []
        for sent in sents :
            vec = self.sentence_sentiment(sent)
            modified_vec = vec[0]
            if len(modified_vec) > 1 :
                # CoreNLP split this chunk further; average its scores.
                final_vector.append(sum(modified_vec) / len(modified_vec))
            else :
                final_vector.append(modified_vec[0])
        return final_vector
    def display_value_meanings(self):
        """Print the mapping from numeric sentiment values to labels."""
        setiment_meaning = {'0':'Very Negative','1': 'Negative','2':'Normal','3':'Good','4':'Very Good'}
        for i in range(len(setiment_meaning)):
            print("{} stands for {}".format(str(i),setiment_meaning[str(i)]))
if __name__ == '__main__':
    # Demo: score a sample paragraph and print the value legend.
    text = "You are stupid! You're smart and handsome. This is a tool. Rohan is a fantastic person and a great person!"
    # NOTE(review): this second assignment replaces the short sample above
    # with a longer book review, so the first string is never analysed.
    text = "I think she makes some good points, and I think some things are just put out there and that she doesn't listen. She just wants only her opinion to be right to an extreme. She's good at analyzing situations, but she would not be good for a government position requiring much trust to keep stability, that is for sure. On the other hand, you probably want her to be your Republican lobbyist. A \"friend\" a \"Coulter Jr.\" told me about how great this book is. He acts just like Coulter, but just doesn't publish books and goes out and speaks like she does. Otherwise, he would probably be doing at least okay- (Coulter created and kept her niche first.) I am not particularly Democrat or Republican, but I try to give everything a chance. This book, while giving some fresh perspectives I would not have thought of, is quite hit or miss, too opinionated, and not always reasoning things out enough."
    senti = sentiment_classifier(text)
    senti.display_value_meanings()
    vector = senti.paragraph_sentiment(text)
    print(vector)
700565b8fba8d4304ea4dc112f8002e7448b88b7 | Python | ThiaguinhoLS/Python | /state_two.py | UTF-8 | 595 | 3.609375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
class Airplane(object):
    """Context of the State pattern: delegates on/off to its current state."""

    def __init__(self):
        # A freshly created plane starts in the Stopped state.
        self._state = Stopped()

    def __str__(self):
        return 'Airplaine is '

    def set_state(self, state):
        """Swap in a new state object (Movement or Stopped)."""
        self._state = state

    def on(self):
        # Forward the request to the active state's handler.
        self._state.on()

    def off(self):
        # Forward the request to the active state's handler.
        self._state.off()
class Movement(object):
    """State: the airplane is currently running."""

    def on(self):
        # Already on - nothing to start.
        print('Avião já está ligado')

    def off(self):
        print('Desligando avião')
class Stopped(object):
    """State: the airplane is currently stopped."""

    def on(self):
        print('Ligando o avião')

    def off(self):
        # Already off - nothing to shut down.
        print('Avião já está desligado')
| true |
a3b57f00ec30bb34ab090f9dc3372cce7c4f1125 | Python | endeavor5/django-nomad | /django_nomad/git/utils.py | UTF-8 | 2,715 | 2.875 | 3 | [
"MIT"
] | permissive | import subprocess
from .exceptions import GitDirNotFound, GitException
def git_exec(*args):
    """Run ``git <args...>`` and return its combined stdout/stderr as bytes."""
    command = ("git",) + args
    return subprocess.check_output(command, stderr=subprocess.STDOUT)
def common_ancestor(target, current="HEAD"):
    """
    Find the most recent ancestor commit that is shared between the two branches. This function
    simply calls `git-merge-base` command.
    Args:
        target (string): name of branch to compare to current.
        current (string): name of current branch. Defaults to HEAD.
    Returns:
        string: the ancestor commit SHA-1, removing the final blank-line character.
    Raises:
        GitException: if git-merge cannot find a common ancestor.
    """
    try:
        sha = git_exec("merge-base", current, target).decode("utf-8")
    except subprocess.CalledProcessError as e:
        # Surface git's own error text (minus the trailing newline).
        raise GitException(e.output.decode("utf-8")[:-1])
    return sha[:-1]
def diff_files(target, current="HEAD"):
    """
    Get list of changed files between two commit refs.
    Args:
        target (string): name of branch to compare to current.
        current (string): name of current branch. Defaults to HEAD.
    Returns:
        list: name of files that were changed between current and target.
    Raises:
        GitException: if any error occur while executing diff.
    """
    try:
        bin_output = git_exec("diff", current, target, "--name-only")
    except subprocess.CalledProcessError as e:
        # Chain the original git error so its output is not lost
        # (the bound `e` was previously unused).
        raise GitException(
            "Error getting diff between commits {} and {}".format(current, target)
        ) from e
    output = bin_output.decode("utf-8")
    # Remove empty strings
    return list(filter(bool, output.split("\n")))
def get_file_content_from_commit(file_name, commit_ref):
    """
    Get the content a file from a given commit reference.
    Args:
        file_name (string): the file path.
        commit_ref (string): the commit SHA-1 reference.
    Returns:
        string: the given file content.
    Raises:
        GitException: if any error occur while executing show.
    """
    try:
        bin_output = git_exec("show", "{}:{}".format(commit_ref, file_name))
    except subprocess.CalledProcessError as e:
        # Chain the original git error so its output is not lost
        # (the bound `e` was previously unused).
        raise GitException(
            "Could not get file {} from {}".format(file_name, commit_ref)
        ) from e
    return bin_output.decode("utf-8")
def find_git_directory():
    """
    Search for git directory (in case the user in not on the project root).
    Returns:
        string: path to git directory (trailing newline stripped)
    Raises:
        GitDirNotFound: if the current directory is not inside a git repo.
    """
    try:
        bin_output = git_exec("rev-parse", "--git-dir")
    except subprocess.CalledProcessError as e:
        # Chain the git error for debuggability (the bound `e` was unused).
        raise GitDirNotFound() from e
    return bin_output.decode("utf-8")[:-1]
| true |
4dc58b2f72d4602b7bc7c45384b5d0a940660012 | Python | MattimusRex/AdventOfCode2018 | /Advent of Code 2018/Day 10/10_1_2.py | UTF-8 | 2,331 | 3.53125 | 4 | [] | no_license | class Node:
    def __init__(self, id, pos, vel):
        """Store a point's id, (x, y) position tuple and (dx, dy) velocity tuple."""
        self.id = id
        self.pos = pos
        self.vel = vel
def advance(self):
x = self.pos[0] + self.vel[0]
y = self.pos[1] + self.vel[1]
self.pos = (x, y)
def get_distance(self, node):
x_dist = node.pos[0] - self.pos[0]
y_dist = node.pos[1] - self.pos[1]
return (x_dist * x_dist) + (y_dist * y_dist)
def print_grid(nodes, positions, node, other_node):
    """Print an ASCII map of *positions* around the box spanned by two nodes.

    The bounding box of the two nodes is padded by 50 cells on every side;
    '#' marks an occupied cell and '.' an empty one. A blank line follows.
    """
    pad = 50
    x_lo = min(node.pos[0], other_node.pos[0]) - pad
    x_hi = max(node.pos[0], other_node.pos[0]) + pad
    y_lo = min(node.pos[1], other_node.pos[1]) - pad
    y_hi = max(node.pos[1], other_node.pos[1]) + pad
    for row in range(y_lo, y_hi + 1):
        line = "".join("#" if (col, row) in positions else "."
                       for col in range(x_lo, x_hi + 1))
        print(line)
    print()
def calc_longest_distance(nodes):
    """Return (squared_distance, node_a, node_b) for the farthest-apart pair.

    Bug fix: the result nodes are now initialised before the scan; the old
    code raised UnboundLocalError when no pair had a distance greater than
    zero (e.g. a single node, or all nodes at the same position).
    For an empty input the result is (0, None, None).
    """
    max_dist = 0
    node1 = node2 = None
    for a in nodes:
        for b in nodes:
            dist = a.get_distance(b)
            # Seed with the first pair so the return values are always bound.
            if node1 is None or dist > max_dist:
                max_dist = dist
                node1 = a
                node2 = b
    return (max_dist, node1, node2)
#process input into nodes
# Each input line looks like: position=< x, y> velocity=< dx, dy>
nodes = []
positions = set()
id_counter = 1
# NOTE(review): id_counter is never incremented, so every Node gets id 1;
# the ids are not used anywhere below.
with open('input.txt', 'r') as inputFile:
    for line in inputFile:
        # Text between the first '<' and '>' is the position pair.
        pos = line[line.find("<") + 1:line.find(">")]
        pos = pos.split(",")
        pos = (int(pos[0].strip()), int(pos[1].strip()))
        # The remainder after the first '>' holds the velocity pair.
        line = line[line.find(">") + 1:]
        vel = line[line.find("<") + 1:line.find(">")]
        vel = vel.split(",")
        vel = (int(vel[0].strip()), int(vel[1].strip()))
        positions.add(pos)
        node = Node(id_counter, pos, vel)
        nodes.append(node)
# Track the farthest-apart pair as a proxy for overall point spread.
dist, node1, node2 = calc_longest_distance(nodes)
seconds = 0
while seconds < 100000:
    #print(seconds)
    dist = node1.get_distance(node2)
    # if seconds % 100 == 0:
    #     print(dist)
    # When the tracked pair gets close, the points likely spell a message.
    if dist < 15000:
        print(seconds)
        print_grid(nodes, positions, node1, node2)
    # Advance every point one second and rebuild the occupancy set.
    positions.clear()
    for node in nodes:
        node.advance()
        positions.add(node.pos)
    seconds += 1
140f540bca8fd7a5ffd9049139bbc656dd5ba0e5 | Python | webbam46/YBVisual | /lib/ybvisual/robot/robotbase.py | UTF-8 | 5,319 | 2.75 | 3 | [] | no_license | #!/usr/bin/env python
import sys
import rospy
import actionlib
import geometry_msgs.msg
from geometry_msgs import *
from move_base_msgs.msg import *
from actionlib_msgs.msg import *
from nav_msgs.msg import Odometry
import time
from std_msgs.msg import String
import math
#
# Robotbase is used to send goals to the base
#
class RobotBase:
    """Wrapper around ROS move_base: direct cmd_vel control plus goal sending.

    Velocities are in m/s (linear) and rad/s (angular); distances are given
    in meters, rotations in degrees. Timed moves assume constant velocity.
    """
    #Initialise
    def __init__(self):
        #Log
        rospy.loginfo("Creating robot base controller");
        #Publisher used to manually control the robot
        self.cmd_vel_publisher = rospy.Publisher('/move_base/cmd_vel',geometry_msgs.msg.Twist);
        #Subscribe to the move_base action server
        self.move_base_server = actionlib.SimpleActionClient('move_base',MoveBaseAction);
        #move base goal publisher
        #self.move_base_goal_publisher = rospy.Publisher('/move_base/goal',move_base_msgs.msg.MoveBaseActionGoal);
        rospy.loginfo("Starting move_base action server..");
        #Wait until the action server is available (up to 60 s)
        self.move_base_server.wait_for_server(rospy.Duration(60));
        rospy.loginfo("Started move_base action server");
        #Linear velocity (m/s) used by timed moves
        self.linear_velocity = 0.3
        #Angular velocity (rad/s) used by timed rotations
        self.angular_velocity = 0.5
        #Created!
        rospy.loginfo("Created robot base");
        #Variables can be used to command robot by giving a string
        self.cmd_move_forwards = "FORWARDS"
        self.cmd_move_back = "BACK"
        self.cmd_move_left = "LEFT"
        self.cmd_move_right = "RIGHT"
        self.cmd_rotate_left = "LEFT"
        self.cmd_rotate_right = "RIGHT"
    #Process given cmdVel command: publish the twist repeatedly so it is not missed
    def procCmdVel(self,twist):
        for i in range(30):
            self.cmd_vel_publisher.publish(twist)
    #Stop moving the base - publishes an all-zero twist
    def Stop(self):
        #Now stop the robot
        rospy.loginfo("Attempting to stop robot")
        self.procCmdVel(geometry_msgs.msg.Twist())
    #Create a goal message in the base_link frame (x/y/z in meters, w = orientation)
    def CreateGoal(self,x,y,z,w):
        #Create msg object
        g = MoveBaseGoal()
        g.target_pose.header.frame_id = "base_link";
        g.target_pose.header.stamp = rospy.Time.now()
        g.target_pose.pose.position.x = x; #Move in X axis by meters
        g.target_pose.pose.position.y = y; #Move in Y axis by meters
        g.target_pose.pose.position.z = z; #Move in Z axis by meters
        g.target_pose.pose.orientation.w = w; #We need to specify an orientation > 0
        return g
    #Move base in direction: publish (lx, ly, az) then sleep *amount* seconds and stop
    def Move(self,lx,ly,az,amount):
        #Create the twist message
        twist = geometry_msgs.msg.Twist()
        twist.linear.x = lx
        twist.linear.y = ly
        twist.angular.z = az
        self.procCmdVel(twist)
        #Robot is moving 1 m/s, so we should wait for specified given distance
        time.sleep(amount)
        #Stop the robot after waiting
        self.Stop()
    #Move specified distance - use time to calculate the distance
    #lx/ly are direction flags (+1 / -1 / 0); dist is in meters
    def MoveDistance(self,lx,ly,dist):
        rospy.loginfo("Moving distance: " + str(dist) + "m")
        # duration = distance / speed, assuming constant linear_velocity
        duration = dist / self.linear_velocity
        rospy.loginfo("The move should take: " + str(duration))
        start_time = time.time()
        twist = geometry_msgs.msg.Twist()
        if lx == 1:
            twist.linear.x = self.linear_velocity
        elif lx==-1:
            twist.linear.x = -self.linear_velocity
        else:
            #Do not set linear x
            twist.linear.x = 0
        if ly == 1:
            twist.linear.y = self.linear_velocity
        elif ly == -1:
            twist.linear.y = -self.linear_velocity
        else:
            #do not set linear y
            twist.linear.y = 0
        while ( (time.time() - start_time) < duration ):
            self.procCmdVel(twist)
        rospy.loginfo("Reached!")
        self.Stop()
    #Rotate specified distance (degrees given, converted to radians)
    #az is a direction flag (+1 / -1 / 0)
    def RotateDistance(self,az,dist):
        _dist = math.radians(dist)
        rospy.loginfo("Rotating distance: " + str(dist) + "degrees" + " or " + str(_dist) + " radians")
        # duration = angle / angular speed, assuming constant angular_velocity
        duration = _dist / self.angular_velocity
        rospy.loginfo("Rotation should take: " + str(duration))
        twist = geometry_msgs.msg.Twist()
        if az == 1:
            twist.angular.z = self.angular_velocity
        elif az == -1:
            twist.angular.z = -self.angular_velocity
        else:
            twist.angular.z = 0
        start_time = time.time()
        while( (time.time() - start_time) < duration):
            self.procCmdVel(twist)
        rospy.loginfo("Reached!")
        self.Stop()
    #Publish a raw twist once (no timing, no stop)
    def _Move(self,lx,ly,az):
        #Create the twist message
        twist = geometry_msgs.msg.Twist()
        twist.linear.x = lx
        twist.linear.y = ly
        twist.angular.z = az
        self.procCmdVel(twist)
    #Move the base to a goal via the move_base action server
    def MoveTo(self,x,y,z):
        #Create the goal
        goal = self.CreateGoal(x,y,z,1.0);
        #Send the robot to the goal
        rospy.loginfo("Moving robot towards goal");
        self.move_base_server.send_goal(goal);
        #get result (wait up to 50 s)
        goalresult= self.move_base_server.wait_for_result(rospy.Duration(50));
        #stop when reached
        self.Stop();
d91eb0c960e18deed7193d3bfeaae4502274d42d | Python | codecreation01/tuple-index- | /tuple index().py | UTF-8 | 90 | 3.28125 | 3 | [] | no_license | val=('1','2','3','4','5','6')
print(val)
# Bug fix: val holds strings, so look up the string '5'.
# The old call val.index(5) raised ValueError because the int 5 is not in the tuple.
index = val.index('5')
print("index of 5 is:", index)
| true |
96bcc19c60ebd5e90137712ee0121cda157ec7e2 | Python | starrysky1211/leetcode | /python/101.对称二叉树.py | UTF-8 | 819 | 2.78125 | 3 | [] | no_license | '''
Author: Zander
Description: Edit Here
Date: 2021-08-06 14:15:46
LastEditors: Zander
LastEditTime: 2021-08-06 15:34:57
FilePath: /python/101.对称二叉树.py
'''
#
# @lc app=leetcode.cn id=101 lang=python3
#
# [101] 对称二叉树
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def isSymmetric(self, root: TreeNode) -> bool:
        """Return True if the tree is a mirror image of itself."""
        def mirror(a: TreeNode, b: TreeNode) -> bool:
            # Two empty subtrees mirror each other.
            if a is None and b is None:
                return True
            # Both present with equal values: compare outer and inner pairs.
            if a is not None and b is not None and a.val == b.val:
                return mirror(a.left, b.right) and mirror(a.right, b.left)
            return False
        return mirror(root, root)
# @lc code=end
| true |
4e899b67a01b809a21ca14e249c1817968ff599f | Python | ZESl/MotionAnalysis | /get_dataset.py | UTF-8 | 8,069 | 2.6875 | 3 | [] | no_license | import os
import pandas as pd
from get_user_feature import get_all_user_feature_filtered
# save all motion data from folder:data_event&cut
# + add uid trial
def concat_all_motion():
df_list = []
for file in os.listdir("data_event&cut/sifted/"):
df = pd.read_csv('data_event&cut/sifted/' + file, encoding='gbk', index_col=0)
# resolve filename and add column to df
uid = file.split('.')[0].split('-')[0]
trial = file.split('.')[0].split('-')[1]
df.insert(0, 'uid', uid)
df.insert(1, 'trial', trial)
# add to df_list
df_list.append(df)
df_motion = pd.concat(df_list, axis=0, join='outer')
print('Concat all motion file done.')
return df_motion
# save all motion data from folder:data_event&space
# + add uid trial
def concat_all_space():
    """Load every space CSV and stack them into one DataFrame.

    File names look like '<uid>-<trial>.csv'; uid and trial are parsed from
    the name and prepended as the first two columns.
    """
    frames = []
    for space_file in os.listdir("data_event&space"):
        frame = pd.read_csv('data_event&space/' + space_file, encoding='gbk')
        # resolve filename and add column to df
        stem = space_file.split('.')[0]
        frame.insert(0, 'uid', stem.split('-')[0])
        frame.insert(1, 'trial', stem.split('-')[1])
        frames.append(frame)
    combined = pd.concat(frames, axis=0, join='outer')
    print('Concat all space file done.')
    return combined
# save all data (add user features)
# filter some irrelevant features: eg. ['passed_time', 'name']
def add_user(df_motion, feature_list):
    """Join per-user features (selected by *feature_list*) onto *df_motion* via uid."""
    # uid must be a string on both sides for the merge key to match.
    df_motion["uid"] = df_motion["uid"].astype(str)
    users = get_all_user_feature_filtered(feature_list)
    merged = pd.merge(df_motion, users, on='uid')
    print('add_user: Add user done.')
    return merged
# get mean, min, max, ... data to form a dataset
def get_dataset(feature_list):
    """Build per-(uid, event, trial, side) summary statistics plus user features.

    Reads Dataset/Data_motion.csv and Dataset/Data_space.csv, aggregates
    cut-length, speed and space statistics for every combination, then joins
    the user features named in *feature_list*.

    Bug fix: the side branch used to test ``side_t == 0`` although side_t is
    the string 'left'/'right', so the left-side rows silently received the
    right-side space statistics. It now tests ``side_t == 'left'``.
    """
    df_dataset = {
        "uid": [],
        "side": [],
        "event": [],
        "trial": [],
        "cut_mean": [],
        "cut_max": [],
        "cut_min": [],
        "cut_std": [],
        "cut_var": [],
        "speed_mean": [],
        "speed_max": [],
        "speed_min": [],
        "speed_std": [],
        "speed_var": [],
        "space_mean": [],
        "space_max": [],
        "space_min": [],
    }
    df_space = pd.read_csv('Dataset/Data_space.csv', encoding='gbk')
    df_motion = pd.read_csv('Dataset/Data_motion.csv', encoding='gbk')
    side_op = ['left', 'right']
    # todo modify range
    for uid_t in range(1, 63):  # uid: 1 ~ 62
        for event_type_t in range(1, 5):  # event_type: 1 2 3 4
            for trial_t in range(1, 4):  # trial: 1 2 3
                for side_t in side_op:  # side: 'left' 'right'
                    df_motion_t = df_motion[(df_motion['event_type'] == event_type_t) & (df_motion['uid'] == uid_t) & (
                            df_motion['trial'] == trial_t) & (df_motion['side'] == side_t)]
                    df_space_t = df_space[
                        (df_space.event == event_type_t) & (df_space.uid == uid_t) & (df_space.trial == trial_t)]
                    df_dataset["uid"].append(uid_t)
                    df_dataset["side"].append(side_t)
                    df_dataset["event"].append(event_type_t)
                    df_dataset["trial"].append(trial_t)
                    df_dataset["cut_mean"].append(df_motion_t["cut_length"].mean())
                    df_dataset["cut_min"].append(df_motion_t["cut_length"].min())
                    df_dataset["cut_max"].append(df_motion_t["cut_length"].max())
                    df_dataset["cut_std"].append(df_motion_t["cut_length"].std())
                    df_dataset["cut_var"].append(df_motion_t["cut_length"].var())
                    # Only positive speeds contribute to the speed statistics.
                    df_motion_t = df_motion_t[(df_motion_t.speed > 0)]
                    df_dataset["speed_mean"].append(df_motion_t["speed"].mean())
                    df_dataset["speed_min"].append(df_motion_t["speed"].min())
                    df_dataset["speed_max"].append(df_motion_t["speed"].max())
                    df_dataset["speed_std"].append(df_motion_t["speed"].std())
                    df_dataset["speed_var"].append(df_motion_t["speed"].var())
                    if side_t == 'left':
                        df_dataset["space_mean"].append(df_space_t["l_space_mean"].mean())
                        df_dataset["space_min"].append(df_space_t["l_space_min"].min())
                        df_dataset["space_max"].append(df_space_t["l_space_max"].max())
                    else:
                        df_dataset["space_mean"].append(df_space_t["r_space_mean"].mean())
                        df_dataset["space_min"].append(df_space_t["r_space_min"].min())
                        df_dataset["space_max"].append(df_space_t["r_space_max"].max())
    df_dataset = pd.DataFrame(df_dataset)
    print('get_dataset: Get dataset done.')
    df_dataset = add_user(df_dataset, feature_list)
    print('get_dataset: Add user done.')
    return df_dataset
# get mean, min, max, ... data to form a dataset
# WITHOUT side & trial
def get_dataset_tmp(feature_list):
    """Build per-(uid, event) summary statistics (no side/trial split) plus user features.

    Left/right space columns are combined: means are averaged, min/max taken
    across both sides. Output column order follows the dict insertion order.
    """
    df_dataset = {
        "uid": [],
        "event": [],
        "cut_mean": [],
        "cut_max": [],
        "cut_min": [],
        "cut_std": [],
        "cut_var": [],
        "speed_mean": [],
        "speed_max": [],
        "speed_min": [],
        "speed_std": [],
        "speed_var": [],
        "space_mean": [],
        "space_max": [],
        "space_min": [],
    }
    df_space = pd.read_csv('Dataset/Data_space.csv', encoding='gbk')
    df_motion = pd.read_csv('Dataset/Data_motion.csv', encoding='gbk')
    for uid_t in range(1, 63):  # uid: 1 ~ 62
        for event_type_t in range(1, 5):  # event_type: 1 2 3 4
            df_motion_t = df_motion[(df_motion['event_type'] == event_type_t) & (df_motion['uid'] == uid_t)]
            df_space_t = df_space[
                (df_space.event == event_type_t) & (df_space.uid == uid_t)]
            df_dataset["uid"].append(uid_t)
            df_dataset["event"].append(event_type_t)
            df_dataset["cut_mean"].append(df_motion_t["cut_length"].mean())
            df_dataset["cut_min"].append(df_motion_t["cut_length"].min())
            df_dataset["cut_max"].append(df_motion_t["cut_length"].max())
            df_dataset["cut_std"].append(df_motion_t["cut_length"].std())
            df_dataset["cut_var"].append(df_motion_t["cut_length"].var())
            # Only positive speeds contribute to the speed statistics.
            df_motion_t = df_motion_t[(df_motion_t.speed > 0)]
            df_dataset["speed_mean"].append(df_motion_t["speed"].mean())
            df_dataset["speed_min"].append(df_motion_t["speed"].min())
            df_dataset["speed_max"].append(df_motion_t["speed"].max())
            df_dataset["speed_std"].append(df_motion_t["speed"].std())
            df_dataset["speed_var"].append(df_motion_t["speed"].var())
            # Combine the two sides: average of per-side means, extremes across sides.
            df_dataset["space_mean"].append((df_space_t["l_space_mean"].mean() + df_space_t["r_space_mean"].mean()) / 2)
            df_dataset["space_min"].append(min(df_space_t["l_space_min"].min(), df_space_t["r_space_min"].min()))
            df_dataset["space_max"].append(max(df_space_t["l_space_max"].max(), df_space_t["r_space_max"].max()))
    df_dataset = pd.DataFrame(df_dataset)
    print('get_dataset: Get dataset done.')
    df_dataset = add_user(df_dataset, feature_list)
    print('get_dataset: Add user done.')
    return df_dataset
if __name__ == '__main__':
    # # include all motion data
    # Rebuild the concatenated motion/space CSVs, then the aggregated datasets.
    df_m = concat_all_motion()
    df_m.to_csv('Dataset/Data_motion.csv', encoding='gbk')
    df_s = concat_all_space()
    df_s.to_csv('Dataset/Data_space.csv', encoding='gbk')
    # User features to join onto the aggregated rows.
    features = ['uid', 'gender', 'age', 'height', 'weight',
                'fre_side', 'VR_exp', 'game_fre', 'sport_fre',
                'difficulty', 'enjoyment', 'fatigue', 'personality', 'familiarity']
    df_d = get_dataset(features)
    df_d = df_d.dropna()
    df_d.to_csv('Dataset/Data_dataset.csv', encoding='gbk', index=None)
    df_d_tmp = get_dataset_tmp(features)
    df_d_tmp = df_d_tmp.dropna()
    df_d_tmp.to_csv('Dataset/Data_dataset_tmp.csv', encoding='gbk', index=None)
| true |
90096f6969a5c02e3fb22b67523a6ef9a2bedc15 | Python | Zomega/gooey-examples | /catan_demo/catan.py | UTF-8 | 9,391 | 3.125 | 3 | [] | no_license | #!/usr/bin/python
# -*- coding: utf-8 -*-
class HexGridTile:
    """A hex-grid tile in cube coordinates, where x + y + z == 0.

    Only x and y are stored; z is derived. Tiles with equal coordinates
    hash (and now compare) equal so they deduplicate correctly in sets.
    """
    def __init__( self, x, y ):
        # Dead "z = None" check removed: z was always None so the
        # assertion could never run.
        self._x = int(x)
        self._y = int(y)
    def __hash__(self):
        return hash((self._x,self._y))
    def __eq__(self, other):
        # Bug fix: __hash__ without __eq__ left tiles comparing by identity,
        # so equal-coordinate tiles did not deduplicate in sets.
        return isinstance(other, HexGridTile) and (self._x, self._y) == (other._x, other._y)
    @property
    def x(self):
        return self._x
    @property
    def y(self):
        return self._y
    @property
    def z(self):
        # Derived third cube coordinate (x + y + z == 0).
        return -1 * ( self._x + self._y )
    @property
    def neighbors(self):
        # Bug fix: previously referenced undefined locals x/y (NameError);
        # offsets must be applied to this tile's own coordinates.
        return set([ HexGridTile(self.x + dx, self.y + dy) for dx, dy, dz in HexGridEdge.directions ])
    @property
    def corners(self):
        return set([ HexGridCorner(self, direction) for direction in HexGridCorner.directions ])
    @property
    def edges(self):
        return set([ HexGridEdge(self, direction) for direction in HexGridEdge.directions ])
class HexGridCorner:
    """A corner of a hex tile, normalised to one of two canonical directions.

    Every corner is shared by up to three tiles; the constructor re-anchors
    the six raw directions onto a canonical (hexloc, X-or-negX) pair so that
    equivalent corners get identical coordinates.
    """
    X = 1
    negY = 2
    Z = 3
    negX = 4
    Y = 5
    negZ = 6
    directions = [X, Y, Z, negX, negY, negZ]
    # Canonical directions are X, negX
    def __init__( self, hexloc, corner_direction ):
        x = hexloc.x
        y = hexloc.y
        # Map each of the six directions to the canonical tile/direction pair.
        if corner_direction == self.X:
            self.direction = self.X
            self.hexloc = hexloc
        elif corner_direction == self.Y:
            self.direction = self.X
            self.hexloc = HexGridTile( x - 1, y + 1 )
        elif corner_direction == self.Z:
            self.direction = self.X
            self.hexloc = HexGridTile( x - 1, y )
        elif corner_direction == self.negX:
            self.direction = self.negX
            self.hexloc = hexloc
        elif corner_direction == self.negY:
            self.direction = self.negX
            self.hexloc = HexGridTile( x + 1, y - 1)
        elif corner_direction == self.negZ:
            self.direction = self.negX
            self.hexloc = HexGridTile( x + 1, y )
        else:
            raise InvalidDirectionError()
    @property
    def edges(self):
        """The three edges meeting at this corner."""
        if self.direction == self.X:
            return set([ HexGridEdge(self.hexloc, HexGridEdge.YX),
                HexGridEdge(self.hexloc, HexGridEdge.XZ),
                HexGridEdge(HexGridTile(self.hexloc.x + 1, self.hexloc.y), HexGridEdge.ZY) ])
        else:
            return set([ HexGridEdge(self.hexloc, HexGridEdge.XY),
                HexGridEdge(self.hexloc, HexGridEdge.ZX),
                HexGridEdge(HexGridTile(self.hexloc.x - 1, self.hexloc.y), HexGridEdge.YZ) ])
    @property
    def neighbors(self):
        """The three corners adjacent (one edge away) to this corner."""
        if self.direction == self.X:
            return set([ HexGridCorner(self.hexloc, HexGridCorner.negY),
                HexGridCorner(self.hexloc, HexGridCorner.negZ),
                HexGridCorner(HexGridTile(self.hexloc.x + 2, self.hexloc.y - 1), HexGridCorner.negX)])
        else:
            return set([ HexGridCorner(self.hexloc, HexGridCorner.Y),
                HexGridCorner(self.hexloc, HexGridCorner.Z),
                HexGridCorner(HexGridTile(self.hexloc.x - 2, self.hexloc.y + 1), HexGridCorner.X)])
    @property
    def tiles(self):
        # NOTE(review): incomplete - a corner touches up to three tiles,
        # but only the canonical one is returned in either branch.
        if self.direction == self.X:
            return set([ self.hexloc ]) #TODO: Others
        else:
            return set([ self.hexloc ]) #TODO: Others
class HexGridEdge:
    """An edge of a hex tile, normalised to one of three canonical directions.

    Direction constants are (dx, dy, dz) cube-coordinate deltas. Each edge is
    shared by two tiles; the constructor re-anchors the six raw directions
    onto a canonical (hexloc, XY/YZ/ZX) pair.
    """
    XZ = (1,0,-1)
    XY = (1,-1,0)
    YZ = (0,1,-1)
    YX = (-1,1,0)
    ZY = (0,-1,1)
    ZX = (-1,0,1)
    directions = [XZ, XY, YZ, YX, ZY, ZX]
    # Canonical directions are XY, YZ, ZX
    def __init__( self, hexloc, edge_direction ):
        x = hexloc.x
        y = hexloc.y
        # Map each of the six directions to the canonical tile/direction pair.
        if edge_direction == self.XY:
            self.direction = self.XY
            self.hexloc = hexloc
        elif edge_direction == self.YX:
            self.direction = self.XY
            self.hexloc = HexGridTile( x + 1, y - 1 )
        elif edge_direction == self.YZ:
            self.direction = self.YZ
            self.hexloc = hexloc
        elif edge_direction == self.ZY:
            self.direction = self.YZ
            self.hexloc = HexGridTile( x, y - 1 )
        elif edge_direction == self.ZX:
            self.direction = self.ZX
            self.hexloc = hexloc
        elif edge_direction == self.XZ:
            self.direction = self.ZX
            self.hexloc = HexGridTile( x + 1, y )
        else:
            raise InvalidDirectionError()
    @property
    def ends(self):
        # NOTE(review): unimplemented - should return the edge's two corners.
        pass #TODO
    @property
    def neighbors(self):
        # NOTE(review): unimplemented - should return the adjacent edges.
        pass #TODO
    @property
    def tiles(self):
        # NOTE(review): unimplemented - should return the two tiles sharing
        # this edge.
        pass #TODO
# Debug aid (Python 2 print statement): show the six edge direction vectors
# at import time.
print HexGridEdge.directions
###
# Find the canvas coordanates to draw a sprite based on a hexloc
###
def tile_coords( hexloc ):
    """Map a hex tile to the canvas coordinates of its sprite's anchor.

    Linear combination of the hex coordinates: each +x step moves the
    sprite 6 columns right and 2 rows down; each +y step moves it 4 rows
    down.
    """
    return (6 * hexloc.x, 2 * hexloc.x + 4 * hexloc.y)
def corner_coords( cornerloc ):
    """Canvas coordinates of a corner, offset from its canonical tile's sprite."""
    base_x, base_y = tile_coords( cornerloc.hexloc )
    # Both canonical corners sit at mid-height; the +X corner is 8 columns in.
    dx = 8 if cornerloc.direction == HexGridCorner.X else 0
    return base_x + dx, base_y + 2
def edge_coords( edgeloc ):
    """Canvas coordinates of an edge glyph, offset from its canonical tile's sprite."""
    base_x, base_y = tile_coords( edgeloc.hexloc )
    # Per-direction offsets for the three canonical edges (ZX is the fallback).
    if edgeloc.direction == HexGridEdge.XY:
        offset = (1, 3)
    elif edgeloc.direction == HexGridEdge.YZ:
        offset = (3, 4)
    else:
        offset = (1, 1)
    return base_x + offset[0], base_y + offset[1]
#!/usr/bin/python
# -*- coding: utf-8 -*-
from gooey.core.Widget import *
from gooey.core.Canvas import *
from gooey.canvas.AsciiCanvas import *
from gooey.canvas.AsciiSprite import *
from gooey.core.Event import *
from gooey.core.EventType import *
SPRITE_CHARS = u''' @───@
/z \\
@ x@
\\y /
@───@'''
SPRITE_FG_MASK = u''' BWWWG
WW W
R WR
WW W
GWWWB'''
SPRITE_BG_MASK = u''' KKKKK
KCCCCCK
KCCCCCCCK
KCCCCCK
KKKKK'''
THROTTLE_SPRITE = AsciiSprite(SPRITE_CHARS, SPRITE_FG_MASK, SPRITE_BG_MASK)
from catan_sprites import DIRMAP_SPRITE
def pseudorand(x, n):
    """Deterministic pseudo-random bucket in [0, n) derived from hash(x)."""
    bucket = int(hash(x)) % n
    return bucket
class Throttle(Widget):
    """Widget that renders a hex grid (with coordinates) onto an AsciiCanvas."""
    def validate_canvas( self, canvas ):
        """Reject canvases that are not ASCII or too small for the sprite."""
        if not canvas.type == "AsciiCanvas":
            raise InvalidCanvasTypeError("Throttles only support AsciiCanvases for now...")
        c_w, c_h = canvas.size
        s_w, s_h = THROTTLE_SPRITE.size
        # TODO: Correct size.
        if not ( c_w >= s_w and c_h >= s_h ):
            raise InvalidCanvasSizeError("Throttles need a larger canvas.")
    def handle_event( self, event ):
        # This widget consumes no events.
        return False
    def render_corner( self, corner, fg_color = None, bg_color = None ):
        """Draw a corner marker ('@') at the corner's canvas position."""
        self.canvas.putchr( corner_coords( corner ), '@', fg_color, bg_color )
    def render_edge( self, edge, fg_color = None, bg_color = None ):
        """Draw an edge glyph ('\\', horizontal bar, or '/') for each canonical direction."""
        if edge.direction == HexGridEdge.XY:
            self.canvas.putchr( edge_coords( edge ), '\\', fg_color, bg_color )
        if edge.direction == HexGridEdge.YZ:
            x_, y_ = edge_coords( edge )
            self.canvas.putstr( edge_coords( edge ), u'───', fg_color, bg_color )
        if edge.direction == HexGridEdge.ZX:
            x_, y_ = edge_coords( edge )
            self.canvas.putchr( edge_coords( edge ), '/', fg_color, bg_color )
    def render( self ):
        """Draw the visible hex grid, then highlight tile (3, 1) and one corner."""
        for y in range(-20, 30):
            for x in range(-20, 30):
                hexloc = HexGridTile(x,y)
                cartloc = tile_coords(hexloc)
                # Skip tiles whose sprite would fall outside the canvas.
                if cartloc[0] < 0 or cartloc[1] < 0 or cartloc[0] >= self.canvas.size[0] - THROTTLE_SPRITE.size[0] or cartloc[1] >= self.canvas.size[1] - THROTTLE_SPRITE.size[1]:
                    continue
                for corner in hexloc.corners:
                    self.render_corner( corner )
                for edge in hexloc.edges:
                    self.render_edge( edge )
                # Label the tile with its x and y coordinates.
                self.canvas.putstr( ( cartloc[0] + 4, cartloc[1] + 1 ), str(x) )
                self.canvas.putstr( ( cartloc[0] + 4, cartloc[1] + 3 ), str(y) )
        # Highlight one tile (yellow corners, green edges) as a demo.
        hexloc = HexGridTile(3,1)
        for corner in hexloc.corners:
            self.render_corner( corner, 'Y' )
        for edge in hexloc.edges:
            self.render_edge( edge, 'G' )
        # Highlight one corner (blue), its edges (magenta) and neighbors (red).
        corner = HexGridCorner( hexloc, HexGridCorner.Y )
        self.render_corner( corner, 'B' )
        for edge in corner.edges:
            self.render_edge( edge, 'M' )
        for corner_ in corner.neighbors:
            self.render_corner( corner_, 'R' )
from gooey.app.CursesApplication import CursesApplication
from gooey.core.Controller import Controller
from gooey.core.Model import Model
# Wire up the MVC triple and hand control to the curses main loop.
model = Model()
controller = Controller(model)
widget = Throttle(model, controller)
with CursesApplication( model, widget, controller ) as app:
    app.run()
| true |
a3cc5c9fea1e82fe378e03eca9b2faac798fdf2b | Python | aaberbach/LFP_Prediction | /makeplot.py | UTF-8 | 3,732 | 2.609375 | 3 | [] | no_license | # Make plot comparing models
import numpy as np
import pdb
import pandas as pd
import pickle
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import scipy.stats as ss
from statsmodels.graphics.gofplots import qqplot
np.random.seed(10304)
########## Experimental predictions ############
# Load data
file = open('./data/channel0-preds-trues.pkl', "br")
data = pickle.load(file)
# Pick a subset for plotting
idx = np.random.choice(np.arange(data['multi_ensemble'].shape[0]),10000)
# Get y-true
y_true = data['y_true'][idx,:]
models = ['AR','uni_ensemble',\
'multi_ensemble']
RMSE = np.zeros((idx.shape[0],len(models)))
for j,i in enumerate(models):
preds = data[i][idx,:]
RMSE[:,j] = np.log(np.sqrt(np.mean((preds-y_true)**2,axis=1)))
RMSE_df = pd.DataFrame(RMSE,columns=models)
plt.figure()
qqplot(RMSE_df['AR'], line='s')
print('####### EXPERIMENTAL ###########')
stat, p = ss.shapiro(RMSE_df['AR']) #shapiro-wilks test for normality
print('Shapiro-Wilks test for normality\n \
variable: RMSE_df[AR]\n\
p = {}'.format(p))
stat, p = ss.normaltest(RMSE_df['AR']) #shapiro-wilks test for normality
print('DAgostinos test for normality\n \
variable: RMSE_df[AR]\n\
p = {}'.format(p))
p = ss.wilcoxon(RMSE_df['AR'],RMSE_df['uni_ensemble']).pvalue
print('Wilcoxon signed rank test p-value between AR and uni_ensemble = {}'.format(p))
p = ss.wilcoxon(RMSE_df['uni_ensemble'],RMSE_df['multi_ensemble']).pvalue
print('Wilcoxon signed rank test p-value between uni_ensemble and multi_ensemble = {}'.format(p))
print('###################################')
RMSE_df = RMSE_df.melt(var_name='groups', value_name='vals')
plt.figure()
ax = sns.violinplot(x="groups", y="vals", data=RMSE_df)
ax.set_xlabel([])
ax.set_ylabel('log(RMSE)')
plt.title('experimental')
#################################################
############### Model predictions ###############
# Load data
file = open('./data/model_uni_preds.pkl', "br")
data_uni = pickle.load(file)
file = open('./data/model_multi_preds.pkl', "br")
data_multi = pickle.load(file)
# Pick a subset for plotting
idx = np.random.choice(np.arange(data_uni['ensemble'].shape[0]),10000)
y_true = np.load('./data/model_y_true.npy')
y_true = y_true[idx,:]
models = ['ar','ensemble']
RMSE = np.zeros((idx.shape[0],3))
for j,i in enumerate(models):
preds = data_uni[i][idx,:]
RMSE[:,j] = np.log(np.sqrt(np.mean((preds-y_true)**2,axis=1)))
preds = data_multi['ensemble'][idx,:]
RMSE[:,2] = np.log(np.sqrt(np.mean((preds-y_true)**2,axis=1)))
RMSE_df = pd.DataFrame(RMSE,columns=['AR-only LFP','ens.-only LFP','ens.-LFP+FR'])
print('####### MODEL ###########')
stat, p = ss.shapiro(RMSE_df['AR-only LFP']) #shapiro-wilks test for normality
print('Shapiro-Wilks test for normality\n \
variable: RMSE_df[AR-only LFP]\n\
p = {}'.format(p))
stat, p = ss.normaltest(RMSE_df['AR-only LFP']) #shapiro-wilks test for normality
print('DAgostinos test for normality\n \
variable: RMSE_df[AR-only LFP]\n\
p = {}'.format(p))
p = ss.wilcoxon(RMSE_df['AR-only LFP'],RMSE_df['ens.-only LFP']).pvalue
print('Wilcoxon signed rank test p-value between AR and uni_ensemble = {}'.format(p))
p = ss.wilcoxon(RMSE_df['ens.-only LFP'],RMSE_df['ens.-LFP+FR']).pvalue
print('Wilcoxon signed rank test p-value between uni_ensemble and multi_ensemble = {}'.format(p))
print('###################################')
RMSE_df = RMSE_df.melt(var_name='groups', value_name='vals')
plt.figure()
ax = sns.violinplot(x="groups", y="vals", data=RMSE_df)
ax.set_xlabel([])
ax.set_ylabel('RMSE')
plt.title('model')
plt.show()
pdb.set_trace()
| true |
225914f681607482b15af0230b0c2d450651002a | Python | softking/cnn_learn | /dqn_game/car_conv/car.py | UTF-8 | 3,703 | 3.078125 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import gym
import numpy as np
import pygame
import random
from pygame.locals import *
# Colour constants (comment translated from Chinese: "define colour variables").
redColour = pygame.Color(255,0,0)
blackColour = pygame.Color(0,0,0)
whiteColour = pygame.Color(255,255,255)
greenColour = pygame.Color(0,255,0)
SHOW_POS = range(9)  # columns where a shell may spawn
RANGE = 40  # pixel size of one grid cell
W = 10  # grid width in cells
H = 16  # grid height in cells
P = 70 # shell-spawn threshold: a shell appears when randint(0,100) >= P
class Car(gym.Env):
    """Gym environment: dodge falling shells on a W x H grid.

    Actions: 0 = stay, 1 = move left, 2 = move right. A shell that reaches
    the bottom without hitting the car scores a point; a collision ends the
    episode. (Comments translated from Chinese.)
    """
    def __init__(self):
        self.fpsClock = pygame.time.Clock()
        self.is_show = False
        self.score = 0
        self.position = 0
        # Maps spawn column -> [column, row] of an active shell.
        self.shell_pos = {}
    def reset(self):
        """Start a new episode: car centred, one shell spawned at the top."""
        self.score = 0
        self.position = int(W/2)
        self.shell_pos = {}
        pos = random.choice(SHOW_POS)
        self.shell_pos[pos] = [pos, 0]
        return self.build_data()
    def set_show(self, is_show=True):
        """Enable/disable rendering; opens the pygame window when enabled."""
        self.is_show = is_show
        if not is_show: return
        self.playSurface = pygame.display.set_mode((W*RANGE, H*RANGE))
        pygame.init()
        pygame.display.set_caption('Car')
    def build_data(self):
        """Return the (W, H, 1) observation: 1 = car cell, 2 = shell cells."""
        data = np.full((W, H, 1), 0)
        data[self.position][H-1][0] = 1 # car position (bottom row)
        for i in self.shell_pos:
            data[self.shell_pos[i][0]][self.shell_pos[i][1]][0] = 2 # shell
        return data
        # image_data = pygame.surfarray.array3d(pygame.display.get_surface())
        # return image_data
    def step(self, action):
        """Advance one frame; returns (observation, reward, done)."""
        # Drain pygame events (e.g. key presses) so the window stays responsive.
        if self.is_show: pygame.event.get()
        done = False
        if action == 1 and self.position > 0:
            self.position -= 1
        if action == 2 and self.position < W-2:
            self.position += 1
        if action == 0:
            pass
        # Small survival reward each frame.
        reward = 0.1
        drop_list = []
        # Move every shell down one row.
        for i in self.shell_pos:
            self.shell_pos[i][1] += 1
            if self.shell_pos[i][1] >= H:
                drop_list.append(i)
        # Remove shells that left the grid; each one dodged scores a point.
        for i in drop_list:
            del self.shell_pos[i]
            reward = 1
            self.score += 1
        # Randomly spawn a new shell.
        if random.randint(0,100) >= P:
            pos = random.choice(SHOW_POS)
            if pos not in self.shell_pos:
                self.shell_pos[pos] = [pos, 0]
        # Collision check: a shell on the car's cell ends the episode.
        for i in self.shell_pos:
            if self.shell_pos[i][0] == self.position and self.shell_pos[i][1] == H-1: # hit
                reward = -1
                done = True
                break
        # Limit game speed
        # self.fpsClock.tick(5)
        return self.build_data(), reward, done
    def render(self, mode='human', close=False):
        # Draw the pygame display layer.
        self.playSurface.fill(blackColour)
        # self.playSurface.blit(pygame.image.load("assets/car.jpg").convert_alpha(), (100, 30))
        myfont = pygame.font.Font(None, 60)
        textImage = myfont.render(str(self.score), True, (255, 255, 255))
        self.playSurface.blit(textImage, (0, 0))
        pygame.draw.rect(self.playSurface, greenColour, Rect(self.position*RANGE, (H-1)*RANGE, RANGE, RANGE))
        for i in self.shell_pos:
            pygame.draw.rect(self.playSurface, redColour, Rect(self.shell_pos[i][0]*RANGE, self.shell_pos[i][1]*RANGE, RANGE, RANGE))
        # Flush the pygame display layer.
        pygame.display.flip()
        # self.fpsClock.tick(5)
        # from gym.envs.classic_control import rendering
        # viewer = rendering.Viewer(640, 480)
        #
        # return viewer.render(return_rgb_array=mode == 'rgb_array')
| true |
8f8719d4c4bdb32d5116f39ba0a68a85b245e6ec | Python | chr0nikler/devianttagger | /crop_images.py | UTF-8 | 868 | 2.796875 | 3 | [] | no_license | import os
import glob
from PIL import Image, ImageOps
from utils import fetch_img_list
def crop_images(file_list, dest_directory, crop_dims=(512, 512)):
    """Crop every image in *file_list* to *crop_dims* and save it into *dest_directory*.

    Args:
        file_list: iterable of paths to source image files.
        dest_directory: directory the cropped copies are written to (created
            if missing); the original filenames are kept.
        crop_dims: (width, height) of the output images.
    """
    # exist_ok avoids the check-then-create race of the original exists() guard.
    os.makedirs(dest_directory, exist_ok=True)
    for file in file_list:
        _, filename = os.path.split(file)
        with Image.open(file) as image:
            # ImageOps.fit centre-crops/resizes to exactly crop_dims.
            # Image.ANTIALIAS is deprecated (removed in Pillow 10); switch to
            # Image.LANCZOS if/when Pillow is upgraded.
            cropped_img = ImageOps.fit(image, crop_dims, Image.ANTIALIAS)
            # os.path.join works whether or not dest_directory carries a
            # trailing separator (the original "+" concatenation required one).
            cropped_img.save(os.path.join(dest_directory, filename))
            cropped_img.close()
# Note this script should be run from one level above the images directory
# At least, that's where I ran and tested it
# Module-level driver: crops everything fetch_img_list() finds in ./images/
# into ./images-cropped/ as soon as this file is executed.
# NOTE(review): these statements also run on *import*; consider an
# `if __name__ == "__main__":` guard if the module is ever imported elsewhere.
crop_img_directory = "./images-cropped/"
img_directory = "./images/"
img_list = fetch_img_list(img_directory)
crop_images(img_list, crop_img_directory)
#crop_images(img_list, crop_img_directory, (320, 320))
| true |
10c0a87304139b1dcc2062821cc85167aa1b3683 | Python | linzifan/python_courses | /PoC-Project-3-Tic-Tac-Toe.py | UTF-8 | 4,025 | 3.203125 | 3 | [] | no_license | """
Monte Carlo Tic-Tac-Toe Player
"""
# http://www.codeskulptor.org/#user39_vKUU4Cwa9N4Xja4.py
import random
import poc_ttt_gui
import poc_ttt_provided as provided
# Constants for Monte Carlo simulator
# You may change the values of these constants as desired, but
# do not change their names.
NTRIALS = 20 # Number of random games simulated per mc_move call
SCORE_CURRENT = 1.0 # Weight applied to squares the machine player occupied when scoring a game
SCORE_OTHER = 1.0 # Weight applied to squares the opponent occupied when scoring a game
# Add your functions here.
def mc_trial(board, player):
    """
    Play one random game to completion on ``board``, starting with
    ``player`` and alternating turns. The board is mutated in place;
    nothing is returned.
    """
    current_player = player
    # Keep playing until someone has won or the board is full.
    # (The original fetched get_empty_squares() twice per iteration and
    # compared against None with `==`; this fixes both.)
    while board.check_win() is None:
        empties = board.get_empty_squares()
        if not empties:
            break
        row, col = random.choice(empties)
        board.move(row, col, current_player)
        current_player = provided.switch_player(current_player)
def mc_update_scores(scores, board, player):
    """
    Score a completed game on ``board`` and accumulate the result into the
    ``scores`` grid (a list of lists with the board's dimensions), where
    ``player`` is the machine player. Squares the winner occupied gain
    weight and squares the loser occupied lose weight; a drawn (or
    unfinished) game leaves ``scores`` unchanged.
    """
    # Hoisted out of the per-cell loops: both values are invariant, while the
    # original re-queried board.check_win()/provided.switch_player() per cell.
    winner = board.check_win()
    other = provided.switch_player(player)
    if winner != player and winner != other:
        # Draw or game still in progress: no square changes score
        # (identical to the original, where neither branch matched).
        return
    for row in range(len(scores)):
        for col in range(len(scores[0])):
            cell = board.square(row, col)
            if winner == player:
                if cell == player:
                    scores[row][col] += SCORE_CURRENT
                elif cell == other:
                    scores[row][col] -= SCORE_OTHER
            else:  # winner == other
                if cell == player:
                    scores[row][col] -= SCORE_CURRENT
                elif cell == other:
                    scores[row][col] += SCORE_OTHER
def get_best_move(board, scores):
    """
    Return a (row, column) move for ``board``, chosen uniformly at random
    among the empty squares whose entry in ``scores`` is maximal.

    The board must have at least one empty square (as in the original,
    which raised on max() of an empty sequence otherwise).
    """
    empties = board.get_empty_squares()
    # Highest score attainable on an empty square.
    best_score = max(scores[row][col] for row, col in empties)
    # All empty squares achieving that score. This replaces the original's
    # full-grid rescan (and its provided.EMPTY lookup): get_empty_squares()
    # already yields exactly the empty cells.
    candidates = [(row, col) for row, col in empties
                  if scores[row][col] == best_score]
    return random.choice(candidates)
def mc_move(board, player, trials):
    """
    Pick a move for ``player`` on ``board`` via Monte Carlo simulation:
    play ``trials`` random games from the current position, score each
    finished game into a per-square grid, and return the (row, column)
    of the best-scoring empty square.
    """
    dim = board.get_dim()
    # Accumulated per-square scores over all simulated games.
    scores = [[0] * dim for _ in range(dim)]
    for _ in range(trials):
        trial_board = board.clone()
        mc_trial(trial_board, player)
        mc_update_scores(scores, trial_board, player)
    return get_best_move(board, scores)
# Test game with the console or the GUI. Uncomment whichever
# you prefer. Both should be commented out when you submit
# for testing to save time.
# provided.play_game(mc_move, NTRIALS, False)
# poc_ttt_gui.run_gui(3, provided.PLAYERX, mc_move, NTRIALS, False)
| true |
c10f6ef67155a177cf51e2228c8ec14297beba34 | Python | prade7970/PirplePython | /Project_Hangman.py | UTF-8 | 3,550 | 3.28125 | 3 | [] | no_license | import os
class DrawHangManGallows:
    """Provides the ASCII art for the empty gallows and the six hangman stages."""

    def __init__(self):
        pass

    def hangmanstages(self):
        """Print the empty gallows and return the six progressive drawings."""
        empty_gallows = "----------\n |\t|\n\t|\n\t|\n\t|\n\t|\n\t|\n\t-------------"
        # One drawing per wrong guess: head, torso, left arm, both arms,
        # left leg, both legs.
        stages = [
            "----------\n |\t|\n 0\t|\n\t|\n\t|\n\t|\n\t|\n\t-------------",
            "----------\n |\t|\n 0\t|\n |\t|\n\t|\n\t|\n\t|\n\t-------------",
            "----------\n |\t|\n 0\t|\n-|\t|\n\t|\n\t|\n\t|\n\t-------------",
            "----------\n |\t|\n 0\t|\n-|-\t|\n\t|\n\t|\n\t|\n\t-------------",
            "----------\n |\t|\n 0\t|\n-|-\t|\n/\t|\n\t|\n\t|\n\t-------------",
            "----------\n |\t|\n 0\t|\n-|-\t|\n/ \\ \t|\n\t|\n\t|\n\t-------------",
        ]
        print(empty_gallows)
        return stages
class ChooseMode:
    """Asks the user for the game mode and, in two-player mode, the secret word."""

    def __init__(self):
        pass

    def ChooseModeFunc(self):
        """Prompt for the mode and return ``(mode, word)``.

        ``word`` is the word typed by player 1 in two-player mode, or None
        otherwise. (Bug fix: the original left ``Word`` unbound for any
        choice other than 2, raising UnboundLocalError at the return.)
        """
        Word = None
        user_input= int(input("One Player(1) or Two Player mode(2)"))
        if user_input==2:
            Word=input("Two Player Mode Selected: - Player 1 pick a word : ")
            #print("Answer",Word)
            #BlankSpaces= len(Word)
        return user_input,Word
class TwoPlayerMode:
    """Runs the two-player hangman round: player 2 guesses letters of the
    word player 1 entered, revealing one gallows stage per wrong guess."""
    def __init__(self):
        pass
    def GameBegins(self,secret_word):
        """Interactive guess loop for ``secret_word`` (compared case-insensitively).
        Ends after six wrong guesses; see the review notes on the win path."""
        display_hangman=0  # index of the next gallows stage to print on a miss
        secret_list=[]
        display_correct_guess=[]
        index_pos_list=[]  # accumulates matched positions across ALL guesses
        correct_guess=[]
        cg=[]  # NOTE(review): assigned but never used
        secret_list= list(secret_word.lower())
        # Same six stage drawings as DrawHangManGallows.hangmanstages()
        # (duplicated here -- consider sharing one definition).
        hangmanstages=[ "----------\n |\t|\n 0\t|\n\t|\n\t|\n\t|\n\t|\n\t-------------",
           "----------\n |\t|\n 0\t|\n |\t|\n\t|\n\t|\n\t|\n\t-------------",
           "----------\n |\t|\n 0\t|\n-|\t|\n\t|\n\t|\n\t|\n\t-------------",
           "----------\n |\t|\n 0\t|\n-|-\t|\n\t|\n\t|\n\t|\n\t-------------",
           "----------\n |\t|\n 0\t|\n-|-\t|\n/\t|\n\t|\n\t|\n\t-------------",
           "----------\n |\t|\n 0\t|\n-|-\t|\n/ \ \t|\n\t|\n\t|\n\t-------------"
                     ]
        # Show the masked word: '- ' for spaces, '_ ,' for hidden letters.
        for i in range(len(secret_list)):
            if secret_list[i]==' ':
                #print('-',end="")
                display_correct_guess.append('- ')
            else:
                display_correct_guess.append('_ ,')
        print(display_correct_guess)
        while(True):
            #for i in range(len(hangmanstages)):
            guess=input('Guess : ')
            if guess.lower() in secret_list:
                correct_guess.append(guess.lower())
                for j in range(len(secret_list)):
                    if secret_list[j]==guess.lower():
                        index_pos_list.append(j)
                        #cg.insert(guess.lower(),str(index_pos_list))
                        # NOTE(review): j == len(secret_list) can never hold
                        # inside range(len(secret_list)), so this win message
                        # is unreachable; the unconditional break below also
                        # stops at the FIRST occurrence of the guessed letter,
                        # so repeated letters are never all reported.
                        if j==len(secret_list):
                            print("You Guessed all correctly",secret_word)
                            break
                        break
                print("You guessed right!", "Word is at", str(index_pos_list))
            else:
                # Wrong guess: reveal the next gallows stage; six misses lose.
                if(display_hangman<len(hangmanstages)):
                    print(hangmanstages[display_hangman])
                    display_hangman+=1
                    if(display_hangman==len(hangmanstages)):
                        print("You Lost!")
                        break
class SinglePlayerMode:
    """Placeholder for a future one-player (vs. computer) hangman mode."""

    def __init__(self):
        # Nothing to initialise yet.
        pass
if __name__ == "__main__":
    # Ask for the game mode (and, in two-player mode, the secret word).
    cm=ChooseMode()
    userinput,secret_word=cm.ChooseModeFunc()
    # NOTE(review): `userinput` is never checked -- one-player mode falls
    # through to the two-player flow (and, as originally written,
    # ChooseModeFunc crashes before reaching this point in that mode).
    tw= TwoPlayerMode()
    # ANSI escape "ESC[2J": clear the terminal before the game starts.
    print(chr(27) + "[2J")
    d=DrawHangManGallows()
    hangmanstages=d.hangmanstages()  # prints the empty gallows; return value unused
    #tw.DrawBlankSpaces(secret_word)
    tw.GameBegins(secret_word)
| true |
a2ef7f83829d07ef03fd6cf8617d02c3577a7324 | Python | jogiji/milliEye | /module3_our_dataset/data_collection/utils/ReadRadar.py | UTF-8 | 12,806 | 2.640625 | 3 | [
"MIT"
] | permissive | import serial
import sys
import os
import time
import numpy as np
import pickle
import cv2
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
"""
This program only is tested for Radar IWR6843
"""
# Function to configure the serial ports and send the data from the configuration file to the radar
def serialConfig(configFileName):
    """Open the radar's CLI and data serial ports, stream every line of
    *configFileName* to the CLI port, and return (CLI port, data port)."""
    # Linux device names; on Windows use e.g. 'COM9' / 'COM10' instead.
    cli_port = serial.Serial('/dev/ttyACM0', 115200)
    data_port = serial.Serial('/dev/ttyACM1', 921600)
    # Push the configuration to the board, one command per line.
    for command in [line.rstrip('\r\n') for line in open(configFileName)]:
        cli_port.write((command + '\n').encode())
        print(command)
        time.sleep(0.01)  # give the firmware time to process each command
    return cli_port, data_port
# ------------------------------------------------------------------
# Function to parse the data inside the configuration file
def parseConfigFile(configFileName):
    """Parse the radar configuration file and derive the signal-processing
    parameters (bin counts, range/doppler resolution, max range/velocity).

    Args:
        configFileName: path to the .cfg file that is also sent to the radar.

    Returns:
        dict with keys numDopplerBins, numRangeBins, rangeResolutionMeters,
        rangeIdxToMeters, dopplerResolutionMps, maxRange, maxVelocity.
    """
    configParameters = {}
    # Antenna counts are hard coded; change if another antenna configuration
    # is used. Hoisted out of the parsing loop -- they do not depend on the
    # file contents. (numRxAnt is currently unused, kept for reference.)
    numRxAnt = 4
    numTxAnt = 3
    # Read the whole file and close it promptly (the original leaked the handle).
    with open(configFileName) as configFile:
        config = [line.rstrip('\r\n') for line in configFile]
    for i in config:
        splitWords = i.split(" ")
        # Chirp profile: frequencies, slope and ADC sampling.
        if "profileCfg" in splitWords[0]:
            startFreq = int(float(splitWords[2]))
            idleTime = int(splitWords[3])
            rampEndTime = float(splitWords[5])
            freqSlopeConst = float(splitWords[8])
            numAdcSamples = int(splitWords[10])
            # Round the ADC sample count up to the next power of two.
            numAdcSamplesRoundTo2 = 1
            while numAdcSamples > numAdcSamplesRoundTo2:
                numAdcSamplesRoundTo2 = numAdcSamplesRoundTo2 * 2
            digOutSampleRate = int(splitWords[11])
        # Frame configuration: chirp indices and loop count.
        elif "frameCfg" in splitWords[0]:
            chirpStartIdx = int(splitWords[1])
            chirpEndIdx = int(splitWords[2])
            numLoops = int(splitWords[3])
            numFrames = int(splitWords[4])  # parsed but not used below
            framePeriodicity = float(splitWords[5])  # parsed but not used below
    # Combine the parsed values into the derived processing parameters.
    # NOTE: raises NameError if the file lacks profileCfg/frameCfg lines,
    # matching the original behaviour.
    numChirpsPerFrame = (chirpEndIdx - chirpStartIdx + 1) * numLoops
    configParameters["numDopplerBins"] = numChirpsPerFrame / numTxAnt
    configParameters["numRangeBins"] = numAdcSamplesRoundTo2
    configParameters["rangeResolutionMeters"] = (
        3e8 * digOutSampleRate * 1e3) / (2 * freqSlopeConst * 1e12 * numAdcSamples)
    configParameters["rangeIdxToMeters"] = (3e8 * digOutSampleRate * 1e3) / (
        2 * freqSlopeConst * 1e12 * configParameters["numRangeBins"])
    configParameters["dopplerResolutionMps"] = 3e8 / (2 * startFreq * 1e9 * (
        idleTime + rampEndTime) * 1e-6 * configParameters["numDopplerBins"] * numTxAnt)
    configParameters["maxRange"] = (
        300 * 0.9 * digOutSampleRate) / (2 * freqSlopeConst * 1e3)
    configParameters["maxVelocity"] = 3e8 / (
        4 * startFreq * 1e9 * (idleTime + rampEndTime) * 1e-6 * numTxAnt)
    return configParameters
# ------------------------------------------------------------------
# Function to draw the plot
def draw(detObj):
    """Scatter-plot the detected radar points in 3D (X axis negated).

    NOTE(review): relies on a module-level ``fig``, which is only created by
    code that is commented out inside readradar.run(); calling this as-is
    raises NameError -- re-enable the figure setup before use.
    """
    x, y, z, v = [], [], [], []  # defaults when there are no detections
    if len(detObj["x"]) > 0:
        fig.clf()
        ax = fig.add_subplot(111, projection="3d")
        # Fixed axis limits so the view does not rescale between frames.
        ax.set_zlim(bottom=-5, top=5)
        ax.set_ylim(bottom=0, top=10)
        ax.set_xlim(left=-4, right=4)
        ax.set_xlabel('X Label')
        ax.set_ylabel('Y Label')
        ax.set_zlabel('Z Label')
        x = -detObj["x"]  # negated -- presumably to mirror the axis; confirm coordinate convention
        y = detObj["y"]
        z = detObj["z"]
        v = detObj["velocity"]  # NOTE(review): collected but never plotted
        ax.scatter(x, y, z, c='r', marker='o', s=10)
        plt.pause(0.01)  # show
class readradar():
    """Acquires point-cloud frames from a TI IWR6843 radar over two serial
    ports and pickles them to <folderName>/pointcloud.pkl once ``num``
    frames have been collected."""
    def __init__(self, configFileName='./cfg/indoor.cfg', folderName="./data", num=600):
        # Rolling buffer for raw UART bytes; packets are parsed out of it
        # incrementally by readAndParseData68xx().
        self.byteBuffer = np.zeros(2**15, dtype='uint8')
        self.byteBufferLength = 0
        self.folderName = folderName
        self.num = num  # number of frames to record before stopping
        self.configFileName = configFileName
    def run(self, pipe):
        """Configure the radar, handshake with the camera process over
        ``pipe``, then collect ``self.num`` frames and persist them."""
        # Get the configuration parameters from the configuration file
        configParameters = parseConfigFile(self.configFileName)
        # Live-plot setup (disabled; draw() depends on this `fig`)
        # fig = plt.figure()
        # plt.ion()
        # ax = Axes3D(fig)
        # Configure the serial port.
        # The `Dataport` will start to read data immediately after the `serialConfig` function
        time.sleep(2)
        CLIport, Dataport = {}, {}
        CLIport, Dataport = serialConfig(self.configFileName)
        detObj, frameData = {}, []
        currentIndex, dataOk = 0, 0
        # Two-way handshake so radar and camera start recording together.
        pipe.send("Radar is ready")
        print("Radar -> Camera: radar is ready to start")
        pipe.recv()
        print("Both sensors are ready to start")
        while True:
            # check if there is data
            dataOk, frameNumber, detObj, timestamp = self.readAndParseData68xx(
                Dataport, configParameters)
            if dataOk:
                # Store the current frame into frameData
                frameData.append(
                    dict(Data=detObj, Time=timestamp, Frame_ID=currentIndex))
                print("Radar count: " + str(currentIndex))
                # draw(detObj) # very time consuming: > 0.1s
                currentIndex += 1
                if currentIndex == self. num:
                    # Enough frames: stop the sensor, close both ports and
                    # save everything as a pickle file.
                    CLIport.write(('sensorStop\n').encode())
                    CLIport.close()
                    Dataport.close()
                    print("Radar Done!")
                    self.outputFile = self.folderName + '/pointcloud' + '.pkl'
                    f = open(self.outputFile, 'wb')
                    pickle.dump(frameData, f)
                    f.close()
                    break
    # Function to read and parse the incoming data
    def readAndParseData68xx(self, Dataport, configParameters):
        """Drain the data UART into the rolling buffer, locate one complete
        mmWave-demo packet by its magic word, and decode the detected-points
        TLV if present.

        Returns ``(dataOK, frameNumber, detObj, timestamp)``; ``dataOK`` is
        1 only when a detected-points TLV was decoded in this call.
        """
        # Constants (several are kept for reference only and unused below)
        OBJ_STRUCT_SIZE_BYTES = 12
        BYTE_VEC_ACC_MAX_SIZE = 2**15
        MMWDEMO_UART_MSG_DETECTED_POINTS = 1
        MMWDEMO_UART_MSG_RANGE_PROFILE = 2
        maxBufferSize = 2**15
        tlvHeaderLengthInBytes = 8
        pointLengthInBytes = 16
        # Packet start marker emitted by the mmWave demo firmware.
        magicWord = [2, 1, 4, 3, 6, 5, 8, 7]
        # Initialize variables
        magicOK = 0  # set to 1 once a complete packet is in the buffer
        dataOK = 0  # set to 1 once a detected-points TLV has been decoded
        frameNumber = 0
        detObj = {}
        # Drain whatever has arrived on the UART since the last call.
        readBuffer = Dataport.read(Dataport.in_waiting)
        byteVec = np.frombuffer(readBuffer, dtype='uint8')
        byteCount = len(byteVec)
        # Check that the buffer is not full, and then add the data to the buffer
        if (self.byteBufferLength + byteCount) < maxBufferSize:
            self.byteBuffer[self.byteBufferLength:self.byteBufferLength +
                            byteCount] = byteVec[:byteCount]
            self.byteBufferLength = self.byteBufferLength + byteCount
        # Check that the buffer has some data
        if self.byteBufferLength > 16:
            # Candidate positions: every occurrence of the magic word's first byte
            possibleLocs = np.where(self.byteBuffer == magicWord[0])[0]
            # Keep only the positions where the full 8-byte magic word matches
            startIdx = []
            for loc in possibleLocs:
                check = self.byteBuffer[loc:loc+8]
                if np.all(check == magicWord):
                    startIdx.append(loc)
            # Check that startIdx is not empty
            if startIdx:
                # Discard any garbage bytes before the first packet start
                if startIdx[0] > 0 and startIdx[0] < self.byteBufferLength:
                    self.byteBuffer[:self.byteBufferLength-startIdx[0]
                                    ] = self.byteBuffer[startIdx[0]:self.byteBufferLength]
                    self.byteBuffer[self.byteBufferLength-startIdx[0]:] = np.zeros(
                        len(self.byteBuffer[self.byteBufferLength-startIdx[0]:]), dtype='uint8')
                    self.byteBufferLength = self.byteBufferLength - startIdx[0]
                # Guard against a negative buffer length
                if self.byteBufferLength < 0:
                    self.byteBufferLength = 0
                # word array to convert 4 bytes (LSB first) to a 32 bit number
                word = [1, 2**8, 2**16, 2**24]
                # Total packet length lives at byte offset 12 of the header
                totalPacketLen = np.matmul(self.byteBuffer[12:12+4], word)
                # Only proceed once the whole packet has been received
                if (self.byteBufferLength >= totalPacketLen) and (self.byteBufferLength != 0):
                    magicOK = 1
        # If magicOK is equal to 1 then process the message
        if magicOK:
            # word array to convert 4 bytes to a 32 bit number
            word = [1, 2**8, 2**16, 2**24]
            # Initialize the pointer index
            idX = 0
            # Read the fixed-size packet header field by field
            magicNumber = self.byteBuffer[idX:idX+8]
            idX += 8
            version = format(np.matmul(self.byteBuffer[idX:idX+4], word), 'x')
            idX += 4
            totalPacketLen = np.matmul(self.byteBuffer[idX:idX+4], word)
            idX += 4
            platform = format(np.matmul(self.byteBuffer[idX:idX+4], word), 'x')
            idX += 4
            frameNumber = np.matmul(self.byteBuffer[idX:idX+4], word)
            idX += 4
            timeCpuCycles = np.matmul(self.byteBuffer[idX:idX+4], word)
            idX += 4
            numDetectedObj = np.matmul(self.byteBuffer[idX:idX+4], word)
            idX += 4
            numTLVs = np.matmul(self.byteBuffer[idX:idX+4], word)
            idX += 4
            subFrameNumber = np.matmul(self.byteBuffer[idX:idX+4], word)
            idX += 4
            # Read the TLV messages
            for tlvIdx in range(numTLVs):
                # word array to convert 4 bytes to a 32 bit number
                word = [1, 2**8, 2**16, 2**24]
                # Check the header of the TLV message
                tlv_type = np.matmul(self.byteBuffer[idX:idX+4], word)
                idX += 4
                tlv_length = np.matmul(self.byteBuffer[idX:idX+4], word)
                idX += 4
                # Decode only the detected-points TLV.
                # NOTE(review): idX is not advanced past other TLV payloads
                # (tlv_length is unused), which assumes detected points is
                # the only TLV type the firmware is configured to emit.
                if tlv_type == MMWDEMO_UART_MSG_DETECTED_POINTS:
                    # One float32 x/y/z/velocity quadruple per detected object
                    x = np.zeros(numDetectedObj, dtype=np.float32)
                    y = np.zeros(numDetectedObj, dtype=np.float32)
                    z = np.zeros(numDetectedObj, dtype=np.float32)
                    velocity = np.zeros(numDetectedObj, dtype=np.float32)
                    for objectNum in range(numDetectedObj):
                        # Read the data for each object
                        x[objectNum] = self.byteBuffer[idX:idX +
                                                       4].view(dtype=np.float32)
                        idX += 4
                        y[objectNum] = self.byteBuffer[idX:idX +
                                                       4].view(dtype=np.float32)
                        idX += 4
                        z[objectNum] = self.byteBuffer[idX:idX +
                                                       4].view(dtype=np.float32)
                        idX += 4
                        velocity[objectNum] = self.byteBuffer[idX:idX +
                                                              4].view(dtype=np.float32)
                        idX += 4
                    # Store the data in the detObj dictionary
                    detObj = {"numObj": numDetectedObj, "x": x,
                              "y": y, "z": z, "velocity": velocity}
                    dataOK = 1
            # Shift the processed packet out of the rolling buffer
            if idX > 0 and self.byteBufferLength > idX:
                shiftSize = totalPacketLen
                self.byteBuffer[:self.byteBufferLength -
                                shiftSize] = self.byteBuffer[shiftSize:self.byteBufferLength]
                self.byteBuffer[self.byteBufferLength - shiftSize:] = np.zeros(
                    len(self.byteBuffer[self.byteBufferLength - shiftSize:]), dtype='uint8')
                self.byteBufferLength = self.byteBufferLength - shiftSize
                # Check that there are no errors with the buffer length
                if self.byteBufferLength < 0:
                    self.byteBufferLength = 0
        return dataOK, frameNumber, detObj, time.time()
| true |
f71dcc64e87c742daffcabb49c2f996cc062a09e | Python | pkug/matasano | /Set4/29.py | UTF-8 | 1,076 | 2.515625 | 3 | [] | no_license | #!/usr/bin/env python3
"""."""
import os
import struct
import random
from sha1 import SHA1 as sha1
# Secret-prefix MAC key: one random word from the system dictionary; the
# "attacker" code below is only allowed to use auth()/check(), not KEY itself.
# The file is opened with a context manager so the handle is closed (the
# original one-liner leaked it), and splitlines() strips newlines reliably
# (the original's [:-1] chopped a real character off a final line lacking a
# trailing newline).
with open('/usr/share/dict/words') as _words:
    KEY = random.choice(_words.read().splitlines()).encode()
# Known-good message whose secret-prefix SHA-1 MAC we will length-extend.
message = b'comment1=cooking%20MCs;userdata=foo;comment2=%20like%20a%20pound%20of%20bacon'
def auth(msg):
    """Server side: MAC *msg* with the secret-prefix construction SHA1(KEY || msg)."""
    keyed = KEY + msg
    return sha1(keyed).digest()
def check(msg, mac):
    """Server side: recompute the secret-prefix MAC for *msg* and compare it to *mac*."""
    expected = sha1(KEY + msg).digest()
    return expected == mac
def getpad(s):
    """Return the SHA-1 (MD-style) padding for a message of len(s) bytes:
    0x80, zero bytes up to offset 56 mod 64, then the bit length as a
    big-endian 64-bit integer, so message + padding is a multiple of 64."""
    bit_count = len(s) * 8
    offset = len(s) % 64  # position within the current 64-byte block
    # Fill to byte 56 of this block, or of the next one when already past 55.
    fill_len = (56 - offset) if offset < 56 else (120 - offset)
    return b'\x80' + b'\x00' * (fill_len - 1) + struct.pack('>Q', bit_count)
# --- SHA-1 length-extension attack ---
# The server MACs with SHA1(KEY || msg); the digest is the hash's final
# five 32-bit registers, so we can resume hashing from that state.
mac = auth(message)
forged = b';admin=true'
# Recover the internal registers h0..h4 from the known-good MAC.
regs = struct.unpack('>5I', mac)
# Brute-force the unknown key length: for each candidate length i, build
# message || glue-padding || forged, where the glue padding is what SHA-1
# would have appended to (key || message) of that combined length.
for i in range(30):
    keypad = b'A' * i  # stand-in for the i unknown key bytes
    newmsg = message + getpad(keypad + message) + forged
    # Total bit length the resumed hash must account for: key + extended message.
    count = (len(keypad) + len(newmsg)) * 8
    # Resume SHA-1 from the recovered registers to MAC the forged suffix.
    newmac = sha1(forged, regs, count).digest()
    if check(newmsg, newmac):
        print("NEWMSG:", newmsg)
        print("NEWMAC:", newmac)
        print("len(KEY) == len(keypad):", len(KEY) == len(keypad))
        break
| true |
8f9f8b3ec82749c55c730680c6a23c61434c0943 | Python | abhishekraok/EducationBot | /Result.py | UTF-8 | 154 | 2.609375 | 3 | [] | no_license | class Result():
    def __init__(self, success, value, intent):
        """Container for an operation outcome: a success flag, its payload
        value, and the intent it was produced for.
        (Attribute semantics inferred from names only -- confirm against
        callers of Result elsewhere in the project.)"""
        self.success = success
        self.value = value
        # NOTE(review): the trailing "| true |" on the next line is residue
        # from the dataset dump (a download_success column), not Python.
        self.intent = intent | true |
e1d0bca672bec9c3e1df47dab785deb338e2c2f3 | Python | aleric-cusher/pyboids | /pyboids/app/menu.py | UTF-8 | 2,913 | 3.078125 | 3 | [
"MIT"
] | permissive | """Menu screen."""
import pygame
from . import params
from . import assets
from . import gui
from .simulation import Simulation
# Maps pygame key constants to handler callables, invoked as fn(menu, event)
# from the KEYDOWN branch of Menu.main(); currently empty.
key_to_function = {
    # insert lambda hooks here
}
class Menu:
    """The menu loop: draws the start screen and dispatches to the simulation."""

    def __init__(self):
        # Main-loop flag; cleared by quit() or a pygame QUIT event.
        self.running = True
        self.screen = pygame.display.set_mode(params.SCREEN_SIZE)
        pygame.display.set_icon(assets.image('boids-logo.png'))
        pygame.display.set_caption(params.CAPTION)
        self.clock = pygame.time.Clock()
        # Widgets that receive update() calls / that are drawn each frame.
        self.to_update = pygame.sprite.Group()
        self.to_display = pygame.sprite.Group()

    def update(self, motion_event, click_event):
        """Forward the latest mouse events to every interactive widget."""
        self.to_update.update(motion_event, click_event)

    def display(self):
        """Draw every registered widget onto the menu screen."""
        for sprite in self.to_display:
            sprite.display(self.screen)

    def start_simulation(self):
        """Run the boids simulation; also leave the menu if pygame was closed."""
        s = Simulation(self.screen)
        if s.run() == "PYGAME_QUIT":
            self.quit()

    def main(self):
        """Build the menu widgets and run the event/draw loop until quit."""
        self.to_update = pygame.sprite.Group(
            gui.Button(
                pos=(6, 5.5), text="Start", font=params.H3_FONT,
                action=lambda: self.start_simulation()),
            gui.Button(
                pos=(6, 8), text="Quit", font=params.H3_FONT,
                action=lambda: self.quit())
        )
        self.to_display = pygame.sprite.Group(
            self.to_update,
            gui.Message(pos=(6, 2), text="PyBoids", font=params.H1_FONT),
            gui.Message(
                pos=(6, 3), text="An implementation of steering behaviors.",
                font=params.H5_FONT),
        )
        texts = []
        texts.append(
            "There are three entities : Boid - Leader boid - Obstacle.")
        texts.append("Right click to add an entity to the simulation space.")
        texts.append(
            # Bug fix: the original literals concatenated to "togglingthem".
            "You can play with many different behaviors by toggling " +
            "them on or off.")
        texts.append("Have fun !")
        self.to_display.add(
            gui.Message(pos=(6, 3.3 + 0.3 * k), text=t)
            for k, t in enumerate(texts))
        while self.running:
            motion_event, click_event = None, None
            self.screen.fill(params.MENU_BACKGROUND)
            self.clock.tick(params.FPS)  # cap the frame rate
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    self.running = False
                elif event.type == pygame.KEYDOWN:
                    # Optional per-key hooks registered in key_to_function.
                    if event.key in key_to_function:
                        key_to_function[event.key](self, event)
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    click_event = event
                elif event.type == pygame.MOUSEMOTION:
                    motion_event = event
            self.update(motion_event, click_event)
            self.display()
            pygame.display.flip()
        pygame.quit()

    def quit(self):
        """Stop the menu loop on the next iteration."""
        self.running = False
| true |
b7ce2a07bd85b45fdfaa911a78cc2b5f845b4ec2 | Python | IntelligentCow/corenet | /src/corenet/model/ray_traced_skip_connection.py | UTF-8 | 6,277 | 2.59375 | 3 | [
"Apache-2.0",
"CC-BY-4.0"
] | permissive | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Ray-traced skip connections."""
from typing import Callable
import torch as t
from torch import nn
import corenet.misc_util as util
from corenet.geometry import transformations
class SampleGrid2d(nn.Module):
"""Samples a 2D grid with the camera projected centers of a 3D grid."""
def __init__(self, in_channels: int, out_channels: int,
output_resolution: util.InputTensor):
"""Initializes the module.
Args:
in_channels: The number of input channels (in the 2D layer)
out_channels: The number of output channels (in the sampled 3D grid)
output_resolution: The 3D grid resolution (depth, height, width).
"""
super().__init__()
self.compress_channels = nn.Conv2d(in_channels, out_channels, kernel_size=1)
grid_depth, grid_height, grid_width = output_resolution
zz, yy, xx = t.meshgrid([
t.arange(0, grid_depth, dtype=t.float32),
t.arange(0, grid_height, dtype=t.float32),
t.arange(0, grid_width, dtype=t.float32)])
# Voxel grids are addressed using [z, y, x]
# shape: [depth, height, width, 3]
self.voxel_centers = t.stack([xx, yy, zz], dim=-1)
def _apply(self, fn: Callable[[t.Tensor], t.Tensor]) -> 'SampleGrid2d':
super()._apply(fn)
self.voxel_centers = fn(self.voxel_centers)
return self
def forward(self, grid2d: t.Tensor, voxel_projection_matrix: t.Tensor,
voxel_sample_location: t.Tensor, outside_value: float = 0,
flip_x=False, flip_y=False):
"""The forward pass.
Args:
grid2d: The 2D grid, float32[batch_size, num_channels, height, width].
voxel_projection_matrix: Matrix that projects voxel centers onto the screen,
float32[batch_size, 4, 4].
voxel_sample_location: 3D sample location within the voxels, float32[3].
outside_value: Value used to fill the channels for voxels whose
projected position is outside the 2D grid, float32[]
flip_x: Whether to flip the 2D grid along the X dimension. This can be
used to correct for a right/left handed 3D coordinate system issues.
flip_y: Whether to flip the 2D grid along the Y dimension. This can be
used to correct for a right/left handed 3D coordinate system issues.
Returns:
The resulting 3D grid, float32[batch_size, num_channels, depth, height,
width]. The content of cell [b, c, z, y, x] in the result will be equal to
grid2d[b, c, py, px], where
(px, py, _) = affine_transform(
voxel_projection_matrix, (x, y, z, 1)) * (height, width, 1).
If (b, py, px) lies outside the 2D image, the content of the cell in all
channels will be equal to outside_value.
"""
grid2d = util.to_tensor(grid2d, t.float32)
assert len(grid2d.shape) == 4
voxel_sample_location = util.to_tensor(voxel_sample_location, t.float32)
assert voxel_sample_location.shape == (grid2d.shape[0], 3)
compressed_grid2d = self.compress_channels(grid2d)
batch_size, channels, height, width = compressed_grid2d.shape
voxel_projection_matrix = util.to_tensor(voxel_projection_matrix, t.float32)
assert voxel_projection_matrix.shape == (batch_size, 4, 4)
voxel_centers = self.voxel_centers
grid_depth, grid_height, grid_width, _ = voxel_centers.shape
# shape: [batch, depth, height, width, 3]
voxel_centers = (voxel_centers[None]
.expand(batch_size, grid_depth, grid_height, grid_width, 3)
.contiguous())
voxel_centers = (
voxel_centers + voxel_sample_location[:, None, None, None, :])
# shape: [batch, depth * height * width, 3]
voxel_centers = voxel_centers.reshape([batch_size, -1, 3])
# Project the voxel centers onto the screen
projected_centers = transformations.transform_points_homogeneous(
voxel_centers, voxel_projection_matrix, w=1)
projected_centers = projected_centers.reshape([batch_size, grid_depth,
grid_height, grid_width, 4])
camera_depth = projected_centers[..., 2]
projected_centers = projected_centers[..., :3] / projected_centers[..., 3:4]
# XY range in OpenGL camera space is [-1:1, -1:1]. Transform to [0:1, 0:1].
projected_centers = projected_centers[..., :2] / 2 + 0.5
if flip_y:
projected_centers = projected_centers * (1, -1) + (0, 1)
if flip_x:
projected_centers = projected_centers * (-1, 1) + (1, 0)
# projected_centers contains (x, y) coordinates in [0, 1]^2 at this point.
# Convert to indices into 2D grid.
wh = projected_centers.new_tensor([[[[[width, height]]]]], dtype=t.float32)
pixel_indices = (projected_centers * wh).to(t.int64)
xx, yy = pixel_indices.unbind(-1) # type: t.Tensor
bb = t.arange(batch_size, dtype=t.int64, device=grid2d.device)
bb = bb[:, None, None, None]
bb = bb.expand(batch_size, grid_depth, grid_height, grid_width)
# Pad the grid to detect voxels which project outside the image plane
padded_grid2d = t.constant_pad_nd(compressed_grid2d, [1, 1, 1, 1],
value=outside_value)
xx = (xx + 1).clamp(0, padded_grid2d.shape[-1] - 1)
yy = (yy + 1).clamp(0, padded_grid2d.shape[-2] - 1)
# Sample the 2D grid
result = padded_grid2d[bb, :, yy, xx].permute([0, 4, 1, 2, 3])
assert result.shape == (batch_size, channels, grid_depth, grid_height,
grid_width)
# Discard voxels behind the camera
camera_depth = camera_depth[:, None, :, :, :].expand(result.shape)
result = t.where(camera_depth >= 0, result,
t.ones_like(result) * outside_value)
return result
| true |
252d881b62e19e836e27c08db2e9f564a2b7caf9 | Python | UIUCLearningLanguageLab/Visualized | /dendrogram_heatmap.py | UTF-8 | 4,288 | 2.515625 | 3 | [] | no_license | from typing import Optional, List
import numpy as np
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import dendrogram, linkage
from scipy.spatial.distance import pdist
from mpl_toolkits.axes_grid1 import make_axes_locatable
from src import config
def make_dendrogram_heatmap_fig(similarity_matrix: np.ndarray,
labels: List[str],
num_colors=None,
y_title=False,
vmin=0.0,
vmax=1.0):
"""
Returns fig showing dendrogram heatmap of similarity matrix
"""
assert len(labels) == len(similarity_matrix)
print('Matrix min: {} max {}'.format(np.min(similarity_matrix), np.max(similarity_matrix)))
print('Figure min: {} max {}'.format(vmin, vmax))
# fig
res, ax_heatmap = plt.subplots(figsize=config.Fig.fig_size, dpi=config.Fig.dpi)
ax_heatmap.yaxis.tick_right()
divider = make_axes_locatable(ax_heatmap)
ax_dendrogram_right = divider.append_axes("right", 0.8, pad=0.0, sharey=ax_heatmap)
ax_dendrogram_right.set_frame_on(False)
ax_colorbar = divider.append_axes("top", 0.1, pad=0.4)
# dendrogram
lnk0 = linkage(pdist(similarity_matrix))
if num_colors is None or num_colors <= 1:
left_threshold = -1
else:
left_threshold = 0.5 * (lnk0[1 - num_colors, 2] +
lnk0[-num_colors, 2])
dg0 = dendrogram(lnk0, ax=ax_dendrogram_right,
orientation='right',
color_threshold=left_threshold,
no_labels=True)
# Reorder the values in x to match the order of the leaves of the dendrograms
z = similarity_matrix[dg0['leaves'], :] # sorting rows
z = z[:, dg0['leaves']] # sorting columns for symmetry
# heatmap
max_extent = ax_dendrogram_right.get_ylim()[1]
im = ax_heatmap.imshow(z[::-1], aspect='auto',
cmap=plt.cm.jet,
extent=(0, max_extent, 0, max_extent),
vmin=vmin, vmax=vmax)
# colorbar
cb = plt.colorbar(im, cax=ax_colorbar, ticks=[vmin, vmax], orientation='horizontal')
cb.ax.set_xticklabels([vmin, vmax], fontsize=config.Fig.ax_label_fontsize)
cb.set_label('Correlation Coefficient', labelpad=-10, fontsize=config.Fig.ax_label_fontsize)
# set heatmap ticklabels
xlim = ax_heatmap.get_xlim()[1]
ncols = len(labels)
halfxw = 0.5 * xlim / ncols
ax_heatmap.xaxis.set_ticks(np.linspace(halfxw, xlim - halfxw, ncols))
ax_heatmap.xaxis.set_ticklabels(np.array(labels)[dg0['leaves']]) # for symmetry
ylim = ax_heatmap.get_ylim()[1]
nrows = len(labels)
halfyw = 0.5 * ylim / nrows
if y_title:
ax_heatmap.yaxis.set_ticks(np.linspace(halfyw, ylim - halfyw, nrows))
ax_heatmap.yaxis.set_ticklabels(np.array(labels)[dg0['leaves']])
# Hide all tick lines
lines = (ax_heatmap.xaxis.get_ticklines() +
ax_heatmap.yaxis.get_ticklines() +
ax_dendrogram_right.xaxis.get_ticklines() +
ax_dendrogram_right.yaxis.get_ticklines())
plt.setp(lines, visible=False)
# set label rotation and fontsize
x_labels = ax_heatmap.xaxis.get_ticklabels()
plt.setp(x_labels, rotation=-90)
plt.setp(x_labels, fontsize=config.Fig.ax_label_fontsize)
y_labels = ax_heatmap.yaxis.get_ticklabels()
plt.setp(y_labels, rotation=0)
plt.setp(y_labels, fontsize=config.Fig.ax_label_fontsize)
# make dendrogram labels invisible
plt.setp(ax_dendrogram_right.get_yticklabels() + ax_dendrogram_right.get_xticklabels(),
visible=False)
res.subplots_adjust(bottom=0.2) # make room for tick labels
res.tight_layout()
return res
NUM_WORDS = 12
NOISE = 0.3
# create random words and similarity matrix
words = [f'word-{n}' for n in range(NUM_WORDS)]
tmp1 = np.random.random((1, NUM_WORDS)).repeat(NUM_WORDS//2, axis=0) + NOISE * np.random.random((NUM_WORDS//2, NUM_WORDS))
tmp2 = np.random.random((1, NUM_WORDS)).repeat(NUM_WORDS//2, axis=0) + NOISE * np.random.random((NUM_WORDS//2, NUM_WORDS))
sim_matrix = np.vstack([tmp1, tmp2])
fig = make_dendrogram_heatmap_fig(sim_matrix, words)
fig.show()
| true |
49801101cd0190e873ba01d7e21302aa7c55c946 | Python | awhitford10/assessment-2 | /classes/video.py | UTF-8 | 975 | 3.078125 | 3 | [] | no_license | class Video():
def check_video_in_inventory(self,video_title):
available_flag = False
for video in self.videos:
if video['title'] == video_title and int(video['copies_available']) >= 0:
available_flag = True
return(video)
elif video['title'] == video_title and int(video['copies_available']) == 0:
print(f"\nAll copies of {video['title']} are currently rented out\n")
return
if available_flag == False:
print(f'\n{video_title} does not appear to be in our inventory.\n')
return
def check_return_video_in_inventory(self,video_title):
video_flag = False
for video in self.videos:
if video['title'] == video_title:
video_flag = True
return(video)
if video_flag == False:
print(f'\n{video_title} was not found in the system\n')
return
| true |
262eeb85fec34650679b2f654a340c3dcfc93a75 | Python | Aasthaengg/IBMdataset | /Python_codes/p02948/s689021086.py | UTF-8 | 523 | 2.6875 | 3 | [] | no_license | def main():
    import heapq
    # n reward entries; m rounds in which at most one reward may be taken.
    n,m = map(int,input().split())
    # Group the rewards b by their key a (the round in which they unlock).
    job = {}
    for i in range(n):
        a,b = map(int,input().split())
        if a not in job.keys():
            job[a] = [b]
        else:
            job[a].append(b)
    ans = 0
    # Max-heap of currently available rewards; values are negated because
    # heapq implements a min-heap.
    hp = []
    for i in range(1,m+1):
        # Rewards keyed by i become available in round i.
        if i in job.keys():
            for j in range(len(job[i])):
                heapq.heappush(hp,-1*job[i][j])
        # Greedily take the single largest available reward this round.
        if len(hp)>0:
            ans += -1*heapq.heappop(hp)
    print(ans)
# Standard guard: run the solver only when executed as a script.
if __name__ == "__main__":
    main()
| true |
8c507557d4cffdfa0cfffe5e478c7cdff35d3cc2 | Python | BurnySc2/Twitch-SC2-Stream-Scripts | /points_system/point_system.py | UTF-8 | 8,527 | 2.53125 | 3 | [
"MIT"
] | permissive | from __future__ import annotations
from typing import TYPE_CHECKING
from twitchio import Message
import json
import os
import time
from pathlib import Path
from dataclasses import dataclass
from dataclasses_json import DataClassJsonMixin
import atexit
from typing import List, Dict
from loguru import logger
from plugin_base_class.base_class import BaseScript
if TYPE_CHECKING:
from bot import TwitchChatBot
@dataclass()
class PointSystemConfig(DataClassJsonMixin):
    """Tunable parameters for the point system, deserialized from config.json."""
    # Seconds between point payouts to everyone currently watching.
    give_points_interval: int = 300
    # Points granted per payout to passive viewers (lurkers).
    viewer_pointers_increment: int = 5
    # A viewer counts as an "active chatter" if their last message is
    # younger than this many seconds.
    active_chatter_time: int = 1800
    # Points granted per payout to active chatters.
    active_chatter_points_increment: int = 50
class PointSystem(BaseScript):
    """Awards and persists loyalty points for viewers of a twitch channel.

    While the stream is live, every viewer periodically receives points;
    viewers who chatted recently ("active chatters") earn a larger increment
    than lurkers. The point database is a JSON file that is written lazily
    in batches instead of after every single change.
    """

    def __init__(self, bot=None):
        self.bot: TwitchChatBot = bot
        self.database_path: Path = Path(__file__).parent / "db.json"
        # Launch database
        self.db = {}
        self.load_database()
        # When the dict was last updated, which means a new user was inserted, points were updated or subtracted
        self.db_last_updated: float = time.time()
        # When the dict was last written to file
        self.db_last_written: float = time.time()
        self.db_changes_pending: int = 0
        # Keep track on when the points were last updated for all users
        self.timestamp_last_points_given: float = time.time()
        # Check if stream is online
        self.stream_is_online: bool = False
        self.last_stream_is_online_check: float = time.time()
        self.stream_is_online_check_interval: int = 120
        self.name_to_id_dict: Dict[str, int] = {}
        # Load config file
        config_file_path = Path(__file__).parent / "config.json"
        with open(config_file_path) as f:
            self.config = PointSystemConfig.from_json(f.read())
        atexit.register(self.on_exit)
        logger.info(
            f"At the current configuration, chatters receive {60 * self.config.active_chatter_points_increment / self.config.give_points_interval} points per minute while lurker-viewers receive {60 * self.config.viewer_pointers_increment / self.config.give_points_interval} points per minute"
        )

    def load_database(self):
        """Populate self.db from the JSON database file, if it exists."""
        if self.database_path.absolute().is_file():
            with self.database_path.open() as f:
                data = json.load(f)
                self.db.update(data)
        else:
            logger.warning(f"Database file does not exist, creating a new one: {self.database_path.absolute()}")

    def save_database(self):
        """Write self.db to disk and reset the pending-change bookkeeping."""
        with self.database_path.open("w") as f:
            json.dump(self.db, f, sort_keys=True, indent=2)
        self.db_changes_pending = 0
        self.db_last_written = time.time()

    def get_points_of_user(self, user: str):
        """Return the current points of *user*; unknown users count as 0."""
        if user not in self.db:
            logger.info(f"User {user} was not found in points database")
        return self.db.get(user, {"points": 0})["points"]

    def add_new_user(self, user: str, points: int = 0, last_message: float = 0):
        """Insert a brand-new user record; *user* must not exist yet."""
        assert user not in self.db
        self.db[user] = {"points": points, "last_message": last_message}
        self.db_last_updated = time.time()
        self.db_changes_pending += 1

    def update_last_message(self, user: str):
        """
        Update when the last message of the user was sent.
        """
        if user in self.db:
            self.db[user]["last_message"] = time.time()
            self.db_last_updated = time.time()
            self.db_changes_pending += 1
        else:
            logger.debug(f"Found a new face in chat: {user}")
            self.add_new_user(user, last_message=time.time())

    def add_points(self, user: str, amount: int):
        """ Increment points of a user. Raises KeyError for unknown users. """
        self.db[user]["points"] += amount
        if amount != 0:
            self.db_last_updated = time.time()
            self.db_changes_pending += 1

    def remove_points(self, user: str, amount: int):
        """ Remove points from a user """
        self.add_points(user, -amount)

    async def give_points_to_all_chatters(self):
        """Grant the periodic point payout to everyone currently in chat."""
        self.timestamp_last_points_given = time.time()
        viewers = await self.bot.get_chatters(self.bot.main_channel_name)
        for viewer in viewers.all:
            # All chatters are displayed as display name, so this doesnt work for asian characters?
            viewer_name = viewer.lower()
            # Viewer has not chatted yet, so add him to the database
            if viewer_name not in self.db:
                self.add_new_user(viewer_name, last_message=0)
            time_last_message = self.db[viewer_name]["last_message"]
            user_is_active_chatter = time.time() - time_last_message < self.config.active_chatter_time
            # If viewer has chatted in the last X minutes, give him more points than a lurker
            if user_is_active_chatter:
                self.add_points(viewer_name, amount=self.config.active_chatter_points_increment)
            else:
                self.add_points(viewer_name, amount=self.config.viewer_pointers_increment)
        self.db_last_updated = time.time()
        self.db_changes_pending += 1

    async def check_if_stream_is_live(self, channel_name: str) -> bool:
        """Return True if *channel_name* is currently streaming live."""
        # Get the ID of the streamer to be able to poll if the stream is live
        channel_name = channel_name.lower()
        channel_id = self.name_to_id_dict.get(channel_name, None)
        if channel_id is None:
            for channel_info in self.bot.twitch_client.users.translate_usernames_to_ids([channel_name]):
                # Cache name to id
                self.name_to_id_dict[channel_info.name] = channel_info.id
            channel_id = self.name_to_id_dict[channel_name]
        # Check if stream is live
        stream_data = self.bot.twitch_client.streams.get_stream_by_user(channel_id)
        if stream_data and stream_data.stream_type == "live":
            return True
        return False

    async def on_ready(self):
        """ Once the bot starts, check immediately if channel is live. """
        self.stream_is_online = await self.check_if_stream_is_live(self.bot.main_channel_name)
        logger.info(f"Point system initialized. Stream is live ({self.bot.main_channel_name}): {self.stream_is_online}")

    async def on_message(self, message: Message):
        # Update last time a user entered a message, so they get more points, instead of people who arent chatting and just watching
        self.update_last_message(message.author.name)

    def on_exit(self):
        """ Gets called when this instance is shut down - application exit """
        # Only write to database if the database was changed at all
        if self.db_last_updated > self.db_last_written:
            logger.warning("Bot was closed before data was written to database file")
            self.save_database()
            # BUG FIX: the success message used to be logged unconditionally,
            # even when no write had happened at all.
            logger.warning("Data was successfully written to database file on bot shutdown.")

    async def on_tick(self):
        # Every X minutes, check if the stream is online and only if stream is online, give chatters / viewers points
        if time.time() - self.last_stream_is_online_check > self.stream_is_online_check_interval:
            stream_is_online: bool = await self.check_if_stream_is_live(self.bot.main_channel_name)
            if self.stream_is_online != stream_is_online:
                self.stream_is_online = stream_is_online
                logger.info(
                    f"Checked if stream {self.bot.main_channel_name} is live. Detected a change, stream is live: {stream_is_online}"
                )
        if self.stream_is_online:
            # Give points to chatters every X minutes
            if time.time() - self.timestamp_last_points_given > self.config.give_points_interval:
                await self.give_points_to_all_chatters()
        # Write current database to file (don't write after each change instantly to file)
        if (
            # Wait x seconds before writing the updated database entry to file
            (time.time() - self.db_last_updated > 30 or self.db_changes_pending > 5)
            # Only write to database if the database was changed at all
            and self.db_last_updated > self.db_last_written
            # At least one change is pending
            and self.db_changes_pending > 0
        ):
            self.save_database()
# Manual smoke test: exercise the point API without a running chat bot
# (bot stays None).
if __name__ == "__main__":
    ps = PointSystem()
    # NOTE(review): add_points indexes the db directly, so this assumes
    # "burnysc2" already exists in db.json — verify before relying on it.
    ps.add_points("burnysc2", 1)
    ps.update_last_message("burnysc2")
| true |
f40057801f93618dc9dd31a0eb15ee6b037d6a47 | Python | benjaminpommier/vic | /train_model.py | UTF-8 | 7,262 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 25 08:44:29 2020
@author: Benjamin Pommier
"""
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.metrics import classification_report
import matplotlib.pyplot as plt
import glob
import pandas as pd
import pickle
import numpy as np
# Load data
def load_data(path_train=None, path_dev=None, path_labels=None, partial='all'):
    """Load train/dev feature matrices and their labels from CSV files.

    Parameters
    ----------
    path_train, path_dev, path_labels : str or None
        CSV paths; each defaults to the corresponding file under ``features/``.
    partial : str
        Which feature families to keep: 'all', 'spatial_hsv', 'spatial_hog',
        'hsv_hog', 'hsv' or 'hog'.

    Returns
    -------
    tuple
        ``(X_train, X_dev, y_train, y_dev)``.

    Raises
    ------
    ValueError
        If ``partial`` is not one of the supported subsets (previously this
        crashed later with a NameError on the unbound column list).
    """
    if path_train is None:
        path_train = 'features/features_train.csv'
    if path_dev is None:
        path_dev = 'features/features_dev.csv'
    if path_labels is None:
        path_labels = 'features/labels.csv'

    # Column indices of each feature family inside the feature CSVs.
    spatial = [0, 1]
    hsv = list(range(2, 50))
    hog = list(range(50, 86))
    image = [86]  # per-row image id column

    subsets = {
        'all': spatial + hsv + hog,
        'spatial_hsv': spatial + hsv,
        'spatial_hog': spatial + hog,
        'hsv_hog': hsv + hog,
        'hsv': hsv,
        'hog': hog,
    }
    if partial not in subsets:
        raise ValueError(f"Unknown feature subset: {partial!r}")
    usecol = subsets[partial] + image

    exception = 13  # Problematic image, label shape differs from image shape

    # Loading training set
    print('--- Loading Training Set ---')
    X_train = pd.read_csv(path_train, usecols=usecol)
    max_im = X_train.image.max()
    X_train = X_train[(X_train.image <= max_im) & (X_train.image != exception)]

    # Loading dev set
    print('--- Loading Dev Set ---')
    X_dev = pd.read_csv(path_dev, usecols=usecol)

    print('--- Loading Ground Truth ---')
    labels = pd.read_csv(path_labels)
    y_train = labels[(labels.image <= max_im) & (labels.image != exception)].label
    y_dev = labels[(labels.image > max_im) & (labels.image < 31)].label

    print('--- END OF DATA LOADING ---')
    return X_train, X_dev, y_train, y_dev
def train(X_train, y_train, model=None, gridsearch=False, filename_model='None'):
    """Fit *model* on the training data and pickle it to ``<filename_model>.sav``.

    With ``gridsearch=True`` a small hyper-parameter grid (selected from the
    estimator's type) is searched with 3-fold cross-validation and the best
    estimator is refit on the full training set before saving.

    Raises ValueError if ``gridsearch`` is requested for an estimator type
    without a predefined grid.
    """
    if model is None:
        model = RandomForestClassifier(n_jobs=-1)
    if gridsearch:
        # Fixed: compare with isinstance() instead of `type(x) == type(Y())`,
        # and fail loudly for unsupported estimators (previously: NameError
        # on the unbound `params`).
        if isinstance(model, LogisticRegression):
            params = {'C': [0.01, 0.1, 1, 10]}
        elif isinstance(model, RandomForestClassifier):
            params = {'n_estimators': [10, 30, 50], 'min_samples_leaf': [10, 100]}
        elif isinstance(model, SVC):
            params = {'C': [0.1, 1, 10]}
        else:
            raise ValueError(f"No parameter grid defined for {type(model).__name__}")
        searcher = GridSearchCV(model, param_grid=params, cv=3, verbose=2, n_jobs=-1)
        searcher.fit(X_train, y_train)
        model = searcher.best_estimator_
        print(model)
        model.n_jobs = -1  # Setting the parameter afterwards, otherwise None
        model.fit(X_train, y_train)
    else:
        model.n_jobs = -1
        model.fit(X_train, y_train)
    # Save the model to disk. Fixed: use a context manager so the file
    # handle is closed instead of leaked.
    with open(filename_model + '.sav', 'wb') as f:
        pickle.dump(model, f)
def evaluate(X_train, X_dev, y_train, y_dev, type_model='rf'):
    """Score a previously pickled model on the train and dev splits.

    Prints a classification report per split, pickles the report dicts to
    ``<type_model>_results_<split>.pkl`` and returns the class-probability
    arrays as ``(train_probas, dev_probas)``.
    """
    known_models = {
        'rf': 'model/random_forest.sav',
        'logreg': 'model/logreg.sav',
        'svc': 'model/svc.sav',
    }
    # Anything not in the shortcut table is treated as a model path prefix.
    model_path = known_models.get(type_model, type_model + '.sav')
    best_model = pickle.load(open(model_path, 'rb'))

    probas = []
    for split_name, X, y in (('train', X_train, y_train), ('dev', X_dev, y_dev)):
        predictions = best_model.predict(X)
        probas.append(best_model.predict_proba(X))
        print(classification_report(y, predictions))
        report = classification_report(y, predictions, output_dict=True)
        pickle.dump(report, open(type_model + '_results_' + split_name + '.pkl', 'wb'))
    return tuple(probas)
#%%Training
# Fit and persist one model of each family per feature subset, then score
# both on the train/dev splits.
partial = ['all']
for prt in partial:
    X_train, X_dev, y_train, y_dev = load_data(partial=prt)
    candidates = {
        '_random_forest': RandomForestClassifier(n_estimators=10, min_samples_leaf=100, n_jobs=-1),
        '_logreg': LogisticRegression(C=1, n_jobs=-1),
    }
    for suffix, estimator in candidates.items():
        train(X_train, y_train, model=estimator, gridsearch=True, filename_model=prt + suffix)
    for suffix in candidates:
        evaluate(X_train, X_dev, y_train, y_dev, type_model=prt + suffix)
#%%Visualisation
def visualize(labels=None, features=None, model=None, image_num=None, predict=True):
    """Plot an original face image, its ground truth and six probability maps.

    Parameters
    ----------
    labels : array or None
        Precomputed probability maps, displayed when ``predict`` is False.
    features : DataFrame or None
        Feature table with an ``image`` column; the rows matching
        ``image_num`` are fed to ``model`` when ``predict`` is True.
    model : fitted classifier or None
        Must provide ``predict_proba``; only used when ``predict`` is True.
    image_num : int
        Image id; must be < 100 (file names are zero-padded to three digits).
    predict : bool
        Whether to run ``model`` or display the supplied ``labels``.

    Raises
    ------
    NotImplementedError
        For ``image_num >= 100``.
    """
    ORIGINAL_PATH = 'data/FASSEG-frontal03/Original/'
    # ORIGINAL_PATH = 'data/perso/'
    LABELED_PATH = 'data/FASSEG-frontal03/Labeled/'
    EXTENSION = '.jpg'
    if image_num < 10:
        or_image = plt.imread(ORIGINAL_PATH + '00' + str(int(image_num)) + EXTENSION)
        lbl_image = plt.imread(LABELED_PATH + '00' + str(int(image_num)) + EXTENSION)
    elif 10 <= image_num < 100:  # fixed: chained comparison instead of bitwise &
        or_image = plt.imread(ORIGINAL_PATH + '0' + str(int(image_num)) + EXTENSION)
        lbl_image = plt.imread(LABELED_PATH + '0' + str(int(image_num)) + EXTENSION)
    else:
        raise NotImplementedError
    # Prediction for a given image
    if predict:
        X = features[features.image == image_num]
        pred = model.predict_proba(X)
    else:
        pred = labels
    try:
        # Display: original + ground truth on the first row, then one
        # probability map per facial part (probability columns 0..5).
        plt.figure(figsize=(20, 20))
        plt.subplot(331)
        plt.title('Original')
        plt.imshow(or_image)
        plt.subplot(332)
        plt.title('Ground truth')
        plt.imshow(lbl_image)
        for idx, part in enumerate(['Mouth', 'Nose', 'Eyes', 'Hair', 'Background', 'Skin']):
            plt.subplot(334 + idx)
            plt.title(part)
            plt.imshow(pred[:, idx].reshape((512, -1)), cmap='gray')
        plt.tight_layout()
        plt.show()
    except Exception:
        # Best-effort display. Fixed: catch Exception instead of a bare
        # except so KeyboardInterrupt/SystemExit are not swallowed.
        pass
# im_num = 5
# file = pd.read_csv('probability_maps_perso/' + str(im_num) + '.csv').to_numpy()
# visualize(labels = file, image_num=im_num, predict=False)
# im_num = 9
# mdl = pickle.load(open('model/all_random_forest.sav', 'rb'))
# visualize(features=X_train, model=mdl, image_num=im_num, predict=True) | true |
5f301f7adbfe08c600945e43e03b31c447281490 | Python | Aasthaengg/IBMdataset | /Python_codes/p03240/s505325708.py | UTF-8 | 635 | 2.9375 | 3 | [] | no_license | n = int(input())
XYH = [list(map(int, input().split())) for _ in range(n)]

def _fits(cx, cy, height):
    """Verify every measurement against a pyramid of `height` at (cx, cy)."""
    for x, y, h in XYH:
        expected = height - abs(x - cx) - abs(y - cy)
        if h > 0:
            if expected != h:
                return False
        elif expected > 0:
            # A zero reading only allows non-positive expected heights.
            return False
    return True

# Brute-force every possible center on the 101x101 grid.
for cx in range(101):
    for cy in range(101):
        # The first non-zero measurement pins down the candidate height.
        height = -1
        for x, y, h in XYH:
            if h > 0:
                height = h + abs(x - cx) + abs(y - cy)
                break
        if _fits(cx, cy, height):
            print(cx, cy, height)
            exit()
90c59cc19cf30a5b4fa02407f3149325640aa273 | Python | SSITB/CorePython | /for_5.py | UTF-8 | 438 | 3.46875 | 3 | [] | no_license | for row in range(7):
for col in range(7):
if row == 0 or row == 3 or row == 6 or col == 6: #or(col==0 and (row>0 and row<6)):
print('*',end=' ')
else:
print(end=' ')
print()
# for row in range(5):
# for col in range(5):
# if row == 0 or row == 4 or col == 0 or col == 4:
# print("*",end=' ')
# else :
# print(end=' y ')
# print() | true |