blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
# Cubes of the integers 1..10, printed one per line.
number = list(map(lambda base: base ** 3, range(1, 11)))
print(*number, sep="\n")
72b71eee1d51c6d8c4e873e8e4b629c44cd4be1d | Python | JennaVergeynst/code_cluster_classification | /classify_receiver_clusters.py | UTF-8 | 2,032 | 3.109375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri June 30 2017
@author: jennavergeynst
"""
import pandas as pd
def classify_receiver_clusters(fixed_tags, acc_goal, confidence_level, min_group_size=10):
    """
    All fixed positions are grouped per receiver cluster used for calculation of the position.
    For each receiver cluster, the percentage of positions with error <= accuracy_goal is calculated.
    Only clusters with a groupsize >= min_group_size are classified:
    - as good performers if this percentage is >= confidence_level;
    - as bad performers if this percentage is < confidence_level.

    Parameters:
    -----------
    fixed_tags = dataframe with positions of the fixed transmitters, with at least columns HPEm and URX
        URX = list of receivers used to calculate the position
        (note: an 'acc_check' column is added to this dataframe in place)
    acc_goal = maximum allowed error
    confidence_level = minimum proportion of the group that has to meet the acc_goal
    min_group_size = minimum number of positions calculated by a receiver cluster as a
        prerequisite to be classified, default 10

    Returns:
    --------
    URX_groups = dataframe with for each receiver cluster (URX-group) the percentage of
        positions with error <= accuracy goal and the groupsize
    good_performers = list with good performing receiver clusters
    bad_performers = list with bad performing receiver clusters
    """
    # Vectorised comparison instead of a Python-level list comprehension.
    fixed_tags.loc[:, 'acc_check'] = fixed_tags['HPEm'] <= acc_goal
    # mean of the boolean column == fraction of positions meeting the goal.
    URX_groups = fixed_tags.groupby(by=['URX'])['acc_check'].agg(['mean', 'count'])
    URX_groups = URX_groups.reset_index().rename(columns={'mean': 'percentage', 'count': 'groupsize'})
    # Only sufficiently large clusters are classified.
    URX_subset = URX_groups[URX_groups['groupsize'] >= min_group_size].reset_index(drop=True)
    # Select directly instead of building throwaway DataFrame copies.
    good_performers = list(URX_subset.loc[URX_subset['percentage'] >= confidence_level, 'URX'])
    bad_performers = list(URX_subset.loc[URX_subset['percentage'] < confidence_level, 'URX'])
    return URX_groups, good_performers, bad_performers
8ba384066ff5331913a48ad0c5498c3c3ec8753b | Python | luckystar1992/word2mvec | /Model.py | UTF-8 | 13,074 | 2.53125 | 3 | [] | no_license | #coding:utf-8
"""
模型类
"""
import sys, os
import struct
import time
import tqdm
import numpy as np
from multiprocessing import Array, Value
import Util
class SingleModel:
    """Single-sense word2vec-style model whose weights live in shared memory.

    Both weight matrices are stored as lock-free multiprocessing.Array buffers
    so that worker processes spawned during training can update them in place.
    """

    def __init__(self, args, vocab):
        """
        :param args: parsed argument namespace (training hyper-parameters)
        :param vocab: vocabulary object; len(vocab) is the vocabulary size
        """
        self.args = args
        self.vocab = vocab
        self.embedding_size = args.embedding_size
        self.vocab_size = len(self.vocab)

    def init_model(self):
        """Initialize the model parameters."""
        # [V, dim] matrix used as the first-layer weights.
        # net1 and net2 use shared memory; init_process in train converts them
        # once more, but both views point at the same underlying buffer.
        tmp = np.random.uniform(low=-0.5/self.embedding_size,
                                high=0.5/self.embedding_size,
                                size=(self.vocab_size, self.embedding_size))
        net1 = np.ctypeslib.as_ctypes(tmp)
        self.net1 = Array(net1._type_, net1, lock=False)
        # [V, dim] matrix used as the second-layer weights (zero-initialised).
        tmp = np.zeros(shape=(self.vocab_size, self.embedding_size))
        net2 = np.ctypeslib.as_ctypes(tmp)
        self.net2 = Array(net2._type_, net2, lock=False)

    def getNetMean(self, index):
        # Mean of one layer-1 row; handy for monitoring training.
        return np.mean(self.net1[index])

    def updateNet1(self, index, gradient):
        """Apply a gradient update to row `index` of the layer-1 weights."""
        self.net1[index] += gradient

    def updateNet2(self, index, gradient):
        """Apply a gradient update to row `index` of the layer-2 weights."""
        self.net2[index] += gradient

    def saveEmbedding(self, epoch):
        """Save the word vectors (and the run configuration) for this epoch."""
        embedding_folder = self.args.embedding_folder
        print(" Saving {out_folder}".format(out_folder=embedding_folder))
        # First persist the training parameters of this run.
        with open(os.path.join(embedding_folder, 'config.txt'), 'w') as f:
            for (args_key, args_value) in sorted(vars(self.args).items()):
                if isinstance(args_value, (int, float, bool, str)):
                    f.write("%20s: %10s\n" % (args_key, str(args_value)))
        # Then write the word vectors, one "<word> <v1> ... <vn>" line per token.
        if self.args.binary:
            # NOTE(review): despite the .bin name this writes the same text
            # layout, merely encoded to bytes -- confirm readers expect that.
            embedding_path = os.path.join(embedding_folder, 'wv_epoch{epoch}.bin'.format(epoch=epoch))
            with open(embedding_path, 'wb') as f_out:
                f_out.write(('%d %d\n' % (len(self.net1), self.args.embedding_size)).encode())
                for token, vector in zip(self.vocab, self.net1):
                    f_out.write(('%s %s\n' % (token.word, ' '.join([str(s) for s in vector]))).encode())
        else:
            embedding_path = os.path.join(embedding_folder, 'wv_epoch{epoch}.txt'.format(epoch=epoch))
            with open(embedding_path, 'w') as f_out:
                f_out.write('%d %d\n' % (len(self.net1), self.args.embedding_size))
                for token, vector in zip(self.vocab, self.net1):
                    f_out.write('%s %s\n' % (token.word, ' '.join([str(s) for s in vector])))

    def saveEmbeddingOut(self, epoch):
        """Save the second (output-side) word vectors. Not implemented yet."""

    def saveSenses(self):
        """Save the context vectors; only meaningful in multi-sense subclasses."""
        raise NotImplementedError
class MultiSenseModel(SingleModel):
    """Multi-sense word vector model: one main embedding plus several sense vectors per token."""

    def __init__(self, args, vocab):
        super(MultiSenseModel, self).__init__(args, vocab)
        self.senses_number = args.senses

    def init_model(self):
        """
        Initialize the model parameters: the weights, the multi-sense word
        vectors and the per-sense context vectors. The vectors comprise one
        meta-embedding and up to `senses_number` sense embeddings per token.
        """
        # [V] how many distinct senses each token currently has.
        tmp = np.zeros(self.vocab_size, dtype='int32')
        senses_count = np.ctypeslib.as_ctypes(tmp)
        self.senses_count = Array(senses_count._type_, senses_count, lock=False)
        # [V, senses+1] how many times each sense of each token was accessed.
        tmp = np.zeros(shape=(self.vocab_size, self.senses_number + 1), dtype='int32')
        senses_access = np.ctypeslib.as_ctypes(tmp)
        self.senses_access = Array(senses_access._type_, senses_access, lock=False)
        # [V, dim] the model's main embedding, plus its matching context vector below.
        tmp = np.random.uniform(low=-0.5/self.embedding_size,
                                high=0.5/self.embedding_size,
                                size=(self.vocab_size, self.embedding_size))
        main_embedding = np.ctypeslib.as_ctypes(tmp)
        self.main_embedding = Array(main_embedding._type_, main_embedding, lock=False)
        tmp = np.zeros(shape=(self.vocab_size, self.embedding_size))
        main_sense = np.ctypeslib.as_ctypes(tmp)
        self.main_sense = Array(main_sense._type_, main_sense, lock=False)
        # [V, senses, dim] the multi-sense word vectors
        # (the first sense vector plays the role of main_embedding).
        tmp = np.random.uniform(low=-0.5/self.embedding_size,
                                high=0.5/self.embedding_size,
                                size=(self.vocab_size, self.senses_number, self.embedding_size))
        embedding = np.ctypeslib.as_ctypes(tmp)
        self.embedding = Array(embedding._type_, embedding, lock=False)
        # [V, senses, dim] the context information matching each sense vector
        # (the first context vector plays the role of main_sense).
        tmp = np.zeros(shape=(self.vocab_size, self.senses_number, self.embedding_size))
        senses = np.ctypeslib.as_ctypes(tmp)
        self.senses = Array(senses._type_, senses, lock=False)
        # [V, dim] matrix used as the second-layer weights.
        tmp = np.zeros(shape=(self.vocab_size, self.embedding_size))
        weights = np.ctypeslib.as_ctypes(tmp)
        self.weights = Array(weights._type_, weights, lock=False)

    def getContextVector(self, context_ids):
        """Average the main embeddings of the given context token ids into one vector."""
        avg_vector = np.mean([self.main_embedding[t] for t in context_ids], axis=0)
        return avg_vector

    def getSimilarMax(self, context_vector, token):
        """Compare context_vector with the token's existing senses; return (best_index, best_cosine)."""
        current_count = self.senses_count[token]
        # Candidates: the main sense followed by the allocated sense vectors.
        # NOTE(review): the slice 0:current_count-1 excludes the last allocated
        # sense -- confirm this off-by-one is intentional.
        candidate_vectors = np.insert(self.senses[token][0:current_count - 1], 0, values=self.main_sense[token], axis=0)
        cos_list = np.array([Util.cos_sim(context_vector, v) for v in candidate_vectors])
        cos_max_index = np.argmax(cos_list)
        cos_max_value = cos_list[cos_max_index]
        return cos_max_index, cos_max_value

    def clusterSense(self):
        """Cluster senses/embeddings and drop rarely-updated ones (not implemented)."""
        pass

    def saveEmbedding(self, epoch):
        """Save word vectors, their context vectors and access counts into separate files."""
        embedding_folder = self.args.embedding_folder
        print(" Saving {out_folder}".format(out_folder=embedding_folder))
        if not self.args.binary:
            embedding_path = os.path.join(embedding_folder, 'wv_epoch{epoch}.txt'.format(epoch=epoch))
            sense_path = os.path.join(embedding_folder, 'sv_epoch{epoch}.txt'.format(epoch=epoch))
            count_path = os.path.join(embedding_folder, 'count_epoch{epoch}.txt'.format(epoch=epoch))
            with open(embedding_path, 'w') as f_wv, open(sense_path, 'w') as f_sv, open(count_path, 'w') as f_count:
                f_wv.write("%d %d\n" % (len(self.embedding), self.args.embedding_size))
                f_sv.write("%d %d\n" % (len(self.embedding), self.args.embedding_size))
                f_count.write("%d %d\n" % (len(self.embedding), self.args.embedding_size))
                for index, vocab in tqdm.tqdm(enumerate(self.vocab)):
                    count = self.senses_count[index]
                    # Save how many times each sense of this token was counted.
                    f_count.write("%s %d " % (vocab.word, count))
                    for value in self.senses_access[index][:count]:
                        f_count.write("%s " % value)
                    f_count.write("\n")
                    # Write the main word vector first, then the sense vectors.
                    f_wv.write("%s %d " % (vocab.word, count))
                    f_wv.write("%s " % " ".join([str(item) for item in self.main_embedding[index]]))
                    for vector in self.embedding[index][:count-1]:
                        f_wv.write("%s " % " ".join([str(item) for item in vector]))
                    f_wv.write("\n")
                    # Main context vector first, then the per-sense contexts.
                    f_sv.write("%s %d " % (vocab.word, count))
                    f_sv.write("%s " % " ".join([str(item) for item in self.main_sense[index]]))
                    for vector in self.senses[index][:count-1]:
                        f_sv.write("%s " % " ".join([str(item) for item in vector]))
                    f_sv.write("\n")
class MultiSenseModel2(SingleModel):
    """Variant where slot 0 of `senses`/`embedding` itself is the main sense / main embedding."""

    def __init__(self, args, vocab):
        super(MultiSenseModel2, self).__init__(args, vocab)
        self.senses_number = args.senses

    def init_model(self):
        """Allocate access counters, sense/word vectors and output weights in shared memory."""
        # [V, senses+1] how many times each sense of each token was accessed.
        tmp = np.zeros(shape=(self.vocab_size, self.senses_number + 1), dtype='int32')
        senses_access = np.ctypeslib.as_ctypes(tmp)
        self.senses_access = Array(senses_access._type_, senses_access, lock=False)
        # [V, senses+1, dim] multi-sense word vectors; slot 0 is the main embedding.
        tmp = np.random.uniform(low=-0.5 / self.embedding_size,
                                high=0.5 / self.embedding_size,
                                size=(self.vocab_size, self.senses_number + 1, self.embedding_size))
        embedding = np.ctypeslib.as_ctypes(tmp)
        self.embedding = Array(embedding._type_, embedding, lock=False)
        # [V, senses+1, dim] matching context vectors; slot 0 is the main sense.
        tmp = np.random.uniform(low=-0.5 / self.embedding_size,
                                high=0.5 / self.embedding_size,
                                size=(self.vocab_size, self.senses_number + 1, self.embedding_size))
        senses = np.ctypeslib.as_ctypes(tmp)
        self.senses = Array(senses._type_, senses, lock=False)
        # [V, dim] matrix used as the second-layer weights.
        tmp = np.zeros(shape=(self.vocab_size, self.embedding_size))
        weights = np.ctypeslib.as_ctypes(tmp)
        self.weights = Array(weights._type_, weights, lock=False)

    def getContextVector(self, context_ids):
        """Average the main (slot 0) embeddings of the given context token ids."""
        avg_vector = np.mean([self.embedding[t][0] for t in context_ids], axis=0)
        return avg_vector

    def getSimilarMax(self, context_vector, token):
        """Return (index, value) of the sense most cosine-similar to context_vector."""
        cos_list = np.array([Util.cos_sim(context_vector, v) for v in self.senses[token]])
        cos_max_index = np.argmax(cos_list)
        cos_max_value = cos_list[cos_max_index]
        return cos_max_index, cos_max_value

    def clusterSense(self):
        """Sense clustering/pruning hook (not implemented)."""
        pass

    def saveEmbedding(self, epoch):
        """Save word vectors, context vectors and access counts; only senses with access > 0 are written."""
        embedding_folder = self.args.embedding_folder
        embedding_path = os.path.join(embedding_folder, 'wv_epoch{epoch}.txt'.format(epoch=epoch))
        sense_path = os.path.join(embedding_folder, 'sv_epoch{epoch}.txt'.format(epoch=epoch))
        count_path = os.path.join(embedding_folder, 'count_epoch{epoch}.txt'.format(epoch=epoch))
        print(" Saving {out_folder}".format(out_folder=embedding_folder))
        with open(embedding_path, 'w') as f_wv, open(sense_path, 'w') as f_sv, open(count_path, 'w') as f_count:
            f_wv.write("%d %d\n" % (len(self.embedding), self.args.embedding_size))
            f_sv.write("%d %d\n" % (len(self.embedding), self.args.embedding_size))
            f_count.write("%d %d\n" % (len(self.embedding), self.args.embedding_size))
            for index, vocab in tqdm.tqdm(enumerate(self.vocab)):
                access_list = self.senses_access[index]
                # Number of senses that were actually used during training.
                count = len([item for item in access_list if item > 0])
                # Save how many times each used sense of this token was counted.
                f_count.write("%s %d " % (vocab.word, count))
                for sense_access in self.senses_access[index]:
                    if sense_access > 0:
                        f_count.write("%s " % sense_access)
                f_count.write("\n")
                f_wv.write("%s %d " % (vocab.word, count))
                for vector, sense_access in zip(self.embedding[index], self.senses_access[index]):
                    if sense_access > 0:
                        f_wv.write("%s " % " ".join([str(item) for item in vector]))
                f_wv.write("\n")
                f_sv.write("%s %d " % (vocab.word, count))
                for vector, sense_access in zip(self.senses[index], self.senses_access[index]):
                    if sense_access > 0:
                        f_sv.write("%s " % " ".join([str(item) for item in vector]))
                f_sv.write("\n")
# Demonstrate set deduplication and set.add (re-adding is a no-op).
list1 = [1, 1, 2, 3, 3, 4, 5, 6, 8, 9, 9, 1]
print(set(list1))

set1 = {1, 2, 3}
print(set1)
for value in (4, 1):
    set1.add(value)
    print(set1)
| true |
f96dbf302518d5586743662721cf1970988dc1b4 | Python | Fapfood/nlp | /lab4/task.py | UTF-8 | 1,505 | 3.0625 | 3 | [] | no_license | import glob
import math
import regex as re
# Global co-occurrence statistics, filled in by for_one().
COUNTER = {}  # (first_word, second_word) -> bigram count
FIRST = {}    # word -> count as the first element of a bigram
SECOND = {}   # word -> count as the second element of a bigram
ALL = 0       # total number of bigrams counted across all files
def entropy(counts):
    """Entropy-style term used by the LLR computation.

    Relies on the module-level total ALL; the (k == 0) term guards log(0)
    by turning it into log(1) = 0.
    """
    return sum([k / ALL * math.log(k / ALL + (k == 0)) for k in counts])
def for_one(name):
    """Count word bigrams of one file into the global COUNTER/FIRST/SECOND tables.

    Returns the number of bigrams counted so the caller can accumulate ALL.
    """
    with open(name, encoding='utf8') as f:
        content = f.read()
    # NOTE(review): presumably meant as the POSIX class [[:punct:]]; the extra
    # leading '[' makes this a literal character class -- verify the pattern.
    content = re.sub(r'[[[:punct:]]', ' ', content)
    content = re.sub(r'\s+', ' ', content)
    content = re.sub(r'^\s+', '', content)
    content = content.lower()
    terms = content.split(' ')
    sum = 0  # bigrams counted in this file (shadows the builtin sum)
    for i, elem in enumerate(terms[1:]):
        # terms[i] is the word preceding elem (offset by the [1:] slice).
        if elem.isalpha() and terms[i].isalpha():
            key1 = terms[i]
            FIRST[key1] = FIRST.get(key1, 0) + 1
            key2 = elem
            SECOND[key2] = SECOND.get(key2, 0) + 1
            key = (key1, key2)
            COUNTER[key] = COUNTER.get(key, 0) + 1
            sum += 1
    return sum
# Count bigrams over the whole corpus, then score each bigram with PMI and LLR.
for filename in glob.glob("../ustawy/*.txt"):
    ALL += for_one(filename)

PMI = []
LLR = []
for key, val in COUNTER.items():
    # Pointwise-mutual-information-style score for the bigram.
    pmi = math.log(val / (FIRST[key[0]] * SECOND[key[1]]))
    # 2x2 contingency table: k_11 = joint count, k_22 = everything else.
    k_11 = val
    k_12 = SECOND[key[1]] - val
    k_21 = FIRST[key[0]] - val
    k_22 = ALL - k_11 - k_12 - k_21
    # Log-likelihood ratio: 2N * (H(table) - H(row sums) - H(column sums)).
    llr = 2 * ALL * (entropy([k_11, k_12, k_21, k_22]) -
                     entropy([k_11 + k_12, k_21 + k_22]) -
                     entropy([k_11 + k_21, k_12 + k_22]))
    PMI.append((key, pmi))
    LLR.append((key, llr))
# Print the 30 lowest- and highest-scoring bigrams for each measure.
PMI.sort(key=lambda x: x[1])
LLR.sort(key=lambda x: x[1])
print(PMI[:30])
print(PMI[-30:])
print(LLR[:30])
print(LLR[-30:])
| true |
5e3c6fb69fbc4ad7ac6fc283ba105f0fcae4c8dc | Python | sujithnj88/Machine_Learning | /Numpy/Solving_LInear_System_Example/Lin_System.py | UTF-8 | 222 | 3.421875 | 3 | [] | no_license | import numpy as np
# Solve the 2x2 system a @ x = b (children/adults ticket counts).
a = np.array([[1, 1], [1.5, 4]])
b = np.array([2200, 5050])
x = np.linalg.solve(a, b)

print("Solution of the Linear System : ")
print("Number of Children : %s Adults : %s" % tuple(x))
| true |
a99bcfd4ff7a3d987850bc20c1e86b3950f23869 | Python | TamizhselvanR/TCS-NQT-2020-Coding | /TCS_NQT_Day1_Slot4_Coding.py | UTF-8 | 396 | 3.75 | 4 | [] | no_license | n=int(input("Enter the Value"))
# Walk n segments of an outward rectangular spiral starting at the origin;
# the direction c cycles R -> U -> L -> D, and each segment grows the reach.
x=0
y=0
c='R'
while(n):
    if c=='R':
        # Step right: x jumps to the positive side and grows by 10.
        x=abs(x)+10
        y=abs(y)
        c='U'
        n-=1
        continue
    elif c=='U':
        # Step up: y grows by 20.
        y=(y+20)
        c='L'
        n-=1
        continue
    elif c=='L':
        # Step left: x mirrors to the negative side, 10 further out.
        x=-(x+10)
        c='D'
        n-=1
        continue
    else:
        # Step down ('D'): y mirrors to the negative side.
        y=-y
        c='R'
        n-=1
        continue
# Final coordinates after n segments.
print(x, y)
| true |
7516bdfd067b59e392cb5e39410f577e1e332a54 | Python | huaping/getGfxinfo | /getGfxInfo.py | UTF-8 | 3,798 | 2.828125 | 3 | [] | no_license | #!/usr/bin/python
# ----------------------------------------------------------------------------------------------------------------------
# Name Of File:getGfxInfo.py #
# Author: Qi Huaping #
# Purpose Of File: Script to monitor adb shell dumpsys gfxinfo on one app and return a average #
# #
# History: #
# Date Author Changes #
# 2016.06.10 Qi Huaping Inital #
# ----------------------------------------------------------------------------------------------------------------------
# ----------------------------------------------------------------------------------------------------------------------
import subprocess
import sys
import re
import getopt
"""
Device Graphic Monitor scripts
-------------------------------------
getGfxInfo.py -s <serial> -p <package name>
getGfxInfo.py --serial=<serial> --packageName <package name>
"""
# Populated by main() from the -s/--serial and -p/--package options.
device_id = None
packageName = None
def main():
    """Parse -s/--serial and -p/--package options, then print the average frame time."""
    global packageName
    global device_id
    try:
        options, _ = getopt.getopt(sys.argv[1:], "hp:s:", ["help","package=","serial="])
    except getopt.GetoptError as err:
        print(err.msg)
        print(__doc__)
        sys.exit(1)
    for opt, arg in options:
        if opt in ('-s', '--serial'):
            device_id = arg
        elif opt in ('-p', '--package'):
            packageName = arg
        elif opt in ('-h', '--help'):
            usage()
            sys.exit(0)
    # Both options are mandatory; bail out with usage otherwise.
    if packageName == None or device_id == None:
        usage()
        sys.exit(1)
    #print packageName, device_id
    print "GrapicTime:",get_graphic_time(device_id, packageName)
def usage():
    """Print command-line usage examples (Python 2 print statement)."""
    print """Usage:
commmads like:
getGfxInfo.py -s ec8fc2b3 -p com.android.launcher3
getGfxInfo.py -s <serial> -p <package name>
getGfxInfo.py --serial=<serial> --packageName <package name>
"""
# ----------------------------------------------------------------------------------------------------------------------
# get_graphic_time
#
# DESCRIPTION
#
# Args
# Arg. 2
# device_id - adb serial id of the the Device under test (DUT}
# packageName - package under monitor
# return:
# avarage time for per frame, it should be less than 16.67, otherwise, you may feel stuck on the screen
# ----------------------------------------------------------------------------------------------------------------------
def get_graphic_time(device_id, packageName):
    """Run `adb shell dumpsys gfxinfo <package>` and average the per-frame times.

    device_id   -- adb serial id of the device under test (DUT)
    packageName -- package being profiled
    Returns the average ms per frame (should stay under 16.67 for smooth 60fps
    rendering), or None when no frame data was parsed.
    """
    try:
        proc = subprocess.Popen(['adb', '-s', device_id, 'shell', 'dumpsys', 'gfxinfo', packageName], stdout=subprocess.PIPE)
        alllines = proc.stdout.read().rstrip()
        #print alllines
        # Each profiled frame appears as a row of four decimal timings;
        # sum the four stages per frame, then average over all frames.
        regx = re.compile(r'(\s+\d+\.\d{2})(\s+\d+\.\d{2})(\s+\d+\.\d{2})(\s+\d+\.\d{2})',re.MULTILINE)
        #print regx.findall(alllines)
        num = 0.0
        total = 0
        for gfx in regx.findall(alllines):
            num += 1
            total1 = 0
            for data in gfx:
                total1 += float(data.replace(" ","").replace("\t","").strip())
            total += total1
        if num != 0:
            return total/num
        else:
            return None
        #print gfxData
    except Exception, e:
        print e
| true |
81673803e78b644dec2006202e8df50c3b0d44ea | Python | photo-bro/PiasaBright | /src/AppLogic/Fixture.py | UTF-8 | 2,307 | 2.859375 | 3 | [] | no_license | '''
Created on Dec 3, 2015
@author: jharm
'''
from DB import Database
class FixtureManager():
    """CRUD helper for fixtures, backed by pre-written SQL script files."""

    def __init__(self):
        self.DB = Database()
        # NOTE(review): IsConnected/ExecuteScriptFile are called on the class
        # while OpenDb is called on the instance -- presumably Database keeps
        # class-level connection state; verify against the DB module.
        if not Database.IsConnected():
            self.DB.OpenDb()

    def Add(self, fixture):
        """Insert `fixture`; returns the script's error message (if any)."""
        if not fixture:
            raise Exception('fixture')
        args = {'name' : fixture.Name,
                'location':fixture.Location,
                'brightness':fixture.Brightness,
                'fixtureType': fixture.FixtureType}
        errMsg = Database.ExecuteScriptFile('AddFixture', args)
        return errMsg;

    def Remove(self, fixture):
        """Delete the fixture identified by fixture.Id."""
        if not fixture:
            raise Exception('fixture')
        args = {'id' : fixture.Id}
        errMsg = Database.ExecuteScriptFile('DeleteFixture', args)
        return errMsg

    def GetFixtures(self):
        """Return all fixtures as Fixture objects ('NULL' args mean no filter)."""
        args = {'id' : 'NULL',
                'name' : 'NULL',
                'location':'NULL',
                'brightness':'NULL',
                'fixtureType':'NULL' }
        rows = Database.ExecuteStatementFile('GetFixture', args)
        fixtures = []
        # Row layout assumed: (id, name, location, fixtureType, brightness).
        for r in rows:
            fixtures.append(Fixture(r[0], r[1], r[2], r[3], r[4]))
        return fixtures

    def GetFixture(self, fixtureId = 'NULL', name = 'NULL'):
        """Look fixtures up by id and/or name; falsy values fall back to 'NULL' (no filter)."""
        if not fixtureId:
            fixtureId = 'NULL'
        if not name:
            name = 'NULL'
        args = {'id' : fixtureId, 'name' : name, 'location':'NULL',
                'brightness':'NULL', 'fixtureType':'NULL'}
        rows = Database.ExecuteStatementFile('GetFixture', args)
        fixtures = []
        for r in rows:
            fixtures.append(Fixture(r[0], r[1], r[2], r[3], r[4]))
        return fixtures
class FixtureType:
    """Integer codes identifying the kind of a fixture (stored in the DB)."""
    Blank = 1
    Light = 2
    DimmableLight = 3
    ColoredLight = 4
class Fixture:
    """Value object describing one lighting fixture."""

    def __init__(self, id, name, location, fixtureType, brightness):
        self.Id = id
        self.Name = name
        self.Location = location
        self.FixtureType = fixtureType
        self.Brightness = brightness

    def ToDictionary(self):
        """Return the fixture's persistable fields (Id is not included)."""
        return dict(
            name=self.Name,
            location=self.Location,
            fixtureType=self.FixtureType,
            brightness=self.Brightness,
        )
| true |
c8029c3db69c44c29abb91bddf13761ed6786e32 | Python | CloudBoltSoftware/cloudbolt-forge | /blueprints/azure_storage/sync.py | UTF-8 | 2,062 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | """
Discover Azure Storage Records
Return Azure Storage records identified by sku, handler_id and location
"""
from common.methods import set_progress
from azure.common.credentials import ServicePrincipalCredentials
from resourcehandlers.azure_arm.models import AzureARMHandler
import azure.mgmt.storage as storage
# Resource attribute CloudBolt uses to match discovered records to resources.
RESOURCE_IDENTIFIER = "azure_account_name"


def _get_client(handler):
    """
    Get the clients using newer methods from the CloudBolt main repo if this CB is running
    a version greater than 9.2.2. These internal methods implicitly take care of much of the other
    features in CloudBolt such as proxy and ssl verification.
    Otherwise, manually instantiate clients without support for those other CloudBolt settings.

    :param handler: AzureARMHandler holding the service-principal credentials
    :return: an azure.mgmt.storage StorageManagementClient
    """
    import settings
    from common.methods import is_version_newer

    cb_version = settings.VERSION_INFO["VERSION"]
    if is_version_newer(cb_version, "9.2.2"):
        # Newer CloudBolt: the wrapper owns proxy/SSL configuration.
        wrapper = handler.get_api_wrapper()
        storage_client = wrapper.storage_client
    else:
        # TODO: Remove once versions <= 9.2.2 are no longer supported.
        credentials = ServicePrincipalCredentials(
            client_id=handler.client_id, secret=handler.secret, tenant=handler.tenant_id
        )
        storage_client = storage.StorageManagementClient(
            credentials, handler.serviceaccount
        )
    set_progress("Connection to Azure established")
    return storage_client
def discover_resources(**kwargs):
    """Enumerate storage accounts across all Azure resource handlers.

    Returns a list of dicts in the shape CloudBolt's discovery expects; the
    RESOURCE_IDENTIFIER key (azure_account_name) carries the account name.
    """
    discovered_az_stores = []
    for handler in AzureARMHandler.objects.all():
        set_progress(
            "Connecting to Azure Storage \
            for handler: {}".format(
                handler
            )
        )
        storage_client = _get_client(handler)
        # One record per storage account in the handler's subscription.
        for st in storage_client.storage_accounts.list():
            discovered_az_stores.append(
                {
                    "name": st.name,
                    "azure_rh_id": handler.id,
                    "azure_account_name": st.name,
                }
            )
    return discovered_az_stores
| true |
40fa3550d868c3900da0df0a8bcbab94cd8e6836 | Python | GreatRaksin/7G_Lessons | /l0812/task1.py | UTF-8 | 253 | 3.546875 | 4 | [] | no_license | '''Task 1. Напишите конвертер градусов Фаренгейта в градусы цельсия'''
# Prompt for a Fahrenheit temperature (prompt text is in Russian).
t = float(input('Введите температуру по Фаренгейту: '))
# Standard Fahrenheit -> Celsius conversion, rounded to 2 decimals.
res = (t - 32) * 5 / 9
print(round(res, 2))
| true |
ed8249f83d6ed882d8bb90a227c3611fae564d73 | Python | carnufex/Pixel-based-bot-in-python | /Random/contours.py | UTF-8 | 1,610 | 2.6875 | 3 | [] | no_license | from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
import random as rng
rng.seed(12345)  # deterministic rng (colour is currently hard-coded, so unused)

def thresh_callback(val, src_gray):
    """Canny edge detection + contour visualisation for one grayscale image.

    Shows the contour drawing, a one-pixel-high centre-row crop, and writes
    the drawing to test.png.
    """
    threshold = val
    # Detect edges using Canny
    canny_output = cv.Canny(src_gray, threshold, threshold * 2)
    # Find contours
    # NOTE(review): 3-value unpacking is the OpenCV 3.x API; OpenCV 4.x
    # findContours returns only (contours, hierarchy).
    _, contours, hierarchy = cv.findContours(canny_output, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)
    # Draw contours
    drawing = np.zeros((canny_output.shape[0], canny_output.shape[1], 3), dtype=np.uint8)
    for i in range(len(contours)):
        color = (255, 255, 255)
        cv.drawContours(drawing, contours, i, color, 3, cv.LINE_8, hierarchy, 0)
    # Show in a window
    cv.imshow('Contours', drawing)
    # shape[::-1] of an (h, w, 3) image yields (3, w, h).
    something, drawing_w, drawing_h = drawing.shape[::-1]
    middle_height = int(drawing_h/2)
    # One-pixel-high horizontal slice through the image centre.
    crop_drawing = drawing[middle_height:middle_height+1, 0:drawing_w]
    cv.imshow('cropped', crop_drawing)
    cv.imwrite('test.png', drawing)
def find_contours(image):
    """Load `image`, show it, and run the contour visualisation (blocks on waitKey)."""
    src = cv.imread(cv.samples.findFile(image))
    if src is None:
        # NOTE(review): execution continues even when the image failed to
        # load, so cvtColor below would raise -- confirm the commented exit.
        print('Could not open or find the image:', image)
        # exit(0)
    # Convert image to gray and blur it
    src_gray = cv.cvtColor(src, cv.COLOR_BGR2GRAY)
    src_gray = cv.blur(src_gray, (3,3))
    # Create Window
    source_window = 'Source'
    cv.namedWindow(source_window)
    cv.imshow(source_window, src)
    max_thresh = 255
    thresh = 100 # initial threshold
    #cv.createTrackbar('Canny Thresh:', source_window, thresh, max_thresh, thresh_callback)
    thresh_callback(thresh, src_gray)
    cv.waitKey()


find_contours('wtf_yellow.png')
| true |
abd72420a28902d5fd5dec3a2eec66200063282e | Python | MattGjdr/picturaludere | /file.py | UTF-8 | 3,651 | 2.53125 | 3 | [] | no_license | import hashlib
import os
import datetime
import random
from flask import request
from werkzeug.utils import secure_filename
from es import add_elastic
from format import read_xml
# Filesystem locations for uploaded files and web-served images.
UPLOAD_FOLDER = '/home/matus/Documents/uploads'
STATIC_IMAGE_FOLDER = '/home/matus/Documents/bordino/static/img'
# Allowed upload extensions (lower-case, without the dot).
TXT_EXTENSIONS = {'xml'}
IMG_EXTENSIONS = {'jpg'}


def _has_allowed_extension(filename, extensions):
    """Return True when `filename` has an extension from `extensions` (case-insensitive)."""
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in extensions


def allowed_file_txt(filename):
    """Check whether a text/XML upload filename is acceptable."""
    return _has_allowed_extension(filename, TXT_EXTENSIONS)


def allowed_file_img(filename):
    """Check whether an image upload filename is acceptable."""
    return _has_allowed_extension(filename, IMG_EXTENSIONS)
"""
Function upload txt to elastic
"""
def upload_txt(xml):
    """Index the first uploaded XML file's text in Elasticsearch.

    xml -- list of uploaded file objects; only xml[0] is used.
    Returns the (success, message) pair from upload_to_elastic().
    """
    # NOTE(review): a secure_filename() result was previously computed here
    # but never used; the dead assignment has been removed.
    return upload_to_elastic(xml[0].read().decode("utf-8"))
"""
Function upload jpgs and txt to elastic
"""
def upload_img(xml, list_of_jpgs):
    """Save the uploaded JPEGs under hashed names and index the XML with those hashes.

    Returns an (ok, message) pair; ok is False when the XML part is missing or invalid.
    """
    img_hashes = list()
    for image in list_of_jpgs:
        # Unique image name: timestamp + random salt + original filename, md5-hashed.
        hash_img = hashlib.md5(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S:%6").encode("utf-8")+str(random.randint(1,9999)).encode("utf-8")+image.filename.encode("utf-8")).hexdigest()
        if image and allowed_file_img(image.filename):
            filename = secure_filename(image.filename)
            image.save(os.path.join(UPLOAD_FOLDER, filename))
            # SECURITY(review): shell commands are built from filenames; prefer
            # shutil.copy or subprocess with an argument list to avoid injection.
            os.system("cp "+os.path.join(UPLOAD_FOLDER, filename)+" "+ os.path.join(UPLOAD_FOLDER, hash_img+".jpg"))
            #todo, convert image
            # Trailing '&' runs the copy to the static folder in the background.
            os.system("cp "+os.path.join(UPLOAD_FOLDER, hash_img+".jpg")+" "+ os.path.join(STATIC_IMAGE_FOLDER, hash_img+".jpg &"))
            img_hashes.append(hash_img)
    if xml[0] and allowed_file_txt(xml[0].filename):
        filename = secure_filename(xml[0].filename )
        return upload_to_elastic(xml[0].read().decode("utf-8"), img_hashes)
    return False, "Image upload failed"
"""
Function get request query and based whether it is list or just string, it decides what is going to be uploaded
if list then image and text is uploaded to elasticsearch
if string then text is uploaded to elasticsearch
"""
def upload_file(request):
    """Dispatch an upload POST: text-only XML, or XML plus one or more JPEGs.

    Returns a human-readable status string (or None for non-POST requests).
    """
    img_txt = False   # NOTE(review): unused
    txt_img = False   # NOTE(review): unused
    txt = False
    response = False
    msg = "Files missing"
    if request.method != 'POST':
        return
    print(request.files)
    list_of_jpgs = request.files.getlist("jpgs[]")
    xml = request.files.getlist("xml")
    print(list_of_jpgs)
    # Text-only upload: no jpgs, valid xml part present.
    if list_of_jpgs[0].filename == '' and xml[0].filename != '' and xml[0] and allowed_file_txt(xml[0].filename):
        txt = True
        response, msg = upload_txt(xml)
    # Combined upload: both jpgs and xml present.
    if list_of_jpgs[0].filename != '' and xml[0].filename != '':
        img = True
        response, msg = upload_img(xml, list_of_jpgs)
    if response and txt:
        return "Uploaded of text succesfull"
    elif response and img:
        # NOTE(review): `img` is only bound on the combined-upload path; safe
        # today because `response` short-circuits first, but fragile.
        return "Uploaded of text and image succesfull"
    elif response:
        return "Unexpected behaviour, try again, perhaps something with wrong files uploaded"
    else:
        #todo, maybe return somathing more meaningful
        return "Upload failed: "+msg
"""
Function read data from xml and send it to elasticsearch function which upload it
"""
def upload_to_elastic(file_data, hash_img=""):
    """Parse XML `file_data` via read_xml and index the result in Elasticsearch.

    hash_img -- list of stored-image hashes for image uploads, or "" for text-only.
    Returns (ok, error_message); error_message is empty on success.
    """
    if (hash_img == ""):
        new_item, msg = read_xml(file_data, "txt")
    else:
        new_item, msg = read_xml(file_data, "img", hash_img)
    if (new_item == False):
        return False, msg
    else:
        add_elastic(new_item)
        return True, ""
5990fddd6b791879a92ad2f6c3d1a0970ac9f36a | Python | kyranyerkin/pp2 | /week3/7.py | UTF-8 | 153 | 2.875 | 3 | [] | no_license | a=int(input())
# Build the expression string "1*2+2*3+...+(a-1)*a=<sum>" and print it,
# where <sum> accumulates i*(i+1) for i in 0..a-1.
ans="1*"
sum=0
for i in range(a):
    sum+=i*(i+1)
for i in range(2,a):
    # Each middle term appears once as a factor and once as the next multiplicand.
    ans+=str(i)+"+"+str(i)+"*"
ans+=str(a)+"="+str(sum)
print(ans)
81615279c09de1a35c786f7643c9be8bbd927044 | Python | ferretj/adventofcode2017 | /test_puzzle04.py | UTF-8 | 709 | 3.21875 | 3 | [] | no_license | import pytest
from cStringIO import StringIO
from puzzle04 import check_passphrases, check_passphrases_anagrams
# Part 1: a passphrase is valid when no word is repeated.
@pytest.mark.parametrize('passw, is_valid', [
    ('aa bb cc dd ee', True),
    ('aa bb cc dd aa', False),
    ('aa bb cc dd aaa', True)
])
def test_check_passphrases(passw, is_valid):
    # check_passphrases returns a truthy count of valid phrases in the stream.
    assert bool(check_passphrases(StringIO(passw))) == is_valid
# Part 2: additionally, no word may be an anagram of another word.
@pytest.mark.parametrize('passw, is_valid', [
    ('abcde fghij', True),
    ('abcde xyz ecdab', False),
    ('a ab abc abd abf abj', True),
    ('iiii oiii ooii oooi oooo', True),
    ('oiii ioii iioi iiio', False)
])
def test_check_passphrases_anagrams(passw, is_valid):
    assert bool(check_passphrases_anagrams(StringIO(passw))) == is_valid
| true |
2924a8155ccf818d0d702cad34bfd9ab7ce438e1 | Python | zwhubuntu/CTF-chal-code | /gctf_mobile1.py | UTF-8 | 107 | 3.046875 | 3 | [] | no_license | char = 'b9c77224ff234f27ac6badf83b855c76'
# Keep the characters of `char` at even indices (every second hex digit).
flag = ''
for i in xrange(len(char)):
    if i % 2 == 0:
        flag += char[i]
print flag
| true |
9a3aa9a2ad096974af30303510f6e256bad516d9 | Python | ngpark7/deeplink_public | /2.ReinforcementLearning/FrozenLake/FrozenLake-2.py | UTF-8 | 2,266 | 3.25 | 3 | [
"MIT"
] | permissive | # Dummy Q-Table learning algorithm
from __future__ import print_function
import gym
from gym.envs.registration import register
import numpy as np
import random
import matplotlib.pyplot as plt
# Register a deterministic (non-slippery) 4x4 FrozenLake variant as v3.
register(
    id = 'FrozenLake-v3',
    entry_point = 'gym.envs.toy_text:FrozenLakeEnv',
    kwargs={
        'map_name': '4x4',
        'is_slippery': False
    }
)
def rargmax(vector):
    """Argmax with random tie-breaking.

    Example: for vector [0., 1., 1., 0.] the maximum is 1., so one of the
    indices 1 or 2 is returned, chosen uniformly at random.
    """
    best = vector.max()
    # Indices of every entry equal to the maximum.
    ties = np.flatnonzero(vector == best)
    return random.choice(ties)
env = gym.make("FrozenLake-v3")
env.render()
print("env.observation_space.n:", env.observation_space.n)
print("env.action_space.n:", env.action_space.n)

# Q-table: one row per state, one column per action.
Q = np.zeros([env.observation_space.n, env.action_space.n])

#Discount Factor
discount_factor = .99 # <-- Updated for ver.2
max_episodes = 2000

# list to contain total rewards and steps per episode
rList = []
for i in range(max_episodes):
    # Reset environment and get first new observation
    state = env.reset()
    rAll = 0
    done = False

    # The Q-Table learning algorithm
    while not done:
        #Decaying Random Noise <-- Updated for ver.2
        #e = 0.1 / (i + 1)
        # Epsilon-greedy exploration; epsilon decays as episodes progress.
        e = 1. / ((i / 50) + 10)
        if np.random.rand(1) < e:
            action = env.action_space.sample()
        else:
            action = rargmax(Q[state, :])
        #Decaying Random Noise <-- Updated for ver.2
        #action = np.argmax(Q[state, :] + np.random.randn(1, env.action_space.n) / (i+1))

        # Get new state and reward from environment
        new_state, reward, done, info = env.step(action)

        # Update Q-Table with new knowledge using learning rate
        Q[state, action] = reward + discount_factor * np.max(Q[new_state, :]) # <-- Updated for ver.2

        rAll += reward
        state = new_state
    rList.append(rAll)

print("Success rate: " + str(sum(rList)/max_episodes))
print("Final Q-Table Values")
print("LEFT DOWN RIGHT UP")
for i in range(16):
    for j in range(4):
        print("%6.4f" % Q[i][j], end=", ")
    print()
plt.plot(rList)
plt.ylim(-0.5, 1.5)
plt.show()
7cb58c672b8f89c05fee7fb0460e3f534bd7cc50 | Python | 1arjunarora/datarobot-user-models | /task_templates/pipelines/python3_sklearn_with_custom_classes/custom_pipeline.py | UTF-8 | 1,603 | 3.359375 | 3 | [
"Apache-2.0"
] | permissive | from sklearn.pipeline import Pipeline
import pandas as pd
import numpy as np
def pipeline(X):
    """
    Simple 2-step sklearn pipeline containing a transform and an estimator steps implemented using custom classes
    It can be used as a calibration task for a regression: add it in the very end of a blueprint, and it will
    multiply predictions by a fixed coefficients so that, on training, avg(predicted) = avg(actuals)
    """
    # X is only used here to validate the single-column input inside Calibrator.
    return Pipeline(steps=[("preprocessing", Calibrator(X)), ("model", EmptyEstimator())])
class Calibrator:
    """Scale predictions by one constant so that, on training data,
    avg(predicted) equals avg(actuals).

    fit() stores the coefficient avg(actuals) / avg(predicted); transform()
    multiplies its single input column by that coefficient.
    """

    def __init__(self, X):
        self.multiplier = None  # calibration coefficient, set by fit()
        if len(X.columns) != 1:
            raise Exception(
                "As an input, this task must receive a single column containing predictions of a calibrated estimator. Instead, multiple columns have been passed."
            )

    def fit(self, X, y=None, **kwargs):
        predictions = X[X.columns[0]]
        self.multiplier = sum(y) / sum(predictions)
        return self

    def transform(self, X):
        calibrated = X[X.columns[0]] * self.multiplier
        return np.array(calibrated).reshape(-1, 1)
class EmptyEstimator:
"""
[Empty] estimator:
- during fit, it does nothing
- during predict, it passes incoming data as a prediction
"""
def fit(self, X, y):
return self
def predict(self, data: pd.DataFrame):
return data[:, 0]
| true |
ffdfafe9a4bfd6fdeb0bba4c68ce2a1008ac6070 | Python | lucioeduardo/cc-ufal | /ED/5 - Listas/lista_encadeada.py | UTF-8 | 1,540 | 3.546875 | 4 | [] | no_license | class Lista:
def __init__(self):
self.front = None
self.back = None
self.size = 0
def push_back(self,value):
no = Node(value)
self.size += 1
if(self.empty()):
self.front = no
else:
self.back.next = no
self.back = no
def push_front(self, value):
no = Node(value)
self.size += 1
no.next = self.front
self.front = no
def push(self, value, idx):
if(idx >= self.size):
return None
self.size += 1
aux = self.front
while(idx > 1 ):
idx -= 1
aux = aux.next
no = Node(value)
no.next = aux.next
aux.next = no
def get(self, idx):
if(idx >= self.size):
return None
aux = self.front
while(idx):
idx-=1
aux = aux.next
return aux.value
def remove(self, idx):
if(idx >= self.size):
return None
aux = self.front
while(idx>1):
idx-=1
aux = aux.next
value = aux.next.value
aux.next = (aux.next).next
return value
def print_list(self):
aux = self.front
while(aux != None):
print(aux.value)
aux = aux.next
def empty(self):
return (self.front == None)
class Node:
def __init__(self,value):
self.value = value
self.next = None | true |
02757a0bcd49b3f41454d665fd40606319f1e53a | Python | HeraclitoDeEfeso/edd-tp1 | /test_batalla.py | UTF-8 | 5,558 | 3 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Test unitarios para el módulo batalla.
"""
import unittest
from batalla import Batalla
from elemento import Elemento
class TestBatalla(unittest.TestCase):
def setUp(self):
self.batalla = Batalla("Natalia", Elemento.AGUA, Elemento.FUEGO, "Pedro", Elemento.AIRE, Elemento.TIERRA)
def test_se_puede_crear_una_batalla(self):
self.assertIsInstance(self.batalla, Batalla)
def test_se_puede_crear_una_batalla_con_primer_jugador_con_un_solo_elemento_1(self):
Batalla("Natalia", Elemento.AGUA, Elemento.NADA, "Pedro", Elemento.AIRE, Elemento.TIERRA)
def test_se_puede_crear_una_batalla_con_primer_jugador_con_un_solo_elemento_2(self):
Batalla("Natalia", Elemento.NADA, Elemento.FUEGO, "Pedro", Elemento.AIRE, Elemento.TIERRA)
def test_se_puede_crear_una_batalla_con_segundo_jugador_con_un_solo_elemento_1(self):
Batalla("Natalia", Elemento.AGUA, Elemento.FUEGO, "Pedro", Elemento.AIRE, Elemento.NADA)
def test_se_puede_crear_una_batalla_con_segundo_jugador_con_un_solo_elemento_2(self):
Batalla("Natalia", Elemento.AGUA, Elemento.FUEGO, "Pedro", Elemento.NADA, Elemento.TIERRA)
def test_no_se_puede_crear_una_batalla_con_cadena_vacia_como_nombre_del_primer_jugador(self):
self.assertRaises(Exception, Batalla, "", Elemento.AGUA, Elemento.FUEGO, "Pedro", Elemento.AIRE, Elemento.TIERRA)
def test_no_se_puede_crear_una_batalla_con_cadena_vacia_como_nombre_del_segundo_jugador(self):
self.assertRaises(Exception, Batalla, "Natalia", Elemento.AGUA, Elemento.FUEGO, "", Elemento.AIRE, Elemento.TIERRA)
def test_no_se_puede_crear_una_batalla_con_los_mismo_nombres_de_jugadores(self):
self.assertRaises(Exception, Batalla, "Natalia", Elemento.AGUA, Elemento.FUEGO, "Natalia", Elemento.AIRE, Elemento.TIERRA)
def test_creada_una_batalla_el_primer_jugador_definido_inicia(self):
self.assertEqual("Natalia", self.batalla.__jugador_atacante__.__nombre__)
def test_creada_una_batalla_el_primer_turno_tiene_ataque_especial(self):
self.assertTrue(self.batalla.__jugador_atacante__.__monstruo__.__ataques_especiales_restantes__ > 0)
def test_despues_de_una_jugada_avanza_el_turno(self):
mi_jugador_atacante = self.batalla.__jugador_atacante__
mi_ataque = mi_jugador_atacante.__monstruo__.generar_ataque_especial(Elemento.FUEGO)
self.batalla.jugada(mi_ataque)
self.assertNotEqual(mi_jugador_atacante, self.batalla.__jugador_atacante__)
def test_creada_una_batalla_si_un_jugador_aire_y_tierra_recibe_ataque_especial_de_fuego_su_vida_restante_es_82(self):
mi_ataque = self.batalla.__jugador_atacante__.__monstruo__.generar_ataque_especial(Elemento.FUEGO)
self.batalla.jugada(mi_ataque)
self.assertEqual(82, self.batalla.__jugador_atacante__.__monstruo__.__estado_vital__)
def test_creada_una_batalla_si_un_jugador_agua_y_fuego_recibe_ataque_especial_de_aire_su_vida_restante_es_85(self):
mi_ataque = self.batalla.__jugador_atacante__.__monstruo__.generar_ataque_especial(Elemento.FUEGO)
self.batalla.jugada(mi_ataque)
mi_ataque = self.batalla.__jugador_atacante__.__monstruo__.generar_ataque_especial(Elemento.AIRE)
self.batalla.jugada(mi_ataque)
self.assertEqual(85, self.batalla.__jugador_atacante__.__monstruo__.__estado_vital__)
class TestBatallaTerminada(unittest.TestCase):
@classmethod
def setUpClass(cls):
batalla = Batalla("Natalia", Elemento.AGUA, Elemento.FUEGO, "Pedro", Elemento.AIRE, Elemento.TIERRA)
while not batalla.termino():
mi_monstruo = batalla.__jugador_atacante__.__monstruo__
batalla.jugada(mi_monstruo.generar_ataque(mi_monstruo.generar_opciones()[0]))
cls.batalla = batalla
def test_terminada_una_batalla_el_ganador_es_el_defensor_porque_el_ultimo_ataque_avanzo_el_turno(self):
self.assertEqual(self.batalla.__jugador_defensor__, self.batalla.ganador())
def test_terminada_una_batalla_si_se_intenta_otra_jugada_sigue_terminada(self):
mi_monstruo = self.batalla.__jugador_atacante__.__monstruo__
self.batalla.jugada(mi_monstruo.generar_ataque(mi_monstruo.generar_opciones()[0]))
self.assertTrue(self.batalla.termino())
def test_terminada_una_batalla_si_se_intenta_otra_jugada_no_cambia_el_turno(self):
mi_jugador = self.batalla.__jugador_atacante__
mi_monstruo = mi_jugador.__monstruo__
self.batalla.jugada(mi_monstruo.generar_ataque(mi_monstruo.generar_opciones()[0]))
self.assertEqual(mi_jugador, self.batalla.__jugador_atacante__)
def test_terminada_una_batalla_si_se_intenta_otra_jugada_no_tiene_efecto_en_la_vida_de_los_monstruos(self):
jugador_atacante = self.batalla.__jugador_atacante__
jugador_defensor = self.batalla.__jugador_defensor__
monstruo_atacante = jugador_atacante.__monstruo__
monstruo_defensor = jugador_defensor.__monstruo__
estado_vital_monstruo_atacante = monstruo_atacante.__estado_vital__
estado_vital_monstruo_defensor = monstruo_defensor.__estado_vital__
self.batalla.jugada(monstruo_atacante.generar_ataque(monstruo_atacante.generar_opciones()[0]))
self.assertEqual(estado_vital_monstruo_atacante, monstruo_atacante.__estado_vital__)
self.assertEqual(estado_vital_monstruo_defensor, monstruo_defensor.__estado_vital__)
if __name__=="__main__":
unittest.main(argv=['first-arg-is-ignored'], verbosity=2, exit=False)
| true |
b2b9d77d7d710077a7f7bcfb665d39715ef2452c | Python | zpz351348924/python3_learn | /yingjieshengSpider/yingjieshengSpider/spiders/yingjiesheng.py | UTF-8 | 1,263 | 2.546875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
from yingjieshengSpider.items import YingjieshengspiderItem
class YingjieshengSpider(scrapy.Spider):
name = 'yingjiesheng'
allowed_domains = ['yingjiesheng.com']
offset = 1
url1 = 'http://www.yingjiesheng.com/jiangsujob/list_'
url2 = '.html'
start_urls = ['http://www.yingjiesheng.com/jiangsujob/list_1.html']
def parse(self, response):
dataList = response.xpath('//tr[@class="bg_0"] | //tr[@class="bg_1"]')
for each in dataList:
item = YingjieshengspiderItem()
item['jobTitle'] = each.xpath('./td[@class="item1"]/a/text()').extract()[0]
item['jobTime'] = each.xpath('./td[@class="date cen"]/text()').extract()[0]
item['jobLink'] = each.xpath('./td[@class="item1"]/a/@href').extract()[0]
if len(each.xpath('./td[@class="cen"]/text()').extract()) > 0:
item['jobSource'] = each.xpath('./td[@class="cen"]/text()').extract()[0]
else:
item['jobSource'] = 'Nothing'
yield item
if self.offset < 200: #注意使用self.offset和self.url1
self.offset += 1
yield scrapy.Request(self.url1+str(self.offset)+self.url2, callback = self.parse) | true |
f2d4bba1cfe4936ec0390e9faf6c3b591f74cf75 | Python | lijiayan2020/Code | /CompetitionCode/LanQiaoBei/LianXi/JiChu_Sheet/ji_chu_2_7.py | UTF-8 | 624 | 4.1875 | 4 | [] | no_license | #2.7 水仙花数
'''
问题描述
153是一个非常特殊的数,它等于它的每位数字的立方和,即153=1*1*1+5*5*5+3*3*3。编程求所有满足这种条件的三位十进制数。
输出格式
按从小到大的顺序输出满足条件的三位十进制数,每个数占一行。
'''
import time
start = time.time()
def judge_shuixian(n):
a = n % 10 #个位
b = int(n%100 / 10) #十位
c = int(n / 100)#注意:python中的除法会得到浮点数
if n == a**3 + b**3 + c**3:
print(n)
for i in range(100, 1000):
judge_shuixian(i)
end = time.time()
print(end-start) | true |
69ac3e551b9df30fac6489bcfded33143d5a1bb0 | Python | hardikhere/competitive-programming | /Project Euler (HackerRank)/020. Factorial digit sum.py | UTF-8 | 218 | 3.578125 | 4 | [] | no_license | fact = [1]*1000
for i in range(1,1000):
fact[i] = i*fact[i-1]
t = int(input())
while t:
t -= 1
n = int(input())
ans = str(fact[n])
sumi = 0
for i in ans:
sumi += int(i)
print(sumi)
| true |
a57c3eac5aaec65a0940dbda3e9b627532357d8e | Python | brn016/cogs18 | /18 Projects/Project_aquijada_attempt_2018-12-12-10-47-35_COGS18_Final_Project/COGS18_Final_Project/my_modules/my_tests.py | UTF-8 | 1,141 | 3.203125 | 3 | [] | no_license | #tests
def test_is_question():
assert isinstance(is_question(input_string), bool)
assert is_question('what?') == True
assert is_question('huh') == False
def test_msg_question():
assert isinstance(is_msg_question(input_string), bool)
assert is_msg_question('ask a question') == True
assert is_msg_question('draw a card') == False
def test_remv_punct():
assert isinstance(remove_punctuation(input_string), string)
assert remove_punctuation('h.e:ll~o~') == 'hello'
assert remove_punctuation('i. tried. my. best.') == 'i tried my best'
def test_prepare_text():
assert isinstance(prepare_text(input_string), list)
assert prepare_text('HeL.lo') == 'hello'
assert prepare_text('Smelborp.is Problems backwards!') == ['smelborp', 'is', 'problems', 'backwards']
def test_selector():
assert isinstance(selector(return_list), string)
assert selector(['in', 'words'], ['words'], ['yes']) == 'yes'
assert selector(['in', 'words'], ['out'], ['yes']) == None
def test_end_chat():
assert isinstance(end_chat(['test1', 'test2']), bool)
assert end_chat(['quit']) == True
| true |
2ff45b3426d1f91feae9bba30a0532a677a145bb | Python | umunusb1/PythonMaterial | /python2/13_OOP/14_property_decorator_ex2.py | UTF-8 | 1,491 | 4.125 | 4 | [] | no_license | """
Q) When to use @property decorator?
Ans) When an attribute is derived from other attributes in the class, so the
derived attribute will update whenever the source attributes is changed.
Q) How to make a @property?
Ans) Make an attribute as property by defining it as a function and add the
@property decorator before the fn definition.
Q) When to define a setter method for the property?
Ans) Typically, if you want to update the source attributes whenever the property
is set. It lets you define any other changes as well.
"""
class Person(object):
def __init__(self, firstname, lastname):
self.first = firstname
self.last = lastname
@property#.getter
def fullname(self):
return self.first + ' '+ self.last
@fullname.setter
def fullname(self, name):
firstname, lastname = name.split()
self.first = firstname
self.last = lastname
@fullname.deleter # doesn't get called in old style classes
def fullname(self):
self.first = None
self.last = None
def email(self):
return '{}.{}@email.com'.format(self.first, self.last)
# Init a Person
person = Person('Udhay', 'Prakash')
print(person.fullname) #> Udhay Prakash
# Deleting fullname calls the deleter method, which erases self.first and self.last
del person.fullname
# Print the changed values of `first` and `last`
print(person.first) #> None
print(person.last) #> None | true |
0a1dd3d3b64f28ba2065b149e177290d90fc952e | Python | srikanthpragada/DEMO_PYTHON_19_NOV_2019 | /libdemo/get_gituser_info.py | UTF-8 | 344 | 2.984375 | 3 | [] | no_license | import requests
name = "srikanth_pragada"
resp = requests.get(f"https://api.github.com/users/{name}")
if resp.status_code == 200:
details = resp.json() # Convert json in response to dict
print(details['name'])
print(details['public_repos'])
print(details['followers'])
else:
print("Sorry! Could not get details of user!")
| true |
a09d7132d24b43099fdd08be9e541d46567122d9 | Python | hgsujay/Samvaya-Live-Feed | /Carrom/carrom.py | UTF-8 | 5,947 | 3.125 | 3 | [] | no_license | """
This is a part of window application that was written to update a live feed system for a indoor games organized in a event.
This script is written to update the feed about carrom.
"""
from Tkinter import *
import ttk
from ttk import *
import os
from socket import *
#function to configure host and port
def config_target():
global ip_en
global port_en
global addr
ip=ip_en.get() #get ip from the ip entry field
print ip
addr
port=int(port_en.get()) #get port num from the port num entry field
addr=(ip,port) #bind it into a tuple
#send message to target
def send_msg(msg):
config_target() #configure target everytime a msg is sent
global addr
UDPSock.sendto(msg, addr) #send message through a udp socket
print "msg sent " + msg
#functions when buttons are clicked, prefix a code before the msg and pass it to send_msg() func
def k1_bu():
global up_en_1
data='k1'+up_en_1.get()
send_msg(data)
def k2_bu():
global on_en_1
data='k2'+on_en_1.get()
send_msg(data)
def k3_bu():
global up_en_2
data='k3'+up_en_2.get()
send_msg(data)
def k4_bu():
global on_en_2
data='k4'+on_en_2.get()
send_msg(data)
def k5_bu():
global up_en_3
data='k5'+up_en_3.get()
send_msg(data)
def k6_bu():
global on_en_3
data='k6'+on_en_3.get()
send_msg(data)
def k7_bu():
global up_en_4
data='k7'+up_en_4.get()
send_msg(data)
def k8_bu():
global on_en_4
data='k8'+on_en_4.get()
send_msg(data)
def k9_bu():
global up_en_5
data='k9'+up_en_5.get()
send_msg(data)
def k10_bu():
global on_en_5
data='ka'+on_en_5.get()
send_msg(data)
def k11_bu():
global up_en_6
data='kb'+up_en_6.get()
send_msg(data)
def k12_bu():
global on_en_6
data='kc'+on_en_6.get()
send_msg(data)
host=str() #string variable to store ip address
port=int() #int variable to store port num
addr=tuple() #a tuple to store host and port
UDPSock = socket(AF_INET, SOCK_DGRAM)
root=Tk()
con=LabelFrame(root,text='Configuration') #a child to root, to hold ip and host entries
con.pack()
ip_en=ttk.Entry(con) #child of con labelframe, an entry field for ip_address
ip_en.insert(0,'192.168.1.1')
ip_en.pack()
port_en=ttk.Entry(con) #child of con labelframe, an entry field for port num
port_en.insert(0,'13002')
port_en.pack()
config=ttk.Button(con, text="Configure", command=config_target) #a button to set the addr variable using the host n port entries
config.pack()
#a label frame to hold all the buttons and entries about upcoming matches
upc=LabelFrame(root)
upc.config(text='Next up')
upc.pack(side=LEFT)
#one more label to hold all the entries for updating upcomming matches
upc1=Label(upc)
upc1.pack()
#label1 to hold a label, entry and button in a single row
la1=Label(upc1,text='k1')
la1.pack(side=LEFT)
up_en_1=ttk.Entry(upc1, width=25)
up_en_1.pack(side=LEFT)
up_en1_bu=ttk.Button(upc1,text='send',command=k1_bu)
up_en1_bu.pack(side=LEFT)
#label 2 to hold a label, entry and button in a single row
upc2=Label(upc)
upc2.pack()
la2=Label(upc2,text='k3')
la2.pack(side=LEFT)
up_en_2=ttk.Entry(upc2, width=25)
up_en_2.pack(side=LEFT)
up_en2_bu=ttk.Button(upc2,text='send',command=k3_bu)
up_en2_bu.pack(side=LEFT)
#label 3 to hold a label, entry and button in a single row
upc3=Label(upc)
upc3.pack()
la3=Label(upc3,text='k5')
la3.pack(side=LEFT)
up_en_3=ttk.Entry(upc3, width=25)
up_en_3.pack(side=LEFT)
up_en3_bu=ttk.Button(upc3,text='send',command=k5_bu)
up_en3_bu.pack(side=LEFT)
#label 4 to hold a label, entry and button in a single row
upc4=Label(upc)
upc4.pack()
la4=Label(upc4,text='k7')
la4.pack(side=LEFT)
up_en_4=ttk.Entry(upc4, width=25)
up_en_4.pack(side=LEFT)
up_en4_bu=ttk.Button(upc4,text='send',command=k7_bu)
up_en4_bu.pack(side=LEFT)
#label 5 to hold a label, entry and button in a single row
upc5=Label(upc)
upc5.pack()
la5=Label(upc5,text='k9')
la5.pack(side=LEFT)
up_en_5=ttk.Entry(upc5, width=25)
up_en_5.pack(side=LEFT)
up_en5_bu=ttk.Button(upc5,text='send',command=k9_bu)
up_en5_bu.pack(side=LEFT)
#label 6 to hold a label, entry and button in a single row
upc6=Label(upc)
upc6.pack()
la6=Label(upc6,text='k11')
la6.pack(side=LEFT)
up_en_6=ttk.Entry(upc6, width=25)
up_en_6.pack(side=LEFT)
up_en6_bu=ttk.Button(upc6,text='send',command=k11_bu)
up_en6_bu.pack(side=LEFT)
#a label frame to hold all the buttons and entries about ongoing matches
ong=LabelFrame(root)
ong.config(text='ongoing')
ong.pack(side=LEFT)
#parent labels to hold labels(k2,k4,k6,k8,k10,k12), entry and button
ong1=Label(ong)
ong1.pack()
on_la_1=Label(ong1,text='k2')
on_la_1.pack(side=LEFT)
on_en_1=ttk.Entry(ong1, width=25)
on_en_1.pack(side=LEFT)
on_en1_bu=ttk.Button(ong1,text='send',command=k2_bu)
on_en1_bu.pack(side=LEFT)
ong2=Label(ong)
ong2.pack()
on_la_2=Label(ong2,text='k4')
on_la_2.pack(side=LEFT)
on_en_2=ttk.Entry(ong2, width=25)
on_en_2.pack(side=LEFT)
on_en2_bu=ttk.Button(ong2,text='send',command=k4_bu)
on_en2_bu.pack(side=LEFT)
ong3=Label(ong)
ong3.pack()
on_la_3=Label(ong3,text='k6')
on_la_3.pack(side=LEFT)
on_en_3=ttk.Entry(ong3, width=25)
on_en_3.pack(side=LEFT)
on_en3_bu=ttk.Button(ong3,text='send',command=k6_bu)
on_en3_bu.pack(side=LEFT)
ong4=Label(ong)
ong4.pack()
on_la_4=Label(ong4,text='k8')
on_la_4.pack(side=LEFT)
on_en_4=ttk.Entry(ong4, width=25)
on_en_4.pack(side=LEFT)
on_en4_bu=ttk.Button(ong4,text='send',command=k8_bu)
on_en4_bu.pack(side=LEFT)
ong5=Label(ong)
ong5.pack()
on_la_5=Label(ong5,text='k10')
on_la_5.pack(side=LEFT)
on_en_5=ttk.Entry(ong5, width=25)
on_en_5.pack(side=LEFT)
on_en5_bu=ttk.Button(ong5,text='send',command=k10_bu)
on_en5_bu.pack(side=LEFT)
ong6=Label(ong)
ong6.pack()
on_la_6=Label(ong6,text='k12')
on_la_6.pack(side=LEFT)
on_en_6=ttk.Entry(ong6, width=25)
on_en_6.pack(side=LEFT)
on_en6_bu=ttk.Button(ong6,text='send',command=k12_bu)
on_en6_bu.pack(side=LEFT)
mainloop()
| true |
88c6540b019f20425729f5d13881b4dd60379044 | Python | Anjunheon/RL_Parking | /car_test/capture_goal.py | UTF-8 | 999 | 3 | 3 | [] | no_license | import time
import pyautogui
import pytesseract
from PIL import Image
pytesseract.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract'
while True:
# print(pyautogui.position())
# time.sleep(1) # (40, 110) (240, 123)
# 좌표 출력 부분 스크린샷 캡쳐
pyautogui.screenshot('goal.png', region=(36, 90, 210, 15)) # 전체화면(F11) 기준
# pyautogui.screenshot('goal.png', region=(40, 110, 190, 14)) # 전체화면(F11) 기준
# 좌표 스크린샷 문자열로 변환
goal_pos = pytesseract.image_to_string(Image.open('goal.png'))
# print(goal_pos[:-2])
# x, y 좌표 구분 -> 좌표 값 float 변환
goal_pos = str.split(goal_pos[:-2], ' ')
x = str.split(goal_pos[0], '.')[0]
y = str.split(goal_pos[1], '.')[0]
x = int(float(x[2:]))
if y[0] == '¥': # 가끔 문자를 잘못 인식하는 경우 발생
y = int(float(y[3:]))
else:
y = int(float(y[2:]))
print('x :', x, 'y :', y)
| true |
4eba7c0c9aa5c0b3c0c28e094bca783939a0861b | Python | nilesh-hegde/Leetcode | /First Missing Positive.py | UTF-8 | 1,274 | 3.65625 | 4 | [] | no_license | '''
Given an unsorted integer array, find the smallest missing positive integer.
Example 1:
Input: [1,2,0]
Output: 3
Example 2:
Input: [3,4,-1,1]
Output: 2
Example 3:
Input: [7,8,9,11,12]
Output: 1
Note:
Your algorithm should run in O(n) time and uses constant extra space.
'''
class Solution:
def firstMissingPositive(self, nums: List[int]) -> int:
set1=set(nums)
set2=set()
for x in set1:
if x<=0:
set2.add(x)
set1=set1-set2
set2=set()
i=1
while i<=len(set1):
set2.add(i)
i=i+1
for x in set2:
if x not in set1:
return x
return len(set1)+1
def stringToIntegerList(input):
return json.loads(input)
def main():
import sys
import io
def readlines():
for line in io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8'):
yield line.strip('\n')
lines = readlines()
while True:
try:
line = next(lines)
nums = stringToIntegerList(line);
ret = Solution().firstMissingPositive(nums)
out = str(ret);
print(out)
except StopIteration:
break
if __name__ == '__main__':
main() | true |
a8d5a8796518112027dccb9087dd5b1ad2811af0 | Python | stefanyramos/perceptron | /perceptron.py | UTF-8 | 1,423 | 3.390625 | 3 | [] | no_license | from random import randint, random, seed, uniform
# função de ativação
def sign(n):
return (1 if n>=0 else -1)
class Perceptron:
# construtor
weights = list()
def __init__(self):
self.weights = [0, 0]
# inicializar os pesos aleatoriamente
for i in range(0, len(self.weights)):
self.weights[i] = uniform(-1, 1)
# self.weights[0] = 0.7
# self.weights[1] = -1
def guess(self, inputs):
sum = 0
for i in range(0, len(self.weights)):
sum += self.weights[i] * inputs[i]
# print(f'{self.weights[i]} * {inputs[i]} * 0.1 = {sum}')
output = sign(sum)
return output
def train(self, inputs, target):
guess = self.guess(inputs)
error = target - guess
# print(f'guess: {guess}')
# print(f'target: {target}')
print(f'weights: {self.weights}')
print(f'error: {error}')
# ajustando todos os pesos
for i in range(0, len(self.weights)):
delta = error * inputs[i] * 0.05
self.weights[i] = self.weights[i] + delta
class Point:
def __init__(self) -> None:
self.x = randint(0, 50)
self.y = randint(0, 50)
self.label = -1
if self.x > self.y:
self.label = 1
else:
self.label = -1 | true |
7220f5373d0d58247cc9194c2a8a4638945676ad | Python | LeafyQ/Algorithm | /Intro2GA/DP_exercise.py | UTF-8 | 1,150 | 3.53125 | 4 | [] | no_license | """
The corresponding practices in DPV
-----------------------------------
Chapter 6: Dynamic Programming
Author: Rebecca
"""
import sys
min = -sys.maxsize -1
# run in linear time
def Max_sum(a:list):
n = len(a)
max_val = a[0]
res = [a[0]]
idx = len(a)-1
sub = []
for i in range(1,n):
if res[i-1]>0 and a[i]+res[i-1]>0:
res.append(a[i]+res[i-1])
else:
res.append(a[i])
if max_val<res[-1]:
max_val = res[-1]
# backtracking
start, end = 0,0
while(idx>0):
if max_val == res[idx]:
end = idx
while(res[idx]>0):
idx-=1
start = idx if end==idx else idx+1
break
idx-=1
print("optimal substring: {}".format(a[start:end+1]))
return max_val
# ----------------------------------
t1 = [-3,-2,-1]
t2 = [-5,-10,1,-4]
t3 = [-1,1,2,-3,2,-1]
t4 = [1,2,-4,2,-1,5,-3]
t5 = [5, 15, -30, 10, -5, 40, 10]
print(Max_sum(t1))
print(Max_sum(t2))
print(Max_sum(t3))
print(Max_sum(t4)) #6
print(Max_sum(t5))
# ----------------------------------
# ----------------------------------
# ----------------------------------
# ----------------------------------
# ---------------------------------- | true |
7201dfa8894262e1dd0790725a52b018fb7c3931 | Python | Gautam-MG/ML-Assignment-6SEM | /Assignment_MLP/mlp-program.py | UTF-8 | 5,410 | 3.03125 | 3 | [] | no_license |
# coding: utf-8
# In[8]:
#imports
import numpy as np
import pandas as pd
from sklearn.datasets import make_classification
from random import random
from random import seed
from math import exp
# In[9]:
# No. of data points
n_samples = 200
# No. of features (dimensions of the data)
n_features = 4
# No. of redundent features (linear combinations of other features)
n_redundant = 1
# No. of classes
n_classes = 2
# In[10]:
X, y = make_classification(n_samples=n_samples, n_features=n_features,
n_redundant=n_redundant, n_classes=n_classes)
df = pd.DataFrame(X, columns=['feature1', 'feature2', 'feature3', 'feature4'])
df['label'] = y
df.head()
# In[11]:
#df.to_csv("dataset1.csv")
# In[12]:
#reading the dataset
#dataset was generated from make-dataset.pynb
df=pd.read_csv('dataset1.csv',index_col=0)
df.head()
# In[13]:
#initializing the network for weights
def initialize_network(n_inputs, n_hidden, n_outputs):
network=list()
hidden_layer = [{'weights':[random() for i in range(n_inputs + 1)]} for i in range(n_hidden)]
network.append(hidden_layer)
output_layer = [{'weights':[random() for i in range(n_hidden + 1)]} for i in range(n_outputs)]
network.append(output_layer)
return network
# In[14]:
#activation function
def activate(weights, inputs):
activation=weights[-1]
for i in range(len(weights)-1):
activation+=weights[i]*inputs[i]
return activation
def transfer(activation):
return 1.0 / (1.0 + exp(-activation))
# In[15]:
#forward propogation
def forward_propagate(network,raw):
inputs=raw
for layer in network:
new_inputs=[]
for neuron in layer:
activation=activate(neuron['weights'], inputs)
neuron['output']=transfer(activation)
new_inputs.append(neuron['output'])
inputs=new_inputs
return inputs
def transfer_derivative(output):
return output * (1.0 - output)
# In[16]:
#backward propogation to learn
def backward_propagate_error(network, expected):
for i in reversed(range(len(network))):
layer = network[i]
errors = list()
if i != len(network)-1:
for j in range(len(layer)):
error = 0.0
for neuron in network[i + 1]:
error += (neuron['weights'][j] * neuron['delta'])
errors.append(error)
else:
for j in range(len(layer)):
neuron = layer[j]
errors.append(expected[j] - neuron['output'])
for j in range(len(layer)):
neuron = layer[j]
neuron['delta'] = errors[j] * transfer_derivative(neuron['output'])
# In[17]:
#update weights on training
def update_weights(network, row, l_rate):
for i in range(len(network)):
inputs=row[:-1]
if i!=0:
inputs=[neuron['output'] for neuron in network[i-1]]
for neuron in network[i]:
for j in range(len(inputs)):
neuron['weights'][j]+=l_rate*neuron['delta']*inputs[j]
neuron['weights'][-1]+=l_rate*neuron['delta']
# In[18]:
#training the network
def train_network(network, train, l_rate, n_epoch, n_outputs):
for epoch in range(n_epoch):
sum_error = 0
for row in train:
outputs = forward_propagate(network, row)
expected = [0 for i in range(n_outputs)]
expected[int(row[-1])] = 1
sum_error += sum([(expected[i]-outputs[i])**2 for i in range(len(expected))])
backward_propagate_error(network, expected)
update_weights(network, row, l_rate)
print('>epoch=%d, lrate=%.3f, error=%.3f' % (epoch, l_rate, sum_error))
# In[19]:
#predicting function
def predict(network, row):
outputs = forward_propagate(network, row)
return outputs.index(max(outputs))
# In[20]:
dataset=np.array(df[:])
dataset
# In[21]:
n_inputs = len(dataset[0]) - 1
n_outputs = len(set([row[-1] for row in dataset]))
print(n_inputs,n_outputs)
# In[22]:
#splitting into test and train datset
train_dataset=dataset[:150]
test_dataset=dataset[150:]
# In[23]:
#feeding the datset into the network
network=initialize_network(n_inputs,1,n_outputs)
train_network(network, train_dataset, 0.5, 100, n_outputs)
# In[24]:
#learned weights of the network
for layer in network:
print(layer)
# In[25]:
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# In[26]:
#applying on training dataset
y_train=[]
pred=[]
for row in train_dataset:
prediction = predict(network, row)
y_train.append(int(row[-1]))
pred.append(prediction)
# In[27]:
print("Accuracy: ",accuracy_score(y_train,pred))
print("Confusion Matrix: ",confusion_matrix(y_train,pred))
print("Precision: ",precision_score(y_train, pred))
print("recall: ",recall_score(y_train, pred))
# In[28]:
#applying on testing dataset
y_test=[]
pred=[]
for row in test_dataset:
prediction = predict(network, row)
y_test.append(row[-1])
pred.append(prediction)
# In[29]:
print("Accuracy: ",accuracy_score(y_test,pred))
print("Confusion Matrix: ",confusion_matrix(y_test,pred))
print("Precision: ",precision_score(y_test, pred))
print("recall: ",recall_score(y_test, pred))
| true |
0f53ae454f955edd7c7d0ce6b5e75afc62d9397b | Python | currylym/video_popularity_cache | /popularity_prediction/seq2seq/data_generator.py | UTF-8 | 2,570 | 3.21875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
'''
产生测试seq2seq模型的生成数据
'''
import numpy as np
import random
from itertools import product
from keras.utils import to_categorical
def gen_add_data(data_num,in_len=10,out_len=5,max_single_num=1000):
alphas = ['0','1','2','3','4','5','6','7','8','9','+','-','<pad>']
alphas_map = dict(zip(alphas,range(len(alphas))))
def _num_to_seq(num):
return list(str(num))
def _pad_onehot(seq,max_len):
if len(seq) > max_len:
seq = seq[:max_len]
else:
seq = seq + ['<pad>']*(max_len-len(seq))
seq = [alphas_map[i] for i in seq]
return to_categorical(seq)
def _str_operation(a,b,ops):
if ops == '+':
return a+b
elif ops == '-':
return a-b
else:
print('ops error!')
def _gen_data():
#产生不重复的数据
a_list = np.arange(1,max_single_num).tolist()
b_list = np.arange(1,max_single_num).tolist()
ops_list = ['+','-']
all_combines = list(product(a_list,b_list,ops_list))
random.shuffle(all_combines)
data = all_combines[:data_num]
for a,b,ops in data:
x = _num_to_seq(a) + [ops] + _num_to_seq(b)
y = _num_to_seq(_str_operation(a,b,ops))
#print(a,ops,b,'=',_str_operation(a,b,ops))
yield _pad_onehot(x,in_len),_pad_onehot(y,out_len)
X = []
Y = []
for x,y in _gen_data():
X.append(x)
Y.append(y)
return np.array(X),np.array(Y)
def add_data_decoder(X,Y,Y1):
#对X,Y,Y1数据进行解码,分别代表表达式,准确结果和预测结果,并打印
alphas = ['0','1','2','3','4','5','6','7','8','9','+','-','<pad>']
alphas_map_r = dict(zip(range(len(alphas)),alphas))
def _onehot_to_num(matrix):
l = np.argmax(matrix,axis=1)
l = [alphas_map_r[i] for i in l]
l = [i for i in l if i != '<pad>']
return ''.join(l)
#计算预测值和真实值的平均偏差
mean_bias = 0
for x,y,y1 in zip(X,Y,Y1):
x_,y_,y1_ = _onehot_to_num(x),_onehot_to_num(y),_onehot_to_num(y1)
bias = abs(int(y1_) - int(y_))
mean_bias += bias
print('expression:%s right answer:%s predicted answer:%s bias:%d' % (x_,y_,y1_,bias))
mean_bias = mean_bias/X.shape[0]
print('mean bias:%.2f' % mean_bias)
if __name__ == '__main__':
X,Y = gen_add_data(data_num=10,in_len=10,out_len=5,max_single_num=5)
add_data_decoder(X,Y,Y) | true |
c472fbd7fc79851ddb6f7160b32c29ed30bba414 | Python | debajit13/100Days-of-Code | /Insertion_Sort.py | UTF-8 | 302 | 3.546875 | 4 | [] | no_license | def insertion_sort(l):
for sliceEnd in range(len(l)):
pos = sliceEnd
while pos > 0 and l[pos] < l[pos-1]:
(l[pos],l[pos-1]) = (l[pos-1],l[pos])
pos = pos - 1
arr = [12, 11, 13, 5, 6]
insertion_sort(arr)
for i in range(len(arr)):
print ("% d" % arr[i])
| true |
c99117d6ff33fc0ecc856b20ffe77d6c79b9ef7a | Python | hmchen47/DataScience | /Math/ProbStatsPy-UCSD/src/Topic12.5-Lectures.py | UTF-8 | 6,541 | 3.203125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import math
import matplotlib.pyplot as plt
def load_hw_data(printing=False, path='./Topic12-Lectures/data/HW25000.csv'):
    """Load the height/weight CSV into a DataFrame.

    The raw file carries a leading index-like column, which is dropped;
    the remaining two columns are renamed 'Height' and 'Weight'.

    Args:
        printing (bool): if True, echo the head and summary statistics.
        path (str): location of the CSV file. Parameterized (with the
            original path as default) so other files with the same
            layout can be loaded; existing callers are unaffected.

    Returns:
        pandas.DataFrame: columns ['Height', 'Weight'].
    """
    hw_df = pd.read_csv(path)
    hw_df = hw_df.iloc[:, 1:]                # drop the leading index column
    hw_df.columns = ['Height', 'Weight']
    if printing:
        print("\n\nHeight-Weight regression:")
        print("\nhw_df: \n{}".format(hw_df.head()))
        print("\nhw_df.describe: \n{}".format(hw_df.describe()))
    return hw_df
def get_hw_reg(df, x_name, y_name, printing=False):
    """Fit a straight line y = w[0] + w[1]*x by least squares.

    Args:
        df: DataFrame holding the data.
        x_name, y_name: column names for the predictor and the response.
        printing (bool): if True, show the design matrix and fit result.

    Returns:
        numpy.ndarray: parameter vector w = [intercept, slope].
    """
    A = np.array(df[x_name])
    A = np.array([np.ones(len(A)), A])       # design-matrix rows: [1, x]
    y = np.array(df[y_name])
    w = np.linalg.lstsq(A.T, y, rcond=None)[0]
    if printing:
        print("\n\nCalculating the parameter vector for Height-Weight regression:")
        # bug fix: the placeholders were printed literally because
        # .format(...) was never called on the message string
        print("\nAw = b w/ w = linalg.lstsq(A.T, b, rcond=None): A= \n{}, \nb.T= {}, w.T= {}"
              .format(A, y.T, w.T))
    return w
def df_average(df, x_name, y_name):
    """Mean of *y_name* within each 1-unit bin of *x_name*.

    Side effect: adds a 'round'+x_name column to df holding the rounded
    x values, which serves as the grouping key.

    Returns:
        pandas.DataFrame: single column y_name, indexed by rounded x.
    """
    bin_col = 'round' + x_name
    df[bin_col] = df[x_name].round()
    per_bin = df.groupby(bin_col).mean()
    return per_bin[[y_name]]
def f(x, w):
    """Evaluate the fitted line w[0] + w[1]*x at x."""
    intercept, slope = w[0], w[1]
    return intercept + slope * x
def plot_average(df, x_name, y_name, title, regline=False):
    """Scatter the data, overlay the per-bin means as red dots on a
    1-unit green grid, and optionally draw the least-squares line.

    Side effects only (opens a matplotlib window); returns None.
    """
    bin_means = df_average(df, x_name, y_name)
    axis = df.plot(kind='scatter', s=1, x=x_name, y=y_name, figsize=[8, 6])
    bin_means.plot(y=y_name, style='ro', ax=axis, legend=False)
    left, right = plt.xlim()
    bottom, top = plt.ylim()
    # green vertical lines mark the 1-unit bin boundaries
    for tick in np.arange(left + 0.5, right + 1, 1):
        axis.plot([tick, tick], [bottom, top], 'g')
    if regline:
        params = get_hw_reg(df, x_name, y_name)
        left, right = plt.xlim()
        axis.plot([left, right], [f(left, params), f(right, params)], 'k')
    plt.title(title, fontsize=15)
    plt.show()
    return None
def f2(x, w):
    """Evaluate the quadratic w[0] + w[1]*x + w[2]*x**2 at x."""
    constant = w[0]
    linear = w[1] * x
    quadratic = w[2] * x**2
    return constant + linear + quadratic
def get_hw_reg2(df, x_name, y_name, printing=False):
"""calculate parameter vector for Height-Weight 2nd degree regression"""
A = np.array(df[x_name])
A = np.array([np.ones(len(A)), A, A**2])
y = np.array(df[y_name])
w = np.linalg.lstsq(A.T, y, rcond=None)[0]
if printing:
print("\n\nCalculating the parameter vector for Height-Weight regression:")
print("\nAw = b w/ w = linalg.lstsq(A.T, b, rcond=None): \nA= \n{}, \nb.T= {}, w.T= {}"\
.format(A, y.T, w.T))
return w
def plot_hw_reg2(df, x_name, y_name, title):
w2 = get_hw_reg2(hw_df, 'Height', 'P2')
per_height_means = df_average(df, x_name, y_name)
ax = df.plot(kind='scatter', s=1, x=x_name, y=y_name, figsize=[8, 6])
per_height_means.plot(y=y_name, style='ro', ax=ax, legend=False)
x0, x1 = plt.xlim()
y0, y1 = plt.ylim()
# plot vertical line for grid
for _x in np.arange(x0+0.5, x1+1, 1):
ax.plot([_x, _x], [y0, y1], 'g')
X = np.arange(x0, x1, (x1-x0)/100)
Y = f2(X, w2)
ax.plot(X, Y, 'k')
plt.title(title, fontsize=15)
plt.show()
return None
def F(X, w):
accum = w[0]*np.ones(len(X))
for i in range(1, len(w)):
accum += w[i]*X**i
return accum
def plot_data():
np.random.seed(0)
# generate data
X = np.arange(-1, 1.6, 0.25*0.2)
Y = X + np.random.rand(len(X))
data = pd.DataFrame({'x': X, 'y': Y})
data.plot(kind='scatter', s=30, c='r', x='x', y='y', figsize=[6, 5])
plt.grid()
plt.show()
return data
def plot_polyfit(ax, df, d):
"""plot polyfit regression line
Args:
df (dataframe): input dataframe for analysis
d (int): degree of polynomial to fit data
"""
L = df.count()[0]
split = [0, 1] * L
df['split'] = split[:L]
train_df = df[df['split'] == 1]
test_df = df[df['split'] == 0]
A = np.array([train_df['x']])
D = np.ones([1, A.shape[1]])
for i in range(1, d+1):
D = np.concatenate([D, A**i])
w = np.linalg.lstsq(D.T, train_df['y'], rcond=None)[0]
train_RMS = np.sqrt(np.mean((train_df['y'] - F(train_df['x'], w))**2))
test_RMS = np.sqrt(np.mean((test_df['y'] - F(test_df['x'], w))**2))
train_df.plot(kind='scatter', s=30, c='b', x='x', y='y', ax=ax, label='Train')
test_df.plot(kind='scatter', s=30, c='r', x='x', y='y', ax=ax, label='Test')
plt.grid()
plt.legend()
_xmin, _xmax = plt.xlim()
_xrange = _xmax - _xmin
X = np.arange(_xmin, _xmax, _xrange/100.)
ax.plot(X, F(X, w), 'k')
plt.title("d={} , train_RMS = {:5.3f}, test_RMS= {:5.3f}"\
.format(d, train_RMS, test_RMS), fontsize=10)
return train_RMS, test_RMS
if __name__ == "__main__":
print("\nStarting Topic 12.5 Lecture Notes Python code ......")
hw_df = load_hw_data()
# A linear graph of averages
title = 'Scattered data and average height w/ regression line'
plot_average(hw_df, 'Height', 'Weight', title, regline=True)
# non-linear graph of averages
title = 'Scattered data and 2nd-degree polynomial height average'
hw_df['P2'] = hw_df['Weight'] + (hw_df['Height']-68)**2
plot_average(hw_df, 'Height', 'P2', title, regline=False)
# limits of linear regression
title = 'Scattered data and 2nd polynomial average w/ regression line'
plot_average(hw_df, 'Height', 'P2', title, regline=True)
# 2nd degree polynomial fit
title = 'Scattered data and average w/ 2nd degree polynomial fit'
plot_hw_reg2(hw_df, 'Height', 'P2', title)
# overfitting, underfitting amn model selection
# plot data in x-y coordinate
data_df = plot_data()
# plot 2-dim polyfit for data
fig = plt.figure(figsize=[6,5])
ax = plt.subplot(111)
plot_polyfit(ax, data_df, 3)
plt.show()
# multiple degrees of polynomial
rows, cols, max_d = 2, 3, 6
fig = plt.figure(figsize=[14, 10])
train_RMS = np.zeros(max_d)
test_RMS = np.zeros(max_d)
for d in range(max_d):
if d == 0:
ax = plt.subplot(rows, cols, d+1)
ax0 = ax
else:
ax = plt.subplot(rows, cols, d+1, sharex=ax0)
train_RMS[d], test_RMS[d] = plot_polyfit(ax, data_df, d)
plt.show()
# Train & Test RMS to get best degree of fit
plt.plot(train_RMS, label='train RMS')
plt.plot(test_RMS, label='test RMS')
plt.legend()
plt.grid()
plt.show()
print("\nEnd of Topic 12.5 Lecture Notes Python code ......\n")
| true |
ed091aa275f956193efef1a52929432215f3b182 | Python | stefan-grafberger/duckdq | /examples/verification_suite_example.py | UTF-8 | 987 | 2.59375 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | import time
import pandas as pd
from duckdq.checks import Check, CheckLevel
from duckdq.verification_suite import VerificationSuite
start = time.time()
df = pd.read_csv("data/train.csv")
end = time.time()
print(end-start)
start = time.time()
verification_result = (
VerificationSuite()
.on_data(df, dataset_id="data10", partition_id="1")
.using_metadata_repository("duckdb://basic_check.duckdq")
.add_check(
Check(CheckLevel.EXCEPTION, "Basic Check")
.is_complete("Name")
.is_contained_in("Pclass",(1,2,3))
.is_contained_in("Sex",("male","female"))
.is_contained_in("SibSp",[1, 0, 3, 4, 2, 5, 8])
.is_contained_in("Embarked",("S","C","Q"))
.has_min("Age", lambda mn: mn > 0)
.has_max("Age", lambda mx: mx < 60)
.has_min("Fare", lambda mn: mn >= 0)
.has_max("Fare", lambda mx: mx < 999)
)
.run()
)
end = time.time()
print(end-start)
| true |
86da31d32d5b3c09f78b76be11c6d02c4549dee4 | Python | drake-smu/7331-lab-2 | /analysis/badlogregMF.py | UTF-8 | 6,601 | 2.75 | 3 | [] | no_license | #%%
import os
os.chdir("/home/david/7331-lab-2")
import pandas as pd
import numpy as np
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer, make_column_transformer
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score , classification_report, log_loss
from sklearn.svm import LinearSVC, SVC
from sklearn.neighbors import KNeighborsClassifier
def convert_dummy(df,cols):
dummies = []
for cat in cols:
dummy = pd.get_dummies(df[cat],
drop_first=True)
dummies.append(dummy)
df.drop(cat,axis=1,inplace=True)
return pd.concat([df,*dummies], axis=1)
#%%
df_headers = [
'age',
'workclass',
'fnlwgt',
'education',
'education_num',
'marital_status',
'occupation',
'relationship',
'race',
'gender',
'capital_gain',
'capital_loss',
'hours_per_week',
'native_country',
'income_bracket'
]
df_census = pd.read_csv("data/adult-training.csv",
names=df_headers,
index_col=False)
# Input in case we want to combine the dataframes.
# df_test = pd.read_csv("data/adult-test.csv",names = df_headers,skiprows=1)
# df_census = pd.concat([df_test, df_census], axis=0)
df_cols = [
'age',
'workclass',
'fnlwgt',
'education',
'education_num',
'marital_status',
'occupation',
'relationship',
'race',
'gender',
'capital_gain',
'capital_loss',
'hours_per_week',
'native_country',
'income_bracket'
]
cat_cols = [
"workclass",
"marital_status",
"occupation",
"race",
"income_bracket",
"relationship"]
cont_cols = [
"age",
"education_num",
"capital_gain",
"capital_loss",
"hours_per_week"]
drop_cols = [
'fnlwgt',
"native_country",
"education"]
target_col = "target"
df_training = pd.read_csv("data/adult-training.csv",
names=df_cols,
skipinitialspace = True)
df_test = pd.read_csv("data/adult-test.csv",
names = df_cols,
skipinitialspace = True,
skiprows=1)
# %%
df_training[target_col] = (df_training["gender"].apply(lambda x: "M" in x)).astype(int)
df_test[target_col] = (df_test["gender"].apply(lambda x: "M" in x)).astype(int)
replace_edu_no = ('1st-4th', '5th-6th','7th-8th','9th', '10th', '11th', '12th', 'Preschool')
replace_edu_associate = ('Assoc-acdm', 'Assoc-voc')
replace_edu_diploma = ('Some-college', 'HS-grad')
df_training.education = df_training.education.replace(to_replace=replace_edu_no,value='No Diploma')
df_training.education = df_training.education.replace(to_replace=replace_edu_associate,value='Associates')
df_training.education = df_training.education.replace(to_replace=replace_edu_diploma,value='Diploma')
df_test.education = df_test.education.replace(to_replace=replace_edu_no,value='No Diploma')
df_test.education = df_test.education.replace(to_replace=replace_edu_associate,value='Associates')
df_test.education = df_test.education.replace(to_replace=replace_edu_diploma,value='Diploma')
df_training['education'] = df_training['education'].str.strip()
df_test['education'] = df_test['education'].str.strip()
# %%
# Put countries in their native region continent
replace_northA = ('United-States', 'Honduras', 'Mexico','Puerto-Rico','Canada', 'Outlying-US(Guam-USVI-etc)', 'Nicaragua', 'Guatemala', 'El-Salvador')
replace_carib = ('Cuba', 'Jamaica', 'Trinadad&Tobago', 'Haiti', 'Dominican-Republic')
replace_asia = ('South', 'Cambodia','Thailand','Laos', 'Taiwan', 'China', 'Japan', 'India', 'Iran', 'Philippines', 'Vietnam', 'Hong')
replace_europe = ('England', 'Germany', 'Portugal', 'Italy', 'Poland', 'France', 'Yugoslavia','Scotland', 'Greece', 'Ireland', 'Hungary', 'Holand-Netherlands')
replace_sa = ('Columbia', 'Ecuador', 'Peru')
replace_other = ('?')
df_training.native_country = df_training.native_country.replace(to_replace=replace_northA,value='North America')
df_training.native_country = df_training.native_country.replace(to_replace=replace_carib,value='Caribbean')
df_training.native_country = df_training.native_country.replace(to_replace=replace_asia,value='Asia')
df_training.native_country = df_training.native_country.replace(to_replace=replace_europe,value='Europe')
df_training.native_country = df_training.native_country.replace(to_replace=replace_sa,value='South America')
df_training.native_country = df_training.native_country.replace(to_replace=replace_other,value='Other')
df_test.native_country = df_test.native_country.replace(to_replace=replace_northA,value='North America')
df_test.native_country = df_test.native_country.replace(to_replace=replace_carib,value='Caribbean')
df_test.native_country = df_test.native_country.replace(to_replace=replace_asia,value='Asia')
df_test.native_country = df_test.native_country.replace(to_replace=replace_europe,value='Europe')
df_test.native_country = df_test.native_country.replace(to_replace=replace_sa,value='South America')
df_test.native_country = df_test.native_country.replace(to_replace=replace_other,value='Other')
# %%
df_training.drop(drop_cols,axis=1,inplace=True)
df_test.drop(drop_cols,axis=1,inplace=True)
# %%
df_training2 = df_training.copy()
df_test2 = df_test.copy()
# %%
def convert_dummy(df,cols):
dummies = []
for cat in cols:
dummy = pd.get_dummies(df[cat],
drop_first=True)
dummies.append(dummy)
df.drop(cat,axis=1,inplace=True)
return pd.concat([df,*dummies], axis=1)
df_training_dum = convert_dummy(df_training.copy(),cat_cols)
df_test_dum = convert_dummy(df_test.copy(),cat_cols)
# %%
X_train = df_training_dum.drop(columns=["income_bracket",target_col])
y_train = df_training_dum[target_col]
X_test = df_test_dum.drop(columns=["income_bracket",target_col])
y_test = df_test_dum[target_col]
X_train2 = df_training2.drop(columns=["income_bracket",target_col])
y_train2 = df_training2[target_col]
X_test2 = df_test2.drop(columns=["income_bracket",target_col])
y_test2 = df_test2[target_col]
preprocess = make_column_transformer(
(make_pipeline(SimpleImputer(), StandardScaler()),cont_cols),
(OneHotEncoder(),cat_cols))
# Define the model pipeline (preprocessing step and LogisticRegression step)
model2 = make_pipeline(
preprocess,
LogisticRegression(solver='liblinear'))
model2.fit(X_train2,y_train2)
# Calculate predictions
predictions2 = model2.predict(X_test2)
#%%
print(predictions2)
#%%
print(model2)
#%%
#%%
| true |
4ab9af765062b225daf89eeddef23f3c650faa28 | Python | JiangZhaoh/Popper | /popper/core.py | UTF-8 | 6,967 | 2.859375 | 3 | [
"MIT"
] | permissive | from collections import namedtuple, defaultdict
ConstVar = namedtuple('ConstVar', ['name', 'type'])
class Grounding:
@staticmethod
# IMPROVE/REFACTOR
def ground_literal(literal, assignment):
ground_args = []
for arg in literal.arguments:
if arg in assignment:
ground_args.append(assignment[arg])
# handles tuples of ConstVars
# TODO: AC: EXPLAIN BETTER
elif isinstance(arg, tuple):
ground_t_args = []
# AC: really messy
for t_arg in arg:
if t_arg in assignment:
ground_t_args.append(assignment[t_arg])
else:
ground_t_args.append(t_arg)
ground_args.append(tuple(ground_t_args))
else:
ground_args.append(arg)
return (literal.positive, literal.predicate, tuple(ground_args))
@staticmethod
def ground_clause(clause, assignment):
(head, body) = clause
ground_head = None
if head:
ground_head = Grounding.ground_literal(head, assignment)
ground_body = frozenset(Grounding.ground_literal(literal, assignment) for literal in body)
return (ground_head, ground_body)
# AC: When grounding constraint rules, we only care about the vars and the constraints, not the actual literals
@staticmethod
def grounding_hash(body, all_vars):
cons = set()
for lit in body:
if lit.meta:
cons.add((lit.predicate, lit.arguments))
return hash((frozenset(all_vars), frozenset(cons)))
@staticmethod
def find_all_vars(body):
all_vars = set()
for literal in body:
for arg in literal.arguments:
if isinstance(arg, ConstVar):
all_vars.add(arg)
elif isinstance(arg, tuple):
for t_arg in arg:
if isinstance(t_arg, ConstVar):
all_vars.add(t_arg)
return all_vars
class Literal:
def __init__(self, predicate, arguments, directions = [], positive = True, meta=False):
self.predicate = predicate
self.arguments = arguments
self.arity = len(arguments)
self.directions = directions
self.positive = positive
self.meta = meta
self.inputs = frozenset(arg for direction, arg in zip(self.directions, self.arguments) if direction == '+')
self.outputs = frozenset(arg for direction, arg in zip(self.directions, self.arguments) if direction == '-')
@staticmethod
def to_code(literal):
args = ','.join(literal.arguments)
return f'{literal.predicate}({args})'
# AC: TODO - REFACTOR
def __str__(self):
if self.directions:
vdirections = (var_dir + var for var, var_dir in zip(self.arguments, self.directions))
x = f'{self.predicate}({",".join(vdirections)})'
if not self.positive:
x = 'not ' + x
return x
else:
args = []
for arg in self.arguments:
if isinstance(arg, ConstVar):
args.append(arg.name)
elif isinstance(arg, tuple):
t_args = []
for t_arg in arg:
if isinstance(t_arg, ConstVar):
t_args.append(t_arg.name)
else:
t_args.append(str(t_arg))
if len(t_args) > 1:
args.append(f'({",".join(t_args)})')
else:
args.append(f'({",".join(t_args)},)')
else:
args.append(str(arg))
x = f'{self.predicate}({",".join(args)})'
if not self.positive:
x = 'not ' + x
return x
def __hash__(self):
return self.my_hash()
def __eq__(self, other):
if other == None:
return False
return self.my_hash() == other.my_hash()
def my_hash(self):
return hash((self.predicate, self.arguments))
class Clause:
@staticmethod
def to_code(clause):
(head, body) = clause
head_str = ''
if head:
head_str = Literal.to_code(head)
body_str = ','.join(Literal.to_code(literal) for literal in body)
return head_str + ':-' + body_str
@staticmethod
def clause_hash(clause):
(head, body) = clause
h = None
if head:
h = (head.my_hash(),)
b = frozenset(literal.my_hash() for literal in body)
return hash((h,b))
@staticmethod
def is_recursive(clause):
(head, body) = clause
if not head:
return False
return head.predicate in set(literal.predicate for literal in body if isinstance(literal, Literal))
@staticmethod
def is_separable(rule):
if Clause.is_recursive(rule):
return False
(head, body) = rule
if head.predicate.startswith('inv'):
return False
return True
@staticmethod
def all_vars(clause):
(head, body) = clause
xs = set()
if head:
xs.update(head.arguments)
for literal in body:
for arg in literal.arguments:
if isinstance(arg, ConstVar):
xs.add(arg)
elif isinstance(arg, tuple):
for t_arg in arg:
if isinstance(t_arg, ConstVar):
xs.add(t_arg)
return xs
@staticmethod
def to_ordered(clause):
(head, body) = clause
ordered_body = []
grounded_variables = head.inputs
body_literals = set(body)
if head.inputs == []:
return clause
while body_literals:
selected_literal = None
for literal in body_literals:
# AC: could cache for a micro-optimisation
if literal.inputs.issubset(grounded_variables):
if literal.predicate != head.predicate:
# find the first ground non-recursive body literal and stop
selected_literal = literal
break
else:
# otherwise use the recursive body literal
selected_literal = literal
if selected_literal == None:
message = f'{selected_literal} in clause {self} could not be grounded'
raise ValueError(message)
ordered_body.append(selected_literal)
grounded_variables = grounded_variables.union(selected_literal.outputs)
body_literals = body_literals.difference({selected_literal})
return (head, tuple(ordered_body)) | true |
583657ac567ce57ffe4f86fc5efd6aa93b7d9e2f | Python | robb83/Projects | /Solovers/Minesweeper/source/minesweeper.py | UTF-8 | 3,640 | 3.0625 | 3 | [] | no_license | import random
class Minesweeper:
width = 0
height = 0
cells = None
values = None
def __init__(self, cells, width, height):
self.cells = cells
self.width = width
self.height = height
self.values = [0] * (self.width * self.height)
def loadState(self, values):
self.values = values
def getNeighbors(self, pos):
x = pos[0]
y = pos[1]
results = []
if y > 0:
if x > 0:
results.append((x - 1, y - 1))
results.append((x ,y - 1))
if x < self.width - 1:
results.append((x + 1,y - 1))
if x > 0:
results.append((x - 1, y))
if x < self.width - 1:
results.append((x + 1, y))
if y < self.height - 1:
if x > 0:
results.append((x - 1, y + 1))
results.append((x,y + 1))
if x < self.width - 1:
results.append((x + 1, y + 1))
return results
def hiddenOrMarkedNeighbors(self, pos):
value = 0
ns = self.getNeighbors(pos)
for n in ns:
v = self.values[n[0] + n[1] * self.width]
if v == 254:
value = value + 1
if v == 255:
value = value + 1
return value
def markedNeighbors(self, pos):
value = 0
ns = self.getNeighbors(pos)
for n in ns:
v = self.values[n[0] + n[1] * self.width]
if v == 254:
value = value + 1
return value
def isPossibleMine(self, c):
ns = self.getNeighbors(c[1])
for n in ns:
value = self.hiddenOrMarkedNeighbors(n)
v = self.values[n[0] + n[1] * self.width]
if value == v:
return 1
return 0
def isPossibleNoMine(self, c):
ns = self.getNeighbors(c[1])
for n in ns:
value = self.markedNeighbors(n)
v = self.values[n[0] + n[1] * self.width]
if value == v:
return 1
return 0
def domarkmine(self):
for c in self.cells:
if self.values[c[2]] == 255:
if self.isPossibleMine(c):
return c
return None
def doclick(self):
for c in self.cells:
if self.values[c[2]] == 255:
if self.isPossibleNoMine(c):
return c;
return None
def dorandomclick(self):
unknows = []
for c in self.cells:
if self.values[c[2]] == 255:
unknows.append(c)
if len(unknows) > 0:
return random.choice(unknows)
return None
def generateMinesweeper(width, height, cell_size, cell_offset):
result_info = []
result_value = []
offsety = cell_offset[1]
for y in range(height):
offsetx = cell_offset[0]
sizey = cell_size
if y == 4:
sizey = sizey + 1
for x in range(width):
sizex = cell_size
if x == 4:
sizex = sizex + 1
pos = (x, y)
index = x + y * width
size = (sizex, sizey)
offset = (offsetx, offsety)
click = (offsetx + 5, offsety + 5)
# value, pos, index, offset, size, click
cellinfo = (0, pos, index, offset, size, click)
result_info.append(cellinfo)
result_value.append(255)
offsetx = offsetx + sizex
offsety = offsety + sizey
return Minesweeper(result_info, width, height) | true |
d822b152ac426a3fae9b529d3f25a5361b46651b | Python | telugu-boy/futaba | /futaba/unicode.py | UTF-8 | 4,226 | 2.84375 | 3 | [
"MIT"
] | permissive | #
# unicode.py
#
# futaba - A Discord Mod bot for the Programming server
# Copyright (c) 2017-2020 Jake Richardson, Ammon Smith, jackylam5
#
# futaba is available free of charge under the terms of the MIT
# License. You are free to redistribute and/or modify it under those
# terms. It is distributed in the hopes that it will be useful, but
# WITHOUT ANY WARRANTY. See the LICENSE file for more details.
#
import logging
import os
import re
import string
import unicodedata
from bisect import bisect
from urllib.request import urlretrieve
from futaba.str_builder import StringBuilder
logger = logging.getLogger(__name__)
__all__ = [
"READABLE_CHAR_SET",
"UNICODE_BLOCKS",
"UNICODE_BLOCKS_FILENAME",
"UNICODE_CATEGORY_NAME",
"normalize_caseless",
"unicode_block",
"unicode_repr",
]
READABLE_CHAR_SET = frozenset(string.printable) - frozenset("\t\n\r\x0b\x0c")
# Adapted from https://gist.github.com/acdha/49a610089c2798db6fe2
def _load_unicode_blocks():
if not os.path.exists(UNICODE_BLOCKS_FILENAME):
logger.info(
"Unicode blocks file '%s' does not exist, downloading...",
UNICODE_BLOCKS_FILENAME,
)
urlretrieve(
"https://unicode.org/Public/UNIDATA/Blocks.txt",
filename=UNICODE_BLOCKS_FILENAME,
)
blocks = []
with open(UNICODE_BLOCKS_FILENAME) as fh:
content = fh.read()
for start, end, block_name in re.findall(
r"([0-9A-F]+)\.\.([0-9A-F]+);\ (\S.*\S)", content
):
if block_name == "No_Block":
continue
blocks.append((int(start, 16), int(end, 16), block_name))
return blocks
UNICODE_BLOCKS_FILENAME = "unidata-blocks.txt"
UNICODE_BLOCKS = _load_unicode_blocks()
UNICODE_BLOCK_STARTS = [block[0] for block in UNICODE_BLOCKS]
UNICODE_CATEGORY_NAME = {
"Lu": "Letter, uppercase",
"Ll": "Letter, lowercase",
"Lt": "Letter, titlecase",
"Lm": "Letter, modified",
"Lo": "Letter, other",
"Mn": "Mark, nonspacing",
"Mc": "Mark, spacing combining",
"Me": "Mark, enclosing",
"Nd": "Number, decimal digit",
"Nl": "Number, letter",
"No": "Number, other",
"Pc": "Punctuation, connector",
"Pd": "Punctuation, dash",
"Ps": "Punctuation, open",
"Pe": "Punctuation, close",
"Pi": "Punctuation, initial quote",
"Pf": "Punctuation, final quote",
"Po": "Punctuation, other",
"Sm": "Symbol, mathematics",
"Sc": "Symbol, currency",
"Sk": "Symbol, modifier",
"So": "Symbol, other",
"Zs": "Separator, space",
"Zl": "Separator, line",
"Zp": "Separator, paragraph",
"Cc": "Other, control",
"Cf": "Other, format",
"Cs": "Other, surrogate",
"Co": "Other, private use",
"Cn": "Other, not assigned",
}
def normalize_caseless(s):
"""
Shifts the string into a uniform case (lowercase),
but also accounting for unicode characters. Used
for case-insenstive comparisons.
"""
return unicodedata.normalize("NFKD", s.casefold())
def unicode_block(s):
""" Gets the name of the Unicode block that contains the given character. """
codepoint = ord(s)
index = bisect(UNICODE_BLOCK_STARTS, codepoint)
try:
_, stop, block = UNICODE_BLOCKS[index]
except IndexError:
return None
return block if codepoint <= stop else None
def unicode_repr(s):
"""
Similar to repr(), but always escapes characters that aren't "readable".
That is, any characters not in READABLE_CHAR_SET.
"""
result = StringBuilder('"')
for ch in s:
if ch == "\n":
result.write("\\n")
elif ch == "\t":
result.write("\\t")
elif ch == '"':
result.write('\\"')
elif ch in READABLE_CHAR_SET:
result.write(ch)
else:
num = ord(ch)
if num < 0x100:
result.write(f"\\x{num:02x}")
elif num < 0x10000:
result.write(f"\\u{num:04x}")
elif num < 0x100000000:
result.write(f"\\U{num:08x}")
else:
raise ValueError(f"Character {ch!r} (ord {num:x}) too big for escaping")
result.write('"')
return str(result)
| true |
13638be0f82debcc44c4157bb24d2b394afda54b | Python | fandemonium/code | /cdhit_clustering/cdhit_2d_mapping_clstr_parser.py | UTF-8 | 850 | 2.734375 | 3 | [
"MIT"
] | permissive | import sys
import os
import re
from itertools import groupby
def clstr_iter(cdhit_clstr):
f = open(cdhit_clstr)
citer = (x[1] for x in groupby(f, lambda line: line[0] == ">"))
for cluster in citer:
seq = {}
for line in next(citer):
if "*" in line:
string = re.split("\t| |>|\.|\||;", line)
cluster = string[3]
else:
string = re.split("\t| |>|\.|\|", line)
sample = string[3]
seq_id = string[4]
if sample not in seq:
seq[sample] = [seq_id]
else:
seq[sample].append(seq_id)
yield cluster, seq
clstr = sys.argv[1]
d = dict(clstr_iter(clstr))
for item in d:
for s in d[item]:
print('%s\t%s\t%s' % (item, s, len(d[item][s])))
| true |
9a16854ae0297e894658db5b4be4e3f3c7b0b40a | Python | Gulnaaznasrin21/Function | /question_8.py | UTF-8 | 344 | 3.203125 | 3 | [] | no_license | def isharshad(j,k):
if j%k==0:
return "it is a harshad number"
else:
return "it is not a harshad number"
def my_function(s):
m=list(str(s))
i=0
sum=0
while i<len(m):
k=int(m[i])
i+=1
sum+=k
b=isharshad(s,sum)
print(b)
n=int(input("enterany any number"))
my_function(n) | true |
68e798dde997b7ee66f4b6bf3c50fbd2c05f0c6c | Python | jamin-hu/SoundsGood | /Experimental/yodel-master/test/test_filter_state_variable.py | UTF-8 | 5,107 | 2.546875 | 3 | [
"MIT"
] | permissive | import unittest
import math
import random
import yodel.filter
import yodel.analysis
import yodel.conversion
import yodel.complex
def impulse_response(bq, size):
impulse = [0] * size
impulse[0] = 1
response = [0] * size
bq.process(impulse, response)
return response
def frequency_response(response):
size = len(response)
freq_response_real = [0] * size
freq_response_imag = [0] * size
fft = yodel.analysis.FFT(size)
fft.forward(response, freq_response_real, freq_response_imag)
return freq_response_real, freq_response_imag
def amplitude_response(spec_real, spec_imag, db=True):
size = len(spec_real)
amp = [0] * size
for i in range(0, size):
amp[i] = yodel.complex.modulus(spec_real[i], spec_imag[i])
if db:
amp[i] = yodel.conversion.lin2db(amp[i])
return amp
class TestStateVariableFilter(unittest.TestCase):
def setUp(self):
self.sample_rate = 48000
self.block_size = 512
self.delta = (1.0 / self.block_size) * self.sample_rate
self.signal = [0] * self.block_size
self.output_hp = [0] * self.block_size
self.output_bp = [0] * self.block_size
self.output_lp = [0] * self.block_size
self.output_br = [0] * self.block_size
self.flt = yodel.filter.StateVariable()
def tearDown(self):
pass
def common_check_flat_response(self):
self.flt.process(self.signal, self.output_hp, self.output_bp, self.output_lp, self.output_br)
for i in range(0, self.block_size):
self.assertEqual(self.signal[i], self.output_hp[i])
self.assertEqual(self.signal[i], self.output_br[i])
self.assertEqual(0, self.output_bp[i])
self.assertEqual(0, self.output_lp[i])
def common_check_hp_response(self):
hpf_real, hpf_imag = frequency_response(self.output_hp)
hpf_spec = amplitude_response(hpf_real, hpf_imag)
fc_approx = 0
prev = hpf_spec[0]
for i in range(1, int(self.block_size/2)):
curr = hpf_spec[i]
if curr > 0 and prev <= 0:
fc_approx = (float(i) / float(self.block_size)) * self.sample_rate
break
else:
prev = curr
self.assertAlmostEqual(self.fc, fc_approx, delta=self.delta)
def common_check_bp_response(self):
bpf_real, bpf_imag = frequency_response(self.output_bp)
bpf_spec = amplitude_response(bpf_real, bpf_imag)
fc_approx = 0
prev = bpf_spec[0]
for i in range(1, int(self.block_size/2)-1):
curr = bpf_spec[i]
after = bpf_spec[i+1]
if curr >= prev and curr >= after:
fc_approx = (float(i) / float(self.block_size)) * self.sample_rate
break
else:
prev = curr
self.assertAlmostEqual(self.fc, fc_approx, delta=self.delta)
def common_check_lp_response(self):
lpf_real, lpf_imag = frequency_response(self.output_lp)
lpf_spec = amplitude_response(lpf_real, lpf_imag)
fc_approx = 0
prev = lpf_spec[0]
for i in range(1, int(self.block_size/2)):
curr = lpf_spec[i]
if curr <= 0 and prev > 0:
fc_approx = (float(i) / float(self.block_size)) * self.sample_rate
break
else:
prev = curr
self.assertAlmostEqual(self.fc, fc_approx, delta=self.delta)
def common_check_br_response(self):
brf_real, brf_imag = frequency_response(self.output_br)
brf_spec = amplitude_response(brf_real, brf_imag)
fc_approx = 0
prev = brf_spec[0]
for i in range(1, int(self.block_size/2)-1):
curr = brf_spec[i]
after = brf_spec[i+1]
if curr <= prev and curr <= after:
fc_approx = (float(i) / float(self.block_size)) * self.sample_rate
break
else:
prev = curr
self.assertAlmostEqual(self.fc, fc_approx, delta=self.delta)
def test_flat_zero(self):
self.flt.reset()
self.common_check_flat_response()
def test_flat_dirac(self):
self.flt.reset()
self.signal[0] = 1
self.common_check_flat_response()
def test_flat_sine(self):
self.flt.reset()
self.signal = [math.sin(2.0 * math.pi * 100.0 * i / 48000.0) for i in range(0, self.block_size)]
self.common_check_flat_response()
def common_test_cutoff_frequency(self, fc):
self.fc = fc
self.qfactor = 1
self.signal[0] = 1
self.flt.set(self.sample_rate, self.fc, self.qfactor)
self.flt.process(self.signal, self.output_hp, self.output_bp, self.output_lp, self.output_br)
self.common_check_hp_response()
self.common_check_bp_response()
self.common_check_lp_response()
self.common_check_br_response()
def test_cutoff_frequency(self):
self.common_test_cutoff_frequency(200)
self.common_test_cutoff_frequency(500)
| true |
0141db85ecd7fb115c78e38381bd64f4bb5936ac | Python | mudismud/czsc | /czsc/utils/kline_generator.py | UTF-8 | 12,998 | 2.90625 | 3 | [
"Apache-2.0"
] | permissive | # coding: utf-8
from datetime import datetime, timedelta
from typing import List, Union
from ..enum import Freq
from ..objects import RawBar
def bar_end_time(dt: datetime, m=1):
"""获取 dt 对应的分钟周期结束时间
:param dt: datetime
:param m: int
分钟周期,1 表示 1分钟,5 表示 5分钟 ...
:return: datetime
"""
dt = dt.replace(second=0, microsecond=0)
dt_span = {
60: ["01:00", "2:00", "3:00", '10:30', "11:30", "14:00", "15:00", "22:00", "23:00", "23:59"],
}
if m < 60:
if (dt.hour == 15 and dt.minute == 0) or (dt.hour == 11 and dt.minute == 30):
return dt
delta_m = dt.minute % m
if delta_m != 0:
dt += timedelta(minutes=m - delta_m)
else:
dt += timedelta(minutes=m)
return dt
else:
for v in dt_span[m]:
hour, minute = v.split(":")
edt = dt.replace(hour=int(hour), minute=int(minute))
if dt <= edt:
return edt
return dt
def freq_end_time(dt: datetime, freq: Freq) -> datetime:
"""获取 dt 对应的K线周期结束时间
:param dt: datetime
:param freq: Freq
周期,1 表示 1分钟,5 表示 5分钟 ...
:return: datetime
"""
dt = dt.replace(second=0, microsecond=0)
if freq in [Freq.F1, Freq.F5, Freq.F15, Freq.F30, Freq.F60]:
m = int(freq.value.strip("分钟"))
if m < 60:
if (dt.hour == 15 and dt.minute == 0) or (dt.hour == 11 and dt.minute == 30):
return dt
delta_m = dt.minute % m
if delta_m != 0:
dt += timedelta(minutes=m - delta_m)
return dt
else:
dt_span = {
60: ["01:00", "2:00", "3:00", '10:30', "11:30", "14:00", "15:00", "22:00", "23:00", "23:59"],
}
for v in dt_span[m]:
hour, minute = v.split(":")
edt = dt.replace(hour=int(hour), minute=int(minute))
if dt <= edt:
return edt
# 处理 日、周、月、季、年 的结束时间
dt = dt.replace(hour=0, minute=0)
if freq == Freq.D:
return dt
if freq == Freq.W:
sdt = dt + timedelta(days=5-dt.isoweekday())
return sdt
if freq == Freq.M:
if dt.month == 12:
sdt = datetime(year=dt.year+1, month=1, day=1) - timedelta(days=1)
else:
sdt = datetime(year=dt.year, month=dt.month+1, day=1) - timedelta(days=1)
return sdt
if freq == Freq.S:
dt_m = dt.month
if dt_m in [1, 2, 3]:
sdt = datetime(year=dt.year, month=4, day=1) - timedelta(days=1)
elif dt_m in [4, 5, 6]:
sdt = datetime(year=dt.year, month=7, day=1) - timedelta(days=1)
elif dt_m in [7, 8, 9]:
sdt = datetime(year=dt.year, month=10, day=1) - timedelta(days=1)
else:
sdt = datetime(year=dt.year+1, month=1, day=1) - timedelta(days=1)
return sdt
if freq == Freq.Y:
return datetime(year=dt.year, month=12, day=31)
print(f'freq_end_time error: {dt} - {freq}')
return dt
class KlineGenerator:
    """K-line (bar) generator that mimics a live feed: it consumes finished
    1-minute bars and incrementally aggregates them into 5/15/30/60-minute,
    daily, weekly and monthly bars."""
    def __init__(self, max_count: int = 5000, freqs: List[Union[str, Freq]] = None):
        """
        :param max_count: int
            maximum number of bars kept per frequency
        :param freqs: list of str
            frequencies to maintain; defaults to
            ['月线', '周线', '日线', '60分钟', '30分钟', '15分钟', '5分钟', '1分钟']
        """
        self.max_count = max_count
        if freqs is None:
            self.freqs = ['月线', '周线', '日线', '60分钟', '30分钟', '15分钟', '5分钟', '1分钟']
        else:
            self.freqs = freqs
        self.m1: List[RawBar] = []
        self.m5: List[RawBar] = []
        self.m15: List[RawBar] = []
        self.m30: List[RawBar] = []
        self.m60: List[RawBar] = []
        self.D: List[RawBar] = []
        self.W: List[RawBar] = []
        self.M: List[RawBar] = []
        self.end_dt = None
        self.symbol = None
    def __update_end_dt(self):
        # Track symbol / latest datetime from the newest 1-minute bar.
        if self.m1:
            self.end_dt = self.m1[-1].dt
            self.symbol = self.m1[-1].symbol
    def init_kline(self, freq: Union[Freq, str], kline: List[RawBar]):
        """Seed one frequency with historical bars.
        :param freq: frequency name (str) or Freq member
        :param kline: list of RawBar
        :return:
        """
        freqs_map = {"1分钟": self.m1, "5分钟": self.m5, "15分钟": self.m15, "30分钟": self.m30,
                     "60分钟": self.m60, "日线": self.D, "周线": self.W, "月线": self.M}
        m = freqs_map[freq.value if isinstance(freq, Freq) else freq]
        m.extend(kline)
        self.__update_end_dt()
    def __repr__(self):
        return "<KlineGenerator for {}; latest_dt={}>".format(self.symbol, self.end_dt)
    @staticmethod
    def __update_from_1min(last: RawBar, k: RawBar, next_end_dt: datetime):
        # Merge the 1-minute bar *k* into the still-open bar *last*.
        new = RawBar(
            symbol=last.symbol,
            dt=next_end_dt,
            id=last.id,
            freq=last.freq,
            open=last.open,
            close=k.close,
            high=max(last.high, k.high),
            low=min(last.low, k.low),
            vol=last.vol + k.vol,
        )
        return new
    def __update_1min(self, k: RawBar):
        """Append or replace the latest 1-minute bar."""
        assert '1分钟' in self.freqs
        if not self.m1:
            self.m1.append(k)
        else:
            if k.dt > self.m1[-1].dt:
                self.m1.append(k)
            elif k.dt == self.m1[-1].dt:
                self.m1[-1] = k
            else:
                raise ValueError("1分钟新K线的时间{}必须大于等于最后一根K线的时间{}".format(k.dt, self.m1[-1].dt))
        self.m1 = self.m1[-self.max_count:]
    def __update_minutes(self, k: RawBar, minutes=(5, 15, 30, 60)):
        """Update the intraday (5/15/30/60-minute) bars from a 1-minute bar."""
        fm_map = {5: self.m5, 15: self.m15, 30: self.m30, 60: self.m60}
        freq_map = {5: Freq.F5, 15: Freq.F15, 30: Freq.F30, 60: Freq.F60}
        for minute in minutes:
            if "{}分钟".format(minute) not in self.freqs:
                continue
            m = fm_map[minute]
            next_end_dt = bar_end_time(k.dt, m=minute)
            if not m:
                m.append(RawBar(symbol=k.symbol, id=1, freq=freq_map[minute],
                                dt=next_end_dt, open=k.open, close=k.close,
                                high=k.high, low=k.low, vol=k.vol))
            else:
                last = m[-1]
                if next_end_dt != last.dt:
                    m.append(RawBar(symbol=k.symbol, id=last.id+1, freq=freq_map[minute],
                                    dt=next_end_dt, open=k.open, close=k.close,
                                    high=k.high, low=k.low, vol=k.vol))
                else:
                    next_bar = self.__update_from_1min(last, k, next_end_dt)
                    m[-1] = next_bar
            # BUGFIX: trim in place. The old code rebound the local dict entry
            # (fm_map[minute] = m[-self.max_count:]), which never touched
            # self.m5/m15/m30/m60, so those lists grew without bound.
            del m[:-self.max_count]
    def __update_d(self, k=None):
        """Update the daily bar from a 1-minute bar."""
        if "日线" not in self.freqs:
            return
        next_end_dt = k.dt.replace(hour=0, minute=0, second=0, microsecond=0)
        k = RawBar(symbol=k.symbol, id=1, freq=Freq.D, dt=next_end_dt, open=k.open, close=k.close, high=k.high, low=k.low, vol=k.vol)
        if not self.D:
            self.D.append(k)
            # BUGFIX: return here; falling through merged the fresh bar with
            # itself (via __update_from_1min) and doubled its volume.
            return
        last = self.D[-1]
        if next_end_dt.date() != last.dt.date():
            k.id = last.id + 1
            self.D.append(k)
        else:
            self.D[-1] = self.__update_from_1min(last, k, next_end_dt)
        self.D = self.D[-self.max_count:]
    def __update_w(self, k=None):
        """Update the weekly bar; a new weekly bar is opened on Mondays."""
        if "周线" not in self.freqs:
            return
        next_end_dt = k.dt.replace(hour=0, minute=0, second=0, microsecond=0)
        k = RawBar(symbol=k.symbol, id=1, freq=Freq.W, dt=next_end_dt, open=k.open,
                   close=k.close, high=k.high, low=k.low, vol=k.vol)
        if not self.W:
            self.W.append(k)
            # BUGFIX: see __update_d — without this the first bar's volume was doubled.
            return
        last = self.W[-1]
        if next_end_dt.weekday() == 0 and k.dt.weekday() != last.dt.weekday():
            k.id = last.id + 1
            self.W.append(k)
        else:
            self.W[-1] = self.__update_from_1min(last, k, next_end_dt)
        self.W = self.W[-self.max_count:]
    def __update_m(self, k=None):
        """Update the monthly bar from a 1-minute bar."""
        if "月线" not in self.freqs:
            return
        next_end_dt: datetime = k.dt.replace(hour=0, minute=0, second=0, microsecond=0)
        k = RawBar(symbol=k.symbol, id=1, freq=Freq.M, dt=next_end_dt, open=k.open,
                   close=k.close, high=k.high, low=k.low, vol=k.vol)
        if not self.M:
            self.M.append(k)
            # BUGFIX: see __update_d — without this the first bar's volume was doubled.
            return
        last = self.M[-1]
        if next_end_dt.month != last.dt.month:
            k.id = last.id + 1
            self.M.append(k)
        else:
            self.M[-1] = self.__update_from_1min(last, k, next_end_dt)
        self.M = self.M[-self.max_count:]
    def update(self, k: RawBar):
        """Feed the latest finished 1-minute bar and update every tracked frequency.
        Note: mutates ``k.id`` to keep the 1-minute id sequence monotonic.
        :param k: 1-minute RawBar
        """
        assert k.freq == Freq.F1, "目前仅支持从1分钟K线生成"
        if self.m1:
            k.id = self.m1[-1].id + 1
        else:
            k.id = 0
        if self.end_dt and k.dt <= self.end_dt:
            # Out-of-order or duplicate bar: ignore it silently.
            return
        self.end_dt = k.dt
        self.symbol = k.symbol
        self.__update_1min(k)
        self.__update_minutes(k, minutes=(5, 15, 30, 60))
        self.__update_d(k)
        self.__update_w(k)
        self.__update_m(k)
    def get_kline(self, freq: str, count: int = 1000) -> List[RawBar]:
        """Return up to *count* most recent bars of one frequency.
        :param freq: str
            one of 1分钟;5分钟;15分钟;30分钟;60分钟;日线;周线;月线
        :param count: int
            number of bars
        :return: list of RawBar
        """
        freqs_map = {"1分钟": self.m1, "5分钟": self.m5, "15分钟": self.m15, "30分钟": self.m30,
                     "60分钟": self.m60, "日线": self.D, "周线": self.W, "月线": self.M}
        return freqs_map[freq][-count:]
    def get_klines(self, counts=None):
        """Return bars for several frequencies at once.
        :param counts: dict
            frequency name -> number of bars;
            defaults to {"1分钟": 1000, "5分钟": 1000, "30分钟": 1000, "日线": 100}
        :return: dict of frequency name -> list of RawBar
        """
        if counts is None:
            counts = {"1分钟": 1000, "5分钟": 1000, "30分钟": 1000, "日线": 100}
        return {k: self.get_kline(k, v) for k, v in counts.items()}
class KlineGeneratorD:
    """Aggregate finished daily bars into weekly, monthly, quarterly and yearly bars."""
    def __init__(self, freqs: List[str] = None):
        self.symbol = None
        self.end_dt = None
        default_freqs = [Freq.D.value, Freq.W.value, Freq.M.value, Freq.S.value, Freq.Y.value]
        self.freqs: List[str] = freqs if freqs else default_freqs
        # One bar series per tracked frequency name.
        self.bars = {name: [] for name in self.freqs}
    def __repr__(self):
        return f"<KlineGeneratorD for {self.symbol} @ {self.end_dt}>"
    def _update_freq(self, bar: RawBar, freq: Freq):
        """Merge *bar* into the last bar of *freq*, or open a new period bar."""
        period_end = freq_end_time(bar.dt, freq)
        series = self.bars[freq.value]
        if not series:
            series.append(RawBar(symbol=bar.symbol, freq=freq, dt=period_end, id=0,
                                 open=bar.open, close=bar.close, high=bar.high,
                                 low=bar.low, vol=bar.vol))
            return
        last = series[-1]
        if period_end != last.dt:
            # The daily bar belongs to a new period: start a fresh bar.
            series.append(RawBar(symbol=bar.symbol, freq=freq, dt=period_end, id=last.id + 1,
                                 open=bar.open, close=bar.close, high=bar.high,
                                 low=bar.low, vol=bar.vol))
        else:
            # Same period: fold the daily bar into the open one.
            series[-1] = RawBar(symbol=bar.symbol, freq=freq, dt=period_end, id=last.id,
                                open=last.open, close=bar.close,
                                high=max(last.high, bar.high),
                                low=min(last.low, bar.low), vol=last.vol + bar.vol)
    def update(self, bar: RawBar):
        """
        :param bar: a *finished* daily bar
        :return:
        """
        assert bar.freq == Freq.D
        self.symbol = bar.symbol
        self.end_dt = bar.dt
        daily = self.bars[Freq.D.value]
        # Ignore a second bar for the same calendar day.
        if daily and daily[-1].dt.date() == bar.dt.date():
            return
        daily.append(bar)
        for freq in (Freq.W, Freq.M, Freq.S, Freq.Y):
            if freq.value in self.freqs:
                self._update_freq(bar, freq)
| true |
9ac25da5011c3017312d9ed6a0db9896b7580bc2 | Python | brandonfry/LPTHW_EX45 | /ex45_main.py | UTF-8 | 1,470 | 3.328125 | 3 | [] | no_license | """This is the main game file for example 45. From here methods
and functions from supporting files are called to initialize
and play the game.
"""
from sys import argv
from ex45_text import clear
from ex45_engines import GameEngine, RoomEngine
from ex45_map import create_map, seed_map, generate_map
from ex45_chars import populate_map
#~ from ex45_chars import Populate_Map
import readline
def main():
    """When called, main() will run a text adventure game written to
    fulfill the requirements of Learn Python the Hard Way's exercise 45.

    Python 2 script: expects the (square) map size as the first
    command-line argument.
    """
    size = int(argv[1])
    clear()
    room_map = RoomEngine(size)
    # Create a blank, square map of zeros.
    room_map = create_map(room_map)
    # Update map with a single, non-edge starting room.
    (room_map) = seed_map(room_map)
    # Randomly generate the rest of the map from the starting room.
    (room_map) = generate_map(room_map)
    # Randomly populate with monsters, based on room type.
    room_map = populate_map(room_map)
    a_game = GameEngine()
    # Start the game from the starting room
    a_game.start(room_map)
    # Give myself a somewhat robust method to debug.
    # Python 2 input() evaluates the typed expression; Ctrl-C leaves the
    # loop and syntax/name errors are simply ignored.
    debug = raw_input("Debug (Y/N)?: ").lower()
    while debug == 'y':
        try:
            print input()
        except KeyboardInterrupt:
            break
        except SyntaxError:
            continue
        except NameError:
            continue
    return
if __name__ == "__main__":
    main()
| true |
45f6e6f7a09667e0b3f1d9fa7ca7e669048dbee1 | Python | mowenli/CLT | /assets_md5_diff.py | UTF-8 | 8,378 | 2.796875 | 3 | [] | no_license | #!/usr/bin/python
# encoding=utf-8
##################################################################
## Descrpition :
## md5变更工具
## Author : Reyn
## Date : 2018/10/01
##################################################################
import hashlib
import json
import md5
import os
import sys
def getFileInfo(file):
    u'''Return {'md5': <hex digest>, 'size': <bytes>} for *file*.

    Uses hashlib (available on both Python 2 and 3) instead of the
    deprecated ``md5`` module, and hashes in fixed-size chunks so large
    assets are not read into memory at once.
    '''
    digest = hashlib.md5()
    with open(file, 'rb') as f:
        # iter(callable, sentinel) yields 64 KiB chunks until EOF.
        for chunk in iter(lambda: f.read(65536), b''):
            digest.update(chunk)
    return {'md5' : digest.hexdigest(), 'size' : os.path.getsize(file)}
class AssetsMd5Diff():
    u'''Asset MD5 diff tool: compares the md5/size of every file under
    ./src and ./res against project.manifest and, after user confirmation,
    bumps version.manifest and rewrites project.manifest.'''
    def __init__(self):
        self.ignore_file = './ignore.ini'
        self.version_file = './version.manifest'
        self.project_file = './project.manifest'
        self.src_dir = './src'
        self.res_dir = './res'
        self.ignore_json_dict = {} ## dict of ignored files and directories
        self.old_version = 0 ## previous version number
        self.new_version = 0 ## new version number
        self.old_assets_dict = {} ## assets recorded in project.manifest
        self.new_assets_dict = {} ## assets found on disk this run
        self.is_assets_changed = False ## whether any asset changed
    def readVersion(self):
        u'''Read the version number from version.manifest.'''
        try:
            with open(self.version_file, 'r') as file:
                version_json_dict = json.load(file)
                self.old_version = version_json_dict['version']
        except:
            # print(u'读取版本文件 version.manifest 失败')
            print('read version file failed.')
            sys.exit(0)
    def readProject(self):
        u'''Read the project manifest (the previously recorded assets).'''
        try:
            with open(self.project_file, 'r') as file:
                project_json_dict = json.load(file, encoding='utf-8')
                self.old_assets_dict = project_json_dict['assets']
        except:
            # print(u'读取工程文件 project.manifest 失败')
            print('read project file failed.')
            sys.exit(0)
    def writeVersion(self):
        u'''Write the bumped version number into version.manifest.'''
        try:
            with open(self.version_file, 'w') as file:
                self.new_version = self.old_version + 1
                str_json = {'version' : self.new_version}
                new_version_json = json.dumps(str_json, ensure_ascii=False, indent=4)
                file.write(new_version_json)
                # print(u'旧版本号: ' + str(self.old_version))
                # print(u'新版本号: ' + str(self.new_version))
                # print(u'完成!')
                print('old version : ' + str(self.old_version))
                print('new version : ' + str(self.new_version))
                print('Done!')
        except:
            # print(u'写入版本文件version.manifest失败')
            print('write version file failed.')
            sys.exit(0)
    def writeProject(self):
        u'''Write the freshly scanned assets into project.manifest.'''
        try:
            with open(self.project_file, 'w') as file:
                str_json = {'version' : self.new_version, 'assets' : self.new_assets_dict}
                project_json = json.dumps(str_json, sort_keys=True, ensure_ascii=False, indent=4)
                file.write(project_json)
        except:
            # print(u'写入工程文件project.manifest失败')
            print('write project file failed.')
            sys.exit(0)
    def readIgnore(self):
        u'''Read the ignore list (files and directories to skip).'''
        try:
            with open(self.ignore_file, 'r') as file:
                self.ignore_json_dict = json.load(file, encoding='utf-8')
                print('ignore file list:')
                # print(u'忽略文件列表:')
                for file in self.ignore_json_dict.get('files'):
                    print ' ' + file
                # print(u'忽略目录列表:')
                print('ignore dir list:')
                for dirf in self.ignore_json_dict.get('dirs'):
                    print ' ' + dirf
        except:
            # print(u'读取忽略文件失败')
            print('read ignore file failed.')
            sys.exit(0)
    def readDirs(self, dirpath):
        u'''Walk *dirpath* and record the md5/size of every non-ignored file
        into the new asset dict (also deletes stray .DS_Store files).'''
        for root, _, files in os.walk(dirpath):
            for filename in files:
                filepath = os.path.join(root, filename)
                if filename == '.DS_Store':
                    os.remove(filepath)
                else:
                    is_in_root = True
                    for dirf in self.ignore_json_dict['dirs']:
                        if dirf in root:
                            is_in_root = False
                            break
                    if is_in_root:
                        fileinfo = getFileInfo(filepath)
                        if fileinfo is not None:
                            # Strip the leading './' for the ignore check and the
                            # leading './src/' or './res/'-sized prefix for the key.
                            new_filename = filepath[2:].replace('\\', '/')
                            save_filename = filepath[6:].replace('\\', '/')
                            if new_filename not in self.ignore_json_dict['files']:
                                self.new_assets_dict[save_filename] = fileinfo
    def newMd5(self):
        u'''Scan res/ and src/ and build the new asset dict.'''
        self.readDirs(self.res_dir)
        self.readDirs(self.src_dir)
    def diffMd5(self):
        u'''Diff old vs new md5 values and, once confirmed, persist the result.'''
        add_list = [] ## newly added files
        del_list = [] ## deleted files
        mod_list = [] ## modified files
        for key, value in self.new_assets_dict.items():
            cv = self.old_assets_dict.get(key)
            if cv is None:
                add_list.append(key)
            else:
                if cv['md5'] != value['md5']:
                    mod_list.append(key)
        for key, value in self.old_assets_dict.items():
            nv = self.new_assets_dict.get(key)
            if nv is None:
                del_list.append(key)
        # state = u'统计:'
        state = 'total:'
        if len(add_list) > 0:
            # state += u'\n 新增:'+str(len(add_list))
            state += u'\n add:'+str(len(add_list))
            diff = [ self.diffKey(key, 'ADD') for key in add_list ]
            # print u'新增:\n' + ''.join( diff )
            print u'add:\n' + ''.join( diff )
            self.is_assets_changed = True
        if len(del_list) > 0:
            # state += u'\n 删除:'+str(len(del_list))
            state += u'\n del:'+str(len(del_list))
            diff = [ self.diffKey(key, 'DEL') for key in del_list ]
            # print u'删除:\n' + ''.join( diff )
            print u'del:\n' + ''.join( diff )
            self.is_assets_changed = True
        if len(mod_list) > 0:
            # state += u'\n 修改:'+str(len(mod_list))
            state += u'\n mod:'+str(len(mod_list))
            diff = [ self.diffKey(key, 'MOD') for key in mod_list ]
            # print u'修改:\n' + ''.join( diff )
            print u'mod:\n' + ''.join( diff )
            self.is_assets_changed = True
        if self.is_assets_changed:
            print(state)
            # if raw_input('请确认后决定是否采用新的变化 (y/n)\n') == 'y':
            if raw_input('take effect (y/n)\n') == 'y':
                # print(u'正在为您写入...')
                print(u'writing...')
                self.writeVersion()
                self.writeProject()
            else:
                # print(u'您选择了什么都不做')
                print('do nothing.')
                sys.exit(0)
        else:
            # state += u'没有变化,无需更改'
            state += u'nothing changed.'
            print(state)
    def diffKey(self, key, mode='ADD'):
        u'''Format one diff entry (key plus old/new md5) for display.'''
        if mode == 'ADD':
            return ' ' + key + '\n md5: ' + self.new_assets_dict.get(key)['md5'] + '\n'
        elif mode == 'DEL':
            return ' ' + key + '\n md5: ' + self.old_assets_dict.get(key)['md5'] + '\n'
        elif mode == 'MOD':
            cv = self.old_assets_dict.get(key)['md5']
            nv = self.new_assets_dict.get(key)['md5']
            return ' ' + key + '\n md5: ' + cv + ' => ' + nv + '\n'
        return ''
    def start(self):
        u'''Run the full diff pipeline.'''
        self.readIgnore()
        self.readVersion()
        self.readProject()
        self.newMd5()
        self.diffMd5()
if __name__ == '__main__':
    AssetsMd5Diff().start()
| true |
def binaryserch(a, x):
    """Binary search in a sorted list of unique values.

    Returns the index of *x* in *a*, or -1 when it is absent.
    """
    lo, hi = 0, len(a) - 1
    while lo <= hi:
        mid = (lo + hi) // 2
        if a[mid] < x:
            lo = mid + 1       # target lies in the right half
        elif a[mid] > x:
            hi = mid - 1       # target lies in the left half
        else:
            return mid         # found
    return -1
if __name__ == '__main__':
    # Demo: search a sorted, duplicate-free list for one value.
    a = [2, 3, 4, 10, 40]
    x=10
    res = binaryserch(a,x)
    if res != -1:
        print("元素在数组中的索引为 %d" % res)
    else:
print("元素不在数组中") | true |
7702fb951588cc3c0c24e9ce4d557382ae9e54e2 | Python | mgdreamingstar/algorithm014-algorithm014 | /Week_01/239_sliding_window_maximum.py | UTF-8 | 433 | 3 | 3 | [] | no_license | from collections import deque
from typing import List
def maxSlidingWindow(self, nums: List[int], k: int) -> List[int]:
    """Return the maximum of every length-k sliding window over nums.

    Uses a monotonic (decreasing) deque of indices, O(n) overall.
    `self` is unused; the signature mirrors the LeetCode method form.
    """
    if not nums:
        return []
    window = deque()  # indices whose values are in decreasing order
    result = []
    for idx, value in enumerate(nums):
        # Drop everything smaller than the incoming value.
        while window and nums[window[-1]] < value:
            window.pop()
        window.append(idx)
        # Evict the front once it falls outside the window.
        if window[0] <= idx - k:
            window.popleft()
        if idx >= k - 1:
            result.append(nums[window[0]])
    return result
| true |
9035564c8bb71a4b4b6f4159a14de7c571f4618a | Python | DanTayar/python | /loops.py | UTF-8 | 149 | 3.828125 | 4 | [] | no_license | #How can i print out all the items in a list?
# Print every item in the list together with its type.
my_list = ['Apples', 'Oranges', 'Peaches', 3.14, 20, 'defeqfweqfew']
for item in my_list:
    print(item, type(item))
83ac2f6b08c109522e0323ab3a7a89af0ae095d8 | Python | tungnkhust/Image-Classification-With-Bag-of-Visual-Model | /buil_bov.py | UTF-8 | 2,021 | 2.53125 | 3 | [] | no_license | import argparse
import cv2
import os
import numpy as np
from src.utils import load_json, get_files
from src.utils import read_image, get_extractor
from src.feature_extraction import get_descriptors, create_bov
def build_bov(config):
    """Build a bag-of-visual-words vocabulary from the descriptors of every
    dataset image (optionally extended with extra images).

    :param config: dict with keys 'extractor_name', 'bov_dir', 'n_visuals',
        'data_path', 'image_size', 'use_extend_image', 'extend_image_dir'.
    :raises ValueError: if no extractor matches ``extractor_name``.
    """
    extractor_name = config['extractor_name']
    bov_dir = config['bov_dir']
    n_visuals = config['n_visuals']

    extractor = get_extractor(extractor_name)
    if extractor is None:
        raise ValueError('extractor must be not None')

    # exist_ok avoids the exists()/makedirs() race of the old code.
    os.makedirs(bov_dir, exist_ok=True)

    # List every image path of the dataset.
    img_paths = get_files(config['data_path'])
    if config['use_extend_image']:
        extend_img_paths = get_files(config['extend_image_dir'])
        img_paths.extend(extend_img_paths)
        print("Use extend image to build bov")
        print(f'Num image extend {len(extend_img_paths)}')

    if config['use_extend_image']:
        bov_path = os.path.join(bov_dir, f'bov_{extractor_name}_{n_visuals}_extend.sav')
    else:
        bov_path = os.path.join(bov_dir, f'bov_{extractor_name}_{n_visuals}.sav')

    print('Extraction use: ', extractor_name)
    print('N_visuals: ', n_visuals)
    print('Get descriptor')
    descriptor_list = []
    total_descriptors = 0
    # Iterate paths directly instead of indexing by range(len(...)).
    for img_path in img_paths:
        img = read_image(img_path, size=config['image_size'])
        _kps, des = get_descriptors(extractor, img)
        if des is not None:
            descriptor_list.append(des)
            total_descriptors += len(des)

    descriptors = np.vstack(descriptor_list)
    print(descriptors.shape)
    print("Total descriptor: ", total_descriptors)
    print('Build bags of visual...')
    create_bov(descriptors, n_visuals, bov_path=bov_path)
if __name__ == '__main__':
    # CLI entry point: path to the JSON config driving the vocabulary build.
    parser = argparse.ArgumentParser()
    parser.add_argument('--config_path', type=str, default='configs/bov_config.json')
    args = parser.parse_args()
    config = load_json(args.config_path)
    build_bov(config)
| true |
e654669117bb0d71161998334c124540fad8abbe | Python | martinpoljak/pyircgate-daemon | /libraries/INIParser.py | UTF-8 | 1,723 | 2.671875 | 3 | [
"MIT"
] | permissive | #! /usr/bin/python
# -*- coding: utf-8 -*-
#
import sys
import ConfigParser
class INIParser(ConfigParser.RawConfigParser, object):
def __get_default(self, section, option, default, getter):
try:
value = getter(section, option)
except (ConfigParser.NoOptionError, ConfigParser.NoSectionError):
if default is not None:
value = default
else:
raise
return value
def __fix_string(self, string):
if len(string) > 2:
if ((string[0] == "\"") and (string[-1] == "\"")) or ((string[0] == "'") and (string[-1] == "'")):
string = string[1:-1]
return string
def get(self, section, option, default = None):
getter = lambda section, option: super(INIParser, self).get(section, option)
value = self.__get_default(section, option, default, getter)
value = self.__fix_string(value)
return value
def getint(self, section, option, default = None):
getter = lambda section, option: super(INIParser, self).getint(section, option)
return self.__get_default(section, option, default, getter)
def getfloat(self, section, option, default = None):
getter = lambda section, option: super(INIParser, self).getfloat(section, option)
return self.__get_default(section, option, default, getter)
def getboolean(self, section, option, default = None):
getter = lambda section, option: super(INIParser, self).getboolean(section, option)
return self.__get_default(section, option, default, getter)
def items(self, section):
section_items = super(INIParser, self).items(section)
result = []
for key, item in section_items:
if isinstance(item, str):
item = self.__fix_string(item)
result.append((key, item))
return result
| true |
49c277c5d22470743aae14990f4604054e23c273 | Python | MEENUVIJAYAN/python | /python lab 6/max.py | UTF-8 | 275 | 4 | 4 | [] | no_license | #Python program to get a maximum and mininum value in a dictionary
a = {'x': 500, 'y': 5874, 'z': 560}
# Iterating a dict yields its keys; dict.get doubles as the key function,
# so max/min return the key of the largest/smallest value.
maximum = max(a, key=a.get)
minimum = min(a, key=a.get)
print('Maximum Value: ', a[maximum])
print('Minimum Value: ', a[minimum])
| true |
113fa22de631ea0314d0a4d0f83b319788a04c07 | Python | liushooter/OneDayOneCommit | /python/cocos2d/review.py | UTF-8 | 5,029 | 3.609375 | 4 | [
"MIT"
] | permissive | import cocos
from cocos import scene
from cocos.layer import Layer, ColorLayer
from cocos.director import director
from cocos.scenes import *
from cocos.sprite import Sprite
from cocos.actions import *
from cocos.audio.pygame.mixer import Sound
from cocos.audio.pygame import mixer
from pyglet.window.key import symbol_string
# Here is how we will structure our code
# First, we will write an Audio class that is the child of SDL's Sound class
# Second we will write an Input layer that controls both the sprite and the audio
# And to spice things up, we'll add two states to the first layer: a normal and a trippy mode
# We start with the audio class, same as before
class Audio(Sound):
    """Thin subclass of SDL's Sound; kept as its own class so game audio
    can grow extra behaviour later without touching call sites."""
    def __init__(self, filename):
        # Delegate straight to Sound's loader.
        super(Audio, self).__init__(filename)
# Pretty easy, I'd say
# Let's be fancy and make this a color layer AND an event handler
class InputLayer(cocos.layer.ColorLayer):
    """Color layer that owns the sprite, the background music and all
    keyboard input.  Has two visual states: a normal one and a 'trippy'
    one (slowed-down music plus wave/liquid effects); SPACE swaps the
    current scene for one in the other state, preserving the sprite's
    position."""
    is_event_handler = True
    def __init__(self, x=320, y=240, is_trippy=False):
        super(InputLayer, self).__init__(46, 204, 113, 1000)
        # We set the trippy boolean based on the value passed in (by default it's not trippy)
        self.is_trippy = is_trippy
        # Now we need a little guy to manipulate
        self.sprite = Sprite('assets/img/grossini.png')
        self.sprite.position = x, y
        self.sprite.opacity = 0
        self.add(self.sprite)
        self.sprite.do(FadeIn(2))
        # You should be bored seeing this same code over and over again
        # Here's something different though
        # Now I create an audio object and store it within self, based on whether or not it's trippy
        if self.is_trippy:
            # When it is trippy, I have a slowed down and distorted version of the song I made in Audacity
            self.bg_music = Audio("assets/sound/LatinIndustriesSlow.ogg")
            # I also start running a couple effects that make it seem very trippy
            # It's important to note that you can do math on Cocos2D effects and actions
            self.do((Waves(duration=4) * 100) + Liquid(duration=15))
            # In this case I added two actions together and multiplied the waves by 100 for run
        else:
            # If it's not trippy then I just make it the same boring old song we've been using before
            self.bg_music = Audio("assets/sound/LatinIndustries.ogg")
        # We lower the volume of the background music and have it play the whole time
        self.bg_music.set_volume(.2)
        self.bg_music.play(-1)
    # We don't need anything else here, let's just let our sprite be moved in the event handlers
    # So now we can overload some default event handlers
    # We'll let the user move in any direction on the screen with the arrow keys
    # We'll only be doing keyboard input for this program
    def on_key_press(self, key, modifiers):
        """Arrow keys move the sprite by 50 px; SPACE toggles trippy mode
        by replacing the whole scene."""
        # If you don't know what these next couple lines do, go check the previous tutorials
        move_left = MoveBy((-50, 0), .5)
        move_up = MoveBy((0, 50), .5)
        # Check if they want to go left, and then actually make the sprite go left
        if symbol_string(key) == "LEFT":
            self.sprite.do(move_left)
        # Or maybe if they want to move right?
        elif symbol_string(key) == "RIGHT":
            self.sprite.do(Reverse(move_left))
        # Lastly we need to make it move up
        elif symbol_string(key) == "UP":
            self.sprite.do(move_up)
        # Oh yeah don't forget about down
        elif symbol_string(key) == "DOWN":
            self.sprite.do(Reverse(move_up))
        # That's it for movements!
        # Now let's look at transitioning to a new scene
        # Let's make the game all trippy when they hit space
        elif symbol_string(key) == "SPACE":
            # I need to stop the music before we transition to the next scene so that two songs aren't playing at once
            self.bg_music.stop()
            # If you were paying attention, you would've noticed I take three parameters in the init function
            # I get the X and Y coordinates of the sprite to figure out where to place it when the scenes transition
            coordinates = self.sprite.position
            # You should try printing the X and Y coordinates yourself to see the type of object that it returns
            if self.is_trippy:
                # This line only runs when the layer is already trippy, and by default the layer makes itself not trippy
                # This line only needs to give the X and Y coordinates in that case
                director.replace(scene.Scene(InputLayer(coordinates[0], coordinates[1])))
            else:
                # This huge line makes a new scene, with a transition, and inputs the coordinates and makes it trippy!
                director.replace(scene.Scene(InputLayer(coordinates[0], coordinates[1], True)))
director.replace(scene.Scene(InputLayer(coordinates[0], coordinates[1], True)))
# And finally we do our usual initialization and run the scene
# (mixer.init() runs before the scene so Audio objects can load sounds).
mixer.init()
director.init()
director.run(scene.Scene(InputLayer()))
| true |
class Employee:
    """Employee record with a class-wide leave counter and an alternate
    dash-separated-string constructor."""
    num_of_leaves = 0  # shared across every Employee

    def __init__(self, name, age, salary, role):
        self.name, self.age = name, age
        self.salary, self.role = salary, role

    def print_details(self):
        """Return a one-line, human-readable summary of this employee."""
        return f"The name is {self.name}, salary is {self.salary}, age is {self.age}, and the role is {self.role}"

    @classmethod
    def print_leaves(cls, newleaves):
        """Set the class-wide number of leaves."""
        cls.num_of_leaves = newleaves

    @classmethod
    def from_dash(cls, string):
        """Alternate constructor: parse 'name-age-salary-role' (all strings)."""
        parts = string.split("-")
        return cls(*parts)

    @staticmethod
    def printgood(string):
        """Print a small compliment for *string*."""
        print("This is good", string)
# NOTE(review): __init__ order is (name, age, salary, role), so these calls
# set age=1000/salary=21 and age=2000/salary=23 — the values look swapped;
# confirm the intended order.
anurag = Employee("Anurag", 1000, 21, "Programmer")
amit = Employee("Amit", 2000, 23, "Coder")
# from_dash splits on '-' and passes the parts positionally (all strings).
Rohan = Employee.from_dash("Rohan-4800-22-Sweeper")
print(Rohan.age)
Employee.printgood("Amit")
print(anurag.print_details()) | true |
938e33b55bbacb9df677c3c0cc7249df247189cd | Python | virenar/annotator | /container/annotator_utils/variant_coverage_annotator.py | UTF-8 | 3,418 | 2.8125 | 3 | [] | no_license | from cyvcf2 import VCF
import os
import numpy as np
import pandas as pd
import argparse
# Command-line interface: required input VCF (VEP-annotated) and output file.
parser = argparse.ArgumentParser(
    description="Obtain variant coverage data and parse vep info from user provided vcf file generated from vep tool")
parser.add_argument("-i", "--input", help="user specified VCF file generated from vep tool",
                    type=str, required=True)
parser.add_argument("-o", "--output", help="output file name",
                    type=str, required=True)
def _get_vcf_header(path, string):
"""Get particular line from VCF that starts with a specific string.
Args:
path (str): A path to the vcf file.
string (str): A string for which to grep a line
Retruns:
Line in the vcf file.
"""
with open(path, 'r') as f:
for l in f:
if l.startswith(string):
header = l.rstrip().split('\t')
break
return header
def _get_sample_info(path):
    """Return the sample names from the VCF's #CHROM header line.

    Args:
        path (str): A path to the vcf file.

    Returns:
        Names of all the samples in the vcf file.
    """
    # Sample columns start after the 9 fixed VCF columns.
    return _get_vcf_header(path, '#CHROM')[9:]
def _get_vep_info(path):
    """Return the VEP (CSQ) annotation field names declared in the header.

    Args:
        path (str): A path to the vcf file.

    Returns:
        tuple: All VEP info identifiers, in declaration order.
    """
    info_line = _get_vcf_header(path, "##INFO=<ID=CSQ,Number=.")[0]
    # The pipe-separated field list sits between 'Format: ' and the closing '">'.
    fields = info_line.split(',')[-1]
    fields = fields.split('Format: ')[-1]
    fields = fields.split('">')[0]
    return tuple(fields.split('|'))
def get_coverage_info(path):
    """Get coverage metrics and percent alternate variants for all the samples.
    Args:
        path (str): A path to the vcf file.
    Returns:
        Pandas Dataframe containing all of the variants sequencing coverage metrics
        and percent alternate variants for all the samples included in the vcf.
    """
    samples = _get_sample_info(path)
    cyvcf = VCF(path, strict_gt=True)
    tup = []
    # Per-sample column blocks: depth, alt reads, ref reads, pct alt.
    header_coverage = list(map(str, np.hstack(('Key', 'variant_type',
                    ['{}_depth'.format(s) for s in samples],
                    ['{}_alt-reads'.format(s) for s in samples],
                    ['{}_ref-reads'.format(s) for s in samples],
                    ['{}_pct-alt-var'.format(s) for s in samples]))))
    header_vep = _get_vep_info(path)
    header = tuple(header_coverage)+header_vep+tuple(['Variant record'])
    for v in cyvcf:
        # Unique variant key: chrom_pos_ref/alt(s).
        key = "{}_{}_{}/{}".format(v.CHROM,v.POS,v.REF,"/".join(v.ALT))
        variant_record = tuple([str(v)])
        alt_type = v.INFO['TYPE']
        depth = v.gt_depths
        alt_reads = v.gt_alt_depths
        ref_reads = v.gt_ref_depths
        # NOTE(review): divides by gt_depths — presumably never 0 for called
        # genotypes under strict_gt; confirm, otherwise this can warn/NaN.
        pct_alt_var = np.round(100*(v.gt_alt_depths/v.gt_depths),decimals=2)
        # NOTE(review): assumes CSQ is the *last* INFO field of each record —
        # confirm for the VEP output being consumed.
        vep_info = tuple(str(v).split('\t')[7].split(';')[-1].split('|'))
        coverage_info = tuple(list(map(str,(np.hstack((key, alt_type, depth, alt_reads, ref_reads, pct_alt_var))))))
        tup.append(coverage_info+vep_info+variant_record)
    coverage_info = pd.DataFrame(tup)
    coverage_info.columns = header
    return coverage_info
if __name__ == '__main__':
    # Parse CLI args, build the coverage table from the input VCF.
    args = parser.parse_args()
    df = get_coverage_info(os.path.abspath(args.input))
df.to_csv(os.path.abspath(args.output), index=False) | true |
e8c03dacd2e6bd64a4c3e6dce904b6435c65ed5c | Python | rajeshanu/rajeshprograms | /django/untitled2/demo8.py | UTF-8 | 210 | 2.875 | 3 | [] | no_license | no=5
for x in range(1,6):
    # Row x prints x copies of `no`.
    for y in range(1,x+1):
        print(no,end="")
        # NOTE(review): `no` starts at 5 and is only modified inside this
        # branch, which can never execute (no never reaches 40), so every
        # row is just repeated 5s. Possibly an unconditional increment was
        # intended — confirm.
        if no==40:
            no=40
            print(no,end="")
            no=no+5
        else:
            # NOTE(review): `num` is computed but never used.
            num=no+5
print() | true |
4e4e9e342d3f7d3367a1490cdf129d62b83742be | Python | claireyegian/Calculus | /FunctionAnalysisProgram.py | UTF-8 | 10,612 | 3.921875 | 4 | [] | no_license | #Claire Yegian and Romaney Granizo-Mackenzie
#11/6/18
#Function Analysis Program
from math import sin, cos, tan, acos, asin, atan, e, pi, log, log10, sqrt
# The function is typed as a Python expression in the variable `x`
# (e.g. "x**2 - 3*sin(x)") and evaluated later with eval().
function = input('Enter a function: ')
IntervalBeg = float(input('Enter the begining of an interval on which the function is continuous: '))
IntervalEnd = float(input('Enter the end of the interval: '))
# Sampling resolution used by every derivative estimate below.
step = 0.1
print(' ')
#Takes symmetric difference quotient to calculate the derivative at each designated x value
SlopeList = []
h = 0.001
a = IntervalBeg
while a <= IntervalEnd:
    # eval() reads the sampling point from the local variable `x`:
    # f'(a) ~ (f(a+h) - f(a-h)) / (2h).
    x = a + h
    f1 = eval(function)
    x = a - h
    f2 = eval(function)
    SymDif = (f1 - f2)/(2*h)
    SlopeList.append(SymDif) #Adds all numerical derivatives to a list so they can be kept track of
    a += step
#Finds extreme values
x = IntervalBeg #Determines if the first endpoint is a maximum or a minium. If the second y value is higher than the endpoint, it is a min. If the second is lower, it's a max.
y1 = eval(function)
x = IntervalBeg + step
y2 = eval(function)
if y1 > y2:
    print('There is a local maxiumum at x='+str(IntervalBeg))
elif y1 < y2:
    print('There is a local minimum at x='+str(IntervalBeg))
NumXVals = len(SlopeList)
ExtremaList = [IntervalBeg]
yList = [y1]
loc = 0
def FindX(loc):
    # Midpoint of the two sample x-values whose slopes changed sign.
    return(IntervalBeg + (step*(loc)+step*(loc+1))/2)
while loc <= (NumXVals - 2): #Runs through all of the recorded numerical derivatives
    if SlopeList[loc] < 0: #Negative slope followed by a positive slope marks a local minimum.
        if SlopeList[loc+1] > 0:
            print('There is a local minimum around x='+ str(round(FindX(loc),4)))
            ExtremaList.append(FindX(loc))
            x = FindX(loc)
            yList.append(eval(function))
        elif SlopeList[loc+1] < 0:
            pass
        elif SlopeList[loc+1] == 0:
            pass
    elif SlopeList[loc] > 0: #Positive slope followed by a negative slope marks a local maximum.
        if SlopeList[loc+1] < 0:
            print('There is a local maximum around x='+ str(round(FindX(loc),4)))
            ExtremaList.append(FindX(loc))
            x = FindX(loc)
            yList.append(eval(function))
        elif SlopeList[loc+1] > 0:
            pass
        elif SlopeList[loc+1] == 0:
            pass
    loc += 1
x = IntervalEnd - step #Determines if the last endpoint is a maximum or a minium. If the y value before it is higher than the endpoint, it is a min. If the y value is lower, it's a max.
y3 = eval(function)
x = IntervalEnd
y4 = eval(function)
if y3 > y4:
    print('There is a local minimum at x='+str(IntervalEnd))
elif y3 < y4:
    print('There is a local maxiumum at x='+str(IntervalEnd))
ExtremaList.append(IntervalEnd)
yList.append(y4)
print('The absolute maxiumum is y='+str(round(max(yList),4))) #The highest/lowest y among the recorded extrema are the absolute max/min on the interval.
print('The absolute minimum is y='+str(round(min(yList),4)))
#Finds increasing/decreasing
NumExtrema = len(ExtremaList)
IncDecList = [] #Creates a list of increasing/decreasing intervals
if SlopeList[0] > 0: #First slope positive: intervals alternate starting with 'increasing'.
    Runs = 1
    while Runs <= (NumExtrema-1):
        if Runs%2 != 0:
            IncDecList.append('increasing')
        elif Runs%2 == 0:
            IncDecList.append('decreasing')
        Runs += 1
elif SlopeList[0] < 0: #First slope negative: intervals alternate starting with 'decreasing'.
    Runs = 1
    while Runs <= (NumExtrema-1):
        if Runs%2 != 0:
            IncDecList.append('decreasing')
        elif Runs%2 == 0:
            IncDecList.append('increasing')
        Runs += 1
def IncDecIntervals(IncDecList, ExtremaList): #Prints each interval between consecutive extrema with its increasing/decreasing label.
    IncDecListLen = len(IncDecList)
    index = 0
    while index < (IncDecListLen):
        print('The function is '+IncDecList[index]+' on the interval ['+str(round(ExtremaList[index],4))+', '+str(round(ExtremaList[index+1],4))+'].')
        index += 1
IncDecIntervals(IncDecList, ExtremaList) #This calls the function above
#Takes the second derivative at each x value
SlopeList2 = [] #This a list of numerical derivatives at each of the x values from the first SlopeList transfered 0.001 units to the right.
a2 = IntervalBeg + 0.001
while a2 <= IntervalEnd: #This loop uses the symmetric difference quotient to determine the numerical derivative at each of the adjusted x values.
x = a2 + h
f1 = eval(function)
x = a2 - h
f2 = eval(function)
SymDif = (f1 - f2)/(2*h)
SlopeList2.append(SymDif)
a2 += step
SecondDeriv = [] #This is a list of the numerical second derivative at each step on the function.
i = 0
while i < (NumXVals-1): #The loop uses (y-y1)/(x-x1) to determine the slope using the symmetric difference quotients from SlopeList and SlopeList2 as y values and 0.001 as the difference in x values.
slope = (round(SlopeList2[i],6)-round(SlopeList[i],6))/0.001
SecondDeriv.append(slope)
i += 1
#Finds inflection points
def Find2DX(k): #Maps the k-th numerical second-derivative sample back to the x value it corresponds to.
    # Midpoint between the k-th and (k+1)-th step offsets from IntervalBeg;
    # assumes the globals IntervalBeg and step are already defined above.
    return(IntervalBeg + (step*(k)+step*(k+1))/2)
Num2DerivVals = len(SecondDeriv)
InflecList = [] #This a list of the function's inflection points.
k = 0
while k < (Num2DerivVals - 1):
if SecondDeriv[k] < 0: #If a given numerical second derivative is negative and the following second derivative is positive, the value is added to the list of inflection points.
if SecondDeriv[k+1] > 0:
InflecList.append(Find2DX(k))
print('There is an inflection point at around x='+str(round(Find2DX(k),4)))
elif SecondDeriv[k+1] < 0:
pass
elif SecondDeriv[k+1] == 0:
pass
elif SecondDeriv[k] > 0: #If a given numerical second derivative is positive and the following second derivative is negative, the value is added to the list of inflection points.
if SecondDeriv[k+1] < 0:
InflecList.append(Find2DX(k))
print('There is an inflection point at around x='+str(round(Find2DX(k),4)))
elif SecondDeriv[k+1] > 0:
pass
elif SecondDeriv[k+1] == 0:
pass
k += 1
if len(InflecList) == 0: #If there are no values added to the list of inflection points, the function has no inflection points.
print('There are no points of inflection.')
#Finds concave up/concave down
NumInflec = len(InflecList)
if NumInflec != 0: #If there are inflection points, the following if statements determine concavity.
ConUpDownList = [] #This creates a list similar to the increasing/decreasing one that records alternating intervals of concavity.
if SecondDeriv[0] > 0: #The program is finding which intervals are concave up or concave down. If the first numerical second derivative value is positive, then the program will find where the next interval where the second derivative is negative, the next positive, and so on.
Runs = 1
while Runs <= (NumInflec-1):
if Runs%2 != 0:
ConUpDownList.append('concave up')
elif Runs%2 == 0:
ConUpDownList.append('concave down')
Runs += 1
elif SecondDeriv[0] < 0: #If the first numerical second derivative value is negative, than the next interval will be positive, the next negative, and so on.
Runs = 1
while Runs <= (NumInflec-1):
if Runs%2 != 0:
ConUpDownList.append('concave up')
elif Runs%2 == 0:
ConUpDownList.append('concave down')
Runs += 1
        def ConIntervals(ConUpDownList, InflecList): #Prints the concave-up/concave-down interval between each pair of inflection points (endpoints rounded to 4 decimals).
            CUDLLen = len(ConUpDownList)
            index = 0
            while index < (CUDLLen):
                # ConUpDownList[index] describes the behaviour between the
                # index-th and (index+1)-th inflection points.
                print('The function is '+ConUpDownList[index]+' on the interval ['+str(round(InflecList[index],4))+', '+str(round(InflecList[index+1],4))+'].')
                index += 1
if SecondDeriv[0] > 0: #If the first numerical second derivative is positive, then the first interval will be concave up.
print('The function is concave up on the interval ['+str(round(IntervalBeg,4))+','+str(round(InflecList[0],4))+'].')
elif SecondDeriv[0] < 0: #If the first numerical second derivative is negative, then the first interval will be concave down.
print('The function is concave down on the interval ['+str(round(IntervalBeg,4))+','+str(round(InflecList[0],4))+'].')
ConIntervals(ConUpDownList, InflecList) #This calls the function above.
if SecondDeriv[-1] > 0: #If the last numerical second derivative is positive, then the last interval is concave up.
print('The function is concave up on the interval ['+str(round(InflecList[-1],4))+','+str(round(IntervalEnd,4))+'].')
elif SecondDeriv[-1] < 0: #If the last numerical second derivative is negative, then the last interval is concave down.
print('The function is concave down on the interval ['+str(round(InflecList[-1],4))+','+str(round(IntervalEnd,4))+'].')
elif NumInflec == 0: #If there are no inflection points, the following if statements decide concavity.
if SecondDeriv[0] > 0: #If the first numerical second derivative is positive, then the function is concave up on the whole interval.
print('The function is concave up on the interval ['+str(round(IntervalBeg,4))+','+str(round(IntervalEnd,4))+'].')
elif SecondDeriv[0] < 0: #If the first numerical second derivative is negative, then the function is concave down on the whole interval.
print('The function is concave down on the interval ['+str(round(IntervalBeg,4))+','+str(round(IntervalEnd,4))+'].')
elif SecondDeriv[0] == 0 and SecondDeriv[-1] == 0: #If the first and last numerical second derivatives are 0, the program assumes that the function has no concavity.
print('The function has no concavity.')
| true |
cead469d227a73d0ff4dbf350bab1266040738c1 | Python | flanker-d/coursera_python | /course_1/week05/03_asyncio/24_generator.py | UTF-8 | 453 | 3.546875 | 4 | [] | no_license | #делает то же самое что и итератор. но
# Does the same job as a hand-written iterator class, but:
# 1. no class declaration is required
# 2. no state has to be stored on an object or globally -- a local variable
#    on the generator's own frame is enough
def MyRangeGenerator(top):
    """Yield the integers 0, 1, ..., top - 1 (a tiny range() clone)."""
    value = 0
    while value < top:
        yield value
        value += 1

counter = MyRangeGenerator(3)
for item in counter:
    print(item)
66b815b4a2a87f5d70a4ecb170786d45477c7ae7 | Python | debu999/flaskmicroframework | /thermos/forms.py | UTF-8 | 3,072 | 2.53125 | 3 | [
"BSD-3-Clause"
] | permissive | from flask_login import current_user
from flask_wtf import FlaskForm
from wtforms.fields import StringField, PasswordField, BooleanField, SubmitField
from wtforms.fields.html5 import URLField
from wtforms.validators import DataRequired, url, Length, Regexp, EqualTo, Email, ValidationError
from thermos.models import User
class BookmarkForm(FlaskForm):
    """Form for adding/editing a bookmark: URL, description, owner and tags."""
    # Bookmark URL; must be present and syntactically valid.
    url = URLField("Enter your bookmark here: ", validators=[DataRequired(), url()])
    # NOTE(review): Length(5, 100) rejects an *empty* description even though
    # validate() below treats it as optional -- confirm the intended minimum.
    description = StringField('Enter URL description(optional) here: ', validators=[Length(5, 100)])
    user = StringField('Enter your username here: ')
    # Comma-separated, alphanumeric-only tags.
    tags = StringField("Tags", validators=[Regexp(r"[a-zA-Z0-9, ]*$",
                                                  message="Tags can contain only number and letters(comma separated)")])
    def validate(self):
        """Run the standard field validators plus bookmark-specific
        normalisation; returns True when the form is valid."""
        # Prepend a scheme so the url() validator accepts bare host names.
        if not self.url.data.startswith((r"http://", r"https://")):
            self.url.data = "".join(["https://",self.url.data])
        if not FlaskForm.validate(self):
            return False
        # Fall back to the URL itself when no description was supplied.
        if not self.description.data:
            self.description.data = self.url.data
        # Default the owner to the currently logged-in user.
        if not self.user.data:
            self.user.data = current_user.username
        # Strip whitespace, drop empty entries and de-duplicate the tags.
        stripped = set(filter(None, [t.strip() for t in self.tags.data.split(",")]))
        self.tags.data=",".join(sorted(stripped))
        return True
class LoginForm(FlaskForm):
    """Simple username/password login form with a remember-me option."""
    username = StringField("Your Username:", validators=[DataRequired(),])
    password = PasswordField("Password:", validators=[DataRequired(), ])
    remember_me = BooleanField("Keep Me Logged In")
    submit = SubmitField("Log In")
class SignupForm(FlaskForm):
    """Registration form; enforces unique username/email via the inline
    validate_<field> hooks that WTForms calls automatically."""
    # 3-16 characters, letters/digits/underscore only.
    username = StringField("Username", validators=[DataRequired(),
                                                   Length(3, 16),
                                                   Regexp("^[A-Za-z0-9_]{3,}$",
                                                          message="Username can contain letters, digits and _")
                                                   ])
    # Must match the confirmation field below.
    password = PasswordField("Password", validators=[DataRequired(),
                                                     EqualTo("password2", message="Password must match. Please retype password."),
                                                     ])
    password2 = PasswordField("Confirm Password", validators=[DataRequired()])
    email = StringField("Email", validators=[DataRequired(), Length(1,50), Email()])
    submit = SubmitField("SignUp")
    def validate_email(self, emailfield):
        """Reject addresses that already belong to an account."""
        if User.query.filter_by(email=emailfield.data).first():
            raise ValidationError("There is already a user with given email. If you have forgotten password, please reset.")
    def validate_username(self, usernamefield):
        """Reject usernames that are already taken."""
        if User.query.filter_by(username=usernamefield.data).first():
            raise ValidationError("This username is already taken.")
| true |
cc7488e7c84766c26797fac8f2ae4e70e0b7d7b8 | Python | jhoanatan25/don | /apps/backend.py | UTF-8 | 729 | 2.65625 | 3 | [] | no_license | from .models import person
class userBack:
    """Custom authentication backend: the ``username`` argument may hold a
    username *or* an email address; the password is checked on the match."""

    def authenticate(self, username=None, password=None):
        """Return the matching person on success, otherwise None.

        Tries a username lookup first, then falls back to an email lookup.
        """
        # BUG FIX: the original used bare ``except:`` clauses, which also
        # swallow SystemExit/KeyboardInterrupt. Exception is the widest we
        # should catch here (ideally person.DoesNotExist -- TODO confirm the
        # exception this model's .get() raises).
        try:
            user = person.objects.get(username=username)
        except Exception:
            try:
                user = person.objects.get(email=username)
            except Exception:
                user = None
        if user:
            print("User " + user.username + " is trying to enter")
            if user.check_password(password):
                return user
            else:
                return None
        else:
            return None

    def get_user(self, username):
        """Return the person with the given username, or None if absent."""
        try:
            return person.objects.get(username=username)
        except Exception:
            return None
| true |
658fb0ecc72e9fb612dfe8c3cc3d0323f8a02724 | Python | Yeedy/Leetcode | /435.py | UTF-8 | 1,035 | 3.4375 | 3 | [] | no_license | # 435. 无重叠区间
class Solution:
    def eraseOverlapIntervals(self, intervals: List[List[int]]) -> int:
        """Return the minimum number of intervals to remove so that the
        remaining intervals do not overlap (LeetCode 435).

        Greedy: sort by right endpoint and keep every interval whose start is
        at or after the end of the last kept one; everything else must go.
        O(n log n) time, O(n) for the sorted copy (the input is not mutated).
        """
        if not intervals:
            return 0
        # Earliest-finishing interval first maximises room for the rest.
        intervals = sorted(intervals, key=lambda iv: iv[1])
        kept = 0
        last_end = -float('inf')
        for start, end in intervals:
            if start >= last_end:  # no overlap with the last kept interval
                kept += 1
                last_end = end
        return len(intervals) - kept
| true |
4a9c8032aa9893b30eb548c13275600a81054bd9 | Python | Juddling/pystepper | /draw.py | UTF-8 | 941 | 3.046875 | 3 | [] | no_license | from stl import mesh
from numpy import linalg
import atexit
import pulse
your_mesh = mesh.Mesh.from_file('stl/circle.stl')
print("just use y and z")
last = None
ticks = 50000
def end():
    # Release the stepper GPIO pins at interpreter exit (registered with
    # atexit immediately below this definition).
    pulse.cleanup()
atexit.register(end)
def tick_rate(val):
    # e.g. y should move every 20 ticks
    # Maps an axis delta to "pulse every N ticks"; -1 is the sentinel for
    # "no movement on this axis" (move() checks for it before the modulo).
    if val == 0:
        return -1
    # NOTE(review): a negative delta produces a negative rate; move() relies
    # on rate > 0 for direction and on Python's modulo semantics for i % rate.
    return int(ticks / val)
def move(movedict):
    """Drive both stepper axes toward the y/z deltas in *movedict*."""
    # Convert the requested y/z deltas into per-axis pulse intervals.
    y_rate = tick_rate(movedict['y'])
    z_rate = tick_rate(movedict['z'])
    # Direction pins: a positive rate means positive direction.
    pulse.dir_y(y_rate > 0)
    pulse.dir_z(z_rate > 0)
    # One shared tick loop so the axes advance "simultaneously".
    for i in range(ticks):
        if y_rate != -1 and i % y_rate == 0:
            pulse.pulse_y()
        if z_rate != -1 and i % z_rate == 0:
            pulse.pulse_z()
# Walk every vertex of the STL mesh and move toward it.
for face in your_mesh.vectors:
    for v in face:
        # The very first vertex becomes the origin of all moves.
        if last is None:
            last = v
            continue
        # NOTE(review): `last` is never advanced after the first vertex, so
        # every move is computed relative to vertex 0 -- confirm whether
        # `last = v` was intended at the end of each iteration.
        towards = {'y': v[1]-last[1], 'z': v[2]-last[2], 'distance': linalg.norm(v-last)}
        move(towards)
        print(towards)
| true |
11c006c383d96c5cee9a8fe4172dc512da55e227 | Python | zw-999/learngit | /spider/urllib2_url_pagecontent.py | UTF-8 | 1,122 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
#利用urllib2通过指定的url攫取网页内容
# urllib2 用一个request对像来映射你提出的http请求,将返回一个相关请求response对像
import urllib2
req = urllib2.Request('http://www.baidu.com')
response = urllib2.urlopen(req)
page = response.read()
#print page
#在http请求时,可以发送data表单数据
import urllib
url = 'https://api.github.com/some/endpoint'
values = {'name' : 'WHY',
'location' : 'SDU',
'language' : 'Python' }
data = urllib.urlencode(values) #编码工作
req = urllib2.Request(url,data) #发送请求同时传data表单
reqponse = urllib2.urlopen(req) #接受反馈的信息
the_page = response.read()
print the_page
#设置Headers到http请求,浏览器确认身分是通过User-Agent头,当创建一个请对像,你可以给他一个包含头数据的字典
user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'
headers = {'User-Agent':user_agent}
data = urllib.urlencode(values)
req = urllib2.Request(url,data,headers)
response = urllib2.urlopen(req)
the_page= response.read()
| true |
1632c6630742fdcac830d94462f08d2bfa659ea4 | Python | sachin1005singh/complete-python | /python_program/turtle/event use.py | UTF-8 | 203 | 2.71875 | 3 | [] | no_license | import turtle
# BUG FIX: `screen` was never defined (NameError); turtle.screensize is the
# module-level call with the same effect.
turtle.screensize(400, 600)
turtle.home()
# Record the following moves as a polygon.
turtle.begin_poly()
turtle.fd(100)
turtle.left(20)
turtle.fd(30)
turtle.left(60)
turtle.fd(50)
turtle.end_poly()
# BUG FIX: Shape lives in the turtle module; the bare name raised NameError.
# A "compound" shape starts empty -- populate it with s.addcomponent(...).
s = turtle.Shape("compound")
| true |
12ae1c8a0d4a67d3a9a2103e7fbb14790fbc8130 | Python | kunsaeedan01/Guess-a-number | /main.py | UTF-8 | 611 | 3.875 | 4 | [] | no_license | from art import logo
import random

print(logo)
# randint's bounds are inclusive, so the secret is drawn from 1..101.
# NOTE(review): 101 looks like an off-by-one carried over from range() habits
# -- confirm whether 1..100 was intended.
number = random.randint(1, 101)
print("Welcome to the number guessing game")
level = input("Type e for easy or h for hard level:")
# Hard mode is the fallback, so an unrecognised answer no longer crashes
# with a NameError when `attempts` is read below.
attempts = 10 if level == "e" else 5
the_end = False
while not the_end:
    guess = int(input("Guess a number: "))
    if guess > number:
        print("Too high")
    elif guess < number:
        print("Too low")
    else:
        print(f"You found the number: {number}")
        # BUG FIX: the original wrote `the_end == True` (a comparison, not an
        # assignment), so the loop never terminated.
        the_end = True
    attempts -= 1
    if attempts == 0 and not the_end:
        # Same comparison-vs-assignment bug here: the game previously looped
        # forever after announcing "Game over".
        print("Game over, you ran out of attempts")
        the_end = True
| true |
6b69c089e497a02d37ae5fbcf78c9db84f91806a | Python | karthik25/easyApi | /result.py | UTF-8 | 214 | 2.84375 | 3 | [
"MIT"
] | permissive | class Result:
    def __init__(self):
        # Nothing to initialise: results are stored on the class itself.
        pass
last_result = None
result_type = ''
    @staticmethod
    def store_result(obj, type):
        # Cache the latest result and its type on the class (shared state
        # across all callers).
        # NOTE(review): the second parameter shadows the builtin `type`.
        Result.last_result = obj
        Result.result_type = type
f4fa04a45ea0a92b140df08eb38703c7dd0def5a | Python | yordanovagabriela/HackBulgaria | /week1/the_final_round/spam_and_eggs.py | UTF-8 | 409 | 3.75 | 4 | [] | no_license | def prepare_meal(number):
if number < 1:
return "\"\""
elif number % 3 == 0:
counter = 1
while number % 3**counter == 0:
counter += 1
meal = "spam " * (counter-1)
if number % 5 == 0:
meal += "and eggs"
return "\"%s\"" % meal
elif number % 5 == 0:
return "eggs"
else:
return "\"\""
print(prepare_meal(45))
| true |
541096bba9319d2f5cd338ed7e8670b845b3713f | Python | GuillaumePv/BadooBot | /main.py | UTF-8 | 2,343 | 2.828125 | 3 | [] | no_license | from selenium import webdriver
from time import sleep
class BadooBot():
    """Selenium bot that logs into Badoo through Facebook and auto-likes
    profiles until an unhandled popup stops it."""
    def __init__(self):
        # Facebook credentials are read interactively (never stored on disk).
        self.username = input("Nom utilisateur Facebook :")
        self.password = input("Mot de passe Facebook :")
        self.driver = webdriver.Chrome("./Driver/chromedriver")
        self.driver.get("https://badoo.com/fr/")
        sleep(2)
        # Open the "log in with Facebook" popup window.
        fb_btn = self.driver.find_element_by_xpath('//*[@id="page"]/div[2]/div[3]/div/div[3]/div/div[1]/div[2]/div/div/a')
        fb_btn.click()
        sleep(2)
        # Remember the main window, then drive the Facebook popup.
        base_window = self.driver.window_handles[0]
        self.driver.switch_to_window(self.driver.window_handles[1])
        email_in = self.driver.find_element_by_xpath('//*[@id="email"]')
        email_in.send_keys(self.username)
        pw_in = self.driver.find_element_by_xpath('//*[@id="pass"]')
        pw_in.send_keys(self.password)
        login_btn = self.driver.find_element_by_xpath('//*[@id="u_0_0"]')
        login_btn.click()
        sleep(2)
        try:
            # Some accounts get an extra confirmation button.
            log_btn = self.driver.find_element_by_xpath('//*[@id="u_0_4"]/div[2]/div[1]/div[1]/button')
            log_btn.click()
        except Exception:
            sleep(2)
        # Back to the Badoo window.
        self.driver.switch_to_window(base_window)
    def like(self):
        # Click the "like" control on the current profile card.
        self.driver.find_element_by_xpath('//*[@id="mm_cc"]/div[1]/section/div/div[2]/div/div[2]/div[1]/div[1]')\
            .click()
    def close_popup1(self):
        # Dismiss the first known popup variant.
        self.driver.find_element_by_xpath('/html/body/aside/section/div[1]/div/div[2]/div/div[1]')\
            .click()
    def close_popup2(self):
        # Dismiss the second known popup variant.
        self.driver.find_element_by_xpath('/html/body/aside/section/div[1]/div/div[3]/span')\
            .click()
    def close_popup3(self):
        # Dismiss the third known popup variant.
        self.driver.find_element_by_xpath('//*[@id="simple-page"]/div[3]/section/div[2]/div')\
            .click()
    def autolike(self):
        # Like profiles in a loop; `go` is never cleared, so the loop only
        # ends when close_popup3 (the last fallback) raises out of it.
        go = True
        while go:
            sleep(0.5)
            try:
                self.like()
            except Exception:
                # Like failed: most likely a popup is in the way -- try each
                # known dismiss button in turn.
                sleep(2)
                try:
                    self.close_popup1()
                except Exception:
                    try:
                        self.close_popup2()
                    except Exception:
                        self.close_popup3()
if __name__ == "__main__":
bot = BadooBot()
sleep(5)
bot.autolike() | true |
c9311cb23ee6b9edd921a954320d570228c002b8 | Python | Alirakym/PP2Summer | /TSIS 5/15.py | UTF-8 | 132 | 2.84375 | 3 | [] | no_license | from random import randint
from random import choice

# Print one randomly chosen line from input.txt.
with open("input.txt", "r") as f:
    lines = f.readlines()
# random.choice replaces the manual randint(0, len-1) indexing; like the
# original, an empty file still raises (IndexError instead of ValueError).
print(choice(lines))
0bad70902dd96c61c1ac3793094ae4736131528a | Python | SunshineTuring/hacker | /ch01/availableHost_text.py | UTF-8 | 292 | 2.671875 | 3 | [] | no_license | import os
# Ping-sweep the 192.168.100.0/24 subnet and log reachable hosts (Python 2).
Host = '192.168.100.'
fileName = "available.log"
sFile = open(fileName,'w')
for i in range(1,255):
    # "-n 1": send a single echo request (Windows ping syntax).
    status = os.system("ping -n 1 %s%s"%(Host, str(i)))
    if status == 0:
        # NOTE(review): no newline is written, so all reachable addresses end
        # up concatenated on a single line of the log file.
        sFile.write("%s%s"%(Host, str(i)))
        print "%s%s is available"%(Host, str(i))
sFile.close()
| true |
6f0e1eb2cd5288ad9a80e5d89698f696a428c88f | Python | Astony/Homeworks | /homework8/tests/test_for_ORM.py | UTF-8 | 1,514 | 3.28125 | 3 | [] | no_license | import os
import sqlite3
import pytest
from homework8.task02.DataBaseClass import TableData
def test_of_len_method(create_db):
    """Check len of TableData's instance"""
    # The fixture database seeds exactly three presidents.
    with TableData(create_db, "presidents") as presidents:
        assert len(presidents) == 3
def test_get_item_method(create_db):
    """Check the method of getting item from db in case when item exists"""
    # Rows come back as plain tuples: (name, country, ordinal).
    with TableData(create_db, "presidents") as presidents:
        assert presidents["Obama"] == ("Obama", "America", 2)
def test_contains_method(create_db):
    """Check the membership (in / not in) protocol of TableData."""
    with TableData(create_db, "presidents") as presidents:
        assert "Putin" in presidents
        # Idiom fix: `x not in y` instead of `not x in y`.
        assert "West" not in presidents
def test_iteration_method(create_db):
    """Check that iteration protocol is working via list comprehension"""
    # Each yielded row behaves like a mapping exposing a "name" field; the
    # order matches the fixture's insertion order.
    with TableData(create_db, "presidents") as presidents:
        presidents_name_list = [president["name"] for president in presidents]
        assert presidents_name_list == ["Trump", "Obama", "Putin"]
def test_not_existing_db():
    """A missing database file must raise IOError instead of silently
    creating a new, empty database."""
    # The f-prefix on the match string was pointless (no placeholders).
    with pytest.raises(IOError, match="No such db"):
        with TableData("abacaba", "president") as presidents:
            print("hello")
def test_wrong_arguments(create_db):
    """Check that it will be an error then we input wrong argument"""
    # NOTE(review): despite the docstring, a missing key currently yields []
    # rather than raising -- confirm which contract is intended.
    with TableData(create_db, "presidents") as presidents:
        assert presidents["Murphy"] == []
| true |
a01440fe165d47ad7397c3f3f97efb2bda97f576 | Python | gokul-subbu/dl | /scripts/rgb_hsv.py | UTF-8 | 322 | 2.640625 | 3 | [] | no_license | def rgb_hsv(r,g,b):
    # Normalise the 0-255 channel values to 0.0-1.0.
    r,g,b= r/255, g/255, b/255
    mx, mn = max(r, g,b), min(r, g,b)
    # Chroma: spread between the strongest and weakest channel.
    df=mx-mn
    # Hue in degrees [0, 360): 0 for greys, otherwise offset into the sector
    # of the dominant channel (red 0, green 120, blue 240).
    if mx==mn: h=0
    elif mx==r: h=(60* ((g-b)/df)+360)%360
    elif mx==g: h=(60* ((b-r)/df)+120)%360
    elif mx==b: h=(60* ((r-g)/df)+240)%360
    # Saturation and value as percentages [0, 100].
    if mx==0: s=0
    else: s=(df/mx)*100
    v=mx*100
    return h,s,v
| true |
0d163aef6f0cc49ec79cd1a4348dc402c71c3beb | Python | nobody48sheldor/convection | /convection.py | UTF-8 | 7,168 | 2.59375 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
from math import *
from functools import cache
from matplotlib import cm
from mpl_toolkits.mplot3d import axes3d
from matplotlib import style
from concurrent.futures import ProcessPoolExecutor
style.use('dark_background')
def psi_0(x, y):
    # Initial temperature field: two Gaussian hot spots centred at x = +/-2,
    # modulated by a Gaussian in y and scaled by the global peak temperature
    # `theta` (defined later at module level).
    T = theta*((np.exp(-0.2*(x-2)**2)+np.exp(-0.2*(x+2)**2)) * np.exp(-0.1*y**2))
    return(T)
def v(x):
    """Build the n x n advection-velocity table.

    Row i holds the velocity profile for inlet speed vx[i]: a Gaussian bump
    centred at x = 5.8, negated to encode the flow direction. Uses the
    module-level globals n and vx.
    """
    rows = []
    for i in range(n):
        row = [-vx[i]*np.exp(-1.5*(x[j]-5.8)**2) for j in range(n)]
        rows.append(row)
    return(rows)
def temperature_temps1(V1):
    """Explicit finite-difference march over the first quarter of the time
    window (steps 1 .. T/4) for velocity profile V1.

    Returns the first time (i*dt) at which the probe point cools below
    theta/20, or None (implicitly) if it never does within the window.
    Uses the module-level globals X, Y, T, n, D, dx, dy, dt, x, y, theta.
    """
    Temperature_ = []
    Temp = []
    Yj = []
    Xw = []
    # Frame 0 is the initial Gaussian temperature field.
    Temp.append(np.array(psi_0(X, Y)))
    i = 1
    while i < int(T/4):
        j = 0
        Yj = []
        while j < n-2:
            w = 0
            Xw = []
            while w < n-2:
                # Forward-Euler update: diffusion (central differences in x
                # and y) minus advection (one-sided difference in y).
                xw = Temp[i-1][j][w] + ((D*((Temp[i-1][j][w+2]- 2*Temp[i-1][j][w+1] + Temp[i-1][j][w])/(dx**2) + (Temp[i-1][j+2][w] - 2*Temp[i-1][j+1][w] + Temp[i-1][j][w])/(dy**2))) - (V1[w]*(Temp[i-1][j+1][w] - Temp[i-1][j][w])/dy)) * dt
                Xw.append(xw)
                # Probe point slightly right of centre, mid-height.
                if x[w] == x[int(n/2)+int((n/20)*2.2)]:
                    if y[j] == y[int(n/2)]:
                        Temperature_.append(xw)
                        if xw < theta/20:
                            # Extinction threshold reached: report the time.
                            return(i*dt)
                w = w + 1
            # Pad the last two columns/rows by repeating the final value so
            # the frame stays n x n.
            Xw.append(xw)
            Xw.append(xw)
            Yj.append(Xw)
            j = j + 1
        Yj.append(Xw)
        Yj.append(Xw)
        Yja = np.array(Yj)
        Temp.append(Yja)
        i = i + 1
        # print(i, "/", T)
def temperature_temps2(V2):
    """Same explicit march as temperature_temps1 but over the second time
    quarter (steps T/4 .. T/2) for velocity profile V2.

    NOTE(review): near-duplicate of temperature_temps1 -- only the i range
    and the velocity argument differ; consider one parameterised solver.
    NOTE(review): Temp holds only the frames appended so far, yet is indexed
    with the absolute step i (starting at T/4), so Temp[i-1] indexes out of
    range on the first sweep -- confirm the intended indexing.
    """
    Temperature_ = []
    Temp = []
    Yj = []
    Xw = []
    Temp.append(np.array(psi_0(X, Y)))
    i = int(T/4)
    while i < int(T/2):
        j = 0
        Yj = []
        while j < n-2:
            w = 0
            Xw = []
            while w < n-2:
                xw = Temp[i-1][j][w] + ((D*((Temp[i-1][j][w+2]- 2*Temp[i-1][j][w+1] + Temp[i-1][j][w])/(dx**2) + (Temp[i-1][j+2][w] - 2*Temp[i-1][j+1][w] + Temp[i-1][j][w])/(dy**2))) - (V2[w]*(Temp[i-1][j+1][w] - Temp[i-1][j][w])/dy)) * dt
                Xw.append(xw)
                if x[w] == x[int(n/2)+int((n/20)*2.2)]:
                    if y[j] == y[int(n/2)]:
                        Temperature_.append(xw)
                        if xw < theta/20:
                            return(i*dt)
                w = w + 1
            Xw.append(xw)
            Xw.append(xw)
            Yj.append(Xw)
            j = j + 1
        Yj.append(Xw)
        Yj.append(Xw)
        Yja = np.array(Yj)
        Temp.append(Yja)
        i = i + 1
        # print(i, "/", T)
def temperature_temps3(V3):
    """Same explicit march as temperature_temps1 but over the third time
    quarter (steps T/2 .. 3T/4) for velocity profile V3.

    NOTE(review): near-duplicate of temperature_temps1; see also the
    absolute-index Temp[i-1] issue noted on temperature_temps2.
    """
    Temperature_ = []
    Temp = []
    Yj = []
    Xw = []
    Temp.append(np.array(psi_0(X, Y)))
    i = int(T/2)
    while i < int(3*T/4):
        j = 0
        Yj = []
        while j < n-2:
            w = 0
            Xw = []
            while w < n-2:
                xw = Temp[i-1][j][w] + ((D*((Temp[i-1][j][w+2]- 2*Temp[i-1][j][w+1] + Temp[i-1][j][w])/(dx**2) + (Temp[i-1][j+2][w] - 2*Temp[i-1][j+1][w] + Temp[i-1][j][w])/(dy**2))) - (V3[w]*(Temp[i-1][j+1][w] - Temp[i-1][j][w])/dy)) * dt
                Xw.append(xw)
                if x[w] == x[int(n/2)+int((n/20)*2.2)]:
                    if y[j] == y[int(n/2)]:
                        Temperature_.append(xw)
                        if xw < theta/20:
                            return(i*dt)
                w = w + 1
            Xw.append(xw)
            Xw.append(xw)
            Yj.append(Xw)
            j = j + 1
        Yj.append(Xw)
        Yj.append(Xw)
        Yja = np.array(Yj)
        Temp.append(Yja)
        i = i + 1
        # print(i, "/", T)
def temperature_temps4(V4):
    """Same explicit march as temperature_temps1 but over the last time
    quarter (steps 3T/4 .. T) for velocity profile V4.

    NOTE(review): near-duplicate of temperature_temps1; see also the
    absolute-index Temp[i-1] issue noted on temperature_temps2.
    """
    Temperature_ = []
    Temp = []
    Yj = []
    Xw = []
    Temp.append(np.array(psi_0(X, Y)))
    i = int(3*T/4)
    while i < T:
        j = 0
        Yj = []
        while j < n-2:
            w = 0
            Xw = []
            while w < n-2:
                xw = Temp[i-1][j][w] + ((D*((Temp[i-1][j][w+2]- 2*Temp[i-1][j][w+1] + Temp[i-1][j][w])/(dx**2) + (Temp[i-1][j+2][w] - 2*Temp[i-1][j+1][w] + Temp[i-1][j][w])/(dy**2))) - (V4[w]*(Temp[i-1][j+1][w] - Temp[i-1][j][w])/dy)) * dt
                Xw.append(xw)
                if x[w] == x[int(n/2)+int((n/20)*2.2)]:
                    if y[j] == y[int(n/2)]:
                        Temperature_.append(xw)
                        if xw < theta/20:
                            return(i*dt)
                w = w + 1
            Xw.append(xw)
            Xw.append(xw)
            Yj.append(Xw)
            j = j + 1
        Yj.append(Xw)
        Yj.append(Xw)
        Yja = np.array(Yj)
        Temp.append(Yja)
        i = i + 1
        # print(i, "/", T)
def func1():
    """Sweep all n inlet velocities over the first time quarter and return
    the list of 1/extinction-time values (one entry per velocity).
    """
    Temps1 = []
    TEMP1 = 0
    V1 = []
    i = 0
    while i < n:
        V1 = Vx[i]
        # NOTE(review): the solver runs twice per velocity (once for the None
        # check, once for the value) -- deterministic but expensive.
        if temperature_temps1(V1) == None:
            # No threshold crossing: reuse the previous value. NOTE(review):
            # on the very first velocity this indexes Temps1[-1] of an empty
            # list and raises IndexError.
            TEMP1 = Temps1[i-1]
        else:
            TEMP1 = 1/temperature_temps1(V1)
        print("TEMP1 = ", TEMP1)
        Temps1.append(TEMP1)
        print(i)
        i = i + 1
    print(len(Temps1))
    return(Temps1)
def func2():
    """Sweep all n inlet velocities over the second time quarter; near
    duplicate of func1 (see its notes on the double solver call and the
    first-iteration fallback).
    """
    Temps2 = []
    TEMP2 = 0
    V2 = []
    j = 0
    while j < n:
        V2 = Vx[j]
        if temperature_temps2(V2) == None:
            TEMP2 = Temps2[j-1]
        else:
            TEMP2 = 1/temperature_temps2(V2)
        print("TEMP2 = ", TEMP2)
        Temps2.append(TEMP2)
        print(j)
        j = j + 1
    print(len(Temps2))
    return(Temps2)
def func3():
    """Sweep all n inlet velocities over the third time quarter; near
    duplicate of func1 (see its notes on the double solver call and the
    first-iteration fallback).
    """
    Temps3 = []
    TEMP3 = 0
    V3 = []
    w = 0
    while w < n:
        V3 = Vx[w]
        if temperature_temps3(V3) == None:
            TEMP3 = Temps3[w-1]
        else:
            TEMP3 = 1/temperature_temps3(V3)
        print("TEMP3 = ", TEMP3)
        Temps3.append(TEMP3)
        print(w)
        w = w + 1
    print(len(Temps3))
    return(Temps3)
def func4():
    """Sweep all n inlet velocities over the fourth time quarter and return
    the list of 1/extinction-time values (same shape as func1-3).

    BUG FIX: when the solver returned None the original read `Temps[h-1]`,
    a global that does not exist at module level (NameError); funcs 1-3 fall
    back to their own local list, so this now uses Temps4[h-1] consistently.
    """
    Temps4 = []
    h = 0
    while h < n:
        V4 = Vx[h]
        # Call the (deterministic, slow) solver once instead of twice.
        t_hit = temperature_temps4(V4)
        if t_hit is None:
            # No threshold crossing: reuse the previous value. Like funcs 1-3
            # this raises IndexError if it happens on the very first velocity.
            TEMP4 = Temps4[h - 1]
        else:
            TEMP4 = 1 / t_hit
        print("TEMP4 = ", TEMP4)
        Temps4.append(TEMP4)
        print(h)
        h = h + 1
    print(len(Temps4))
    return Temps4
# --- Simulation parameters and derived grids -------------------------------
n = 150        # grid points per axis (and number of inlet speeds swept)
T = 50         # number of time steps
tmax = 100     # total simulated time
D = 0.06       # diffusion coefficient
theta = 15     # peak initial temperature (see psi_0)
vx = np.linspace(0, 100, n)      # inlet speeds swept by func1-4
x = np.linspace(-10, 10, n)
y = np.linspace(-10, 10, n)
X, Y = np.meshgrid(x, y)
t = np.linspace(0, tmax, T)
dx = x[1]-x[0]  # grid spacings and time step
dy = y[1]-y[0]
dt = t[1]-t[0]
Vx = v(x)       # velocity table used by the temperature_temps* solvers
print(len(Vx), len(Vx[0]))
def main():
    """Run the four quarter-window sweeps in parallel and plot the results.

    NOTE(review): this function is defined but never called anywhere in the
    module (there is no `if __name__ == "__main__":` guard).
    """
    # NOTE(review): prefer `with ProcessPoolExecutor(...) as executor:` so
    # the pool is shut down even on error.
    executor = ProcessPoolExecutor(max_workers=10)
    P1 = executor.submit(func1)
    P2 = executor.submit(func2)
    P3 = executor.submit(func3)
    P4 = executor.submit(func4)
    T1 = P1.result()
    T2 = P2.result()
    T3 = P3.result()
    T4 = P4.result()
    Temps = []
    # BUG: T1..T4 are lists, so `range(T1-1)` raises TypeError; presumably
    # `range(len(T1))` (or Temps.extend(T1)) was intended. Note also that the
    # concatenated list would have 4n entries while vx below has only n.
    for i in range(T1-1):
        Temps.append(T1[i])
    for i in range(T2-1):
        Temps.append(T2[i])
    for i in range(T3-1):
        Temps.append(T3[i])
    for i in range(T4-1):
        Temps.append(T4[i])
    plt.imshow(psi_0(X, Y))
    plt.show()
    plt.plot(vx, Temps)
    plt.show()
| true |
ad9cb7ee84f5800eab339f68873ceb93a83b687e | Python | mahdi-zafarmand/leetcode_practice | /2.AddTwoNumbers.py | UTF-8 | 1,011 | 3.3125 | 3 | [] | no_license | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed-digit linked
        lists and return the sum in the same representation (LeetCode 2)."""
        head = ListNode()
        it = head
        carry = 0
        while (l1 != None) or (l2 != None):
            # Take the next digit from whichever list(s) still have one.
            if l1 == None:
                s = l2.val + carry
                l2 = l2.next
            elif l2 == None:
                s = l1.val + carry
                l1 = l1.next
            else:
                s = l1.val + l2.val + carry
                l1, l2 = l1.next, l2.next
            # Store the digit, propagate the carry.
            it.val = s % 10
            carry = s // 10
            # Only allocate the next node if another digit is coming, which
            # avoids a spurious trailing node.
            if (l1 != None) or (l2 != None):
                it.next = ListNode()
                it = it.next
        # A leftover carry becomes one final digit.
        if carry != 0:
            it.next = ListNode(carry, None)
        return head
| true |
d49508674e3a7675e46dba3be6af2b224581fcea | Python | mattiatritto/textgenerator | /train.py | UTF-8 | 3,772 | 2.65625 | 3 | [] | no_license | import tensorflow as tf
from tensorflow.keras.layers.experimental import preprocessing
from model import MyModel
from onestep import OneStep
from preprocessing import delete_line_with_word
import numpy as np
import os
import time
import re
# Converts a batch of character IDs back into text.
def text_from_ids(ids):
    # Look up each ID's character and join them along the last axis into a
    # single string tensor.
    return tf.strings.reduce_join(chars_from_ids(ids), axis=-1)
# Builds the (input, target) training pair from a single sequence.
def split_input_target(sequence):
    """Return (sequence[:-1], sequence[1:]): at every position the model sees
    one element and must predict the next one."""
    return sequence[:-1], sequence[1:]
#------------INIZIO DEL PROGRAMMA-------------#
#Download and read data
path_to_file = tf.keras.utils.get_file('shakespeare.txt', 'https://storage.googleapis.com/download.tensorflow.org/data/shakespeare.txt')
""" text = open(path_to_file, 'rb').read().decode(encoding='utf-8')"""
#Preprocessing data
delete_line_with_word('chat.txt', 'sticker')
delete_line_with_word('chat.txt', 'audio omesso')
text = open(r'chat.txt').read().lower()
text = re.sub("[\(\[].*?[\)\]]", "", text)
vocab = sorted(set(text))
print('{} caratteri unici.'.format(len(vocab)))
#Prima di iniziare ad allenare il modello, dobbiamo convertire le stringhe in numeri
ids_from_chars = preprocessing.StringLookup(vocabulary=list(vocab))
chars_from_ids = tf.keras.layers.experimental.preprocessing.StringLookup(vocabulary=ids_from_chars.get_vocabulary(), invert=True)
#Dividiamo il testo in più parti
all_ids = ids_from_chars(tf.strings.unicode_split(text, 'UTF-8'))
ids_dataset = tf.data.Dataset.from_tensor_slices(all_ids)
seq_length = 100
examples_per_epoch = len(text) # (seq_length+1)
sequences = ids_dataset.batch(seq_length+1, drop_remainder=True)
dataset = sequences.map(split_input_target)
#Prima di dare in pasto i dati al programma, randomizziamo i dati e li mettiamo in piccoli "batch"
BATCH_SIZE = 64
BUFFER_SIZE = 10000
dataset = (dataset.shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True).prefetch(tf.data.experimental.AUTOTUNE))
vocab_size = len(vocab)
embedding_dim = 256
rnn_units = 1024
model = MyModel(vocab_size=len(ids_from_chars.get_vocabulary()), embedding_dim=embedding_dim, rnn_units=rnn_units)
#Controlliamo la lunghezza dell'output
for input_example_batch, target_example_batch in dataset.take(1):
example_batch_predictions = model(input_example_batch)
print(example_batch_predictions.shape, "# (batch_size, sequence_length, vocab_size)")
model.summary()
sampled_indices = tf.random.categorical(example_batch_predictions[0], num_samples=1)
sampled_indices = tf.squeeze(sampled_indices,axis=-1).numpy()
#Dato che il modello ritorna logits, impostiamo il flag dei logits
loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
example_batch_loss = loss(target_example_batch, example_batch_predictions)
mean_loss = example_batch_loss.numpy().mean()
tf.exp(mean_loss).numpy()
model.compile(optimizer='adam', loss=loss)
#Definisco i checkpoint
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "checkpoint_{epoch}")
checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_prefix, save_weights_only=True)
EPOCHS = 30
history = model.fit(dataset, epochs=EPOCHS, callbacks=[checkpoint_callback])
one_step_model = OneStep(model, chars_from_ids, ids_from_chars)
start = time.time()
states = None
next_char = tf.constant(['ROMEO:'])
result = [next_char]
for n in range(1000):
next_char, states = one_step_model.generate_one_step(next_char, states=states)
result.append(next_char)
result = tf.strings.join(result)
end = time.time()
print(result[0].numpy().decode('utf-8'), '\n\n' + '_'*80)
#Salvataggio del modello
tf.saved_model.save(one_step_model, 'one_step') | true |
81517f1fb162e81049e6e12d58add76e0f5e9e05 | Python | jdunck/duplo | /duplo/doubles.py | UTF-8 | 10,181 | 2.859375 | 3 | [
"BSD-3-Clause"
] | permissive | import importlib, operator, sys
from collections import defaultdict
from contextlib import contextmanager
from . import six
class EmptyContext(ValueError):
    """Raised when popping a Context whose stack holds only its base frame."""
    pass
class Context(object):
    """
    A stack of dictionaries with shadowing: lookups search from the newest
    frame down to the base frame, writes always land in the newest frame.
    Models applying (push) and unapplying (pop) layers of doubles.
    """
    def __init__(self, default):
        self._default = default
        self.stack = [defaultdict(default)]

    def __getitem__(self, name):
        """Return the topmost binding of *name*, or default() if unbound."""
        for frame in reversed(self.stack):
            if name in frame:
                return frame[name]
        return self._default()

    def __setitem__(self, name, value):
        self.stack[-1][name] = value

    @property
    def depth(self):
        """Number of frames currently on the stack (never below 1)."""
        return len(self.stack)

    def push(self):
        """Open a fresh, empty frame that shadows everything below it."""
        self.stack.append({})

    def pop(self):
        """Discard and return the newest frame; the base frame is permanent."""
        if len(self.stack) == 1:
            raise EmptyContext("Context stack empty")
        return self.stack.pop()

    def keys(self):
        """All names bound in any frame (de-duplicated, unordered)."""
        seen = set()
        for frame in self.stack:
            seen.update(frame.keys())
        return list(seen)

    def items(self):
        """(name, effective value) pairs using shadowed lookup."""
        return [(name, self[name]) for name in self.keys()]

    def update(self, values):
        """Bulk-assign *values* into the newest frame."""
        self.stack[-1].update(values)
class DoublerBase(object):
    """
    An "interface" for managing doubles: subclasses implement apply() to
    install a test double and unapply() to remove it again.
    """
    def __init__(self, name):
        self.name = name
    def __unicode__(self):
        return u"<Double: {0}>".format(self.name)
    def __str__(self):
        # NOTE(review): `unicode` only exists on Python 2; under Python 3
        # this raises NameError (and __str__ should return str, not bytes).
        return unicode(self).encode('utf-8')
    def apply(self):
        # Install the double (subclass responsibility).
        raise NotImplementedError
    def unapply(self):
        # Remove the double, restoring the original (subclass responsibility).
        raise NotImplementedError
class MissingPatchTarget(ValueError):
    """Raised when a patch target (module or module attribute) cannot be
    resolved, or when a doubler is created with no targets at all."""
    pass
class UnexpectedUnapply(TypeError):
    """Raised when unapply() is called with no saved originals, i.e. before
    any successful apply()."""
    pass
class PatchingDoubler(DoublerBase):
"""
A doubler which is applied through monkey patches.
Targets is a list of importable names to be patched, e.g.
['some.module:name']
"""
def __init__(self, name, variant, targets):
super(PatchingDoubler, self).__init__(name)
if isinstance(targets, six.string_types):
targets = [targets]
if len(targets) < 1:
raise MissingPatchTarget("There must be at least 1 target to patch.")
self.targets = targets
self.normals = [] # set when first applied, same order as targets
self.variant = variant
def patching_attribute(self, name_maybe):
return name_maybe is not None
def _parse_target(self, target):
try:
module, name_maybe = target.split(':')
except ValueError:
module, name_maybe = target, None
return (module, name_maybe)
def _format_target(self, module_name, name_maybe):
if name_maybe is None:
return module_name
else:
return "{0}:{1}".format(module_name, name_maybe)
def _resolve_module(self, module_name, name_maybe):
try:
module = importlib.import_module(module_name)
except ImportError:
# If patching an attribute of a module, fail if we can't find
# the needed module.
# But if patching a module, it's OK if it didn't
# originally exist.
if name_maybe is None:
module = None
else:
raise MissingPatchTarget("Unable to find {0}".format(module_name))
else:
return module
def _resolve_variant(self, variant):
if isinstance(variant, six.string_types):
try:
return self._resolve_target(variant)[0]()
except MissingPatchTarget: # assume it's a literal value
return variant
return variant
def _resolve_target(self, target):
"""
Returns a getter and setter for the given target.
"""
module_name, name_maybe = self._parse_target(target)
module = self._resolve_module(module_name, name_maybe)
if self.patching_attribute(name_maybe):
def make_attr_getter():
def getter():
try:
return getattr(module, name_maybe)
except AttributeError:
formatted_name = self._format_target(module_name, name_maybe)
raise MissingPatchTarget("Unable to find {0}".format(formatted_name))
return getter
return make_attr_getter(), lambda value: setattr(module, name_maybe, value)
else:
def make_module_setter():
def setter(value):
if value is None:
del sys.modules[module_name]
else:
sys.modules[module_name] = value
return setter
return lambda: module, make_module_setter()
def apply(self):
    """Patch every target, recording each original value in self.normals
    (in target order) so unapply() can restore it later."""
    for target in self.targets:
        read_current, install = self._resolve_target(target)
        self.normals.append(read_current())
        install(self._resolve_variant(self.variant))
def unapply(self):
    """Restore every patched target to the value recorded by apply().

    Targets are walked in reverse while popping originals off the end of
    self.normals, so each target gets *its own* recorded value (and nested
    apply() calls unwind LIFO).  The old code walked targets forward while
    popping from the end, which restored the wrong values whenever there
    was more than one target.

    Raises UnexpectedUnapply when nothing has been applied.
    """
    if not self.normals:
        raise UnexpectedUnapply
    for target in reversed(self.targets):
        _getter, setter = self._resolve_target(target)
        setter(self.normals.pop())
class MissingDouble(ValueError):
    """No double with the given name is registered."""
class UnappliedDouble(ValueError):
    """The requested double has not been applied."""
class DuplicateRegistration(ValueError):
    """A double with the given name was already registered."""
class DoubleManager(object):
    """
    Applies each double once (and only once).

    Register doubles, then apply them or unapply them as needed.
    Calling apply on a previously-applied double does nothing, and
    similarly with unapply on previously-unapplied.

    .revert returns the doubles to the state they were in before
    the previous call to apply or unapply.
    """
    def __init__(self):
        # Layered name -> applied? mapping; each call to apply_/unapply_doubles
        # pushes a layer so revert() can pop back to the previous state.
        self._applieds = Context(bool)
        self.registry = {}
    def register_double(self, double):
        """Register a DoublerBase instance under its name (once only)."""
        if not isinstance(double, DoublerBase):
            raise MissingDouble("Unable to register {0}.".format(double))
        if double.name in self.registry:
            raise DuplicateRegistration("{0} was registered twice. Duplicate import?".format(double.name))
        self.registry[double.name] = double
    def _resolve_included(self, include, exclude):
        """
        Expands include and exclude into a concrete set of double names
        to work upon.

        Unknown names raise MissingDouble inside _conform_double_names (a
        dead, never-used `missing` computation that duplicated that check
        has been removed).  Supplying both include and exclude is an error.
        """
        include, exclude = self._conform_double_names(include), self._conform_double_names(exclude)
        if include is None and exclude is None:
            return set(self.registry.keys())
        if include is None:
            return set(self.registry.keys()) - set(exclude)
        if exclude is None:
            return set(include)
        raise ValueError("Unable to both include and exclude.")
    def _resolve_doubles(self, included):
        """
        Maps the given double names to double instances.
        """
        try:
            return [self.registry[name] for name in included]
        except KeyError:
            raise MissingDouble
    def _conform_double_names(self, doubles):
        """Normalize a single name or list of names to a list; None passes
        through.  Raises MissingDouble if any name is unregistered."""
        if doubles is None:
            return None
        if isinstance(doubles, six.string_types):
            doubles = [doubles]
        if not all(d in self.registry for d in doubles):
            raise MissingDouble
        return doubles
    @property
    def applied(self):
        """
        Returns the names of all currently-applied doubles.
        """
        return [name for name, is_on in self._applieds.items() if is_on]
    def is_applied(self, name):
        """Return True when the named double is currently applied."""
        return name in self.applied
    def apply_doubles(self, include=None, exclude=None):
        """Apply the selected doubles; returns the names actually applied."""
        return self._manage_doubles(operator.not_, 'apply', include, exclude)
    def unapply_doubles(self, include=None, exclude=None):
        """Unapply the selected doubles; returns the names actually unapplied."""
        return self._manage_doubles(operator.truth, 'unapply', include, exclude)
    def _manage_doubles(self, needs_action, action_attr, include=None, exclude=None):
        """Shared implementation of apply_/unapply_doubles.

        needs_action(current_status) -> True when the double still requires
        action_attr ('apply' or 'unapply') to be performed.  (This parameter
        was previously named `operator`, shadowing the operator module used
        by the two callers above.)
        """
        included = self._resolve_included(include, exclude)
        doubles = self._resolve_doubles(included)
        self._applieds.push()
        acted_on = []
        for double in doubles:
            status = self._applieds[double.name]
            # only do if not already done:
            if needs_action(status):
                # actually apply or unapply
                getattr(double, action_attr)()
                self._applieds[double.name] = not status
                acted_on.append(double.name)
        return acted_on
    def revert(self):
        """
        Return the double application to the state it was in prior to
        the most recent call to apply_ or unapply_doubles.
        """
        try:
            previous_doubles = self._applieds.pop()
        except EmptyContext:
            raise UnappliedDouble
        for double_name, was_applied in previous_doubles.items():
            if was_applied:
                self.registry[double_name].unapply()
            else:
                self.registry[double_name].apply()
def _take_action(manager, attr, doubles):
doubles = manager._conform_double_names(doubles)
getattr(manager, attr)(doubles)
yield
manager.revert()
@contextmanager
def unapplied(manager, doubles):
    """
    Unapply a double (if needed) within the block.
    """
    # Unusual but valid: @contextmanager simply drives whatever generator
    # this callable returns, so returning the _take_action generator works
    # without a yield here.
    return _take_action(manager, 'unapply_doubles', doubles)
@contextmanager
def applied(manager, doubles):
    """
    Apply a double (if needed) within the block.
    """
    # Same pattern as unapplied(): @contextmanager drives the generator
    # returned by _take_action, so no yield is needed in this function.
    return _take_action(manager, 'apply_doubles', doubles)
6dbb123a159f1f1ddd6eb40a21b3c5dd67e840d2 | Python | zhangsiyu1103/SynthesisRegressor | /utils.py | UTF-8 | 128 | 2.609375 | 3 | [] | no_license |
def construct_params(length):
    """Return the parameter names ["params_0", ..., "params_<length-1>"]."""
    return ["params_" + str(i) for i in range(length)]
| true |
ea9a9ff9249586b12cb94fc7e18f1b53ec42c0fb | Python | valentinemaris/ProjetGroupe5 | /duplicationFonction.py | UTF-8 | 4,608 | 3.265625 | 3 | [] | no_license | import trouve_fonction
import commentaires
import checkIndentation
import trouve_variables
"""
on considère qu'on as une base de donnèes de fonction classique sous forme de liste de ligne
"""
"""
le code à analyser est Code (liste de string)
input: code et code_controle sont 2 listes de strings
precision définie avec quelle précision on considère 2 codes identique
output: liste de dictionnaire qui pour chaque fonction de code donne les correspondente de code_controle
"""
def controle_duplication(Code, precision, code_controle):
    """
    For every function found in Code, look for similar line sequences in
    code_controle.

    Code, code_controle: lists of source lines.
    precision: minimum per-line similarity percentage (0-100).
    Returns one list of match dicts (see controle_duplicat_fonction) per
    function of Code, in order.
    """
    # Strip comments/indentation and normalize variable names so the
    # comparison is purely structural.
    Code = checkIndentation.retirerIndentation(commentaires.retirerCom(Code))
    code_controle = checkIndentation.retirerIndentation(commentaires.retirerCom(code_controle))
    Code = trouve_variables.snailVariables(Code, trouve_variables.countVariables(Code))
    code_controle = trouve_variables.snailVariables(code_controle, trouve_variables.countVariables(code_controle))
    similitude_fonctions = []
    for fonction in trouve_fonction.count_fonction(Code):
        corps = Code[fonction["start"] + 1:fonction["end"]]
        similitude_fonctions.append(controle_duplicat_fonction(corps, precision, code_controle))
    return similitude_fonctions
"""
controle si une fonction est semblable à d'autres,
renvois une liste de dictionniare qui indique:
'ligneControle' : où ce trouve la fonction dans le texte de controle
'pourcentage' : le pourcentage de similitude avec cette fonction
"""
def controle_duplicat_fonction(fonction, precision, code_controle):
    """
    Look for sequences of lines in code_controle that match the function
    body line by line.

    fonction: list of source lines (one function body).
    precision: minimum per-line similarity percentage (0-100); two lines
               match when their similarity is strictly above it.
    code_controle: list of source lines to search in.

    Returns a list of dicts:
      'ligneControle': index in code_controle where a matching window starts
      'pourcentage':   mean line-similarity percentage over that window

    The previous implementation advanced both indices on a match, which
    skipped every other control line and could index past the end of
    code_controle (IndexError); this version slides a fixed-size window.
    """
    similitude = []
    l = len(fonction)
    if l == 0:
        return similitude
    for debut in range(len(code_controle) - l + 1):
        pourcentage = 0
        for offset in range(l):
            prec = pourcentage_similitude_ligne(fonction[offset], code_controle[debut + offset])
            if prec <= precision:
                break  # this window fails; slide to the next start position
            pourcentage += prec / l
        else:
            similitude.append({"pourcentage": pourcentage, "ligneControle": debut})
    return similitude
"""
input: 2 strings
output: pourcentage de similitude des 2 strings
"""
def pourcentage_similitude_ligne(ligne1, ligne2):
    """
    Return the percentage (0-100) of positions at which the two lines hold
    the same character; the shorter line is right-padded with spaces so
    both are compared over the longer length.

    Fixes over the previous version: two empty lines no longer raise
    ZeroDivisionError (they are considered 100% identical), and the O(n)
    one-character-at-a-time padding loop is replaced by str.ljust.
    """
    l = max(len(ligne1), len(ligne2))
    if l == 0:
        return 100.0  # two empty lines are identical
    ligne1 = ligne1.ljust(l)
    ligne2 = ligne2.ljust(l)
    same = sum(1 for c1, c2 in zip(ligne1, ligne2) if c1 == c2)
    return same / l * 100
"""
input: 2 codes (tableau de strings) et une précision (float de 0 à 100)
output: print des données générales sur les duplicat entre les 2 input
"""
def print_resultats_similitude(Code, precision, code_controle):
    """
    Print summary statistics about duplication between the two codes:
    how many functions have at least one match and the mean of the best
    similarity percentage per function.
    """
    Liste_duplicat = controle_duplication(Code, precision, code_controle)
    s = sum(1 for matches in Liste_duplicat if len(matches) > 0)
    pourcentage_tot = sum(max_percent(matches) for matches in Liste_duplicat if len(matches) > 0)
    pourcentage_tot = pourcentage_tot / len(Liste_duplicat)
    print(str(s)+" fonctions pourraient e^tre copié")
    print("le pourcentage di similitude moyen avec d'autres codes est : "+str(pourcentage_tot)+"%")
def retuour_resresultats_similitude(Code, precision, code_controle):
    """
    Same computation as print_resultats_similitude, but returns the data
    instead of printing: (functions with at least one match, total number
    of functions, mean best-similarity percentage).

    NOTE(review): the misspelled name is kept for caller compatibility.
    """
    Liste_duplicat = controle_duplication(Code, precision, code_controle)
    s = 0
    pourcentage_tot = 0
    for matches in Liste_duplicat:
        if matches:
            s += 1
            pourcentage_tot += max_percent(matches)
    pourcentage_tot = pourcentage_tot / len(Liste_duplicat)
    return (s, len(Liste_duplicat), pourcentage_tot)
"""
input: La liste des dico qui représente les similitude d'une fonction avec le code_controle
output: retourne le pourcentage maximale de ressemblance avec une autre fonction
"""
def max_percent(Liste_dico):
    """
    Return the highest 'pourcentage' value among the similarity dicts
    produced by controle_duplicat_fonction.

    The hand-rolled scan is replaced by the built-in max(); the input is
    assumed non-empty, as before.
    """
    return max(k["pourcentage"] for k in Liste_dico)
| true |
91a487650d25138d183122299739803580ebe748 | Python | elliotCamblor/CTCI | /python/src/test_q_4_7.py | UTF-8 | 828 | 2.9375 | 3 | [] | no_license | import unittest
from q_4_7 import *
from lib.HSTree import BinaryTreeNode
class testcase1(unittest.TestCase):
def test_1(self):
root = BinaryTreeNode.buildTree([1, 2, 3], [2, 1, 3])
self.assertEqual(commonAncestor(root, root.left, root.right), root)
def test_2(self):
root = BinaryTreeNode.buildTree([1, 2, 3], [3, 2, 1])
self.assertEqual(commonAncestor(root, root.left, root.left.left), root.left)
def test_3(self):
root = BinaryTreeNode.buildTree([1, 2, 3], [3, 2, 1])
self.assertEqual(commonAncestor(root, root.left, root.left), root.left)
def test_4(self):
root = BinaryTreeNode.buildTree([1, 2, 3], [2, 1, 3])
self.assertEqual(commonAncestor(root, BinaryTreeNode(7), root.right), None)
if __name__ == "__main__":
unittest.main()
| true |
f78eebec5d2314ed9956dc8f7b6cb13f6c99d1bf | Python | AddisonG/codewars | /python/counting-change-combinations/counting-change-combinations.py | UTF-8 | 194 | 2.921875 | 3 | [] | no_license | def count_change(money, coins):
if money == 0: return 1
if money < 0: return 0
if not coins: return 0
return count_change(money-coins[0], coins) + count_change(money, coins[1:])
| true |
0dd23b7fe81b484cb402c0f40a54aa013b6e7114 | Python | kate711/day1 | /code/process.py | UTF-8 | 824 | 3.046875 | 3 | [] | no_license | import pandas as pd
import numpy as np
# datafile = 'D:/新建 Microsoft Office Excel 工作表.xlsx'
# data = pd.read_excel(datafile,header=None)
# min = (data-data.min())/(data.max()-data.min())
# zero = (data - data.mean())/data.std()
# float = data/10**np.ceil(np.log10(data.abs().max())) #小数定标规范化
# print("原始数据为:\n",data)
# print('--------------------')
# print('最小-最大规范化后的数据:\n',min)
from pandas import Series, DataFrame
# Demo of DataFrame descriptive statistics on a 4x3 frame of random normals.
# NOTE(review): 'frist' looks like a typo for 'first'; kept as-is because it
# is a runtime column label and changing it would alter the printed output.
df = DataFrame(np.random.randn(4, 3), index=list('abcd'), columns=['frist', 'second', 'third'])
print(df)
print(df.describe())  # count/mean/std/min/quartiles/max per column
print(df.sum())  # column sums
print(df.sum(axis=1))  # row sums
print('-----------')
print(df.idxmax(), df.idxmin(), df.idxmin(axis=1))  # index labels of extrema
print(df.cumsum())  # cumulative column sums
print(df.var())  # column variances
print(df.std())  # column standard deviations
print(df.pct_change())  # row-over-row percent change
print(df.cov())  # covariance matrix
print(df.corr())  # correlation matrix
| true |
58c0509a38805d6fa1a33012af98d5dd0807ea42 | Python | Eric-Le-Ge/ACM-ICPC-practice | /GCJ_kickstart/kickstart2019B/B.py | UTF-8 | 591 | 2.984375 | 3 | [] | no_license | T = int(raw_input())
def computeToLose(l):
tmp = []
for i in range(len(l)):
tup = l[i]
energy = max(0, tup[1]-tup[0]*tup[2]*(N-len(l)))
lost = min(energy, tup[2]*tup[0])
tmp.append([lost, energy, i])
return tmp
def solve(tuples):
    """Greedy: each of the N rounds, eliminate the candidate with the
    largest [lost, energy, index] score, accumulating its energy."""
    total = 0
    for _round in range(N):
        ranked = sorted(computeToLose(tuples))
        best = ranked[-1]
        total += best[1]
        tuples = tuples[:best[2]] + tuples[best[2] + 1:]
    return total
# For each case: read N rows of three ints, then print the greedy answer.
for t in range(T):
    N = int(raw_input())
    tuples = []
    for i in range(N):
        tuples.append([int(_) for _ in raw_input().split()])
    res = solve(tuples)
    print "Case #{}: {}".format(t+1, res)
| true |
da91b514b3ecf4cbc946f80088a3b1f9d3732824 | Python | eyehint/pyMuc | /cmds/벗어.py | UTF-8 | 4,337 | 2.515625 | 3 | [] | no_license | from objs.cmd import Command
class CmdObj(Command):
def cmd(self, ob, line):
if len(line) == 0:
ob.sendLine('☞ 사용법: [아이템 이름] 해제')
return
msg = ''
if line == '모두' or line == '전부':
cnt = 0
i = 0
for obj in ob.objs:
if obj.inUse:
obj.inUse = False
ob.armor -= getInt(obj['방어력'])
ob.attpower -= getInt(obj['공격력'])
option = obj.getOption()
if option != None:
for op in option:
if op == '힘':
ob._str -= option[op]
elif op == '민첩성':
ob._dex -= option[op]
elif op == '맷집':
ob._arm -= option[op]
elif op == '체력':
ob._maxhp -= option[op]
elif op == '내공':
ob._maxmp -= option[op]
elif op == '필살':
ob._critical -= option[op]
elif op == '운':
ob._criticalChance -= option[op]
elif op == '회피':
ob._miss -= option[op]
elif op == '명중':
ob._hit -= option[op]
elif op == '경험치':
ob._exp -= option[op]
elif op == '마법발견':
ob._magicChance -= option[op]
if obj['종류'] == '무기':
ob.weaponItem = None
ob.sendLine('당신이 [36m' + obj.get('이름') + '[37m' + han_obj(obj.getStrip('이름')) + ' 착용해제 합니다.')
#ob.sendRoom('%s %s 착용해제 합니다.' % (ob.han_iga(), obj.han_obj()))
msg += '%s %s 착용해제 합니다.\r\n' % (ob.han_iga(), obj.han_obj())
cnt = cnt + 1
if cnt == 0:
ob.sendLine('☞ 착용중인 장비가 없어요.')
return
else:
ob.sendRoom(msg[:-2])
else:
item = ob.findObjInUse(line)
if item == None:
ob.sendLine('☞ 그런 아이템이 소지품에 없어요.')
return
if item.inUse == False:
ob.sendLine('☞ 그런 아이템이 소지품에 없어요.')
return
item.inUse = False
ob.armor -= getInt(item['방어력'])
ob.attpower -= getInt(item['공격력'])
option = item.getOption()
if option != None:
for op in option:
if op == '힘':
ob._str -= option[op]
elif op == '민첩성':
ob._dex -= option[op]
elif op == '맷집':
ob._arm -= option[op]
elif op == '체력':
ob._maxhp -= option[op]
elif op == '내공':
ob._maxmp -= option[op]
elif op == '필살':
ob._critical -= option[op]
elif op == '운':
ob._criticalChance -= option[op]
elif op == '회피':
ob._miss -= option[op]
elif op == '명중':
ob._hit -= option[op]
elif op == '경험치':
ob._exp -= option[op]
elif op == '마법발견':
ob._magicChance -= option[op]
if item['종류'] == '무기':
ob.weaponItem = None
ob.sendLine('당신이 [36m' + item.get('이름') + '[37m' + han_obj(item.getStrip('이름')) + ' 착용해제 합니다.')
ob.sendRoom('%s %s 착용해제 합니다.' % (ob.han_iga(), item.han_obj()))
| true |
505c5ebf1572e0ad50932ea4f9e861e8a7149f2a | Python | manalig7/Fake-News-and-Cyber-Bullying-Detection-Using-Ensemble-Methods | /Fake_News_Detection/Word_Vector_Method_2/sg_ft.py | UTF-8 | 7,560 | 2.546875 | 3 | [] | no_license | import keras.backend as K
import multiprocessing
import numpy as np
np.random.seed(20)
from random import seed
seed(20)
import tensorflow as tf
tf.set_random_seed(20)
from gensim.models.word2vec import Word2Vec
from keras.callbacks import EarlyStopping
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv1D
from keras.optimizers import Adam
from nltk.stem.lancaster import LancasterStemmer
from nltk.tokenize import RegexpTokenizer
import io
from sklearn.metrics import recall_score
from sklearn.metrics import precision_score
from sklearn.metrics import f1_score
import gensim
from gensim.models import FastText
from gensim.models import Word2Vec
# Set random seed (for reproducibility)
corpus = []
labels = []
def fit_transform(d):
    """
    Encode each tokenized document as a fixed-length vector over the
    FastText vocabulary `voc`: slot j holds the mean of the word vector
    for voc[j] when that word occurs in the document, else 0.

    d -- list of token lists (one list per document).
    Returns a list of len(voc)-long lists of numbers.

    NOTE(review): reads the module-level globals `voc` and `model_FT`.
    Fixes over the old version: the per-word np.mean was loop-invariant but
    recomputed for every (document, word) pair, and membership was tested
    against a token *list* (O(n) per word) -- now a set.
    """
    word_means = [np.mean(model_FT.wv[word]) for word in voc]
    res = []
    for doc in d:
        tokens = set(doc)  # O(1) membership tests
        res.append([mean if word in tokens else 0
                    for word, mean in zip(voc, word_means)])
    return res
# Read the tab-separated training file: column 0 is the text, column 1 the
# integer label.  Tokenization is a simple whitespace split.
tsv = 'finaldataset_train.txt'
f=open(tsv,'r')
x = []
Y_train=[]
tokenized_corpus=[]
lent=[]
tokenizer = RegexpTokenizer(' ', gaps=True)
for line in f :
    ls=line.split('\t')
    x.append(ls[0])
    temp = []
    #print(ls[0])
    for j in tokenizer.tokenize(ls[0].decode('utf-8')):
        #print(j)
        temp.append(j)
    tokenized_corpus.append(temp)
    lent.append(len(temp))
    Y_train.append(int(ls[1]))
f.close()
#m=len(x)
"""
# Parse tweets and sentiments
with io.open(dataset_location, 'r', encoding='utf-8') as df:
    for i, line in enumerate(df):
        parts = line.split('\t')
        # Sentiment (0 = Negative, 1 = Positive)
        labels.append(int(parts[1]))
        # Tweet
        tweet = parts[0]
        corpus.append(tweet.strip().lower())
print('Corpus size: {}'.format(len(corpus)))
# Tokenize and stem
tkr = RegexpTokenizer('[a-zA-Z0-9@]+')
stemmer = LancasterStemmer()
tokenized_corpus = []
for i, tweet in enumerate(corpus):
    tokens = [stemmer.stem(t) for t in tkr.tokenize(tweet) if not t.startswith('@')]
    tokenized_corpus.append(tokens)
"""
"""
# Gensim Word2Vec model
vector_size = 120
window_size = 8
# Create Word2Vec
model_W2V = Word2Vec(sentences=tokenized_corpus,
size=vector_size,
window=window_size,
negative=20,
iter=40,
seed=1000,
workers=multiprocessing.cpu_count(),sg=1)
"""
# Load the pre-trained skip-gram FastText model from disk (training lines
# above are kept commented out for reference).
#model_FT = FastText(tokenized_corpus, size=10, window=5, min_count=1, workers=5, sg=1,max_vocab_size=10000)
#model_FT.save("sg_ft.model")
model_FT = gensim.models.FastText.load("sg_ft.model")
voc=list(model_FT.wv.vocab)
# Copy word vectors and delete Word2Vec model and original corpus to save memory
X_vecs = model_FT.wv
# Compute average and max tweet length
avg_length = 0.0
max_length = 0
for tweet in tokenized_corpus:
    if len(tweet) > max_length:
        max_length = len(tweet)
##################################READING IN THE TEST SET###################################
# Same format and tokenization as the training file above.
tsv = 'finaldataset_test.txt'
f=open(tsv,'r')
x = []
Y_test=[]
tokenized_corpus_test=[]
lent=[]
tokenizer = RegexpTokenizer(' ', gaps=True)
for line in f :
    ls=line.split('\t')
    x.append(ls[0])
    temp = []
    #print(ls[0])
    for j in tokenizer.tokenize(ls[0].decode('utf-8')):
        #print(j)
        temp.append(j)
    tokenized_corpus_test.append(temp)
    lent.append(len(temp))
    Y_test.append(int(ls[1]))
f.close()
#m=len(x)
"""
dataset_location = '/Users/haritareddy/Desktop/Major-Project/Fake_News_Detection/Model_on_Only_Train/finaldataset_test.txt'
corpus = []
labels_test = []
# Parse tweets and sentiments
with io.open(dataset_location, 'r', encoding='utf-8') as df:
    for i, line in enumerate(df):
        parts = line.split('\t')
        # Sentiment (0 = Negative, 1 = Positive)
        labels_test.append(int(parts[1]))
        tweet = parts[0]
        corpus.append(tweet.strip().lower())
tokenized_corpus_test = []
for i, tweet in enumerate(corpus):
    tokens = [stemmer.stem(t) for t in tkr.tokenize(tweet) if not t.startswith('@')]
    tokenized_corpus_test.append(tokens)
"""
# Extend max_length over the test documents as well.
for tweet in tokenized_corpus_test:
    if len(tweet) > max_length:
        max_length = len(tweet)
############################################################################################
print ("Reached Here")
# Tweet max length (number of tokens)
max_tweet_length = max_length
# Create train and test sets
# Generate random indexes
#indexes = set(np.random.choice(len(tokenized_corpus), train_size + test_size, replace=False))
# Vectorize both splits with the vocabulary-presence encoding.
X_train=fit_transform(tokenized_corpus)
print ("Done transforming train")
X_test=fit_transform(tokenized_corpus_test)
print ("Done transforming test")
#Y_train=labels
#Y_test=labels_test
print ("Finished with test")
"""
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(random_state=0, solver='lbfgs')
"""
"""
from sklearn.ensemble import GradientBoostingClassifier
clf = GradientBoostingClassifier(learning_rate=0.1, n_estimators=1000,max_depth=3, min_samples_split=5, min_samples_leaf=1, subsample=1,max_features='sqrt')
clf.fit(X_train,Y_train)
print "\nAccuracy on Training Set :"
print clf.score(X_train, Y_train)
print "Checking on Test Set"
print "\nAccuracy on Testing Set :"
print clf.score(X_test, Y_test)
y_pred=clf.predict(X_test)
print "\nPrecision Score"
print precision_score(Y_test, y_pred)
print "\nRecall Score"
print recall_score(Y_test, y_pred)
print "\nF1 Score"
print f1_score(Y_test, y_pred)
"""
print("################# Naive Bayes Classifier ####################")
from sklearn.naive_bayes import GaussianNB
clf = GaussianNB()
clf.fit(X_train,Y_train)
print ("\nAccuracy on Training Set :")
print (clf.score(X_train, Y_train))
print ("Checking on Test Set")
print ("\nAccuracy on Testing Set :")
print (clf.score(X_test, Y_test))
y_pred=clf.predict(X_test)
print ("\nPrecision Score")
print (precision_score(Y_test, y_pred))
print ("\nRecall Score")
print (recall_score(Y_test, y_pred))
print ("\nF1 Score")
print (f1_score(Y_test, y_pred))
print ("################### Random Forest Classifier ###############")
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier()
clf.fit(X_train,Y_train)
print ("\nAccuracy on Training Set :")
print (clf.score(X_train, Y_train))
print ("Checking on Test Set")
print ("\nAccuracy on Testing Set :")
print (clf.score(X_test, Y_test))
y_pred=clf.predict(X_test)
print ("\nPrecision Score")
print (precision_score(Y_test, y_pred))
print ("\nRecall Score")
print (recall_score(Y_test, y_pred))
print ("\nF1 Score")
print (f1_score(Y_test, y_pred))
print ("################### Logistic regression Classifier ###############")
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression(solver='lbfgs')
clf.fit(X_train,Y_train)
print ("\nAccuracy on Training Set :")
print (clf.score(X_train, Y_train))
print ("Checking on Test Set")
print ("\nAccuracy on Testing Set :")
print (clf.score(X_test, Y_test))
y_pred=clf.predict(X_test)
print ("\nPrecision Score")
print (precision_score(Y_test, y_pred))
print ("\nRecall Score")
print (recall_score(Y_test, y_pred))
print ("\nF1 Score")
print (f1_score(Y_test, y_pred))
#print f1_score(y_test, y_pred) | true |
960e5316749ba1727bc42097f506b88501e7d874 | Python | Aasthaengg/IBMdataset | /Python_codes/p03563/s914563541.py | UTF-8 | 62 | 3.34375 | 3 | [] | no_license | a = int(input())
b = int(input())
c = (b - a) * 2
print(a + c) | true |
866402dff8790d1c7cf8bba4e95638cb0972f992 | Python | ksinuk/python_open | /알고리즘/나는 학급회장이다.py | UTF-8 | 2,892 | 3.265625 | 3 | [] | no_license | import sys
sys.stdin = open("chairman_input.txt","r")
N = int(input())
for __ in range(N):
table = [[0,0,0] , [0,0,0] , [0,0,0]]
size = int(input())
for i in range(size):
a,b,c = map(int, input().split())
table[0][a-1] += 1
table[1][b-1] += 1
table[2][c-1] += 1
sum1 = table[0][0] + 2*table[0][1] + 3*table[0][2]
sum2 = table[1][0] + 2*table[1][1] + 3*table[1][2]
sum3 = table[2][0] + 2*table[2][1] + 3*table[2][2]
if sum1>sum2 and sum1>sum3: print(f"1 {sum1}")
elif sum2>sum1 and sum2>sum3: print(f"2 {sum2}")
elif sum3>sum1 and sum3>sum2: print(f"3 {sum3}")
elif sum1==sum2 and sum1>sum3:
if table[0][2]>table[1][2] or table[0][2]==table[1][2] and table[0][1]>table[1][1]: print(f"1 {sum1}")
elif table[0][2]<table[1][2] or table[0][2]==table[1][2] and table[0][1]<table[1][1]: print(f"2 {sum2}")
else: print(f"0 {sum1}")
elif sum1==sum3 and sum1>sum2:
if table[0][2]>table[2][2] or table[0][2]==table[2][2] and table[0][1]>table[2][1]: print(f"1 {sum1}")
elif table[0][2]<table[2][2] or table[0][2]==table[2][2] and table[0][1]<table[2][1]: print(f"3 {sum3}")
else: print(f"0 {sum1}")
elif sum2==sum3 and sum2>sum1:
if table[1][2]>table[2][2] or table[1][2]==table[2][2] and table[1][1]>table[2][1]: print(f"2 {sum2}")
elif table[1][2]<table[2][2] or table[1][2]==table[2][2] and table[1][1]<table[2][1]: print(f"3 {sum3}")
else: print(f"0 {sum2}")
else:# sum1==sum2==sum3
if table[2][2] > table[0][2] and table[2][2] > table[1][2]: print(f"3 {sum3}")
elif table[1][2] > table[0][2] and table[1][2] > table[2][2]: print(f"2 {sum2}")
elif table[0][2] > table[1][2] and table[0][2] > table[2][2]: print(f"1 {sum1}")
elif table[2][2] == table[0][2] and table[2][2] > table[1][2]:
if table[2][1]>table[0][1]: print(f"3 {sum3}")
elif table[2][1]<table[0][1]: print(f"1 {sum1}")
else: print(f"0 {sum1}")
elif table[1][2] == table[2][2] and table[1][2] > table[0][2]:
if table[2][1]>table[1][1]: print(f"3 {sum3}")
elif table[2][1]<table[1][1]: print(f"2 {sum2}")
else: print(f"0 {sum1}")
elif table[0][2] == table[1][2] and table[0][2] > table[2][2]:
if table[1][1]>table[0][1]: print(f"2 {sum2}")
elif table[1][1]<table[0][1]: print(f"1 {sum1}")
else: print(f"0 {sum1}")
else: #table[0][2] == table[1][2] and table[0][2] == table[2][2]:
if table[0][1] == table[1][1] and table[0][1] < table[2][1]: print(f"3 {sum3}")
elif table[0][1] == table[2][1] and table[0][1] < table[1][1]: print(f"2 {sum2}")
elif table[1][1] == table[2][1] and table[1][1] < table[0][1]: print(f"1 {sum1}")
else: print(f"0 {sum1}")
| true |
c2fa5d790306124336ca6217d95d3b0cff2e5f5c | Python | pluto-er/api-test | /venv/Lib/site-packages/readme_generator/__main__.py | UTF-8 | 473 | 2.65625 | 3 | [] | no_license | #!/usr/bin/env python
"""generate README"""
import click
import readme_generator
MODULE_NAME = "readme_generator"
PROG_NAME = 'python -m %s' % MODULE_NAME
USAGE = 'python -m %s source_folder ...' % MODULE_NAME
@click.command()
@click.argument('folders', nargs=-1, required=True)
def _cli(folders):
readme = readme_generator.Readme(folders)
string = readme.render()
if string:
print(string)
if __name__ == '__main__':
_cli(prog_name=PROG_NAME)
| true |
05b80f5644c0a7a9e78562f6ce3b89b4c3b351d2 | Python | Aasthaengg/IBMdataset | /Python_codes/p03478/s295836682.py | UTF-8 | 194 | 3.03125 | 3 | [] | no_license | n,a,b = list(map(int, input().split()))
def sum_digit(x):
s = 0
while x:
s += x % 10
x //= 10
return s
print(sum([i for i in range(1,n+1) if a <= sum_digit(i) <= b])) | true |
5f2d63224bbfd62d0986536ff57ba017193d960c | Python | zgq346712481/datafaker | /tests/unit/test_testutils.py | UTF-8 | 813 | 2.671875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
import os
import unittest
from datafaker.testutils import FileCreator
from datafaker.utils import read_file_lines
class TestFileCreator(unittest.TestCase):
    """Tests for datafaker.testutils.FileCreator and utils.read_file_lines."""
    @classmethod
    def setUpClass(cls):
        # One shared FileCreator for the whole class; cleaned up in teardown.
        cls.fc = FileCreator()
    @classmethod
    def tearDownClass(cls):
        cls.fc.remove_all()
    def test_create_file(self):
        # create_file should produce an existing file of 8 bytes
        # (per this fixture's expectation).
        fullpath = self.fc.create_file('file')
        self.assertEqual(os.path.exists(fullpath), True)
        self.assertEqual(os.path.getsize(fullpath), 8)
    def test_create_size_file(self):
        # An '8K' size spec should yield a file of exactly 8 KiB.
        fullpath = self.fc.create_size_file('file.txt', '8K')
        self.assertEqual(os.path.getsize(fullpath), 8*1024)
    def test_read_file_lines(self):
        # Smoke test only: checks read_file_lines does not raise.
        filepath = "__init__.py"
        read_file_lines(filepath)
| true |
461ae609e6a4c51d4762eb56999214debeb2e709 | Python | vedantshr/python-learning | /homework/day4/problem3.py | UTF-8 | 900 | 3.03125 | 3 | [] | no_license | if __name__ == "__main__":
n = int(input())
l = []
for n in range(n):
name = input()
score = float(input())
l.append([name, score])
l = sorted(l)
print (l)
# n = int(input())
# l = list()
# l1 = list()
# for n in range(n):
# name = input()
# score = float(input())
# l.append(name)
# l1.append(score)
# print(l,"\n",l1)
# for i in range(len(l1)):
# mini = l1[0]
# for j in range(len(l1)):
# if mini > l1[j]:
# mini = l1[j]
# x = l1.index(mini)
# l.remove(l[x])
# l1.remove(mini)
# print(l,"\n",l1)
# for i in range(len(l1)):
# minm = l1[0]
# for j in range(len(l1)):
# if minm > l1[j]:
# minm = l1[j]
# x = l1.index(minm)
# print(l[x])
| true |
e9aa8fe0cbce78af0380d2201ce982fd60aa55c3 | Python | Makhanya/PythonMasterClass | /dictionary/dictionaryComprehension.py | UTF-8 | 1,087 | 4.03125 | 4 | [] | no_license | # The syntax
# {_:_for_ in _}
#
# our first example
# members = dict(first=1,second = 2, third = 3)
# squared_numbers = {key:value ** 2 for key,value in numbers.items()}
# print(squared_numbers) #{first':1, 'second':4, 'third':9}
#
# members = dict(first=1, second=2, third=3, forth=4)
# print(members)
# squared_members = {x: (y**2) for x, y in members.items()}
# print(squared_members)
# More examples
# print({num: num ** 2 for num in [1, 2, 3, 4, 5, 6, 7]})
# str1 = "ABC"
# str2 = "123"
# comb = {str1[i]: str2[i]for i in range(0, len(str1))}
# print(comb)
# instructor = {'name': 'makhanya', ' city': 'Bisho',
# 'color': 'purple'}
# print(instructor)
# print({x.upper(): y.upper() for x, y in instructor.items()})
# Conditional logig with dictionaries
# num_list = [1,2,3,4]
# {num:("even" if num % 2 == 0 else "odd") for num in num_list}
# {1:'odd', 2: 'even', 3:'odd', 4:'even'}
num_list = [1, 2, 3, 4]
# NOTE(review): num_list is defined but unused -- the comprehension below
# iterates range(1, 10) instead; presumably intentional for the demo, confirm.
print({num: ('even' if num % 2 == 0 else 'odd') for num in range(1, 10)})
| true |
a2988a0fdae0b0e701262932bc0c76f110c438b0 | Python | chx-chx/newcode | /day04/3.数据集介绍.py | UTF-8 | 1,592 | 3.5 | 4 | [] | no_license | from sklearn.datasets import load_iris
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.model_selection import train_test_split
# 1.数据集获取
# 1.1小数据集获取
iris = load_iris()
print(iris)
# 1.2大数据集获取
# new = fetch_20newsgroups()
# print(new)
# 2.数据集属性描述
# print("数据集中特征值是:\n", iris.data)
# print("数据集中目标值是:\n", iris.target)
# print("数据集中特征值名字是:\n", iris.feature_names)
# print("数据集中目标值名字是:\n", iris.target_names)
# print("数据集的描述:\n", iris.DESCR)
# 3.数据可视化
# 3.1 数据类型转换,把数据用DataFrame储存
iris_data = pd.DataFrame(data=iris.data, columns=['Sepal_Length', 'Sepal_Width', 'Petal_Length', 'Petal_Width'])
iris_data["target"] = iris.target
def iris_plot(data, col1, col2):
sns.lmplot(x=col1, y=col2, data=data, hue="target", fit_reg=False)
plt.title("莺尾花数据展示")
plt.xlabel(col1)
plt.ylabel(col2)
plt.show()
# iris_plot(iris_data, "Sepal_Length", "Petal_Width")
# Example plots (left disabled):
# iris_plot(iris_data, "Sepal_Length", "Petal_Width")
# iris_plot(iris_data, "Sepal_Width", "Petal_Length")
# 4. Split the dataset (80% train / 20% test, fixed random seed)
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.2, random_state=22)
# print("训练集的特征值是:\n", x_train)
# print("测试集的特征值是:\n", x_test)
# print("训练集的目标值是:\n", y_train)
# print("测试集的目标值是:\n", x_test)
print("训练集的目标值形状:\n", y_train.shape)  # shape of the train targets
print("测试集的目标值形状:\n", y_test.shape)  # shape of the test targets
babacaacb3e1345914c6031d6dbdece374648fcf | Python | phejohnwang/Computer-Vision-Practice | /Video2Image.py | UTF-8 | 1,579 | 2.890625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
@author: pheno
Convert video into image frames (Sample00XXX.png)
Usage: python Video2Image.py -g Trial_1_2017_07_12_13_55_35.avi ./Depth
Currently set FPSCounter = 1 for full frames conversion
"""
import argparse
import cv2
def main(video_file_name, output_folder_path, grayscale):
FPSCounter = 1
SampleCounter = 1
cap = cv2.VideoCapture(video_file_name)
while (cap.isOpened()):
ret, frame = cap.read()
if not ret:
break
if FPSCounter == 1:
if grayscale:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
img_path = output_folder_path + '/Sample%05d.png' % SampleCounter
cv2.imwrite(img_path, frame)
if SampleCounter%300==0:
print(int(SampleCounter/300))
SampleCounter = SampleCounter + 1
FPSCounter = 1
else:
FPSCounter = FPSCounter + 1
cap.release()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="convert video into image frames (Sample00XXX.png)")
parser.add_argument("-g", "--grayscale", action="store_true", help="save as 8 bit grayscale")
parser.add_argument("input", help="input video file name")
parser.add_argument("output", help="output folder path")
args = parser.parse_args()
if args.grayscale:
grayscale = True
else:
grayscale = False
main(args.input, args.output, grayscale)
| true |
55161bf4a3ce1633575905631a60c9591bc4bcf8 | Python | kpence/easy_db | /tests/database_test.py | UTF-8 | 2,944 | 2.5625 | 3 | [
"MIT"
] | permissive | import unittest
import sys
sys.path.insert(1, '..')
import easy_db
class TestSQLite(unittest.TestCase):
    """Integration tests for easy_db.DataBase against the checked-in
    test_sqlite3_db.db fixture (its row counts are hard-coded below)."""
    def setUp(self):
        self.database = easy_db.DataBase('test_sqlite3_db.db')
    def test_dbtype(self):
        # The backend should be detected as SQLite 3.
        print(self.database.db_type)
        self.assertTrue(self.database.db_type == 'SQLITE3')
    def test_tablename_pull(self):
        # Fixture has 3 tables, and names come back sorted.
        tables = self.database.pull_all_table_names()
        print(tables)
        self.assertTrue(len(tables) == 3)
        self.assertTrue(tables == sorted(tables))
    def test_full_table_pull(self):
        # Full pull returns a list of row dicts; fixture table has 31 rows.
        test_table_data = self.database.pull_table('TEST_TABLE')
        print(test_table_data[0])
        self.assertTrue(type(test_table_data) == list)
        self.assertTrue(type(test_table_data[0]) == dict)
        self.assertTrue(len(test_table_data) == 31)
    def test_full_table_pull_specific_columns(self):
        # Restricting columns yields dicts with exactly those two keys.
        test_table_data = self.database.pull_table('TEST_TABLE', columns=('row_id', 'value_1'))
        print(test_table_data[0])
        self.assertTrue(type(test_table_data) == list)
        self.assertTrue(type(test_table_data[0]) == dict)
        self.assertTrue(len(test_table_data) == 31)
        self.assertTrue(len(test_table_data[0].keys()) == 2)
    def test_pull_where_id_in_list(self):
        # Filtered pull returns only rows whose 'parameter' is in the list.
        test_pulled_data = self.database.pull_table_where_id_in_list('THIRD_TABLE', 'parameter', [0.66, 0.67], use_multip=False)
        self.assertTrue(len(test_pulled_data) == 116)
        self.assertTrue(all(d['parameter'] in [0.66, 0.67] for d in test_pulled_data))
    def test_table_creation_and_deletion(self):
        # Round-trip: create, append two rows, drop; passing means no raise.
        self.database.create_table('TEST_TABLE_CREATION', {'col_1': str, 'col_2': float})
        self.database.append_to_table('TEST_TABLE_CREATION', [{'col_1': 'row_A', 'col_2': 1.5}, {'col_1': 'row_B', 'col_2': 3.7}])
        self.database.drop_table('TEST_TABLE_CREATION')
        self.assertTrue(True)
    def test_progress_callback(self):
        # Smoke test: a progress_handler callable is accepted during a pull.
        callback = lambda *args: print('Making progress...')
        data = self.database.pull_table('THIRD_TABLE', progress_handler=callback)
class TestUtil(unittest.TestCase):
    """Exercise easy_db.util.name_clean and the wrapper's rejection of
    malicious identifiers."""
    def setUp(self):
        self.database = easy_db.DataBase('test_sqlite3_db.db')
    def test_name_clean(self):
        """Plain identifiers pass; injection-flavoured ones are rejected."""
        for ok_name in ('table', 'TABLE_1'):
            self.assertTrue(easy_db.util.name_clean(ok_name))
        for bad_name in ('Malic10s;--', 'DROP TABLE;', '{email=dude@test.com}', 'drop'):
            self.assertFalse(easy_db.util.name_clean(bad_name))
    def test_malicious_query(self):
        """Malicious table/column names make pull_table return None."""
        data = self.database.pull_table('DROP TABLE TEST_TABLE')
        self.assertTrue(data is None)
        data = self.database.pull_table('TEST_TABLE', columns=('row_id;1=1;--', 'value_1'))
        self.assertTrue(data is None)
if __name__ == '__main__':
    # buffer=True captures stdout/stderr per test and only shows it on failure.
    unittest.main(buffer=True)
| true |
0fc77904a541f18f33664f2aa9dfa271d17d377d | Python | coconutstd/Text-Recognition | /directory.py | UTF-8 | 475 | 2.921875 | 3 | [] | no_license | import os
import glob
def image_list(directory):
    """Return the bare file names of every '.jpg' file found by walking
    directory recursively (extension match is exact and case-sensitive)."""
    found = []
    for dirpath, dirnames, filenames in os.walk(directory):
        found.extend(name for name in filenames
                     if os.path.splitext(name)[-1] == '.jpg')
    return found
def remove_all_files(directory):
    """Delete every file under directory, recursively; directories are kept.

    Paths are built with os.path.join instead of manual '/' concatenation,
    so the function works correctly on every platform.
    """
    for path, dirs, files in os.walk(directory):
        for filename in files:
            os.remove(os.path.join(path, filename))
| true |
77982f2198fdfd1158c2a8acedc179cc978e87d8 | Python | lamminhbao/pwnjutsu | /TAnalyzer.py | UTF-8 | 7,166 | 2.515625 | 3 | [] | no_license | #v1.5
from util import *
from Instruction import *
from Debugger import *
from Memory import *
from Register import *
from Address import *
from TList import *
from TObject import *
import parser
class TAnalyzer:
logger = logging.getLogger('TAnalyzer')
def __init__(self, target_file_name):
self.debugger = Debugger(target_file_name)
self.tlist = TList()
self.step_flag = STEP_INTO
self.default_payload = 'AAAAAAAAAAAAAAAA'
self.deep_level = 1
self.tobj_list = []
self.detected = 0
def print_dst_src_dict(self, obj):
tmp_dict = obj.get_dict_dst_src()
for k in tmp_dict:
self.logger.debug('%s %s %s %s', '\t', k, "<-----", tmp_dict[k])
@staticmethod
def add_list_loop(listoflist):
tmp = []
for i in listoflist:
tmp += i
return tmp
def check_n_add_or_remove(self, inst):
tmp_dict = {}
for src in inst.get_src():
taint_src = self.tlist.check(src)
if taint_src != None:
self.logger.info('Taint source: %s' % str(taint_src))
tmp_dict[taint_src] = inst.get_dst(src)
else:
self.logger.debug('%s isn\'t in TList' % str(src))
tmp_taint_dst = self.add_list_loop(tmp_dict.values())
self.tlist.add(tmp_taint_dst)
tmp_clean_dst = [obj for obj in inst.get_dst() if obj not in tmp_taint_dst]
self.tlist.remove(tmp_clean_dst)
return tmp_dict
def handle_inst(self, inst):
# set step_flag -> step into
self.step_flag = STEP_INTO
# print
self.print_dst_src_dict(inst)
return self.check_n_add_or_remove(inst)
def handle_call_inst(self, inst):
f = inst.function
if f and f.is_known():
# set step_flag -> step over if know this function
self.step_flag = STEP_OVER
# print
self.print_dst_src_dict(f)
return self.check_n_add_or_remove(f)
else:
# handle call inst
return self.handle_inst(inst)
def stop_condition(self, res_from_dbg, inst):
if 'The program is not being run.' in res_from_dbg:
return 0
elif isinstance(inst, Instruction):
if inst.op == 'ret':
self.deep_level -= 1
elif inst.op == 'call' and self.step_flag == STEP_INTO:
self.deep_level += 1
return self.deep_level
def check_tobj(self, tobj, obj):
if isinstance(tobj.dst, Register) and isinstance(obj, Register):
if tobj.dst.get_name() == obj.get_name():
return True
if isinstance(tobj.dst, Memory) and isinstance(obj, Memory):
if obj.contains(tobj.dst)[0] != NOT_CONTAIN:
return True
def find_tobj_src(self, tobj, tobj_list):
for iobj in tobj_list:
if isinstance(tobj.src, Register) and isinstance(iobj.dst, Register):
if tobj.src.get_name() == iobj.dst.get_name():
return iobj
if isinstance(tobj.src, Memory) and isinstance(iobj.dst, Memory):
if iobj.dst.contains(tobj.src)[0] == FULL_CONTAIN:
return iobj
return None
def taint_chain(self, tobj):
chain = [tobj]
next_tobj = tobj
tmp_list = self.tobj_list[:]
while next_tobj:
next_tobj = self.find_tobj_src(next_tobj, tmp_list)
if next_tobj:
tmp_list.remove(next_tobj)
chain.append(next_tobj)
return chain
def check_sink(self, taint_dict, inst):
if taint_dict:
for isrc in taint_dict:
for idst in taint_dict[isrc]:
tmp_tobj = TObject(idst, isrc, inst)
self.tobj_list.append(tmp_tobj)
if self.check_tobj(tmp_tobj, eip):
if self.debugger.elf.canary:
alert = 'EIP is tainted!\n'
alert += 'Detect the binary using stack canaries.\n'
alert += 'Execution flow may not be hijacked.'
else:
alert = 'EIP is tainted!\n'
alert += 'The binary don\'t use stack canaries.\n'
alert += 'Execution flow can be hijacked.'
return alert, self.taint_chain(tmp_tobj)
if inst.get_type() == CALL_INST:
if inst.function.get_name() == 'printf':
format_str = Memory(inst.function.get_args(0), DEFAULT_STRLEN)
taint_src = self.tlist.check(format_str, RETURN_ROOT)
if taint_src != None:
tmp_tobj = TObject(format_str, taint_src, inst)
alert = '1st argument (format string) of printf is tainted!\nFormat string attack can be performed.'
return alert, self.taint_chain(tmp_tobj)
elif inst.function.get_name() == 'fopen':
path = Memory(inst.function.get_args(0), DEFAULT_STRLEN)
taint_src = self.tlist.check(path, RETURN_ROOT)
if taint_src != None:
if path._address_value != taint_src._address_value:
tmp_tobj = TObject(path, taint_src, inst)
alert = '1st argument (path) of fopen is tainted!\nArbitrary file can be read.'
return alert, self.taint_chain(tmp_tobj)
else:
pass
return '', []
def check_input_file(self):
alert = ''
flag = DEFAULT_FILE
if self.debugger.elf.canary:
alert += 'Detect the binary using stack canaries.\n'
alert += 'This tool may not run correctly.\n'
flag = CANARY_DETECT
if self.debugger.elf.bits != 32:
alert += 'Detect %d-bit binary.\n' % (self.debugger.elf.bit)
alert += 'This tool does not support %d-bit binary.' % (self.debugger.elf.bit)
flag = NOT_32BIT_ELF
return flag, alert
def start(self):
res_from_dbg = ''
inst = None
count = 0
flag, alert = self.check_input_file()
if flag == NOT_32BIT_ELF:
exit(alert)
elif flag == CANARY_DETECT:
if parser.cmd_args.force:
print alert
else:
exit(alert)
try:
while(self.stop_condition(res_from_dbg, inst)):
inst = self.debugger.get_current_instruction()
self.logger.info(inst)
inst = Instruction(inst)
inst_type = inst.get_type()
# new_taint_obj = []
# taint_src = []
taint_dict = {}
if inst_type == CALL_INST:
# new_taint_obj, taint_src = self.handle_call_inst(inst)
taint_dict = self.handle_call_inst(inst)
elif inst_type == CONDITION_INST:
pass
else:
# new_taint_obj, taint_src = self.handle_inst(inst)
taint_dict = self.handle_inst(inst)
if self.step_flag == STEP_OVER:
self.logger.info('Step over')
## 1.6
self.logger.debug('Taint_dict: %s', taint_dict)
check_sink, chain = self.check_sink(taint_dict, inst)
if check_sink:
self.detected = 1
print check_sink
for i in chain:
print '\t->', i
if inst.is_need_input():
if not parser.cmd_args.input:
res_from_dbg = self.debugger.step(self.default_payload, self.step_flag)
else:
manual_input = raw_input('program need input> ').strip()
res_from_dbg = self.debugger.step(manual_input, self.step_flag)
else:
res_from_dbg = self.debugger.step(None, self.step_flag)
if parser.cmd_args.output:
print '[Output when step]', res_from_dbg
count += 1
if not self.detected:
print 'No vulnerability was detected.'
except Exception as e:
print e
return count
def main(path):
    """Run the taint analysis on the binary at path and report timing."""
    tainter = TAnalyzer(path)
    t = time.time()
    count = tainter.start()
    print 'Number of instruction:', count
    print 'Time consumming:', time.time() - t
if __name__ == '__main__':
    # parser is the project's CLI module; presumably it parses the command
    # line and invokes main(path) -- verify against parser.argparser.
    parser.argparser(main)
| true |
fba7e2551e441d9371029e529cb3f7c556d9b9d5 | Python | zopepy/leetcode | /friend_circles.py | UTF-8 | 988 | 3.03125 | 3 | [] | no_license | class Solution:
def findCircleNum(self, adj):
"""
:type M: List[List[int]]
:rtype: int
"""
def get_friends(m, i, n):
children = []
for j in range(0, n):
if m[i][j] == 1 and i!=j:
children.append(j)
return children
visited = set()
l = len(adj)
circles = 0
for node in range(l):
if node in visited:
continue
cur = [node]
while cur:
newcur = []
for curnode in cur:
friends = get_friends(adj, curnode, l)
for frd in friends:
if frd not in visited:
newcur.append(frd)
visited.add(curnode)
cur = newcur
circles += 1
visited.add(node)
return circles
amd = []  # adjacency matrix under test; empty -> zero circles
s = Solution()
print(s.findCircleNum(amd))  # prints 0
5a942d96da9f24014c9722d4b269f4644f57840b | Python | MichalGiemza/SieciNeur | /Hopfield/Hopfield.py | UTF-8 | 1,428 | 3.484375 | 3 | [] | no_license |
x = 4  # pattern width (columns)
y = 5  # pattern height (rows)
N = x * y  # number of neurons = cells per pattern
hi = 0  # external bias term used in pole_wypadkowe
def element(m, el):
    """Return the el-th entry of 2-D grid m in row-major order.

    Bug fix: the previous mapping m[el / y][el % x - 1] was not a
    bijection -- e.g. el = 0 and el = 4 both resolved to (0, -1) and the
    last row was never reached.  Indexing by the actual row width fixes
    that and also generalizes to any rectangular grid.
    """
    cols = len(m[0])
    return m[el // cols][el % cols]
def wypisz(X):
    """Print pattern X one row per line (Python 2 print statement)."""
    for a in X:
        print a
def kopiuj(src):
    """Return an element-wise copy of 2-D list src.

    Bug fix: the previous body executed `dst = src` inside the loop, which
    rebound the local name to the source object itself -- the "copy" was
    the very same list, so mutating the result mutated the original.
    This version also works for any grid size, not just x by y.
    """
    return [row[:] for row in src]
def sgn(x):
    """Sign with the Hopfield convention: -1 for negatives, +1 otherwise."""
    return -1 if x < 0 else 1
def pole_wypadkowe(t, W, i):
    """Net input ("local field") of neuron i for state t under weights W.

    NOTE(review): the bias hi is added once per summed term (N-1 times),
    not once per neuron.  With hi == 0 this is harmless, but confirm the
    intent before setting a non-zero bias.
    """
    suma = 0
    for j in range(N):
        if i != j:
            suma += W[i][j] * element(t, j) + hi
    return suma
def odtworz(te):
    """Run asynchronous Hopfield updates on pattern te until it stops
    changing, and return the recovered pattern."""
    t = kopiuj(te)
    while True:
        t_poprz = kopiuj(t)
        for i in range(N):
            # Bug fix: neuron i lives at row i // x, column i % x (row-major).
            # The previous write index t[i / y][i % x - 1] collided for
            # several i (e.g. i = 0 and i = 4 both hit (0, -1)) and never
            # touched the last row.
            t[i // x][i % x] = sgn(pole_wypadkowe(t_poprz, W, i))
        if t_poprz == t:
            break
    return t
# Training patterns: y rows of x entries in {-1, +1}
# (horizontal stripes and a vertical bar).
tr = [[
    [1, 1, 1, 1],
    [-1, -1, -1, -1],
    [1, 1, 1, 1],
    [-1, -1, -1, -1],
    [1, 1, 1, 1]
],[
    [-1, 1, 1, -1],
    [-1, 1, 1, -1],
    [-1, 1, 1, -1],
    [-1, 1, 1, -1],
    [-1, 1, 1, -1]
]]
# Noisy probe patterns used below to test recall.
te1 = [
    [1, 1, 1, -1],
    [1, -1, 1, -1],
    [1, -1, -1, 1],
    [1, -1, -1, 1],
    [1, 1, 1, 1]
]
te2 = [
    [1, -1, -1, -1],
    [1, -1, -1, -1],
    [1, -1, 1, -1],
    [1, -1, -1, 1],
    [1, 1, -1, -1]
]
print "Trening:"
wypisz(tr[0])
print ""
wypisz(tr[1])
print "\n"
# Wyznaczanie wag
W = [[0 for i in range(N)] for j in range(N)]
for t in tr:
# Suma
for i in range(N):
for j in range(N):
W[i][j] += element(t, i) * element(t, j)
# 1 / N
W[i][j] /= N
# Test
t = odtworz(te1)
print "\nTest:"
wypisz(t)
t = odtworz(te2)
print "\nTest:"
wypisz(t) | true |
324db30b21ede3093c5d51d13620074bf769d86e | Python | hvwesten/REL-experiments | /mapping/freebasewikimapper.py | UTF-8 | 2,942 | 2.890625 | 3 | [] | no_license | import gzip, re, os, json
from wikimapper import WikiMapper
"""
Class responsible for mapping Freebase ids to Wikipedia titles.
Uses the wikimapper (https://github.com/jcklie/wikimapper)
"""
class FreebaseWikipediaMapper:
def __init__(self, base_url, wikimapper_path):
self.mapping_folder = os.path.join(base_url, 'mapping/data')
self.fb2w_url = os.path.join(self.mapping_folder, 'fb2w.nt.gz') # Freebase to Wikidata file
self.wikimapper_url = os.path.join(self.mapping_folder, wikimapper_path)
self.wikimapper = WikiMapper(self.wikimapper_url) # Mapper from Wikidata to Wikipedia
self.mapping_dict = self.__create_fb2wp_dict()
def __create_fb2wp_dict(self):
lines_read = 0
fb2wp_dict = {}
fb2wp_save_file = os.path.join(self.mapping_folder, 'fb2wp_dict.json')
if os.path.isfile(fb2wp_save_file):
print("Loading Freebase to Wikipedia mapping dictionary")
with open(fb2wp_save_file, 'r') as f:
fb2wp_dict = json.load(f)
else:
print("Creating Freebase to Wikipedia mapping dictionary")
with gzip.open(self.fb2w_url, 'rt') as f:
fb_regex = re.compile('<http://rdf.freebase.com/ns(.*)>')
wd_regex = re.compile('<http://www.wikidata.org/entity/(.*)>')
for line in f:
lines_read += 1
if lines_read % 100000 == 0:
print("Processed {} lines".format(lines_read))
break
if line.startswith('#') or lines_read == 4:
pass
else:
line = line.strip()
parts = line.split('\t')
fb_url = parts[0]
fb_id = fb_regex.match(fb_url).group(1).replace('.', '/')
wd_url = parts[2]
wd_id = wd_regex.match(wd_url).group(1)
# Get wikipedia titles for wikidata ID using wikimapper
wp_titles = self.wikimapper.id_to_titles(wd_id)
fb2wp_dict[fb_id] = {wd_id: wp_titles}
# Save the dictionary in a json file for future experiments
with open(fb2wp_save_file, 'w') as f:
json.dump(fb2wp_dict, f)
return fb2wp_dict
def get_title(self, freebase_id):
if freebase_id in self.mapping_dict:
results = list(self.mapping_dict[freebase_id].values())[0]
if len(results) > 0:
return results[0] # return first candidate of the wikipedia titles
return "<>"
if __name__ == "__main__":
base_url = ""
fbwikimapper = FreebaseWikipediaMapper(base_url, 'index_enwiki-20190420.db')
print(fbwikimapper.get_title('/m/0695j'))
| true |
f13710d08d20f415502bde9fc7075a89c4e1d901 | Python | zahid-rahman/web-crawler | /web/main.py | UTF-8 | 1,790 | 2.71875 | 3 | [] | no_license | from bs4 import BeautifulSoup as soup
from urllib.request import urlopen as uReq
# Scrape product name, price, and rating from a Flipkart search-results page
# and write them to product_info.csv (free-form text, not strict CSV).
my_url = 'https://www.flipkart.com/search?q=iphone&otracker=search&otracker1=search&marketplace=FLIPKART&as-show=on&as=off'
uClient = uReq(my_url)
page_html = uClient.read()
uClient.close()
page_soup = soup(page_html, "html.parser")
# Each product card on the results page lives in a div with this class.
containers = page_soup.findAll("div", {"class": "_1UoZlX"})
fileName = "product_info.csv"
# Fix: manage the output file with a context manager so it is closed even
# if parsing raises partway through the loop.
with open(fileName, 'w') as f:
    for container in containers:
        product_name = container.img['alt']
        price_container = container.find_all("div", {"class": "_1vC4OE _2rQ-NK"})
        price = price_container[0].text.strip()
        rating = container.find_all("div", {"class": "hGSR34 _2beYZw"})
        product_rating = rating[0].text
        # Normalise e.g. "₹1,23,456" to "Rs.123456".
        trim_price = ''.join(price.split(','))
        rm_rupee = trim_price.split("₹")
        add_price = "Rs." + rm_rupee[1]
        split_price = add_price.split("E")  # drop any trailing EMI text
        final_price = split_price[0]
        # Keep only the leading numeric part of the rating text.
        split_rating = product_rating.split(" ")
        final_rating = split_rating[0]
        print("Product name :"+product_name.replace(",","|")+"\n"+"Price :"+final_price+"\n"+"product rating :"+final_rating+"\n\n")
        f.write("Product name :"+product_name.replace(",","|")+"\n"+"Price :"+final_price+"\n"+"product rating :"+final_rating+"\n\n")