blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
feee6a973e2885bcf64d99f8824d41619ce82081 | Python | haotianzhu/Questions_Solutions | /container_with_max_water/solution.py | UTF-8 | 718 | 3.875 | 4 | [
"WTFPL"
] | permissive | import math
class Solution(object):
    def maxArea(self, height):
        """
        :type height: List[int]
        :rtype: int

        Two-pointer scan: the water between the walls at the two pointers
        is limited by the shorter wall, so moving the shorter side inward
        is the only move that can ever improve the area.
        """
        best = 0
        left, right = 0, len(height) - 1
        while left < right:
            left_wall = height[left]
            right_wall = height[right]
            # Area is bounded by the shorter wall times the gap width.
            candidate = min(left_wall, right_wall) * (right - left)
            if candidate > best:
                best = candidate
            # Advance the pointer at the shorter wall.
            if left_wall < right_wall:
                left += 1
            else:
                right -= 1
        return best
if __name__ == '__main__':
    # Quick smoke test of the two-pointer solver.
    solver = Solution()
    print([1, 2, 4, 3])
    print(solver.maxArea([2, 3, 4, 5, 18, 17, 6]))
| true |
1d33f72d234e29510c6d0ba07b8b8f7e7c597d49 | Python | anish10375/Python_course | /multiplication _table(17).py | UTF-8 | 451 | 3.234375 | 3 | [] | no_license |
# Print a ten-row multiplication table for the entered number.
# NOTE(review): the labels are hard-coded to "17" while the products use the
# entered value, exactly as in the original — output is misleading unless the
# user types 17; confirm the intended behaviour.
multiplicand = int(input(" enter multiplier here "))
for factor in range(1, 11):
    print(f" 17 * {factor} = ", multiplicand * factor)
| true |
52f4176a38da13010118312dc197d5478055f787 | Python | LucasAyoub/TensorFlow | /Subtração.py | UTF-8 | 353 | 3.078125 | 3 | [] | no_license | import tensorflow as tf
with tf.compat.v1.Session() as sess:
    # Two random 3-element tensors and their element-wise difference.
    rand_a = tf.random.normal([3], 2.0)
    rand_b = tf.random.uniform([3], 1.0, 4.0)
    diff = tf.subtract(rand_a, rand_b)
    # Fetch all three tensors in ONE Session.run call: separate run calls
    # re-sample the random ops, so the printed difference would not match
    # the printed operands.
    a_val, b_val, diff_val = sess.run([rand_a, rand_b, diff])
    print('\nTensor rand_a: ', a_val)
    print('\nTensor rand_b: ', b_val)
    print('\nSubtração entre os 2 tensores é: ', diff_val)
2f7099ee2d864a932f7d04b4c52b4b834470f1c0 | Python | sourjp/programming_contest | /AtCoder/ABC149B.py | UTF-8 | 191 | 2.6875 | 3 | [] | no_license | a, b, k = map(int, input().split())
def test(a, b, k):
if k - a > 0:
kk = k - a
return 0, max(0, b - kk)
else:
return a-k, b
# Print the two remaining pile sizes separated by a space.
print(*test(a, b, k))
e1f93a25a155531f57e401d20b546df4470a6bab | Python | kumaya/python-programs | /LRUcache.py | UTF-8 | 1,100 | 3.6875 | 4 | [] | no_license | # test implementation of LRU cache.
from collections import OrderedDict
class LRUCache(object):
    """Least-recently-used cache backed by an OrderedDict.

    Insertion order doubles as recency order: the oldest (front) entry is
    always the eviction candidate.
    """

    def __init__(self, capacity):
        # Maximum number of entries kept before evicting the LRU one.
        self.__capacity = capacity
        self.__cache = OrderedDict()

    def set(self, key, value):
        """Insert or refresh key; evict the LRU entry when over capacity."""
        if key in self.__cache:
            # Drop the stale position; the key is re-inserted at the back.
            del self.__cache[key]
        elif len(self.__cache) >= self.__capacity:
            # popitem(last=False) removes the oldest (front) entry.
            self.__cache.popitem(last=False)
        self.__cache[key] = value

    def get(self, key):
        """Return the cached value (refreshing its recency) or -1 if absent."""
        if key not in self.__cache:
            return -1
        value = self.__cache.pop(key)
        self.__cache[key] = value
        return value

    def get_cache_details(self):
        # Expose the underlying OrderedDict (mainly for debugging/tests).
        return self.__cache
if __name__ == "__main__":
    # Smoke test: capacity-2 cache; setting a third distinct key evicts the
    # least-recently-used entry.  (Python 2 print-statement syntax.)
    cache = LRUCache(2)
    cache.set('name', 'john')
    cache.set('age', '12')
    print cache.get_cache_details()
    cache.set('name', 'doey')
    print cache.get_cache_details()
    print cache.get('age')
    print cache.get_cache_details()
    # 'aaa' exceeds the capacity, so the LRU key ('age') is evicted.
    cache.set('aaa', 'aaaaa')
    print cache.get_cache_details()
    print cache.get('age')
    print cache.get_cache_details()
| true |
522268acd9d45bbb88a573b56eff5b4fd861e624 | Python | szabgab/slides | /talks/python-pair-programming-and-tdd-workshop/test/test_mymath_more.py | UTF-8 | 256 | 3 | 3 | [] | no_license | import mymath
def test_div():
    # Table-driven check of mymath.div on exact integer divisions.
    for dividend, divisor, expected in [(6, 3, 2), (42, 2, 21)]:
        assert mymath.div(dividend, divisor) == expected
def test_add():
    # Table-driven check of mymath.add, including zero and negatives.
    for lhs, rhs, expected in [(2, 2, 4), (0, 0, 0), (-1, 1, 0), (19, 23, 42)]:
        assert mymath.add(lhs, rhs) == expected
| true |
c3503da4111e8b7760e84da563ff69c1bc5590ff | Python | mattwhitworth/regressor | /regressionEngine/linearRegressor/linearRegressor.py | UTF-8 | 815 | 2.796875 | 3 | [] | no_license | import numpy as np
from regressionEngine import abstractRegressor as ar
class LinearRegressor(ar.AbstractRegressor):
    """Least-squares linear regressor trained by gradient descent.

    Feature normalization, prediction plumbing and the descent loop live in
    the AbstractRegressor base class; this class supplies the hypothesis
    and cost specific to linear regression.
    """

    def calculate_hypothesis(self, inputValues):
        # Predictions: X @ theta^T for the current parameter vector.
        return inputValues.dot(self.thetas.T)

    def compute_cost(self):
        """Return the mean squared-error cost over the normalized features."""
        predictions = self.calculate_hypothesis(self.normalizedFeatures)
        residuals = predictions - self.inputYs
        sample_count = np.shape(self.normalizedFeatures)[0]
        return np.square(residuals).sum() / (2 * sample_count)

    def predict(self, fileName):
        return self.make_prediction(fileName)

    def predict_for_plot(self, fileName):
        return self.make_prediction(fileName)

    def train(self, alpha, maxIterations, reportFrequency):
        # Delegates to the shared gradient-descent loop in the base class.
        self.perform_gradient_descent(alpha, maxIterations, reportFrequency)
| true |
1fa20cb75ac482826d33d830a480d91183759a15 | Python | bedomohamed/transcribe-piano | /transcribe2.py | UTF-8 | 4,037 | 2.515625 | 3 | [] | no_license | #%pylab inline
#pylab.rcParams['figure.figsize'] = 26, 5
from scipy.io import wavfile
from scipy.fftpack import fft
from scipy.optimize import lsq_linear
import numpy as np
import pylab
import sys, os, subprocess
import random
from pyknon.genmidi import Midi
from pyknon.music import NoteSeq, Note
#learning
# Global tuning constants for note synthesis and spectrogram analysis.
# NOTE(review): this file uses Python 2 semantics ("print x" below), so "/"
# between ints is integer division (part_length, timespan).
classes = np.arange(-30, 40) #notes from F#2 to E8
n_classes = len(classes)
tempo = 180 #tempo of file for sample notes. fps / tempo gives how much of the sample we analyze
fps = 6 #frequency of the chords we recognize
part_length = 44100 / fps #size of a part we analyze (samples per window at 44.1 kHz)
input_length = 600 #number of amplitudes of spectrogram we analyze
suppress_noise = 10000 # for nice printing
timespan = 60 * 5 * fps / tempo  # analysis parts between sample chords (chords are 5 beats apart)
out_tempo = fps * 60  # output MIDI tempo so one beat == one analysis part
minimal_volume = 0.01 # output volume threshold
#testing
poly = 0 # size of chord to test recognition on (0 disables the self-test)
n_samples = 50 # number of tests
#todo: polishing
def read_mp3(filename):
    """Read an audio file, converting .mp3 to wav via an external tool.

    Tries mpg123, ffmpeg, avconv and mpg321 in turn (os.system returns 0
    on success, so each fallback runs only while rc is still non-zero);
    writes temp.wav in the current directory.  Returns scipy.io.wavfile
    (rate, data).  Plain .wav paths are read directly.
    """
    if filename.endswith('.mp3'):
        rc=1
        if rc: rc = os.system('mpg123 -w temp.wav '+filename)
        if rc: rc = os.system('ffmpeg -i '+filename+' -vn -acodec pcm_s16le -ac 1 -ar 44100 -f wav temp.wav')
        if rc: rc = os.system('avconv -i '+filename+' -vn -acodec pcm_s16le -ac 1 -ar 44100 -f wav temp.wav')
        if rc: rc = os.system('mpg321 -w temp.wav '+filename)
        if rc: exit('unable to convert mp3 to wav. install either ffmpeg or avconv or mpg123 or mpg321.')
        filename = "temp.wav"
    return wavfile.read(filename)
def channel_freqs(channel1, part_length=part_length, input_length=input_length):
    """Short-time FFT magnitude spectrogram of one audio channel.

    Splits the channel into consecutive windows of part_length samples and
    keeps the first input_length FFT magnitude bins of each window.
    Returns an array of shape (parts, input_length); also draws it.
    """
    #channel1 = channel1[part_length/2:]
    parts = len(channel1) / part_length  # Python 2 integer division
    freqs = np.array([abs(fft(channel1[i*part_length:(i+1)*part_length]))[:input_length] for i in range(parts)])
    pylab.imshow(freqs.T, extent=(0,parts,input_length,0), cmap='spectral')
    #pylab.show()
    return freqs
def random_samples(sample_size):
    "get random notes"
    # Each sample is a chord of `poly` distinct note indices.
    # NOTE(review): random.choice([poly]) always yields poly — the list
    # wrapper looks like a leftover hook for variable chord sizes.
    return np.array([random.sample(range(n_classes), random.choice([poly])) for i in range(sample_size)])
def clean_freq(samples):
    "create freq samples"
    # Render the given chords to MIDI, synthesize them with timidity, and
    # return one (scaled, integer) spectrum row per chord.
    sample_size = len(samples)
    chords = [NoteSeq([Note(classes[i]) for i in sample]) for sample in samples]
    midi = Midi(1, tempo=tempo)
    # Chords are placed 5 beats apart in the rendered file.
    for i in range(sample_size): midi.seq_chords([chords[i]], time=5*i)
    midi.write("temp.mid")
    subprocess.call("timidity temp.mid -Ow -o temp.wav".split(), stdout=subprocess.PIPE)
    rate, data = wavfile.read('temp.wav')
    # 5 beats == `timespan` analysis parts, hence the strided slice.
    return channel_freqs(data.T[0])[:sample_size*timespan:timespan].astype(int) / suppress_noise
# Reference spectrum of every single note: the dictionary used by the
# least-squares decomposition below.
notes_start = clean_freq(np.arange(n_classes).reshape([n_classes,1]))
# Optional self-test (enabled when poly > 0): synthesize random chords of
# size `poly` and check how often the decomposition recovers them exactly.
if poly:
    answers = random_samples(n_samples)
    g = clean_freq(answers)
    k=0
    for t in range(n_samples):
        vol_orig = g[t].mean()
        # Non-negative least squares: express the observed spectrum as a
        # combination of single-note spectra.
        result = lsq_linear(notes_start.T, g[t], (0, np.inf))
        notes = result.x.argsort()[-poly:]
        if set(notes) != set(answers[t]):
            k+=1
            print t, 'precision -', set(notes)-set(answers[t]), 'recall +', set(answers[t])-set(notes)
    print k*2, '% error'
def test_output(x, g):
    """Convert per-part note activations into MIDI and play the result.

    x: (parts, n_classes) activation matrix from the least-squares step.
    g: per-part mean spectral energy, used to scale the volume threshold.
    A note is written once its activation drops back below the threshold,
    with duration/volume accumulated over the frames it stayed above it.
    """
    midi = Midi(1, tempo=out_tempo)
    for i in range(n_classes):
        dur = 0
        vol = 0
        for t,v in enumerate(x.T[i]):
            # Threshold scaled by this part's loudness relative to average.
            min_volume = minimal_volume * g[t] / g.mean()
            if v*v>min_volume:
                if dur:
                    vol = (vol / dur + v*v/min_volume ) * (dur+1)
                else:
                    vol = v*v/min_volume
                dur += 1
            elif dur:
                # Note just ended: emit it with the accumulated duration.
                midi.seq_notes([Note(classes[i], dur=dur/4., volume=min(100,int(vol)))], time=t)
                dur = 0
                vol = 0
    midi.write("output.mid")
    os.system("timidity output.mid")
#f[fi].argsort()[-3:]
# Transcribe the file given on the command line (or the default track):
# spectrogram -> non-negative least squares against the single-note
# dictionary -> MIDI playback.
if not sys.argv[1:]: sys.argv.append('giovanni_allevi-pensieri_nascosti.mp3')
g = channel_freqs(read_mp3(sys.argv[1])[1].T[0]).astype(int) / suppress_noise
x = np.zeros([len(g),n_classes])
for i,b in enumerate(g):
    print '{:.1%}'.format(float(i)/len(g))
    result = lsq_linear(notes_start.T, b, (0, np.inf))
    if not result.status:
        # status 0 means the solver hit its iteration limit.
        print result
    x[i] = result.x
pylab.imshow(x.T, cmap='spectral')
#pylab.show()
test_output(x, g.mean(axis=1))
a980e7d346ef567a54ceaeaefb6a4bf7d5b71f9e | Python | wuQAQ/PyCrawler | /PicCrawler/test.py | UTF-8 | 2,852 | 2.953125 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import pymongo
import gridfs
import time
# Obtain the MongoClient instance (local server, default port).
client = pymongo.MongoClient("localhost", 27017)
# Select the database to use.
db = client.test
# GridFS bucket that stores the novel cover images.
fs = gridfs.GridFS(db, "images")
def save_pic_to_disk():
    """
    Copy every image stored in GridFS to the local file system.

    Files are written to d:/img/ and named by their MD5 hash.
    NOTE(review): the destination directory is hard-coded and must exist.
    """
    for stored_file in fs.find():
        print(stored_file.md5)
        # "with" guarantees the handle is closed even if write() raises
        # (the original open/close pair leaked it on error).
        with open('d:/img/' + stored_file.md5 + '.jpg', "wb") as out_file:
            out_file.write(stored_file.read())
def mongodb_delete(title, author):
    """
    Delete a novel's cover record by its title and author.
    Example: mongodb_delete('桃花扇', '孔尚任')
    :param title: novel title
    :param author: novel author
    """
    record = db.novel.find_one({"title": title, "author": author})
    print(record)
    _id = record.get("_id")
    _img = record.get("imag")
    # Remove both the metadata document and its GridFS image blob.
    # NOTE(review): Collection.remove() is a deprecated PyMongo API —
    # confirm against the installed driver version.
    db.novel.remove({"_id": _id})
    fs.delete(_img)
def iterator(url):
    """
    Walk the novel listing page at the given URL: scrape each novel's
    cover image, title and author, store them in MongoDB, then recurse
    into the next page (if any).
    :param url: novel list page address
    :return: None
    """
    print(url)
    # Fetch the page HTML (GBK-encoded) and parse it with BeautifulSoup.
    rs = requests.get(url).content.decode("gbk")
    bs_obj = BeautifulSoup(rs, "html.parser")
    content = bs_obj.find("div", {"id": "content"})
    # Collect the novel entries on this page and iterate over them.
    novels = bs_obj.findAll("div", {"class": "Sum Search_line"})
    for novel in novels:
        # Extract the novel's name, index URL and author.
        name = novel.find("h2").find("a").get_text()
        index = novel.find("h2").find("a").get("href")
        author = novel.find("em").find("a").get_text()
        # Download the cover image and store it in Mongo via GridFS.
        img = novel.find("img")
        rs = requests.get(img.get("src"))
        # Alternative: store the metadata together with the image blob:
        # fs.put(rs.content,title=name, author=author, url=index)
        # Store the image file, then keep its id in the metadata document,
        # relational-database style.
        _id = fs.put(rs.content)
        db.novel.save(dict(title=name, author=author, url=index, imag=_id, time=time.time()))
    # Follow the "next page" link recursively when present.
    next_page = content.find("div", {"id": "pagelink"}).find("a", {"class": "next"})
    if next_page:
        iterator(next_page.get("href"))
# Crawl all novel listings of the "wanbenxiaoshuo" site.
# Its navigation shows 11 novel categories whose ids start at 1 and increase.
for i in range(1, 12):
    iterator('http://www.wanbenxiaoshuo.net/sort/' + str(i) + '_1.html')
# Release the MongoDB connection.
client.close()
| true |
d04106e96bd3cd875ff40ee601049745cf55f5ec | Python | duncantoo/Atomic-Time | /GARMIN/Analysis/Kalman/Kalman.py | UTF-8 | 4,472 | 2.578125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Fri Nov 20 13:19:25 2015
@author: Duncan
Use a Kalman filter on GPS serial data to predict the PPS arrial time
Kalman filter smooths time of GPS serial data arrival, use PPS-SER distribution average to get expected PPS time
We must supply uncertainties in GPS serial time (given by PPS_SER dist) and arduino time (~1 ms)
Also we use the *drift* on the arduino -- the average second length discrepency
format:
lines must contain
txxxx...,xxxx... (times for serial,pps)
"""
import numpy as np
import matplotlib as mplt
import matplotlib.pyplot as plt
import KalmanFilter as klm
filename = "GARNMEA20160131_190024ChckdCor"
# extract data into arrays
# Each data line has the form "t<serial_ms>,<pps_ms>"; other lines skipped.
contents = open("../../Results/"+filename+".txt", mode='r')
contentsTxt = contents.readlines()
contents.close()
ser_T = [0]*len(contentsTxt)  # GPS serial-sentence arrival times (ms)
pps_T = [0]*len(contentsTxt)  # PPS pulse arrival times (ms)
j = 0
for i in range(len(contentsTxt)):
    line = contentsTxt[i]
    if (line[0]=='t'):
        commaLoc = line.index(',')
        ser_T[j] = int(line[1:commaLoc])
        pps_T[j] = int(line[commaLoc+1:])
        j += 1
# Trim the pre-sized lists to the j entries actually parsed.
start = 0
end = j
ser_T = ser_T[start:end]
pps_T = pps_T[start:end]
# --- Kalman smoothing of the serial arrival times ---
serE_T = [0] * len(ser_T)  # expected (smoothed) serial arrival times
covU_T = [0] * len(ser_T)  # expected uncertainty at each step
ardU_t = 0.5  # uncertainty in arduino times
ardD_t = (pps_T[-1] - pps_T[0]) / (len(pps_T) - 1) - 1000  # arduino drift per millisecond (from post-analysis) - defined as ard_ms in 1s - 1000
serU_t = 150  # uncertainty in gps serial arrival times
covU_T[0] = 100
serE_T[0] = ser_T[0]
for step in range(len(serE_T) - 1):
    serE_T[step + 1], covU_T[step + 1] = klm.KalFilIter(
        serE_T[step], 1000 + ardD_t, ser_T[step + 1], covU_T[step], ardU_t, serU_t)

# --- Pairwise differences used by the plots below ---
ppsserE_dT = [est - pps for est, pps in zip(serE_T, pps_T)]  # smoothed serial - PPS
ppsser_dT = [raw - pps for raw, pps in zip(ser_T, pps_T)]    # raw serial - PPS
serser_dT = [nxt - cur for cur, nxt in zip(ser_T, ser_T[1:])]     # consecutive raw serial gaps
ppspps_dT = [nxt - cur for cur, nxt in zip(pps_T, pps_T[1:])]     # consecutive PPS gaps
serEserE_dT = [nxt - cur for cur, nxt in zip(serE_T, serE_T[1:])] # consecutive smoothed gaps
# --- Plot 1: PPS-vs-serial difference, raw against Kalman-smoothed ---
mplt.rcParams.update({'font.size': 16})
fig = plt.figure(figsize=(10,6))
# Clamp the y-range to [0, 1000] ms, rounded outward to 20 ms steps.
yLow = min(min(ppsser_dT),min(ppsserE_dT))
yHi = max(max(ppsser_dT),max(ppsserE_dT))
yLow = max(0, int(yLow/20)*20)
yHi = min(1000, int(yHi/20+1)*20)
xplot = np.linspace(0,len(ppsser_dT),len(ppsser_dT))
ser_ppsser = plt.scatter(xplot, ppsser_dT, s=2, linewidth=0, color="black")
ser_ppsserE = plt.scatter(xplot, ppsserE_dT, s=2, linewidth=0, color="red")
plt.xlim(min(xplot),max(xplot))
plt.ylim(yLow,yHi)
plt.title("PPS Serial difference using Kalman filter")
plt.xlabel("Sample")
plt.ylabel("Time difference / ms")
lgndh = plt.legend([ser_ppsser,ser_ppsserE],["Raw","Kalman"])
# Enlarge the tiny scatter markers inside the legend only.
lgndh.legendHandles[0]._sizes = [30]
lgndh.legendHandles[1]._sizes = [30]
params = {'legend.fontsize': 18}
plt.rcParams.update(params) # the legend text fontsize
# Annotate the improvement in standard deviation (raw -> Kalman).
plt.annotate("std dev "+str(int(round(np.std(ppsser_dT),0)))+
    " --> "+str(int(round(np.std(ppsserE_dT),0)))+" ms", xy=(0.05, 0.95), xycoords='axes fraction')
plt.savefig("../../Results/"+filename+"ppsserKalman("+str(start)+"-"+str(end)+").png",dpi=400)
plt.savefig("../../Results/"+filename+"ppsserKalman("+str(start)+"-"+str(end)+").svg")
# --- Plot 2: consecutive serial time gaps, raw against Kalman-smoothed ---
fig = plt.figure(figsize=(10,6))
# Clamp the y-range to [0, 2000] ms, rounded outward to 20 ms steps.
yLow = min(min(serser_dT),min(serEserE_dT))
yHi = max(max(serser_dT),max(serEserE_dT))
yLow = max(0, int(yLow/20)*20)
yHi = min(2000, int(yHi/20+1)*20)
xplot = np.linspace(0,len(serser_dT),len(serser_dT))
ser_serser = plt.scatter(xplot, serser_dT, s=2, linewidth=0, color="black")
ser_serEserE = plt.scatter(xplot, serEserE_dT, s=2, linewidth=0, color="red")
plt.xlim(min(xplot),max(xplot))
plt.ylim(yLow,yHi)
plt.title("Consecutive serial time difference using Kalman filter")
plt.xlabel("Sample")
plt.ylabel("Time difference / ms")
lgndh = plt.legend([ser_serser,ser_serEserE],["Raw","Kalman"])
# Enlarge the tiny scatter markers inside the legend only.
lgndh.legendHandles[0]._sizes = [30]
lgndh.legendHandles[1]._sizes = [30]
params = {'legend.fontsize': 18}
plt.rcParams.update(params) # the legend text fontsize
# Annotate the improvement in standard deviation (raw -> Kalman).
plt.annotate("std dev "+str(int(round(np.std(serser_dT),0)))+
    " --> "+str(round(np.std(serEserE_dT),1))+" ms", xy=(0.05, 0.95), xycoords='axes fraction')
plt.savefig("../../Results/"+filename+"serserKalman("+str(start)+"-"+str(end)+").png",dpi=400)
plt.savefig("../../Results/"+filename+"serserKalman("+str(start)+"-"+str(end)+").svg")
d5bad1697693bcca69684a83190f22aa22b43544 | Python | NiaOrg/NiaPy | /niapy/task.py | UTF-8 | 9,037 | 2.984375 | 3 | [
"MIT"
] | permissive | # encoding=utf8
"""The implementation of tasks."""
import logging
from enum import Enum
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.ticker as ticker
from niapy.problems import Problem
from niapy.util.repair import limit
from niapy.util.factory import get_problem
logging.basicConfig()
logger = logging.getLogger("niapy.task.Task")
logger.setLevel("INFO")
class OptimizationType(Enum):
    r"""Enum representing type of optimization.

    The enum's float value is used as a sign multiplier on fitness values,
    which lets maximization problems share the minimization code path.

    Attributes:
        MINIMIZATION (int): Represents minimization problems and is default optimization type of all algorithms.
        MAXIMIZATION (int): Represents maximization problems.

    """

    MINIMIZATION = 1.0
    MAXIMIZATION = -1.0
class Task:
    r"""Class representing an optimization task.

    Date:
        2019

    Author:
        Klemen Berkovič and others

    Attributes:
        problem (Problem): Optimization problem.
        dimension (int): Dimension of the problem.
        lower (numpy.ndarray): Lower bounds of the problem.
        upper (numpy.ndarray): Upper bounds of the problem.
        range (numpy.ndarray): Search range between upper and lower limits.
        optimization_type (OptimizationType): Optimization type to use.
        iters (int): Number of algorithm iterations/generations.
        evals (int): Number of function evaluations.
        max_iters (int): Maximum number of algorithm iterations/generations.
        max_evals (int): Maximum number of function evaluations.
        cutoff_value (float): Reference function/fitness values to reach in optimization.
        x_f (float): Best found individual function/fitness value.

    """

    def __init__(self, problem=None, dimension=None, lower=None, upper=None,
                 optimization_type=OptimizationType.MINIMIZATION, repair_function=limit, max_evals=np.inf,
                 max_iters=np.inf, cutoff_value=None, enable_logging=False):
        r"""Initialize task class for optimization.

        Args:
            problem (Union[str, Problem]): Optimization problem.
            dimension (Optional[int]): Dimension of the problem. Will be ignored if problem is instance of the `Problem` class.
            lower (Optional[Union[float, Iterable[float]]]): Lower bounds of the problem. Will be ignored if problem is instance of the `Problem` class.
            upper (Optional[Union[float, Iterable[float]]]): Upper bounds of the problem. Will be ignored if problem is instance of the `Problem` class.
            optimization_type (Optional[OptimizationType]): Set the type of optimization. Default is minimization.
            repair_function (Optional[Callable[[numpy.ndarray, numpy.ndarray, numpy.ndarray, Dict[str, Any]], numpy.ndarray]]): Function for repairing individuals components to desired limits.
            max_evals (Optional[int]): Number of function evaluations.
            max_iters (Optional[int]): Number of generations or iterations.
            cutoff_value (Optional[float]): Reference value of function/fitness function.
            enable_logging (Optional[bool]): Enable/disable logging of improvements.

        """
        if isinstance(problem, str):
            # Build the named benchmark problem, forwarding only the
            # construction parameters the caller actually supplied.
            params = dict(dimension=dimension, lower=lower, upper=upper)
            params = {key: val for key, val in params.items() if val is not None}
            self.problem = get_problem(problem, **params)
        elif isinstance(problem, Problem):
            self.problem = problem
            if dimension is not None or lower is not None or upper is not None:
                logger.warning('An instance of the Problem class was passed in, `dimension`, `lower` and `upper` parameters will be ignored.')
        else:
            raise TypeError('Unsupported type for problem: {}'.format(type(problem)))
        self.optimization_type = optimization_type
        self.dimension = self.problem.dimension
        self.lower = self.problem.lower
        self.upper = self.problem.upper
        self.range = self.upper - self.lower
        self.repair_function = repair_function
        self.iters = 0
        self.evals = 0
        # Default cutoff is sign-flipped infinity, i.e. unreachable.
        self.cutoff_value = -np.inf * optimization_type.value if cutoff_value is None else cutoff_value
        self.enable_logging = enable_logging
        # Best fitness so far, initialized to the worst possible value for
        # the chosen optimization direction.
        self.x_f = np.inf * optimization_type.value
        self.max_evals = max_evals
        self.max_iters = max_iters
        self.n_evals = []
        self.fitness_evals = []  # fitness improvements at self.n_evals evaluations
        self.fitness_iters = []  # best fitness at each iteration

    def repair(self, x, rng=None):
        r"""Repair solution and put the solution in the random position inside of the bounds of problem.

        Args:
            x (numpy.ndarray): Solution to check and repair if needed.
            rng (Optional[numpy.random.Generator]): Random number generator.

        Returns:
            numpy.ndarray: Fixed solution.

        See Also:
            * :func:`niapy.util.repair.limit`
            * :func:`niapy.util.repair.limit_inverse`
            * :func:`niapy.util.repair.wang`
            * :func:`niapy.util.repair.rand`
            * :func:`niapy.util.repair.reflect`

        """
        return self.repair_function(x, self.lower, self.upper, rng=rng)

    def next_iter(self):
        r"""Increments the number of algorithm iterations."""
        # Record the best fitness seen so far for this (finished) iteration.
        self.fitness_iters.append(self.x_f)
        self.iters += 1

    def eval(self, x):
        r"""Evaluate the solution A.

        Args:
            x (numpy.ndarray): Solution to evaluate.

        Returns:
            float: Fitness/function values of solution.

        """
        # Budget exhausted: return an infinite sentinel instead of spending
        # another evaluation.
        if self.stopping_condition():
            return np.inf
        self.evals += 1
        # Multiply by the optimization-type sign so that "smaller is better"
        # holds internally for both minimization and maximization.
        x_f = self.problem.evaluate(x) * self.optimization_type.value
        if x_f < self.x_f * self.optimization_type.value:
            # New best found: multiplying by the sign again restores the
            # problem's native fitness value for storage in x_f.
            self.x_f = x_f * self.optimization_type.value
            self.n_evals.append(self.evals)
            self.fitness_evals.append(x_f)
            if self.enable_logging:
                logger.info('evals:%d => %s' % (self.evals, self.x_f))
        return x_f

    def is_feasible(self, x):
        r"""Check if the solution is feasible.

        Args:
            x (Union[numpy.ndarray, Individual]): Solution to check for feasibility.

        Returns:
            bool: `True` if solution is in feasible space else `False`.

        """
        return np.all((x >= self.lower) & (x <= self.upper))

    def stopping_condition(self):
        r"""Check if optimization task should stop.

        Returns:
            bool: `True` if number of function evaluations or number of algorithm iterations/generations or reference values is reach else `False`.

        """
        # The sign multiplication makes the cutoff comparison direction-
        # independent (works for both minimization and maximization).
        return (self.evals >= self.max_evals) or (self.iters >= self.max_iters) or (self.cutoff_value * self.optimization_type.value >= self.x_f * self.optimization_type.value)

    def stopping_condition_iter(self):
        r"""Check if stopping condition reached and increase number of iterations.

        Returns:
            bool: `True` if number of function evaluations or number of algorithm iterations/generations or reference values is reach else `False`.

        """
        r = self.stopping_condition()
        self.next_iter()
        return r

    def convergence_data(self, x_axis='iters'):
        r"""Get values of x and y-axis for plotting covariance graph.

        Args:
            x_axis (Literal['iters', 'evals']): Quantity to be displayed on the x-axis. Either 'iters' or 'evals'.

        Returns:
            Tuple[np.ndarray, np.ndarray]:
                1. array of function evaluations.
                2. array of fitness values.

        """
        if x_axis == 'iters':
            return np.arange(self.iters), np.array(self.fitness_iters)
        else:  # x_axis == 'evals'
            # Improvements are recorded sparsely (only at self.n_evals);
            # forward-fill the best fitness for the evaluations in between.
            r1, r2 = [], []
            for i, v in enumerate(self.n_evals):
                r1.append(v)
                r2.append(self.fitness_evals[i])
                if i >= len(self.n_evals) - 1:
                    break
                diff = self.n_evals[i + 1] - v
                if diff <= 1:
                    continue
                for j in range(diff - 1):
                    r1.append(v + j + 1)
                    r2.append(self.fitness_evals[i])
            return np.array(r1), np.array(r2)

    def plot_convergence(self, x_axis='iters', title='Convergence Graph'):
        """Plot a simple convergence graph.

        Args:
            x_axis (Literal['iters', 'evals']): Quantity to be displayed on the x-axis. Either 'iters' or 'evals'.
            title (str): Title of the graph.

        """
        x, fitness = self.convergence_data(x_axis)
        _, ax = plt.subplots()
        ax.plot(x, fitness)
        # Force integer ticks: both iterations and evaluations are counts.
        ax.xaxis.set_major_locator(ticker.MaxNLocator(integer=True))
        if x_axis == 'iters':
            plt.xlabel('Iterations')
        else:
            plt.xlabel('Fitness Evaluations')
        plt.ylabel('Fitness')
        plt.title(title)
        plt.show()
| true |
f131a587bf73fea71602c9f5eed79ee3a1e03732 | Python | ColtonAarts/SpeechToTextSentimentAnalysis | /GUI.py | UTF-8 | 7,050 | 2.6875 | 3 | [] | no_license | import tkinter as tk
import speech_recognition as sr
from EmotionDetection.Classification import NeuralNetwork
from EmotionDetection.Lexical import LexicalAnalysis
from nltk.stem import WordNetLemmatizer
import re
from keras_preprocessing.text import Tokenizer
from keras_preprocessing.sequence import pad_sequences
import pandas
import operator
import numpy as np
class Application(tk.Frame):
    """Tkinter GUI that records speech, transcribes it via Google speech
    recognition, and shows emotion classification results: a neural-network
    label for the whole utterance plus per-word lexicon-based coloring.
    """

    def __init__(self, master=None):
        """Build the GUI and train the emotion classifier up front.

        NOTE(review): the training data is read from a hard-coded absolute
        Windows path and the network is re-trained on every start-up —
        consider persisting the trained model instead.
        """
        super().__init__(master)
        self.r = sr.Recognizer()
        self.lexical = LexicalAnalysis.LexicalAnalysis()
        self.master = master
        self.pack()
        self.create_widgets()
        self.running = False
        self.text = ""            # last transcribed utterance
        self.text_sequence = None  # padded token-id sequence of self.text
        self.stemmer = WordNetLemmatizer()
        df = pandas.read_csv("D:\\PycharmProjects\\ThesisWork\\Data\\EmotionDetection\\%_by_Emo_Full_Data_data (1).csv")
        df['Tweet'] = df['Tweet'].apply(self.clean)
        MAX_NB_WORDS = 50000
        # Max number of words in each tweet.
        self.MAX_SEQUENCE_LENGTH = 250
        self.tokenizer = Tokenizer(num_words=MAX_NB_WORDS, filters='!"#$%&()*+,-./:;<=>?@[\]^_`{|}~', lower=True)
        self.tokenizer.fit_on_texts(df['Tweet'].values)
        # Integer replacement
        X = self.tokenizer.texts_to_sequences(df['Tweet'].values)
        X = pad_sequences(X, maxlen=self.MAX_SEQUENCE_LENGTH)
        # Gets categorical values for the labels
        Y = pandas.get_dummies(df['Emotion']).values
        # 4 output classes: anger, fear, joy, sadness (see start_capture).
        self.neuralNetwork = NeuralNetwork.NeuralNetwork(X.shape[1], 4)
        self.neuralNetwork.fit(X, Y)

    def clean(self, tweet):
        """Normalize a tweet: strip special characters, single characters
        and digits, lower-case, and lemmatize each remaining word."""
        # Use this to remove hashtags since they can become nonsense words
        # trimmed_tweet = re.sub(r'(\s)#\w+', r'\1', tweet)
        # Remove all the special characters
        trimmed_tweet = re.sub(r'\W', ' ', tweet)
        # remove all single characters
        trimmed_tweet = re.sub(r'\s+[a-zA-Z]\s+', ' ', trimmed_tweet)
        # Remove single characters from the start
        trimmed_tweet = re.sub(r'\^[a-zA-Z]\s+', ' ', trimmed_tweet)
        # Substituting multiple spaces with single space
        trimmed_tweet = re.sub(r'\s+', ' ', trimmed_tweet, flags=re.I)
        # Removes numbers
        trimmed_tweet = ''.join([i for i in trimmed_tweet if not i.isdigit()])
        # # Removing prefixed 'b'
        # trimmed_tweet = re.sub(r'^b\s+', '', trimmed_tweet)
        # Converting to Lowercase
        trimmed_tweet = trimmed_tweet.lower()
        # Lemmatization
        trimmed_tweet = trimmed_tweet.split()
        trimmed_tweet = [self.stemmer.lemmatize(word) for word in trimmed_tweet]
        trimmed_tweet = ' '.join(trimmed_tweet)
        return trimmed_tweet

    def create_widgets(self):
        """Create the text area (with one color tag per emotion), the
        record button and the quit button."""
        self.text_field = tk.Text()
        # One foreground-color tag per lexical emotion (see lexical_analysis).
        self.text_field.tag_configure("red_tag", foreground="red")
        self.text_field.tag_configure("yellow_tag", foreground="yellow")
        self.text_field.tag_configure("black_tag", foreground="black")
        self.text_field.tag_configure("green_tag", foreground="green")
        self.text_field.tag_configure("blue_tag", foreground="blue")
        self.label = tk.Label()
        self.label.pack()
        self.text_field.pack()
        self.record = tk.Button(self, text="Push to Record")
        self.record.pack(side="left")
        self.record["command"] = self.start_capture
        self.quit = tk.Button(self, text="QUIT", fg="red",
                              command=self.master.destroy)
        self.quit.pack(side="bottom")

    def start_capture(self):
        """Record from the microphone, transcribe, classify the utterance
        with the neural network, and color each word by lexicon emotion."""
        self.text_field.insert(tk.END, "Talk")
        print("Talk")
        with sr.Microphone() as source:
            audio_text = self.r.listen(source)
            self.text_field.insert(tk.END, "Time over, Thanks")
            # print("Time over, thanks")
            # recoginize_() method will throw a request error if the API is unreachable, hence using exception handling
            try:
                # using google speech recognition
                self.text = self.r.recognize_google(audio_text)
                lst = list()
                lst.append(self.text)
                # Tokenize/pad the utterance the same way as the training data.
                self.text_sequence = self.tokenizer.texts_to_sequences(lst)
                self.text_sequence = pad_sequences(self.text_sequence, self.MAX_SEQUENCE_LENGTH)
                results = self.neuralNetwork.predict(self.text_sequence)
                indexes = ""
                # results = model.predict(X_test)
                # Argmax over the 4 class probabilities -> emotion label.
                for prediction in results:
                    max_percent = max(prediction)
                    indexes = str(prediction.tolist().index(max_percent))
                if indexes == '0':
                    print("anger")
                    indexes = "anger"
                elif indexes == "1":
                    print("fear")
                    indexes = "fear"
                elif indexes == "2":
                    print("joy")
                    indexes = "joy"
                else:
                    print("sadness")
                    indexes = "sadness"
                print("Text: " + self.text)
                print(indexes)
                # Per-word lexicon colors, applied via text-widget tags.
                colours = self.lexical_analysis(self.text)
                words = self.text.split(" ")
                self.text_field.delete('1.0', tk.END)
                self.text_field.insert(tk.END, self.text)
                for num in range(len(words)):
                    word = words[num]
                    offset = "+%dc" % len(word)
                    pos_start = self.text_field.search(word, '1.0', tk.END)
                    # Tag every occurrence of the word with its color.
                    while pos_start:
                        pos_end = pos_start + offset
                        self.text_field.tag_add(colours[num]+"_tag", pos_start, pos_end)
                        pos_start = self.text_field.search(word, pos_end, tk.END)
                self.text_field.insert(tk.END, "\n" + indexes)
            # NOTE(review): bare except silently hides every failure mode
            # (network, API, widget errors) — narrow it if possible.
            except:
                print("Sorry, I did not get that")

    def lexical_analysis(self, sentence):
        """Return one color name per word of `sentence`, chosen from the
        word's dominant lexicon emotion:
        fear->blue, anger->red, sadness->yellow, joy->green, none->black."""
        sentence = sentence.split(" ")
        lst = list()
        for word in sentence:
            values = self.lexical.find_sentiment(word)
            # Emotion with the highest lexicon score for this word.
            max_value = max(values.items(), key=operator.itemgetter(1))[0]
            print(values)
            print(max_value)
            if values[max_value] != 0:
                if max_value == "fear":
                    lst.append("blue")
                elif max_value == "anger":
                    lst.append("red")
                elif max_value == "sadness":
                    lst.append("yellow")
                elif max_value == "joy":
                    lst.append("green")
            else:
                # No emotion scored for this word: default color.
                lst.append("black")
        print(lst)
        return lst

    def end_capture(self):
        # Debug helper: print the last transcription.
        print(self.text)
# Launch the GUI (training runs inside Application.__init__, so start-up
# can take a while).
root = tk.Tk()
app = Application(master=root)
app.mainloop()
1a2b0d7bf88463b80f241430f5f067c3ea842f12 | Python | zani0x03/python | /scraping/expressao-regular.py | UTF-8 | 1,806 | 4.09375 | 4 | [] | no_license | import re
texto = "Esta uma aula de Python. Esta é uma aula sobre expressões regulares."
# Regex cheat-sheet (explanations translated from Portuguese):
# padrao = "Esta" -- literal word pattern
# padrao = "."
# resultado = re.search(padrao,texto,re.DOTALL) -- treats newline as a valid character for "."
# resultado = re.search(padrao,texto) -- does not treat newline as a valid character
# padrao = "^Esta" -- matches, because it occurs at the start of the string
# padrao = "^uma" -- no match, because it is not at the start of the string
# resultado = re.search(padrao,texto)
# padrao = "regulares.$" -- matches, because it occurs at the end of the text
# padrao = "sobre$" -- no match, because it is not at the end of the text
# resultado = re.search(padrao,texto)
# padrao = "[aeiou]" -- finds occurrences of any of these characters
# padrao = "[a-z]" -- finds lowercase occurrences
# resultado = re.search(padrao, texto) -- returns the first occurrence
# resultado = re.findall(padrao,texto)
# padrao = "a*" -- matches wherever "a" occurs, and matches empty elsewhere
# padrao = "a+" --> one or more occurrences of "a"; only matches where "a" exists
# padrao = "\d" --> digits in the text
# padrao = "\D" --> everything that is not a digit
# padrao = "\s" --> every whitespace character
# padrao = "\S" --> every character that is not whitespace
# padrao = "\w" --> alphanumeric: letters, digits and underscore
# padrao = "\W" --> the opposite: any character that is not alphanumeric or underscore
# padrao = "\d{}" braces give the repetition count, e.g. \d{1} matches 1...2...3...4 while \d{2} matches 10...20...30
# padrao = "(a) | (\d)" --> () groups expressions and | is the logical OR
# pythex.org -- site for testing regular expressions
# regex101.com/#python
#print(resultado)
# print(resultado)
# if resultado:
#     print(resultado.group())
# else:
#     print("resultado não encontrado")
fff16bf03dbd5ac7723b9238248604b652e2320a | Python | Robel-RT/Udemy1 | /dataopertion.py | UTF-8 | 379 | 3.78125 | 4 | [] | no_license | monday_temprature = [9.1, 8.8, 7.5]
# Basic list and dict operations demo.
monday_temprature = [9.1, 8.8, 7.5]
monday_temprature.append(6.3)
print(monday_temprature)
monday_temprature = [9.1, 8.8, 7.5, 6.3, 8.2]
# NOTE(review): the original evaluated monday_temprature[2:] and
# monday_temprature[-3] without using the results; those no-op
# expression statements were removed (printed output is unchanged).
print(monday_temprature)
print(monday_temprature)
mystring = ['hello', 4, 5.6, 9]  # heterogeneous list, despite the name
print(mystring[0][2])  # third character of 'hello' -> 'l'
student_grades = {"Merry": 9.1, "Sim": 8.8, "Steven": 7.5}
print(student_grades["Merry"])
| true |
ebd714d27693b7d1ed4e0d37417dc1e7eeb89ed7 | Python | CS373-summer-2013/cs373-collatz-tests | /mgt91-TestCollatz.py | UTF-8 | 4,092 | 3.125 | 3 | [] | no_license | #
# mgt91-TestCollatz.py.py
#
#
# Created by Matt Thurner on 6/12/13.
# Copyright (c) 2013 University of Texas at Austin. All rights reserved.
#
#!/usr/bin/env python
# -------------------------------
# projects/collatz/TestCollatz.py
# Copyright (C) 2013
# Glenn P. Downing
# -------------------------------
"""
To test the program:
% python TestCollatz.py >& TestCollatz.py.out
% chmod ugo+x TestCollatz.py
% TestCollatz.py >& TestCollatz.py.out
"""
# -------
# imports
# -------
import StringIO
import unittest
from Collatz import collatz_read, collatz_eval, collatz_print, collatz_solve
# -----------
# TestCollatz
# -----------
class TestCollatz (unittest.TestCase) :
    """Unit tests for the Collatz module (collatz_read/eval/print/solve).

    NOTE(review): this file targets Python 2 (it imports the Py2-only
    StringIO module), and ``self.assert_`` is the deprecated alias of
    ``assertTrue``.
    """
    # ----
    # read
    # ----
    def test_read (self) :
        # collatz_read parses one "i j" line into the two-element list a.
        r = StringIO.StringIO("1 10\n")
        a = [0, 0]
        b = collatz_read(r, a)
        self.assert_(b == True)
        self.assert_(a[0] == 1)
        self.assert_(a[1] == 10)
    # both values are 1
    def test_read_1 (self) :
        r = StringIO.StringIO("1 1\n")
        a = [0, 0]
        b = collatz_read(r, a)
        self.assert_(b == True)
        self.assert_(a[0] == 1)
        self.assert_(a[1] == 1)
    # values are presented in inverse fashion
    def test_read_inverse (self) :
        r = StringIO.StringIO("20 10\n")
        a = [0, 0]
        b = collatz_read(r, a)
        self.assert_(b == True)
        self.assert_(a[0] == 20)
        self.assert_(a[1] == 10)
    # second value is large
    def test_read_large (self) :
        r = StringIO.StringIO("1 5000\n")
        a = [0, 0]
        b = collatz_read(r, a)
        self.assert_(b == True)
        self.assert_(a[0] == 1)
        self.assert_(a[1] == 5000)
    # ----
    # eval
    # ----
    # collatz_eval(i, j) returns the maximum cycle length over [i, j].
    def test_eval_1 (self) :
        v = collatz_eval(1, 10)
        self.assert_(v == 20)
    def test_eval_2 (self) :
        v = collatz_eval(100, 200)
        self.assert_(v == 125)
    def test_eval_3 (self) :
        v = collatz_eval(201, 210)
        self.assert_(v == 89)
    def test_eval_4 (self) :
        v = collatz_eval(900, 1000)
        self.assert_(v == 174)
    # values are both 1
    def test_eval_5 (self) :
        v = collatz_eval(1, 1)
        self.assert_(v == 1)
    # reversed values
    def test_eval_6 (self) :
        v = collatz_eval(10, 5)
        self.assert_(v == 20)
    # same value
    def test_eval_7 (self) :
        v = collatz_eval(5, 5)
        self.assert_(v == 6)
    # -----
    # print
    # -----
    # collatz_print writes "i j v\n" to the given writer.
    def test_print (self) :
        w = StringIO.StringIO()
        collatz_print(w, 1, 10, 20)
        self.assert_(w.getvalue() == "1 10 20\n")
    # values are both 1
    def test_print_one (self) :
        w = StringIO.StringIO()
        collatz_print(w, 1, 1, 1)
        self.assert_(w.getvalue() == "1 1 1\n")
    # same value
    def test_print_same (self) :
        w = StringIO.StringIO()
        collatz_print(w, 5, 5, 6)
        self.assert_(w.getvalue() == "5 5 6\n")
    # reversed values
    def test_print_reverse (self) :
        w = StringIO.StringIO()
        collatz_print(w, 10, 5, 20)
        self.assert_(w.getvalue() == "10 5 20\n")
    # -----
    # solve
    # -----
    # collatz_solve reads every input line and writes one result line each.
    def test_solve (self) :
        r = StringIO.StringIO("1 10\n100 200\n201 210\n900 1000\n")
        w = StringIO.StringIO()
        collatz_solve(r, w)
        self.assert_(w.getvalue() == "1 10 20\n100 200 125\n201 210 89\n900 1000 174\n")
    # one line
    def test_solve_one (self) :
        r = StringIO.StringIO("1 5\n")
        w = StringIO.StringIO()
        collatz_solve(r, w)
        self.assert_(w.getvalue() == "1 5 8\n")
    # two lines
    def test_solve_two (self) :
        r = StringIO.StringIO("5 5\n5 10\n")
        w = StringIO.StringIO()
        collatz_solve(r, w)
        self.assert_(w.getvalue() == "5 5 6\n5 10 20\n")
    # reversed values
    def test_solve_reverse (self) :
        r = StringIO.StringIO("1 1\n1 5\n5 10\n10 5\n")
        w = StringIO.StringIO()
        collatz_solve(r, w)
        self.assert_(w.getvalue() == "1 1 1\n1 5 8\n5 10 20\n10 5 20\n")
# ----
# main
# ----
# NOTE(review): Python 2 print statements (consistent with the StringIO
# import above).  unittest.main() normally calls sys.exit(), so the final
# print is only reached when main() is configured not to exit.
print "TestCollatz.py"
unittest.main()
print "Done."
dbbfdd35412ea39f1e9f202fb53ece4a78e31960 | Python | junhappy/Python | /p8-2.py | UTF-8 | 253 | 3.640625 | 4 | [] | no_license | #p8-2課題
# p8-2 assignment: elementwise sums, doubling, and sin(0.5)-scaled values.
import numpy as np

a = np.array([3, 5, 6, 2, 1])
b = np.arange(1, 6)

print(a)
print(b)
# Elementwise sums a[i] + b[i], one per line.
for x, y in zip(a, b):
    print(x + y)
print('')
# Each element of a doubled.
for x in a:
    print(2 * x)
# Each element of b scaled by sin(0.5).
for y in b:
    print(np.sin(0.5) * y)
| true |
8f0167180c1ac0facf72737115bf014058406469 | Python | karina-cherednyk/Information-Retrieval-Model | /Cherednyk_03_v0/collection_info.py | UTF-8 | 374 | 3.234375 | 3 | [] | no_license | class WordCollectionInfo:
def __init__(self, terms, documents, all_words):
self.terms = terms
self.documents = documents
self.uniqueWords = len(terms)
self.allWords = all_words
def __str__(self):
res = ""
for i, (key, val) in enumerate(self.terms.items()):
res += f"{i}){key}: {val}\n"
return res
| true |
9e618af90dc87309fae487e22bceb31ec59f93f4 | Python | dedekinds/pyleetcode | /858_Mirror Reflection_medium.py | UTF-8 | 483 | 2.953125 | 3 | [] | no_license | 见稍后知乎分析
# Unfold the room upward repeatedly; note how the two top corners alternate.
class Solution(object):
    def gcd(self, a, b):
        """Return the greatest common divisor of a and b (Euclid's algorithm)."""
        if b == 0:
            return a
        return self.gcd(b, a % b)

    def mirrorReflection(self, p, q):
        """LeetCode 858 — first receptor hit by the reflected light ray.

        :type p: int
        :type q: int
        :rtype: int  (0, 1 or 2 — the receptor number)

        Unfolding the square room upward turns the reflected ray into a
        straight line; it first reaches a corner after crossing m = q//g
        room heights and n = p//g room widths, where g = gcd(p, q).
        Uses exact integer division (the original int(q / g) float form
        can lose precision for large inputs).
        """
        g = self.gcd(p, q)
        m = q // g  # room heights traversed before the first corner hit
        n = p // g  # room widths traversed (equals m * p / q, exactly)
        if m % 2 == 0:
            # Even number of unfoldings: the ray ends on the bottom wall.
            return 0
        return 2 if n % 2 == 0 else 1
| true |
8948b54f367030057266d668f2883afaa583a78a | Python | PiggerZZM/leetcode-exercises | /LeetCode674/674.py | UTF-8 | 733 | 3.296875 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
# author:andin
# datetime:2018/11/18 17:38
# software: PyCharm
class Solution:
    def findLengthOfLCIS(self, nums):
        """
        Return the length of the longest strictly increasing contiguous run.

        :type nums: List[int]
        :rtype: int

        Single O(n) pass; the previous len == 1 special case was redundant
        (the general loop already handles it).
        """
        if not nums:
            return 0
        best = 1
        run = 1  # length of the increasing run ending at the current element
        for prev, cur in zip(nums, nums[1:]):
            if cur > prev:
                run += 1
                if run > best:
                    best = run
            else:
                run = 1  # run broken; restart at the current element
        return best
if __name__ == '__main__':
    # Quick manual check: expected output is 3 (the run 1, 3, 5).
    solver = Solution()
    print(solver.findLengthOfLCIS([1, 3, 5, 4, 2, 7]))
0b064c8439f258bdac1391e1850e88a3e320c45a | Python | Nesnahor/cvxpy | /cvxpy/atoms/affine/kron.py | UTF-8 | 2,656 | 2.84375 | 3 | [
"Apache-2.0"
] | permissive | """
Copyright 2013 Steven Diamond
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cvxpy.atoms.affine.affine_atom import AffAtom
import cvxpy.utilities as u
import cvxpy.lin_ops.lin_utils as lu
import numpy as np
class kron(AffAtom):
    """Kronecker product of a constant left operand and an expression.
    """
    # TODO work with right hand constant.
    # TODO(akshayka): make DGP-compatible
    def __init__(self, lh_expr, rh_expr):
        super(kron, self).__init__(lh_expr, rh_expr)

    @AffAtom.numpy_numeric
    def numeric(self, values):
        """Kronecker product of the two values.
        """
        return np.kron(values[0], values[1])

    def validate_arguments(self):
        """Checks that both arguments are matrices, and the first is constant.
        """
        if not self.args[0].is_constant():
            raise ValueError("The first argument to kron must be constant.")
        elif self.args[0].ndim != 2 or self.args[1].ndim != 2:
            raise ValueError("kron requires matrix arguments.")

    def shape_from_args(self):
        """The elementwise product of the two argument shapes.
        """
        rows = self.args[0].shape[0]*self.args[1].shape[0]
        cols = self.args[0].shape[1]*self.args[1].shape[1]
        return (rows, cols)

    def sign_from_args(self):
        """Same as times.
        """
        return u.sign.mul_sign(self.args[0], self.args[1])

    def is_incr(self, idx):
        """Is the composition non-decreasing in argument idx?
        """
        # Monotonicity is governed by the sign of the constant left operand.
        return self.args[0].is_nonneg()

    def is_decr(self, idx):
        """Is the composition non-increasing in argument idx?
        """
        return self.args[0].is_nonpos()

    @staticmethod
    def graph_implementation(arg_objs, shape, data=None):
        """Kronecker product of two matrices.

        Parameters
        ----------
        arg_objs : list
            LinOp for each argument.
        shape : tuple
            The shape of the resulting expression.
        data :
            Additional data required by the atom.

        Returns
        -------
        tuple
            (LinOp for objective, list of constraints)
        """
        return (lu.kron(arg_objs[0], arg_objs[1], shape), [])
| true |
2891ac1bacbf82bfeb406673ee818d57e70b6d62 | Python | scottnguyen/revlo-python-client | /examples/songrequests/irc.py | UTF-8 | 1,744 | 2.609375 | 3 | [
"MIT"
] | permissive | import socket, re, time, sys
class Irc:
    """Minimal Twitch-IRC client: connects, authenticates and joins a channel.

    Expected config keys: server, port, username, password, channel.
    """

    def __init__(self, config):
        self.config = config
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(10)
        self.get_irc_socket_object()

    def close(self):
        # BUG FIX: the 'self' parameter was missing ("def close():"), so any
        # call to close() raised before the socket could be closed.
        self.sock.close()

    def check_login_status(self, data):
        """Return False iff the raw server reply is a 'Login unsuccessful' notice."""
        data = data.decode("utf-8")
        if re.match(r'^:(testserver\.local|tmi\.twitch\.tv) NOTICE \* :Login unsuccessful\r\n$', data):
            return False
        else:
            return True

    def send(self, msg):
        """Append CRLF and write the message to the socket as raw bytes."""
        msg += '\r\n'
        print("Sending message: {}".format(msg))
        ba = bytearray()
        ba.extend(map(ord, msg))
        self.sock.send(ba)

    def send_message(self, channel, message):
        """Send a PRIVMSG to the given channel."""
        self.send('PRIVMSG #{} :{}'.format(channel, message))

    def get_irc_socket_object(self):
        """Connect, authenticate, request Twitch capabilities and join the channel."""
        try:
            self.sock.connect((self.config['server'], int(self.config['port'])))
        except Exception as e:
            print('Cannot connect to server ({}:{}).'.format((self.config['server'], self.config['port']), 'error'))
            print("{}".format(e))
        # Switch to blocking mode for the login handshake.
        self.sock.settimeout(None)
        self.send('USER {}'.format(self.config['username']))
        self.send('PASS {}'.format(self.config['password']))
        self.send('NICK {}'.format(self.config['username']))
        self.send("CAP REQ :twitch.tv/membership")
        self.send("CAP REQ :twitch.tv/commands")
        self.send("CAP REQ :twitch.tv/tags")
        if self.check_login_status(self.sock.recv(1024)):
            print('Log into TwitchIRC successful.')
        else:
            print("Log into TwitchIRC Unsuccessful.")
            sys.exit()
        self.join(self.config['channel'])
        return self.sock

    def join(self, channel):
        """JOIN the given channel."""
        self.send('JOIN #{}'.format(channel))

    def leave(self, channel):
        """PART from the given channel."""
        self.send('PART {}'.format(channel))
6f319b8a3a5a7fa255efb311149073f74c999449 | Python | maxmaximo-github/kali_projects | /tools2/telnet/dns_resolver.py | UTF-8 | 672 | 2.78125 | 3 | [] | no_license | #!/usr/bin/env python3
from dns import resolver

# Hosts whose AAAA (IPv6) records we want to check.
hostnames = [
    "ns1.example.com", "sw1.example.com", "sw2.example.com",
    "r1.example.com", "r2.example.com", "r3.example.com",
    "sw3.example.com", "sw4.example.com"
]

found = []
missing = []
for host in hostnames:
    try:
        for record in resolver.resolve(host, "AAAA"):
            found.append(f"Host {host} has a AAAA with {record}")
    except (resolver.NXDOMAIN, resolver.NoAnswer):
        missing.append(f"Host {host} doesn't AAAA")

# Report resolved hosts first, then the ones without AAAA records.
found.extend(missing)
print(f"\n\n")
for line in found:
    print(line)
db734e4bba9c34f1e0a9b023c623b81266d5a990 | Python | bunnisiva/Fashion-pytorch | /pyfiles/FashionMNISTDataset.py | UTF-8 | 790 | 3.03125 | 3 | [] | no_license | class FashionMNISTDataset(Dataset):
'''Fashion MNIST Dataset'''
    def __init__(self, csv_file, transform=None):
        """
        Args:
            csv_file (string): Path to the csv file
            transform (callable): Optional transform to apply to sample
        """
        data = pd.read_csv(csv_file)
        # Column 0 holds the label; the remaining 784 columns are pixel
        # values, reshaped to (N, 1, 28, 28) single-channel images.
        self.X = np.array(data.iloc[:, 1:]).reshape(-1,
                                                    1, 28, 28)  # .astype(float);
        self.Y = np.array(data.iloc[:, 0])
        # Drop the intermediate frame to release memory early.
        del data
        self.transform = transform
return len(self.X)
def __getitem__(self, idx):
item = self.X[idx]
label = self.Y[idx]
if self.transform:
item = self.transform(item)
return (item, label)
| true |
f827daaa9f115a4327b4dc2b6cae0dddf9a3997a | Python | aeyc/RandomBiningEncoding | /src/task3.py | UTF-8 | 1,594 | 3.015625 | 3 | [] | no_license | def decrypt(msg):
    # NOTE(review): `error` and `corrected` are never used below.
    error = 0
    corrected = 0
    # Calculate syndrome
    # Bit layout of msg: [D0 D1 D2 D3 H2 H1 H0] — four data bits then three
    # parity bits (Hamming(7,4)-style).
    s = [0, 0, 0]
    # D1 + D2 + D3 + H0
    s[0] = (int(msg[1]) + int(msg[2]) + int(msg[3]) + int(msg[6])) % 2
    # D0 + D2 + D3 + H1
    s[1] = (int(msg[0]) + int(msg[2]) + int(msg[3]) + int(msg[5])) % 2
    # D0 + D1 + D3 + H2
    s[2] = (int(msg[0]) + int(msg[1]) + int(msg[3]) + int(msg[4])) % 2
    syndrome = (s[2] << 2) | (s[1] << 1) | s[0]
    #print(syndrome)
    msg = list(msg)
    # A nonzero syndrome identifies the single flipped data bit:
    # 6 -> D0, 5 -> D1, 3 -> D2, 7 -> D3.  Other syndromes (parity-bit
    # errors) leave the data bits unchanged.
    if syndrome ==6:
        if msg[0]=='0':
            msg[0]='1'
        else:
            msg[0]='0'
    if syndrome ==5:
        if msg[1]=='0':
            msg[1]='1'
        else:
            msg[1]='0'
    if syndrome ==3:
        if msg[2]=='0':
            msg[2]='1'
        else:
            msg[2]='0'
    if syndrome ==7:
        if msg[3]=='0':
            msg[3]='1'
        else:
            msg[3]='0'
    out = ''
    # Random-binning step: D0 selects the coset — when D0 is '1' the payload
    # bits D1..D3 are emitted complemented, otherwise verbatim.
    if msg[0] == '1':
        if msg[1]=='0':
            out +='1'
        else:
            out +='0'
        if msg[2]=='0':
            out +='1'
        else:
            out +='0'
        if msg[3]=='0':
            out +='1'
        else:
            out +='0'
    else:
        out = msg[1] + msg[2] + msg[3]
    return out
if __name__ == "__main__":
    # Read a 7-bit codeword from stdin and print the decoded 3-bit payload.
    print("Enter Input String of bits - ")  # get in input the ciphertext
    input_string = input().strip()
    h = decrypt(input_string)  # get the decoded text
    print('ciphertext u: ' + h)  # print in output the plaintext
e72c2ec21a2fdafd6135c4c89bd7cb7024254a99 | Python | meikefrdrchs/MyExercises | /assignment_2018_06_05.py | UTF-8 | 243 | 3.984375 | 4 | [] | no_license | def checknumber():
number = input("Type a number!\n")
if number.isdigit() is True:
newnumber = int(number)
print("Cool, your number is",newnumber)
else:
print("Sorry, that's not a number.")
checknumber()
| true |
88fbe1dc13f47f7ff0433809937187446989d7a1 | Python | mohamedun/ExperimentalParallel | /plotting2.py | UTF-8 | 304 | 2.75 | 3 | [] | no_license | import matplotlib.pyplot as plt
from matplotlib import colors
import numpy as np
data = np.random.rand(10, 10 ,3)
# create discrete colormap
cmap = colors.ListedColormap(['red', 'blue'])
bounds = [0,10,20]
norm = colors.BoundaryNorm(bounds, cmap.N)
fig, ax = plt.subplots()
ax.imshow(data)
plt.show() | true |
64022a1bae2fcc985d1d3f86461fc9f676b82773 | Python | brunodantascg/listaExercicioPythonBR | /5exerciciosFuncoes/9reverso.py | UTF-8 | 298 | 4.625 | 5 | [] | no_license | # Exercício Funções - Questão 9
# Reverso do número. Faça uma função que retorne o reverso de um número inteiro informado. Por exemplo: 127 -> 721.
def inverte(num):
y = num
return y[::-1]
num = str(input("Informe número: "))
print(" {0} ---> {1}.".format(num, inverte(num)))
| true |
4adef8530d57b89a5bf4bc28f1fddf7008cb2c4e | Python | AveryPratt/code-katas | /src/pascal_row.py | UTF-8 | 758 | 3.59375 | 4 | [
"MIT"
] | permissive | """O(k) solution for finding a row in pascal's triangle."""
def find_row(row_num):
"""Return the row of pascal's triangle corresponding to the input number."""
row_num -= 1
row_str = str(11 ** row_num)
print(row_str)
pasc = []
carry = 0
prev_carry = 0
for idx in range(row_num // 2 + 1):
inner = int(row_str[:-row_num + idx])
outer = int(row_str[-idx - 1])
print(carry)
print(outer)
print(inner)
print()
prev_carry = carry
carry = inner - outer
pasc.append(outer + 10 * prev_carry)
row_str = row_str[-row_num + idx:]
if row_num % 2:
pasc.extend(pasc[::-1])
else:
pasc.extend(pasc[len(pasc) - 2::-1])
return pasc
| true |
626c99192747d1ce20618f85c33e2a80e70ecd85 | Python | nedlrichards/novice_stakes | /novice_stakes/periodic/bragg_scatter.py | UTF-8 | 2,714 | 2.9375 | 3 | [
"MIT"
] | permissive | """
===========================================
Plane wave reflection from periodic surface
===========================================
Common framework for scatter with a plane wave source and a periodic surface
"""
import numpy as np
from math import pi
class Bragg:
    """Compute plane-wave reflection coefficients for a periodic surface."""

    def __init__(self, Lper, c=1500., attn=0):
        """Surface-specific properties.

        Lper: surface period (m); c: sound speed (m/s);
        attn: volume attenuation, dB / km.
        """
        self.Lper = Lper
        self.Kper = 2 * pi / Lper  # grating wavenumber of the periodic surface
        self.c = c
        self.attn = attn
        # dB/km -> nepers/m via the factor 8686 (= 8.686 dB per neper x 1000
        # m/km); np.spacing guards against division by zero for k == 0.
        # NOTE(review): delta already carries a 1j factor and kacous()
        # multiplies by 1j again — double-check the intended sign convention.
        self.delta = lambda k: 1j * self.attn / 8686.\
                / np.real(k + np.spacing(1))

    def kacous(self, facous):
        """Complex wavenumber of the medium at frequency facous."""
        k = 2 * pi * facous / self.c
        return k + 1j * self.delta(k)

    def xsampling(self, facous, decimation=8):
        """Make a periodically sampled x-axis (dx adjusted to fit the period).

        NOTE(review): uses self.L, which is never assigned in __init__ —
        presumably self.Lper was intended; this method raises AttributeError
        as written.
        """
        dx = self.c / (decimation * facous)
        numx = int(np.ceil(self.L / dx))
        dx = self.L / numx
        xaxis = np.arange(numx) * dx
        return xaxis, dx

    def qvec(self, theta_inc, num_eva, facous):
        """Return vector of Bragg grating orders,
        cutoff after num_eva evanescent orders on each side.
        """
        kacous = np.real(self.kacous(facous))
        kx = np.real(np.cos(theta_inc) * kacous)
        # Propagating orders fit inside |a0 + q*Kper| <= k; pad both sides
        # with num_eva evanescent orders.
        num_p = np.fix((kacous - kx) / self.Kper) + num_eva
        num_n = np.fix((kacous + kx) / self.Kper) + num_eva
        qvec = np.arange(-num_n, num_p + 1)
        return qvec

    def bragg_angles(self, theta_inc, qs, facous):
        """Compute the Bragg-order horizontal (a) and vertical (b) wavenumbers."""
        kacous = self.kacous(facous)
        # compute bragg orders
        a0 = np.real(np.cos(theta_inc) * kacous)
        # conj selects the decaying branch of the square root for
        # evanescent orders.
        b0 = np.conj(np.sqrt(kacous ** 2 - a0 ** 2))
        aq = a0 + qs * self.Kper
        bq = np.conj(np.sqrt(kacous ** 2 - aq ** 2))
        return a0, aq, b0, bq

    def p_sca(self, theta_inc, qs, facous, rs, xsrc, zsrc, xrcr, zrcr):
        """
        Scattered pressure field from plane-wave reflection coefficients.
        """
        a0, aq, b0, bq = self.bragg_angles(theta_inc, qs, facous)
        # Incident-path phase to the surface plus scattered-path phase to
        # the receiver, summed over all orders weighted by rs.
        phase = -a0 * xsrc - b0 * zsrc + aq * xrcr - bq * zrcr
        p_sca = rs @ np.exp(-1j * phase)
        return p_sca

    def r_energy(self, theta_inc, qs, facous, rs):
        """Calculate the energy conservation relative to 1."""
        kacous = self.kacous(facous)
        _, aq, b0, bq = self.bragg_angles(theta_inc, qs, facous)
        # compute energy: only propagating orders (|aq| <= k) carry power
        reali = np.abs(np.real(aq ** 2)) <= np.real(kacous) ** 2
        en_conn = np.abs(rs[reali]) ** 2 * np.real(bq[reali]) / np.real(b0)
        return np.sum(en_conn)
| true |
a3e57abc2b99adfe91cc01af00f8cfd12c841d09 | Python | OtgerVihalem/PasswordGeneratorProject | /passwordgenerator.py | UTF-8 | 2,921 | 2.90625 | 3 | [] | no_license | #importing Libraries
from tkinter import *
import random, string
import pyperclip
import hashlib
import os
import re
###initialize window
root =Tk()
root.geometry("400x400")
root.resizable(0,0)
root.title("DataFlair - PASSWORD GENERATOR")

#heading
# NOTE(review): pack() returns None, so `heading`, `pass_label` and
# `length` below are all None — keep a separate widget reference if the
# widgets must be accessed later.
heading = Label(root, text = 'PASSWORD GENERATOR' , font ='arial 15 bold').pack()
Label(root, text ='DataFlair', font ='arial 15 bold').pack(side = BOTTOM)

pass_label_text = "Password length"
###select password length
pass_label = Label(root, text = pass_label_text, font = 'arial 10 bold').pack()
pass_len = IntVar()
length = Spinbox(root, from_ = 4, to_ = 32 , textvariable = pass_len , width = 15).pack()

#####define function
pass_str = StringVar()
# Initial label text ("Password safety level", Estonian).
passwordsafety="Parooli turvalisuse tase"

def Generator():
    # Build a password of pass_len characters and show its strength label.
    password = ''
    # NOTE(review): this loop reassigns `password` on every iteration, so
    # only the last 4-character seed survives — a single assignment would
    # behave identically.
    for x in range (0,4):
        password = random.choice(string.ascii_uppercase)+random.choice(string.ascii_lowercase)+random.choice(string.digits)+random.choice(string.punctuation)
    # Pad with random characters up to the requested length.
    for y in range(pass_len.get()- 4):
        password = password+random.choice(string.ascii_uppercase + string.ascii_lowercase + string.digits + string.punctuation)
    pass_str.set(password)

    #password length difficulty
    # NOTE(review): this chain is order-dependent — only the first matching
    # branch runs, and generated passwords always contain all character
    # classes, so the regex branches rarely fire; the labels are Estonian
    # ("very weak" ... "strongest").  Verify the intended grading logic.
    x = True
    while x:
        if (len(password) < 6):
            TestLabel['text'] = "Väga nõrk parool"
            break
        if (len(password) > 20):
            TestLabel['text'] = "Nõrk parool"
            break
        elif not re.search("[a-z]", password):
            TestLabel['text'] = "Keskmine parool"
            break
        elif not re.search("[0-9]", password):
            TestLabel['text'] = "Tugev parool"
            break
        elif not re.search("[A-Z]", password):
            TestLabel['text'] = "Väga tugev parool"
            break
        elif not re.search("[$#@]", password):
            TestLabel['text'] = "Kõige tugevaim parool"
            break
        elif re.search("\s", password):
            break
        else:
            # re.search("[a-z]" - weak
            # re.search("[a-z]" and re.search("[0-9]" - medium
            # re.search("[a-z]" and re.search("[0-9]" and re.search("[A-Z]" - strong
            print("Valid Password")
            x = False
            break
    if x:
        print("Not a Valid Password")

###button
Button(root, text = "GENERATE PASSWORD" , command = Generator ).pack(pady= 5)
Entry(root , textvariable = pass_str).pack()

########function to copy
def Copy_password():
    # Copy the currently displayed password to the system clipboard.
    pyperclip.copy(pass_str.get())

Button(root, text = 'COPY TO CLIPBOARD', command = Copy_password).pack(pady=5)

##password difficulty text box
TestLabel = Label(root, text =passwordsafety, font ='arial 15 bold')
TestLabel.pack(pady = 6)

# loop to run program
root.mainloop()
| true |
741435d01ce7492e1ad3e2909fce96666d2c58b9 | Python | deeksha004/Computer-Vision | /safety_helmet_and_mask_prediction/yolov3_ann.py | UTF-8 | 2,124 | 2.609375 | 3 | [] | no_license | # For yolov3 annotations
import pandas as pd
import os
import numpy
import glob
df = pd.read_csv("data.csv")
def convert(df):
    """Convert absolute pixel boxes to normalized YOLO format.

    Expects columns: width, height, xmin, xmax, ymin, ymax.
    Returns a list of [center_x, center_y, w, h], each scaled to [0, 1].

    Iterates positionally over the column arrays instead of df['col'][i]:
    the old label-based lookup raised KeyError on frames whose index is
    not the default RangeIndex (e.g. after filtering).
    """
    yolo_box = []
    cols = (df['width'].values, df['height'].values,
            df['xmin'].values, df['xmax'].values,
            df['ymin'].values, df['ymax'].values)
    for width, height, xmin, xmax, ymin, ymax in zip(*cols):
        dw = 1. / width
        dh = 1. / height
        # Box center and size, each normalized by the image dimensions.
        center_x = ((xmin + xmax) / 2.0) * dw
        center_y = ((ymin + ymax) / 2.0) * dh
        yolo_box.append([center_x, center_y,
                         (xmax - xmin) * dw, (ymax - ymin) * dh])
    return yolo_box
df['yolo_box'] = convert(df)
#print(df.head())
unique_img_ids = df.image_id.unique()
#print(len(unique_img_ids))
if not os.path.exists("yolo_train_annotations"):
os.makedirs("yolo_train_annotations")
folder_location = "yolo_train_annotations"
#change unique_img_ids[:2] to unique_img_ids to iterate through all images
for img_id in unique_img_ids: # loop through all unique image ids. Remove the slice to do all images
#print(img_id)
filt_df = df.query("image_id == @img_id") # filter the df to a specific id
#print(filt_df.shape[0])
all_boxes = filt_df.yolo_box.values
file_name = "{}/{}.txt".format(folder_location,img_id) # specify the name of the folder and get a file name
s = "0 %s %s %s %s \n" # the first number is the identifier of the class. If you are doing multi-class, make sure to change that
with open(file_name, 'a') as file: # append lines to file
for i in all_boxes:
new_line = (s % tuple(i))
file.write(new_line)
all_imgs = glob.glob("images/*.jpg")
all_imgs = [i.split("/")[-1].replace(".jpg", "") for i in all_imgs]
print(len(unique_img_ids))
print(len(all_imgs))
positive_imgs = df.image_id.unique().astype(str)
print(len(positive_imgs))
if len(positive_imgs) != len(all_imgs):
negative_images = set(all_imgs) - set(positive_imgs)
print("All images:, positive images:, Negative images:",len(all_imgs), len(positive_imgs), len(negative_images))
for i in list(negative_images):
file_name = "yolo_train_annotations/{}.txt".format(i)
#print(file_name)
with open(file_name, 'w') as fp:
pass
| true |
59e5f555a708cbe8277c1f9b37d37e2d96315ed8 | Python | kcarter80/2020-advent-of-code | /day-18/part-1.py | UTF-8 | 983 | 3.71875 | 4 | [] | no_license | def evaluate_with_same_precedence(expression):
list_expression = expression.split(' ')
while (len(list_expression) >= 3):
operand_1 = list_expression.pop(0)
operator = list_expression.pop(0)
operand_2 = list_expression.pop(0)
list_expression.insert(0,eval('%s%s%s' %(operand_1,operator,operand_2)))
return str(list_expression[0])
def evaluate_expression(expression):
while expression.find(')') != -1:
end_index = expression.find(')')
start_index = expression.rfind('(',0,end_index)
result_inside_the_parantheses = evaluate_with_same_precedence(expression[start_index+1:end_index])
expression = expression[:start_index] + result_inside_the_parantheses + expression[end_index+1:]
return evaluate_with_same_precedence(expression)
# placing the rows from the input file into a list
with open('input-1') as input_file:
input_lines = input_file.readlines()
sum = 0
for input_line in input_lines:
sum += int(evaluate_expression(input_line.rstrip()))
print(sum) | true |
0057a329b4c89f66393c18b744edceda211c7cab | Python | bcarlier75/python_sandbox | /palindrome_list_int_str.py | UTF-8 | 450 | 4.03125 | 4 | [] | no_license | def pal_str(st):
if st == st[::-1]:
print("The string is a palindrome")
else:
print("Not a palindrome")
def pal_list(arr):
    """Print whether the list equals its own reverse."""
    half = len(arr) // 2
    mismatch = any(arr[i] != arr[-1 - i] for i in range(half))
    if mismatch:
        print("Not a palindrome")
    else:
        print("The array is a palindrome")
# Demo: run both palindrome checks on sample inputs (executes at import).
arr = [1, 2, 3, 4, 5, 4, 3, 2, 1]
pal_list(arr)
# Integers are checked by converting to their decimal string first.
test = 12321
st = str(test)
pal_str(st)
st = 'racecar'
pal_str(st)
d82d19700a71c176bfd111d8bb4eb8aa1fcb6856 | Python | pkulwj1994/ade-code | /ade/common/data_utils/dataset.py | UTF-8 | 2,870 | 2.578125 | 3 | [] | no_license | from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import numpy as np
class ToyDataset(object):
    """Toy-distribution wrapper serving batches from a file, an array or a sampler."""

    def __init__(self, dim, data_file=None, static_data=None):
        """dim: sample dimensionality; data is loaded from data_file, taken
        from static_data, or generated on the fly via gen_batch()."""
        if data_file is not None:
            self.static_data = np.load(data_file)
        elif static_data is not None:
            self.static_data = static_data
        else:
            self.static_data = None
        self.dim = dim
        # print(self.static_data.shape)

    def gen_batch(self, batch_size):
        """Produce one synthetic batch; subclasses must override."""
        raise NotImplementedError

    def data_gen(self, batch_size, auto_reset):
        """Yield mini-batches of shape (<=batch_size, dim).

        With auto_reset True this is an infinite generator that drops the
        ragged final batch and reshuffles after every epoch; with False it
        yields a single epoch including the (possibly smaller) last batch.
        """
        if self.static_data is not None:
            num_obs = self.static_data.shape[0]
            while True:
                for pos in range(0, num_obs, batch_size):
                    if pos + batch_size > num_obs:  # the last mini-batch has fewer samples
                        if auto_reset:  # no need to use this last mini-batch
                            break
                        else:
                            num_samples = num_obs - pos
                    else:
                        num_samples = batch_size
                    yield self.static_data[pos : pos + num_samples, :]
                if not auto_reset:
                    break
                np.random.shuffle(self.static_data)
        else:
            # No static data: delegate to the (subclass-provided) sampler.
            while True:
                yield self.gen_batch(batch_size)
class SizedToyDataset(object):
    """Like ToyDataset, but subsamples a fixed 1000 rows when loading from file."""

    def __init__(self, dim, data_file=None, static_data=None):
        """dim: sample dimensionality; data comes from data_file (randomly
        subsampled to 1000 rows), from static_data, or from gen_batch()."""
        if data_file is not None:
            self.static_data = np.load(data_file)
            # Subsample (with replacement) a fixed-size slice of the data.
            inds = np.random.choice(np.arange(self.static_data.shape[0]), size=1000)
            self.static_data = self.static_data[inds]
        elif static_data is not None:
            self.static_data = static_data
        else:
            self.static_data = None
        self.dim = dim
        # BUG FIX: the unconditional print raised AttributeError
        # ('NoneType' has no attribute 'shape') when neither data_file nor
        # static_data was supplied.
        if self.static_data is not None:
            print(self.static_data.shape)

    def gen_batch(self, batch_size):
        """Produce one synthetic batch; subclasses must override."""
        raise NotImplementedError

    def data_gen(self, batch_size, auto_reset):
        """Yield mini-batches of shape (<=batch_size, dim).

        Same contract as ToyDataset.data_gen: auto_reset=True loops forever,
        dropping the ragged final batch and reshuffling per epoch;
        auto_reset=False yields exactly one epoch including the last batch.
        """
        if self.static_data is not None:
            num_obs = self.static_data.shape[0]
            while True:
                for pos in range(0, num_obs, batch_size):
                    if pos + batch_size > num_obs:  # the last mini-batch has fewer samples
                        if auto_reset:  # no need to use this last mini-batch
                            break
                        else:
                            num_samples = num_obs - pos
                    else:
                        num_samples = batch_size
                    yield self.static_data[pos : pos + num_samples, :]
                if not auto_reset:
                    break
                np.random.shuffle(self.static_data)
        else:
            while True:
                yield self.gen_batch(batch_size)
| true |
53cdcabb2ac69137559926e6d3107cef2fc9c41e | Python | Breath287/Spidering | /url_encodeordecode.py | UTF-8 | 302 | 3.28125 | 3 | [] | no_license | # urlencode()
from urllib import parse
# create query of dictionary with key word
query_str = {'wd': '爬虫'}
# call parse module to encode
result = parse.urlencode(query_str)
# use format function to format string and splice the url
url = 'http://www.google.com/s?{}'.format(result)
print(url)
| true |
702fe36842e612ecd7d2dcdfe50248e4b8f2db6c | Python | chenghaz/10707project | /puchengy/test-share-weights-cnn/models/ssdh.py | UTF-8 | 1,002 | 2.71875 | 3 | [
"MIT"
] | permissive | ''''
SSDH-VGG
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
class SSDH(nn.Module):
    """SSDH head on a VGG backbone: H-unit sigmoid hash layer + 10-way classifier."""

    def __init__(self, vgg, H):
        super(SSDH, self).__init__()
        # Reuse the convolutional feature extractor of the supplied VGG.
        backbone_layers = list(vgg.features.children())
        self.features = nn.Sequential(*backbone_layers)
        self.f2h = nn.Linear(512, H)   # features -> hash activations
        self.h2o = nn.Linear(H, 10)    # hash activations -> class logits
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        # Sigmoid squashes the hash layer into (0, 1).
        hidden = self.sigmoid(self.f2h(flat))
        return hidden, self.h2o(hidden)
class SSDH_BINARY(nn.Module):
    """Inference-time SSDH: thresholds the sigmoid hash layer into hard bits."""

    def __init__(self, vgg):
        super(SSDH_BINARY, self).__init__()
        self.features = nn.Sequential(*list(vgg.features.children()))
        # Share the trained feature->hash projection of the source model.
        self.f2h = vgg.f2h
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        feats = self.features(x)
        flat = feats.view(feats.size(0), -1)
        activations = self.sigmoid(self.f2h(flat))
        # sign(a - 0.5) gives {-1, 0, +1}; shifting and halving maps that
        # to {0, 0.5, 1} (0.5 only when the activation is exactly 0.5).
        return (torch.sign(activations - 0.5) + 1) / 2
214481de810331de34748db4551547782ac84fdd | Python | rajivsarvepalli/Python-Projects | /gmuwork/graphs_and_visuals/useful_classifier_graphs.py | UTF-8 | 9,097 | 3.140625 | 3 | [] | no_license | import matplotlib.pyplot as plt
import numpy as np
def confusion_matrix_plotter(y_true,y_pred,classes,normalize=False,title='Confusion matrix',cmap=plt.cm.Blues,plt_show=True):
    '''
    Plot the confusion matrix of one classifier as a colored image.

    Inputs: ground-truth labels `y_true`, predicted labels `y_pred`,
    `classes` as a list of class-name strings, `normalize` to scale each
    row to fractions, `title` for the figure, `cmap` for the color scale,
    and `plt_show` to control whether plt.show() is called at the end.

    Also prints the raw (or normalized) confusion matrix to stdout.  Cell
    color is darkest where counts are highest; cell text flips to white on
    dark cells for readability.
    '''
    # Local imports keep the heavy deps out of module import time
    # (matplotlib is nevertheless needed at def time for the cmap default).
    import matplotlib.pyplot as plt
    from sklearn.metrics import confusion_matrix
    import numpy as np
    import itertools
    y_pred = np.array(y_pred)
    cm = confusion_matrix(y_true,y_pred)
    if normalize:
        # Row-normalize so each true class sums to 1.
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
        print("Normalized confusion matrix")
    else:
        print('Confusion matrix, without normalization')
    print(cm)
    plt.figure()
    plt.imshow(cm, interpolation='nearest', cmap=cmap,)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=45)
    plt.yticks(tick_marks, classes)
    # Floats with 2 decimals when normalized, plain integers otherwise.
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
    if plt_show:
        plt.show()
def validation_curve(classifier,X,y,param_name,param_range,plt_show=True):
    """Plot train and cross-validation scores while sweeping one hyperparameter.

    NOTE(review): the local import deliberately shadows this function's own
    name with sklearn's validation_curve, so the call below hits sklearn,
    not a recursion.  The `param_range = param_range` line is a no-op.
    """
    import matplotlib.pyplot as plt
    from sklearn.model_selection import validation_curve
    plt.figure()
    param_range = param_range
    train_scores, test_scores = validation_curve(classifier, X, y, param_name=param_name, param_range=param_range,)
    # Mean +/- std across the CV folds for both curves.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.title("Validation Curve")
    plt.xlabel(param_name)
    plt.ylabel("Score")
    plt.ylim(0.0, 1.1)
    lw = 2
    # Log-scaled x-axis; shaded bands show one standard deviation.
    plt.semilogx(param_range, train_scores_mean, label="Training score",
                 color="darkorange", lw=lw)
    plt.fill_between(param_range, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.2,
                     color="darkorange", lw=lw)
    plt.semilogx(param_range, test_scores_mean, label="Cross-validation score",
                 color="navy", lw=lw)
    plt.fill_between(param_range, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.2,
                     color="navy", lw=lw)
    plt.legend(loc="best")
    plt.xticks(np.arange(min(param_range), max(param_range)+1, 1.0))
    if plt_show:
        plt.show()
def learning_curve(classifier,X,y,cv=None,n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5),plt_show=True):
    """
    Generate a simple plot of the test and training learning curve.

    NOTE(review): the local import shadows this function's own name with
    sklearn's learning_curve, so the call below hits sklearn.

    Parameters
    ----------
    classifier : object type that implements the "fit" and "predict" methods
        An object of that type which is cloned for each validation.
    X : array-like, shape (n_samples, n_features)
        Training vector, where n_samples is the number of samples and
        n_features is the number of features.
    y : array-like, shape (n_samples) or (n_samples, n_features), optional
        Target relative to X for classification or regression;
        None for unsupervised learning.
    cv : int, cross-validation generator or an iterable, optional
        Determines the cross-validation splitting strategy.
        Possible inputs for cv are:
          - None, to use the default 3-fold cross-validation,
          - integer, to specify the number of folds.
          - An object to be used as a cross-validation generator.
          - An iterable yielding train/test splits.
        For integer/None inputs, if ``y`` is binary or multiclass,
        :class:`StratifiedKFold` used. If the estimator is not a classifier
        or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
    n_jobs : integer, optional
        Number of jobs to run in parallel (default 1).
    train_sizes : array-like, optional
        Fractions of the training set used for each point on the curve.
    plt_show : bool, optional
        Whether to call plt.show() once the figure is assembled.
    """
    from sklearn.model_selection import learning_curve
    import matplotlib.pyplot as plt
    plt.figure()
    plt.xlabel("Training examples")
    plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(classifier, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
    # Mean +/- std across folds for both the train and validation curves.
    train_scores_mean = np.mean(train_scores, axis=1)
    train_scores_std = np.std(train_scores, axis=1)
    test_scores_mean = np.mean(test_scores, axis=1)
    test_scores_std = np.std(test_scores, axis=1)
    plt.grid()
    # Shaded bands show one standard deviation around each mean curve.
    plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                     train_scores_mean + train_scores_std, alpha=0.1,
                     color="r")
    plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
                     test_scores_mean + test_scores_std, alpha=0.1, color="g")
    plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
             label="Training score")
    plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
             label="Cross-validation score")
    plt.legend(loc="best")
    if plt_show:
        plt.show()
def plot_calibration_curve(X_train,y_train,X_test,y_test,est, name, fig_index,plt_show=False):
    """Plot calibration curve for est w/o and with calibration.

    Compares `est`, its isotonic- and sigmoid-calibrated variants, and a
    logistic-regression baseline: reliability curves on the top panel,
    predicted-probability histograms on the bottom.  Also prints Brier,
    precision, recall and F1 for every classifier (binary targets only).

    NOTE(review): the for-loop below rebinds the `name` parameter, and the
    `fig` handle is never used — both are harmless but worth tidying.
    """
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import (brier_score_loss, precision_score, recall_score,f1_score)
    from sklearn.calibration import CalibratedClassifierCV, calibration_curve
    # Calibrated with isotonic calibration
    isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
    # Calibrated with sigmoid calibration
    sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
    # Logistic regression with no calibration as baseline
    lr = LogisticRegression(C=1., solver='lbfgs')
    fig = plt.figure(fig_index, figsize=(10, 10))
    ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
    ax2 = plt.subplot2grid((3, 1), (2, 0))
    # Diagonal = perfectly calibrated reference.
    ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
    for clf, name in [(lr, 'Logistic'),
                      (est, name),
                      (isotonic, name + ' + Isotonic'),
                      (sigmoid, name + ' + Sigmoid')]:
        clf.fit(X_train, y_train)
        y_pred = clf.predict(X_test)
        if hasattr(clf, "predict_proba"):
            prob_pos = clf.predict_proba(X_test)[:, 1]
        else:  # use decision function
            prob_pos = clf.decision_function(X_test)
            # Min-max scale decision scores into [0, 1] for comparability.
            prob_pos = \
                (prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
        clf_score = brier_score_loss(y_test, prob_pos, pos_label=None)
        print("%s:" % name)
        print("\tBrier: %1.3f" % (clf_score))
        print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
        print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
        print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
        fraction_of_positives, mean_predicted_value = \
            calibration_curve(y_test, prob_pos, n_bins=10)
        ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
                 label="%s (%1.3f)" % (name, clf_score))
        ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
                 histtype="step", lw=2)
    ax1.set_ylabel("Fraction of positives")
    ax1.set_ylim([-0.05, 1.05])
    ax1.legend(loc="lower right")
    ax1.set_title('Calibration plots  (reliability curve)')
    ax2.set_xlabel("Mean predicted value")
    ax2.set_ylabel("Count")
    ax2.legend(loc="upper center", ncol=2)
    plt.tight_layout()
    if plt_show:
        plt.show()
if __name__ == "__main__":
#confusion_matrix_plotter([0,0,0,0,2,2],[1,1,0,1,2,2],['hi','me','re'])
from sklearn.datasets import load_digits
from sklearn.ensemble import RandomForestClassifier
digits = load_digits()
x,y =digits.data,digits.target
print(len(x))
validation_curve(RandomForestClassifier(),x,y,"min_samples_split",[2,10],plt_show=False)
learning_curve(RandomForestClassifier(),x,y,train_sizes=np.linspace(.01, 1.0, 5),plt_show=False)
forest = RandomForestClassifier()
forest.fit(x,y)
confusion_matrix_plotter(y,forest.predict(x),['0','1','2','3','4','5','6','7','8','9'])
| true |
26fddd3c7460ef5746874f0f4afc6c7e0747f5b5 | Python | cgianmarco/quick-nn-tester | /NNTester/tester.py | UTF-8 | 2,744 | 2.765625 | 3 | [] | no_license | import sys
from PyQt4.Qt import *
from predict import *
class Canvas(QWidget):
    """Drawable 280x280 widget: the user paints digits with the mouse,
    and process_pixels() downsamples the drawing to 28x28 grayscale values
    and feeds them to predict() from the predict module."""

    def __init__(self):
        super(Canvas, self).__init__()
        # True while a mouse button is held down (we are drawing).
        self.pressed = False
        # Points painted so far, as QPoint objects.
        self.passed_points = []

    def paintEvent(self, e):
        """Repaint all recorded points as filled black dots."""
        painter = QPainter()
        painter.begin(self)
        painter.setPen(QColor(0, 0, 0))
        painter.setBrush(QColor(0, 0, 0))
        for point in self.passed_points:
            painter.drawEllipse(point, 10, 10)
        painter.end()

    def mousePressEvent(self, QMouseEvent):
        # NOTE(review): the parameter name shadows the QMouseEvent class.
        self.pressed = True
        position = QMouseEvent.pos()
        if position not in self.passed_points:
            self.passed_points.append(position)
        self.repaint()

    def mouseMoveEvent(self, QMouseEvent):
        # Record points only while the button is held down.
        if (self.pressed):
            position = QMouseEvent.pos()
            if position not in self.passed_points:
                self.passed_points.append(position)
            self.repaint()

    def mouseReleaseEvent(self, QMouseEvent):
        self.pressed = False

    def resize(self, pixelmap):
        """Scale the grabbed pixmap down to 28x28 and return it as a QImage.

        NOTE(review): this overrides QWidget.resize() with an incompatible
        signature; a rename (e.g. _downscale) would be safer.
        """
        # resize and convert to Image
        pixelmap = pixelmap.scaledToHeight(28)
        pixelmap = pixelmap.scaledToWidth(28)
        img = pixelmap.toImage()
        # save image (hard-coded path, relative to the working directory)
        img.save("../checkpoints/test.png")
        return img

    def get_pixels(self, img):
        """Return a 28x28 list of floats; 0.0 = white, 1.0 = black ink."""
        pixels = []
        for i in range(28):
            pixels.append([])
            for j in range(28):
                # Invert the red channel so drawn (dark) pixels become 1.
                pixels[i].append(1 - QColor(img.pixel(j, i)).getRgbF()[0])
        return pixels

    def process_pixels(self):
        """Grab the canvas, run the prediction, then clear the drawing."""
        # grab Canvas pixels
        pixelmap = QPixmap.grabWidget(self)
        # resize pixels and convert to Image
        resized_image = self.resize(pixelmap)
        # make prediction with pixels as input
        predict(self.get_pixels(resized_image))
        # empty canvas
        self.passed_points = []
        self.repaint()
class Tester():
    """Build and run the digit-recognizer GUI: a drawable Canvas plus a
    'Recognize' button wired to Canvas.process_pixels.

    Constructing a Tester starts the Qt event loop and blocks until the
    window is closed (sys.exit(app.exec_())).
    """

    def __init__(self):
        app = QApplication(sys.argv)

        window = QWidget()
        window.setWindowTitle("Digit Recognizer")
        window.show()

        canvas = Canvas()
        # White drawing background.  (The original code repeated these two
        # calls twice; the duplicates were redundant and have been removed.)
        canvas.setPalette(QPalette(QColor(255, 255, 255)))
        canvas.setAutoFillBackground(True)
        canvas.setFixedSize(280, 280)

        button = QPushButton('Recognize')
        button.setFixedSize(290, 50)
        button.clicked.connect(canvas.process_pixels)

        layout = QGridLayout(window)
        layout.addWidget(canvas, 0, 0)
        layout.addWidget(button, 1, 0)
        layout.setRowStretch(1, 1)

        window.setGeometry(300, 300, 300, 300)
        sys.exit(app.exec_())
| true |
0239c2a309264edee7ac499a6dd54afd07be9ef8 | Python | dxcv/PythonGUI | /test_learn/chapter7信号与槽/7.5窗口数据传递/transParam_DateDialog_2.py | UTF-8 | 1,707 | 2.921875 | 3 | [] | no_license | # -*- coding:UTF-8 -*-
'''
多窗口数据传递--信号与槽
该窗口为子窗口
子窗口发射的信号有两种,其中一种是发射PyQt内置的一些信号,另一种是发射自定义的信号
'''
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
class DateDialog(QDialog):
    """Child dialog used to demonstrate signal emission between windows.

    It shows two QDateTimeEdit widgets: the first is read by the parent via
    a built-in signal; the second emits the custom Signal_OneParameter
    (carrying the chosen date as a string) whenever its value changes.
    """
    # Custom signal carrying one str argument (the selected date/time).
    Signal_OneParameter=pyqtSignal(str)

    def __init__(self,parent=None):
        super(DateDialog,self).__init__(parent)
        self.setWindowTitle('子窗口:用来发射信号')
        # Add the widgets to a vertical layout.
        layout=QVBoxLayout(self)
        self.label1=QLabel(self)
        self.label1.setText('前者发射内置信号\n后者发射自定义信号')
        # First editor: read through PyQt's built-in signals.
        self.datetime_inner=QDateTimeEdit(self)
        self.datetime_inner.setCalendarPopup(True)
        self.datetime_inner.setDateTime(QDateTime.currentDateTime())
        # Second editor: emits the custom signal (see emit_signal below).
        self.datetime_emit=QDateTimeEdit(self)
        self.datetime_emit.setCalendarPopup(True)
        self.datetime_emit.setDateTime(QDateTime.currentDateTime())
        layout.addWidget(self.label1)
        layout.addWidget(self.datetime_inner)
        layout.addWidget(self.datetime_emit)
        # Two buttons: Ok and Cancel, connected to the accept() and
        # reject() slots respectively.
        buttons=QDialogButtonBox(
            QDialogButtonBox.Ok | QDialogButtonBox.Cancel,
            Qt.Horizontal,self
        )
        buttons.accepted.connect(self.accept)
        buttons.rejected.connect(self.reject)
        layout.addWidget(buttons)
        self.datetime_emit.dateTimeChanged.connect(self.emit_signal)
    def emit_signal(self):
        """Re-emit the second editor's value as the custom string signal."""
        date_str=self.datetime_emit.dateTime().toString()
        self.Signal_OneParameter.emit(date_str)
a04ee20d9616b3b25e78adffdb1438e24866b43a | Python | mhearne-usgs/earthquake-impact-utils | /impactutils/transfer/ftpsender.py | UTF-8 | 10,071 | 2.96875 | 3 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | #!/usr/bin/env python
# stdlib imports
from ftplib import FTP, error_perm
import os.path
import shutil
import tempfile
# local
from .sender import Sender
class FTPSender(Sender):
    '''Class for sending and deleting files and directories via FTP.

    Required properties:
      - remote_host        Name of FTP server.
      - remote_directory   String path on remote_host where local files
                           should be copied to.

    Optional properties:
      - user      String user name, for FTP servers where anonymous login
                  is not allowed.
      - password  String password, for FTP servers where anonymous login
                  is not allowed.

    Usage:
      sender = FTPSender(properties={'remote_host': 'ftp.gov',
                                     'remote_directory': '/pub/incoming/event1'},
                         local_directory='/home/user/event1')
      sender.send() => Creates remote url: ftp://ftp.gov/pub/incoming/event1
                       with the contents of /home/user/event1 in it.
      OR
      sender = FTPSender(properties={'remote_host': 'ftp.gov',
                                     'remote_directory': '/pub/incoming/event1'},
                         local_files=['/home/user/event1/version1/file1.txt'])
      sender.send() => Creates ftp://ftp.gov/pub/incoming/event1/file1.txt
    '''
    _required_properties = ['remote_directory', 'remote_host']
    _optional_properties = ['user', 'password']

    def send(self):
        '''
        Send any files or folders that have been passed to constructor.

        Returns:
            Tuple of the number of files sent to the remote FTP server and
            a message describing success.

        Raises:
            Exception when files cannot be sent to the remote FTP server
            for any reason.
        '''
        remote_host = self._properties['remote_host']
        remote_folder = self._properties['remote_directory']
        try:
            # Log in and cd to (creating if necessary) the remote directory.
            ftp = self._setup()

            # Send any individually specified files.
            nfiles = 0
            for f in self._local_files:
                self.__sendfile(f, ftp)
                nfiles += 1

            # Send everything in the directory we specified, preserving
            # the relative directory structure on the remote side.
            if self._local_directory is not None:
                local_directory = self._local_directory
                allfiles = self.getAllLocalFiles()
                for filename in allfiles:
                    try:
                        self._copy_file_with_path(
                            ftp, filename, remote_folder,
                            local_folder=local_directory)
                        nfiles += 1
                    except Exception:
                        # Best effort: a file that fails to copy is skipped
                        # (and not counted in nfiles) instead of aborting.
                        continue

            ftp.quit()
            return (nfiles, f'{int(nfiles):d} files were sent successfully to {remote_host} {remote_folder}')
        except Exception as obj:
            # BUG FIX: this handler previously referenced an undefined name
            # "host", turning any send failure into a NameError.
            raise Exception(
                f'Could not send to {remote_host}. Error "{str(obj)}"')

    def cancel(self):
        """
        Create a cancel file (named as indicated in constructor "cancelfile"
        parameter) in remote_directory on remote_host.

        The file is empty; its presence signals cancellation to consumers
        polling the remote directory.

        Returns:
            A string message describing what has occurred.
        """
        remote_host = self._properties['remote_host']
        remote_folder = self._properties['remote_directory']
        ftp = self._setup()
        # Create a local (empty) cancel file, then copy it to the server.
        tempdir = tempfile.mkdtemp()
        try:
            tfile = os.path.join(tempdir, self._cancelfile)  # local file
            f = open(tfile, 'wt')
            f.close()
            ftp.cwd(remote_folder)
            self.__sendfile(tfile, ftp)
        except Exception:
            raise Exception(
                f'Could not create .cancel file on {remote_host}/{remote_folder}')
        finally:
            # Always clean up the local scratch directory.
            shutil.rmtree(tempdir)
        return (f'{self._cancelfile} file succesfully placed on {remote_host} {remote_folder}')

    def _setup(self):
        """Initiate an FTP connection with properties passed to constructor.

        Navigate to/create the directory (as necessary) specified by the
        remote_directory property.

        Returns:
            Instance of the ftplib.FTP class, logged in and cd'ed into
            remote_directory.
        """
        host = self._properties['remote_host']
        remote_folder = self._properties['remote_directory']
        # Attempt to log in to the remote host.
        try:
            ftp = FTP(host)
            user = self._properties.get('user', '')
            password = self._properties.get('password', '')
            if user == '':
                ftp.login()
            else:
                ftp.login(user, password)
        except error_perm:
            raise Exception(f'Could not login to remote host {host}')
        # Attempt to cd to (creating if needed) the remote directory.
        try:
            self._create_remote_directory(ftp, remote_folder)
        except Exception:
            ftp.quit()
            raise Exception(
                f'Could not navigate to directory "{remote_folder}" on remote host {host}')
        return ftp

    def _create_remote_directory(self, ftp, remote_directory):
        """Create directory (recursively) on remote_host.

        Args:
            ftp: ftplib.FTP instance.
            remote_directory: String path of directory on remote system
                which needs to be created.

        Raises:
            Exception when unable to create remote_directory.
        """
        # Try a direct cd first; on failure walk the path one component at
        # a time, creating each component that does not exist yet.
        ftp.cwd('/')
        try:
            ftp.cwd(remote_directory)
        except error_perm:
            dirparts = self._split(remote_directory)
            for directory in dirparts:
                try:
                    ftp.cwd(directory)
                except error_perm:
                    try:
                        ftp.mkd(directory)
                        ftp.cwd(directory)
                    except error_perm:
                        raise Exception(
                            f'Unable to create subdirectory {directory}.')

    def _copy_file_with_path(self, ftp, local_file, remote_folder,
                             local_folder=None):
        """
        Copy local_file to remote_folder, preserving its path relative to
        local_folder and creating required sub-directories.

        Usage:
          local_file: /home/user/events/us2016abcd/data_files/datafile.txt
          remote_folder: /data/archive/events
          local_folder: /home/user/events/us2016abcd
          creates: /data/archive/events/data_files/datafile.txt

        Args:
            ftp: ftplib.FTP instance.
            local_file: Local file to copy.
            remote_folder: Remote folder to copy local files to.
            local_folder: Top of local directory where file copying
                started.  If None, local_file is copied into remote_folder
                under its bare name (path not preserved).
        """
        if local_folder is None:
            ftp.cwd(remote_folder)
            # BUG FIX: this branch previously referenced an undefined name
            # "filename" instead of local_file.
            self.__sendfile(local_file, ftp)
        else:
            # Rebuild the file's path relative to local_folder underneath
            # remote_folder, creating intermediate directories as needed.
            local_parts = local_file.replace(local_folder, '').strip(
                os.path.sep).split(os.path.sep)
            remote_parts = self._split(remote_folder)
            all_parts = remote_parts + local_parts
            remote_file = '/' + '/'.join(all_parts)
            remfolder, remfile = self._path_split(remote_file)
            try:
                ftp.cwd(remfolder)
            except error_perm:
                self._create_remote_directory(ftp, remfolder)
            self.__sendfile(local_file, ftp)
            ftp.cwd(remote_folder)

    def __sendfile(self, filename, ftp):
        '''Internal function used to send a file using an FTP object.

        In case somebody is polling for this file, it is uploaded under a
        temporary ".tmp" name first and renamed once the transfer finishes,
        so the poller never grabs a partially transferred file.

        Args:
            filename: Local filename.
            ftp: Instance of FTP object.
        '''
        fdir, fname = os.path.split(filename)  # fname is the bare file name
        tmpfile = fname + '.tmp'
        cmd = "STOR " + tmpfile
        # We don't tell the FTP server about the local path to the file.
        # BUG FIX: the file object was previously never closed (leak).
        with open(filename, "rb") as fobj:
            ftp.storbinary(cmd, fobj, 1024)
        # Rename it to the desired destination.
        ftp.rename(tmpfile, fname)

    def _join(self, *path_parts):
        """Join components into an absolute '/'-separated remote path."""
        return '/' + '/'.join(path_parts)

    def _split(self, path):
        """Split a remote path into its non-empty components."""
        return path.strip('/').split('/')

    def _path_split(self, path):
        """Split a remote path into (parent directory, file name)."""
        parts = path.strip('/').split('/')
        fname = parts[-1]
        fpath = '/' + '/'.join(parts[0:-1])
        return (fpath, fname)
| true |
988155fc563493869a5ceead557940f8b9751f70 | Python | kaleumelo/tamagochi | /main.py | UTF-8 | 6,016 | 3.078125 | 3 | [] | no_license | import random
class Pet():
    """Base tamagotchi pet.

    Subclasses supply the class attributes comida_preferida (preferred
    foods), comida_envenenada (poisonous foods) and jogo_preferido
    (preferred games), which drive comer() and jogar().  Stats (fome, sono,
    humor, saude) are capped at 100; the pet dies when any reaches 0.
    """
    # Possible coat colours; one is picked at random in __init__.
    cores = ('branco', 'preto', 'preto e branco', 'cinza', 'marrom', 'tricolor', 'difereciado')
    def __init__(self):
        # NOTE(review): calling input() inside the constructor makes this
        # class hard to test; consider passing the name as a parameter.
        self.nome = input('Qual o nome do seu Pet? - ')
        x = random.randint(0, 6)
        self.cor = self.cores[x]
        self.fome = 100
        self.sono = 100
        self.humor = 70
        self.saude = 100
        self.dimas = 0  # diamonds earned by playing preferred games
    def comer(self, comida):
        """Feed the pet; the effect depends on the subclass food lists."""
        if comida in self.comida_preferida:
            self.fome += 20
        elif comida in self.comida_envenenada:
            self.fome += 5
            self.saude -= 25
        else:
            self.fome += 10
        # Cap fome at 100; overeating also costs health.
        if self.fome > 100:
            self.fome = 100
            self.saude -= 5
        self.sono -= 10
        self.humor -= 10
    def dormir(self):
        """Sleep: restores sono fully, costs fome/humor, heals a little."""
        # Sleeping when not tired bores the pet.
        if self.sono == 100:
            self.humor -= 30
        self.sono = 100
        self.fome -= 20
        self.humor -= 15
        if self.saude < 100:
            self.saude += 5
    def jogar(self, jogo):
        """Play a game; preferred games also earn diamonds (win at 5)."""
        if jogo in self.jogo_preferido:
            self.humor += 20
            self.dimas += 1
            if self.dimas == 5:
                print('Voce ganhou !!! UUHUUUL')
                # NOTE(review): this bare "breakpoint" expression is a
                # no-op — it neither breaks a loop nor ends the game.
                breakpoint
        else:
            self.humor += 10
        # Cap humor at 100; excess excitement costs extra sleep.
        if self.humor > 100:
            self.humor = 100
            self.sono -= 25
        self.sono -= 10
        self.fome -= 15
    def Pet_morrendo(self):
        """Return True when any stat has dropped to 0 or below (death)."""
        if self.sono <= 0 or self.fome <= 0 or self.saude <= 0 or self.humor <= 0:
            return True
        else:
            return False
    def preferencias_Pet(self):
        """Print the pet's food/game preferences and dangerous foods."""
        print('A/O {} prefere comer {} e jogar com {}, mas tome cuidado - ele pode morrer por causa de {}.'.format(
            self.nome, ', '.join(self.comida_preferida), ', '.join(self.jogo_preferido), ', '.join(self.comida_envenenada)))
    def print_status(self):
        """Print the current values of all stats."""
        print('Como {} se sente:\n fome: {}\n sono: {}\n humor: {}\n saude: {}\n diamantes:{}'.format(
            self.nome, self.fome, self.sono, self.humor, self.saude, self.dimas))
class Gato(Pet):
    # Cat: preference lists consumed by Pet.comer() and Pet.jogar().
    comida_preferida = ['peixe', 'carne', 'leite']
    comida_envenenada = ['chocolate', 'lixo', 'cenoura', 'doce']
    jogo_preferido = ['ratinho de pelucia', 'laser']
class Cachorro(Pet):
    # Dog: preference lists consumed by Pet.comer() and Pet.jogar().
    comida_preferida = ['carne', 'bone']
    comida_envenenada = ['chocolate', 'leite', 'cenoura', 'doce']
    jogo_preferido = ['bola', 'sapatos do dono']
class Coelho(Pet):
    # Rabbit: preference lists consumed by Pet.comer() and Pet.jogar().
    comida_preferida = ['lixo', 'cenoura']
    comida_envenenada = ['peixe', 'carne', 'doce']
    jogo_preferido = ['abraco', 'historinha no ouvido']
# Interactive game loop (Portuguese prompts are user-facing strings).
p = ''
pets_possiveis = ['gato', 'cachorro', 'coelho']
acoes = ['comer', 'dormir', 'jogar']
jogos = ['ratinho de pelucia', 'laser', 'bola', 'sapatos do dono', 'abraco', 'historinha no ouvido']
foods = ['peixe', 'carne', 'leite', 'bone', 'lixo', 'cenoura']
# Ask for a pet type until the answer matches one of pets_possiveis.
while p == '' or p.lower() not in pets_possiveis:
    p = input('Qual o tipo de Pet q voce quer? (gato, cachorro, coelho): ')
if p.lower() == 'gato':
    pet = Gato()
elif p.lower() == 'cachorro':
    pet = Cachorro()
else:
    pet = Coelho()
print('Show! Voce tem um {} chamado {}! Que tem a cor {}!:)'.format(p.lower(), pet.nome, pet.cor))
pet.preferencias_Pet()
# Main menu loop: each turn the player feeds, sleeps or plays; the
# try/except turns any non-numeric input into a retry.
while True:
    try:
        if not pet.Pet_morrendo():
            answ = int(input('Voce quer jogar mais?\n 1 - sim\n 2 - nao\n'))
            if answ == 1:
                pet.print_status()
                acao = int(input('Por favor, escolha uma acao (precisa digitar um numero):\n 1 - {}\n 2 - {}\n 3 - {}\n'.format(
                    acoes[0], acoes[1], acoes[2])))
                if 1 <= acao <= 3:
                    acao = acoes[acao - 1]
                    if acao == acoes[0]:
                        # Eat: pick one of the six foods.
                        comida = int(input(
                            'Por favor, escolha a comida (precisa digitar um numero):\n 1 - {}\n 2 - {}\n 3 - {}\n 4 - {}\n 5 - {}\n 6 - {}\n'.format(
                                foods[0], foods[1], foods[2], foods[3], foods[4], foods[5])))
                        if 1 <= comida <= 6:
                            comida = foods[comida - 1]
                            pet.comer(comida)
                        else:
                            print('Perdao, mas voce tem que escolher entre 1 e 6, talvez ele coma mais tarde. ;(')
                    elif acao == acoes[1]:
                        pet.dormir()
                    else:
                        # Play: pick one of the six games.
                        jogo = int(input(
                            'Por favor, escolha o jogo (precisa digitar um numero):\n 1 - {}\n 2 - {}\n 3 - {}\n 4 - {}\n 5 - {}\n 6 - {}\n'.format(
                                jogos[0], jogos[1], jogos[2], jogos[3], jogos[4], jogos[5])))
                        if 1 <= jogo <= 6:
                            jogo = jogos[jogo - 1]
                            pet.jogar(jogo)
                        else:
                            print('Perdao, mas voce tem que escolher entre 1 e 6, talvez ele coma mais tarde. ;(')
                else:
                    print('Perdao, mas voce tem que escolher entre 1 e 3! ')
            elif answ == 2:
                break
        else:
            # A stat hit zero: the pet has died, end the game.
            print('NAAAAO, TU MATOU O MENÒÒÒ!!! A/O {} ta no ceu agora, descanse em paz!;(\n Por favor, NAO faca isso com animais de verdade! '.format(pet.nome))
            break
    except ValueError as err:
        print('Valor inserido nao valido!')
b29c67c307a6f7687e8767b8009d28ae830c25ac | Python | pavanteja295/Continual-Learning-Benchmark | /models/layer5_network.py | UTF-8 | 1,496 | 2.625 | 3 | [
"MIT"
] | permissive | import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
class Layer5_Network(nn.Module):
    """Small five-convolution CNN.

    During the forward pass, each convolution's pre-ReLU output is recorded
    in ``self.act`` under the key ``'convN_pre_relu'``.
    """

    def __init__(self, num_classes=2):
        super(Layer5_Network, self).__init__()
        self.act = OrderedDict()
        self.conv1 = nn.Conv2d(3, 32, 3)
        self.conv2 = nn.Conv2d(32, 32, 3)
        self.drop_outA = nn.Dropout(0.15)
        self.conv3 = nn.Conv2d(32, 64, 3)
        self.conv4 = nn.Conv2d(64, 64, 3)
        self.drop_outB = nn.Dropout(0.15)
        self.conv5 = nn.Conv2d(64, 128, 2)
        self.last = nn.Linear(128 * 4, num_classes)

    def logits(self, x):
        """Final linear classification layer."""
        return self.last(x)

    def _conv_relu(self, conv, tag, feats):
        """Apply `conv`, stash the pre-activation under `tag`, return its ReLU."""
        pre = conv(feats)
        self.act[tag] = pre
        return F.relu(pre)

    def forward(self, x):
        feats = self._conv_relu(self.conv1, 'conv1_pre_relu', x)
        feats = self._conv_relu(self.conv2, 'conv2_pre_relu', feats)
        feats = self.drop_outA(F.max_pool2d(feats, 2, 2))
        feats = self._conv_relu(self.conv3, 'conv3_pre_relu', feats)
        feats = self._conv_relu(self.conv4, 'conv4_pre_relu', feats)
        feats = self.drop_outB(F.max_pool2d(feats, 2, 2))
        feats = self._conv_relu(self.conv5, 'conv5_pre_relu', feats)
        feats = F.avg_pool2d(feats, 2, 2)
        return self.logits(feats.view(-1, 128 * 4))
| true |
01f07eecab2c35ffed790f1182e21e1c1fde8df1 | Python | pavanyadav007/Data_Extracting | /Assignments/test_assignment_2.py | UTF-8 | 1,927 | 3.328125 | 3 | [] | no_license | from assignment_2 import *
def strayNumber_odd_out():
    # NOTE(review): this def shadows the strayNumber_odd_out imported from
    # assignment_2 (the def runs after the star-import), so these asserts
    # recurse into this zero-argument function and would raise a TypeError
    # if executed.  It also lacks the test_ prefix, so pytest never
    # collects it.  Consider renaming to test_strayNumber_odd_out.
    assert strayNumber_odd_out([100,100,100,5]) == 5
    assert strayNumber_odd_out([100,0,0]) == 100
    assert strayNumber_odd_out([1,1,1,1]) == 1
    assert strayNumber_odd_out([23,23,23,23]) == 0
    assert strayNumber_odd_out([1000,1000,1000,1]) == 1
def test_Mean_of_elements():
    # Checks Mean_of_elements from assignment_2.  NOTE(review): expected
    # values suggest an integer (floor) mean — e.g. mean([1,2,3,4]) is 2.5
    # but 2 is expected; confirm against assignment_2.  The != 'none'
    # assertion is vacuous for any numeric result.
    assert Mean_of_elements([1,2,3,4]) == 2
    assert Mean_of_elements([12,3,45,7,8]) != 'none'
    assert Mean_of_elements([10,12,13,14]) == 12
    assert Mean_of_elements([66,67,69]) == 67
#Find the average speed of vehicle, given the distance travelled for fixed time intervals, e.g. [0, 0.1, 0.25, 0.45, 0.55, 0.7, 0.9, 1.0]
def calculation_avg():
    # NOTE(review): this def shadows the calculation_avg imported from
    # assignment_2, so the asserts would call this zero-argument function
    # recursively (TypeError) if run; also missing the test_ prefix, so
    # pytest never collects it.
    assert calculation_avg(10,[0, 0.1, 0.25, 0.45, 0.55, 0.7, 0.9, 1.0,12]) == 23.0
    assert calculation_avg(100,[19, 13, 123, 5, 0.3, 77, 99, 11]) == 47.0
    assert calculation_avg(1,[19, 122, 99, 11]) == 0
#* Find the no.of people in a bus, given the data of people onboarding & alighting at each station
def pepole_onBoard_total():
    # NOTE(review): these comparisons are missing the assert keyword, so
    # they are no-op expressions; the def also shadows the function under
    # test and lacks the test_ prefix, so pytest never collects it.
    pepole_onBoard_total(5, [8,6,4,3,1], [2,1,1,1,1]) == 16
    pepole_onBoard_total(4, [0,5,4,3,1], [1,1,1,1,3]) == 8
    pepole_onBoard_total(4, [0,5,8,6,9,1], [1,5,6,6] )== 1
#* Find the missing number, given the original list and modified one
def Finding_missed_one():
    # NOTE(review): missing assert keywords (these lines are no-ops); the
    # def shadows the imported Finding_missed_one and lacks the test_
    # prefix, so pytest never collects it.
    Finding_missed_one([3,5,6,10,12,11], [6,10,2]) == 2
    Finding_missed_one([5,6,8,10,11], [6,7,8,10,11]) == 7
    Finding_missed_one([3,1,10,11], [6,10,12,2]) == 6
#* Find the difference between two lowest numbers in the list
def difference_bt_Low():
    # NOTE(review): missing assert keywords (no-op expressions); the def
    # shadows the imported difference_bt_Low and lacks the test_ prefix.
    # The last expected value looks wrong: 566 - 394 = 172, not 174.
    difference_bt_Low([10,50,8,40,2]) == 6
    difference_bt_Low([100,500,877,324,568]) == 224
    difference_bt_Low([999,687,800,394,566]) == 174
#In a given list, count no.of elements smaller than their mean
def find_mean_lower():
    # NOTE(review): missing assert keywords (no-op expressions); the def
    # shadows the imported find_mean_lower and lacks the test_ prefix, so
    # pytest never collects it.
    find_mean_lower([1,2,5,4,5,6]) == 2
    find_mean_lower([6,4,5,7,5,8]) == 3
    find_mean_lower([6,4,12,23,8]) == 3
e69af54d9fa8a724bdd001233331210f51507130 | Python | Anne19953/LearnPython | /Day9/进程与线程的区别.py | UTF-8 | 646 | 3.015625 | 3 | [] | no_license | #!/usr/bin/env python
# coding:utf-8
"""
Name : 进程与线程的区别.py
Author : anne
Time : 2019-08-31 16:52
Desc:
"""
from threading import Thread
from multiprocessing import Process
import os
def work():
    """Reset the module-global n to 0; used as the thread/process target."""
    global n
    n=0
if __name__ == '__main__':
    # Process variant (kept for comparison): a child process gets its own
    # copy of the globals, so work() changing n would not affect the parent.
    # n=100
    # p=Process(target=work)
    # p.start()
    # p.join()
    # print('主',n) # The child process p set its own global n to 0, but only its own copy; the parent's n is still 100.
    t=Thread(target=work)
    t.start()
    # NOTE(review): assigning n = 1 after start() races with work(); the
    # printed value depends on which assignment runs last.
    n = 1
    t.join()
    print('主',n) # Typically prints 0, because threads within the same process share the process's global data.
f1fb0210c4c220097e00229ed32a76f2fa19f600 | Python | dslab-epfl/myedu-catalog | /app/epfl/courses/search/parser.py | UTF-8 | 4,323 | 2.734375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012 EPFL.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Query parser."""
__author__ = "stefan.bucur@epfl.ch (Stefan Bucur)"
import re
# Token grammar for query strings: bare terms, quoted phrases, whitespace
# runs, and the colon used by "key:value" filters and "@key:value"
# directives.
TOKEN_PATTERN = r"""
(?P<term>[^\s"':]+)
|(?P<doublequote>\".*?(?:"|$))
|(?P<singlequote>\'.*?(?:'|$))
|(?P<whitespace>[\s]+)
|(?P<colon>[:])
"""

TOKEN_RE = re.compile(TOKEN_PATTERN, re.VERBOSE | re.UNICODE)


def TokenizeQuery(query_string, discard_ws=True):
  """Yield (token_name, token_value) pairs for the given query string.

  Whitespace tokens are suppressed unless discard_ws is False.  Raises
  ValueError if the string cannot be tokenized in full.
  """
  pos = 0
  match = TOKEN_RE.match(query_string, pos)
  while match:
    pos = match.end()
    kind = match.lastgroup
    if not (discard_ws and kind == "whitespace"):
      yield kind, match.group(kind)
    match = TOKEN_RE.match(query_string, pos)

  if pos != len(query_string):
    raise ValueError("Tokenization error at position %d of %d"
                     % (pos, len(query_string)))
class SearchQuery(object):
  """Structured representation of a search query.

  A query is an ordered list of components, each one of:
    - TERM: a bare search term ("foo"),
    - FILTER: a (key, value) pair written "key:value",
    - DIRECTIVE: a (key, value) pair written "@key:value".
  """
  TERM = 0
  FILTER = 1
  DIRECTIVE = 2

  def __init__(self, terms=None, filters=None, directives=None):
    """Build a query from optional term list, filter pairs and directive map."""
    self.components = []
    if directives:
      # Compatibility fix: .items() works on both Python 2 and 3, whereas
      # the original .iteritems() is Python-2 only.
      self.components.extend([(self.DIRECTIVE, (k, v)) for k, v in directives.items()])
    if filters:
      self.components.extend([(self.FILTER, (k, v)) for k, v in filters])
    if terms:
      self.components.extend([(self.TERM, term) for term in terms])

  @property
  def terms(self):
    """The bare search terms, in component order."""
    return [term for t, term in self.components if t == self.TERM]

  @property
  def filters(self):
    """The (key, value) filter pairs, in component order."""
    return [filt for t, filt in self.components if t == self.FILTER]

  @property
  def directives(self):
    """The directives as a dict of key -> value."""
    return dict([directive for t, directive in self.components
                 if t == self.DIRECTIVE])

  def ReplaceFilter(self, key, value):
    """Drop any existing filters for `key` and append (key, value)."""
    self.components[:] = [(t, v) for t, v in self.components
                          if t != self.FILTER or v[0] != key]
    self.components.append((self.FILTER, (key, value)))

  def GetString(self, include_directives=True):
    """Serialize the query back to its textual form.

    Directives are rendered as "@key:value" (omitted when
    include_directives is False), filters as "key:value", and terms
    verbatim, joined by single spaces in component order.
    """
    query_string = []
    for t, value in self.components:
      if t == self.DIRECTIVE and include_directives:
        # BUG FIX: the format string was "@s:%s" with two arguments, which
        # raised a TypeError; a directive serializes as "@key:value".
        query_string.append("@%s:%s" % (value[0], value[1]))
      if t == self.FILTER:
        query_string.append("%s:%s" % (value[0], value[1]))
      if t == self.TERM:
        query_string.append(value)

    return " ".join(query_string)

  def ExtractTerms(self):
    """Return all word tokens from terms and filter values (directives excluded)."""
    result = []
    for t, value in self.components:
      if t == self.FILTER:
        result.extend(re.findall(r"\w+", value[1], re.UNICODE))
      if t == self.TERM:
        result.extend(re.findall(r"\w+", value, re.UNICODE))
    return result

  @classmethod
  def _FixSpecialTerm(cls, term):
    """Normalize the boolean operator 'or' to uppercase; pass others through."""
    lo_term = term.lower()
    if lo_term == "or":
      return "OR"
    return term

  @classmethod
  def ParseFromString(cls, query_string):
    """Parse a textual query into a SearchQuery.

    A term followed by a colon and another term/quoted value becomes a
    FILTER (or a DIRECTIVE when the key starts with "@"); in that case the
    provisional TERM appended for the key is popped and replaced.
    """
    query = cls()
    last_term = None
    found_colon = False
    is_directive = False

    for tname, tvalue in TokenizeQuery(query_string):
      if tname == "doublequote" or tname == "singlequote" or tname == "term":
        if found_colon:
          # The previous TERM was actually a filter/directive key.
          query.components.pop()
          if is_directive:
            query.components.append((cls.DIRECTIVE, (last_term.lstrip("@"), tvalue)))
          else:
            query.components.append((cls.FILTER, (last_term, tvalue)))
          found_colon = False
          last_term = None
          is_directive = False
        else:
          is_directive = tvalue.startswith("@")
          last_term = tvalue
          query.components.append((cls.TERM, cls._FixSpecialTerm(tvalue)))
      elif tname == "colon":
        if last_term:
          found_colon = True
        else:
          # A stray colon with no preceding term is kept as a plain term.
          found_colon = False
          query.components.append((cls.TERM, cls._FixSpecialTerm(tvalue)))

    return query
| true |
f22fb6f2fb7eb9c0d97b21bcc7c372266349498d | Python | niroyb/CompInfo_H13 | /ChallengeAppelGagnant/solve cases.py | UTF-8 | 608 | 3 | 3 | [] | no_license |
import subprocess as sp
def getOutput(args, inputStr):
#Execute process
sorter = sp.Popen(args, stdin=sp.PIPE, stdout=sp.PIPE)
#Send input to created process through stdin
sorter.stdin.write(inputStr)
sorter.stdin.close()
#Obtain output from the created process
result = sorter.stdout.read()
return result
with open('AllCases.txt') as f:
cases = f.read().splitlines()
with open('AllCasesSolved.txt', 'w') as outf:
args = ['python', 'sommeChiffresNombresFinal2.py']
for case in cases:
sol = getOutput(args, case)
outf.write(case+'\t'+sol+'\n')
| true |
16eed1dfe0d1a000483a9c1a85a9c2267f782b9f | Python | skanin/MCTS | /games/nim/Nim.py | UTF-8 | 2,029 | 3.328125 | 3 | [] | no_license | import yaml
class Nim():
    """State machine for the game of Nim.

    Two players alternate removing between 1 and max_removal stones from a
    pile of num_stones; the player who removes the last stone wins.
    """

    def __init__(self, num_stones, max_removal, starting_player):
        self.player = starting_player        # player to move (1 or 2)
        self.starting_player = starting_player
        self.num_stones = num_stones         # stones left in the pile
        self.max_removal = max_removal       # most stones removable per turn
        # NOTE(review): reading config.yaml here ties construction to the
        # working directory; consider injecting the config instead.
        self.cfg = yaml.safe_load(open('config.yaml', 'r'))
        self.LEGAL_MOVES = [i for i in range(1, self.max_removal + 1)]
        self.winner = -1                     # -1 until somebody wins

    def get_legal_moves(self):
        """Moves playable right now (cannot remove more stones than remain)."""
        return [i for i in self.LEGAL_MOVES if i <= self.num_stones]

    def to_string_representation(self):
        """Return '<player><stones>'; after a win, the player shown is the
        one who would move next (the opponent of the winner)."""
        return f'{self.player if not self.is_win() else self.opposite_player()}{self.num_stones}'

    def opposite_player(self):
        return 1 if self.player == 2 else 2

    def is_win(self):
        """True when the pile is empty, i.e. the game is over."""
        return self.num_stones == 0

    def get_winner(self):
        """Return the winning player, or False if the game is not over."""
        if not self.is_win():
            return False
        return self.player

    def is_legal_move(self, move):
        return move in self.get_legal_moves()

    def change_player(self):
        self.player = 1 if self.player == 2 else 2

    def make_move(self, move, mcts=True):
        """Apply `move` (number of stones to remove) for the current player.

        Returns:
            (state string, game-over flag, player, legal moves afterwards).
            On a winning move the turn does not pass, so `player` is the
            winner; otherwise it is the next player to move.

        Raises:
            ValueError: if `move` is not currently legal.
        """
        if not self.is_legal_move(move):
            # BUG FIX: the original `raise('Not legal move!')` raised a bare
            # string, which is itself a TypeError in Python 3.
            raise ValueError('Not legal move!')
        self.num_stones -= move
        if self.is_win():
            self.winner = self.player
            return self.to_string_representation(), True, self.player, self.get_legal_moves()
        self.change_player()
        return self.to_string_representation(), False, self.player, self.get_legal_moves()

    def game_from_string_representation(self, st):
        """Build a Nim game from a '<player><stones>' string.

        max_removal comes from the loaded config.
        """
        player = int(st[0])
        num_stones = int(st[1:])
        max_removal = self.cfg['nim']['max_removal']
        # BUG FIX: Nim() requires a starting_player argument; the original
        # two-argument call raised a TypeError.
        nim = Nim(num_stones, max_removal, player)
        nim.player = player
        return nim

    def game_from_game(self, st, old_game):
        """Build a Nim game from a state string, keeping old_game's starting player."""
        player = int(st[0])
        num_stones = int(st[1:])
        max_removal = self.cfg['nim']['max_removal']
        game = Nim(num_stones, max_removal, old_game.starting_player)
        game.player = player
        return game
cb90c01631b3e50f4fa42827c9f8db4ca0d32368 | Python | lukasmartinelli/sharpen | /challenges/backtracking/letter_phone.py | UTF-8 | 1,220 | 3.734375 | 4 | [
"CC0-1.0"
] | permissive | digit_lookup = {
'0': ['0'],
'1': ['1'],
'2': ['a', 'b', 'c'],
'3': ['d', 'e', 'f'],
'4': ['g', 'h', 'i'],
'5': ['j', 'k', 'l'],
'6': ['m', 'n', 'o'],
'7': ['p', 'q', 'r', 's'],
'8': ['t', 'u', 'v'],
'9': ['w', 'x', 'y', 'z']
}
def letter_combinations(digits):
return list(possible_combination(digits, []))
def possible_combination(digits, combination):
if len(digits) > 0:
digit = digits[0]
for letter in digit_lookup[digit]:
new_combination = combination + [letter]
for c in possible_combination(digits[1:], new_combination):
yield ''.join(c)
else:
yield ''.join(combination)
def test_combination():
assert letter_combinations("23") == [
"ad", "ae", "af", "bd", "be",
"bf", "cd", "ce", "cf"
]
assert letter_combinations("1589") == [
"1jtw", "1jtx", "1jty", "1jtz", "1juw", "1jux", "1juy", "1juz",
"1jvw", "1jvx", "1jvy", "1jvz", "1ktw", "1ktx", "1kty", "1ktz",
"1kuw", "1kux", "1kuy", "1kuz", "1kvw", "1kvx", "1kvy", "1kvz",
"1ltw", "1ltx", "1lty", "1ltz", "1luw", "1lux", "1luy", "1luz",
"1lvw", "1lvx", "1lvy", "1lvz"
]
| true |
35c5dd6a6954c4b49f758de218dad90e299f6f7f | Python | aminyl/PythonUtils | /sqlite.py | UTF-8 | 636 | 2.5625 | 3 | [] | no_license | """
Revert history from ipython history file.
Command line:
sqlite3 history.sqlite -line ' select source_raw from history;' > hist_all_sql.txt
"""
import sqlite3
def fetch_all(cursor, name):
cursor.execute("select * from %s" % name)
return cursor.fetchall()
path = "../../tmp/ipython/profile_default/history.sqlite"
conn = sqlite3.connect(path)
cursor = conn.cursor()
cursor.execute("select name from sqlite_master where type='table'")
targets = [f[0] for f in cursor.fetchall()]
res = {t:fetch_all(cursor, t) for t in targets}
his = [h[-1] for h in res["history"]]
s = "\n".join(his)
open("ipython_hist.txt", "w").write(s)
| true |
6a067f5b57f2d0904ed545ec10a5adc18117357c | Python | paragrapharamus/msdp | /libs/PrivacyRaven/src/privacyraven/extraction/metrics.py | UTF-8 | 969 | 2.703125 | 3 | [
"MIT",
"Apache-2.0"
] | permissive | import torch
import tqdm
from libs.PrivacyRaven.src.privacyraven.extraction.synthesis import process_data
from libs.PrivacyRaven.src.privacyraven.utils.query import get_target, query_model
def label_agreement(
test_data,
substitute_model,
query_victim,
victim_input_shape,
substitute_input_shape,
):
"""Returns the number of agreed upon data points between victim and substitute,
thereby measuring the fidelity of an extraction attack"""
limit = int(len(test_data))
if limit >= 100:
# We limit test data to 100 samples for efficiency
limit = 100
x_data, y_data = process_data(test_data, limit)
substitute_result = get_target(substitute_model, x_data, substitute_input_shape)
victim_result = query_victim(x_data)
agreed = torch.sum(torch.eq(victim_result, substitute_result)).item()
print(f"Fidelity: Out of {limit} data points, the models agreed upon {agreed}: {100 * agreed / limit:.2f}%")
return agreed / limit
| true |
aa677a6038b674747b9a11e4ac4ed3e0f7a7843b | Python | rachelmkim/Effects-of-Greenhouse-Gas-Emissions-on-Different-Areas-of-Canada | /main.py | UTF-8 | 16,685 | 3.125 | 3 | [] | no_license | """CSC110 Fall 2020 Final Project, Main
Description
===============================
This module is the graphical user interface that allows the user to access all the components of
our project. To display a map, the interface prompts the user to enter a valid year, which creates
three maps. To display a graph, the user needs to select a station that could be filtered out by
province as well as by the search bar.
Copyright and Usage Information
===============================
This file is provided solely for the personal and private use of TA's and professors
teaching CSC110 at the University of Toronto St. George campus. All forms of
distribution of this code, whether as given or with any changes, are
expressly prohibited. For more information on copyright for CSC110 materials,
please consult our Course Syllabus.
This file is Copyright (c) 2020 Dana Alshekerchi, Nehchal Kalsi, Rachel Kim, Kathy Lee.
"""
from tkinter import Button, Entry, Label, StringVar, mainloop, Tk, Toplevel
from tkinter import ttk
import json
import ast
from PIL import ImageTk, Image
import data_reading
import combine
import maps
# Creates the main window
ROOT = Tk()
# Opens the image for the title
TITLE_IMAGE = Image.open('title_image.png')
# Resizes the image
SMALLER = TITLE_IMAGE.resize((300, 300), Image.ANTIALIAS) # (300, 255)
NEW_TITLE = ImageTk.PhotoImage(SMALLER)
# Displays the image as a label
TITLE_LABEL = Label(ROOT, image=NEW_TITLE, borderwidth=0)
TITLE_LABEL.grid(row=1, column=1, columnspan=4)
def window(main) -> None:
    """
    Configure the given Tk root window: set its title, fix its size to
    575x550 pixels and centre it on the user's screen.
    """
    main.title('Effects of Greenhouse Gases in Canada')
    # Flush pending geometry events so the screen measurements below are accurate.
    main.update_idletasks()
    # Fixed window dimensions (pixels).
    width = 575
    height = 550
    # Top-left corner coordinates that centre the window on the screen.
    x = (main.winfo_screenwidth() // 2) - (width // 2)
    y = (main.winfo_screenheight() // 2) - (height // 2)
    main.geometry('{}x{}+{}+{}'.format(width, height, x, y))
# Creates an icon
ROOT.iconbitmap('leaf.ico')
# Background colour
ROOT.config(bg='#FFE4AE')
# From Assignment 2 Part 4
def read_temp_data(file: str) -> dict:
    """Return the temperature-station data stored in the given JSON file.

    In this project each key of the returned dictionary is the string form
    of a (station_name, province_abbreviation) tuple and each value holds
    that station's temperature records (see how UNFILTERED_DATA is used
    below), but this function simply returns whatever the file contains.

    Preconditions:
        - file is the path to a valid JSON file.

    (The previous docstring was copy-pasted from Assignment 2 and described
    course data that this function never handles.)
    """
    with open(file) as json_file:
        return json.load(json_file)
# Retrieving data needed
UNFILTERED_DATA = read_temp_data('data.json')
DATA = {x: UNFILTERED_DATA[x] for x in UNFILTERED_DATA if UNFILTERED_DATA[x] != {}}
CITIES = [ast.literal_eval(x)[0] for x in DATA.keys()]
PROVINCE = [ast.literal_eval(x)[1] for x in DATA.keys()]
ABB_TO_PROVINCE = {'BC': 'British Columbia', 'MAN': 'Manitoba', 'ALTA': 'Alberta',
'NFLD': 'Newfoundland and Labrador', 'PEI': 'Prince Edward Island',
'YT': 'Yukon', 'NB': 'New Brunswick', 'SASK': 'Saskatchewan',
'NU': 'Nunavut', 'ONT': 'Ontario', 'NS': 'Nova Scotia',
'NWT': 'Northwest Territories', 'QUE': 'Quebec'}
def map_open() -> None:
    """
    Open three choropleth maps in the browser for the year typed into the
    year entry box: raw GHG emissions per province, emissions change since
    1990, and mean-temperature change since 1990. Any entry that is not a
    year between 1991 and 2018 updates YEAR_RANGE_LABEL with an error
    message instead.
    """
    # File names of the data sets the maps are built from.
    province_geojson_file_name = 'canada_provinces.geojson'
    weather_stations_geojson = 'weather_stations.geojson'
    daily_temps_geojson = 'data_for_maps_since_1990.json'
    emissions_csv_file_name = 'GHG_IPCC_Can_Prov_Terr.csv'
    province_id_map = maps.format_province_id_map(province_geojson_file_name)
    emissions_data_frame = data_reading.read_ghg_emissions_for_maps(emissions_csv_file_name)
    emissions_difference_data_frame = maps.calculate_emissions_difference(emissions_data_frame)
    temperatures_difference_data_frame = maps.calculate_temp_difference(
        maps.format_temps(weather_stations_geojson, daily_temps_geojson))
    # int() raises ValueError for non-numeric text, which is handled the
    # same way as an out-of-range year.
    try:
        year = int(YEAR_SELECT.get())
        if 1991 <= year <= 2018:
            maps.plot_emissions_map(province_geojson_file_name, 'Raw Data', emissions_data_frame,
                                    province_id_map, year)
            maps.plot_emissions_map(province_geojson_file_name,
                                    'Difference', emissions_difference_data_frame,
                                    province_id_map, year)
            maps.plot_temperatures_map(province_geojson_file_name, 'Difference',
                                       temperatures_difference_data_frame, year)
        # Out-of-range year: reuse the error path below.
        else:
            raise ValueError
    except ValueError:
        YEAR_RANGE_LABEL.config(text='Wrong input. \n Enter year \n(1991 - 2018)',
                                bg='#FFE4AE', fg='#800000')
def province_filter(event) -> None:
    """
    Callback fired when a province/territory is picked in PROVINCE_COMBO.
    Enables the Search button and narrows the station dropdown to the
    stations located in the chosen province.
    """
    SEARCH_BUTTON['state'] = 'normal'
    # DATA keys are stringified (station, province-abbreviation) tuples.
    cities_in_province = [ast.literal_eval(x)[0] for x in DATA.keys()
                          if ABB_TO_PROVINCE[ast.literal_eval(x)[1]] == PROVINCE_COMBO.get()]
    # Convert STATION_NAME style keys back to display form ("Station Name").
    sorted_cities = [x.replace('_', ' ').title() for x in cities_in_province]
    sorted_cities.sort()
    CITY_COMBO['values'] = sorted_cities
def selected(event) -> None:
    """
    Callback fired when a station is picked in CITY_COMBO. Opens a plotly
    graph in the browser comparing the station's temperature anomaly with
    the province's CO2-equivalent GHG emissions.
    """
    province = ''
    # Display form ("Station Name") back to the internal STATION_NAME form.
    city_chosen = CITY_COMBO.get().upper().replace(' ', '_')
    # Find the province abbreviation the selected station belongs to.
    for item in CITIES:
        if city_chosen == item:
            province = PROVINCE[CITIES.index(item)]
            break
    ghg_data = data_reading.read_ghg_emissions('GHG_IPCC_Can_Prov_Terr.csv')
    # Rebuild the stringified-tuple key used by DATA.
    key = "('" + city_chosen + "', '" + province + "')"
    combine.combine_plots(ghg_data, DATA[key], ABB_TO_PROVINCE[province], CITY_COMBO.get())
def search() -> None:
    """
    Filter the station dropdown for the selected province down to the
    stations whose names contain the text typed into the search entry box
    (case-insensitive). A blank search restores the full station list.
    """
    search_values = CITY_TYPE.get().lower()
    # Stations located in the currently selected province.
    cities_in_province = [ast.literal_eval(x)[0] for x in DATA.keys()
                          if ABB_TO_PROVINCE[ast.literal_eval(x)[1]] == PROVINCE_COMBO.get()]
    if search_values in ('', ' '):
        CITY_COMBO['values'] = [x.replace('_', ' ').title() for x in cities_in_province]
    else:
        display_values = []
        # Substring match against the display form of each station name.
        for value in [x.replace('_', ' ').title() for x in cities_in_province]:
            if search_values in value.lower():
                display_values.append(value)
        display_values.sort()
        CITY_COMBO['values'] = display_values
def creators_page() -> None:
    """
    Open a secondary window showing a picture of the project's creators.
    """
    creators_window = Toplevel(ROOT)
    creators_window.title('Creators')
    creators_window.update_idletasks()
    # Centre the 575x350 window on the screen.
    width = 575
    height = 350
    x = (creators_window.winfo_screenwidth() // 2) - (width // 2)
    y = (creators_window.winfo_screenheight() // 2) - (height // 2)
    creators_window.geometry('{}x{}+{}+{}'.format(width, height, x, y))
    creators_window.iconbitmap('leaf.ico')
    creators_window.config(bg='#FFE4AE')
    introduction_label = Label(creators_window, text='This project was created by...',
                               font=('Helvetica', 10, 'bold'),
                               bg='#FFE4AE', fg='#800000', borderwidth=0)
    introduction_label.grid(row=1, column=1, columnspan=4, pady=(10, 20))
    # Opens the image of the creators
    creator_image = Image.open('creator_image.png')
    # Resizes the image
    resized = creator_image.resize((600, 250), Image.ANTIALIAS)
    new_creator = ImageTk.PhotoImage(resized)
    # Keep a reference on the widget so the image is not garbage-collected.
    creator_label = Label(creators_window, image=new_creator, borderwidth=0)
    creator_label.photo = new_creator
    creator_label.grid(row=2, column=1, columnspan=4)
    why_label = Label(creators_window, text='for the CSC110 Final Project',
                      font=('Helvetica', 10, 'bold'),
                      bg='#FFE4AE', fg='#800000', borderwidth=0)
    why_label.grid(row=3, column=1, columnspan=4, pady=(10, 0))
def instructions_page() -> None:
    """
    Open a secondary window on top of the main window that explains how to
    use the Map and Graph controls.
    """
    instructions_window = Toplevel(ROOT)
    instructions_window.title('Instructions')
    instructions_window.update_idletasks()
    # Centre the 575x250 window on the screen.
    width = 575
    height = 250
    x = (instructions_window.winfo_screenwidth() // 2) - (width // 2)
    y = (instructions_window.winfo_screenheight() // 2) - (height // 2)
    instructions_window.geometry('{}x{}+{}+{}'.format(width, height, x, y))
    instructions_window.iconbitmap('leaf.ico')
    instructions_window.config(bg='#FFE4AE')
    map_instructions_title = Label(instructions_window, text='Map Instructions',
                                   font=('Helvetica', 10, 'bold', 'underline'),
                                   bg='#FFE4AE',
                                   fg='#800000',
                                   borderwidth=0)
    map_instructions_title.pack()
    map_instructions = Label(instructions_window, text='1. Enter a year between 1991 and 2018.\n'
                                                       ' 2. Upon '
                                                       'clicking Map with a valid year, following '
                                                       'three maps appear, '
                                                       'displaying:\n    a. CO2 equivalent of GHG '
                                                       'emissions across '
                                                       'Canada for the given year \n    b. '
                                                       'Difference in CO2 equivalent output '
                                                       'across Canada, for the given year '
                                                       'and 1990\n    c. Difference in mean '
                                                       'temperatures for each weather station, '
                                                       'for a given year compared to 1990 ',
                             bg='#FFE4AE', fg='#800000', borderwidth=0)
    map_instructions.pack()
    graph_instructions_title = Label(instructions_window, text='Graph Instructions',
                                     font=('Helvetica', 10, 'bold', 'underline'),
                                     bg='#FFE4AE',
                                     fg='#800000',
                                     borderwidth=0)
    graph_instructions_title.pack(pady=(15, 0))
    graph_instructions = Label(instructions_window, text='1. Select a province or territory'
                                                         ' in the Province/Territory dropdown '
                                                         'menu.\n 2. Enter keywords of the '
                                                         'weather station under Search '
                                                         'Station and click Search. \n Select '
                                                         'the station in the Station dropdown '
                                                         'menu.\n '
                                                         '3. Once a weather station selected,'
                                                         ' a graph will display in '
                                                         'your browser.This displays\n the '
                                                         'temperature anomaly and CO2 '
                                                         'equivalent of GHG emissions for\n '
                                                         'your selected weather '
                                                         'station. ',
                               bg='#FFE4AE', fg='#800000', borderwidth=0)
    graph_instructions.pack()
# Labels for all the buttons and entry boxes for user friendliness
# Map Widgets
VIEW_MAP_LABEL = Label(ROOT, text='View Map', font=('Helvetica', 10,
'bold', 'underline'), bg='#FFE4AE',
fg='#800000', borderwidth=0)
VIEW_MAP_LABEL.grid(row=2, column=1, columnspan=4)
YEAR_RANGE_LABEL = Label(ROOT, text='Enter year\n(1991 - 2018)', bg='#FFE4AE', fg='#800000')
YEAR_RANGE_LABEL.grid(row=3, column=2)
YEAR_SELECT = Entry(ROOT, width=7)
YEAR_SELECT.grid(row=4, column=2)
MAP_BUTTON = Button(ROOT, text='Map', command=map_open, bg='#800000', fg='#FFE4AE')
MAP_BUTTON.grid(row=4, column=3, padx=15)
# Graph Widgets
VIEW_GRAPH_LABEL = Label(ROOT, text='View Graph', font=('Helvetica', 10, 'bold',
'underline'), bg='#FFE4AE', fg='#800000',
borderwidth=0)
VIEW_GRAPH_LABEL.grid(row=5, column=1, columnspan=4, pady=(15, 0))
PROVINCE_LABEL = Label(ROOT, text='1. Province/Territory', bg='#FFE4AE', fg='#800000')
PROVINCE_LABEL.grid(row=6, column=1, padx=15)
PROVINCE_OPTIONS = [ABB_TO_PROVINCE[x] for x in ABB_TO_PROVINCE]
PROVINCE_OPTIONS.sort()
PROVINCE_COMBO = ttk.Combobox(ROOT, value=PROVINCE_OPTIONS)
PROVINCE_COMBO.current(0)
PROVINCE_COMBO.bind('<<ComboboxSelected>>', province_filter)
PROVINCE_COMBO.grid(row=7, column=1, padx=15)
SEARCH_LABEL = Label(ROOT, text='2. Station Search', bg='#FFE4AE', fg='#800000')
SEARCH_LABEL.grid(row=6, column=2, padx=15)
CITY_TYPE = StringVar()
SEARCH_TEXT = Entry(ROOT, text=CITY_TYPE)
SEARCH_TEXT.grid(row=7, column=2, padx=15)
SEARCH_BUTTON = Button(ROOT, text='Search', command=search, bg='#800000', fg='#FFE4AE')
SEARCH_BUTTON['state'] = 'disabled'
SEARCH_BUTTON.grid(row=7, column=3, padx=15)
STATION_LABEL = Label(ROOT, text='3. Station', bg='#FFE4AE', fg='#800000')
STATION_LABEL.grid(row=6, column=4, padx=15)
CITY_OPTIONS = [x.replace('_', ' ').title() for x in CITIES]
CITY_OPTIONS.sort()
CITY_COMBO = ttk.Combobox(ROOT, value=[x.replace('_', ' ').title() for x in CITY_OPTIONS])
CITY_COMBO.bind('<<ComboboxSelected>>', selected)
CITY_COMBO.grid(row=7, column=4, padx=15)
INSTRUCTIONS_BUTTON = Button(ROOT, text='Instructions', command=instructions_page,
bg='#800000', fg='#FFE4AE')
INSTRUCTIONS_BUTTON.grid(row=8, column=1, pady=(30, 0))
CREATORS_BUTTON = Button(ROOT, text='Creators', command=creators_page, bg='#800000', fg='#FFE4AE')
CREATORS_BUTTON.grid(row=8, column=4, pady=(30, 0))
window(ROOT)
mainloop()
if __name__ == '__main__':
import python_ta
python_ta.check_all(config={
# the names (strs) of imported modules
'extra-imports': ['tkinter', 'json', 'python_ta', 'python_ta.contracts',
'ast', 'PIL', 'data_reading', 'combine', 'maps'],
# the names (strs) of functions that call print/open/input
'allowed-io': ['read_temp_data'],
'max-line-length': 100,
'disable': ['R1705', 'C0200']
})
import python_ta.contracts
python_ta.contracts.DEBUG_CONTRACTS = False
python_ta.contracts.check_all_contracts()
import doctest
doctest.testmod()
| true |
aad05ad8d96d00c6f0e9c3684e015ba626ebafd4 | Python | ejbryant28/interview-questions-practice-problems | /ert_test.py | UTF-8 | 3,031 | 3.921875 | 4 | [] | no_license |
def calc_max_profits_1(stock_prices):
    """Brute-force O(n^2) approach: try every buy day / later sell day pair.

    Returns the maximum achievable profit, or None when no profitable trade
    exists. Fixes: an empty price list now returns None instead of raising
    IndexError (the old code indexed stock_prices[0] unconditionally),
    matching calc_max_profits_2; the dead sell/buy tracking locals are gone.
    """
    best_gain = 0
    for i, buy_price in enumerate(stock_prices):
        # Only days strictly after the buy day are valid sell days.
        for sell_price in stock_prices[i + 1:]:
            if sell_price - buy_price > best_gain:
                best_gain = sell_price - buy_price
    # A zero gain means there is no reason to trade at all.
    return best_gain if best_gain else None
#given test case
assert(calc_max_profits_1([10, 7, 4, 8, 12, 9]) == 8)
# #test case where there's no possible gains
assert(calc_max_profits_1([20, 19, 18, 17, 5]) == None)
#test case where the buy and sell times aren't the overall min and max
assert(calc_max_profits_1([100, 20, 19, 17, 21, 15]) == 4)
def calc_max_profits_2(prices):
    """Single-pass O(n) approach: track the cheapest price seen so far and
    the best profit obtainable by selling at the current price.

    Returns None for an empty list or when no profitable trade exists.
    """
    if not prices:
        return None
    cheapest = prices[0]
    best_gain = 0
    for price in prices:
        if price < cheapest:
            # New minimum: can't profit by selling here, only by buying.
            cheapest = price
        elif price - cheapest > best_gain:
            best_gain = price - cheapest
    return best_gain or None
#given test case
assert(calc_max_profits_2([10, 7, 4, 8, 12, 9]) == 8)
# #test case where there's no possible gains
assert(calc_max_profits_2([20, 19, 18, 17, 5]) == None)
#test case where the buy and sell times aren't the overall min and max
assert(calc_max_profits_2([100, 20, 19, 17, 21, 15]) == 4)
def merge_sort(lst_1, lst_2):
    """Merge two already-sorted lists into one sorted list.

    (Despite the name, this is only the merge step of merge sort; both
    inputs are assumed to be sorted.)
    """
    merged = []
    i = j = 0
    while i < len(lst_1) and j < len(lst_2):
        if lst_1[i] <= lst_2[j]:
            merged.append(lst_1[i])
            i += 1
        else:
            merged.append(lst_2[j])
            j += 1
    # One input is exhausted; the other's remaining tail is already sorted.
    merged.extend(lst_1[i:])
    merged.extend(lst_2[j:])
    return merged
def get_nth_item(n, lst_1, lst_2):
    """Return the nth smallest (1-based) element of the two sorted lists
    combined, or None when both lists are empty.

    Assumes 1 <= n <= len(lst_1) + len(lst_2); a larger n raises IndexError.
    """
    s_list = merge_sort(lst_1, lst_2)
    if not s_list:
        return None
    return s_list[n-1]
assert(get_nth_item(4, [2, 2, 5, 7, 7, 12], [3, 6, 8, 10, 13])==5)
assert(get_nth_item(6, [2, 2, 5, 7, 7, 12], [3, 6, 8, 10, 13])==7)
assert(get_nth_item(3, [], [1, 5, 7]) == 7)
assert(get_nth_item(4, [], []) == None)
def merge_sort_optimized(n, lst_1, lst_2, l=0):
    """Merge two sorted lists, producing only the first n merged elements.

    l counts how many elements have been emitted so far, so the recursion
    can stop as soon as n elements have been produced.

    Bug fix: when one list ran out early the remainder slice used
    ``n - l + 1`` and returned one element too many, which made the
    caller's ``s_list[-1]`` the (n+1)th smallest value instead of the nth.
    The number of elements still needed is exactly ``n - l``.
    """
    if l == n:
        return []
    if not lst_1:
        return lst_2[:n - l]
    if not lst_2:
        return lst_1[:n - l]
    if lst_1[0] <= lst_2[0]:
        return [lst_1[0]] + merge_sort_optimized(n, lst_1[1:], lst_2, l + 1)
    return [lst_2[0]] + merge_sort_optimized(n, lst_1, lst_2[1:], l + 1)
def get_nth_item_optimized(n, lst_1, lst_2):
    """Like get_nth_item, but merges only the first n elements and takes
    the last of them. Returns None when both input lists are empty.

    NOTE(review): merge_sort_optimized's remainder slice uses ``n-l+1``,
    which can return one element too many when one list is exhausted early;
    that would make this the (n+1)th value instead of the nth -- verify.
    """
    s_list = merge_sort_optimized(n, lst_1, lst_2)
    if not s_list:
        return None
    return s_list[-1]
assert(get_nth_item_optimized(4, [2, 2, 5, 7, 7, 12], [3, 6, 8, 10, 13])==5)
assert(get_nth_item_optimized(6, [2, 2, 5, 7, 7, 12], [3, 6, 8, 10, 13])==7)
assert(get_nth_item_optimized(3, [], [1, 5, 7]) == 7)
assert(get_nth_item_optimized(4, [], []) == None)
| true |
6bc12ca824e914c73e557f475bc7ab7014400e0e | Python | dw2008/coding365 | /201906/0611.py | UTF-8 | 753 | 3.59375 | 4 | [] | no_license | tableData = [["apples", 'oranges', 'cherries', 'banana'],
['Alice', 'Bob', 'Carol', 'David'],
['dogs', 'cats', 'moose', 'goose']]
def findLong(alist):
    """Return the length of the longest string in alist (0 for an empty list)."""
    return max(map(len, alist), default=0)
def printCol(alist):
    """Print each string in alist on its own line, right-justified to the
    width of the longest string in the list."""
    width = findLong(alist)
    for item in alist:
        print(item.rjust(width))
def printTable(table):
    """Print a list of columns (lists of strings) as a right-justified table.

    Each column is padded to the width of its longest entry, and a single
    space follows every column (including the last, matching the original
    output exactly).

    Fixes: removes the unused ``alist`` local, and computes each column's
    width once up front instead of rescanning the whole column for every
    cell of every row.
    """
    widths = [max(map(len, column), default=0) for column in table]
    rows = len(table[0])
    for row in range(rows):
        line = ""
        for col in range(len(table)):
            line = line + table[col][row].rjust(widths[col]) + " "
        print(line)
printTable(tableData) | true |
ae1ea2ad009fe798f7d2b07f52d885c12264a4c1 | Python | desai10/competitive-coding | /codeforces/Codeforces Round #807 (Div. 2)/MarkTheDustSweeper.py | UTF-8 | 344 | 3.359375 | 3 | [] | no_license | t = int(input())
while t > 0:
n = int(input())
arr = [int(x) for x in input().split()]
st = n - 1
ans = 0
for i in range(n - 2, -1, -1):
ans += arr[i]
if arr[i] > 0:
st = i
while st < n - 1:
if arr[st] == 0:
ans += 1
st += 1
print(ans)
t -= 1
| true |
e79482f8c5c66d794c357d553811287e52ad8401 | Python | lewis617/python-algorithm | /src/102-unique-binary-search-trees-ii.py | UTF-8 | 279 | 2.8125 | 3 | [] | no_license | class Solution:
# @param A : integer
# @return an integer
def numTrees(self, A):
dp = [0] * (A+1)
dp[0] = 1
dp[1] = 1
for i in range(2, A+1):
for j in range(i):
dp[i] += dp[j]*dp[i-j-1]
return dp[-1] | true |
e636dad96d60938e0ceb74d104ca63b076f3b5a4 | Python | STIXProject/openioc-to-stix | /openioc2stix/objectify.py | UTF-8 | 52,922 | 2.59375 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# builtin
import logging
# external
from cybox.core import Object
# internal
from . import xml, utils
# Module logger
LOG = logging.getLogger(__name__)
def _assert_field(obj, attrname):
klass = obj.__class__
if hasattr(obj, attrname):
return
if hasattr(klass, attrname):
return
raise AttributeError("Object has no attribute: %s" % attrname)
def _set_field(obj, attrname, value, condition=None):
    """Assign the sanitized ``value`` to ``obj.attrname``, then stamp the
    comparison ``condition`` onto the stored field when it supports one.
    Returns the field as stored on the object."""
    setattr(obj, attrname, xml.sanitize(value))
    field = getattr(obj, attrname)
    if condition and hasattr(field, 'condition'):
        field.condition = condition
    return field
def _set_numeric_field(obj, attrname, value, condition=None):
    """Set a numeric field, translating an OpenIOC '[x TO y]' range into a
    CybOX InclusiveBetween condition. Returns the field that was set."""
    # Remove any braces if they exist (sometimes they do)
    stripped = value.strip('[]')
    # Split on ' TO ', which can be used in Indicators to designate ranges.
    values = stripped.split(' TO ')
    if len(values) == 1:
        # Plain scalar value: defer to the generic setter.
        return _set_field(obj, attrname, values[0], condition)
    # ' TO ' found. This is a range.
    field = _set_field(obj, attrname, values, "InclusiveBetween")
    # Map the original condition onto an apply_condition for the range:
    # positive matches need ANY bound to match, negations need NONE.
    if condition in ('Contains', 'Equals'):
        field.apply_condition = "ANY"
    elif condition in ("DoesNotContain", "DoesNotEqual"):
        field.apply_condition = "NONE"
    else:
        field.apply_condition = "ALL"  # TODO: Is this correct?
    return field
def set_field(obj, attrname, value, condition=None):
    """Validate that ``attrname`` exists on ``obj`` and set it, routing
    numeric attributes through the range-aware numeric setter."""
    _assert_field(obj, attrname)
    setter = _set_numeric_field if utils.is_numeric(obj, attrname) else _set_field
    return setter(obj, attrname, value, condition)
def has_content(obj):
    """Return True when ``obj`` carries a ``_fields`` mapping with at least
    one truthy value; False for objects without ``_fields``."""
    if not hasattr(obj, '_fields'):
        return False
    return any(obj._fields.values())
## primary object functions
def create_disk_obj(search_string, content_string, condition):
    """Map a DiskItem OpenIOC search term onto a CybOX Disk Object.

    Returns a cybox.core.Object wrapping the Disk, or None when the term
    has no mapping.
    """
    from cybox.objects.disk_object import Disk, DiskPartition, PartitionList

    # Terms that map directly onto Disk fields.
    disk_attrmap = {
        "DiskItem/DiskName": "disk_name",
        "DiskItem/DiskSize": "disk_size"
    }
    # Terms that map onto fields of a partition inside the disk.
    part_attrmap = {
        "DiskItem/PartitionList/Partition/PartitionLength": "partition_length",
        "DiskItem/PartitionList/Partition/PartitionNumber": "partition_id",
        "DiskItem/PartitionList/Partition/PartitionOffset": "partition_offset",
        "DiskItem/PartitionList/Partition/PartitionType": "partition_type"
    }

    disk = Disk()
    if search_string in disk_attrmap:
        set_field(disk, disk_attrmap[search_string], content_string, condition)
    elif search_string in part_attrmap:
        partition = DiskPartition()
        set_field(partition, part_attrmap[search_string], content_string, condition)
        disk.partition_list = PartitionList(partition)
    else:
        return None
    return Object(disk)
def create_dns_obj(search_string, content_string, condition):
    """Map a DnsEntryItem OpenIOC search term onto a CybOX DNSCache Object
    containing a single DNS record. Returns None for unhandled terms."""
    from cybox.objects.dns_record_object import DNSRecord
    from cybox.objects.dns_cache_object import DNSCache, DNSCacheEntry

    attrmap = {
        "DnsEntryItem/DataLength": "data_length",
        "DnsEntryItem/Flags": "flags",
        "DnsEntryItem/Host": "domain_name",
        "DnsEntryItem/RecordData/Host": "record_data",
        "DnsEntryItem/RecordData/IPv4Address": "record_data",
        "DnsEntryItem/RecordName": "record_name",
        "DnsEntryItem/RecordType": "record_type",
        "DnsEntryItem/TimeToLive": "ttl"
    }
    if search_string not in attrmap:
        return None

    record = DNSRecord()
    set_field(record, attrmap[search_string], content_string, condition)

    # Wrap the record in a cache entry inside a DNS cache object.
    entry = DNSCacheEntry()
    entry.dns_entry = record
    cache = DNSCache()
    cache.dns_cache_entry = entry
    return Object(cache)
def create_driver_obj(search_string, content_string, condition):
    """Convert a DriverItem OpenIOC search term into a CybOX Object.

    Produces a WinDriver Object, or delegates to the PE-file / file
    translators for hash, string and PE-header terms. Returns None for
    unhandled terms.
    """
    from cybox.objects.win_driver_object import WinDriver, DeviceObjectStruct, DeviceObjectList
    windriver = WinDriver()
    device = DeviceObjectStruct()
    # DriverItem/DeviceItem/* terms map onto the driver's device object list.
    device_attrmap = {
        "DriverItem/DeviceItem/AttachedDeviceName": "attached_device_name",
        "DriverItem/DeviceItem/AttachedDeviceObject": "attached_device_object",
        "DriverItem/DeviceItem/AttachedToDeviceName": "attached_to_device_name",
        "DriverItem/DeviceItem/AttachedToDeviceObject": "attached_to_device_object",
        "DriverItem/DeviceItem/AttachedToDriverName": "attached_to_driver_name",
        "DriverItem/DeviceItem/AttachedToDriverObject": "attached_to_driver_object",
        "DriverItem/DeviceItem/DeviceName": "device_name",
        "DriverItem/DeviceItem/DeviceObject": "device_object"
    }
    # Terms that map directly onto WinDriver fields.
    driver_attrmap = {
        "DriverItem/DriverInit": "driver_init",
        "DriverItem/DriverName": "driver_name",
        "DriverItem/DriverObjectAddress": "driver_object_address",
        "DriverItem/DriverStartIo": "driver_start_io",
        "DriverItem/DriverUnload": "driver_unload",
        "DriverItem/ImageBase": "image_base",
        "DriverItem/ImageSize": "image_size"
    }
    # Hash/string terms are really file properties, handled by create_file_obj.
    file_keys = (
        "DriverItem/Sha1sum",
        "DriverItem/Sha256sum",
        "DriverItem/StringList/string"
    )
    # PE header terms are handled by the dedicated PE-file translator.
    if "/PEInfo/" in search_string:
        return create_pefile_obj(search_string, content_string, condition)
    if search_string in file_keys:
        return create_file_obj(search_string, content_string, condition)
    elif search_string in device_attrmap:
        set_field(device, device_attrmap[search_string], content_string, condition)
        windriver.device_object_list = DeviceObjectList(device)
    elif search_string in driver_attrmap:
        set_field(windriver, driver_attrmap[search_string], content_string, condition)
    else:
        return None
    return Object(windriver)
def create_email_obj(search_string, content_string, condition):
    """Convert an Email/* OpenIOC search term into a CybOX EmailMessage
    Object.

    Attachment terms produce a related File object linked to the message;
    header and received-line terms are placed on the message header.
    Returns None for unhandled terms.
    """
    from cybox.objects.file_object import File
    from cybox.objects.email_message_object import (
        Attachments, EmailMessage, EmailHeader, ReceivedLine, ReceivedLineList
    )
    email = EmailMessage()
    header = EmailHeader()
    received = ReceivedLine()
    attachment = None
    # Attachment terms are file properties on a separate File object.
    file_attrmap = {
        "Email/Attachment/Name": "file_name",
        "Email/Attachment/SizeInBytes": "size_in_bytes"
    }
    email_attrmap = {
        "Email/Body": "raw_body",
        "Email/EmailServer": "email_server"  # Not a standard OpenIOC indicator term
    }
    received_attrmap = {
        "Email/Received": "timestamp",
        "Email/ReceivedFromHost": "from_",
        "Email/ReceivedFromIP": "from_"
    }
    header_attrmap = {
        "Email/BCC": "bcc",
        "Email/CC": "cc",
        "Email/Content-Type": "content_type",
        "Email/Date": "date",
        "Email/From": "from_",
        "Email/In-Reply-To": "in_reply_to",
        "Email/MIME-Version": "mime_version",
        "Email/Subject": "subject",
        "Email/To": "to",
        "Email/ReplyTo": "reply_to"  # Not a standard OpenIOC indicator term
    }
    if search_string in email_attrmap:
        set_field(email, email_attrmap[search_string], content_string, condition)
    elif search_string in file_attrmap:
        attachment = File()
        set_field(attachment, file_attrmap[search_string], content_string, condition)
        email.attachments = Attachments(attachment.parent.id_)
    elif search_string in header_attrmap:
        set_field(header, header_attrmap[search_string], content_string, condition)
        email.header = header
    elif search_string in received_attrmap:
        set_field(received, received_attrmap[search_string], content_string, condition)
        header.received_lines = ReceivedLineList(received)
        # Bug fix: the header holding the received line was never attached
        # to the message, so Email/Received* terms produced an empty
        # EmailMessage. Attach it, as the header branch above does.
        email.header = header
    else:
        return None
    if not attachment:
        return Object(email)
    # Link the attachment File to the message as a related object.
    email = Object(email)
    email.add_related(attachment, "Contains")
    return email
def create_win_event_log_obj(search_string, content_string, condition):
    """Convert an EventLogItem OpenIOC search term into a CybOX WinEventLog
    Object. Returns None for unhandled terms."""
    from cybox.common.properties import String
    from cybox.objects.win_event_log_object import WinEventLog, UnformattedMessageList
    eventlog = WinEventLog()
    # Terms that map directly onto WinEventLog fields.
    attrmap = {
        "EventLogItem/CorrelationActivityId": "correlation_activity_id",
        "EventLogItem/CorrelationRelatedActivityId": "correlation_related_activity_id",
        "EventLogItem/EID": "eid",
        "EventLogItem/ExecutionProcessId": "execution_process_id",
        "EventLogItem/ExecutionThreadId": "execution_thread_id",
        "EventLogItem/blob": "blob",
        "EventLogItem/category": "category",
        "EventLogItem/categoryNum": "category_num",
        "EventLogItem/genTime": "generation_time",
        "EventLogItem/index": "index",
        "EventLogItem/log": "log",
        "EventLogItem/machine": "machine",
        "EventLogItem/message": "message",
        "EventLogItem/reserved": "reserved",
        "EventLogItem/source": "source",
        "EventLogItem/type": "type_",
        "EventLogItem/user": "user",
        "EventLogItem/writeTime": "write_time"
    }
    if search_string in attrmap:
        set_field(eventlog, attrmap[search_string], content_string, condition)
    elif search_string == "EventLogItem/unformattedMessage/string":
        # Unformatted message strings need an explicit String property so
        # the comparison condition can be carried on it.
        s = String(xml.sanitize(content_string))
        s.condition = condition
        eventlog.unformatted_message_list = UnformattedMessageList(s)
    else:
        return None
    return Object(eventlog)
def create_file_obj(search_string, content_string, condition):
    """Convert a FileItem (or DriverItem hash) OpenIOC search term into a
    CybOX File Object, delegating Windows-, UNIX- and PE-specific terms to
    the dedicated translators. Returns None for unhandled terms."""
    from cybox.objects.file_object import File
    from cybox.common import ExtractedStrings, ExtractedFeatures
    f = File()
    # Terms that map directly onto generic File fields.
    attrmap = {
        "FileItem/Accessed": "accessed_time",
        "FileItem/Created": "created_time",
        "FileItem/DevicePath": "device_path",
        "FileItem/FileExtension": "file_extension",
        "FileItem/FileName": "file_name",
        "FileItem/FilePath": "file_path",
        "FileItem/FullPath": "full_path",
        "FileItem/Md5sum": "md5",
        "FileItem/Sha256sum": "sha256",
        "FileItem/Sha1sum": "sha1",
        "DriverItem/Sha1sum": "sha1",
        "DriverItem/Md5sum": "md5",
        "DriverItem/Sha256sum": "sha256",
        "FileItem/Modified": "modified_time",
        "FileItem/PeakEntropy": "peak_entropy",
        "FileItem/SizeInBytes": "size_in_bytes",
        "FileItem/Username": "user_owner"
    }
    # Windows-specific terms handled by create_win_file_obj.
    winfile_keys = (
        "FileItem/Drive",
        "FileItem/FileAttributes",
        "FileItem/FilenameAccessed",
        "FileItem/FilenameCreated",
        "FileItem/FilenameModified",
        "FileItem/SecurityID",
        "FileItem/SecurityType",
        "FileItem/StreamList/Stream/Md5sum",
        "FileItem/StreamList/Stream/Name",
        "FileItem/StreamList/Stream/Sha1sum",
        "FileItem/StreamList/Stream/Sha256sum",
        "FileItem/StreamList/Stream/SizeInBytes"
    )
    if search_string in attrmap:
        set_field(f, attrmap[search_string], content_string, condition)
    elif search_string in winfile_keys:
        return create_win_file_obj(search_string, content_string, condition)
    elif search_string == "FileItem/INode":
        return create_unix_file_obj(search_string, content_string, condition)
    elif '/PEInfo/' in search_string:
        return create_pefile_obj(search_string, content_string, condition)
    elif "/StringList/string" in search_string:
        # Extracted strings live under the file's extracted features.
        extracted_features = ExtractedFeatures()
        extracted_features.strings = ExtractedStrings(xml.sanitize(content_string))
        f.extracted_features = extracted_features
    else:
        return None
    return Object(f)
def create_hook_obj(search_string, content_string, condition):
    """Convert a HookItem OpenIOC search term into a CybOX WinKernelHook
    Object; digital-signature terms are set on an embedded
    DigitalSignature. Returns None for unhandled terms."""
    from cybox.objects.win_kernel_hook_object import WinKernelHook
    from cybox.common.digitalsignature import DigitalSignature
    hook = WinKernelHook()
    ds = DigitalSignature()
    # Terms that map directly onto WinKernelHook fields.
    hook_attrmap = {
        "HookItem/HookDescription": "hook_description",
        "HookItem/HookedFunction": "hooked_function",
        "HookItem/HookedModule": "hooked_module",
        "HookItem/HookingAddress": "hooking_address",
        "HookItem/HookingModule": "hooking_module"
    }
    # Signature terms; the same fields exist for both the hooking and the
    # hooked module.
    ds_attrmap = {
        "HookItem/DigitalSignatureHooking/CertificateIssuer": "certificate_issuer",
        "HookItem/DigitalSignatureHooking/CertificateSubject": "certificate_subject",
        "HookItem/DigitalSignatureHooking/Description": "signature_description",
        "HookItem/DigitalSignatureHooking/SignatureExists": "signature_exists",
        "HookItem/DigitalSignatureHooking/SignatureVerified": "signature_verified",
        "HookItem/DigitalSignatureHooked/CertificateIssuer": "certificate_issuer",
        "HookItem/DigitalSignatureHooked/CertificateSubject": "certificate_subject",
        "HookItem/DigitalSignatureHooked/Description": "signature_description",
        "HookItem/DigitalSignatureHooked/SignatureExists": "signature_exists",
        "HookItem/DigitalSignatureHooked/SignatureVerified": "signature_verified"
    }
    if search_string in ds_attrmap:
        set_field(ds, ds_attrmap[search_string], content_string, condition)
        # Attach the signature to whichever side the term names.
        if "DigitalSignatureHooking" in search_string:
            hook.digital_signature_hooking = ds
        else:
            hook.digital_signature_hooked = ds
    elif search_string in hook_attrmap:
        set_field(hook, hook_attrmap[search_string], content_string, condition)
    else:
        return None
    return Object(hook)
def create_library_obj(search_string, content_string, condition):
    """Map a ModuleItem OpenIOC search term onto a CybOX Library Object.
    Returns None for unhandled terms."""
    from cybox.objects.library_object import Library

    attrmap = {
        "ModuleItem/ModuleBase": "base_address",
        "ModuleItem/ModuleName": "name",
        "ModuleItem/ModulePath": "path",
        "ModuleItem/ModuleSize": "size"
    }
    if search_string not in attrmap:
        return None

    library = Library()
    set_field(library, attrmap[search_string], content_string, condition)
    return Object(library)
def create_network_connection_obj(search_string, content_string, condition):
    """Convert a network-related OpenIOC term (Network/*, PortItem address
    and timestamp terms) into a CybOX NetworkConnection Object. HTTP
    related terms are expressed through an embedded HTTP session.
    Returns None for unhandled terms."""
    from cybox.objects.socket_address_object import SocketAddress
    from cybox.objects.network_connection_object import (
        NetworkConnection, Layer7Connections
    )
    from cybox.objects.http_session_object import (
        HTTPSession, HTTPClientRequest, HTTPRequestResponse, HTTPRequestHeader,
        HTTPRequestHeaderFields, HostField, HTTPRequestLine
    )
    # HTTP Session stuff
    session = HTTPSession()
    request_response = HTTPRequestResponse()
    request = HTTPClientRequest()
    request_line = HTTPRequestLine()
    header = HTTPRequestHeader()
    header_fields = HTTPRequestHeaderFields()
    # Network Connection stuff
    layer7 = Layer7Connections()
    socketaddr = SocketAddress()
    net = NetworkConnection()
    # Pre-wire common HTTP Session properties
    # NOTE(review): layer7 is wired to the session here but never attached
    # to net, so HTTP-session data may not end up on the returned object --
    # verify against the callers/output.
    layer7.http_session = session
    session.http_request_response = request_response
    request_response.http_client_request = request
    request.http_request_header = header
    # Maps a term to (SocketAddress field, NetworkConnection field).
    socket_attrmap = {
        "PortItem/localIP": ("ip_address", "source_socket_address"),
        "PortItem/remoteIP": ("ip_address", "destination_socket_address"),
        "ProcessItem/PortList/PortItem/localIP": ("ip_address", "source_socket_address"),
    }
    if search_string in socket_attrmap:
        socket_field, net_field = socket_attrmap[search_string]
        set_field(socketaddr, socket_field, content_string, condition)
        set_field(net, net_field, socketaddr)
    elif search_string == "Network/DNS":
        host = HostField()
        header_fields.host = host
        header.parsed_header = header_fields
        set_field(host, "domain_name", content_string, condition)
    elif search_string == "Network/HTTP_Referer":
        set_field(header_fields, "referer", content_string, condition)
        header.parsed_header = header_fields
    elif search_string == "Network/String":
        set_field(header, "raw_header", content_string, condition)
    elif search_string == "Network/URI":
        set_field(request_line, "value", content_string, condition)
        request.http_request_line = request_line
    elif search_string == "Network/UserAgent":
        set_field(header_fields, "user_agent", content_string, condition)
        header.parsed_header = header_fields
    elif "PortItem/CreationTime" in search_string:
        set_field(net, "creation_time", content_string, condition)
    else:
        return None
    return Object(net)
def create_net_route_obj(search_string, content_string, condition):
    """Map a RouteEntryItem OpenIOC search term onto a CybOX
    NetworkRouteEntry Object. Returns None for unhandled terms."""
    from cybox.objects.network_route_entry_object import NetworkRouteEntry
    from cybox.objects.address_object import Address

    attr_map = {
        "RouteEntryItem/Destination": "destination_address",
        "RouteEntryItem/Gateway": "gateway_address",
        "RouteEntryItem/Interface": "interface",
        "RouteEntryItem/IsIPv6": "is_ipv6",
        "RouteEntryItem/Metric": "metric",
        "RouteEntryItem/Netmask": "netmask",
        "RouteEntryItem/Protocol": "protocol",
        "RouteEntryItem/RouteAge": "route_age",
        "RouteEntryItem/RouteType": "route_type"
    }
    # These route fields are IPv4 addresses and must be wrapped in an
    # Address object rather than set as plain values.
    addr_keys = frozenset([
        "RouteEntryItem/Destination",
        "RouteEntryItem/Gateway",
        "RouteEntryItem/Netmask"
    ])

    if search_string not in attr_map:
        return None

    net = NetworkRouteEntry()
    if search_string in addr_keys:
        addr = Address(category=Address.CAT_IPV4)
        set_field(addr, "address_value", content_string, condition)
        set_field(net, attr_map[search_string], addr)
    else:
        set_field(net, attr_map[search_string], content_string, condition)
    return Object(net)
def create_port_obj(search_string, content_string, condition):
    """Map a PortItem IOC term onto a CybOX Port Object.

    Endpoint terms (IPs, creation time) are delegated to the
    network-connection builder; unknown terms yield None.
    """
    from cybox.objects.port_object import Port
    # Terms describing the connection rather than the port itself.
    delegated_terms = (
        "PortItem/CreationTime",
        "PortItem/localIP",
        "PortItem/remoteIP"
    )
    field_map = {
        "PortItem/localPort": "port_value",
        "PortItem/remotePort": "port_value",
        "PortItem/protocol": "layer4_protocol"
    }
    if search_string in delegated_terms:
        return create_network_connection_obj(search_string, content_string, condition)
    if search_string not in field_map:
        return None
    port = Port()
    set_field(port, field_map[search_string], content_string, condition)
    return Object(port)
def create_prefetch_obj(search_string, content_string, condition):
    """Map a PrefetchItem IOC term onto a CybOX WinPrefetch Object.

    Volume sub-terms are recognized but not translated (blocked on an
    upstream python-cybox issue); unknown terms yield None.
    """
    from cybox.common.properties import String
    from cybox.objects.win_volume_object import WinVolume
    from cybox.objects.win_prefetch_object import WinPrefetch, AccessedFileList
    # IOC term -> WinPrefetch attribute name.
    prefected_attrmap = {
        "PrefetchItem/ApplicationFileName": "application_file_name",
        "PrefetchItem/LastRun": "last_run",
        "PrefetchItem/PrefetchHash": "prefetch_hash",
        "PrefetchItem/TimesExecuted": "times_executed",
    }
    # IOC term -> WinVolume attribute name (currently untranslatable, see below).
    volume_attrmap = {
        "PrefetchItem/VolumeList/VolumeItem/DevicePath": "device_path",
        "PrefetchItem/VolumeList/VolumeItem/CreationTime": "creation_time",
        "PrefetchItem/VolumeList/VolumeItem/SerialNumber": "serial_number"
    }
    prefetch = WinPrefetch()
    # volume = WinVolume()
    if search_string in prefected_attrmap:
        set_field(prefetch, prefected_attrmap[search_string], content_string, condition)
    elif search_string in volume_attrmap:
        # Kept disabled until the upstream library supports WinVolume here.
        LOG.info("Cannot translate WinVolume object. See "
                 "https://github.com/CybOXProject/python-cybox/issues/269")
        # set_field(volume, volume_attrmap[search_string], content_string, condition)
        # prefetch.volume = volume
    elif search_string == "PrefetchItem/AccessedFileList/AccessedFile":
        # Accessed files become a sanitized String entry in the file list.
        s = String(xml.sanitize(content_string))
        s.condition = condition
        prefetch.accessed_file_list = AccessedFileList(s)
    else:
        return None
    return Object(prefetch)
def create_process_obj(search_string, content_string, condition):
    """Map a ProcessItem IOC term onto a CybOX Process Object.

    Windows-specific terms (handles, sections, security IDs) and
    connection-endpoint terms are delegated to the specialized builders.
    Returns None for unrecognized terms.
    """
    from cybox.common import ExtractedFeatures, ExtractedStrings, ExtractedString
    from cybox.objects.process_object import Process, PortList, ImageInfo
    from cybox.objects.port_object import Port
    proc = Process()
    port = Port()
    image = ImageInfo()
    # IOC term -> Process attribute name.
    proc_attrmap = {
        "ProcessItem/Username": "username",
        "ProcessItem/name": "name",
        "ProcessItem/parentpid": "parent_pid",
        "ProcessItem/pid": "pid",
        "ProcessItem/startTime": "start_time",
        "ProcessItem/userTime": "user_time",
    }
    # IOC term -> Port attribute name.
    port_attrmap = {
        "ProcessItem/PortList/PortItem/localPort": "port_value",
        "ProcessItem/PortList/PortItem/remotePort": "port_value",
        # BUG FIX: was "layer4_protcol" (typo), which silently set a
        # nonexistent attribute; now matches create_port_obj's mapping.
        "ProcessItem/PortList/PortItem/protocol": "layer4_protocol"
    }
    # IOC term -> ImageInfo attribute name.
    image_attrmap = {
        "ProcessItem/arguments": "command_line",
        "ProcessItem/path": "path",
        "ServiceItem/path": "path"
    }
    # Endpoint terms handled by the network-connection builder.
    netconn_keys = (
        "ProcessItem/PortList/PortItem/CreationTime",
        "ProcessItem/PortList/PortItem/localIP",
        "ProcessItem/PortList/PortItem/remoteIP"
    )
    # Substrings marking Windows-specific process terms.
    winproc_keys = (
        "HandleList",
        "SectionList",
        "ProcessItem/SecurityID",
        "ProcessItem/SecurityType"
    )
    if any(term in search_string for term in winproc_keys):
        return create_win_process_obj(search_string, content_string, condition)
    elif search_string in netconn_keys:
        return create_network_connection_obj(search_string, content_string, condition)
    elif search_string in proc_attrmap:
        set_field(proc, proc_attrmap[search_string], content_string, condition)
    elif search_string in port_attrmap:
        set_field(port, port_attrmap[search_string], content_string, condition)
        proc.port_list = PortList(port)
    elif search_string in image_attrmap:
        set_field(image, image_attrmap[search_string], content_string, condition)
        proc.image_info = image
    elif search_string == "ProcessItem/StringList/string":
        # Extracted strings are wrapped in an ExtractedFeatures container.
        s = ExtractedString()
        set_field(s, "string_value", content_string, condition)
        exfeatures = ExtractedFeatures()
        exfeatures.strings = ExtractedStrings(s)
        proc.extracted_features = exfeatures
    else:
        return None
    return Object(proc)
def create_registry_obj(search_string, content_string, condition):
    """Map a RegistryItem IOC term onto a CybOX WinRegistryKey Object."""
    from cybox.objects.win_registry_key_object import (
        WinRegistryKey, RegistryValue, RegistryValues
    )
    key_map = {
        "RegistryItem/Username": "creator_username",
        "RegistryItem/Hive": "hive",
        "RegistryItem/KeyPath": "key",
        "RegistryItem/Modified": "modified_time",
        "RegistryItem/NumSubKeys": "num_subkeys",
        "RegistryItem/NumValues": "num_values",
    }
    value_map = {
        "RegistryItem/Text": "data",
        "RegistryItem/Value": "data",
        "RegistryItem/Type": "data_type",
        "RegistryItem/ValueName": "name"
    }
    reg_key = WinRegistryKey()
    if search_string in key_map:
        set_field(reg_key, key_map[search_string], content_string, condition)
    elif search_string in value_map:
        reg_value = RegistryValue()
        set_field(reg_value, value_map[search_string], content_string, condition)
        reg_key.values = RegistryValues(reg_value)
    elif search_string == "RegistryItem/Path":
        # A full path may carry the hive name as its first component.
        if not content_string.startswith("HKEY_"):
            set_field(reg_key, "key", content_string, condition)
        elif "\\" not in content_string:
            set_field(reg_key, "hive", content_string, condition)
        else:
            hive_part, key_part = content_string.split("\\", 1)
            set_field(reg_key, "hive", hive_part, condition='Equals')
            set_field(reg_key, "key", key_part, condition)
    else:
        return None
    return Object(reg_key)
def create_service_obj(search_string, content_string, condition):
    """Map a ServiceItem IOC term onto a CybOX WinService Object.

    Path/pid terms are delegated to the Process builder; service-DLL
    hash terms populate an attached HashList. Unknown terms yield None.
    """
    from cybox.objects.win_service_object import WinService, ServiceDescriptionList
    from cybox.common.hashes import HashList
    from cybox.common.properties import String
    hashlist = HashList()
    service = WinService()
    # IOC term -> WinService attribute name.
    attrmap = {
        "ServiceItem/arguments": "startup_command_line",
        "ServiceItem/mode": "startup_type",
        "ServiceItem/name": "service_name",
        "ServiceItem/serviceDLL": "service_dll",
        "ServiceItem/serviceDLLCertificateSubject": "service_dll_certificate_subject",
        "ServiceItem/serviceDLLCertificateIssuer": "service_dll_certificate_issuer",
        "ServiceItem/serviceDLLSignatureExists": "service_dll_signature_exists",
        "ServiceItem/serviceDLLSignatureVerified": "service_dll_signature_verified",
        "ServiceItem/serviceDLLSignatureDescription": "service_dll_signature_description",
        "ServiceItem/startedAs": "started_as",
        "ServiceItem/status": "service_status",
        "ServiceItem/type": "service_type"
    }
    # IOC term -> hash-type attribute on the service DLL's HashList.
    hashmap = {
        "ServiceItem/serviceDLLmd5sum": "md5",
        "ServiceItem/serviceDLLsha1sum": "sha1",
        "ServiceItem/serviceDLLsha256sum": "sha256"
    }
    # Terms that actually describe the hosting process, not the service.
    proc_keys = (
        "ServiceItem/path",
        "ServiceItem/pid"
    )
    if search_string in proc_keys:
        return create_process_obj(search_string, content_string, condition)
    elif search_string in attrmap:
        set_field(service, attrmap[search_string], content_string, condition)
    elif search_string in hashmap:
        set_field(hashlist, hashmap[search_string], content_string, condition)
        service.service_dll_hashes = hashlist
    elif search_string == "ServiceItem/description":
        # Descriptions are wrapped in a sanitized String list.
        s = String(xml.sanitize(content_string))
        service.description_list = ServiceDescriptionList(s)
    else:
        return None
    return Object(service)
def create_system_object(search_string, content_string, condition):
    """Map a SystemInfoItem IOC term onto a CybOX System Object.

    Windows-registration terms are delegated to create_win_system_obj.
    OS, BIOS, and network-interface sub-terms populate nested CybOX
    structures. Returns None for unrecognized terms.
    """
    from cybox.objects.address_object import Address
    from cybox.objects.system_object import (
        System, OS, BIOSInfo, NetworkInterface, NetworkInterfaceList,
        DHCPServerList, IPInfo, IPInfoList
    )
    # Terms handled by the Windows-specific system builder.
    winsys_keys = (
        "SystemInfoItem/productID",
        "SystemInfoItem/regOrg",
        "SystemInfoItem/regOwner",
        "SystemInfoItem/domain"
    )
    sys_attrmap = {
        "SystemInfoItem/processor": "processor",
        "SystemInfoItem/timezoneDST": "timezone_dst",
        "SystemInfoItem/timezoneStandard": "timezone_standard",
        "SystemInfoItem/totalphysical": "total_physical",
        "SystemInfoItem/uptime": "uptime",
        "SystemInfoItem/user": "username",
        "SystemInfoItem/availphysical": "available_physical_memory",
        "SystemInfoItem/date": "date",
        "SystemInfoItem/hostname": "hostname"
    }
    os_attrmap = {
        "SystemInfoItem/buildNumber": "build_number",
        "SystemInfoItem/installDate": "install_date",
        "SystemInfoItem/OS": "platform",
        "SystemInfoItem/patchLevel": "patch_level"
    }
    bios_attrmap = {
        "SystemInfoItem/biosInfo/biosDate": "bios_date",
        "SystemInfoItem/biosInfo/biosVersion": "bios_version"
    }
    iface_attrmap = {
        "SystemInfoItem/MAC": "mac",
        "SystemInfoItem/networkArray/networkInfo/MAC": "mac",
        'SystemInfoItem/networkArray/networkInfo/adapter': "adapter",
        'SystemInfoItem/networkArray/networkInfo/description': "description",
        'SystemInfoItem/networkArray/networkInfo/dhcpLeaseExpires': "dhcp_lease_expires",
        'SystemInfoItem/networkArray/networkInfo/dhcpLeaseObtained': "dhcp_lease_obtained"
    }
    os_ = OS()
    system = System()
    bios = BIOSInfo()
    ipinfo = IPInfo()
    iface = NetworkInterface()
    if search_string in sys_attrmap:
        set_field(system, sys_attrmap[search_string], content_string, condition)
    elif search_string in os_attrmap:
        set_field(os_, os_attrmap[search_string], content_string, condition)
        system.os = os_
    elif search_string in bios_attrmap:
        set_field(bios, bios_attrmap[search_string], content_string, condition)
        system.bios_info = bios
    elif search_string in iface_attrmap:
        set_field(iface, iface_attrmap[search_string], content_string, condition)
        system.network_interface_list = NetworkInterfaceList(iface)
    elif search_string in winsys_keys:
        return create_win_system_obj(search_string, content_string, condition)
    elif search_string == 'SystemInfoItem/networkArray/networkInfo/dhcpServerArray/dhcpServer':
        addr = Address(xml.sanitize(content_string), category=Address.CAT_IPV4)
        iface.dhcp_server_list = DHCPServerList(addr)
        system.network_interface_list = NetworkInterfaceList(iface)
    elif search_string == 'SystemInfoItem/networkArray/networkInfo/ipArray/ipInfo/ipAddress':
        addr = Address(xml.sanitize(content_string), category=Address.CAT_IPV4)
        ipinfo.ip_address = addr
        iface.ip_list = IPInfoList(ipinfo)
        system.network_interface_list = NetworkInterfaceList(iface)
    elif search_string == 'SystemInfoItem/networkArray/networkInfo/ipArray/ipInfo/subnetMask':
        # BUG FIX: this branch previously compared content_string (not
        # search_string) to the IOC term, so subnet masks were never
        # translated. Every sibling branch compares search_string.
        addr = Address(xml.sanitize(content_string), category=Address.CAT_IPV4_NETMASK)
        ipinfo.subnet_mask = addr
        iface.ip_list = IPInfoList(ipinfo)
        system.network_interface_list = NetworkInterfaceList(iface)
    else:
        return None
    return Object(system)
def create_system_restore_obj(search_string, content_string, condition):
    """Map a SystemRestoreItem IOC term onto a CybOX WinSystemRestore Object.

    Returns None for unrecognized terms.
    """
    from cybox.objects.win_system_restore_object import WinSystemRestore, HiveList
    from cybox.common.properties import String
    restore = WinSystemRestore()
    # IOC term -> WinSystemRestore attribute name.
    attrmap = {
        "SystemRestoreItem/RestorePointName": "restore_point_name",
        "SystemRestoreItem/RestorePointFullPath": "restore_point_full_path",
        "SystemRestoreItem/RestorePointDescription": "restore_point_description",
        "SystemRestoreItem/RestorePointType": "restore_point_type",
        "SystemRestoreItem/Created": "created",
        "SystemRestoreItem/ChangeLogEntrySequenceNumber": "changelog_entry_sequence_number",
        "SystemRestoreItem/ChangeLogEntryFlags": "changelog_entry_flags",
        "SystemRestoreItem/FileAttributes": "file_attributes",
        "SystemRestoreItem/OriginalFileName": "original_file_name",
        "SystemRestoreItem/BackupFileName": "backup_file_name",
        "SystemRestoreItem/AclChangeUsername": "acl_change_sid",
        "SystemRestoreItem/AclChangeSecurityID": "acl_change_security_id",
        "SystemRestoreItem/OriginalShortFileName": "original_short_file_name",
        "SystemRestoreItem/ChangeLogEntryType": "changelog_entry_type"
    }
    if search_string in attrmap:
        set_field(restore, attrmap[search_string], content_string, condition)
    elif search_string == "SystemRestoreItem/RegistryHives/String":
        # BUG FIX: this branch previously compared content_string (not
        # search_string) to the IOC term, so registry hives were never
        # translated.
        s = String(xml.sanitize(content_string))
        s.condition = condition
        restore.registry_hive_list = HiveList(s)
    else:
        return None
    return Object(restore)
def create_user_obj(search_string, content_string, condition):
    """Map a UserItem IOC term onto a CybOX UserAccount Object.

    Windows-security and generic-account terms are delegated to the
    specialized builders; unknown terms yield None.
    """
    from cybox.objects.user_account_object import UserAccount
    from cybox.objects.win_user_object import WinGroup, WinGroupList
    # Terms handled by more specific builders.
    win_terms = (
        "UserItem/SecurityID",
        "UserItem/SecurityType",
    )
    account_terms = (
        "UserItem/description",
        "UserItem/disabled",
        "UserItem/lockedout"
    )
    field_map = {
        "UserItem/fullname": "full_name",
        "UserItem/homedirectory": "home_directory",
        "UserItem/passwordrequired": "password_required",
        "UserItem/scriptpath": "script_path",
        "UserItem/userpasswordage": "user_password_age"
    }
    if search_string in win_terms:
        return create_win_user_obj(search_string, content_string, condition)
    if search_string in account_terms:
        return create_account_obj(search_string, content_string, condition)
    account = UserAccount()
    if search_string in field_map:
        set_field(account, field_map[search_string], content_string, condition)
    elif search_string == "UserItem/grouplist/groupname":
        group = WinGroup()
        set_field(group, "name", content_string, condition)
        account.group_list = WinGroupList(group)
    else:
        return None
    return Object(account)
def create_volume_obj(search_string, content_string, condition):
    """Map a VolumeItem IOC term onto a CybOX Volume Object."""
    from cybox.objects.volume_object import Volume, FileSystemFlagList
    from cybox.common.properties import String
    # Drive letters are Windows-specific; hand them off.
    if search_string == "VolumeItem/DriveLetter":
        return create_win_volume_obj(search_string, content_string, condition)
    field_map = {
        "VolumeItem/ActualAvailableAllocationUnits": "actual_available_allocation_units",
        "VolumeItem/BytesPerSector": "bytes_per_sector",
        "VolumeItem/CreationTime": "creation_time",
        "VolumeItem/DevicePath": "device_path",
        "VolumeItem/FileSystemType": "file_system_type",
        "VolumeItem/IsMounted": "is_mounted",
        "VolumeItem/Name": "name",
        "VolumeItem/SectorsPerAllocationUnit": "sectors_per_allocation_unit",
        "VolumeItem/SerialNumber": "serial_number",
        "VolumeItem/TotalAllocationUnits": "total_allocation_units"
    }
    volume = Volume()
    if search_string in field_map:
        set_field(volume, field_map[search_string], content_string, condition)
    elif search_string == "VolumeItem/FileSystemFlags":
        # Flags become a sanitized String entry in the flag list.
        flag = String(xml.sanitize(content_string))
        flag.condition = condition
        volume.file_system_flag_list = FileSystemFlagList(flag)
    else:
        return None
    return Object(volume)
def create_win_system_obj(search_string, content_string, condition):
    """Map a Windows-specific SystemInfoItem term onto a WinSystem Object."""
    from cybox.objects.win_system_object import WinSystem
    field_map = {
        'SystemInfoItem/domain': "domain",
        'SystemInfoItem/productID': "product_id",
        'SystemInfoItem/productName': "product_name",
        'SystemInfoItem/regOrg': "registered_organization",
        'SystemInfoItem/regOwner': "registered_owner"
    }
    attr = field_map.get(search_string)
    if attr is None:
        return None
    winsys = WinSystem()
    set_field(winsys, attr, content_string, condition)
    return Object(winsys)
def create_win_task_obj(search_string, content_string, condition):
    """Map a TaskItem IOC term onto a CybOX WinTask Object.

    Action sub-terms populate a TaskActionList (COM handler, email,
    exec, or show-message actions); trigger sub-terms populate a
    TriggerList. Returns None for unrecognized terms.
    """
    from cybox.objects.win_task_object import (
        WinTask, TaskAction, TaskActionList, IComHandlerAction,
        IExecAction, Trigger, TriggerList, IShowMessageAction
    )
    # IOC term -> WinTask attribute name.
    attrmap = {
        "TaskItem/AccountLogonType": "account_logon_type",
        "TaskItem/AccountName": "account_name",
        "TaskItem/AccountRunLevel": "account_run_level",
        "TaskItem/ApplicationName": "application_name",
        "TaskItem/Comment": "comment",
        "TaskItem/CreationDate": "creation_date",
        "TaskItem/Creator": "creator",
        "TaskItem/ExitCode": "exit_code",
        "TaskItem/MaxRunTime": "max_run_time",
        "TaskItem/MostRecentRunTime": "most_recent_run_time",
        "TaskItem/Name": "name",
        "TaskItem/NextRunTime": "next_run_time",
        "TaskItem/Parameters": "parameters",
        "TaskItem/WorkItemData": "work_item_data",
        "TaskItem/WorkingDirectory": "working_directory",
        "TaskItem/Flag": "flags",
        "TaskItem/Priority": "priority",
        "TaskItem/Status": "status"
    }
    icom_attrmap = {
        "TaskItem/ActionList/Action/COMClassId": "com_class_id",
        "TaskItem/ActionList/Action/COMData": "com_data"
    }
    iexecaction_attrmap = {
        "TaskItem/ActionList/Action/ExecArguments": "exec_arguments",
        "TaskItem/ActionList/Action/ExecProgramPath": "exec_program_path",
        "TaskItem/ActionList/Action/ExecWorkingDirectory": "exec_working_directory"
    }
    ishowmessage_attrmap = {
        "TaskItem/ActionList/Action/ShowMessageBody": "show_message_body",
        "TaskItem/ActionList/Action/ShowMessageTitle": "show_message_title"
    }
    trigger_attrmap = {
        "TaskItem/TriggerList/Trigger/TriggerBegin": "trigger_begin",
        "TaskItem/TriggerList/Trigger/TriggerDelay": "trigger_delay",
        "TaskItem/TriggerList/Trigger/TriggerEnd": "trigger_end",
        "TaskItem/TriggerList/Trigger/TriggerFrequency": "trigger_frequency",
        "TaskItem/TriggerList/Trigger/TriggerMaxRunTime": "trigger_max_run_time",
        "TaskItem/TriggerList/Trigger/TriggerSessionChangeType": "trigger_session_change_type"
    }
    # Email action terms are rewritten into the Email builder's terms.
    email_map = {
        "TaskItem/ActionList/Action/EmailBCC": "Email/BCC",
        "TaskItem/ActionList/Action/EmailBody": "Email/Body",
        "TaskItem/ActionList/Action/EmailCC": "Email/CC",
        "TaskItem/ActionList/Action/EmailSubject": "Email/Subject",
        "TaskItem/ActionList/Action/EmailFrom": "Email/From",
        "TaskItem/ActionList/Action/EmailTo": "Email/To",
        "TaskItem/ActionList/Action/EmailReplyTo": "Email/ReplyTo",
        "TaskItem/ActionList/Action/EmailServer": "Email/EmailServer"
    }
    task = WinTask()
    action = TaskAction()
    actions = TaskActionList(action)
    trigger = Trigger()
    triggers = TriggerList(trigger)
    if search_string in attrmap:
        set_field(task, attrmap[search_string], content_string, condition)
    elif search_string in icom_attrmap:
        handler = IComHandlerAction()
        set_field(handler, icom_attrmap[search_string], content_string, condition)
        action.icomhandleraction = handler
        task.action_list = actions
    elif search_string in email_map:
        email = create_email_obj(email_map[search_string], content_string, condition)
        action.iemailaction = email
        task.action_list = actions
    elif search_string in iexecaction_attrmap:
        execaction = IExecAction()
        set_field(execaction, iexecaction_attrmap[search_string], content_string, condition)
        action.iexecaction = execaction
        task.action_list = actions
    elif search_string in ishowmessage_attrmap:
        ishowmessage = IShowMessageAction()
        set_field(ishowmessage, ishowmessage_attrmap[search_string], content_string, condition)
        # BUG FIX: a stray trailing comma previously assigned a 1-tuple
        # (ishowmessage,) to this attribute instead of the action itself.
        action.ishowmessageaction = ishowmessage
        task.action_list = actions
    elif search_string in trigger_attrmap:
        set_field(trigger, trigger_attrmap[search_string], content_string, condition)
        task.trigger_list = triggers
    elif search_string == "TaskItem/ActionList/Action/ActionType":
        set_field(action, "action_type", content_string, condition)
        task.action_list = actions
    else:
        return None
    return Object(task)
def create_win_volume_obj(search_string, content_string, condition):
    """Stub: WinVolume translation is disabled pending an upstream fix.

    Always returns None; the intended implementation is preserved below.
    """
    LOG.info("Cannot translate WinVolume object. See "
             "https://github.com/CybOXProject/python-cybox/issues/269")
    return None
    # from cybox.objects.win_volume_object import WinVolume
    #
    # if search_string != "VolumeItem/DriveLetter":
    #     return None
    #
    # volume = WinVolume()
    # set_field(volume, "drive_letter", content_string, condition)
    #
    # return Object(volume)
def create_unix_file_obj(search_string, content_string, condition):
    """Placeholder: always returns None (implicitly).

    python-cybox 2.1.0.11 has no Unix File Object, so these terms
    cannot be translated.
    """
    # python-cybox 2.1.0.11 does not support Unix File Object
    pass
def create_win_file_obj(search_string, content_string, condition):
    """Map a Windows-specific FileItem IOC term onto a CybOX WinFile Object.

    Stream name/size terms populate an attached StreamList; file
    attributes become a WindowsFileAttributes list. Returns None for
    unrecognized terms.
    """
    from cybox.objects.win_file_object import (
        WinFile, WindowsFileAttribute, WindowsFileAttributes, Stream, StreamList
    )
    # IOC term -> WinFile attribute name (stream hashes are set on the
    # file itself, matching the original mapping).
    attrmap = {
        "FileItem/Drive": "drive",
        "FileItem/FilenameAccessed": "filename_accessed_time",
        "FileItem/FilenameCreated": "filename_created_time",
        "FileItem/FilenameModified": "filename_modified_time",
        "FileItem/SecurityID": "security_id",
        "FileItem/SecurityType": "security_type",
        "FileItem/StreamList/Stream/Md5sum": "md5",
        "FileItem/StreamList/Stream/Sha1sum": "sha1",
        "FileItem/StreamList/Stream/Sha256sum": "sha256"
    }
    stream_attrmap = {
        "FileItem/StreamList/Stream/Name": "name",
        "FileItem/StreamList/Stream/SizeInBytes": "size_in_bytes"
    }
    file_ = WinFile()
    if search_string in attrmap:
        set_field(file_, attrmap[search_string], content_string, condition)
    # BUG FIX: this branch was a separate `if`, so attrmap terms fell
    # through to the final `else` and the function returned None even
    # after successfully setting the field.
    elif search_string in stream_attrmap:
        stream = Stream()
        set_field(stream, stream_attrmap[search_string], content_string, condition)
        file_.stream_list = StreamList(stream)
    elif search_string == "FileItem/FileAttributes":
        attr = WindowsFileAttribute(content_string)
        attr.condition = condition
        file_.file_attributes_list = WindowsFileAttributes(attr)
    else:
        return None
    return Object(file_)
def create_pefile_obj(search_string, content_string, condition):
    """Map a PEInfo IOC term (from FileItem, DriverItem, or ProcessItem)
    onto a CybOX WinExecutableFile Object.

    Matching is partial for most maps (the term prefix varies by item
    type), so `utils.partial_match` resolves the attribute name.
    Returns None for unrecognized terms.
    """
    from cybox.common import DigitalSignature
    from cybox.objects.file_object import (
        EPJumpCode, EntryPointSignature, EntryPointSignatureList,
        Packer, PackerList
    )
    from cybox.objects.win_executable_file_object import (
        WinExecutableFile, PEVersionInfoResource, PEResource, PEResourceList,
        PEChecksum, PEHeaders, PEOptionalHeader, PEExports, PEExportedFunctions,
        PEExportedFunction, PEImport, PEImportedFunction, PEImportedFunctions,
        PEImportList, PEFileHeader, PESection, PESectionList, PESectionHeaderStruct
    )
    # Partial term -> DigitalSignature attribute name.
    ds_attrmap = {
        "/PEInfo/DigitalSignature/CertificateIssuer": "certificate_issuer",
        "/PEInfo/DigitalSignature/CertificateSubject": "certificate_subject",
        "/PEInfo/DigitalSignature/Description": "certificate_description",
        "/PEInfo/DigitalSignature/SignatureExists": "signature_exists",
        "/PEInfo/DigitalSignature/SignatureVerified": "signature_verified"
    }
    # Exact term -> PEVersionInfoResource attribute name.
    verinfo_attrmap = {
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/Comments": "comments",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/CompanyName": "companyname",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/FileDescription": "filedescription",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/FileVersion": "fileversion",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/InternalName": "internalname",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/Language": "language",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/LegalCopyright": "legalcopyright",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/LegalTrademarks": "legaltrademarks",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/OriginalFilename": "originalfilename",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/PrivateBuild": "privatebuild",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/ProductName": "productname",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/ProductVersion": "productversion",
        "FileItem/PEInfo/VersionInfoList/VersionInfoItem/SpecialBuild": "specialbuild"
    }
    resource_attrmap = {
        "FileItem/PEInfo/ResourceInfoList/ResourceInfoItem/Name": "name",
        "FileItem/PEInfo/ResourceInfoList/ResourceInfoItem/Type": "type_"
    }
    checksum_attrmap = {
        "/PEInfo/PEChecksum/PEComputedAPI": "pe_computed_api",
        "/PEInfo/PEChecksum/PEFileAPI": "pe_file_api",
        "/PEInfo/PEChecksum/PEFileRaw": "pe_file_raw"
    }
    epsig_attrmap = {
        "/PEInfo/DetectedEntryPointSignature/Name": "name",
        "/PEInfo/DetectedEntryPointSignature/Type": "type_",
    }
    jmpcode_attrmap = {
        "/PEInfo/EpJumpCodes/Depth": "depth",
        "/PEInfo/EpJumpCodes/Opcodes": "opcodes"
    }
    exports_attrmap = {
        "/PEInfo/Exports/ExportsTimeStamp": "exports_time_stamp",
        "/PEInfo/Exports/NumberOfNames": "number_of_names"
    }
    winexec = WinExecutableFile()
    ds = DigitalSignature()
    verinfo = PEVersionInfoResource()
    verinforesources = PEResourceList(verinfo)
    resource = PEResource()
    resources = PEResourceList(resource)
    checksum = PEChecksum()
    exports = PEExports()
    if "/PEInfo/ExtraneousBytes" in search_string:
        set_field(winexec, "extraneous_bytes", content_string, condition)
    elif any(k in search_string for k in ds_attrmap):
        attr = utils.partial_match(ds_attrmap, search_string)
        set_field(ds, attr, content_string, condition)
        winexec.digital_signature = ds
    elif any(k in search_string for k in checksum_attrmap):
        attr = utils.partial_match(checksum_attrmap, search_string)
        set_field(checksum, attr, content_string, condition)
        winexec.pe_checksum = checksum
    elif any(k in search_string for k in exports_attrmap):
        attr = utils.partial_match(exports_attrmap, search_string)
        set_field(exports, attr, content_string, condition)
        winexec.exports = exports
    elif any(k in search_string for k in epsig_attrmap):
        # Entry-point signatures live inside a Packer entry.
        packer = Packer()
        epsig = EntryPointSignature()
        packerlist = PackerList(packer)
        epsiglist = EntryPointSignatureList(epsig)
        packer.detected_entrypoint_signatures = epsiglist
        winexec.packer_list = packerlist
        attr = utils.partial_match(epsig_attrmap, search_string)
        set_field(epsig, attr, content_string, condition)
    elif any(k in search_string for k in jmpcode_attrmap):
        epjumpcode = EPJumpCode()
        packer = Packer()
        packerlist = PackerList(packer)
        packer.ep_jump_codes = epjumpcode
        winexec.packer_list = packerlist
        attr = utils.partial_match(jmpcode_attrmap, search_string)
        set_field(epjumpcode, attr, content_string, condition)
    elif search_string in verinfo_attrmap:
        set_field(verinfo, verinfo_attrmap[search_string], content_string, condition)
        winexec.resources = verinforesources
    elif search_string in resource_attrmap:
        set_field(resource, resource_attrmap[search_string], content_string, condition)
        winexec.resources = resources
    elif "/PEInfo/BaseAddress" in search_string:
        headers = PEHeaders()
        opt = PEOptionalHeader()
        set_field(opt, "base_of_code", content_string, condition)
        headers.optional_header = opt
        winexec.headers = headers
    elif "/Exports/ExportedFunctions/string" in search_string:
        func = PEExportedFunction()
        funclist = PEExportedFunctions(func)
        exports.exported_functions = funclist
        winexec.exports = exports
        set_field(func, "function_name", content_string, condition)
    elif search_string in ["FileItem/PEInfo/ImportedModules/Module/ImportedFunctions/string",
                           "DriverItem/PEInfo/ImportedModules/Module/ImportedFunctions/string"]:
        import_ = PEImport()
        imports = PEImportList(import_)
        func = PEImportedFunction()
        funcs = PEImportedFunctions(func)
        import_.imported_functions = funcs
        winexec.imports = imports
        set_field(func, "function_name", content_string, condition)
    elif "/PEInfo/ImportedModules/Module/Name" in search_string:
        import_ = PEImport()
        imports = PEImportList(import_)
        winexec.imports = imports
        set_field(import_, "file_name", content_string, condition)
    elif "/PEInfo/PETimeStamp" in search_string:
        header = PEFileHeader()
        headers = PEHeaders()
        headers.file_header = header
        winexec.headers = headers
        set_field(header, "time_date_stamp", content_string, condition)
    elif "/PEInfo/Sections/Section/DetectedCharacteristics" in search_string:
        section = PESection()
        sections = PESectionList(section)
        header = PESectionHeaderStruct()
        section.section_header = header
        winexec.sections = sections
        set_field(header, "characteristics", content_string, condition)
    else:
        return None
    return Object(winexec)
def create_win_user_obj(search_string, content_string, condition):
    """Map a Windows-specific UserItem term onto a CybOX WinUser Object."""
    from cybox.objects.win_user_object import WinUser
    field_map = {
        "UserItem/SecurityID": "security_id",
        "UserItem/SecurityType": "security_type"
    }
    attr = field_map.get(search_string)
    if attr is None:
        return None
    winuser = WinUser()
    set_field(winuser, attr, content_string, condition)
    return Object(winuser)
def create_account_obj(search_string, content_string, condition):
    """Map a generic UserItem account term onto a CybOX Account Object."""
    from cybox.objects.account_object import Account
    field_map = {
        "UserItem/description": "description",
        "UserItem/disabled": "disabled",
        "UserItem/lockedout": "locked_out"
    }
    attr = field_map.get(search_string)
    if attr is None:
        return None
    account = Account()
    set_field(account, attr, content_string, condition)
    return Object(account)
def create_win_memory_page_obj(search_string, content_string, condition):
    """Map a memory-section Protection term onto a WinMemoryPageRegion Object."""
    from cybox.objects.win_memory_page_region_object import WinMemoryPageRegion
    if search_string != "ProcessItem/SectionList/MemorySection/Protection":
        return None
    region = WinMemoryPageRegion()
    set_field(region, "protect", content_string, condition)
    return Object(region)
def create_win_process_obj(search_string, content_string, condition):
    """Map a Windows-specific ProcessItem IOC term onto a WinProcess Object.

    PE-info terms delegate to the PE-file builder and memory-protection
    terms to the memory-page builder. Returns None for unknown terms.
    """
    from cybox.objects import win_process_object
    from cybox.common import hashes
    proc = win_process_object.WinProcess()
    # IOC term -> WinHandle attribute name.
    handle_attrmap = {
        "ProcessItem/HandleList/Handle/AccessMask": "access_mask",
        "ProcessItem/HandleList/Handle/Index": "id_",
        "ProcessItem/HandleList/Handle/Name": "name",
        "ProcessItem/HandleList/Handle/ObjectAddress": "object_address",
        "ProcessItem/HandleList/Handle/PointerCount": "pointer_count",
        "ProcessItem/HandleList/Handle/Type": "type_",
    }
    # IOC term -> memory-section attribute name.
    memory_attrmap = {
        "ProcessItem/SectionList/MemorySection/Injected": "is_injected",
        "ProcessItem/SectionList/MemorySection/Mapped": "is_mapped",
        "ProcessItem/SectionList/MemorySection/Name": "name",
        "ProcessItem/SectionList/MemorySection/RegionSize": "region_size",
        "ProcessItem/SectionList/MemorySection/RegionStart": "region_start",
    }
    # IOC term -> hash-type attribute on a memory section's HashList.
    hash_attrmap = {
        "ProcessItem/SectionList/MemorySection/Md5sum": "md5",
        "ProcessItem/SectionList/MemorySection/Sha1Sum": "sha1",
        "ProcessItem/SectionList/MemorySection/Sha256Sum": "sha256",
    }
    proc_attrmap = {
        "ProcessItem/SecurityID": "security_id",
        "ProcessItem/SecurityType": "security_type"
    }
    if "/PEInfo" in search_string:
        return create_pefile_obj(search_string, content_string, condition)
    elif search_string == "ProcessItem/SectionList/MemorySection/Protection":
        # BUG FIX: the delegated builder's result was previously
        # discarded (no `return`), so an empty WinProcess Object was
        # returned instead of the memory-page Object.
        return create_win_memory_page_obj(search_string, content_string, condition)
    elif search_string in proc_attrmap:
        set_field(proc, proc_attrmap[search_string], content_string, condition)
    elif search_string in handle_attrmap:
        handle = win_process_object.WinHandle()
        handles = win_process_object.WinHandleList(handle)
        proc.handle_list = handles
        set_field(handle, handle_attrmap[search_string], content_string, condition)
    elif search_string in memory_attrmap:
        section = win_process_object.Memory()
        sections = win_process_object.MemorySectionList(section)
        proc.section_list = sections
        set_field(section, memory_attrmap[search_string], content_string, condition)
    elif search_string in hash_attrmap:
        hashlist = hashes.HashList()
        section = win_process_object.Memory()
        section.hashes = hashlist
        sections = win_process_object.MemorySectionList(section)
        proc.section_list = sections
        set_field(hashlist, hash_attrmap[search_string], content_string, condition)
    else:
        return None
    return Object(proc)
def make_object(search_string, content_string, condition):
    """Dispatch an IOC search term to the matching CybOX object builder.

    The builder is chosen by the term's top-level prefix (e.g.
    "FileItem"). Returns the built Object, or None when no builder
    exists or the builder cannot map the term.
    """
    prefix = search_string.split('/', 1)[0]
    builder = OBJECT_FUNCS.get(prefix)
    obj = builder(search_string, content_string, condition) if builder else None
    if obj is None:
        LOG.debug("Unable to map %s to CybOX Object.", search_string)
    return obj
# Dispatch table mapping a term's top-level prefix (the part before the
# first '/') to the builder that translates it into a CybOX Object.
OBJECT_FUNCS = {
    'DiskItem': create_disk_obj,
    'DnsEntryItem': create_dns_obj,
    'DriverItem': create_driver_obj,
    'Email': create_email_obj,
    'EventLogItem': create_win_event_log_obj,
    'FileItem': create_file_obj,
    'HookItem': create_hook_obj,
    'ModuleItem': create_library_obj,
    'Network': create_network_connection_obj,
    'PortItem': create_port_obj,
    'PrefetchItem': create_prefetch_obj,
    'ProcessItem': create_process_obj,
    'RegistryItem': create_registry_obj,
    'RouteEntryItem': create_net_route_obj,
    'ServiceItem': create_service_obj,
    'SystemInfoItem': create_system_object,
    'SystemRestoreItem': create_system_restore_obj,
    'TaskItem': create_win_task_obj,
    'UserItem': create_user_obj,
    'VolumeItem': create_volume_obj
}
| true |
3d519703592a29f9fad61d7c4af645234a0a48fc | Python | OSLL/edu-git-stats | /gitstat/proj-gitstat-annotate/treap.py | UTF-8 | 1,923 | 3.125 | 3 | [] | no_license | from random import random
class Treap:
    """Node of an implicit-key treap (random heap priority, subtree-size keys)."""

    def __init__(self, value=0):
        # Aggregates start out describing a single-node subtree.
        self.value = value
        self.sum = value
        self.y = random()   # random heap priority
        self.id = 1         # 1-based position within this subtree
        self.size = 1       # node count of this subtree
        self.left = None
        self.right = None

    def Normalize(self):
        """Recompute id/size/sum from the children after a structural change."""
        left_size = Size(self.left)
        self.id = 1 + left_size
        self.size = 1 + left_size + Size(self.right)
        self.sum = self.value + Sum(self.left) + Sum(self.right)
def Size(node):
    """Return the subtree size of ``node``; an empty (None) tree has size 0."""
    return 0 if node is None else node.size
def Sum(node):
    """Return the subtree value sum of ``node``; an empty (None) tree sums to 0."""
    return 0 if node is None else node.sum
def Split(node, x):
    """Split a treap into (first x nodes, the rest) by implicit index."""
    if node is None:
        return None, None
    if node.id <= x:
        # This node and its left subtree belong to the left part.
        node.right, tail = Split(node.right, x - node.id)
        node.Normalize()
        return node, tail
    head, node.left = Split(node.left, x)
    node.Normalize()
    return head, node
def Merge(left, right):
    """Concatenate two treaps, keeping the min-heap property on ``y``."""
    if left is None or right is None:
        return left if right is None else right
    if left.y < right.y:
        left.right = Merge(left.right, right)
        left.Normalize()
        return left
    right.left = Merge(left, right.left)
    right.Normalize()
    return right
def Build(n, value=None):
    """Build a treap of ``n`` nodes, each holding ``value`` (falsy -> 0)."""
    root = None
    for _ in range(n):
        root = Merge(root, Treap(value or 0))
    return root
def Debug(root, level=1):
    """Print the treap sideways (right subtree first), one node per line."""
    if root is None:
        return
    Debug(root.right, level + 1)
    info = "%d (id = %d size = %d)" % (root.value, root.id, root.size)
    print("!", " " * level, info, sep="")
    Debug(root.left, level + 1)
def Out(root, file=None):
    """Print non-zero node values with 1-based indices; runs of zeros print "...".

    `file` is an optional sequence of per-index annotations (e.g. source
    lines) appended after each value. NOTE: this consumes the treap --
    nodes are split off one at a time and never merged back, so `root`
    must not be reused afterwards.
    """
    i = 1
    isEmpty = False
    # Idiom fix: `x is not None` replaces the original `not x is None`.
    while root is not None:
        v, root = Split(root, 1)
        if v.value:
            print("%5d: %5d %s" % (i, v.value, file[i - 1] if file is not None else ""))
            isEmpty = False
        else:
            # Collapse consecutive zero-valued entries into one "..." line.
            if not isEmpty:
                print("...")
            isEmpty = True
        i += 1
| true |
465f67bfeacd5b9c1e94a2a306af4ba09a93c834 | Python | flash-sinx/Web-Crawler | /db_utils.py | UTF-8 | 2,709 | 3 | 3 | [] | no_license |
from datetime import datetime
from datetime import timedelta
from pymongo import MongoClient
from cfg import config, db_cfg
client = MongoClient(db_cfg['host'], db_cfg['port'])
db = client[db_cfg['db']]
def insert_root(url):
    """Seed the link collection with the crawl's root URL.

    The root is its own source link and starts out uncrawled.
    """
    record = {
        'Link': url,
        'Source Link': url,        # the root points at itself
        'isCrawled': False,        # not crawled yet
        'Last Crawled': "Never",
        'Response Status': '',
        'Content Type': '',
        'Content length': '',
        'File Path': "",
        'Date Created': datetime.now(),
    }
    db.linkcol.insert_one(record)
def insert_new_links(new_urls, source_url, max_url):
    """Insert every not-yet-seen link from a page into the database.

    Args:
        new_urls:   iterable of absolute URLs extracted from a page.
        source_url: the page the links were extracted from (kept as the
                    first-seen source for each link).
        max_url:    overall cap on stored links; insertion stops once the
                    collection reaches this size.
    """
    for link in new_urls:
        if already_inserted(link):
            continue
        # Check the cap before building the document (the original built the
        # record first and threw the work away when the cap was hit).
        if max_url <= db.linkcol.count():
            break
        doc = {
            'Link': link,
            'Source Link': source_url,
            'isCrawled': False,        # links start out uncrawled
            'Last Crawled': "Never",
            'Response Status': '',
            'Content Type': '',
            'Content length': '',
            'File Path': "",
            'Date Created': datetime.now(),
        }
        db.linkcol.insert_one(doc)
        print(link + " inserted at " + str(db.linkcol.count()))
def already_inserted(link):
    """Return True if ``link`` is already stored in the link collection."""
    # idiom fix: was `if find_one(...) == None: return False / return True`
    return db.linkcol.find_one({'Link': link}) is not None
def all_crawled():
    """Count links that are due for (re-)crawling.

    A link is due when it has never been crawled, or when its last crawl is
    at least ``config['time_diff']`` days old.

    :return: number of due links.
    """
    count = 0
    for doc in db.linkcol.find({}):
        last = doc['Last Crawled']
        if last == 'Never':
            count += 1
        elif (datetime.now() - last).days >= config['time_diff']:
            count += 1
    return count
def get_all_uncrawled():
    """Return the set of URLs that are due for (re-)crawling.

    Same due-date rule as ``all_crawled``: never crawled, or last crawled
    at least ``config['time_diff']`` days ago.
    """
    due = set()
    for doc in db.linkcol.find({}):
        last = doc['Last Crawled']
        if last == 'Never' or (datetime.now() - last).days >= config['time_diff']:
            due.add(doc['Link'])
    return due
| true |
69d8d14b0da4fb86c8e4a8686bdaa920b3958779 | Python | pshivrame25/Point-to-Multipoint---FTP---UDP- | /s.py | UTF-8 | 7,201 | 2.515625 | 3 | [] | no_license | import sys
import socket
import time
import random
#*******************************************************************************************************************************************************************************************
#
#--------------- 1 6 B I T O N E ' S C O M P L E M E N T A D D I T I O N---------------------------------------------------------------------------------------------------------
#
#*******************************************************************************************************************************************************************************************
def carry_add(word1, word2):
    """Add two 16-bit words with one's-complement (end-around) carry."""
    total = word1 + word2
    low = total & 0xffff
    carry = total >> 16
    return low + carry
#**********************************************************************************************************************************************************************
def checksum(data):
    """16-bit one's-complement checksum of ``data`` as a 16-character bit string.

    Characters are paired into little-endian 16-bit words (odd-length input
    is padded with a trailing space), summed with end-around carry, and the
    one's complement of the sum is returned zero-padded to 16 bits.
    """
    if len(data) % 2:
        data = data + ' '  # pad odd-length input so it splits cleanly into words
    total = 0
    for i in range(0, len(data), 2):
        total += ord(data[i]) + (ord(data[i + 1]) << 8)
    # Fold the accumulated carries back in (end-around carry).  Deferring the
    # fold to the end is equivalent to folding after every addition (RFC 1071),
    # so this matches the original incremental carry_add loop — without the
    # duplicated even/odd loop bodies.
    while total >> 16:
        total = (total & 0xffff) + (total >> 16)
    # Format spec replaces the fragile bin(...).lstrip('0b').zfill(16) idiom.
    return format(~total & 0xffff, '016b')
#*******************************************************************************************************************************************************************************************
#
#--------------- S E R V E R R E P L Y & F I L E W R I T E----------------------------------------------------------------------------------------------------------------------------------------------------
#
#*******************************************************************************************************************************************************************************************
def server_reply_write(conn_socket, seq_no, file_ptr, data):
    """ACK the packet carrying ``seq_no`` back to the client and persist its payload."""
    ACK_ID = '1010101010101010'
    ZEROS = '0000000000000000'
    # ACK layout mirrors the data packets: seq ### zeros ### ack-id.
    reply = sep.join([seq_no, ZEROS, ACK_ID])
    conn_socket.sendto(reply.encode(), clientAddress)
    file_ptr.write(data.encode())
    return
#*******************************************************************************************************************************************************************************************
#
#--------------- C H E C K S U M M I N G & S E Q U E N C E N O. C H E C K------------------------------------------------------------------------------------------------------
#
#*******************************************************************************************************************************************************************************************
def check_pckt(conn_socket, in_msg, file_ptr):
    '''Checks the UDP checksum and sequence no and if proper the packet is ACKed'''
    global exp_in_msg_seq_no
    # Packet layout: <seq_no> sep <checksum> sep <data_id> sep <data>.
    in_msg_split = in_msg.split(sep)
    in_msg_seq_no = in_msg_split[0]
    in_msg_checksum = in_msg_split[1]
    in_msg_data_id = in_msg_split[2]
    in_msg_data = in_msg_split[3]
    # NOTE(review): this is a literal placeholder, not a computed checksum —
    # presumably it should be checksum(in_msg_data); as written, validation
    # only passes when the client puts the literal text 'checksum' in the
    # checksum field.  TODO confirm against the client implementation.
    checksum_local = 'checksum'
    if checksum_local == in_msg_checksum: # if checksum matches the received checksum
        ack_id_field = '1010101010101010'
        zero_field = '0000000000000000'
        out_msg = in_msg_seq_no + sep + zero_field + sep + ack_id_field
        if int(in_msg_seq_no, 2) == exp_in_msg_seq_no: # if sequence number matches the expected sequence number
            if in_msg_data == '':
                # Empty payload signals end of transfer: ACK, then shut down.
                out_msg = out_msg.encode()
                conn_socket.sendto(out_msg,clientAddress)
                file_ptr.close()
                conn_socket.close()
            else:
                server_reply_write(conn_socket, in_msg_seq_no, file_ptr, in_msg_data) # reply to client and write the data onto file
                exp_in_msg_seq_no += len(in_msg_data)
        else:
            # Unexpected sequence number: re-ACK it without writing anything.
            out_msg = out_msg.encode()
            conn_socket.sendto(out_msg,clientAddress)
    else:
        print("ERROR::xxx::Checksum doesn't match. Received a corrupted packet::xxx::ERROR")
        return False
    return True
#*******************************************************************************************************************************************************************************************
#
#--------------- P R O B A B I L I S T I C L O S S S E R V I C E----------------------------------------------------------------------------------------------------------------------
#
#*******************************************************************************************************************************************************************************************
def loss_service(drop_prob):
    """Simulate probabilistic packet loss.

    Draws a uniform value in [0, 1); returns True (process the packet) when
    the draw is at least ``drop_prob``, False (drop the packet) otherwise.
    """
    # rand_prob = 1.1  (kept from the original: handy for forcing a drop)
    draw = random.random()
    return draw >= drop_prob
#*******************************************************************************************************************************************************************************************
#
#--------------- E X E C U T I O N O F M A I N C O D E---------------------------------------------------------------------------------------------------------------------------------------------------------
#
#*******************************************************************************************************************************************************************************************
# --- Server entry point -------------------------------------------------
# Usage: python s.py <server_port> <output_file> <drop_probability>
server_port = int(sys.argv[1])
file_name = sys.argv[2]
drop_prob = float(sys.argv[3])
exp_in_msg_seq_no = 0                 # next expected sequence number (bytes received so far)
file_ptr = open(file_name, 'wb')      # received payload is written here
sep ='###'                            # field separator used in every packet
server_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server = socket.gethostbyname(socket.gethostname())
# NOTE(review): the command-line port parsed above is discarded here — the
# server always listens on 7735.  Confirm whether argv[1] should win.
server_port = 7735 # common welcome port on all RFC servers
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind((server, server_port)) # binding this socket to the welcome port
server_socket.settimeout(500)
#in_msg, clientAddress = server_socket.recvfrom(1500)
while True:
    # Receive one datagram; clientAddress is read globally by the reply helpers.
    in_msg, clientAddress = server_socket.recvfrom(1500)
    server_socket.settimeout(500)
    process_flag = loss_service(drop_prob)  # simulate probabilistic loss
    in_msg = in_msg.decode()
    msg = in_msg.split(sep)
    if process_flag:
        check_pckt(server_socket, in_msg, file_ptr)
    else:
        print("Packet loss, sequence number = ",int(msg[0], 2))
| true |
fccfb0480169ade0c5b5fa3bed6822044dbd2c97 | Python | BobIT37/Pytest_Selenium | /SeleniumMethods/test_23_RadioButton.py | UTF-8 | 619 | 2.921875 | 3 | [] | no_license | from selenium.webdriver import Chrome
import pytest
from time import sleep
@pytest.fixture()
def setPath():
    """Launch a Chrome driver for the test, then quit it during teardown."""
    global driver
    # Hard-coded local chromedriver path; tests read ``driver`` as a global.
    path = "/Users/bobit/Documents/Drivers/chromedriver"
    driver = Chrome(executable_path=path)
    yield
    driver.quit()
def test_radio_button(setPath):
    """Click the 'Mango' radio button and log its selection state before/after."""
    driver.maximize_window()
    # Static local test page bundled with the project.
    driver.get("file:///Users/bobit/PycharmProjects/Pytest_Selenium_Methods/files/index.html")
    sleep(3)
    element = driver.find_element_by_xpath("//input[@value='Mango']")
    print("\nBEFORE: ", element.is_selected())
    element.click()
    sleep(2)
    print("\nAFTER: ", element.is_selected())
580811d6051297b913bad30d1ef494332a136076 | Python | sapjunior/objdetection-pytorch | /objdetection/models/backbone/darknet.py | UTF-8 | 2,879 | 2.953125 | 3 | [] | no_license | import torch
import torch.nn as nn
from collections import OrderedDict
class DarkNetBasicBlock(nn.Module):
    """Darknet residual block: a 1x1 bottleneck conv followed by a 3x3 conv.

    ``planes`` is a pair (bottleneck_channels, output_channels); the output
    channel count must equal ``inplanes`` so the skip connection can be added
    element-wise.  Spatial size is preserved (stride 1, padding 1 on the 3x3).
    """

    def __init__(self, inplanes, planes):
        super(DarkNetBasicBlock, self).__init__()
        # 1x1 conv squeezes the channel count, 3x3 conv restores it.
        self.conv1 = nn.Conv2d(inplanes, planes[0], kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(planes[0])
        self.relu1 = nn.LeakyReLU(0.1)
        self.conv2 = nn.Conv2d(planes[0], planes[1], kernel_size=3, stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes[1])
        self.relu2 = nn.LeakyReLU(0.1)

    def forward(self, x):
        # x -> conv1 -> bn1 -> relu1 -> conv2 -> bn2 -> relu2 -> (+ x)
        shortcut = x
        out = self.relu1(self.bn1(self.conv1(x)))
        out = self.relu2(self.bn2(self.conv2(out)))
        return out + shortcut
class DarkNet(nn.Module):
    """Darknet backbone: a stem conv followed by five downsampling stages.

    ``layers`` gives the residual-block count of each stage.  ``forward``
    returns the feature maps of the last three stages, as consumed by
    YOLO-style detection heads.
    """

    def __init__(self, layers):
        super(DarkNet, self).__init__()
        self.layers_out_filters = [64, 128, 256, 512, 1024]
        self.inplanes = 32
        # Stem: full-resolution 3x3 conv from RGB to 32 channels.
        self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(self.inplanes)
        self.relu1 = nn.LeakyReLU(0.1)
        # Five stages: each halves the spatial size and doubles the channels.
        # _make_layer takes the [in, out] filter widths of its stage.
        self.layer1 = self._make_layer([32, 64], layers[0])
        self.layer2 = self._make_layer([64, 128], layers[1])
        self.layer3 = self._make_layer([128, 256], layers[2])
        self.layer4 = self._make_layer([256, 512], layers[3])
        self.layer5 = self._make_layer([512, 1024], layers[4])

    def _make_layer(self, planes, blocks):
        """One stage: a stride-2 downsampling conv plus ``blocks`` residual blocks."""
        stage = [
            ("ds_conv", nn.Conv2d(self.inplanes, planes[1], kernel_size=3, stride=2, padding=1, bias=False)),
            ("ds_bn", nn.BatchNorm2d(planes[1])),
            ("ds_relu", nn.LeakyReLU(0.1)),
        ]
        self.inplanes = planes[1]  # the next stage starts from this stage's width
        for i in range(blocks):
            stage.append(("residual_{}".format(i), DarkNetBasicBlock(self.inplanes, planes)))
        return nn.Sequential(OrderedDict(stage))

    def forward(self, x):
        x = self.relu1(self.bn1(self.conv1(x)))
        x = self.layer2(self.layer1(x))
        out3 = self.layer3(x)
        out4 = self.layer4(out3)
        out5 = self.layer5(out4)
        return out3, out4, out5
def darknet21():
    """DarkNet-21 backbone: (1, 1, 2, 2, 1) residual blocks per stage."""
    return DarkNet([1, 1, 2, 2, 1])
def darknet53():
    """DarkNet-53 backbone: (1, 2, 8, 8, 4) residual blocks per stage."""
    return DarkNet([1, 2, 8, 8, 4])
8a2c96735aa59f1b2eb728a41597a9fc34e07f16 | Python | amochtar/adventofcode | /2016/day-10/part-1.py | UTF-8 | 1,278 | 2.890625 | 3 | [
"MIT"
] | permissive | from collections import defaultdict
# Advent of Code 2016 day 10 (Python 2 syntax: print statements, dict.iteritems).
with open("input.txt", "r") as f:
    input = [x.strip() for x in f.readlines()]
instructions = {}
bots = defaultdict(list)
outputs = {}
# First pass: record initial chip assignments and each bot's give-rules.
# instructions maps bot id -> {'low': (target_kind, target_id), 'high': ...}.
for i in input:
    parts = i.split()
    if parts[0] == 'value':
        bots[parts[5]].append(int(parts[1]))
    elif parts[2] == 'gives':
        instructions[parts[1]] = {
            'low': (parts[5], parts[6]),
            'high': (parts[-2], parts[-1])
        }
has_bots = False
has_outputs = False
# Simulate until both answers are found: part 1 is the bot that compares
# chips 17 and 61; part 2 is the product of output bins 0, 1 and 2.
while not (has_bots and has_outputs):
    # Every bot holding two chips acts this round.
    bot = {k: v for k, v in bots.iteritems() if len(v) > 1}
    for name, values in bot.iteritems():
        if 17 in values and 61 in values:
            print "Part 1:", name
            has_bots = True
        x = instructions[name]
        high = x['high']
        if high[0] == 'bot':
            bots[high[1]].append(max(values))
        else:
            outputs[high[1]] = max(values)
        low = x['low']
        if low[0] == 'bot':
            bots[low[1]].append(min(values))
        else:
            outputs[low[1]] = min(values)
        bots[name] = []
    try:
        part2 = outputs['0'] * outputs['1'] * outputs['2']
        print "Part 2:", part2
        has_outputs = True
    except KeyError:
        pass
| true |
e1a2ebffe044d666f0992c46c37c20cd8e28ef17 | Python | alexanderhenne/randcam | /tests/tests.py | UTF-8 | 692 | 2.875 | 3 | [
"Apache-2.0"
] | permissive | import unittest
import hashlib
import binascii
import randcam
byte_array = bytearray("randcam is a good project", "utf-8")
class RandomTest(unittest.TestCase):
    """Pin the SHA-256 digest of the reference byte string to a known value."""

    def test_random(self):
        digest = hashlib.sha256(byte_array).digest()
        hex_digest = binascii.hexlify(digest).decode("utf-8")
        self.assertEqual(
            hex_digest,
            "47ed7bd2cd92e9a863c13ed2cda933fb093447f18e65fd2563e580d37a9e4e60")
class ShannonTest(unittest.TestCase):
    # Exercises the randcam package under test.
    def test_shannon(self):
        """Shannon entropy of the reference byte string is pinned to a known value."""
        entropy = randcam.shannon_entropy(byte_array)
        self.assertEqual(entropy, 3.7034651896016464)
# Allow running the suite directly with ``python tests.py``.
if __name__ == '__main__':
    unittest.main()
| true |
83e6afff9d54a7c80d25986c72e1587128e08cc1 | Python | largecats/text-analysis | /tfwiki/tfwiki_url_scrapper.py | UTF-8 | 2,460 | 2.71875 | 3 | [] | no_license | import requests
import re
import numpy as np
import pandas as pd
import time
import random
from bs4 import BeautifulSoup
#import urllib
from IPython.core.display import clear_output
from time import sleep
from random import randint
from warnings import warn
import os
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
import csv
URL = 'https://tfwiki.net/mediawiki/index.php?title=Special%3AAllPages&from=&to=&namespace=0'
CSV_PATH = 'C:\\Users\\xiaolinfan\\Fun\\programming\\personal-projects\\text-analysis\\tfwiki\\urls.csv'
def write_dict_to_csv(d, csvPath):
    """Write ``d`` (a list of row dicts, or columns -> values) to ``csvPath`` as UTF-8 CSV."""
    pd.DataFrame(d).to_csv(csvPath, index=False, encoding='utf-8')
def convert_to_url(url):
    """Return an absolute URL, prefixing site-relative paths with the wiki host."""
    if url.startswith('http'):
        return url
    return 'https://tfwiki.net' + url
def get_page_html(url):
    """Fetch ``url`` (after a random polite delay) and return its parsed HTML."""
    sleep(random.uniform(0.5, 2))  # throttle so we don't hammer the wiki
    response = requests.get(url)
    pageHtml = BeautifulSoup(response.text, 'html.parser')
    return pageHtml
def get_urls(pageHtml):
    """Collect {'name', 'url'} records from a Special:AllPages listing.

    Index pages (table.allpageslist) are recursed into; leaf pages
    (table.mw-allpages-table-chunk) yield one record per article link.
    Returns None when the page contains neither table.
    """
    allPagesList = pageHtml.find('table', class_='allpageslist')
    if allPagesList is None:
        # Leaf page: a chunk table listing actual articles.
        allPagesTableChunk = pageHtml.find('table', class_='mw-allpages-table-chunk')
        if allPagesTableChunk is None:
            return None
        else:
            results = []
            urlInfos = allPagesTableChunk.find_all('td')
            for urlInfo in urlInfos:
                url, pageName = urlInfo.a.get('href'), urlInfo.a.get('title')
                url = convert_to_url(url)
                print('name={}, url={}'.format(pageName, url))
                results.append({'name':pageName, 'url':url})
            return results
    else:
        # Index page: each row links to a sub-range ("<first> to <last>").
        results = []
        urlFromTos = allPagesList.find_all('tr')
        for urlFromTo in urlFromTos:
            url = convert_to_url(urlFromTo.find('td', class_='mw-allpages-alphaindexline').a.get('href'))
            urls = get_urls(get_page_html(url))
            if urls:
                results += urls
            else:
                # Fall back to the range label itself when recursion found nothing.
                pattern = re.compile(r'(.+) to .+', flags=re.DOTALL)
                pageName = pattern.findall(urlFromTo.text)[0]
                print('name={}, url={}'.format(pageName, url))
                results.append({'name':pageName, 'url':url})
        return results
# Scrape the full page index starting from the root listing and dump it to CSV.
if __name__ == '__main__':
    pageHtml = get_page_html(URL)
    urls = get_urls(pageHtml)
    write_dict_to_csv(d=urls, csvPath=CSV_PATH)
| true |
34837c82cf3514461192dc915b0f0903fc9aee04 | Python | kowlalisreecharan/OCR | /pthon code/code.py | UTF-8 | 2,171 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | import cv2 #opencv library
import numpy as np
from matplotlib import pyplot as plt
from transform import *
import subprocess
import os
import os.path
# Waiting for the image to get uploaded
# NOTE(review): this busy-wait spins at 100% CPU; consider sleeping in the loop.
# The path literals contain unescaped backslashes — raw strings would be safer.
while(not os.path.exists("C:\wamp\www\AndroidFileUpload\uploads\document.jpeg")):
    pass
print("Performing OCR")
# Replace with your destination of image
img = cv2.imread('C:\wamp\www\AndroidFileUpload\uploads\document.jpeg')
#img = enhance(img)
org = img
# Remember the scale factor so detected corners can be mapped back to full size.
ratio = img.shape[0] / 500.0
img = aspectResize(img, rows = 500)
#visit http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_tutorials.html to know about functions
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#processing image by Blurring, Edge detection and Binarization
img = cv2.GaussianBlur(img, (5, 5), 0)
kernel = np.ones((3,3),np.uint8)
img = cv2.dilate(img,kernel,iterations = 1)
img = cv2.Canny(img, 20, 75)
img = cv2.dilate(img,kernel,iterations = 1)
plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
plt.show()
#Approximating contours to rectangle to extract out essential part
_, contours, hierarchy = cv2.findContours(img, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
# Keep only the five largest contours; the document outline should be among them.
contours = sorted(contours, key = cv2.contourArea, reverse = True)[:5]
rect = None
for c in contours:
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.02 * peri, True)
    if len(approx) == 4:
        rect = approx
        break
# Perspective-correct the document if a quadrilateral outline was found.
if(not(rect is None)):
    pts = rect.reshape(4, 2)
    img = four_point_transform(org,pts*ratio)
else:
    img = org
#if document is in landscape mode rotate it
h,w = img.shape[:2]
if(w < h):
    img = rotate_image(img)
org = img
img = process_img(img)
img = crop_resize(img,org)
img = deskew_image(img)
plt.imshow(img, cmap = 'gray', interpolation = 'bicubic')
plt.show()
cv2.imwrite('final.png',img)
#You change whitelist according to the document to be processed
whitelist = "ABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890"
command = "tesseract final.png out -c tessedit_char_whitelist="+whitelist+" -psm 8"
# Run tesseract; OCR output lands in out.txt.
subprocess.check_call(command)
# Consume the uploaded image and echo the recognized text.
os.remove("C:\wamp\www\AndroidFileUpload\uploads\document.jpeg")
f = open("out.txt")
print(f.read())
| true |
e4b0a6e87ed5c13ca334ea3dff8a54e6c7d51746 | Python | hanjackcyw/smartypy | /src/utils.py | UTF-8 | 1,806 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python
__author__ = 'Zach Dischner'
__copyright__ = ""
__credits__ = ["NA"]
__license__ = "NA"
__version__ = "0.0.2"
__maintainer__ = "Zach Dischner"
__email__ = "zach.dischner@gmail.com"
__status__ = "Dev"
__doc__ ="""
File name: utils.py
Created: 04/Sept/2016
Modified: 04/Sept/2016
Houses a couple common utilities used by various scripts.
"""
##############################################################################
# Imports
#----------*----------*----------*----------*----------*----------*----------*
import os
import subprocess
import re
from datetime import datetime
###### Module variables
_here = os.path.dirname(os.path.realpath(__file__))
## Colors!!!
class bcolors:
    """ANSI terminal escape codes for colored output (ENDC resets the color)."""
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
##############################################################################
# Functions
#----------*----------*----------*----------*----------*----------*----------*
## Quickies to get current git hashes. Thanks SO http://stackoverflow.com/questions/14989858/get-the-current-git-hash-in-a-python-script
def get_git_revision_hash():
    """Return the full SHA of the current git HEAD as a string."""
    # check_output returns bytes on Python 3; decode before stripping the
    # newline (bytes.replace("\n", "") raises TypeError there).
    return subprocess.check_output(['git', 'rev-parse', 'HEAD']).decode().replace("\n","")
def get_git_revision_short_hash():
    """Return the abbreviated SHA of the current git HEAD as a string."""
    # Same Python 3 bytes fix as get_git_revision_hash: decode before replace.
    return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).decode().replace("\n","")
def printColor(msg, color):
    """Print ``msg`` wrapped in the given ANSI color code, resetting afterwards."""
    print("".join([color, str(msg), bcolors.ENDC]))
def printYellow(msg):
    """Print ``msg`` in yellow (warning color)."""
    printColor(msg,bcolors.WARNING)
def printGreen(msg):
    """Print ``msg`` in green (success color)."""
    printColor(msg,bcolors.OKGREEN)
def printBlue(msg):
    """Print ``msg`` in blue (info color)."""
    printColor(msg, bcolors.OKBLUE)
def printRed(msg):
    """Print ``msg`` in red (failure color)."""
    printColor(msg,bcolors.FAIL)
| true |
fb7bfb99f1549290c010208d0ae4b3fb24f34ad7 | Python | DadImScared/krsna_us_server | /harikatha_bot/harikatha_bot/spiders/harikatha_spider.py | UTF-8 | 1,188 | 3.03125 | 3 | [] | no_license | """This module contains the class HariKathaSpider it crawls http://www.purebhakti.com/"""
import scrapy
from ..items import HarikathaBotItem
class HariKathaSpider(scrapy.Spider):
    """Collects all links in the content section on the homepage and saves them with the category harikatha"""
    name = "hknewsletter"
    def start_requests(self):
        """Seed the crawl with the site's homepage."""
        urls = [
            'http://www.purebhakti.com/'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)
    def parse(self, response):
        """Collects all links and follows next page"""
        # One item per article headline in the featured-blog listing.
        for quote in response.css('.blog-featuredhas-side .items-row .item h2'):
            yield HarikathaBotItem({
                'link': response.urljoin(quote.css('a::attr(href)').extract_first().strip()),
                'title': quote.css('a::text').extract_first().strip(),
                'category': 'harikatha'
            })
        # Follow pagination until there is no "next" link.
        next_page = response.css('.blog-featuredhas-side .pagination-next a::attr(href)').extract_first()
        if next_page is not None:
            next_page = response.urljoin(next_page)
            yield scrapy.Request(next_page, callback=self.parse)
| true |
c2ccaeff62829d6d0495d23e19da7f623589c9a3 | Python | nitecascade/solid-funicular | /bin/group-summary.py | UTF-8 | 384 | 2.875 | 3 | [] | no_license | #!/usr/bin/env python3
import json
import pprint
import sys
# Print "id 'name'" for each member record in a JSON-lines dump; fall back to
# a full pretty-print of the record when the expected keys are missing.
infile = sys.argv[1]
with open(infile) as fp:
    for line in fp:
        data = json.loads(line.strip())
        member = data["member"]
        try:
            print("{id} {name!r}".format(**member))
        except KeyError as exc:
            print("KeyError: {}".format(exc))
            print(pprint.pformat(data))
| true |
a4cfe85b58ce1c8d8eb537f7a17b218691ea6bb9 | Python | dev-himanshu/basic_python | /BasicPythonConcept/20. Loop_Control_Statement.py | UTF-8 | 417 | 4.21875 | 4 | [] | no_license | # Loop Control Statement.
# There are two types of loop control statement - (a). break and (b). continue.
# break : stop the loop entirely once the breakpoint value is reached.
num = int(input("Enter breakpoint of loop : "))
for i in range(999999999999):
    print(i)
    if i == num:
        print("exit from loop.")
        break  # BUG FIX: without break the loop kept counting far past the breakpoint
# continue : skip the rest of the loop body for one value and keep iterating.
num = int(input("Enter a point at which you want to skip (less than 10) : "))
for i in range(10):
    if i == num:
        continue  # BUG FIX: checked before print so the chosen value is actually skipped
    print(i)
| true |
3a06e0e4831a805163a9e368b205090493109681 | Python | OrpingtonClose/daily | /python/game_of_life_04.py | UTF-8 | 2,417 | 3.15625 | 3 | [] | no_license | import numpy as np
from scipy import signal
from time import sleep
from collections import deque
class AsciiArt:
    # Mixin that prints a "DONE" ASCII-art banner when the simulation ends.
    def hello(self):
        """Print the DONE banner to stdout."""
        print(r"""
 .----------------. .----------------. .-----------------. .----------------.
| .--------------. || .--------------. || .--------------. || .--------------. |
| | ________ | || | ____ | || | ____ _____ | || | _________ | |
| | |_ ___ `. | || | .' `. | || ||_ \|_ _| | || | |_ ___ | | |
| | | | `. \ | || | / .--. \ | || | | \ | | | || | | |_ \_| | |
| | | | | | | || | | | | | | || | | |\ \| | | || | | _| _ | |
| | _| |___.' / | || | \ `--' / | || | _| |_\ |_ | || | _| |___/ | | |
| | |________.' | || | `.____.' | || ||_____|\____| | || | |_________| | |
| | | || | | || | | || | | |
| '--------------' || '--------------' || '--------------' || '--------------' |
 '----------------' '----------------' '----------------' '----------------'
""")
class GameOfLifeBoard(np.ndarray, AsciiArt):
    """Conway Game of Life board stored directly in an ndarray (0 = dead, 1 = alive).

    The board is seeded with a single live column down the middle.
    """

    def __init__(self, *args, **kwargs):
        self.fill(0)
        self[:, self.shape[1] // 2] = 1  # seed: one full live column in the middle
        # Neighbourhood kernel covering the 8 surrounding cells.  BUG FIX: the
        # centre is zeroed so the convolution counts *neighbours* only; the
        # original all-ones kernel included the cell's own value, which turned
        # the survival rule into "1-2 neighbours" instead of Conway's "2-3".
        self.kernel = np.ones((3, 3))
        self.kernel[1, 1] = 0
        self.stay_alive = np.array([2, 3])  # live cell survives with 2 or 3 neighbours
        self.make_alive = np.array([3])     # dead cell is born with exactly 3 neighbours

    def progress(self):
        """Advance the board one generation in place."""
        sums = signal.convolve(self, self.kernel, mode="same")  # per-cell neighbour counts
        stay_alive = np.isin(sums, self.stay_alive)
        is_alive_now = self == 1
        stay_alive_result = is_alive_now & stay_alive
        make_alive = np.isin(sums, self.make_alive)
        is_dead_now = self == 0
        make_alive_result = is_dead_now & make_alive
        alive_result = make_alive_result | stay_alive_result
        self.fill(0)
        self[alive_result] = 1

    def print(self):
        """Render the board as text: 'x' for live cells, spaces for dead ones."""
        bc = self.copy().astype("str")
        bc[self.astype("bool")] = "x"
        bc[~self.astype("bool")] = " "
        # Append a newline column, then .sum() concatenates the object array into one string.
        print(np.append(bc, np.array([['\n']] * self.shape[0]), axis=1).astype(object).sum())

    @property
    def alive(self):
        """Number of live cells on the board."""
        return int(self.sum())
#works only with 50 columns somehow
# Run the simulation until the live-cell count has been constant for the last
# 10 generations (a crude steady-state detector), then print the DONE banner.
c = GameOfLifeBoard((22, 51))
q = deque()
q.append(c.alive)
while True:
    c.progress()
    c.print()
    sleep(0.5)
    q.append(c.alive)
    if len(q) == 10:
        _ = q.popleft()
        # Stops when every pair of queued counts is equal (all 10 identical).
        if len([n for n in q for m in q if n != m]) == 0:
            break
c.hello()
| true |
641c9d2651bc69ef7697e2e37246f685746d0575 | Python | zlsama/deeplearning | /dl01.py | UTF-8 | 2,031 | 2.90625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Sun Jul 1 10:29:50 2018
@author: zlsama
"""
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Synthetic data: y = 2x plus Gaussian noise.
train_x=np.linspace(-1,1,100)
train_y=2*train_x+np.random.randn(*train_x.shape)*0.3
plt.plot(train_x,train_y,'ro',label='Original data')
plt.legend()
plt.show()
# Linear model z = w*x + b using the TensorFlow 1.x graph API.
X=tf.placeholder('float')
Y=tf.placeholder('float')
w=tf.Variable(tf.random_normal([1]),name='weight')
b=tf.Variable(tf.zeros([1]),name='bias')
z=tf.multiply(X,w)+b
cost=tf.reduce_mean(tf.square(Y-z)) # mean squared error
learning_rate=0.01
optimizer=tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
init=tf.global_variables_initializer()
training_epochs=20
display_step=2 # log the loss every 2 epochs
plotdata={'batchsize':[],'loss':[]}
def moving_average(a, w=10):
    """Smooth ``a`` with a trailing window of ``w`` points.

    The first ``w`` entries pass through unchanged; from index ``w`` on,
    each entry is replaced by the mean of the previous ``w`` values.
    Inputs shorter than the window are returned as a shallow copy.
    """
    if len(a) < w:
        return a[:]
    smoothed = []
    for idx, val in enumerate(a):
        if idx < w:
            smoothed.append(val)
        else:
            smoothed.append(sum(a[idx - w:idx]) / w)
    return smoothed
# Train with SGD one sample at a time, logging the loss every display_step epochs.
with tf.Session() as sess:
    sess.run(init)
    plotdata={'batchsize':[],'loss':[]}
    for epoch in range(training_epochs):
        for (x,y) in zip(train_x,train_y):
            sess.run(optimizer,feed_dict={X:x,Y:y})
        if epoch%display_step==0:
            loss=sess.run(cost,feed_dict={X:train_x,Y:train_y})
            print('epoch:',epoch+1,'cost=',loss,'w=',sess.run(w),'b=',sess.run(b))
            if not (loss=='NA'):
                plotdata['batchsize'].append(epoch)
                plotdata['loss'].append(loss)
    print('finished')
    print('cost=',sess.run(cost,feed_dict={X:train_x,Y:train_y}),'w=',sess.run(w),'b=',sess.run(b))
    # Plot the fitted line against the raw data.
    plt.plot(train_x,train_y,'ro',label='Original data')
    plt.plot(train_x,sess.run(w)*train_x+sess.run(b),label='fittedline')
    plt.legend()
    plt.show()
    # Plot the smoothed training-loss curve.
    plotdata['avgloss']=moving_average(plotdata['loss'])
    plt.figure(1)
    plt.subplot(211)
    plt.plot(plotdata['batchsize'],plotdata['avgloss'],'b--')
    plt.ylabel('loss')
    plt.xlabel('minibatch number')
    plt.title('Minibatch run vs. training loss')
    plt.show()
| true |
3675c1840cd22ddba6da00c024610c4a880f6626 | Python | DouglasKlafkeScheibler/Central_Scripts_DKS | /hidr_defs/test.py | UTF-8 | 545 | 2.53125 | 3 | [] | no_license | import sys
from functools import partial
from datetime import datetime
# Dump MLT.DAT as a stream of little-endian 32-bit integers, one per line.
file = 'MLT.DAT'
with open(file,'rb') as f:
    # iter(callable, sentinel): keep reading 4-byte chunks until EOF yields b''.
    for measurement in iter(partial(f.read, 4), b''):
        print(int.from_bytes(measurement, byteorder='little'))
# for measurement in iter(partial(f.read, 4), b''):
# data[currentStation].append(int.from_bytes(measurement, byteorder='little'))
# currentStation = currentStation + 1
# if currentStation == stations:
# currentStation = 0 | true |
60c25d2da7ab37147820147afd8c2b3d11779aa7 | Python | fabriciolelis/python_studying | /HackerRank/Basic Data Types/Nested Lists/main.py | UTF-8 | 628 | 3.078125 | 3 | [] | no_license | if __name__ == '__main__':
    # Read N students as [name, grade] pairs.
    python_students = []
    for _ in range(int(input())):
        name = input()
        score = float(input())
        student = []
        student.append(name)
        student.append(score)
        python_students.append(student)
    # Sort by grade, then drop every student holding the lowest grade.
    python_students.sort(key=lambda student: student[1])
    elem = python_students[0][1]
    # elem is a float, so it can only match grades (not names) in the flattened pairs.
    while elem in [j for i in python_students for j in i]:
        del python_students[0]
    # The head of the remainder now holds the second-lowest grade.
    second = python_students[0][1]
    python_students.sort()
    # Print (in alphabetical order) every student with the second-lowest grade.
    for i in range(len(python_students)):
        if second == python_students[i][1]:
            print(python_students[i][0])
87814e2e87f9c4eca1ce2f6de89a2e65fc80ef26 | Python | gianniskok/OpenCv-Uni-Projects-Greek | /ΚΟΚΚΟΡΟΣ ΙΩΑΝΝΗΣ 57090 ΕΡΓΑΣΙΑ 2/surf.py | UTF-8 | 6,558 | 2.671875 | 3 | [] | no_license | import cv2
import numpy as np
import time
start = time.time() # start the timer
print("Start")
"""
ena = cv2.imread('termaaristera.jpg', cv2.COLOR_BGR2GRAY)
dyo = cv2.imread('aristera.jpg', cv2.COLOR_BGR2GRAY)
tria = cv2.imread('deksia.jpg', cv2.COLOR_BGR2GRAY)
tessera = cv2.imread('termadeksia.jpg', cv2.COLOR_BGR2GRAY)
leftest=cv2.cvtColor(ena, cv2.COLOR_BGR2GRAY)
left =cv2.cvtColor(dyo, cv2.COLOR_BGR2GRAY)
right =cv2.cvtColor(tria, cv2.COLOR_BGR2GRAY)
rightest =cv2.cvtColor(tessera, cv2.COLOR_BGR2GRAY)
"""
# Load the four overlapping views of the scene, named left-to-right.
leftest = cv2.imread('hotel-03.png', cv2.COLOR_BGR2GRAY)
left = cv2.imread('hotel-02.png', cv2.COLOR_BGR2GRAY)
right = cv2.imread('hotel-01.png', cv2.COLOR_BGR2GRAY)
rightest = cv2.imread('hotel-00.png', cv2.COLOR_BGR2GRAY)
surf = cv2.xfeatures2d.SURF_create()
kp1, des1 = surf.detectAndCompute(rightest, None) # compute keypoints and descriptors based on the keypoints
kp2, des2 = surf.detectAndCompute(right, None)
kp3, des3 = surf.detectAndCompute(left, None)
kp4, des4 = surf.detectAndCompute(leftest, None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
match = cv2.FlannBasedMatcher(index_params, search_params) # alternative implementation of BFMatcher()
matches = match.knnMatch(des1, des2, k=2) # match keypoints between image pairs
matches2 = match.knnMatch(des3, des4, k=2)
good1 = []
good2 = []
for m, n in matches:
    if m.distance < 0.75*n.distance: # Lowe's ratio test: keep only matches clearly better than the runner-up
        good1.append(m)
draw_params = dict(matchColor=(0, 0, 255), singlePointColor=None, flags=2)
left2 = cv2.drawMatches(rightest, kp1, right, kp2, good1, None, **draw_params) # draw the point correspondences
for m, n in matches2:
    if m.distance < 0.75 *n.distance:
        good2.append(m)
draw_params = dict(matchColor=(0, 0, 255), singlePointColor=None, flags=2)
right2 = cv2.drawMatches(left, kp3, leftest, kp4, good2, None, **draw_params)
MIN_MATCH_COUNT = 10
if len(good1) > MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good1]).reshape(-1, 1, 2) # matched points in the rightmost image
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good1]).reshape(-1, 1, 2) # corresponding points in the right image
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0) # homography mapping the first image onto the second
    h, w = rightest.shape
    pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
else:
    # NOTE(review): M is left undefined on this branch, so the warp below
    # would raise a NameError — confirm the intended fallback behavior.
    print("Not enough matches are found1", (len(good1)))
dst = cv2.warpPerspective(rightest, M, (rightest.shape[1] + rightest.shape[1], right.shape[0])) # warp onto a double-width canvas so the merged output fits
dst[0:rightest.shape[0], 0:rightest.shape[1]] = right # paste the right-image pixels into their slot on the canvas
"""
#Ακολοθει μεθοδολογια για περικοπη του μαυρου τμηματος της εικονας μετα την συχγωνευση με error στην τελικη ενωση could not broadcast input array from shape (768,1639) into shape (768,1546)
_, thresh = cv2.threshold(dst, 1, 255, cv2.THRESH_BINARY)
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) #εύρεση contours,δηλαδή καμπυλών που αποτελούν τα όρια μεταξύ εικόνας και μαύρων επιφανειών
cnt = contours[0]
x, y, w, h = cv2.boundingRect(cnt) #ορθογώνιο περίγραμμα γύρω από τα contours
dst = dst[y:y + h, x:x + w] #περικοπή μαύρων επιφανειών
"""
cv2.imwrite('output1.jpg', dst)
#cv2.imshow('main1', dst)
#cv2.waitKey(0)
# Second stitching pass: homography between the two left-hand images.
if len(good2) > MIN_MATCH_COUNT:
    # Matched keypoint coordinates for RANSAC homography estimation.
    src_pts2 = np.float32([kp3[m.queryIdx].pt for m in good2]).reshape(-1, 1, 2)
    dst_pts2 = np.float32([kp4[m.trainIdx].pt for m in good2]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts2, dst_pts2, cv2.RANSAC, 5.0)
    h, w = leftest.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
else:
    # NOTE(review): on failure M keeps its value from the previous pass,
    # and the warp below still runs -- probably not intended; verify.
    print("Not enough matches are found2", (len(good2)))
dst2 = cv2.warpPerspective(left, M, (left.shape[1] + left.shape[1], leftest.shape[0]))
# Overlay the untouched leftmost image on the warped canvas.
dst2[0:left.shape[0], 0:left.shape[1]] = leftest
"""
_, thresh = cv2.threshold(dst2, 1, 255, cv2.THRESH_BINARY)
contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]
x, y, w, h = cv2.boundingRect(cnt)
dst2 = dst2[y:y + h, x:x + w]
"""
# Second intermediate panorama.
cv2.imwrite('output2.jpg', dst2)
#cv2.imshow('main1', dst2)
#cv2.waitKey(0)
# Reload the two intermediate panoramas for the final stitching pass.
# NOTE(review): cv2.COLOR_BGR2GRAY is a cvtColor code, not an imread flag;
# as an imread flag its value selects an unrelated mode -- the intent was
# probably cv2.IMREAD_GRAYSCALE.  Confirm before changing.
finalleft = cv2.imread('output2.jpg', cv2.COLOR_BGR2GRAY)
finalright = cv2.imread('output1.jpg', cv2.COLOR_BGR2GRAY)
# SURF keypoints/descriptors of both panoramas.
kp5, des5 = surf.detectAndCompute(finalleft, None)
kp6, des6 = surf.detectAndCompute(finalright, None)
# k-NN descriptor matching followed by Lowe's ratio test (ratio 0.88).
matches3 = match.knnMatch(des5, des6, k=2)
good3 = []
for m, n in matches3:
    if m.distance < 0.88*n.distance:
        good3.append(m)
draw_params = dict(matchColor=(0, 0, 255), singlePointColor=None, flags=2)
final = cv2.drawMatches(finalleft, kp5, finalright, kp6, good3, None, **draw_params)
# Final pass: estimate the homography between the two intermediate
# panoramas and merge them into the finished mosaic.
if len(good3) > MIN_MATCH_COUNT:
    # Fixed copy-paste bug: this branch previously indexed kp3/kp4 with
    # good2 (the keypoints and matches of the *previous* pass) instead of
    # kp5/kp6 with good3, so the final homography was computed from the
    # wrong images.
    src_pts3 = np.float32([kp5[m.queryIdx].pt for m in good3]).reshape(-1, 1, 2)
    dst_pts3 = np.float32([kp6[m.trainIdx].pt for m in good3]).reshape(-1, 1, 2)
    M, mask = cv2.findHomography(src_pts3, dst_pts3, cv2.RANSAC, 5.0)
    h, w = finalleft.shape
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
else:
    # Fixed: report the size of good3, not good2, in the diagnostic.
    print("Not enough matches are found3", (len(good3)))
# Warp the right panorama into the left one's frame and overlay the left.
dst3 = cv2.warpPerspective(finalright, M, (finalright.shape[1] + finalright.shape[1], finalleft.shape[0]))
dst3[0:finalright.shape[0], 0:finalright.shape[1]] = finalleft
cv2.imwrite('output3.jpg', dst3)
#cv2.imshow('main', dst3)
#cv2.waitKey(0)
end = time.time()  # stop timing the whole pipeline
print('End')
print(end - start)
| true |
6d8a9f20013e14c747da6b4769d5a4caa69b22b5 | Python | jfyao90/ArcFaceKeras | /metrics.py | UTF-8 | 1,464 | 2.78125 | 3 | [] | no_license | import math
import tensorflow as tf
from keras import backend as K
__all__=['ArcFaceLoss', 'logit_categorical_crossentropy']
class ArcFaceLoss():
    """Additive Angular Margin (ArcFace) loss.

    Expects ``y_pred`` to be the cosine similarity between embeddings and
    the per-class weight vectors; an angular margin ``m`` is added to the
    true-class angle and the logits are scaled by ``s`` before the softmax
    cross entropy.

    :param s: scale applied to the margin-adjusted logits
    :param m: additive angular margin (radians)
    :param n_classes: number of classes (used to one-hot sparse labels)
    :param sparse: if True, ``y_true`` holds integer class ids
    """

    def __init__(self, s=30.0, m=0.5, n_classes=10, sparse=False, **kwargs):
        self.s = s
        self.cos_m = math.cos(m)
        self.sin_m = math.sin(m)
        # Threshold/penalty used when theta + m would wrap past pi.
        self.th = math.cos(math.pi - m)
        self.mm = math.sin(math.pi - m) * m
        self.sparse = sparse
        self.n_classes = n_classes

    def __call__(self, y_true, y_pred, **kwargs):
        cosine = tf.cast(y_pred, tf.float32)
        if self.sparse:
            # Fixed: the sparse branch previously cast and one-hot encoded
            # *y_pred* (the cosine logits) instead of the integer labels
            # carried in y_true.
            labels = tf.cast(y_true, tf.int32)
            labels = tf.one_hot(labels, depth=self.n_classes)
        else:
            labels = tf.cast(y_true, tf.float32)
        sine = tf.sqrt(1 - tf.square(cosine))
        # cos(theta + m) via the angle-addition identity.
        phi = cosine * self.cos_m - sine * self.sin_m
        phi = tf.where(cosine > self.th, phi, cosine - self.mm)
        # Apply the margin only to the true-class logit.
        output = (labels * phi) + ((1.0 - labels) * cosine)
        output *= self.s
        # Fixed: feed the (possibly one-hot encoded) labels to the cross
        # entropy; passing raw integer y_true here was invalid in sparse
        # mode.  In dense mode labels == cast(y_true, float32), so the
        # non-sparse behaviour is unchanged.
        losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels, output)
        return K.mean(losses) / 2
def logit_categorical_acc(y_true, y_pred):
    """Categorical accuracy for models that output raw logits.

    Keras' built-in accuracy metric operates on probabilities, so the
    logits are pushed through a softmax first.  (Softmax is monotonic, so
    this does not change the argmax; it mirrors the probability metric.)
    """
    probs = tf.nn.softmax(y_pred)
    hits = K.equal(K.argmax(y_true, axis=-1), K.argmax(probs, axis=-1))
    return K.cast(hits, K.floatx())
| true |
abe34e70aec2f1b42bf1d1080a031077efcbaff1 | Python | kumarsumit1/pythonapp | /learnPython.org/L11_Dictionaries.py | UTF-8 | 974 | 4.5625 | 5 | [] | no_license | #A dictionary works with keys and values
# A dictionary maps keys to values; entries can be added one at a time.
phonebook = dict()
for person, number in (("John", 938477566),
                       ("Jack", 938377264),
                       ("Jill", 947662781)):
    phonebook[person] = number
print(phonebook)
# The same mapping written as a single constructor call.
phonebook1 = dict(John=938477566, Jack=938377264, Jill=947662781)
print(phonebook1)
# items() yields (key, value) pairs in insertion order.
for name, number in phonebook.items():
    print("Phone number of {} is {}".format(name, number))
# Entries can be removed with pop() or with the del statement; both raise
# KeyError when the key is missing.
phonebook.pop("John")
print(phonebook)
del phonebook1["John"]
print(phonebook1)
# defaultdict creates a missing key on first access instead of raising
# KeyError; the factory passed to it (here int) supplies the default value.
from collections import defaultdict
someddict = defaultdict(int)
for key in (3, 3, 4):
    print(someddict[key])  # int() == 0 every time
| true |
f5e438e204a147dccad9866ab3b81503fc8bd604 | Python | LimHaksu/algorithm | /baekjoon/solved/old/11021/11021.py3.py | UTF-8 | 123 | 3.5625 | 4 | [] | no_license | t = int(input())
# Run t test cases; i is kept 1-based so the "Case #" labels start at 1.
i = 0
while i < t:
    i += 1
    x, y = map(int, input().split())
    print('Case #{}: {}'.format(i, x + y))
c6dd5a1744c939337f2dcb7f6c138c87cfc82411 | Python | danionescu0/home-automation | /python-server/ifttt/parser/Tokenizer.py | UTF-8 | 2,526 | 2.828125 | 3 | [] | no_license | import re
from typing import List
from logging import RootLogger
from typeguard import typechecked
from ifttt.parser.Token import Token
from ifttt.parser.ParseException import ParseException
from ifttt.parser.TokenConverter import TokenConverter
class Tokenizer:
    """Splits a rule expression into a list of :class:`Token` objects."""

    # (regex pattern, token type) pairs, tried in order; the first pattern
    # that matches a word decides its token type.
    __token_rules = [
        ('A\[([0-9a-zA-Z\._]*)\]', Token.TYPE_ACTUATOR),
        ('S\[([0-9a-zA-Z\._]*)\]', Token.TYPE_SENSOR),
        ('SL\[([0-9a-zA-Z\._]*)\]', Token.TYPE_SENSOR_LAST_UPDATED),
        ('TIME', Token.TYPE_CURRENT_TIME),
        ('gt', Token.TYPE_EXPR_GREATER),
        ('lt', Token.TYPE_EXPR_LESS),
        ('btw', Token.TYPE_EXPR_BETWEEN),
        ('eq', Token.TYPE_EXPR_EQUAL),
        ('and', Token.TYPE_BOOLEAN_AND),
        ('or', Token.TYPE_BOOLEAN_OR),
        ('True|False', Token.TYPE_LITERAL_BOOLEAN),
        ('On|Off', Token.TYPE_ACTUATOR_STATE),
        ('[0-9]{1,2}\:[0-9]{1,2}', Token.TYPE_LITERAL_TIME),
        ('\d+', Token.TYPE_LITERAL_INT),
    ]

    def __init__(self, root_logger: RootLogger) -> None:
        self.__root_logger = root_logger
        self.__token_converters = []

    def add_token_converter(self, token_converter: TokenConverter):
        """Register a converter used to post-process raw token values."""
        self.__token_converters.append(token_converter)

    @typechecked()
    def tokenize(self, text: str) -> List[Token]:
        """Tokenize *text*; parentheses and commas act as whitespace."""
        tokens = []
        for word in self.__get_cleanned_text(text).split():
            tokens.append(self.__get_token(word))
        return tokens

    def __get_cleanned_text(self, text: str) -> str:
        # Parentheses and commas are purely decorative in the rule syntax.
        return re.sub('[(),]', ' ', text)

    def __get_token(self, token_text: str) -> Token:
        """Map one word to a Token, or raise ParseException."""
        for pattern, token_type in self.__token_rules:
            found = re.findall(pattern, token_text)
            if found:
                value = self.__get_token_value(token_type, found[0])
                return Token(token_text, token_type, value)
        raise ParseException('Cannot parse token symbol: {0}'.format(token_text))

    def __get_token_value(self, token_type: str, token_raw_value: str):
        # Exactly one registered converter must claim the token type;
        # otherwise the raw value is returned unchanged.
        matching = [converter for converter in self.__token_converters
                    if converter.get_supported_token() == token_type]
        if len(matching) != 1:
            return token_raw_value
        converter = matching[0]
        value = converter.get_value(token_raw_value)
        self.__root_logger.debug('Value of token converter ({0}) for token raw value ({1}) is ({2})'
                                 .format(type(converter), token_raw_value, value))
        return value
e9531f465eb75eb0a91f3bf8b0a02fe5904e4eb2 | Python | yangzongwu/leetcode | /archives/leetcode2/0048. Rotate Image.py | UTF-8 | 500 | 2.890625 | 3 | [] | no_license | class Solution:
def rotate(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: void Do not return anything, modify matrix in-place instead.
"""
n=len(matrix)-1
for row in range(0,n//2+1):
for column in range(row,n-row):
matrix[row][column], matrix[column][n-row],matrix[n-row][n-column],matrix[n-column][row]=
matrix[n-column][row],matrix[row][column], matrix[column][n-row],matrix[n-row][n-column]
| true |
0249ff5cb313f3e5a8e5139226c0184c66f2bac9 | Python | mozhubert/redmine | /redmine/issues.py | UTF-8 | 1,507 | 2.640625 | 3 | [] | no_license | # -*- coding: utf8 -*-
import requests
import json
import config
class Info:
    # Thin client for the Redmine REST API (Python 2: print statements).
    # Every method expects `url` to be an issues query returning JSON with
    # a 'total_count' field and, per page, an 'issues' array.

    def amount(self, url):
        # Return the total number of issues matched by the query.
        CheckURL = requests.get(url)
        doc = json.loads(CheckURL.text)
        return doc['total_count']

    def list(self, url):
        # Print id, priority name and subject of every matched issue,
        # paging through results 25 at a time via the &offset parameter.
        CheckURL = requests.get(url)
        doc = json.loads(CheckURL.text)
        if doc['total_count'] > 0:
            # NOTE(review): `doc` is overwritten inside the loop, so the
            # page count comes from the first response only; Python 2 '/'
            # is integer division here.  Verify paging on exact multiples
            # of 25 (the +1 fetches one possibly-empty extra page).
            for i in range(0, (doc['total_count']/25)+1):
                NewURL = url + '&offset={}'.format(i*25)
                CheckURL = requests.get(NewURL)
                doc = json.loads(CheckURL.text)
                for j in range(0, len(doc['issues'])):
                    issue = doc['issues'][j]
                    print issue['id'],
                    print issue['priority']['name'],
                    print issue['subject']
        else:
            print "Without any issue"
            return False

    def idlist(self, url):
        # Return a list with the id of every matched issue (paged as in
        # list()), or False when the query matches nothing.
        CheckURL = requests.get(url)
        doc = json.loads(CheckURL.text)
        if doc['total_count'] > 0:
            idlist = []
            for i in range(0, (doc['total_count'] / 25) + 1):
                NewURL = url + '&offset={}'.format(i*25)
                print NewURL
                CheckURL = requests.get(NewURL)
                doc = json.loads(CheckURL.text)
                for j in range(0, len(doc['issues'])):
                    idlist.append(doc['issues'][j]['id'])
            return idlist
        else:
            print "Without any issue"
            return False
57b5790730cb70b858b3d805b491a1f5354fc422 | Python | GEDS1990/danmu_robot | /d_robot/show_base_data.py | UTF-8 | 456 | 2.8125 | 3 | [] | no_license | #!/usr/bin/env python3
#-*- coding:utf-8 -*-
import sys
class sbd():
    """Loads the robot's base phrases from ``base_data.txt`` in the CWD."""

    def __init__(self):
        # One list entry per line of the file, trailing newlines removed.
        self.base_data = []
        with open('base_data.txt', 'r') as f2:
            # Idiom fix: build the list in one pass instead of mutating a
            # readlines() result index by index.
            self.base_data = [line.rstrip('\n') for line in f2]
if __name__ == '__main__':
    # Quick manual check: load the file and dump its contents to stdout.
    show_base_data = sbd()
    print('\n\n')
    print(show_base_data.base_data)
    print('\n\n')
ba18d5c0dc6d7971357862d56bcda78f3e5042fc | Python | ckaros/pythonic_dactyl | /pythonic_dactyl.py | UTF-8 | 7,378 | 2.609375 | 3 | [] | no_license | print('plates')
# Main 5x6 key well.  Keyboard_matrix is a project CSG helper; argument
# meanings are not visible here -- presumably (rows, cols, offsets,
# origin, tilt angles, key pitch 19x19mm); confirm against its definition.
rows=5
columns=6
plate = Keyboard_matrix(5, 6, 0, 0, 1, [0,0,10], 0,15,0, 19,19)
plate.project_rows(90)
plate.project_cols(200)
plate.ik[0][0]=True
# Per-column height tweaks (stagger) on the key centre matrix.
plate.cm[1][1]=plate.cm[1][1]+3
plate.cm[2][1]=plate.cm[2][1]+6
plate.cm[3][1]=plate.cm[3][1]+4
#plate.cm[4][1]=plate.cm[4][1]+1
plate.cm[2][2]=plate.cm[2][2]-6
plate.cm[3][2]=plate.cm[3][2]-3
plate.generate()
# 3x3 thumb cluster; ik flags mark positions left without a key.
plate2 = Keyboard_matrix(3, 3, 0, 0, 10, [-20,8,16], 15,15,0, 19,19)
plate2.ik[2][2]=True
plate2.ik[1][2]=True
plate2.ik[1][1]=True
plate2.project_rows(80)
plate2.project_cols(250)
# Shift/extend the two 2u thumb keys so they span two rows.
plate2.im[0][2][1]=plate2.im[0][2][1]+9.5
plate2.im[0][1][1]=plate2.im[0][1][1]+9.5
plate2.im[0][2][3]=plate2.im[0][2][3]+5
plate2.im[0][1][3]=plate2.im[0][1][3]+5
plate2.generate()
# The whole thumb cluster is rotated by this many degrees further below.
thumbangle=12
#hulls connecting thumb and matrix
print('plate hulls')
#hull 2u key to 1u keys around them
plate2.column_hulls[0][0].disable()
# `conn` accumulates convex hulls that bridge gaps between thumb keys and,
# after rotation, between the thumb cluster and the main key well.
conn=(plate2.sm[1][0].get_right()+plate2.sm[0][1].get_left()).hull()
conn+=(plate2.sm[1][0].get_corner('fr', 2, 3, 2, 3)+plate2.sm[0][1].get_front()).hull()
conn+=(plate2.sm[2][1].get_back()+plate2.sm[0][1].get_front()).hull()
#extend 2u keys down to make border of cluster
conn+=(plate2.sm[0][1].get_back(0.01,extrude=9.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=9.5)).hull()
#hull extensions to bottom left
conn+=(plate2.sm[0][1].get_corner('bl', 0,0,0.01, 9.5)+\
    plate2.sm[1][1].get_left()+\
    plate2.sm[0][0].get_right()).hull()
#rotate cluster
conn=conn.rotate(thumbangle)
#hull right 2u key to keywell
conn+=(plate2.sm[0][2].get_right().rotate(thumbangle)+\
    plate.sm[0][1].get_left()+\
    plate.sm[0][1].get_corner('bl',0,0,0.01,0.01)).hull()
#hull top right 1u key to keywell
conn+=(plate2.sm[2][1].get_right(extrude=2).rotate(thumbangle)+\
    plate.sm[1][0].get_left(0.01,0)+\
    plate.sm[2][0].get_corner('bl',0,0,0.01,0.01)).hull()
#hull middle of cluster to keywell
conn+=(plate2.sm[0][2].get_front().rotate(thumbangle)+\
    plate2.sm[2][1].get_corner('br', 2, 3, 2, 3).rotate(thumbangle)+\
    plate2.sm[0][1].get_corner('fr', 2, 3, 2, 3).rotate(thumbangle)+\
    plate.sm[1][0].get_back()).hull()
# Disable the auto-generated wall pieces the bridges above replace.
plate.left_wall[1].disable()
plate.left_wall_hulls[0].disable()
plate.corner_hulls[0][0].disable()
print('case hulls')
#hull front of cluster case to main case
#create front wall for cluster (needs elegant solution)
# `largefront` minus a slightly grown `smallfront` leaves a thin shell used
# as the cluster's front wall; the four translated copies fatten smallfront
# by 1 unit in each direction (a poor man's offset/minkowski).
largefront=((plate2.sm[0][1].get_back(0.01,extrude=12.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=12.5)).hull())
smallfront=((((plate2.sm[0][1].get_back(0.01,extrude=9.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=9.5)+\
    (plate2.sm[0][1].get_back(0.01,extrude=-1)+\
    plate2.sm[0][2].get_back(0.01,extrude=-1))))).hull())#.scale([1.1,1.1,1.1]))
smallfront+=((((plate2.sm[0][1].get_back(0.01,extrude=9.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=9.5)+\
    (plate2.sm[0][1].get_back(0.01,extrude=-1)+\
    plate2.sm[0][2].get_back(0.01,extrude=-1))))).hull()).translate([0,0,1])
smallfront+=((((plate2.sm[0][1].get_back(0.01,extrude=9.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=9.5)+\
    (plate2.sm[0][1].get_back(0.01,extrude=-1)+\
    plate2.sm[0][2].get_back(0.01,extrude=-1))))).hull()).translate([0,0,-1])
smallfront+=((((plate2.sm[0][1].get_back(0.01,extrude=9.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=9.5)+\
    (plate2.sm[0][1].get_back(0.01,extrude=-1)+\
    plate2.sm[0][2].get_back(0.01,extrude=-1))))).hull()).translate([-1,0,0])
smallfront+=((((plate2.sm[0][1].get_back(0.01,extrude=9.5)+\
    plate2.sm[0][2].get_back(0.01,extrude=9.5)+\
    (plate2.sm[0][1].get_back(0.01,extrude=-1)+\
    plate2.sm[0][2].get_back(0.01,extrude=-1))))).hull()).translate([1,0,0])
# `caconn` joins the cluster's case walls to the main case.
caconn=project((plate2.sm[0][0].get_corner('br',0,0,0.01,3)+(largefront-smallfront)).hull())
caconn=caconn.rotate(thumbangle)
caconn+=project((plate2.sm[2][1].get_corner('fr', 2, 3, 2, 3).rotate(thumbangle)+plate.sm[2][0].get_corner('bl', 2, 3, 2, 3)).hull())
caconn+=project((plate.sm[0][1].get_corner('bl', 0, 0, 0.01,0.01)+\
    plate2.sm[0][2].get_corner('br', 0, 0, 2, 3).rotate(thumbangle)+\
    plate2.sm[0][2].get_corner('br', 0, 0, 2, 12.5).rotate(thumbangle)).hull())
# Auto-generated cluster walls superseded by caconn.
plate2.right_wall[0].disable()
plate2.right_wall_hulls[0].disable()
plate2.front_right_corner.disable()
plate2.back_right_corner.disable()
plate2.back_wall[1].disable()
plate2.back_wall[2].disable()
plate2.back_wall_hulls[1].disable()
plate2.back_wall_hulls[0].disable()
plate2.right_wall_hulls[1].disable()
print('mounts')
# Outer wall segments of the main key well: extrude thin (0.8) walls from
# the border keys and drop them 3 units so they sit below the plate.
for c in range(columns):
    for r in range(rows):
        if c==0 and r==0:
            # First border key starts the union.
            mount=plate.sm[r][c].get_left(thickness=0.8,extrude=False).translate([0,0,-3])
        elif c==0 and r==1:
            # Fix: a bare `None` expression was used as a no-op here; `pass`
            # is the explicit statement for "do nothing".  The segment is
            # skipped -- presumably because plate.left_wall[1] is disabled
            # above and the thumb-cluster bridge covers this edge (verify).
            pass
        elif c==0:
            mount+=plate.sm[r][c].get_left(thickness=0.8,extrude=False).translate([0,0,-3])
        elif c==columns-1:
            mount+=plate.sm[r][c].get_right(thickness=0.8,extrude=False).translate([0,0,-3])
        if r==0:
            mount+=plate.sm[r][c].get_back(thickness=0.8,extrude=False).translate([0,0,-3])
        elif r==rows-1:
            mount+=plate.sm[r][c].get_front(thickness=0.8,extrude=False).translate([0,0,-3])
# Wall segments of the rotated 3x3 thumb cluster.
for c in range(3):
    for r in range(3):
        if c==0 and r==0:
            mount2=plate2.sm[r][c].get_left(thickness=0.8,extrude=False).rotate(thumbangle).translate([0,0,-3])
            mount2+=plate2.sm[r][c].get_back(thickness=0.8,extrude=False).rotate(thumbangle).translate([0,0,-3])
        elif c==0:
            mount2+=plate2.sm[r][c].get_left(thickness=0.8,extrude=False).rotate(thumbangle).translate([0,0,-3])
        elif c>0 and r==0:
            mount2+=plate2.sm[r][c].get_back(thickness=0.8,extrude=False).translate([0,-9.5,-3]).rotate(thumbangle)
        elif r==3-1:
            mount2+=plate2.sm[r][c].get_front(thickness=0.8,extrude=False).rotate(thumbangle).translate([0,0,-3])
print('keys')
# Collect keycap models for preview rendering; [0][0] of the main plate is
# skipped (its ik flag marks it as having no key).
keys=[]
for row in range(rows):
    for column in range(columns):
        if row+column>0:
            #plate.sm[row][column].get_keyswitch()
            keys.append(plate.sm[row][column].get_keycap())
# The six populated thumb-cluster caps, rotated with the cluster.
keys.append(plate2.sm[0][0].get_keycap().rotate(thumbangle))
keys.append(plate2.sm[0][1].get_keycap().rotate(thumbangle))
keys.append(plate2.sm[0][2].get_keycap().rotate(thumbangle))
keys.append(plate2.sm[1][0].get_keycap().rotate(thumbangle))
keys.append(plate2.sm[2][0].get_keycap().rotate(thumbangle))
keys.append(plate2.sm[2][1].get_keycap().rotate(thumbangle))
# Hulled pair of cylinders forms an oblong cable pass-through in the case.
cable_hole = Cylinder(30, 7, center=True).rotate([90,0,0])
cable_hole = (cable_hole + cable_hole.translate([10,0,0])).hull().translate([26,100,0]).color("Blue")
print('unions')
# Final assemblies.  NOTE(review): `mount` (main-matrix walls), `keys` and
# `cable_hole` built above are never unioned into any output -- confirm
# whether that is intentional.
right_hand=plate.get_matrix()+plate2.get_matrix().rotate(thumbangle)+conn+mount2+caconn
pl=plate2.get_plate().rotate(thumbangle)+plate.get_plate()+conn
ca=plate2.get_walls().rotate(thumbangle)+plate.get_walls()+mount2+caconn
print('writing')
# Emit OpenSCAD sources; left-hand variants are mirror images.
(pl).write(r"\things\pythonic_dactyl_plate.scad")
(ca-pl).write(r"\things\pythonic_dactyl_case.scad")
(right_hand).write(r"\things\pythonic_dactyl.scad")
(pl.mirror([-1,0,0])).write(r"\things\pythonic_dactyl_plate_left.scad")
((ca-pl).mirror([-1,0,0])).write(r"\things\pythonic_dactyl_case_left.scad")
c27ad91be9c8d76699679c09e90adafc82d93735 | Python | buzztroll/mixcoatl | /bin/dcm-list-firewall-rules | UTF-8 | 1,419 | 2.796875 | 3 | [
"Apache-2.0"
] | permissive | #!/usr/bin/env python
from mixcoatl.network.firewall import Firewall
from prettytable import PrettyTable
import argparse
import sys

if __name__ == '__main__':
    # CLI: one required firewall id, optional -v/--verbose flag.
    parser = argparse.ArgumentParser()
    parser.add_argument('firewallid', help='Firewall ID')
    parser.add_argument("--verbose", "-v", help="Produce verbose output", action="store_true")
    cmd_args = parser.parse_args()

    f = Firewall(cmd_args.firewallid)
    result = f.load()

    # NOTE(review): load() is treated as returning None on success and a
    # non-None value on failure -- confirm against the mixcoatl API.
    if result is not None:
        print("Cannot find the Firewall by the ID.")
        sys.exit(1)

    rules = f.rules

    if cmd_args.verbose:
        # Verbose mode: let each rule pretty-print itself in full.
        for rule in rules:
            rule.pprint()
    else:
        # Default mode: one table row per firewall rule.
        firewall_rules_table = PrettyTable(["Firewall Rule ID", "Source", "Source Type",
                                           "Destination", "Destination Type", "Protocol",
                                           "Direction", "Start Port", "End Port",
                                           "Permission", "Precedence"])
        for rule in rules:
            firewall_rules_table.add_row([rule.firewall_rule_id, rule.source, rule.source_type,
                                          rule.destination, rule.destination_type, rule.protocol,
                                          rule.direction, rule.start_port, rule.end_port,
                                          rule.permission, rule.precedence])
        print(firewall_rules_table)
| true |
a9453899c138d05448c2e7bc5aae6ecd90277482 | Python | jwbaldwin/stock-sorting-alg | /compareStock.py | UTF-8 | 2,631 | 3.203125 | 3 | [] | no_license | # James Baldwin 7/28/2016
# Program to compare stock movement to index marker and determine CBD
# Let user pick initial and index stock
import numpy as np
import pandas as pd
import csv
from pullData import scrape_list
SITE = "http://en.wikipedia.org/wiki/List_of_S%26P_500_companies"
def ticker_exists(ticker):
    """Return True if *ticker* is in the scraped S&P 500 list.

    'SPINDEX' is accepted as a pseudo-ticker so the user can compare a
    stock against the index itself.
    """
    tickers = scrape_list(SITE)
    tickers.append('SPINDEX')
    # Idiom fix: the membership test already yields the boolean we want;
    # no if/else returning True/False is needed.
    return ticker in tickers
#grab each csv using the ticker +.csv
def grab_stocks_to_compare(stock_main, stock_index, beta_time):
    # Compute and print the beta of stock_main against stock_index over the
    # last beta_time trading days, reading 'data/<TICKER>.csv' files that
    # must contain an 'Adj Close' column.  (Python 2 print statements.)
    print "Gathering data for %s to compare against %s" % (stock_main, stock_index)
    #Here I need to get each adj close of the stocks and slice it so the
    # days represented are equal in both cases
    #ERROR: Soemthign wrong when comparing two different stocks (maybe index?)
    main_df = pd.read_csv('data/'+stock_main+'.csv')
    index_df = pd.read_csv('data/'+stock_index+'.csv')

    #Get both stocks: Columns and amount by time
    #a_adj_close = main_df['Adj Close']
    #a = a_adj_close.tail(beta_time)
    #b_adj_close = index_df['Adj Close']
    #b = b_adj_close.tail(beta_time)
    #covariance = np.cov(a,b)[0][1]
    #variance = np.var(b)
    #beta = covariance / variance
    # beta = cov(stock, index) / var(index), taken from the 2x2 covariance
    # matrix of the two tail series.
    covmat = np.cov(main_df["Adj Close"].tail(beta_time), index_df["Adj Close"].tail(beta_time))
    beta = covmat[0,1]/covmat[1,1]
    print "The beta for your stock is: " + str(beta)
    print "Using the amount of days: " + str(beta_time)
#get the stocks from the user
def get_stocks():
    # Interactive prompt loop (Python 2: raw_input).  Asks for a main
    # ticker, an index ticker and a day count, then runs the comparison.
    # Any invalid ticker or exception breaks out of the loop.
    prompt= '> '
    print "Please pick a ticker from the S&P 500 -------"
    while True:
        try:
            print "Input the ticker for the stock to compare to an index: "
            stock_main = raw_input(prompt)
            if not ticker_exists(stock_main):
                print "Sorry, that ticker doesn't exist"
                break
            else:
                try:
                    print "Input the ticker for the index: "
                    stock_index = raw_input(prompt)
                    if not ticker_exists(stock_index):
                        print "Sorry, that ticker doesn't exist"
                        break
                    else:
                        print "Enter the amount of days: "
                        beta_time = int(raw_input(prompt))
                        grab_stocks_to_compare(stock_main, stock_index, beta_time)
                        break
                # NOTE(review): the broad Exception handlers swallow any
                # parse/IO error and just print it; only the inner one
                # breaks, the outer one lets the loop retry.
                except Exception, e:
                    print e
                    break
        except Exception, e:
            print e
            print 'oops'
# Run the interactive flow on import/execution.
get_stocks()
| true |
1ea983f61e0b9bfe5a49885cb2049d250f63e19b | Python | taeyoung02/Algorithm | /중복제거.py | UTF-8 | 146 | 2.765625 | 3 | [] | no_license | def solution(arr):
print(arr)
sorted(list(set(arr)), key=lambda x: x.index)
print(arr)
return arr[:,0]
print(solution([1,1,5,3])) | true |
d37f440e25397d6903de7e91d8524b6222b6bc51 | Python | esz22/python-training | /seccion 03/operadoresLogicos.py | UTF-8 | 360 | 3.4375 | 3 | [] | no_license | #a=3
# Read a value and report whether it falls inside [valormin, valormax].
a = int(input("proporciona un valor: "))
valormin = 0
valormax = 5
# Chained comparison is equivalent to the original two-sided 'and'.
dentroRango = valormin <= a <= valormax
print("dentro de rango" if dentroRango else "fuera de rango")
# Logical OR: a single True operand is enough to take the first branch.
vacaciones = True
diaDescanso = False
print("Puedes ir al parque" if (vacaciones or diaDescanso) else "Tienes deberes que hacer")
# Negation of a boolean.
print(not vacaciones)
d12a26189df1312e6a87eb32e22fbe1f9ef618e6 | Python | yaron1000/LSHT-HSLT-MODIS-Landsat-Fusion | /Models/Conv/VGG-19.py | UTF-8 | 7,067 | 2.765625 | 3 | [] | no_license | """
The SR-GAN uses the 2nd layer of the VGG-19 to include feature detection
in the perceptual loss function.
--rather than using a model pretrained on image net, it may be more useful to use a pre-trained model, trained on
data more similar to that of the scenes we are using for landsat-modis super resolution
-> idea 1) train a binary classifier to differentiate landsat from modis: this does not really achieve the goal
of deriving meaningful features from the image. The major difference between landsat and modis is the resolution
so this sort of classifier would likely produce a model that distinguishes high res from low res.
-> idea 2) explore different landcover/other feature classification approaches on both landsat and modis images:
a) train both and then average weights
b) scale up modis and train on same model ( may cause too much variance between scenes )
"""
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
import os
import load_EuroSat as lE
from datetime import datetime
_CITATION = """
@misc{helber2017eurosat,
title={EuroSAT: A Novel Dataset and Deep Learning Benchmark for Land Use and Land Cover Classification},
author={Patrick Helber and Benjamin Bischke and Andreas Dengel and Damian Borth},
year={2017},
eprint={1709.00029},
archivePrefix={arXiv},
primaryClass={cs.CV}
}"""
### get data ###
"""
using eurosat dataset, this dataset uses the sentenial-2 collected satellite images
"""
euro_path = r"/project/6026587/x2017sre/EuroSat/"
### Hyperparameters ###
batch_size = 10
### initalize loaders ###
train_data = lE.training_data_loader(
base_dir=os.path.join(euro_path, "train_data"))
test_data = lE.testing_data_loader(
base_dir=os.path.join(euro_path, "test_data"))
### load data ###
train_data.load_data()
test_data.load_data()
### prep train-data ###
train_data.prepare_for_training(batch_size=batch_size)
test_data.prepare_for_testing()
### initialize model ###
vgg = tf.keras.applications.VGG19(
include_top=True,
weights=None,
input_tensor=None,
input_shape=[224, 224, 3],
pooling=None,
classes=1000,
classifier_activation="softmax"
)
### loss function ###
"""
Use MSE loss:
ref -> "https://towardsdatascience.com/loss-functions-based-on-feature-activation-and-style-loss-2f0b72fd32a9"
"""
m_loss = tf.keras.losses.MSE
### adam optimizer for SGD ###
optimizer = tf.keras.optimizers.Adam()
### intialize metrics ###
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_vgg-19_acc')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_vgg-19_acc')
### train step ###
@tf.function
def train_step(idx, sample, label):
    # One optimisation step on a single (sample, label) pair.
    # `idx` is accepted but unused here (kept for the caller's signature).
    with tf.GradientTape() as tape:
        # preprocess for vgg-19
        sample = tf.image.resize(sample, (224, 224))
        sample = tf.keras.applications.vgg19.preprocess_input(sample)
        predictions = vgg(sample, training=True)
        # mean squared error in prediction
        loss = tf.keras.losses.MSE(label, predictions)
    # apply gradients
    gradients = tape.gradient(loss, vgg.trainable_variables)
    optimizer.apply_gradients(zip(gradients, vgg.trainable_variables))
    # update metrics
    train_loss(loss)
    train_accuracy(label, predictions)
### generator test step ###
@tf.function
def test_step(idx, sample, label):
    # Evaluation-only forward pass; mirrors train_step without gradients.
    # preprocess for vgg-19
    sample = tf.image.resize(sample, (224, 224))
    sample = tf.keras.applications.vgg19.preprocess_input(sample)
    # feed test sample in
    predictions = vgg(sample, training=False)
    t_loss = tf.keras.losses.MSE(label, predictions)
    # update metrics
    test_loss(t_loss)
    test_accuracy(label, predictions)
### tensorboard ###
# initialize logs #
# Separate train/test writers under a timestamped run directory.
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
train_log_dir = './logs/gradient_tape/' + current_time + '/train'
test_log_dir = './logs/gradient_tape/' + current_time + '/test'
# image_log_dir = './logs/gradient_tape/' + current_time + '/image'
train_summary_writer = tf.summary.create_file_writer(train_log_dir)
test_summary_writer = tf.summary.create_file_writer(test_log_dir)
# image_summary_writer = tf.summary.create_file_writer(image_log_dir)
# Use tf.summary.scalar() to log metrics in training #
### Weights Dir ###
# Checkpoints are written under ./checkpoints during training.
if not os.path.isdir('./checkpoints'):
    os.mkdir('./checkpoints')
### TRAIN ###
EPOCHS = 1000
# A checkpoint is saved every EPOCHS/4 epochs (and at epoch 0).
NUM_CHECKPOINTS_DIV = int(EPOCHS / 4)
save_c = 1
for epoch in range(EPOCHS):
    # Reset the metrics at the start of the next epoch
    train_loss.reset_states()
    train_accuracy.reset_states()
    test_loss.reset_states()
    test_accuracy.reset_states()
    # One pass over the training set, one mini-batch per iteration; each
    # sample is fed individually with a batch dimension prepended.
    for idx in range(train_data.get_ds_size() // batch_size):
        # train step
        batch = train_data.get_train_batch()
        for sample, label in zip(batch[0], batch[1]):
            sample = np.array(sample)[np.newaxis, ...]
            label = np.array(label)[np.newaxis, ...]
            train_step(idx, sample, label)
    # write to train-log #
    with train_summary_writer.as_default():
        tf.summary.scalar('loss', train_loss.result(), step=epoch)
        tf.summary.scalar('accuracy', train_accuracy.result(), step=epoch)
    # test step
    # Single evaluation batch per epoch; `idx` here is the last training
    # batch index from the loop above.
    batch = test_data.get_test_batch(batch_size=batch_size)
    for sample, label in zip(batch[0], batch[1]):
        sample = np.array(sample)[np.newaxis, ...]
        label = np.array(label)[np.newaxis, ...]
        test_step(idx, sample, label)
    """discluding image writer until conceptually resolved
    # image writer
    with image_summary_writer.as_default():
        # pass through last sample in test batch just to see
        # pass through input
        _x = vgg.get_layer(index=0)(sample)
        ### get layers ###
        for i in range(2):
            # up to block1_conv2 (Conv2D)
            _x = vgg.get_layer(index=i)(_x)
        img = vgg(sample, training=False)
        tf.summary.image("conv output", _x, step=epoch)
    """
    # write to test-log #
    with test_summary_writer.as_default():
        tf.summary.scalar('loss', test_loss.result(), step=epoch)
        tf.summary.scalar('accuracy', test_accuracy.result(), step=epoch)
    ### save weights ###
    if not epoch % NUM_CHECKPOINTS_DIV:
        vgg.save_weights('./checkpoints/my_checkpoint_{}'.format(save_c))
        save_c += 1
    if not epoch % 100:
        ### outputs every 100 epochs so .out file from slurm is not huge. ###
        template = 'Training VGG-19:\nEpoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
        print(template.format(epoch + 1,
                              train_loss.result(),
                              train_accuracy.result() * 100,
                              test_loss.result(),
                              test_accuracy.result() * 100))
    # test
db420a5355913070fb7e050037982ff549fa757b | Python | FrostMegaByte/random-python-projects | /dicethrow.py | UTF-8 | 521 | 4.40625 | 4 | [] | no_license | import random
class Die:
    """A die that prints (not returns) the outcome of each roll."""

    # Class-level default kept for compatibility with the original code.
    faces = 0

    def __init__(self, faces = 6):
        self.faces = faces

    def roll(self):
        # Outcome is uniform over 1..faces inclusive.
        outcome = random.randint(1, self.faces)
        print(outcome)

class ColourDie(Die):
    """A Die that additionally carries a colour."""

    # Class-level default kept for compatibility with the original code.
    colour = None

    def __init__(self, colour, faces = 6):
        super().__init__(faces)
        self.colour = colour

    def get_colour(self):
        # Prints the colour rather than returning it, matching roll().
        print(self.colour)
print("Normal die throw:")
testDie = Die(12)
testDie.roll()
print()
print("Colourful die throw:")
testColourDie = ColourDie("Red", 42)
testColourDie.get_colour()
testColourDie.roll() | true |
a073321f69635f4b26df370d0b0676e195f4d38c | Python | rtedwards/coronavirus-tracker | /app.py | UTF-8 | 1,125 | 2.796875 | 3 | [
"MIT"
] | permissive | import streamlit as st
from coronavirus.db_utils.db_utils import DataBase
from coronavirus.utilities.utilities import get_totals, string_of_spaces
from coronavirus.pages.world_map import load_world_map_page
from coronavirus.pages.country_totals import load_country_totals_page

# Display totals
# Header line with worldwide confirmed/deaths/recovered counts, padded
# apart with a run of spaces.
confirmed, deaths, recovered = get_totals()
n_spaces = string_of_spaces(24)
st.markdown(f"\
### 🤒 {confirmed:,} {n_spaces}\
💀 {deaths:,} {n_spaces}\
🤕 {recovered:,}\n\
")

# Sidebar page selector; each page module renders itself when loaded.
page = st.sidebar.radio(
    "Choose page type to view:",
    ('World Totals', 'World Map'))

if page == 'World Totals':
    load_country_totals_page()
else:
    load_world_map_page()

# Sources
# TODO: display_sources() utility function
st.sidebar.markdown(
    "Sources: \n\
[Johns Hopkins](https://github.com/CSSEGISandData/COVID-19) \n\
[Google](https://www.google.com/covid19/mobility/) \n\
[World Bank]\
(https://data.worldbank.org/indicator/EN.POP.DNST) \n\
")
st.sidebar.markdown(
    "Github: [github.com/rtedwards]\
(https://github.com/rtedwards/coronavirus-tracker)"
)
| true |
e0879bc71377aff31d2815f9149a81bcbb0e7028 | Python | aistoume/Leetcode | /AnswerCode/832FlippingAnImage.py | UTF-8 | 247 | 3.09375 | 3 | [] | no_license | class Solution(object):
def flipAndInvertImage(self, A):
M =[]
for L in A:
M.append([int(not L[i]) for i in range(len(L)-1,-1,-1)])
return M
# Quick manual check of the solution.
S = Solution()
A = [[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]
# NOTE: Python 2 print statement; this line is a SyntaxError under Python 3.
print S.flipAndInvertImage(A)
268d876c88bebfb0cf1f5731af394d4d6bcd8014 | Python | Python-Repository-Hub/Pine-Data-Tools | /lib/csv2pine.py | UTF-8 | 1,610 | 3.359375 | 3 | [
"MIT"
] | permissive | #! /usr/bin/env python3
"""
Convert CSV file to pine format.
Usage:
csv2pine.py <input_file> <output_file> [<headers_present = 0>]
[<label_index = 0>]
If headers are present, set argv[3] == 1
If no labels in file, set argv[4] == -1
"""
import sys
import csv
def construct_line(label, line):
    """Build one pine-format data line: ``"<label> | f1,f2,...\\n"``.

    Features are emitted in order, comma separated; trailing commas
    (including those produced by empty feature values at the end) are
    stripped.  Per-feature or per-label transformations (scaling,
    multiclass target vectors, ...) could be inserted here.
    """
    pieces = ["{0} | ".format(label)]
    for item in line:
        # Convert each feature to pine style and queue it up.
        pieces.append("{0},".format(item))
    # Join once, drop trailing comma(s), terminate the line.
    return "".join(pieces).rstrip(",") + "\n"
# --- command-line driver ---

# Required positional arguments (see module docstring for usage).
input_file = sys.argv[1]
output_file = sys.argv[2]

# Optional: 1 if the first CSV row is a header line (default 0).
try:
    headers_present = int(sys.argv[3])
except IndexError:
    headers_present = 0

# Optional: column index of the label; -1 means the file has no labels.
try:
    label_index = int(sys.argv[4])
except IndexError:
    label_index = 0

# Robustness fix: context managers guarantee both files are closed even if
# an error is raised mid-conversion (the original leaked open handles in
# that case).
with open(input_file) as i, open(output_file, 'w') as o:
    reader = csv.reader(i)

    if headers_present:
        headers = next(reader)

    for line in reader:
        if label_index == -1: # if no label is present
            label = ''
        else:
            # pop() removes the label so only features remain in `line`.
            label = line.pop(label_index)

        new_line = construct_line(label, line)
        if new_line: # we may skip certain lines
            o.write(new_line)
32c58a7dbf4c1b9cae42ec804f497bca30bc089a | Python | mehalyna/TAQC | /Tasks for unittests/11.py | UTF-8 | 224 | 3.640625 | 4 | [] | no_license | a = int(input("Input number: "))
def is_repdigit(a):
    """Return True when every decimal digit of *a* is the same (7, 444, ...)."""
    if a == 0:
        return True
    digits = str(a)
    # Every remaining character must equal the first one.
    return all(ch == digits[0] for ch in digits[1:])
# Report whether the number typed by the user above is a repdigit.
print(is_repdigit(a))
| true |
c7f728c9d455ae315f171d0f9efed9da20e4e144 | Python | karthikh07/Python-core | /Basics/loops.py | UTF-8 | 116 | 3.625 | 4 | [] | no_license | n = int(input('enter Number for factorial: '))
# Multiply n, n-1, ..., 2 together to obtain n! (n is read above).
res = 1
for factor in range(n, 1, -1):
    res *= factor
print(res)
| true |
de516e7f6e7398d131add07420e17f6f3ee19bd7 | Python | gtaddei/pf-opcua-conf-generator | /dataNodes.py | UTF-8 | 1,084 | 2.640625 | 3 | [] | no_license | class dataNodes:
def __init__(self, nb, name, freq, filename):
self.nb = nb
self.name = name
self.freq = freq
self.i = 0
self.filename = filename
def __iter__(self):
return self
def __next__(self):
for i in range(self.nb):
yield '''
<DataNode>
<Name>{}{}</Name>
<StringId>2:SimulationExamples.Functions.{}{}</StringId>
</DataNode>'''.format(self.name, i, self.name, i)
def next(self):
for i in range(self.nb):
yield '''
<DataNode>
<Name>{}{}</Name>
<StringId>2:SimulationExamples.Functions.{}{}</StringId>
</DataNode>'''.format(self.name, i, self.name, i)
def get_beginning(self):
return ''
def toString(self):
str = self.get_beginning()
for sub in next(self):
str += sub
return str
def printer(self):
with open(self.filename, "w") as f:
f.write(self.get_beginning())
for sub in next(self):
f.write(sub)
| true |
f268963c5288bce0e48a2e8e695c9f5a92e6ba92 | Python | NukeA/deep-learning-from-scratch-3 | /tests/test_max.py | UTF-8 | 2,030 | 2.921875 | 3 | [
"MIT"
] | permissive | import unittest
import numpy as np
from dezero import Variable
import dezero.functions as F
from dezero.utils import gradient_check, array_allclose
class TestMax(unittest.TestCase):
    # Forward tests compare F.max against np.max for several axis /
    # keepdims combinations; backward tests validate gradients numerically
    # via dezero.utils.gradient_check.

    def test_forward1(self):
        # 1-D reduction over all elements.
        x = Variable(np.random.rand(10))
        y = F.max(x)
        expected = np.max(x.data)
        self.assertTrue(array_allclose(y.data, expected))

    def test_forward2(self):
        # Reduction along a single axis.
        shape = (10, 20, 30)
        axis = 1
        x = Variable(np.random.rand(*shape))
        y = F.max(x, axis=axis)
        expected = np.max(x.data, axis=axis)
        self.assertTrue(array_allclose(y.data, expected))

    def test_forward3(self):
        # Reduction along a tuple of axes.
        shape = (10, 20, 30)
        axis = (0, 1)
        x = Variable(np.random.rand(*shape))
        y = F.max(x, axis=axis)
        expected = np.max(x.data, axis=axis)
        self.assertTrue(array_allclose(y.data, expected))

    def test_forward4(self):
        # keepdims=True must preserve the reduced dimensions.
        shape = (10, 20, 30)
        axis = (0, 1)
        x = Variable(np.random.rand(*shape))
        y = F.max(x, axis=axis, keepdims=True)
        expected = np.max(x.data, axis=axis, keepdims=True)
        self.assertTrue(array_allclose(y.data, expected))

    def test_backward1(self):
        x_data = np.random.rand(10)
        f = lambda x: F.max(x)
        self.assertTrue(gradient_check(f, x_data))

    def test_backward2(self):
        # Scaled inputs make ties in max() unlikely for the numeric check.
        x_data = np.random.rand(10, 10) * 100
        f = lambda x: F.max(x, axis=1)
        self.assertTrue(gradient_check(f, x_data))

    def test_backward3(self):
        x_data = np.random.rand(10, 20, 30) * 100
        f = lambda x: F.max(x, axis=(1, 2))
        self.assertTrue(gradient_check(f, x_data))

    def test_backward4(self):
        # NOTE(review): these last two exercise F.sum, not F.max --
        # possibly a copy-paste leftover; confirm intent.
        x_data = np.random.rand(10, 20, 20) * 100
        f = lambda x: F.sum(x, axis=None)
        self.assertTrue(gradient_check(f, x_data))

    def test_backward5(self):
        x_data = np.random.rand(10, 20, 20) * 100
        f = lambda x: F.sum(x, axis=None, keepdims=True)
        self.assertTrue(gradient_check(f, x_data))
0ececf1b56dea751c2cd6085da1ba8aa2de1731e | Python | bangalorebyte-cohort22/Flask_2 | /app.py | UTF-8 | 958 | 2.78125 | 3 | [] | no_license | from flask import Flask, render_template, request
from flask_sqlalchemy import SQLAlchemy
import model
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db'
db = SQLAlchemy(app)
class User(db.Model):
    """A registered user; `name` must be unique."""
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80), unique=True)

    def __repr__(self):
        return f'<User : {self.name}>'

    # __str__ previously duplicated __repr__'s body verbatim; aliasing the
    # function keeps the two representations identical by construction.
    __str__ = __repr__
@app.route('/', methods=['GET', 'POST'])
def show():
    """Render the index page; on POST, register the submitted name first.

    The name list is queried *after* a POST insert so the new name appears
    in the rendered list. GET requests greet 'Stranger'.
    """
    text = 'Stranger'
    if request.method == 'POST':
        submitted = request.form['name']
        text = model.alchemy_add_name(User(name=submitted), db)
    # Single query/render path for both methods (previously duplicated).
    names = model.alchemy_get_names(User)
    return render_template('index.html', text=text, names=names)
if __name__ == "__main__":
    # Start Flask's built-in development server; debug=True enables the
    # reloader and interactive debugger (not suitable for production).
    app.run(debug=True)
5f49083266cdf7a9766ecc0ce7655c83330b380f | Python | DanSagher/Caching-In-Cloud | /app.py | UTF-8 | 7,201 | 2.609375 | 3 | [] | no_license | import boto3
import requests
from datetime import datetime
from ec2_metadata import ec2_metadata
from flask import Flask, request
from uhashring import HashRing
data_dict = {}
expiration_dict = {}
app = Flask(__name__)
@app.route('/put', methods=['GET', 'POST'])
def put():
    # Store a key/value pair (with optional expiration) on its owning node
    # and replicate it to one alternative node, forwarding if this instance
    # is neither of the two.
    key = request.args.get('strKey')
    data = request.args.get('data')
    expiration_date = request.args.get('expirationDate')
    # Find target node
    healty_nodes_temp = get_healty_instances_id()
    target_node = get_key_node_id(key, healty_nodes_temp)
    target_node_index = healty_nodes_temp.index(target_node)
    # Keep an unmutated copy: the indices computed below must refer to the
    # full healthy list, while healty_nodes_temp is mutated to pick the
    # alternative (replica) node.
    healty_nodes = healty_nodes_temp.copy()
    alt_node_index = -1
    # -1 is the "no replica" sentinel understood by store_and_pass().
    alt_node = -1
    # More than one instances
    if (len(healty_nodes) > 1):
        healty_nodes_temp.remove(target_node)
        # Find alternative node
        alt_node = get_key_node_id(key, healty_nodes_temp)
        alt_node_index = healty_nodes.index(alt_node)
    current_node_index = healty_nodes.index(ec2_metadata.instance_id)
    if (target_node_index == current_node_index):
        # This is target node
        store_and_pass(key, data, expiration_date, alt_node)
    elif (alt_node_index == current_node_index):
        # This is alternative node
        store_and_pass(key, data, expiration_date, target_node)
    else:
        # Neither owner nor replica: forward the whole request to the owner.
        pass_data_to_target(key, data, expiration_date, target_node)
    return "", 201
@app.route('/get', methods=['GET'])
def get():
    """Look up a key: serve it locally if this node owns/replicates it,
    otherwise (or on a local miss) query the owner node, then the replica.

    Returns (value, 201) on a hit, ("None", 202) on a miss/expired entry.
    """
    key = request.args.get('strKey')

    # Determine which healthy node owns the key (target) and which one
    # holds the replica (alt). Indices refer to the unmutated copy.
    healty_nodes_temp = get_healty_instances_id()
    target_node = get_key_node_id(key, healty_nodes_temp)
    target_node_index = healty_nodes_temp.index(target_node)

    healty_nodes = healty_nodes_temp.copy()
    # BUGFIX: alt_node was previously only assigned inside the multi-node
    # branch, so a single-node cluster hit a NameError at the replica
    # lookup below. Defaulting to this instance makes that lookup a no-op
    # ("None", 202) instead.
    alt_node = ec2_metadata.instance_id
    alt_node_index = -1
    if len(healty_nodes) > 1:
        healty_nodes_temp.remove(target_node)
        alt_node = get_key_node_id(key, healty_nodes_temp)
        alt_node_index = healty_nodes.index(alt_node)

    current_node_index = healty_nodes.index(ec2_metadata.instance_id)
    if target_node_index == current_node_index or alt_node_index == current_node_index:
        # This instance stores the key; serve it locally if present.
        val = str(data_dict.get(key))
        if val != "None":
            exp_date = expiration_dict.get(key)
            # Guard against a missing/None expiration; previously None
            # reached strptime and raised TypeError into the bare except.
            if exp_date is not None and exp_date != "None":
                try:
                    datetime_object = datetime.strptime(exp_date, '%b-%d-%Y')
                    if datetime_object <= datetime.now():
                        # Expired: evict the entry and report a miss.
                        data_dict.pop(key, None)
                        expiration_dict.pop(key, None)
                        return "None", 202
                except ValueError:
                    # Unparseable date: keep serving the value, as before.
                    print("Could not parse expiration date time.")
            return val, 201

    # Local miss (or this node is neither owner nor replica): ask the
    # owner first, then the replica.
    content, code = get_data_from_neighbor(key, target_node)
    if code == 201:
        return content, code
    content, code = get_data_from_neighbor(key, alt_node)
    if code == 201:
        return content, code
    return "None", 202
@app.route('/getFromInstance', methods=['GET'])
def getFromInstance():
    """Serve a key stored on *this* instance to a peer node (no forwarding).

    Returns (value, 201) on a hit, ("None", 202) when the key is absent or
    its entry has expired (expired entries are evicted).
    """
    key = request.args.get('strKey')
    val = str(data_dict.get(key))
    if val == "None":
        # Key not stored on this instance.
        code = 202
    else:
        code = 201
        exp_date = expiration_dict.get(key)
        # Guard against a missing/None expiration; previously a None value
        # reached strptime and raised TypeError into the bare except on
        # every hit, doing a wasted parse attempt.
        if exp_date is not None and exp_date != "None":
            try:
                expires = datetime.strptime(exp_date, '%b-%d-%Y')
                if expires <= datetime.now():
                    # Expired: evict the entry and report a miss.
                    data_dict.pop(key, None)
                    expiration_dict.pop(key, None)
                    return "None", 202
            except ValueError:
                # Unparseable date: keep serving the value, as before.
                print("Could not parse expiration date time.")
    return val, code
@app.route('/healthcheck', methods=['GET', 'POST'])
def health():
    # Liveness endpoint -- presumably polled by the load balancer's target
    # group checks (see get_healty_instances_id); any 200 body works, the
    # literal "bol" is arbitrary.
    return "bol", 200
@app.route('/putFromNeighbor', methods=['POST'])
def putFromNeighbor():
    """Store a key/value pair replicated from another cache node.

    Unlike /put, this endpoint never forwards -- it only writes locally.
    """
    args = request.args
    key = args.get('strKey')
    data_dict[key] = args.get('data')
    expiration_dict[key] = args.get('expirationDate')
    return "", 201
def get_data_from_neighbor(key, neighbor_id):
    """Fetch `key` from another instance; returns (body, status) like the views.

    Asking ourselves is short-circuited to a miss ("None", 202).
    """
    if neighbor_id == ec2_metadata.instance_id:
        return "None", 202
    dns_name = get_instance_public_dns(neighbor_id)
    url = "http://" + dns_name + "/getFromInstance?strKey=" + key
    response = requests.get(url=url)
    return response.content, response.status_code
def get_healty_instances_id():
    """Return the healthy instance ids of the target group containing this instance.

    Scans every load balancer and target group in the region; the first
    target group whose membership includes the current EC2 instance wins,
    and its list of healthy instance ids is returned. Returns [] when no
    target group contains this instance.
    """
    elb = boto3.client('elbv2', region_name=ec2_metadata.region)
    lbs = elb.describe_load_balancers()
    isFound = False
    for current_lb in lbs["LoadBalancers"]:
        lb_arn = current_lb["LoadBalancerArn"]
        response_tg = elb.describe_target_groups(
            LoadBalancerArn=lb_arn
        )
        # (Removed unused local that only counted the target groups.)
        for current_tg in response_tg["TargetGroups"]:
            target_group_arn = current_tg["TargetGroupArn"]
            response_health = elb.describe_target_health(
                TargetGroupArn=target_group_arn
            )
            # Rebuilt per target group so only the matching group's healthy
            # members are returned.
            healty_instances = []
            for instance in response_health['TargetHealthDescriptions']:
                if instance['TargetHealth']['State'] == 'healthy':
                    healty_instances.append(instance['Target']['Id'])
                if (instance['Target']['Id'] == ec2_metadata.instance_id):
                    isFound = True
            if (isFound):
                return healty_instances
    return []
def get_instance_public_dns(instanc_id):
    """Resolve an EC2 instance id to its public DNS name via describe_instances."""
    ec2 = boto3.client('ec2', region_name=ec2_metadata.region)
    described = ec2.describe_instances(InstanceIds=[str(instanc_id)])
    # Exactly one id was requested, so take the first reservation/instance.
    return described['Reservations'][0]['Instances'][0]['PublicDnsName']
def get_key_node_id(key, nodes):
    """Map `key` onto one of `nodes` (instance ids) via consistent hashing."""
    ring = HashRing(nodes=nodes)
    return ring.get_node(key)
def store_and_pass(key, data, expiration_date, instance_id):
    """Store the entry locally, then replicate it to `instance_id`.

    instance_id == -1 is the "no replica" sentinel (single-node cluster):
    the entry is stored locally only.
    """
    data_dict[key] = data
    expiration_dict[key] = expiration_date
    if (instance_id == -1):
        return
    next_dns = get_instance_public_dns(instance_id)
    # Let requests URL-encode the query values; the previous manual string
    # concatenation produced broken URLs for keys/data containing '&', '='
    # or spaces.
    requests.post(
        url="http://" + next_dns + "/putFromNeighbor",
        params={"strKey": key, "data": data, "expirationDate": expiration_date},
    )
def pass_data_to_target(key, data, expiration_date, target_node):
    """Forward a regular /put request to the key's owning node.

    Unlike store_and_pass, nothing is written locally and the receiver runs
    its full /put logic (including replication).
    """
    next_dns = get_instance_public_dns(target_node)
    # Let requests URL-encode the query values; the previous manual string
    # concatenation produced broken URLs for un-encoded key/data values.
    requests.post(
        url="http://" + next_dns + "/put",
        params={"strKey": key, "data": data, "expirationDate": expiration_date},
    )
| true |
f82b5dfae3e91b7a935b6c3bff97605fbbe8f482 | Python | dr-you-group/Data-Ingestion-and-Harmonization | /pipeline_logic/pcornet/python/parsing.py | UTF-8 | 3,101 | 2.59375 | 3 | [] | no_license | import csv
import tempfile
import shutil
from transforms.api import TransformInput, TransformOutput
from pyspark.sql import Row
from pcornet.pcornet_schemas import complete_domain_schema_dict, schema_dict_to_struct
from pcornet.site_specific_utils import get_site_dialect_params
def parse_input(ctx, my_input: TransformInput, error_df: TransformOutput, site_id: int, domain: str, regex: str):
    """Parse a site's raw CSV files for one PCORnet domain into a Spark DataFrame.

    Each file in `my_input` matching `regex` is copied to a local temp file,
    its delimiter is sniffed, and every well-formed row becomes a pyspark
    Row; malformed rows (wrong field count) are logged to an 'error_rows'
    csv in `error_df` instead. The returned DataFrame has all-string
    columns: the domain's canonical schema when no rows parsed, otherwise
    whatever header the site provided.

    Args:
        ctx: Foundry transform context (unused here).
        my_input: TransformInput holding the site's raw files.
        error_df: TransformOutput that receives the malformed-row report.
        site_id: numeric site id; selects site-specific csv dialect quirks.
        domain: PCORnet domain name; selects the fallback schema.
        regex: filename filter passed to my_input.filesystem().files().
    """
    def process_file(file_status):
        # Generator executed by Spark per matched file; yields pyspark Rows.
        # Copy contents of file from Foundry into temp file
        with tempfile.NamedTemporaryFile() as t:
            with my_input.filesystem().open(file_status.path, 'rb') as f_bytes:
                shutil.copyfileobj(f_bytes, t)
            t.flush()

            # Read the csv, line by line, and use csv.Sniffer to infer the delimiter
            # Write any improperly formatted rows to the errors DataFrame
            with open(t.name, newline="", encoding="utf8", errors='ignore') as f:
                with error_df.filesystem().open('error_rows', 'w', newline='') as writeback:
                    # Sniff the delimiter from the first 1 KiB, then rewind.
                    dialect = csv.Sniffer().sniff(f.read(1024))
                    f.seek(0)
                    # Site-specific reader overrides (quoting etc.).
                    dialect_params = get_site_dialect_params(site_id, domain)
                    r = csv.reader(f, delimiter=dialect.delimiter, **dialect_params)
                    w = csv.writer(writeback)

                    # Construct a pyspark.Row from our header row
                    header = next(r)
                    MyRow = Row(*header)
                    expected_num_fields = len(header)
                    error_encountered = False
                    for i, row in enumerate(r):
                        if len(row) == expected_num_fields:
                            # Properly formatted row
                            yield MyRow(*row)
                        else:
                            # Improperly formatted row
                            if not error_encountered:
                                # Create header for output csv (lazily, so the
                                # error file gets content only when needed)
                                w.writerow(["row_number", "row_contents"])
                                error_encountered = True
                            # Write to a csv file in the errors dataset, recording the row number and malformed row
                            malformed_row = "|".join(row)
                            w.writerow([str(i), malformed_row])

    files_df = my_input.filesystem().files(regex=regex)
    processed_rdd = files_df.rdd.flatMap(process_file)
    if processed_rdd.isEmpty():
        # No parsable rows (empty/missing csv): fall back to the domain's
        # canonical schema so downstream code still sees the right columns.
        # Get OrderedDict that specifies this domain's schema
        schema_dict = complete_domain_schema_dict[domain]
        # Create StructType for the schema with all types as strings
        struct_schema = schema_dict_to_struct(schema_dict, all_string_type=True)
        # Create empty dataset with proper columns, all string types
        processed_df = processed_rdd.toDF(struct_schema)
    else:
        # Rows were parsed: build the DataFrame from whatever columns the
        # site gave us, all string types. (The original comment here said
        # "csv file for the domain is empty", which described the branch above.)
        processed_df = processed_rdd.toDF()
    return processed_df
| true |
e38893a990cfe0298484ac32defd138b7c4592d2 | Python | robbrad/UKBinCollectionData | /uk_bin_collection/uk_bin_collection/councils/WarwickDistrictCouncil.py | UTF-8 | 1,153 | 3 | 3 | [
"MIT"
] | permissive | # This script pulls (in one hit) the data
# from Warick District Council Bins Data
from bs4 import BeautifulSoup
from uk_bin_collection.uk_bin_collection.get_bin_data import \
AbstractGetBinDataClass
# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    baseclass. They can also override some
    operations with a default implementation.
    """

    def parse_data(self, page: str, **kwargs) -> dict:
        """Parse Warwick District Council's bin page into {"bins": [...]}.

        NOTE(review): despite the `str` annotation, `page` is used like a
        requests.Response (its .text attribute is read) -- annotation kept
        for interface compatibility; confirm against the base class.
        """
        # Make a BS4 object
        soup = BeautifulSoup(page.text, features="html.parser")
        # (Removed a discarded soup.prettify() call: prettify() only
        # *returns* a formatted string, so calling it without using the
        # result was a no-op.)

        data = {"bins": []}
        # Each collection appears as a <strong> bin-type label; the text
        # node right inside it is the type, and the date is reached by
        # walking the siblings that follow it.
        for element in soup.find_all("strong"):
            bin_type = element.next_element
            bin_type = bin_type.lstrip()
            collectionDateElement = element.next_sibling.next_element.next_element
            collectionDate = collectionDateElement.getText()
            dict_data = {
                "type": bin_type,
                "collectionDate": collectionDate,
            }
            data["bins"].append(dict_data)
        return data
| true |
ed86585f914c1e311e30e257b773c67f2e0cda33 | Python | qq854051086/46-Simple-Python-Exercises-Solutions | /problem_02.py | UTF-8 | 662 | 4.28125 | 4 | [] | no_license | '''
Statement:
======================
Define a function max_of_three() that takes three numbers as arguments and returns the largest of
them.
'''
def max_of_three(num1, num2, num3):
    """Return the largest of three numbers.

    Raises TypeError (same message as before) when any argument is not an
    int or a float.
    """
    for value in (num1, num2, num3):
        if not isinstance(value, (int, float)):
            raise TypeError("All three parameters should be integer or float.")
    return max(num1, num2, num3)
# Demo call: prints 900, the largest of the three sample arguments.
print(max_of_three(36,-366,900))
| true |