blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
49f835f332b3d7953c6d83da39d5b87e2471e4f8 | Python | OHedges1/discount_scraper | /discount.py | UTF-8 | 5,215 | 2.578125 | 3 | [] | no_license | #!/usr/bin/env python3
import requests
import json
import datetime
from bs4 import BeautifulSoup
class Playstation():
    """Scrapes the PlayStation Store GraphQL API for discounted games."""
    def __init__(self, number: int):
        """Fetch the top `number` deals (plus weekend deals on Sat/Sun)."""
        number = str(number)
        # Persisted GraphQL query; `number` is spliced in as the page size.
        url = 'https://web.np.playstation.com/api/graphql/v1//op?operationName=categoryGridRetrieve&variables={%22id%22:%22803cee19-e5a1-4d59-a463-0b6b2701bf7c%22,%22pageArgs%22:{%22size%22:' + number + ',%22offset%22:0},%22sortBy%22:{%22name%22:%22sales30%22,%22isAscending%22:false},%22filterBy%22:[],%22facetOptions%22:[]}&extensions={%22persistedQuery%22:{%22version%22:1,%22sha256Hash%22:%224ce7d410a4db2c8b635a48c1dcec375906ff63b19dadd87e073f8fd0c0481d35%22}}'
        response = json.loads(requests.get(url).text)
        self.games = response['data']['categoryGridRetrieve']['products']
        # weekday(): Monday=0 ... Sunday=6, so >= 5 means Saturday/Sunday.
        weekno = datetime.datetime.today().weekday()
        if weekno >= 5:
            # Different category id: the weekend-only offers grid.
            weekend_url = 'https://web.np.playstation.com/api/graphql/v1//op?operationName=categoryGridRetrieve&variables={%22id%22:%2216617674-2165-4899-9dbc-d56a68992cef%22,%22pageArgs%22:{%22size%22:' + number + ',%22offset%22:0},%22sortBy%22:{%22name%22:%22sales30%22,%22isAscending%22:false},%22filterBy%22:[],%22facetOptions%22:[]}&extensions={%22persistedQuery%22:{%22version%22:1,%22sha256Hash%22:%224ce7d410a4db2c8b635a48c1dcec375906ff63b19dadd87e073f8fd0c0481d35%22}}'
            weekend_response = json.loads(requests.get(weekend_url).text)
            self.weekend_games = weekend_response['data']['categoryGridRetrieve']['products']
        else:
            self.weekend_games = []
    def get_name_price_pic(self):
        """Return a list of [name, base_price, discounted_price, image_url] per game."""
        gameinfo = []
        for game in self.games + self.weekend_games:
            item = []
            item.append(game['name'])
            item.append(game['price']['basePrice'])
            item.append(game['price']['discountedPrice'])
            # NOTE(review): assumes the last media entry is the cover image -- confirm.
            item.append(game['media'][-1]['url'])
            gameinfo.append(item)
        return gameinfo
class NintendoSwitch():
    """Scrapes the Nintendo UK store's "deals & offers" search grid."""
    def __init__(self):
        # First 12 deals, sorted by popularity.
        url = 'https://store.nintendo.co.uk/en_gb/search-update-grid?cgid=deals-%26-offers-games&srule=most-popular&start=0&sz=12'
        re = requests.get(url)
        self.soup = BeautifulSoup(re.text, 'html.parser')
    def get_name_price_pic(self):
        """Return a zip of (name, full_price, discounted_price, image_url) tuples."""
        names = [i.text.replace('\n', '').strip() for i in self.soup.find_all('h2', class_='card__title')]
        # <data class="value"> elements appear in pairs per game.
        # NOTE(review): the [::2]/[1::2] split assumes discounted price comes
        # first and full price second for every card -- verify against live markup.
        all_prices = [i.get('content') for i in self.soup.find_all('data', class_='value')]
        full_prices = [float(i) for i in all_prices[1::2]]
        dis_prices = [float(i) for i in all_prices[::2]]
        images = [i.get('src') for i in self.soup.find_all(class_='tile-image img img-fluid')]
        return zip(names, full_prices, dis_prices, images)
class Steampowered():
    """Scrapes the Steam specials page (Top Sellers tab)."""
    def __init__(self):
        url = 'https://store.steampowered.com/specials#tab=TopSellers'
        re = requests.get(url)
        soup = BeautifulSoup(re.text, 'html.parser')
        # Restrict all later queries to the Top Sellers container.
        self.refined_soup = soup.find('div', id='TopSellersRows')
    def get_name_price_pic(self):
        """Return a zip of (name, full_price, discounted_price, image_url) tuples.

        Prices are raw display strings here (not parsed to float), unlike
        the other scrapers in this module.
        """
        names = [i.text for i in self.refined_soup.find_all('div', class_='tab_item_name')]
        full_prices = [i.text for i in self.refined_soup.find_all('div', class_='discount_original_price')]
        dis_prices = [i.text for i in self.refined_soup.find_all('div', class_='discount_final_price')]
        images = [i.get('src') for i in self.refined_soup.find_all('img', class_='tab_item_cap_img')]
        return zip(names, full_prices, dis_prices, images)
class HtmlWriter():
    """Renders the scraped discount data from all three stores as an HTML page."""
    def __init__(self):
        # Each source exposes get_name_price_pic() -> iterable of
        # (name, full_price, discounted_price, image_url) records.
        self.p = Playstation(12)
        self.n = NintendoSwitch()
        self.s = Steampowered()
    def preamble(self):
        """Return the opening boilerplate lines of the HTML document."""
        doc = [
            '<!DOCTYPE html>',
            '<html>',
            '<head>',
            # Fix: the charset declaration belongs inside <head>,
            # not before the <html> element.
            '<meta charset="UTF-8">',
            '<title>Game Discounts</title>',
            '</head>',
            '<body>',
        ]
        return doc
    def content(self):
        """Return one HTML table per store, one row per discounted game."""
        doc = []
        sources = {
            'Playstation': self.p.get_name_price_pic(),
            'Nintendo': self.n.get_name_price_pic(),
            'Steampowered': self.s.get_name_price_pic(),
        }
        for source in sources:
            doc.append('<table>')
            doc.append('<tr>')
            doc.append('<th>{}</th>'.format(source))
            # Bug fix: the header row was opened twice and never closed.
            doc.append('</tr>')
            for game in sources[source]:
                doc.append('<tr>')
                doc.append('<td>{}</td>'.format(game[0]))
                # Struck-through full price next to the discounted price.
                doc.append('<td><s>{}</s> {}</td>'.format(game[1], game[2]))
                doc.append('<td>')
                doc.append('<picture>')
                doc.append('<img src="{}" alt="{}" style="width:5cm;">'.format(game[3], game[0]))
                doc.append('</picture>')
                doc.append('</td>')
                doc.append('</tr>')
            doc.append('</table>')
        return doc
    def ending(self):
        """Return the closing boilerplate lines of the HTML document."""
        doc = [
            '</body>',
            '</html>',
        ]
        return doc
# Script entry point: scrape all three stores and write the combined report.
h = HtmlWriter()
with open('index.html', 'w') as f:
    for item in h.preamble() + h.content() + h.ending():
        f.write(item + '\n')
| true |
5ac7c9bd688bea702e8c27ab24beed50514431a5 | Python | alpacanako/AtCoder | /ABC107/C.py | UTF-8 | 339 | 2.953125 | 3 | [] | no_license | N,K = map(int,input().split())
x = list(map(int,input().split()))
# Minimum walking distance to light K of the N candles starting at 0
# (AtCoder ABC107 C). N and K are read on the line above.
# NOTE(review): assumes x is sorted ascending (AtCoder input guarantee) -- confirm.
# m starts as a deliberately loose upper bound (out-and-back to both ends).
m = abs(x[0])*2 + abs(x[N-1])*2
for i in range(N-K+1):
    # Consider the window of K consecutive candles x[i] .. x[i+K-1].
    if x[i] >= 0:
        # Whole window is right of (or at) the origin: walk right once.
        t = x[i+K-1]
    elif x[i+K-1] <= 0:
        # Whole window is left of (or at) the origin: walk left once.
        t = -x[i]
    elif -x[i] <= x[i+K-1]:
        # Window straddles 0 and the left end is closer: left first, then right.
        t = -x[i]*2 + x[i+K-1]
    else:
        # Right end is closer: right first, then left.
        t = -x[i] + x[i+K-1]*2
    if t < m:
        m = t
print(m)
| true |
fdb40a3723962b6e36dc1bd745223d1cb70d1987 | Python | riddhimanroy1010/esc180 | /Lectures/Lecture 32.py | UTF-8 | 1,692 | 4.0625 | 4 | [] | no_license | '''Return to merge sort'''
def merge(L1, L2):
    """Merge two already-sorted lists into one sorted list."""
    merged = []
    a, b = 0, 0
    n1, n2 = len(L1), len(L2)
    while a < n1 and b < n2:
        if L1[a] < L2[b]:
            merged.append(L1[a])
            a += 1
        else:
            merged.append(L2[b])
            b += 1
    # At most one of the two tails is non-empty at this point.
    merged.extend(L1[a:])
    merged.extend(L2[b:])
    return merged
def insertion_sort(L):
    """Sort L by recursive splitting and merging (despite the name, this is merge sort)."""
    if len(L) < 2:
        return L[:]
    # Split after the first element, sort both parts, then merge them.
    left = insertion_sort(L[:1])
    right = insertion_sort(L[1:])
    return merge(left, right)
# [1] k*n
# [n - 1] k*(n - 1)
# [n - 2] k*(n - 2)
# [n - 3] k*(n - 3)
# ...
# [1] k
# Total runtime = k*(n + n - 1 + n - 2.... + 1) = n + n + n....kn(n+1)/2 = O(n^2)
''' Different kinds of call trees '''
# Factorial
# Only one recursive call from each call, runtime does not depend on n, n decreases linearly
# total calls = n + 1
# Exponentiation
# Only one recursive call from each call, runtime does not depend on n, n decreased by a constant factor
# total calls: nlog(n)
# Slow
# Two recursive calls, runtime does not depend on n, n decreases by 1 each time
# Count by levels: level 1: 1, level 2: 2, level 3: 8....level n: 2^(n) calls
# sum of geometric series: 2^(n + 1) - 1 / (2 - 1)
# Sum List
# Two recursive calls, runtime does not depend on n, n decreases by constant factor each time
# total calls: 1 + 2 + 4 + 8 + ... + n; let 2^k = n
# total calls = 2^(k + 1) - 1, with k = log2(n)
# therefore total calls = 2*n - 1, i.e. O(n)
# Mergesort
# Two recursive calls, runtime DOES depend on n, n decreases by 1 each time
| true |
c5514d8d58e8d43b63de7bd323f3eaafdaf492fa | Python | dweih/ArcherChess | /New Allocator.py | UTF-8 | 2,242 | 3.359375 | 3 | [] | no_license |
import math
# Takes [(score, confidence, board)], my_move(the boolean variable expressing whose turn it is)
# and the number points to invest
# Returns list of tuples [(points, board)] for investments
def PointAllocator(edgeInfo, my_move, points):
    """Distribute an investment budget of `points` across candidate moves.

    Moves with at-least-average score share 60% of the budget; below-average
    moves with below-average confidence ("interesting") share 40%; the rest
    get nothing. Any rounding leftovers go to the first entry returned.

    Args:
        edgeInfo: list of (score, confidence, board) tuples.
        my_move: boolean, True when it is this player's turn.
        points: integer number of points to invest.

    Returns:
        An iterator (zip) of (points, board) pairs, one per edge,
        or [] when edgeInfo is empty.
    """
    # Bug fix: itemgetter was referenced below but never imported anywhere
    # in this module, so every call previously raised NameError.
    from operator import itemgetter
    # Returns empty if there is no information about the edges. Should not
    # happen in practice.
    if len(edgeInfo) == 0:
        return []
    # Sorts the edges from worst to best, reverses if it is your move.
    # NOTE(review): sorted_edgeInfo is never used afterwards -- the loop
    # below iterates the unsorted edgeInfo, so "leftovers to the top choice"
    # only holds if the caller pre-sorts. Confirm intent.
    sorted_edgeInfo = sorted(edgeInfo, key=itemgetter(0), reverse=not my_move)
    # Field accessors for an edge tuple.
    edgescore = itemgetter(0)
    edgeconfidence = itemgetter(1)
    justtheboard = itemgetter(2)
    # Mean score/confidence over all edges.
    Meanscore = sum(edgescore(edge) for edge in edgeInfo) / len(edgeInfo)
    Meanconfidence = sum(edgeconfidence(edge) for edge in edgeInfo) / len(edgeInfo)
    # Bucket each edge: good (above-average score), interesting (low score
    # but uncertain), boring (low score and high confidence).
    Goodmoves = []
    Interestingmoves = []
    Boringmoves = []
    for edge in edgeInfo:
        if edgescore(edge) >= Meanscore:
            Goodmoves.append(justtheboard(edge))
        elif edgeconfidence(edge) < Meanconfidence:
            Interestingmoves.append(justtheboard(edge))
        else:
            Boringmoves.append(justtheboard(edge))
    # Per-move allocation: 60% of the budget split among good moves
    # (at least one edge always scores >= the mean, so no division by zero),
    # 40% among interesting moves.
    Goodmovepoints = math.floor(0.6 * points / len(Goodmoves))
    if len(Interestingmoves) > 0:
        Othermovepoints = math.floor(0.4 * points / len(Interestingmoves))
    else:
        Othermovepoints = 0
    # Build the parallel lists of boards and point allocations.
    Finalset = []
    Finalscores = []
    for board in Goodmoves:
        Finalset.append(board)
        Finalscores.append(Goodmovepoints)
    for board in Interestingmoves:
        Finalset.append(board)
        Finalscores.append(Othermovepoints)
    for board in Boringmoves:
        Finalset.append(board)
        Finalscores.append(0)
    # Hand any rounding leftovers to the first entry.
    leftovers = points - sum(Finalscores)
    Finalscores[0] += leftovers
    return zip(Finalscores, Finalset)
| true |
34ab9c5f073471e71f34f1b6c3d9e9b9dae9b893 | Python | roince/Python_Crash_Course | /Mosh_Python/consitions.py | UTF-8 | 286 | 4.0625 | 4 | [] | no_license | name = input("please enter your name: ")
# Validate the name length read above, then greet the user.
if len(name) < 3:
    print("Your input is too short, name must be at least 3 characters")
elif len(name) > 50:
    # Bug fix: the old check used `> 51`, which accepted 51-character
    # names even though the message promises a maximum of 50.
    print("Your input is too long, name must be maximum of 50 characters")
else:
    print(f"Welcome to the system, {name.title()}")
| true |
2fcef81a1c529ed12a578f6c396920694db2ea54 | Python | mingshaofeng/Client | /Severe.py | UTF-8 | 3,483 | 2.578125 | 3 | [] | no_license | # -*- coding:utf-8 -*-
#接收端
import socket
import threading
import time
import sys
import os,shutil
import struct
import hashlib
import pymysql
ip_port =("127.0.0.1", 8000)# listening address and port
def socket_service():
    """Listen on ip_port forever, handling each client in its own thread."""
    try:
        # Create the TCP socket object.
        s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)
        # Allow quick rebinding of the port after restarts.
        s.setsockopt(socket.SOL_SOCKET,socket.SO_REUSEADDR,1)
        s.bind(ip_port)# bind to the address
        s.listen(5)# backlog: max queued clients
    except socket.error as msg:
        print(msg)# report the error and bail out
        exit(1)
    print('监听开始...')
    while 1:
        conn, addr = s.accept()# block until a client connects
        # One worker thread per connection.
        t = threading.Thread(target=deal_data, args=(conn, addr))
        t.start()
def deal_data(conn,addr):
    """Receive one file over `conn`, verify its MD5, and move it to C:\\迅雷下载.

    Protocol: a struct('128sq') header (filename, size), then the file
    bytes in <=1024-byte chunks, then the sender's MD5 hex digest.

    NOTE(review): on the MD5-success path f_mark is never assigned before
    `f_mark.to_bytes(...)` below, which raises NameError -- this looks like
    a bug (a default mark should be set before the md5 check).
    """
    print('接收的文件来自{0}'.format(addr))
    #conn.send('欢迎连接服务器'.encode('utf-8'))
    while True:
        fileinfo_size =struct.calcsize('128sq')
        # Receive the fixed-size file header.
        buf =conn.recv(fileinfo_size)
        if buf:
            filename, filesize = struct.unpack('128sq',buf)
            # Strip the NUL padding from the 128-byte filename field.
            fn = filename.strip('\00'.encode('utf-8'))
            new_filename = os.path.join('./'.encode('utf-8'),fn)
            print('文件的新名字是{0},文件的大小为{1}'.format(new_filename,filesize))
            recvd_size = 0
            m = hashlib.md5()
            fp = open(new_filename,'wb')
            print('开始接收文件...')
            pwd = os.getcwd()
            while recvd_size < filesize:
                if filesize - recvd_size > 1024:
                    data = conn.recv(1024)
                    recvd_size += len(data)
                else:
                    data = conn.recv(filesize-recvd_size)# final partial chunk
                    recvd_size += len(data)
                print('已接收:',int(recvd_size/filesize*100),'%')
                m.update(data)
                fp.write(data)# write chunk to disk
            fp.close()
            # The sender transmits its MD5 digest after the file body.
            md5_client = conn.recv(1024).decode('utf-8')
            md5_server = m.hexdigest()
            # print("服务器发来的md5:", md5_server)
            # print("接收文件的md5:", md5_client)
            # Verify the MD5 checksum and move the file to the download folder.
            Str_name =str(fn,encoding='utf-8')
            path1 = pwd+'\\'+str(fn,encoding='utf-8')
            path2 = 'C:\迅雷下载'
            path3 = ('C:\迅雷下载\\'+str(fn,encoding='utf-8'))
            shutil.move(path1,path2)
            if md5_client == md5_server:
                print('接收完毕,MD5校验成功...\n')
                print('文件已存放在:'+path2)
            else:
                print('MD5验证失败')
                f_mark = 3
                #save_mysql(Str_name,path3,f_mark)
                #conn.close()
                break
            # Reply with filename, destination path and a 2-byte mark.
            f_url = bytes(path3,'utf-8')
            f_mark=f_mark.to_bytes(length=2,byteorder='big',signed=False)
            conn.send(fn)
            conn.send(f_url)
            conn.send(f_mark)
            conn.close()
'''
def save_mysql(f_name,f_url,f_mark):
db = pymysql.connect(host='127.0.0.1', port=3306, user='MSF', password='1024161X', db='videos',
charset='utf8', )
cursor = db.cursor()
list_d = []
list_d.append(f_name)
list_d.append(f_url)
list_d.append(f_mark)
cursor.execute('insert into video(name,url,mark) values(%s,%s,%s)', list_d)
db.commit()
db.close()
'''
# Script entry point: start the file-receiving server.
if __name__=='__main__':
    # socket_service2()
    socket_service()
c69ef02187afc0aaa56e17ea11b0e81fcd9ee480 | Python | smsharma/GPSphere | /gaussianprocess.py | UTF-8 | 2,386 | 2.515625 | 3 | [
"MIT"
] | permissive | # Gaussian processes on a sphere
# Code originally from Tim Brandt (UCSB)
import numpy as np
from astropy.io import fits
import distances
from scipy import special, interpolate, optimize
import time
def like_gaussmix(x, val1, var1):
    """Gaussian log-likelihood (up to an additive constant) of scalar x.

    Treats each entry of val1 as an independent observation with standard
    deviation var1 and sums the log-density terms.

    Args:
        x: candidate value (scalar or array broadcastable against val1).
        val1: observed values (numpy array).
        var1: per-observation standard deviations (numpy array).

    Returns:
        The summed log-likelihood, without the normalisation constant.
    """
    # Cleanup: the original body aliased val1 to `pmra` (proper-motion
    # naming) without ever using it; the aliases are dropped here.
    return np.sum(-(val1 - x)**2 / (2 * var1**2))
def explorelike(p, d, d_cross, logd, logd_cross,
                val1, var1):
    """Negative log-likelihood of hyperparameters p = (sig2, alpha, nu).

    Suitable as an objective for a minimiser: out-of-range hyperparameters
    are rejected with a huge penalty instead of raising.
    """
    sig2, alpha, nu = p
    # Reject non-positive parameters and nu outside (0, 0.5].
    if sig2 <= 0 or alpha <= 0 or nu <= 0 or nu > 0.5:
        return 1e100
    # GP prediction at the observation points, then score the fit.
    x = gaussianprocess(d, d_cross, logd, logd_cross, val1, var1,
                        sig2, alpha, nu)
    return -like_gaussmix(x, val1, var1)
def geninterp(xmin, xmax, sig2, nu, tol=1e-5):
    """Build an interpolator of the Matern-type kernel over log-distance.

    Tabulates sig2 * 2**(nu-1)/Gamma(nu) * x**nu * K_nu(x) on a grid in
    log(x) and refines the grid until interpolation error is below tol.

    Args:
        xmin, xmax: distance range to cover (already scaled by alpha).
        sig2: covariance amplitude.
        nu: Matern smoothness parameter.
        tol: maximum tolerated interpolation error.

    Returns:
        A scipy interp1d taking log-distance (NaN outside the range).
    """
    n = 21
    x = np.linspace(np.log(xmin), np.log(xmax), n)
    y = sig2*2**(nu - 1)/special.gamma(nu)*np.exp(x*nu)
    y *= special.kv(nu, np.exp(x))
    f = interpolate.interp1d(x, y, bounds_error=False)
    _x = x.copy()
    _y = y.copy()
    for i in range(15):
        # Evaluate at the midpoints of the current grid and keep only
        # those where the interpolation error still exceeds tol.
        n = (n//2)*4 + 1
        x = np.linspace(np.log(xmin), np.log(xmax), n)[1:-1:2]
        y = sig2*2**(nu - 1)/special.gamma(nu)*np.exp(x*nu)
        y *= special.kv(nu, np.exp(x))
        indx = np.where((f(x) - y)**2 > tol**2)
        if len(indx[0]) == 0:
            break
        _x = np.asarray(list(_x) + list(x[indx]))
        _y = np.asarray(list(_y) + list(y[indx]))
        f = interpolate.interp1d(_x, _y, bounds_error=False)
    return f
def covariance(d, sig2, alpha, nu, logd, tol=1e-6):
    """Evaluate the Matern-type covariance for the distance array d.

    Uses the interpolator from geninterp over log-distance for speed,
    and patches in the analytic small-argument limit where d == 0.
    """
    # Smallest distance supported by the interpolation table (radians).
    mindist = 0.0000166
    f = geninterp(mindist/alpha, np.pi/alpha, sig2, nu, tol=tol)
    # dx = 1e-3
    # x = np.arange(np.log(mindist/alpha), np.log(np.pi/alpha) + dx, dx)
    # y = sig2*2**(nu - 1)/special.gamma(nu)*np.exp(x*nu)
    # y *= special.kv(nu, np.exp(x))
    # f = interpolate.interp1d(x, y, bounds_error=False)
    covar = f(logd - np.log(alpha))
    # Zero separation: evaluate the kernel at a tiny epsilon instead of 0
    # (x**nu * K_nu(x) is finite as x -> 0 for nu > 0).
    eps = 1e-10
    val = sig2*2**(nu - 1)/special.gamma(nu)*(eps)**nu*special.kv(nu, eps)
    covar[np.where(d == 0)] = val
    return covar
def gaussianprocess(d, d_cross, logd, logd_cross, y, var,
                    sig2, alpha, nu, tol=1e-6):
    """Gaussian-process conditional mean at the cross points.

    Computes K12 @ inv(K11 + var*I) @ y, i.e. the GP posterior mean with
    a Matern-type kernel, where d/logd are the training-point distances
    and d_cross/logd_cross the train-to-prediction distances.
    """
    # Training covariance plus observation noise on the diagonal.
    K11 = covariance(d, sig2, alpha, nu, logd, tol=tol) + np.identity(d.shape[0])*var
    K11_inv = np.linalg.inv(K11)
    K12 = covariance(d_cross, sig2, alpha, nu, logd_cross, tol=tol)
    return np.linalg.multi_dot([K12, K11_inv, y])
| true |
44a87baa535c963720da8cddef7415c311484e77 | Python | igorbonato/py_training | /a.p.i/apis.py | UTF-8 | 334 | 3.296875 | 3 | [] | no_license | import requests
url = 'https://api.exchangerate-api.com/v6/latest'
# Fetch the latest exchange rates (base currency: the API default).
req = requests.get(url)
print(req.status_code)
dados = req.json()
print(dados)
# Read an amount in Brazilian reais and convert it to US dollars
# by dividing by the BRL-per-base rate.
valor_reais = float(input("informe o valor em reais a ser convertido\n"))
cotacao = dados['rates']['BRL']
print(f'R$ {valor_reais} em dólar valem US$ {(valor_reais / cotacao):.2f}')
| true |
bbbfa61467ae123b6482dacbedfabd16a71c7b74 | Python | Melissa201197/Trabajos_de_python | /pyton spider/def_nombre1,nombre2.py | UTF-8 | 227 | 3.3125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Tue Jan 26 11:56:38 2021
@author: USER
"""
def hola2(nombre1, nombre2):
    """Greet two people by name; the second greeting ends with an extra blank line."""
    for quien, extra in ((nombre1, ()), (nombre2, ("\n",))):
        print("Hola!", quien, *extra)

hola2("Emmanuel","Ndubuisi")
hola2("Nnamdi","Sebastian")
9add48176131f56c1d0ccc194ca6e3c92783a400 | Python | mhilmiasyrofi/tensorfuzz | /lib/mutation_functions.py | UTF-8 | 3,042 | 2.71875 | 3 | [
"LicenseRef-scancode-generic-cla",
"Apache-2.0"
] | permissive | # Copyright 2018 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# https://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions that mutate inputs for coverage guided fuzzing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# pylint: disable=too-many-locals
def do_basic_mutations(
    corpus_element, mutations_count, constraint=None, a_min=-1.0, a_max=1.0
):
    """Mutates image inputs with white noise.

    Args:
      corpus_element: A CorpusElement object. It's assumed in this case that
        corpus_element.data[0] is a numpy representation of an image and
        corput_element.data[1] is a label or something we *don't* want to change.
      mutations_count: Integer representing number of mutations to do in
        parallel.
      constraint: If not None, a constraint on the norm of the total mutation
        relative to the element's oldest ancestor (L-infinity clip).
      a_min: Lower clipping bound for mutated pixel values.
      a_max: Upper clipping bound for mutated pixel values.

    Returns:
      A list of batches, the first of which is mutated images and the second of
      which is passed through the function unchanged (because they are image
      labels or something that we never intended to mutate).
    """
    # Here we assume the corpus.data is of the form (image, label)
    # We never mutate the label.
    if len(corpus_element.data) > 1:
        image, label = corpus_element.data
        image_batch = np.tile(image, [mutations_count, 1, 1, 1])
    else:
        image = corpus_element.data[0]
        image_batch = np.tile(image, [mutations_count] + list(image.shape))
    # Fixed white-noise scale for every mutation.
    sigma = 0.2
    noise = np.random.normal(size=image_batch.shape, scale=sigma)
    if constraint is not None:
        # (image - original_image) is a single image. it gets broadcast into a batch
        # when added to 'noise'
        ancestor, _ = corpus_element.oldest_ancestor()
        original_image = ancestor.data[0]
        original_image_batch = np.tile(
            original_image, [mutations_count, 1, 1, 1]
        )
        # Clip the *cumulative* drift from the ancestor, not just this step.
        cumulative_noise = noise + (image_batch - original_image_batch)
        # pylint: disable=invalid-unary-operand-type
        noise = np.clip(cumulative_noise, a_min=-constraint, a_max=constraint)
        mutated_image_batch = noise + original_image_batch
    else:
        mutated_image_batch = noise + image_batch
    # Keep pixel values inside the valid input range.
    mutated_image_batch = np.clip(
        mutated_image_batch, a_min=a_min, a_max=a_max
    )
    if len(corpus_element.data) > 1:
        label_batch = np.tile(label, [mutations_count])
        mutated_batches = [mutated_image_batch, label_batch]
    else:
        mutated_batches = [mutated_image_batch]
    return mutated_batches
| true |
35c2c39c08db499cfb1edfd487f90729c6459bc4 | Python | janB003/OpenCV-Image-Processing | /Grabcut.py | UTF-8 | 1,512 | 2.671875 | 3 | [] | no_license | import numpy as np
import cv2
from matplotlib import pyplot as plt
class Grabcut:
    """Foreground extraction with OpenCV's GrabCut, seeded from a saved mask image."""
    def grabC(self, img):
        """Run mask-initialised GrabCut on `img`, returning foreground-only pixels.

        NOTE(review): the seed mask path is hard-coded below
        ('images/d435colored_4_masked.png'), so this only works when run
        from a directory where that file exists.
        """
        mask = np.zeros(img.shape[:2], np.uint8)
        bgdModel = np.zeros((1, 65), np.float64)
        fgdModel = np.zeros((1, 65), np.float64)
        #this is with the rectangle method, you'll need to manually input your own rect parameters
        # rect = (544, 170, 300, 310)
        # cv2.grabCut(img, mask, rect, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_RECT)
        #
        # mask2 = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        # img3 = img * mask2[:, :, np.newaxis]
        # .............................
        # newmask is the mask image I manually labelled
        # newmask = cv2.imread('images/sidewalk31.png', 0)
        newmask = cv2.imread('images/d435colored_4_masked.png', 0)
        # Grey pixels (0 < value < 255) are treated as "probable foreground".
        mask = np.where(((newmask > 0) & (newmask < 255)), cv2.GC_PR_FGD, 0).astype('uint8')
        # whereever it is marked white (sure foreground), change mask=1
        # whereever it is marked black (sure background), change mask=0
        mask[newmask == 0] = 0
        mask[newmask == 255] = 1
        mask, bgdModel, fgdModel = cv2.grabCut(img, mask, None, bgdModel, fgdModel, 5, cv2.GC_INIT_WITH_MASK)
        # Collapse GrabCut's 4-state mask to a binary foreground mask.
        mask = np.where((mask == 2) | (mask == 0), 0, 1).astype('uint8')
        img2 = img * mask[:, :, np.newaxis]
        return img2
    def GC(self, img):
        """Convenience wrapper around grabC()."""
        image = self.grabC(img)
        return image
| true |
aee0eba29ff4eaef39a8ecd67a1107b1ebc13750 | Python | 572470467/BucketClient | /table_roller/Lines.py | UTF-8 | 697 | 2.703125 | 3 | [] | no_license | import time
from flask import Flask, jsonify
import random
app = Flask(__name__)
# Device names served by the "s" and "n" endpoints respectively.
list0=['motor0','motor1','motor2','motor3','sensor0','sensor1','sensor2','sensor3']
list1=['motor4','motor5','motor6','sensor4','sensor5','sensor6','sensor7']

def random_s():
    """Return a fake status dict for the "s" devices: name -> '0' or '1'."""
    return {name: str(random.randrange(2)) for name in list0}

def random_n():
    """Return a fake status dict for the "n" devices: name -> '0' or '1'."""
    return {name: str(random.randrange(2)) for name in list1}
@app.route('/s/status/')
def status_s():
    """HTTP endpoint: randomized JSON status for the "s" device group."""
    d=random_s()
    return jsonify(d)
@app.route('/n/status/')
def status_n():
    """HTTP endpoint: randomized JSON status for the "n" device group."""
    d=random_n()
    return jsonify(d)
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run(port=5000)
| true |
60070a28971275d4824022c548ccd631253198b0 | Python | joosthoi1/chip8-checkbox | /RAM.py | UTF-8 | 125 | 2.671875 | 3 | [] | no_license | def ram(kb):
KB = 1024
return ["0"]*(KB*kb)
if __name__ == "__main__":
ram = Ram(1)
print(ram.ram)
| true |
e3a3e72c3be0a82f7226bd59f51e7266408ccfd3 | Python | sh999/Python-Study-Snippets | /multithreading.py | UTF-8 | 471 | 3.40625 | 3 | [] | no_license | '''
multithreading.py
Reference:
==========
http://www.tutorialspoint.com/python/python_multithreading.htm
'''
import thread
import time
def print_time(threadName, delay):
    """Print the current time five times, pausing `delay` seconds between prints."""
    count = 0
    while count < 5:
        time.sleep(delay)
        count += 1
        print "%s: %s" % (threadName, time.ctime(time.time()))
# Spawn two worker threads with different delays (Python 2 `thread` module).
try:
    thread.start_new_thread(print_time, ("Thread-1", 2, ))
    thread.start_new_thread(print_time, ("Thread-2", 4, ))
except:
    print "Error: unable to start thread"
# Busy-wait to keep the main thread alive; `thread` workers die when the
# interpreter exits.
while 1:
    pass
| true |
63981df2c268ccfebaf9b80262e5d22092e92299 | Python | HaedeunH/1Day1Commit | /people1/20201016 위장.py | UTF-8 | 295 | 3.0625 | 3 | [] | no_license | import collections
def solution(clothes):
    """Count the non-empty outfit combinations (programmers.co.kr "위장").

    `clothes` is a list of (name, category) pairs. At most one item per
    category may be worn, so the answer is prod(count_c + 1) - 1.
    """
    per_category = collections.Counter(category for _, category in clothes)
    combos = 1
    for count in per_category.values():
        combos *= count + 1
    # Subtract the single "wear nothing" combination.
    return combos - 1
| true |
80b72406480c84077d51d0d53b7ff15ceb275dfc | Python | macinnis82/lending_club | /prob_lending_club.py | UTF-8 | 757 | 2.8125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 25 10:20:19 2015
@author: Administrator
"""
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as stats
# Load the Lending Club sample data directly from S3.
loansData = pd.read_csv('https://spark-public.s3.amazonaws.com/dataanalysis/loansData.csv')
# Clean data
loansData.dropna(inplace=True)
# NOTE(review): savefig() is called after show() in each section below;
# once the window is closed the current figure may be empty, so the saved
# PNGs can come out blank -- consider saving before show().
# Boxplot
loansData.boxplot(column='Amount.Requested')
plt.show()
plt.savefig("boxplot.png")
# Histogram
loansData.hist(column='Amount.Requested')
plt.show()
plt.savefig("histogram.png")
# QQ Plot
plt.figure()
graph = stats.probplot(loansData['Amount.Requested'], dist="norm", plot=plt)
plt.show()
plt.savefig("qqplot.png")
"""
The results are pretty close to the graphs generated
when looking at the Amount.Funded.By.Investors column
"""
9ebde2584cfbd7d1ba67fab690c9fe55da7c612f | Python | edinsonviveros/mision-tic-2021-ciclo_python | /G55/Unidad-2/Clase_1/if.py | UTF-8 | 468 | 3.953125 | 4 | [] | no_license | a = 0
# print(f'a = {a}')
# if a > 0:
# print(f'{a} es mayor que 0')
# else:
# if a == 0:
# print('a es igual a 0')
# else:
# print(f'{a} es menor que 0')
print(f'a = {a}')
# Classify a (set above) as zero, positive or negative.
if a == 0:
    print('a es igual a 0')
elif a > 0:
    print(f'{a} es mayor que 0')
else:
    print(f'{a} es menor que 0')
#---------
# NOTE(review): input() returns str in Python 3, so the commented-out
# comparison below (a > 0) would raise TypeError if re-enabled unchanged.
a = input("Ingrese valor: ")
b = input("ingrese valor: ")
# if a > 0 and b > 0:
# print("a y b son maña")
# else:
# if a
424f79bb42fb511e85dd1bb41e31200f2ac911e4 | Python | 196884/Python | /PE/pe0213.py | UTF-8 | 1,608 | 2.78125 | 3 | [] | no_license | from mpmath import *
mp.dps = 22
N = 30
N2 = N * N
cache = dict()
def density(i, j, k):
    """Distribution over the N x N grid after k random jumps from (i, j).

    Each jump moves uniformly to one of the in-bounds 4-neighbours.
    Returns a flat list of N*N mpf probabilities indexed x + N*y.
    Results are memoised in the module-level `cache` dict.
    """
    # density function starting at (i, j) and jumping k times:
    cached = cache.get((i, j, k), None)
    if cached != None:
        return cached
    if k == 0:
        # Base case: all probability mass on the starting cell.
        r = [ mpf(0) for z in range(0, N2)]
        r[i + N * j] = mpf(1)
    else:
        r = [ mpf(0) for z in range(0, N2)]
        b = mpf(0)
        # Sum the k-1-step distributions of each in-bounds neighbour,
        # counting neighbours in b for the final normalisation.
        if j > 0:
            b += 1
            aux = density(i, j-1, k-1)
            for z in range(0, N2):
                r[z] += aux[z]
        if j < N-1:
            b += 1
            aux = density(i, j+1, k-1)
            for z in range(0, N2):
                r[z] += aux[z]
        if i > 0:
            b += 1
            aux = density(i-1, j, k-1)
            for z in range(0, N2):
                r[z] += aux[z]
        if i < N-1:
            b += 1
            aux = density(i+1, j, k-1)
            for z in range(0, N2):
                r[z] += aux[z]
        for z in range(0, N2):
            r[z] /= mpf(b)
    cache[(i, j, k)] = r
    if k >= 20:
        # Progress indicator for the expensive upper levels.
        print (i, j, k, len(cache))
    return r
def solve():
    """Print the expected number of unoccupied squares after 50 jumps.

    For each square (i, j), multiplies the probabilities that the flea
    starting from every square (a, b) is NOT at (i, j) after 50 jumps,
    then sums those products (linearity of expectation, PE problem 213).
    """
    n = mpf(0)
    for i in range(0, N):
        for j in range(0, N):
            print (i, j)
            acc = mpf(1)
            for a in range(0, N):
                for b in range(0, N):
                    aux = density(a, b, 50)
                    acc *= mpf(1) - aux[i + N * j]
            n += acc
    # NOTE(review): Python 2 print statement -- this module targets Python 2.
    print n
    return 0
if __name__ == "__main__":
    # could use symmetry,... to speed up, but this works
    # NOTE(review): solve() always returns 0 -- the real answer is printed
    # inside solve(), so this line always prints "Result: 0".
    result = solve()
    print "Result: %d" % result
ed9c7b3127838ec50dbaeb4238e09b87dd817958 | Python | 474416133/py-orm | /account-user/utils/__init__.py | UTF-8 | 1,732 | 2.71875 | 3 | [] | no_license | #-*- encoding:utf-8 -*-
__author__ = "474416133@qq.com"
import asyncio
class Singleton(type):
    """Metaclass that makes each class using it a singleton.

    The first instantiation builds and stores the instance on the class;
    every later call returns that same object.
    """
    _instance = None

    def __call__(cls, *args, **kwargs):
        # Bug fixes: the original unpacked kwargs with a single star
        # (`*kwargs`, passing only the keys positionally), never stored the
        # created instance (so a fresh object was built on every call), and
        # printed a stray debug line.
        if cls._instance is None:
            cls._instance = super(Singleton, cls).__call__(*args, **kwargs)
        return cls._instance
import asyncio
class IDgen(object):
    """
    Snowflake-style async ID generator:
    (loop-time seconds << 21) | (partition << 11) | sequence.
    Note: timestamps come from the event loop's clock, not the epoch.
    """
    def __init__(self):
        self.partion = 1          # partition/shard identifier
        self.partion_bit = 10     # bits reserved for the partition field
        self.seq_bit = 11         # bits reserved for the per-second sequence
        self.max_seq = 1<<10      # NOTE(review): 1024, though seq_bit allows up to 2047 -- confirm intent
        self.seq = 0              # sequence within the current second
        self.last_time = None     # second in which the previous id was issued
        self.time_bit_skip = self.partion_bit + self.seq_bit
    async def __call__(self):
        _loop = asyncio.get_event_loop()
        time_now = int(_loop.time())
        if self.last_time == time_now:
            if self.seq < self.max_seq:
                self.seq += 1
            else:
                # Sequence exhausted for this second: back off and retry.
                await asyncio.sleep(0.1)
                _id = await self()
                return _id
        elif not self.last_time or self.last_time < time_now:
            # New second: reset the sequence counter.
            self.last_time = time_now
            self.seq = 0
        return (time_now << self.time_bit_skip) + \
               (self.partion << self.seq_bit) + \
               self.seq
async def _f():
    """Demo driver: generate and print ids until the counter passes 11."""
    count = 0
    idgen = IDgen()
    while 1:
        _id = await idgen()
        print("id:%s"%_id)
        if count > 11:
            break
        count += 1
if __name__ == "__main__":
    # Use uvloop as the asyncio event-loop implementation.
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
    #idgen = IDgen()
    count = 0
    loop = asyncio.get_event_loop()
    task = loop.create_task(_f())
    loop.run_until_complete(task)
    print("========================")
    # Keep the loop alive after the demo task finishes (blocks forever).
    loop.run_forever()
    #print(len(s))
bdcf0c9cef17e6fb2176eb21afb70a4751ab100d | Python | katryo/leetcode | /515-find-largest-value-in-each-tree-row/solution.py | UTF-8 | 1,195 | 3.625 | 4 | [] | no_license | from collections import deque
# Definition for a binary tree node.
class TreeNode:
    def __init__(self, x):
        self.val = x       # node value
        self.left = None   # left child (TreeNode or None)
        self.right = None  # right child (TreeNode or None)
class Solution:
    def largestValues(self, root):
        """Return the largest value on each level of a binary tree (BFS).

        :type root: TreeNode
        :rtype: List[int]
        """
        if not root:
            return []
        ans = []
        frontier = deque([(root, 0)])
        while frontier:
            node, depth = frontier.popleft()
            if depth < len(ans):
                # Level already seen: keep the running maximum.
                if node.val > ans[depth]:
                    ans[depth] = node.val
            else:
                # First node of a new level (BFS visits levels in order).
                ans.append(node.val)
            if node.left:
                frontier.append((node.left, depth + 1))
            if node.right:
                frontier.append((node.right, depth + 1))
        return ans
if __name__ == '__main__':
    # Build the sample tree 1 / (3, 2), 3->left 5, 2->right 9;
    # expected output: [1, 3, 9].
    s = Solution()
    root = TreeNode(1)
    n2 = TreeNode(2)
    n3 = TreeNode(3)
    root.left = n3
    root.right = n2
    n5 = TreeNode(5)
    n3.left = n5
    n9 = TreeNode(9)
    n2.right = n9
    print(s.largestValues(root))
| true |
00be659d0acdb3dd63ef85c208034488be8bd1f0 | Python | SeanyDcode/codechallenges | /dailychallenge363.py | UTF-8 | 303 | 3.8125 | 4 | [] | no_license | # from dailycodingproblem.com
#
# Daily Challenge #363
# Write a function, add_subtract, which alternately adds and subtracts curried arguments. Here are some sample operations:
#
# add_subtract(7) -> 7
#
# add_subtract(1)(2)(3) -> 1 + 2 - 3 -> 0
#
# add_subtract(-5)(10)(3)(9) -> -5 + 10 - 3 + 9 -> 11
| true |
527e44abc76339348f9c68e39ce3299cf63f92aa | Python | Phantomn/Python | /study/byteofpython/except_handle.py | UTF-8 | 209 | 3.765625 | 4 | [] | no_license | try:
    # Prompt for a line of input (raw_input: this is Python 2 code).
    text = raw_input('Enter something --> ')
except EOFError:
    # Ctrl-D (EOF) at the prompt.
    print 'Why did you do an EOF on me?'
except KeyboardInterrupt:
    # Ctrl-C during the prompt.
    print 'You cancelled the operation.'
else:
    # No exception occurred: echo the input back.
    print 'You entered %s'%(text)
2ba7b4f4ff828146d9dd8ec9d9543efbfd4f6969 | Python | espnet/espnet | /espnet2/text/cleaner.py | UTF-8 | 2,140 | 2.921875 | 3 | [
"Apache-2.0"
] | permissive | from typing import Collection
import tacotron_cleaner.cleaners
from jaconv import jaconv
from typeguard import check_argument_types
try:
from vietnamese_cleaner import vietnamese_cleaners
except ImportError:
vietnamese_cleaners = None
from espnet2.text.korean_cleaner import KoreanCleaner
try:
from whisper.normalizers import BasicTextNormalizer, EnglishTextNormalizer
except (ImportError, SyntaxError):
BasicTextNormalizer = None
class TextCleaner:
    """Text cleaner applying one or more named normalisation passes in order.

    Supported types: "tacotron", "jaconv", "vietnamese", "korean_cleaner",
    "whisper_en", "whisper_basic".

    Examples:
        >>> cleaner = TextCleaner("tacotron")
        >>> cleaner("(Hello-World); & jr. & dr.")
        'HELLO WORLD, AND JUNIOR AND DOCTOR'
    """
    def __init__(self, cleaner_types: Collection[str] = None):
        assert check_argument_types()
        # Normalise cleaner_types to a list (None -> [], str -> [str]).
        if cleaner_types is None:
            self.cleaner_types = []
        elif isinstance(cleaner_types, str):
            self.cleaner_types = [cleaner_types]
        else:
            self.cleaner_types = list(cleaner_types)
        # Whisper normalizers are instantiated once here; only available
        # when the optional `whisper` package imported successfully.
        self.whisper_cleaner = None
        if BasicTextNormalizer is not None:
            for t in self.cleaner_types:
                if t == "whisper_en":
                    self.whisper_cleaner = EnglishTextNormalizer()
                elif t == "whisper_basic":
                    self.whisper_cleaner = BasicTextNormalizer()
    def __call__(self, text: str) -> str:
        """Apply each configured cleaner to `text` in order and return the result."""
        for t in self.cleaner_types:
            if t == "tacotron":
                text = tacotron_cleaner.cleaners.custom_english_cleaners(text)
            elif t == "jaconv":
                text = jaconv.normalize(text)
            elif t == "vietnamese":
                if vietnamese_cleaners is None:
                    raise RuntimeError("Please install underthesea")
                text = vietnamese_cleaners.vietnamese_cleaner(text)
            elif t == "korean_cleaner":
                text = KoreanCleaner.normalize_text(text)
            elif "whisper" in t and self.whisper_cleaner is not None:
                text = self.whisper_cleaner(text)
            else:
                raise RuntimeError(f"Not supported: type={t}")
        return text
| true |
080e1739618d3f74b04ac22ef4deb9ec96aab8cb | Python | BlakeMScurr/nitro-paper | /code/tests.py | UTF-8 | 795 | 2.84375 | 3 | [] | no_license | from overlap import overlap
from remove import remove
from cap import cap
from transfer import transfer
from claim import claim
def test_overlap():
    # Exercises overlap() from overlap.py with and without a binding cap.
    assert overlap('a', [['a', 5], ['b', 3]], 10) == 5
    assert overlap('b', [['a', 5], ['b', 3]], 10) == 3
    assert overlap('b', [['a', 5], ['b', 3]], 7) == 2
def test_remove():
    # Exercises remove() from remove.py: deducting from a named entry.
    assert remove([['a', 5], ['b', 3]], 'a', 5) == [['a', 0], ['b', 3]]
    assert remove([['a', 5], ['b', 3]], 'b', 2) == [['a', 5], ['b', 1]]
def test_transfer():
    # Exercises transfer() from transfer.py: full, partial and missing-name cases.
    assert transfer('a', [['a', 5]], 10, 5) == (5, [['a', 0]], 5)
    assert transfer('a', [['a', 5]], 10, 2) == (2, [['a', 3]], 8)
    assert transfer('b', [['a', 5]], 10, 2) == (0, [['a', 5]], 10)
def test_claim():
    # Exercises claim() from claim.py on a single full claim.
    assert claim('b', ['b'], [['a', 5], ['b', 5]], 5, 5) == (5, ['b'], [['a', 5], ['b', 0]], 0)
| true |
6f120bf06c34fb86b4393c7002b6e99d322efeaa | Python | AwesomeCrystalCat/py_s00 | /t10_do_op/do_op.py | UTF-8 | 716 | 4.625 | 5 | [] | no_license | print("---- Simple calculator ----")
print("Let's add some numbers")
a = input("Input your first value: ")
op = input("Input your operator: ")
# Only proceed for the four supported operators.
if (op == "+" or op == "-" or op == "*" or op == "/"):
    b = input("Input your second value: ")
    # NOTE(review): str.isnumeric() rejects negatives and decimals,
    # so only non-negative integers are accepted here.
    if (a.isnumeric() and b.isnumeric()):
        a = int(a)
        b = int(b)
        if (op == "+"):
            res = a + b
        elif (op == "-"):
            res = a - b
        elif (op == "/"):
            # NOTE(review): division by zero is not guarded and would raise.
            res = a / b
        elif (op == "*"):
            res = a * b
        print(f"{a} {op} {b} = {res}")
    else:
        print("Enter correct numbers")
else:
    print("usage: the operator must be '*' or '+' or '-' or '/'")
print("---- Simple calculator ----")
| true |
9671746d47ef37b9676aea3ae1dbea8b716cb3e8 | Python | huangyingw/submissions | /1087/1087.brace-expansion.236740230.Runtime-Error.leetcode.py | UTF-8 | 1,002 | 3.203125 | 3 | [] | no_license | class Solution(object):
def permute(self, S):
if not S:
return []
if '{' not in S:
return [S]
stack, stack2 = [], []
brace = 0
for char in S:
if char == '{':
brace = 1
elif char == '}':
if not stack:
stack = stack2
else:
new_stack = []
for char in stack:
for char2 in stack2:
new_stack.append(char + char2)
stack = new_stack
stack2 = []
brace = 2
elif char != ',':
if brace == 1:
stack2.append(char)
elif brace == 2:
stack = [c + char for c in stack]
stack2 = []
else:
stack.append(char)
stack.sort()
stack.sort(key=len)
return stack
| true |
8d1c9fbbb46572967397224021ae4400e54bbf12 | Python | BUCT-CS1701-SE-Design/webDataCollectionSystem | /MUSEUMS/spiders/museum68.py | UTF-8 | 1,873 | 2.515625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
from MUSEUMS.items import MuseumsItem #包含这个item类,必须设置
class Museum68Spider(scrapy.Spider):
    """Single-record spider: yields one hard-coded item for museum #68
    (安源路矿工人运动纪念馆); the fetched page content is not inspected."""
    # Route this spider's items through the project's MuseumsPipeline only.
    custom_settings={
        'ITEM_PIPELINES':{'MUSEUMS.pipelines.MuseumsPipeline': 5,}
    }
    name = 'museum68'
    allowed_domains = ['aymuseum.com']
    start_urls = ['http://www.aymuseum.com/']

    def parse(self, response):
        # All fields are fixed metadata; `response` is unused.
        item=MuseumsItem()
        item["museumID"]=68
        item["museumName"]='安源路矿工人运动纪念馆'
        item["Location"]='江西省萍乡市安源区正街路'
        item["telephone"]='(0799)7101123'
        item["Link"]='http://www.aymuseum.com/'
        item["opentime"]='星期二至星期日开放 星期一闭馆检修;夏季9:00——17:30(17:00停止入内) 冬季9:00——17:00(16:30停止入内)'
        item["introduction"]='安源是中国工人运动的摇篮,是中国近代工业发祥地,是湘赣边界秋收起义策源地和主要爆发地。安源路矿工人运动纪念馆是为征集和保护安源工人运动的文物,研究和宣传安源革命斗争历史而于1956年创建的专题类博物馆,1968年兴建了陈列大楼,1984年邓小平同志为纪念馆题写馆名。馆区面积10万平方米,建筑面积21341平方米,展厅面积3567平方米(其中陈列馆面积3245平方米,安源工运时期廉政建设陈列馆面积322平方米)。我馆现有馆藏文物与资料5000余件,其中一级文物61件/套,二级文物67件/套,三级文物2050件/套;负责保护和宣传的文物保护单位共14处,其中全国重点文物保护单位4处,省级文物保护单位8处,市级文物保护单位1处,区级文物保护单位1处。下设有安源工运时期廉政建设陈列馆和中共湖南省委在安源革命活动展览馆两个展览馆。'
        yield item
| true |
5c8f4db91307459bd70fcd04ff09e2cb8a4a5039 | Python | RacleRay/SimpleDiagnoseAssistant | /model/neCheck/rnn.py | UTF-8 | 1,471 | 2.59375 | 3 | [] | no_license | import torch
import torch.nn as nn
class NEmodel(nn.Module):
    """RNN classifier for padded, variable-length sequences.

    Inputs are packed (so padding steps are skipped by the RNN), run through a
    single RNN layer, and the final hidden state is projected to per-class
    log-probabilities.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super().__init__()
        # batch_first: inputs arrive as (batch, seq_len, input_size)
        self.rnn = nn.RNN(input_size, hidden_size, batch_first=True)
        self.fc = nn.Linear(hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, inputs, length):
        # pack_padded_sequence requires the length tensor on the CPU
        packed = nn.utils.rnn.pack_padded_sequence(
            inputs, length.to('cpu'), batch_first=True, enforce_sorted=False)
        per_step, last_hidden = self.rnn(packed)
        # Unpack the per-step outputs (kept for parity with the original code;
        # only the final hidden state feeds the classifier below).
        per_step, _ = nn.utils.rnn.pad_packed_sequence(per_step, batch_first=True)
        logits = self.fc(last_hidden.squeeze(0))
        return self.softmax(logits)
class RNN(nn.Module):
    """Naive single-step (Elman-style) RNN cell.

    Each call consumes one time step: the input and previous hidden state are
    concatenated, mapped to the next hidden state, and separately mapped to
    log-probabilities over the output classes.

    BUGFIX: ``fc_2`` was declared with ``in_features=input_size + hidden_size``
    but applied to the ``hidden_size``-wide output of ``fc_1`` (a guaranteed
    shape error), and its output width was ``hidden_size`` instead of
    ``output_size``. Both layers now consume the concatenated vector, and
    ``fc_2`` emits ``output_size`` logits.
    """

    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        # input-to-hidden: next hidden state from (input, hidden)
        self.fc_1 = nn.Linear(input_size + hidden_size, hidden_size)
        # input-to-output: class scores from the same concatenated vector
        self.fc_2 = nn.Linear(input_size + hidden_size, output_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, inputs, hidden, length):
        """One recurrence step.

        ``length`` is accepted for signature compatibility with NEmodel-style
        callers but is not used by this per-step cell.
        """
        combined = torch.cat([inputs, hidden], 1)
        hidden = self.fc_1(combined)
        output = self.softmax(self.fc_2(combined))
        return output, hidden
2cee9327a383d0d9939c1a882c0fff42b20e7ad7 | Python | yeqinghuang516/UCSD-ECE285-Object-Detection-Using-Deep-Learning | /Mask RCNN/model.py | UTF-8 | 1,239 | 2.578125 | 3 | [] | no_license | import torch
import torchvision as tv
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor
class MaskRCNN(torch.nn.Module):
    """Torchvision Mask R-CNN (ResNet-50 FPN) with both prediction heads
    re-sized for ``num_classes`` output classes."""

    def __init__(self, num_classes, pretrained = True):
        super(MaskRCNN, self).__init__()
        net = tv.models.detection.maskrcnn_resnet50_fpn(pretrained = pretrained)
        # Swap the box-classification head for one matching our class count.
        box_features = net.roi_heads.box_predictor.cls_score.in_features
        net.roi_heads.box_predictor = FastRCNNPredictor(box_features, num_classes)
        # Swap the mask head as well, keeping a 256-channel hidden layer.
        mask_features = net.roi_heads.mask_predictor.conv5_mask.in_channels
        net.roi_heads.mask_predictor = MaskRCNNPredictor(mask_features, 256, num_classes)
        self.model = net

    def forward(self, images, targets = None):
        # Delegates to the wrapped torchvision model — presumably a loss dict
        # in training mode and detections in eval mode, per torchvision's API.
        return self.model(images, targets)
def anagrams(lst):
    """Group words that are anagrams of each other.

    :param lst: a list of strings
    :return: sorted list of groups (each a list in input order) that contain
        at least two mutually anagrammatic words
    """
    # Words that are anagrams of each other share the same sorted-character key.
    groups = {}
    for word in lst:
        key = ''.join(sorted(word))
        # setdefault replaces the membership-test-plus-double-lookup insert.
        groups.setdefault(key, []).append(word)
    # Keep only genuine anagram groups (two or more members); sort for a
    # deterministic result, matching the original behaviour.
    return sorted(value for value in groups.values() if len(value) >= 2)
| true |
aff1da369b0ff68c0f4dafb41d488948e245424c | Python | sswwd95/Study | /keras/keras45_ModelCheckPoint2_datetime.py | UTF-8 | 6,026 | 3.078125 | 3 | [] | no_license | #체크포인트에 날짜와 시간 표시
# Load MNIST, scale/one-hot the data, and define a small CNN classifier.
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.datasets import mnist

(x_train, y_train), (x_test, y_test) = mnist.load_data()
print(x_train.shape, y_train.shape) # (60000, 28, 28) (60000,)
print(x_test.shape, y_test.shape) # (10000, 28, 28) (10000,)
print(x_train[0])
print(y_train[0])
print(x_train[0].shape) #(28,28)
print(np.max)  # NOTE(review): prints the function object itself — probably meant np.max(x_train)

# Scale pixels to [0, 1] and add the single channel axis expected by Conv2D.
x_train = x_train.reshape(60000,28,28,1).astype('float32')/255.
# maximum value of x_train: 255
# .astype('float32') -> converts the integer pixel values to floats
x_test= x_test.reshape(10000,28,28,1)/255.
# dividing by 255. also yields floats directly
#x_test = x_test.reshape(x_test.shape[0],x_test.shape[1],x_test.shape[2],1)) -> prefer this shape-derived form when coding!

#OneHotEncoding
from tensorflow.keras.utils import to_categorical
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
print(y_train.shape) #(60000,10)

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout

# Small CNN: conv/pool feature extractor followed by a dense softmax classifier.
model = Sequential()
model.add(Conv2D(filters=10, kernel_size=(2,2), padding='same', strides=1, input_shape=(28,28,1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(9,2))
model.add(Conv2D(8,2))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(40,activation='relu'))
model.add(Dense(10,activation='softmax'))
model.summary()
'''
########################################################################################################################################
import datetime # 컴퓨터에서 제공되는 시간과 동일. 클라우드에서 쓰면 미국시간으로 된다. 한국시간으로 바꿔서 잡아주기. 코랩은 영국시간 기준
# 덮어쓰기 할 경우 구분하기 좋다. ..?
date_now = datetime.datetime.now() # 문제점 : 여기 시간으로 고정된다. 분이 넘어가도 수정안됨.
# 체크포인트 내로 now()를 넣어서 수정
print(date_now)
date_time = date_now.strftime('%m%d_%H%M') #strttime = startime # 월, 일, 시간, 분
print(date_time)
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
filepath = '../data/modelcheckpoint/' # 경로 변수만들기
filename = '_{epoch:02d}-{val_loss:.4f}.hdf5' # 파일 이름 변수 만들기
# stirng끼리 합치기 ("". join은 빈 공백 안에 넣는다는 것)
modelpath = "".join([filepath,"k45_",date_time, filename])
print(modelpath) #../data/modelcheckpoint/k45_0127_1018_{epoch:02d}-{val_loss:.4f}.hdf5
# modelpath = '../data/modelcheckpoint/k45_mnist_{epoch:02d}-{val_loss:.4f}.hdf5'(기존의 시간 포함 안한 경로)
#########################################################################################################################################
'''
########################################################################################################################################
import datetime # uses the host machine's clock; on cloud/Colab this may be US/UK time — convert to local (Korean) time if needed
# A timestamp in the filename keeps checkpoint runs distinguishable instead of overwritten.
# date_time = datetime.datetime.now().strftime('%m_%d_%H_%M_%S') #strftime format: month, day, hour, minute, second
# print(date_time)

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

filepath = '../data/modelcheckpoint/' # directory part of the checkpoint path
filename = '_{epoch:02d}-{val_loss:.4f}.hdf5' # file-name template (epoch and val_loss filled in by Keras)
# concatenate the string pieces ("".join joins them with no separator)
modelpath = "".join([filepath,"k45_",format(datetime.datetime.now().strftime('%m_%d_%H_%M_%S'))+filename])
print(modelpath) #../data/modelcheckpoint/k45_0127_1018_{epoch:02d}-{val_loss:.4f}.hdf5
# modelpath = '../data/modelcheckpoint/k45_mnist_{epoch:02d}-{val_loss:.4f}.hdf5' (previous path without the timestamp)
#########################################################################################################################################
es = EarlyStopping(monitor='val_loss', patience=20, mode='min')
cp = ModelCheckpoint(filepath=modelpath , monitor='val_loss', save_best_only=True, mode='auto')

#3. compile, train
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])
hist = model.fit(x_train,y_train ,callbacks=[es,cp], epochs=1000, validation_split=0.2, batch_size=16)

#4. evaluate, predict
loss, acc = model.evaluate(x_test, y_test, batch_size=16)
print('loss, acc : ', loss, acc)

y_predict = model.predict(x_test[:10])
print(y_predict)
print(y_test[:10])
# loss, acc :  0.06896616518497467 0.9800999760627747

# visualisation
import matplotlib.pyplot as plt
plt.rc('font',family='Malgun Gothic') # select a Korean-capable font
plt.figure(figsize=(10,6)) # create the figure canvas

plt.subplot(2,1,1) #(first of a 2-row x 1-column grid)
plt.plot(hist.history['loss'],marker='.', c='red', label='loss')
plt.plot(hist.history['val_loss'],marker='.', c='blue', label='val_loss')
plt.grid()
# subplot draws two charts on one figure; a bare plot would use a single canvas

plt.title('손실비용')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(loc='upper right') # show the labels in the upper-right corner
# legend only displays labels — pick a location that does not cover the curves

plt.subplot(2,1,2) #(second of the 2x1 grid)
plt.plot(hist.history['acc'],marker='.', c='red') #history key must match the metrics name exactly
# points after the best epoch add little information and could be trimmed from the plot
plt.plot(hist.history['val_acc'],marker='.', c='blue')
plt.grid() # grid (graph-paper) background
plt.title('정확도')
plt.ylabel('acc')
plt.xlabel('epoch')
plt.legend(['acc','val_acc']) # label names can be passed directly; location auto-chosen
plt.show()

# loss, acc :  0.05949907749891281 0.9835000038146973
6a31d65f764c05a685affc5275f005a14a9519a8 | Python | gurpreetkamboz21/cognitive_service_poc | /CognitiveAPI/process_image/extract_remote.py | UTF-8 | 1,105 | 2.546875 | 3 | [
"MIT"
] | permissive | import os
import requests
from config import COMPUTER_VISION_CONFIG
API_KEY = COMPUTER_VISION_CONFIG['API_KEY']  # subscription key sent in the Ocp-Apim-Subscription-Key header
ENDPOINT = COMPUTER_VISION_CONFIG['OCR_ENDPOINT']  # full URL of the Computer Vision OCR endpoint
def init(image_url):
    """OCR the image at *image_url* and return the recognised plain text."""
    results = get_text(image_url)
    print(results)  # debug echo of the raw API response
    return parse_text(results)
def parse_text(results):
    """
    Flatten the OCR JSON (regions -> lines -> words) into plain text:
    each word followed by a space, each OCR line terminated by a newline.
    """
    pieces = []
    for region in results['regions']:
        for line in region['lines']:
            for word in line['words']:
                pieces.append(word['text'])
                pieces.append(' ')
            pieces.append('\n')
    return ''.join(pieces)
def get_text(image_url):
    """
    POST the image URL to the OCR endpoint and return the parsed JSON response.
    """
    print('Processing...')
    response = requests.post(
        ENDPOINT,
        headers={
            'Ocp-Apim-Subscription-Key': API_KEY
        },
        params={
            'language': 'en',
            'detectOrientation ': 'true',
            'visualFeatures': 'Categories, Description, Color',
        },
        json={
            'url': image_url},
    )
    return response.json()
a33c347acd991b3e64f658fbcb98deb2d10f7071 | Python | daum913/python_project | /assignments/if-1/if_1-3.py | UTF-8 | 415 | 4.5 | 4 | [] | no_license | # 문제 3.
# Read an age; if 20 or older print "adult", otherwise print how many years
# remain until adulthood in the form "<n> years later".
# Sample input: 18
# Sample output: 2 years later
age = int(input("나이를 입력하세요 : "))
age2 = 20-age  # years remaining until age 20 (negative for adults, unused then)
if age>=20 :
    print("adult")
elif age<20 :
    print("{0} years later".format(age2))
cb3490a3c00a31fc8106deeb6c88e2ed248af963 | Python | noozip2241993/learning-python | /csulb-is-601/notes/box-plot.py | UTF-8 | 153 | 2.5625 | 3 | [] | no_license | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the first two columns of Sheet1 into a DataFrame.
# NOTE(review): despite the file name ("box-plot"), nothing is plotted yet —
# matplotlib/numpy are imported but unused.
df = pd.read_excel("example.xlsx", usecols=[0,1], sheet_name="Sheet1")
print(df)
df5297fa7c9d631596751b4e7491ff27930ac572 | Python | XunylYasna/Murang-Algocom | /lovers/sevenminutes.py | UTF-8 | 1,030 | 3.4375 | 3 | [] | no_license | import re
import heapq
from heapq import heapify, heappop, heappush
def sum_all(n, num_list):
    """Greedily merge the two smallest values until one remains, accumulating
    the smaller of each merged pair; return that accumulator plus whatever is
    left in the heap.

    Mutates *num_list* in place (it is heapified and drained to one element,
    or left empty if it started empty). *n* is accepted but unused, matching
    the original signature.
    """
    total = 0
    heapq.heapify(num_list)  # in-place min-heap
    while len(num_list) > 1:
        smallest = heappop(num_list)
        runner_up = heappop(num_list)
        # smallest <= runner_up by heap order, so min() picks `smallest`.
        total += min(smallest, runner_up)
        heappush(num_list, smallest + runner_up)
    return total + sum(num_list)
# str = 'I think I like eating soup because soup is the best thing to like eating.'.lower()
# Read all of stdin until EOF or the first blank line.
# NOTE(review): `str` shadows the builtin — renaming would be safer.
str = []
while True:
    try:
        line = input()
        if line:
            str.append(line)
        else:
            break
    except EOFError:
        break
# NOTE(review): joining with no separator glues the last word of one line to
# the first word of the next — confirm the input guarantees this cannot matter.
str = ''.join(str)
str = str.lower()
str = re.findall(r'\w+', str)  # tokenize into words
str2 = set(str)  # distinct words
num_list = []
# Build the frequency count of every distinct word.
for word in str2 :
    # print('Frequency of ', word , 'is :', str.count(word))
    num_list.append(str.count(word))
# print(num_list)
# num_list = [2,1,2,2,2,1,1,1,1,1,1]
print(sum_all(len(num_list), num_list))
98838155ca69e2bceb6b79de71a3b514a73878ab | Python | nickklosterman/PythonSheriffSales | /SheriffSaleProcessors/SheriffSalesLinkCreatorWarrenCountyOhio.py | UTF-8 | 14,332 | 2.765625 | 3 | [] | no_license | #this program takes as input a saved search from the Montgomery County Sheriff sales site
#the input should be the detailed view listing of properties
import re
def striphtml(data):
    """Return *data* with every HTML tag (non-greedy <...> span) removed."""
    return re.sub(r'<.*?>', '', data)
def stripspaces(data):
    """Return *data* with each space replaced by '+' (URL-query style)."""
    return data.replace(' ', '+')
def stripampersand(data):
    """Return *data* with every literal '&' replaced by 'and'.

    Note this also rewrites the '&' inside an '&amp;' entity, yielding
    'andamp;' — unchanged from the original behaviour.
    """
    return data.replace('&', 'and')
def stripcomma(data):
    """Return *data* with every comma replaced by a single space."""
    return data.replace(',', ' ')
def prepprice(data):
    """Normalise a scraped price string: strip HTML tags, surrounding
    whitespace, '$' signs and commas.

    NOTE(review): '..' is first rewritten to ',' (per the original comment,
    to "take care of their double periods in numbers") and all commas are
    then deleted — so a '..' typo is removed outright rather than collapsed
    to a single '.', e.g. '1..50' -> '150'. Confirm that is intended.
    """
    temp=striphtml(data.rstrip())
    pd = re.compile(r'\.\.') #take care of their double periods in numbers
    pa = re.compile(r'\$')
    temp2=temp.strip() #remove whitespace
    p = re.compile(r',') #remove comma in price since that'll screw things up as far as outputting to csv.
    return p.sub('',pa.sub('',pd.sub(',',temp2)))
def UpdateRecordInDatabase(SaleDate,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,Zipcode,Appraisal,MinBidAmt,SaleAmt,SaleStatus,key):
    """Update the sale-outcome columns (SoldTo, SaleAmt, SaleStatus) of the
    Property row with primary key *key*.

    Only the post-sale columns are touched; the remaining arguments exist for
    signature symmetry with the other DB helpers and are not used here.
    """
    con = mdb.connect('localhost', 'nicolae', 'ceausescu', 'SheriffSales')
    with con:
        cur = con.cursor(mdb.cursors.DictCursor)
        # Debug echo of the statement (plain %-interpolation; the execute()
        # below uses proper parameter binding instead).
        print("UPDATE Property SET SoldTo=%s, SaleAmt=%s, SaleStatus=%s WHERE id=%s" % (SoldTo,SaleAmt,SaleStatus,key))
        cur.execute("UPDATE Property SET SoldTo=%s, SaleAmt=%s, SaleStatus=%s WHERE id=%s", (SoldTo,SaleAmt,SaleStatus,key))
        con.commit()
        cur.close()
        con.close()
def InsertIntoDB(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,Zipcode,Appraisal,MinBidAmt,SaleAmt,SaleStatus):
    """Insert one Property row with the given field values (no existence check
    here — see QueryDatabaseIfRecordExists / InsertUpdateIfExistsIntoDB)."""
    con = mdb.connect('localhost', 'nicolae', 'ceausescu', 'SheriffSales')
    with con:
        cur = con.cursor(mdb.cursors.DictCursor)
        # check to see if record exists before inserting, if exists check for salestatus change, update saleamt
        cur.execute("INSERT INTO Property(SaleDate,CaseNumber,Address,SaleStatus,MinBid,Appraisal,ZipCode,Plaintiff,Defendant,Attorney,SoldTo,PID,SaleAmt) VALUES ( %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", (date, CaseNumber,Address,SaleStatus,MinBidAmt,Appraisal,Zipcode,Plaintiff,Defendant,Attorney,SoldTo,PID,SaleAmt)) #even though their database types are int/float etc they are entered as strings here....
        con.commit()
        cur.close()
        con.close()
def InsertUpdateIfExistsIntoDB(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,Zipcode,Appraisal,MinBidAmt,SaleAmt,SaleStatus):
    """Upsert one Property row: insert, or on a duplicate key update the
    sale-outcome columns (SoldTo, SaleAmt, SaleStatus)."""
    con = mdb.connect('localhost', 'nicolae', 'ceausescu', 'SheriffSales')
    with con:
        cur = con.cursor(mdb.cursors.DictCursor)
        cur.execute("INSERT INTO Property(SaleDate,CaseNumber,Address,SaleStatus,MinBid,Appraisal,ZipCode,Plaintiff,Defendant,Attorney,SoldTo,PID,SaleAmt) VALUES ( %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s) ON DUPLICATE KEY UPDATE SoldTo=%s,SaleAmt=%s,SaleStatus=%s", (date, CaseNumber,Address,SaleStatus,MinBidAmt,Appraisal,Zipcode,Plaintiff,Defendant,Attorney,SoldTo,PID,SaleAmt,SoldTo,SaleAmt,SaleStatus)) #even though their database types are int/float etc they are entered as strings here
        con.commit()
        cur.close()
        con.close()
def QueryDatabaseIfRecordExists(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,Zipcode,Appraisal,MinBidAmt,SaleAmt,SaleStatus):
    """Look up a Property row matching all pre-sale fields.

    Returns:
        the row's primary key when exactly one match exists and its stored
        SaleStatus differs from *SaleStatus* (caller should update it);
        -1 when no match exists (caller should insert);
        -2 when multiple matches exist (ambiguous);
        -3 when one match exists but SaleStatus is unchanged (no-op).
    """
    key=-1 #primary keys aren't negative as far as I know. This is the sentinel value
    con = mdb.connect('localhost', 'nicolae', 'ceausescu', 'SheriffSales')
    with con:
        cur = con.cursor(mdb.cursors.DictCursor)
        # check to see if record exists before inserting, if exists check for salestatus change, update saleamt
        resultcount=int(cur.execute("SELECT * FROM Property WHERE SaleDate=%s and CaseNumber=%s and Address=%s and MinBid=%s and Appraisal=%s and ZipCode=%s and Plaintiff=%s and Defendant=%s and Attorney=%s and PID=%s ", (date, CaseNumber,Address,MinBidAmt,Appraisal,Zipcode,Plaintiff,Defendant,Attorney,PID))) # look for match on all fields except those that would've been updated after the property was sold
        # resultcount=cur.row_count() can also be used at any time to obtain number of rows returned by the query
        if resultcount==1:
            row=cur.fetchone()
            key=int(row['id'])
            soldto=row['SoldTo']
            salestatus=row['SaleStatus']
            saleamt=float(row['SaleAmt']) #otherwise is a decimal object coming from MySQL
            if salestatus!=SaleStatus: # and soldto!=SoldTo:
                #print(saleamt,SaleAmt,salestatus,SaleStatus,soldto,SoldTo)
                print("*"),
            else:
                key=-3 #if salestatus hasn't changed then don't update
                # print(saleamt,SaleAmt,salestatus,SaleStatus,soldto,SoldTo)
                # print(key)
            if 1==0:  # disabled debug probe for one specific sale date
                if date=="2012-04-06":
                    print("--%s--++%s++" %(row['SaleStatus'],SaleStatus))
        elif resultcount>1:
            print("multiple results:%i",resultcount)
            key=-2
            #rows=cur.fetchall()
        else:
            print("no results found"),
            print("SELECT * FROM Property WHERE SaleDate=%s and CaseNumber=%s and Address=%s and MinBid=%s and Appraisal=%s and ZipCode=%s and Plaintiff=%s and Defendant=%s and Attorney=%s and PID=%s " % (date, CaseNumber,Address,MinBidAmt,Appraisal,Zipcode,Plaintiff,Defendant,Attorney,PID))
        cur.close()
        con.close()
    return int(key) #otherwise it is a long
def ProcessFile(inputfilename,outputfilename):
    """Parse a saved Warren County sheriff-sale HTML listing into CSV-style rows.

    Reads *inputfilename* line by line, extracts one property record per HTML
    table row (field position keyed by a per-row line counter), normalises the
    numeric fields, and writes the accumulated raw records to *outputfilename*.
    The database-persistence path is present but disabled (``if 1==0``).

    BUGFIX: the flag initialisers used the non-existent names ``false``/``true``
    (Python spells them ``False``/``True``), which raised NameError on every
    call; the boolean literals are used now.
    """
    print("Using input:%s and output:%s" % (inputfilename,outputfilename))
    AuctionListFile=open(inputfilename,'r')
    linecounter=0
    propertyRecordcounter=0   # position of the current line within its <tr> record
    SaleDate=""
    CaseNumber=""
    Defendant=""
    Attorney=""
    PID="" #parcel ID?
    SoldTo=""
    SaleStatus=""
    SaleAmt=""
    Address=""
    Zipcode=""
    Appraisal=""
    MinBidAmt=""
    SaleStatus=""   # NOTE(review): duplicate initialiser; Plaintiff is never pre-initialised
    enddataflag=0
    data=""
    startTDflag=False   # BUGFIX: was `false` (NameError); set but never read below
    endTDflag=False     # BUGFIX: was `true` assignment target's twin, same issue
    #create property record object. initialize with these extracted values
    #geocode Property Record, check if record present. update with new data (sold info->soldto,saleprice)
    #although keying off line # works, it isn't foolproof. yet there aren't tags to truly key off of either.
    for line in AuctionListFile:
        if (linecounter > 75) and (enddataflag==0): #first valid record starts after line 75
            line1=line.strip()
            linelower=line.lower()
            if linelower.find("<tr class=\"")!=-1: # align=\"center\" valign=\"top\"")!=-1:
                propertyRecordcounter=0 #this signals beginning of a record set.
            if linelower.find("<td>")!=-1:
                startTDflag=True   # BUGFIX: was `true`
            if linelower.find("</td>")!=-1:
                endTDflag=True     # BUGFIX: was `true`
            #if the record is alll on one line then they
            if propertyRecordcounter==10:
                SaleDate=striphtml(line1.rstrip())
                print(SaleDate)
            #could be done in a switch statement but python doesn't implement them and for simplicity sake I'm not going the dictionary lookup route.
            if propertyRecordcounter==4:
                CaseNumber=striphtml(line1.rstrip())
                print(CaseNumber)
            if propertyRecordcounter==8:
                Address=striphtml(stripampersand(stripcomma(line1.rstrip())))
            if propertyRecordcounter==2:
                Plaintiff=striphtml(stripampersand(stripcomma(line1.rstrip())))
            if propertyRecordcounter==3:
                Defendant=striphtml(stripampersand(stripcomma(line1.rstrip())))
            # NOTE(review): the 9999... sentinels below make those branches
            # unreachable, effectively disabling fields absent from this
            # county's layout — confirm before re-enabling.
            if propertyRecordcounter==999999916:
                Attorney=striphtml(stripampersand(stripcomma(line1.rstrip())))
            if propertyRecordcounter==99999918:
                SoldTo=striphtml(stripampersand(stripcomma(line1.rstrip())))
            if propertyRecordcounter==7:
                PIDa=striphtml(stripampersand(stripcomma(line1.rstrip())))
                PID=PIDa.strip() #needed an additional strip
            if propertyRecordcounter==999999927:
                Zipcode=striphtml(line1.rstrip()) #some zips don't exist, are prepended by OH
            if propertyRecordcounter==13:
                SaleStatus=striphtml(stripampersand(stripcomma(line1.rstrip())))
            if propertyRecordcounter==5:
                Appraisal=prepprice(line1)
            if propertyRecordcounter==6: #I had a record where the minbid or appraisal had two decimal points in it. I suppose that means that they don't typecheck the records and are input as strings and not as numeric types. Need to type check to prevent it occurring in the future
                MinBidAmt=prepprice(line1)
            if propertyRecordcounter==999999941:
                SaleAmt=prepprice(line1)
            propertyRecordcounter+=1
            #we've reached the end of a record, output and reset counter
            if line.find("</tr>")!=-1:
                propertyRecordcounter=0
                #reset counter when outside a record
                #print(SaleDate,CaseNumber,Address,Plaintiff,Defendant,Attorney,SoldTo,Zipcode,Appraisal,MinBidAmt,SaleStatus)
                ### Clean up the formatting (normalised values feed the disabled DB path;
                ### the CSV line below still uses the raw strings)
                date=SaleDate
                appraisal=Appraisal
                zipcode=Zipcode
                minbid=MinBidAmt
                salestatus=SaleStatus
                saleamt=SaleAmt
                if Appraisal=="" or Appraisal=="in Bid: ":
                    appraisal=0.0
                else:
                    appraisal=float(Appraisal)
                if SaleStatus=="NO BID NO SALE":
                    salestatus="NOBIDNOSALE"
                else:
                    salestatus=SaleStatus
                if MinBidAmt=="":
                    minbid=0.0
                elif MinBidAmt=="SEE ENTRY":
                    minbid=0.0
                elif MinBidAmt=="SEE ENTRY FOR AMOUNTS":
                    minbid=0.0
                elif MinBidAmt=="SEE WRIT OF PARTITION":
                    minbid=0.0
                else:
                    minbid=float(MinBidAmt)
                if SaleAmt=="":
                    saleamt=-0.01
                else:
                    saleamt=float(SaleAmt)
                if Zipcode=="":
                    zipcode=0
                else:
                    if Zipcode[0:2]=="OH":
                        if len(Zipcode)>7:
                            zipcode=int(Zipcode[3:])
                        else:
                            zipcode=0
                    else:
                        zipcode=int(Zipcode)
                date=convertDateFormatMDYSlashDelimited(SaleDate)
                ###
                data+=SaleDate+","+CaseNumber+","+Address+","+ Plaintiff+','+Defendant+','+ PID+',' +Zipcode+","+Appraisal+","+MinBidAmt+","+SaleAmt+','+SaleStatus+"\n"
                if 1==0:  # database persistence disabled
                    key=QueryDatabaseIfRecordExists(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,zipcode,appraisal,minbid,saleamt,salestatus)
                    if key==-1: # no results found, enter into database
                        print("-"),
                        #InsertUpdateIfExistsIntoDB(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,zipcode,appraisal,minbid,saleamt,salestatus)
                        InsertIntoDB(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,zipcode,appraisal,minbid,saleamt,salestatus)
                    elif key==-2:
                        print("uhoh multiple results returned")
                    elif key==-3:
                        print(","), #record is unchanged, don't do anything
                    else:
                        print("+"), #record has changed and we are updating it using the key
                        UpdateRecordInDatabase(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,zipcode,appraisal,minbid,saleamt,salestatus,key)
                else:
                    nine=9
                    #InsertUpdateIfExistsIntoDB(date,CaseNumber,Address, Plaintiff,Defendant,Attorney,SoldTo,PID,zipcode,appraisal,minbid,saleamt,salestatus)
            if line.find("</table>")!=-1: #this signals the end of the property list
                enddataflag=1
        else:
            linecounter+=1
    output=open(outputfilename,"w")
    output.write(data)
    output.close()
import datetime
def convertDateFormat(date):
    """Convert a fixed-width 'MM/DD/YYYY' date string to ISO 'YYYY-MM-DD'.

    Fixed-position slicing, so both month and day must be zero-padded
    (e.g. '04/06/2012'); see convertDateFormatMDYSlashDelimited for the
    delimiter-based variant that tolerates unpadded values.

    BUGFIX: removed the leftover debug print ("I need to strip based on the
    / delimiter") that polluted stdout on every call.
    """
    month = int(date[0:2])
    day = int(date[3:5])
    year = int(date[6:10])
    dt = datetime.date(year, month, day)
    return dt.strftime("%Y-%m-%d")
def convertDateFormatMDYSlashDelimited(date):
    """Convert an 'M/D/YYYY' slash-delimited date string to ISO 'YYYY-MM-DD'."""
    parts = date.split("/")
    # parts[0]=month, parts[1]=day, parts[2]=year
    iso_date = datetime.date(int(parts[2]), int(parts[0]), int(parts[1]))
    return iso_date.strftime("%Y-%m-%d")
########### MAIN ############
# Entry point: optional CLI args override the default input/output file names.
import sys
import MySQLdb as mdb
import urllib,urllib2,time  # NOTE(review): urllib2 exists only on Python 2
#check if argv[1]!=null and assign to
#if sys.argv[#
if 1==1:
    # argv[1] = saved HTML listing, argv[2] = CSV-style output file
    if len(sys.argv)>1 and sys.argv[1]!="":
        inputfilename=sys.argv[1]
    else:
        inputfilename="WarrenCountyOhio.slsgrid.asp.html"
    if len(sys.argv)>2 and sys.argv[2]!="":
        outputfilename=sys.argv[2]
    else:
        outputfilename="WarrenCountyoutput.txt"
    print(inputfilename,outputfilename)
    ProcessFile(inputfilename,outputfilename)
| true |
52f02adafc5d77499b520719d4035d67220738f2 | Python | nilax97/leetcode-solutions | /solutions/Sequential Digits/solution.py | UTF-8 | 661 | 2.96875 | 3 | [
"MIT"
] | permissive | class Solution:
def sequentialDigits(self, low: int, high: int) -> List[int]:
def make_seq(s,l,h,ans):
if (int(s[0]) + len(s))>10:
return ans
x = s[0]
for i in range(1,len(s)):
x = x + str(int(x[-1]) + 1)
# print(x,s,l,h,ans)
if l<= int(x) and h>= int(x):
ans.append(int(x))
return ans
l = len(str(low))
h = len(str(high))
ans = list()
for i in range(l,h+1):
s = "0" * (i-1)
for i in range(1,10):
ans = make_seq(str(i) + s,low,high,ans)
return ans
| true |
88fec31485fd844d56f826ce1a11467c205a3097 | Python | Neoanarika/probml-notebooks | /scripts/linreg_hierarchical_pymc3.py | UTF-8 | 17,914 | 3.25 | 3 | [
"MIT"
] | permissive | # -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/probml/pyprobml/blob/master/notebooks/linreg_hierarchical_pymc3.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="a230C8wYHlrO"
# # Hierarchical Bayesian Linear Regression in PyMC3
#
# The text and code for this notebook are taken directly from [this blog post](https://twiecki.io/blog/2014/03/17/bayesian-glms-3/) by Thomas Wiecki and Danne Elbers. [Original notebook](https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/GLM_hierarchical.ipynb).
#
# + [markdown] id="qfHB_ewgHlrO"
#
#
# Gelman et al.'s (2007) radon dataset is a classic for hierarchical modeling. In this dataset the amount of the radioactive gas radon has been measured among different households in all counties of several states. Radon gas is known to be the highest cause of lung cancer in non-smokers. It is believed to enter the house through the basement. Moreover, its concentration is thought to differ regionally due to different types of soil.
#
# Here we'll investigate this difference and try to make predictions of radon levels in different counties and where in the house radon was measured. In this example we'll look at Minnesota, a state that contains 85 counties in which different measurements are taken, ranging from 2 to 80 measurements per county.
#
# + [markdown] id="JzDno90bHlrO"
# First, we'll load the data:
# + id="SrdljbL4HlrO"
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import pandas as pd
# Radon measurements (Gelman et al. 2007), hosted alongside the original notebook.
url = 'https://github.com/twiecki/WhileMyMCMCGentlySamples/blob/master/content/downloads/notebooks/radon.csv?raw=true'
data = pd.read_csv(url)
county_names = data.county.unique()  # one entry per county in the dataset
county_idx = data['county_code'].values  # integer county index for each measurement
# + colab={"base_uri": "https://localhost:8080/"} id="utcJNTvFMTgc" outputId="13afed6a-70ed-4805-b7ae-af88769aac38"
# !pip install arviz
import arviz
# + [markdown] id="a_DCxY0mHlrO"
# The relevant part of the data we will model looks as follows:
# + colab={"base_uri": "https://localhost:8080/", "height": 191} id="hSUE2MlLHlrO" outputId="f2af423b-509f-4ec0-e120-c1c1f7861f64"
data[['county', 'log_radon', 'floor']].head()  # preview the three columns used by the models
# + [markdown] id="fHi7Nuf-HlrP"
# As you can see, we have multiple `radon` measurements (log-converted to be on the real line) in a `county` and whether the measurement has been taken in the basement (`floor` == 0) or on the first floor (`floor` == 1). Here we want to test the prediction that radon concentrations are higher in the basement.
# + [markdown] id="nsLXqnDJHlrP"
# ## The Models
#
# ### Pooling of measurements
# Now you might say: "That's easy! I'll just pool all my data and estimate one big regression to asses the influence of measurement across all counties". In math-speak that model would be:
#
# $$radon_{i, c} = \alpha + \beta*\text{floor}_{i, c} + \epsilon$$
#
# Where $i$ represents the measurement, $c$ the county and floor contains which floor the measurement was made. If you need a refresher on Linear Regressions in `PyMC3`, check out my [previous blog post](https://twiecki.github.io/blog/2013/08/12/bayesian-glms-1/). Critically, we are only estimating *one* intercept and *one* slope for all measurements over all counties.
#
# ### Separate regressions
# But what if we are interested in whether different counties actually have different relationships (slope) and different base-rates of radon (intercept)? Then you might say "OK then, I'll just estimate $n$ (number of counties) different regressions -- one for each county". In math-speak that model would be:
#
# $$radon_{i, c} = \alpha_{c} + \beta_{c}*\text{floor}_{i, c} + \epsilon_c$$
#
# Note that we added the subindex $c$ so we are estimating $n$ different $\alpha$s and $\beta$s -- one for each county.
#
# This is the extreme opposite of the model above: there we assumed all counties are exactly the same, while here we are saying that they share no similarities whatsoever, which ultimately is also unsatisfying.
#
# ### Hierarchical Regression: The best of both worlds
#
# Fortunately there is a middle ground to both of these extreme views. Specifically, we may assume that while $\alpha$s and $\beta$s are different for each county, the coefficients all come from a common group distribution:
#
# $$\alpha_{c} \sim \mathcal{N}(\mu_{\alpha}, \sigma_{\alpha}^2)$$
# $$\beta_{c} \sim \mathcal{N}(\mu_{\beta}, \sigma_{\beta}^2)$$
#
# We thus assume the intercepts $\alpha$ and slopes $\beta$ to come from a normal distribution centered around their respective group mean $\mu$ with a certain standard deviation $\sigma^2$, the values (or rather posteriors) of which we also estimate. That's why this is called multilevel or hierarchical modeling.
#
# How do we estimate such a complex model with all these parameters you might ask? Well, that's the beauty of Probabilistic Programming -- we just formulate the model we want and press our [Inference Button(TM)](https://twiecki.github.io/blog/2013/08/12/bayesian-glms-1/).
#
# Note that the above is not a complete Bayesian model specification as we haven't defined priors or hyperpriors (i.e. priors for the group distribution, $\mu$ and $\sigma$). These will be used in the model implementation below but only distract here.
# + [markdown] id="Uww9m3FkHlrP"
# ## Probabilistic Programming
#
# ### Individual/non-hierarchical model
#
# To really highlight the effect of the hierarchical linear regression we'll first estimate the non-hierarchical Bayesian model from above (separate regressions). For each county a new estimate of the parameters is initiated. As we have no prior information on what the intercept or regressions could be we are placing a Normal distribution centered around 0 with a wide standard-deviation. We'll assume the measurements are normally distributed with noise $\epsilon$ on which we place a Half-Cauchy distribution.
# + colab={"base_uri": "https://localhost:8080/"} id="nS5yWSfCHlrP" outputId="17733989-e0cf-4825-94a4-70f4388e2c96"
# takes about 45 minutes
# Unpooled approach: fit one completely independent Bayesian linear regression
# per county; traces are collected in a dict keyed by county name.
indiv_traces = {}
for county_name in county_names:
    # Select subset of data belonging to county
    c_data = data.loc[data.county == county_name]
    c_data = c_data.reset_index(drop=True)
    c_log_radon = c_data.log_radon
    c_floor_measure = c_data.floor.values
    with pm.Model() as individual_model:
        # Intercept prior
        a = pm.Normal('alpha', mu=0, sigma=1)
        # Slope prior
        b = pm.Normal('beta', mu=0, sigma=1)
        # Model error prior
        eps = pm.HalfCauchy('eps', beta=1)
        # Linear model
        radon_est = a + b * c_floor_measure
        # Data likelihood
        y_like = pm.Normal('y_like', mu=radon_est, sigma=eps, observed=c_log_radon)
        # Inference button (TM)!
        trace = pm.sample(progressbar=False)
    indiv_traces[county_name] = trace
# + [markdown] id="uxue_H19HlrP"
# ## Hierarchical Model
# Instead of initiating the parameters separatly, the hierarchical model initiates group parameters that consider the county's not as completely different but as having an underlying similarity. These distributions are subsequently used to influence the distribution of each county's $\alpha$ and $\beta$.
# + id="WbiuioLiHlrP"
with pm.Model() as hierarchical_model:
    # Hyperpriors: group-level mean and spread for intercepts and slopes
    mu_a = pm.Normal('mu_alpha', mu=0., sigma=1)
    sigma_a = pm.HalfCauchy('sigma_alpha', beta=1)
    mu_b = pm.Normal('mu_beta', mu=0., sigma=1)
    sigma_b = pm.HalfCauchy('sigma_beta', beta=1)
    # Intercept for each county, distributed around group mean mu_a
    a = pm.Normal('alpha', mu=mu_a, sigma=sigma_a, shape=len(data.county.unique()))
    # Slope for each county, distributed around group mean mu_b
    b = pm.Normal('beta', mu=mu_b, sigma=sigma_b, shape=len(data.county.unique()))
    # Model error
    eps = pm.HalfCauchy('eps', beta=1)
    # Expected value: county_idx selects each row's county-specific coefficients
    radon_est = a[county_idx] + b[county_idx] * data.floor.values
    # Data likelihood
    y_like = pm.Normal('y_like', mu=radon_est, sigma=eps, observed=data.log_radon)
# + colab={"base_uri": "https://localhost:8080/"} id="uQ0Z8mWvHlrP" outputId="6e67398e-72b1-4115-9b40-8a5ef6e484e6"
with hierarchical_model:
    # Sample the full joint posterior (all counties plus hyperparameters).
    hierarchical_trace = pm.sample()
# + id="NZ9jVYhfHlrP" outputId="21bf7842-4e80-4859-e023-2c91e0541e48"
pm.traceplot(hierarchical_trace);
# + colab={"base_uri": "https://localhost:8080/", "height": 442} id="VUi6rTAVL5We" outputId="842fcc43-74c3-4d06-d54c-a3f66c207630"
pm.traceplot(hierarchical_trace, var_names=['alpha', 'beta'])
# + [markdown] id="Tr7bjX1_HlrQ"
# The marginal posteriors in the left column are highly informative. `mu_a` tells us the group mean (log) radon levels. `mu_b` tells us that the slope is significantly negative (no mass above zero), meaning that radon concentrations are higher in the basement than first floor. We can also see by looking at the marginals for `a` that there is quite some differences in radon levels between counties; the different widths are related to how much measurements we have per county, the more, the higher our confidence in that parameter estimate.
# + [markdown] id="lmiYdG4dHlrQ"
# <div class="alert alert-warning">
#
# After writing this blog post I found out that the chains here (which look worse after I just re-ran them) are not properly converged, you can see that best for `sigma_beta` but also the warnings about "diverging samples" (which are also new in PyMC3). If you want to learn more about the problem and its solution, see my more recent blog post <a href='https://twiecki.github.io/blog/2017/02/08/bayesian-hierchical-non-centered/'>"Why hierarchical models are awesome, tricky, and Bayesian"</a>.
#
# </div>
# + [markdown] id="OdWOhSK_HlrQ"
# ## Posterior Predictive Check
#
# ### The Root Mean Square Deviation
#
# To find out which of the models works better we can calculate the Root Mean Square Deviation (RMSD). This posterior predictive check revolves around recreating the data based on the parameters found at different moments in the chain. The recreated or predicted values are subsequently compared to the real data points; the model that predicts data points closer to the original data is considered the better one. Thus, the lower the RMSD the better.
#
# When computing the RMSD (code not shown) we get the following result:
#
# * individual/non-hierarchical model: 0.13
# * hierarchical model: 0.08
#
# As can be seen above the hierarchical model performs a lot better than the non-hierarchical model in predicting the radon values. Following this, we'll plot some examples of county's showing the true radon values, the hierarchial predictions and the non-hierarchical predictions.
# + colab={"base_uri": "https://localhost:8080/", "height": 390} id="mfE_smDNHlrQ" outputId="146718d1-2a3e-449f-bb5e-4d9c2cb7bcbc"
# Compare posterior regression lines (individual vs. hierarchical) for three counties.
selection = ['CASS', 'CROW WING', 'FREEBORN']
fig, axis = plt.subplots(1, 3, figsize=(12, 6), sharey=True, sharex=True)
axis = axis.ravel()
for i, c in enumerate(selection):
    c_data = data.loc[data.county == c]
    c_data = c_data.reset_index(drop = True)
    z = list(c_data['county_code'])[0]  # integer code of this county
    xvals = np.linspace(-0.2, 1.2)
    # Thin the individual trace by 10 and draw one faint line per posterior sample.
    for a_val, b_val in zip(indiv_traces[c]['alpha'][::10], indiv_traces[c]['beta'][::10]):
        axis[i].plot(xvals, a_val + b_val * xvals, 'b', alpha=.05)
    axis[i].plot(xvals, indiv_traces[c]['alpha'][::10].mean() + indiv_traces[c]['beta'][::10].mean() * xvals,
                 'b', alpha=1, lw=2., label='individual')
    # NOTE(review): hierarchical_trace['alpha'][::10][z] indexes the z-th *thinned
    # sample*, not county z's samples; the county dimension would be selected with
    # [::10][:, z]. Verify against the trace array shape before relying on this plot.
    for a_val, b_val in zip(hierarchical_trace['alpha'][::10][z], hierarchical_trace['beta'][::10][z]):
        axis[i].plot(xvals, a_val + b_val * xvals, 'g', alpha=.05)
    axis[i].plot(xvals, hierarchical_trace['alpha'][::10][z].mean() + hierarchical_trace['beta'][::10][z].mean() * xvals,
                 'g', alpha=1, lw=2., label='hierarchical')
    # Jitter the 0/1 floor value slightly so overlapping points stay visible.
    axis[i].scatter(c_data.floor + np.random.randn(len(c_data))*0.01, c_data.log_radon,
                    alpha=1, color='k', marker='.', s=80, label='original data')
    axis[i].set_xticks([0,1])
    axis[i].set_xticklabels(['basement', 'first floor'])
    axis[i].set_ylim(-1, 4)
    axis[i].set_title(c)
    if not i%3:
        axis[i].legend()
        axis[i].set_ylabel('log radon level')
# + [markdown] id="54MhFfi3HlrQ"
# In the above plot we have the data points in black of three selected counties. The thick lines represent the mean estimate of the regression line of the individual (blue) and hierarchical model (in green). The thinner lines are regression lines of individual samples from the posterior and give us a sense of how variable the estimates are.
#
# When looking at the county 'CASS' we see that the non-hierarchical estimation has huge uncertainty about the radon levels of first floor measurements -- that's because we don't have any measurements in this county. The hierarchical model, however, is able to apply what it learned about the relationship between floor and radon-levels from other counties to CASS and make sensible predictions even in the absence of measurements.
#
# We can also see how the hierarchical model produces more robust estimates in 'CROW WING' and 'FREEBORN'. In this regime of few data points the non-hierarchical model reacts more strongly to individual data points because that's all it has to go on.
#
# Having the group-distribution constrain the coefficients we get meaningful estimates in all cases as we apply what we learn from the group to the individuals and vice-versa.
# + [markdown] id="f_hLtCLtHlrQ"
# # Shrinkage
# Shrinkage describes the process by which our estimates are "pulled" towards the group-mean as a result of the common group distribution -- county-coefficients very far away from the group mean have very low probability under the normality assumption. In the non-hierachical model every county is allowed to differ completely from the others by just using each county's data, resulting in a model more prone to outliers (as shown above).
# + id="dmADXMsWHlrQ"
# Posterior-mean coefficients per county, under both models.
hier_a = hierarchical_trace['alpha'].mean(axis=0)
hier_b = hierarchical_trace['beta'].mean(axis=0)
indv_a = [indiv_traces[c]['alpha'].mean() for c in county_names]
indv_b = [indiv_traces[c]['beta'].mean() for c in county_names]
# + colab={"base_uri": "https://localhost:8080/", "height": 621} id="uwMGRgZTHlrQ" outputId="7ba0a007-40ff-427f-bcda-050e450ec920"
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111, xlabel='Intercept', ylabel='Floor Measure',
                     title='Hierarchical vs. Non-hierarchical Bayes',
                     xlim=(0.25, 2), ylim=(-2, 1.5))
ax.scatter(indv_a,indv_b, s=26, alpha=0.4, label = 'non-hierarchical')
ax.scatter(hier_a,hier_b, c='red', s=26, alpha=0.4, label = 'hierarchical')
# One arrow per county, from the unpooled estimate to the hierarchical one,
# visualizing how estimates are pulled ("shrunk") toward the group mean.
for i in range(len(indv_b)):
    ax.arrow(indv_a[i], indv_b[i], hier_a[i] - indv_a[i], hier_b[i] - indv_b[i],
             fc="k", ec="k", length_includes_head=True, alpha=0.4, head_width=.02)
ax.legend();
# + [markdown] id="HZw2d4VCHlrQ"
# In the shrinkage plot above we show the coefficients of each county's non-hierarchical posterior mean (blue) and the hierarchical posterior mean (red). To show the effect of shrinkage on a single coefficient-pair (alpha and beta) we connect the blue and red points belonging to the same county by an arrow. Some non-hierarchical posteriors are so far out that we couldn't display them in this plot (it would make the axes too wide). Interestingly, all hierarchical posteriors of the floor-measure seem to be around -0.6, confirming our prediction that radon levels are higher in the basement than on the first floor. The differences in intercepts (which we take for type of soil) differ among counties, indicating that meaningful regional differences exist in radon concentration. This information would have been difficult to find if just the non-hierarchical model had been used, and estimates for individual counties would have been much more noisy.
# + [markdown] id="oIiVbYxWHlrQ"
# # Summary
#
# In this post, co-authored by Danne Elbers, we showed how a multi-level hierarchical Bayesian model gives the best of both worlds when we have multiple sets of measurements we expect to have similarity. The naive approach either pools all data together and ignores the individual differences, or treats each set as completely separate leading to noisy estimates as shown above. By placing a group distribution on the individual sets we can learn about each set and the group simultaneously. Probabilistic Programming in PyMC then makes Bayesian estimation of this model trivial.
#
#
# ## References
# * [The Inference Button: Bayesian GLMs made easy with PyMC3](https://twiecki.github.io/blog/2013/08/12/bayesian-glms-1/)
# * [This world is far from Normal(ly distributed): Bayesian Robust Regression in PyMC3](https://twiecki.github.io/blog/2013/08/27/bayesian-glms-2/)
# * [Chris Fonnesbeck repo containing a more extensive analysis](https://github.com/fonnesbeck/multilevel_modeling/)
# * [Shrinkage in multi-level hierarchical models](http://doingbayesiandataanalysis.blogspot.com/2012/11/shrinkage-in-multi-level-hierarchical.html) by John Kruschke
# * Gelman, A.; Carlin; Stern; and Rubin, D., 2007, "Replication data for: Bayesian Data Analysis, Second Edition",
# * Gelman, A., & Hill, J. (2006). Data Analysis Using Regression and Multilevel/Hierarchical Models (1st ed.). Cambridge University Press.
# * Gelman, A. (2006). Multilevel (Hierarchical) modeling: what it can and cannot do. Technometrics, 48(3), 432–435.
# + id="nRRXbDjWNsdl"
| true |
bf2f4145507f48cc2ea11eab3799a841473e753d | Python | AndersonHJB/PyCharm_Coder | /Coder_Old/pycharm_daima/每颗豆算法/test/test_two_2019_12_23.py | UTF-8 | 889 | 3.109375 | 3 | [] | no_license | # !/usr/bin/python3
# -*- coding: utf-8 -*-
# @Author:AI悦创 @DateTime :2019/12/23 15:52 @Function :功能 Development_tool :PyCharm
# code is far away from bugs with the god animal protecting
# I love animals. They taste delicious.
# Demonstrate the three ways of reading a text file. The file cursor is shared
# between reads, so we must seek(0) before each new demonstration — otherwise
# every read after the first returns empty results.
with open('i_have_a_dream.txt', 'r', encoding="utf-8") as file:
    print(file.read())  # read the whole file at once; the result type is str
    # How to inspect a value's type:
    print(type(file.read()))  # prints: <class 'str'> (this second read returns '' — cursor is at EOF)
    # -------------------------------------------------------------------
    file.seek(0)  # rewind so readlines() actually sees the content again
    print(file.readlines())  # read all lines into a list; the result type is list
    print(type(file.readlines()))  # <class 'list'>
    # -------------------------------------------------------------------
    file.seek(0)  # rewind so readline() returns the real first line
    print(file.readline())  # read a single line
    print(type(file.readline()))  # <class 'str'>
| true |
70e302a10f8141b5e5b424ea611bc531ec5931d0 | Python | exgs/hongikuniv_chemical-engineering | /화공전산(2-2)/chapter5.다변수 비선형 방정식/연습문제/p5.1.py | UTF-8 | 302 | 3.5625 | 4 | [] | no_license | from math import log, sqrt, sin, cos, exp
# x는 리스트 : x[0] = x1, x[1] = x2
def f(x):
    """Residuals of the two-equation nonlinear system at the point x = [x1, x2]."""
    x1, x2 = x
    first = 0.5 * x1 ** 2 + 0.08 * x2 - 0.5 * sin(x1 * x2)
    second = 5.2 * x1 - 0.87 * x2 - 0.92 * exp(2 * x1) + 2.5
    return [first, second]
# Solve f(x) = 0 numerically, starting from the initial guess [0, 0].
from scipy.optimize import root
sol = root(f, [0,0])
print(sol)  # full OptimizeResult: status, residuals, solver diagnostics
print("-"*30)
print(sol.x)  # solution vector [x1, x2]
| true |
4071c3b9b0b1ae7a16054b11450857152dd16a80 | Python | alexforencich/verilog-ethernet | /lib/axis/rtl/axis_ram_switch_wrap.py | UTF-8 | 10,373 | 2.59375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
Generates an AXI Stream switch wrapper with the specified number of ports
"""
import argparse
from jinja2 import Template
def main():
    """Command-line entry point: parse arguments and run the generator."""
    arg_parser = argparse.ArgumentParser(description=__doc__.strip())
    arg_parser.add_argument('-p', '--ports', type=int, default=[4], nargs='+', help="number of ports")
    arg_parser.add_argument('-n', '--name', type=str, help="module name")
    arg_parser.add_argument('-o', '--output', type=str, help="output file name")
    opts = arg_parser.parse_args()
    try:
        generate(**vars(opts))
    except IOError as err:
        print(err)
        exit(1)
def generate(ports=4, name=None, output=None):
    """Render the Verilog wrapper for an m x n AXI stream RAM switch.

    ports may be a single int (square m x m switch), a one-element list,
    or an [m, n] pair.  name defaults to axis_ram_switch_wrap_{m}x{n} and
    output defaults to '<name>.v'.  Writes the rendered Verilog to output.
    """
    # Normalize the ports argument into the (m, n) port counts.
    if type(ports) is int:
        m = n = ports
    elif len(ports) == 1:
        m = n = ports[0]
    else:
        m, n = ports
    if name is None:
        name = "axis_ram_switch_wrap_{0}x{1}".format(m, n)
    if output is None:
        output = name + ".v"
    print("Generating {0}x{1} port AXI stream RAM switch wrapper {2}...".format(m, n, name))
    # Bits needed to uniquely address the m inputs (cm) and n outputs (cn);
    # these widen the default tid/tdest widths in the generated module.
    cm = (m-1).bit_length()
    cn = (n-1).bit_length()
    # Jinja2 template of the wrapper module; expanded per-port below.
    t = Template(u"""/*
Copyright (c) 2018-2021 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
// Language: Verilog 2001
`resetall
`timescale 1ns / 1ps
`default_nettype none
/*
 * AXI4-Stream {{m}}x{{n}} RAM switch (wrapper)
 */
module {{name}} #
(
    // FIFO depth in words (each virtual FIFO)
    // KEEP_WIDTH words per cycle if KEEP_ENABLE set
    // Rounded up to nearest power of 2 cycles
    parameter FIFO_DEPTH = 4096,
    // Command FIFO depth (each virtual FIFO)
    // Rounded up to nearest power of 2
    parameter CMD_FIFO_DEPTH = 32,
    // Speedup factor (internal data width scaling factor)
    // Speedup of 0 scales internal width to provide maximum bandwidth
    parameter SPEEDUP = 0,
    // Width of input AXI stream interfaces in bits
    parameter S_DATA_WIDTH = 8,
    // Propagate tkeep signal
    parameter S_KEEP_ENABLE = (S_DATA_WIDTH>8),
    // tkeep signal width (words per cycle)
    parameter S_KEEP_WIDTH = ((S_DATA_WIDTH+7)/8),
    // Width of output AXI stream interfaces in bits
    parameter M_DATA_WIDTH = 8,
    // Propagate tkeep signal
    parameter M_KEEP_ENABLE = (M_DATA_WIDTH>8),
    // tkeep signal width (words per cycle)
    parameter M_KEEP_WIDTH = ((M_DATA_WIDTH+7)/8),
    // Propagate tid signal
    parameter ID_ENABLE = 0,
    // input tid signal width
    parameter S_ID_WIDTH = 8,
    // output tid signal width
    parameter M_ID_WIDTH = S_ID_WIDTH+{{cm}},
    // output tdest signal width
    parameter M_DEST_WIDTH = 1,
    // input tdest signal width
    // must be wide enough to uniquely address outputs
    parameter S_DEST_WIDTH = M_DEST_WIDTH+{{cn}},
    // Propagate tuser signal
    parameter USER_ENABLE = 1,
    // tuser signal width
    parameter USER_WIDTH = 1,
    // tuser value for bad frame marker
    parameter USER_BAD_FRAME_VALUE = 1'b1,
    // tuser mask for bad frame marker
    parameter USER_BAD_FRAME_MASK = 1'b1,
    // Drop frames marked bad
    parameter DROP_BAD_FRAME = 0,
    // Drop incoming frames when full
    // When set, s_axis_tready is always asserted
    parameter DROP_WHEN_FULL = 0,
{%- for p in range(n) %}
    // Output interface routing base tdest selection
    // Port selected if M_BASE <= tdest <= M_TOP
    parameter M{{'%02d'%p}}_BASE = {{p}},
    // Output interface routing top tdest selection
    // Port selected if M_BASE <= tdest <= M_TOP
    parameter M{{'%02d'%p}}_TOP = {{p}},
    // Interface connection control
    parameter M{{'%02d'%p}}_CONNECT = {{m}}'b{% for p in range(m) %}1{% endfor %},
{%- endfor %}
    // Update tid with routing information
    parameter UPDATE_TID = 0,
    // select round robin arbitration
    parameter ARB_TYPE_ROUND_ROBIN = 1,
    // LSB priority selection
    parameter ARB_LSB_HIGH_PRIORITY = 1,
    // RAM read data output pipeline stages
    parameter RAM_PIPELINE = 2
)
(
    input  wire                          clk,
    input  wire                          rst,
    /*
     * AXI Stream inputs
     */
{%- for p in range(m) %}
    input  wire [S_DATA_WIDTH-1:0]       s{{'%02d'%p}}_axis_tdata,
    input  wire [S_KEEP_WIDTH-1:0]       s{{'%02d'%p}}_axis_tkeep,
    input  wire                          s{{'%02d'%p}}_axis_tvalid,
    output wire                          s{{'%02d'%p}}_axis_tready,
    input  wire                          s{{'%02d'%p}}_axis_tlast,
    input  wire [S_ID_WIDTH-1:0]         s{{'%02d'%p}}_axis_tid,
    input  wire [S_DEST_WIDTH-1:0]       s{{'%02d'%p}}_axis_tdest,
    input  wire [USER_WIDTH-1:0]         s{{'%02d'%p}}_axis_tuser,
{% endfor %}
    /*
     * AXI Stream outputs
     */
{%- for p in range(n) %}
    output wire [M_DATA_WIDTH-1:0]       m{{'%02d'%p}}_axis_tdata,
    output wire [M_KEEP_WIDTH-1:0]       m{{'%02d'%p}}_axis_tkeep,
    output wire                          m{{'%02d'%p}}_axis_tvalid,
    input  wire                          m{{'%02d'%p}}_axis_tready,
    output wire                          m{{'%02d'%p}}_axis_tlast,
    output wire [M_ID_WIDTH-1:0]         m{{'%02d'%p}}_axis_tid,
    output wire [M_DEST_WIDTH-1:0]       m{{'%02d'%p}}_axis_tdest,
    output wire [USER_WIDTH-1:0]         m{{'%02d'%p}}_axis_tuser,
{% endfor %}
    /*
     * Status
     */
    output wire [{{m-1}}:0]                    status_overflow,
    output wire [{{m-1}}:0]                    status_bad_frame,
    output wire [{{m-1}}:0]                    status_good_frame
);
// parameter sizing helpers
function [S_DEST_WIDTH-1:0] w_dw(input [S_DEST_WIDTH-1:0] val);
    w_dw = val;
endfunction
function [{{m-1}}:0] w_s(input [{{m-1}}:0] val);
    w_s = val;
endfunction
axis_ram_switch #(
    .FIFO_DEPTH(FIFO_DEPTH),
    .CMD_FIFO_DEPTH(CMD_FIFO_DEPTH),
    .SPEEDUP(SPEEDUP),
    .S_COUNT({{m}}),
    .M_COUNT({{n}}),
    .S_DATA_WIDTH(S_DATA_WIDTH),
    .S_KEEP_ENABLE(S_KEEP_ENABLE),
    .S_KEEP_WIDTH(S_KEEP_WIDTH),
    .M_DATA_WIDTH(M_DATA_WIDTH),
    .M_KEEP_ENABLE(M_KEEP_ENABLE),
    .M_KEEP_WIDTH(M_KEEP_WIDTH),
    .ID_ENABLE(ID_ENABLE),
    .S_ID_WIDTH(S_ID_WIDTH),
    .M_ID_WIDTH(M_ID_WIDTH),
    .S_DEST_WIDTH(S_DEST_WIDTH),
    .M_DEST_WIDTH(M_DEST_WIDTH),
    .USER_ENABLE(USER_ENABLE),
    .USER_WIDTH(USER_WIDTH),
    .USER_BAD_FRAME_VALUE(USER_BAD_FRAME_VALUE),
    .USER_BAD_FRAME_MASK(USER_BAD_FRAME_MASK),
    .DROP_BAD_FRAME(DROP_BAD_FRAME),
    .DROP_WHEN_FULL(DROP_WHEN_FULL),
    .M_BASE({ {% for p in range(n-1,-1,-1) %}w_dw(M{{'%02d'%p}}_BASE){% if not loop.last %}, {% endif %}{% endfor %} }),
    .M_TOP({ {% for p in range(n-1,-1,-1) %}w_dw(M{{'%02d'%p}}_TOP){% if not loop.last %}, {% endif %}{% endfor %} }),
    .M_CONNECT({ {% for p in range(n-1,-1,-1) %}w_s(M{{'%02d'%p}}_CONNECT){% if not loop.last %}, {% endif %}{% endfor %} }),
    .UPDATE_TID(UPDATE_TID),
    .ARB_TYPE_ROUND_ROBIN(ARB_TYPE_ROUND_ROBIN),
    .ARB_LSB_HIGH_PRIORITY(ARB_LSB_HIGH_PRIORITY),
    .RAM_PIPELINE(RAM_PIPELINE)
)
axis_ram_switch_inst (
    .clk(clk),
    .rst(rst),
    // AXI inputs
    .s_axis_tdata({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tkeep({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tvalid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tready({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tready{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tlast({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tid({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tdest({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
    .s_axis_tuser({ {% for p in range(m-1,-1,-1) %}s{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
    // AXI outputs
    .m_axis_tdata({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdata{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tkeep({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tkeep{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tvalid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tvalid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tready({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tready{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tlast({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tlast{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tid({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tid{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tdest({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tdest{% if not loop.last %}, {% endif %}{% endfor %} }),
    .m_axis_tuser({ {% for p in range(n-1,-1,-1) %}m{{'%02d'%p}}_axis_tuser{% if not loop.last %}, {% endif %}{% endfor %} }),
    // Status
    .status_overflow(status_overflow),
    .status_bad_frame(status_bad_frame),
    .status_good_frame(status_good_frame)
);
endmodule
`resetall
""")
    print(f"Writing file '{output}'...")
    # Render the template with the computed port counts and write it out.
    with open(output, 'w') as f:
        f.write(t.render(
            m=m,
            n=n,
            cm=cm,
            cn=cn,
            name=name
        ))
        f.flush()
    print("Done")
# Run the generator when invoked as a script.
if __name__ == "__main__":
    main()
| true |
ae22b8979fd6a0463a31da737a20e4ecd9873189 | Python | try-except-try-accept/advent_of_code_2020_solutions | /DAY 21 - allergens.py | UTF-8 | 2,615 | 2.96875 | 3 | [] | no_license | from collections import Counter
# Advent of Code 2020, day 21: determine which ingredient carries each allergen,
# then count occurrences of ingredients that cannot contain any allergen (part 1).
with open('day21.txt') as f:
    data = f.read()
# Sample puzzle input, kept for manual testing:
##data = """mxmxvkd kfcds sqjhc nhms (contains dairy, fish)
##trh fvjkl sbzzf mxmxvkd (contains dairy)
##sqjhc fvjkl (contains soy)
##sqjhc mxmxvkd sbzzf (contains fish)"""
data = data.strip().split("\n")

all_allergens = Counter()  # allergen -> number of food lines declaring it
allergen_map = {}          # allergen -> Counter over ingredients seen alongside it

# Parse every food line: each listed allergen accumulates the line's ingredients.
for line in data:
    ingredients, allergens = line.split(" (contains ")
    for allergen in allergens[:-1].split(", "):
        ing = ingredients.split(" ")
        all_allergens.update([allergen])
        if allergen in allergen_map:
            allergen_map[allergen].update(ing)
        else:
            allergen_map[allergen] = Counter(ing)

# Process allergens in order of their most frequent candidate ingredient.
sorted_allergens = sorted([[k, v] for k, v in allergen_map.items()],
                          key=lambda x: x[1].most_common()[0])

# An allergen's candidates are the ingredients present on *every* line that
# declares it, i.e. those with the maximum count in its Counter.
final_choices = {}
for allergen, ing in sorted_allergens:
    final_choices[allergen] = [k for k, v in ing.items() if v == max(ing.values())]

# Constraint propagation: once an allergen has a single candidate, remove that
# ingredient from every other allergen's candidate list until all are resolved.
while any(len(v) > 1 for v in final_choices.values()):
    for allergen, ings in final_choices.items():
        if len(ings) == 1:
            resolved = ings[0]
            for other, candidates in final_choices.items():
                if other != allergen and resolved in candidates:
                    candidates.remove(resolved)

# Sanity check: a resolved ingredient must appear whenever its allergen is listed.
for line in data:
    ingredients = line.split(" (contains")[0].split(" ")
    allergens = line.split(" (contains ")[1][:-1].split(", ")
    for allergen, ingredient in final_choices.items():
        ingredient = ingredient[0]
        if ingredient not in ingredients and allergen in allergens:
            print("allergen listed but", ingredient, "not found")

final_allergens = [i[0] for i in final_choices.values()]

# Part 1: count appearances of ingredients that cannot contain any allergen.
tot = 0
for line in data:
    ingredients = line.split(" (")[0].strip().split(" ")
    for ing in ingredients:
        if ing not in final_allergens:
            tot += 1
print(tot)  # bug fix: was print(result) — 'result' was never defined (NameError)
| true |
0c2c2afe3fabfbabc572ca69806a9a6c92395196 | Python | andres-condezo/holbertonschool-higher_level_programming | /0x06-python-classes/3-square.py | UTF-8 | 711 | 4.15625 | 4 | [] | no_license | #!/usr/bin/python3
"""Square module: for build Squares"""
class Square:
    """Model a square defined by the length of one side."""

    def __init__(self, size=0):
        """Initialize the square with a validated side length.

        Args:
            size (int): side length of the square (defaults to 0).

        Raises:
            TypeError: if size is not an int.
            ValueError: if size is negative.
        """
        if not type(size) is int:
            raise TypeError('size must be an integer')
        if size < 0:
            raise ValueError('size must be >= 0')
        self.__size = size

    def area(self):
        """Compute and return the area of this square."""
        return self.__size * self.__size
| true |
a84c288499b1da85b26da07e00a79b2150478aaa | Python | Mushfequr-Rahman/Improving_CSRNet | /config.py | UTF-8 | 899 | 2.625 | 3 | [
"MIT"
] | permissive | import torch
import os
from tensorboardX import SummaryWriter
class Config():
    '''
    Training configuration: dataset location, device selection, optimizer
    hyper-parameters, checkpoint directory and a TensorBoard writer.
    '''
    def __init__(self):
        self.dataset_root = './data/part_A_final'
        #self.dataset_root = './data/part_B_final'
        # Prefer GPU when available, otherwise fall back to CPU.
        self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
        self.lr = 1e-5                       # learning rate
        self.batch_size = 1                  # batch size
        self.epochs = 2000                   # epochs
        self.checkpoints = './checkpoints'   # checkpoints dir
        self.writer = SummaryWriter()        # tensorboard writer
        self.__mkdir(self.checkpoints)

    def __mkdir(self, path):
        '''
        Create the directory if it does not exist.

        Uses try/except rather than an exists() check so there is no race
        between checking and creating (and an existing path is tolerated).
        '''
        try:
            os.makedirs(path)
            print('create dir: ', path)
        except FileExistsError:
            pass
| true |
5276ab6ebe05eb25f9d1ee7f98a2f11eb0e2e378 | Python | KIPAC/cfgmdl | /python/cfgmdl/param_holder.py | UTF-8 | 3,245 | 2.96875 | 3 | [
"BSD-3-Clause"
] | permissive | #!/usr/bin/env python
""" Tools to manage Property object that can be used as fit parameters.
"""
from collections.abc import Mapping
import numpy as np
from .array import Array
from .configurable import Configurable, Property
from .utils import is_none
from .unit import Unit
class ParamHolder(Configurable):
    """Wrapper around a data value.

    Bundles a parameter's value with its uncertainties, bounds, scale factor,
    physical unit and fixed/free status (i.e., for fitting).
    """
    value = Array(help='Parameter Value')
    errors = Array(help='Parameter Uncertainties')
    bounds = Array(help='Parameter Bounds')
    scale = Array(default=1., help='Paramter Scale Factor')
    free = Array(dtype=bool, default=False, help='Free/Fixed Flag')
    unit = Property(dtype=Unit, default=None, help='Parameter unit')
    def __init__(self, *args, **kwargs):
        """Constructor.

        NOTE(review): kwcopy collects positional args under 'value' but is
        never used -- super().__init__ receives the original kwargs, so
        positional arguments appear to be silently dropped.  Probably should
        be super().__init__(**kwcopy); confirm against Configurable first.
        """
        kwcopy = kwargs.copy()
        if args: #pragma: no cover
            #if 'value' in kwcopy:
            #    raise ValueError("value keyword provided in addition to arguments")
            kwcopy['value'] = args
        if is_none(kwargs.get('value', None)):
            kwargs.pop('value', None)
        super(ParamHolder, self).__init__(**kwargs)
        self.check_bounds()
    def __str__(self, indent=0):
        """Return self as a string: value +- errors [bounds] <scale> free."""
        return '{0:>{2}}{1}'.format('', '', indent) + "{_value} += {_errors} [{_bounds}] <{_scale}> {_free}".format(**self.__dict__)
    def update(self, *args, **kwargs):
        """Update the parameter.

        Accepts at most one positional argument: either a Mapping of fields to
        update, or a bare value.  Bounds are re-checked after the update.
        """
        kwcopy = kwargs.copy()
        if args:
            if 'value' in kwcopy:
                raise ValueError("value keyword provided in addition to arguments")
            if len(args) > 1:
                raise ValueError("Only 1 argument allowed")
            if isinstance(args[0], Mapping):
                kwcopy.update(**args[0])
            else:
                kwcopy['value'] = args[0]
        super(ParamHolder, self).update(**kwcopy)
        self.check_bounds()
    def check_bounds(self):
        """Hook for bounds-checking, invoked during assignment.
        raises ValueError if value is outside of bounds.
        does nothing if bounds is set to None.
        """
        # Unset (NaN) value or bounds disables the check entirely.
        if np.isnan(self.value).all():
            return
        if np.isnan(self.bounds).all():
            return
        if np.bitwise_or(self.value < self.bounds[0], self.value > self.bounds[-1]).any(): #pylint: disable=unsubscriptable-object
            # NOTE(review): "%.s" formats the value as an empty string; this
            # was probably meant to be "%s" so the offending value is shown.
            raise ValueError("Value outside bounds: %.s [%s,%s]" % (self.value, self.bounds[0], self.bounds[-1])) #pylint: disable=unsubscriptable-object
    def __call__(self):
        """Return the scaled value, converted through the unit when one is set."""
        base_val = self.scaled
        if is_none(self.unit):
            return base_val
        return self.unit(base_val)
    def set_from_SI(self, val):
        """Set the value from SI units (inverse unit conversion when a unit is set)."""
        if is_none(self.unit):
            self.value = val
            return
        self.value = self.unit.inverse(val)
    @property
    def SI(self):
        """Return the value in SI units"""
        return self()
    @property
    def scaled(self):
        """Return the value in original units"""
        return self.value * self.scale
| true |
4d5477866885122688680686f0e01d2f070874c6 | Python | piravp/scripts | /code-comment-counter/main.py | UTF-8 | 1,203 | 3.203125 | 3 | [] | no_license | import re
import argparse
FILENAME = "commentfile.py"
def block(filename=None):
    """Count single-line comments: lines whose first non-blank char is '#'.

    filename defaults to the module-level FILENAME for backward
    compatibility with the original zero-argument call.
    Returns the number of such lines.
    """
    if filename is None:
        filename = FILENAME
    block_comments = 0
    with open(filename, "r") as file:
        for line in file:
            # Raw string avoids the invalid "\s" escape warning.
            if re.search(r"^\s*#", line):
                block_comments += 1
    return block_comments
def multi(filename=None):
    """Count triple-double-quoted blocks that start at a line beginning.

    filename defaults to the module-level FILENAME for backward
    compatibility.  Returns the number of such multi-line string blocks.
    """
    if filename is None:
        filename = FILENAME
    # 'with' closes the file (the original open().read() leaked the handle).
    with open(filename) as fh:
        fileAsString = fh.read()
    # M-flag: ^ matches at every line start; S-flag: . spans newlines.
    regex_flags = re.M | re.S
    match = re.findall(r'^""".*?"""', fileAsString, regex_flags)
    return len(match)
def main():
    """Print single-line, multi-line, and total comment counts."""
    single_count = block()
    multi_count = multi()
    print("--")
    print("Single comments found:", single_count)
    print("Multi comments found:", multi_count)
    print("Total comments found:", single_count + multi_count)
if __name__ == "__main__":
# parser = argparse.ArgumentParser(description="This is code comment counter for Python.",add_help="Comment code counter for python.")
# parser.print_help()
main() | true |
e1889a792aa4db5ceb13c475e7bddc25fa841406 | Python | RoleMining/ConstrainedRM | /PythonCode/PRucc/library.py | UTF-8 | 8,579 | 2.65625 | 3 | [
"MIT"
] | permissive | import random
import math
# real world datasets characteristics
def get_data(h=6):
    """Return the real-world dataset descriptor for index *h* (default: hc).

    Each entry is [dataset name, #users, #permissions,
    max#perm-per-user, max#users-have-perm].
    """
    catalogue = {
        1: ['datasets/fire1.txt', 365, 709, 617, 251],
        2: ['datasets/fire2.txt', 325, 590, 590, 298],
        3: ['datasets/domino.txt', 79, 231, 209, 52],
        4: ['datasets/apj.txt', 2044, 1164, 58, 291],
        5: ['datasets/emea.txt', 35, 3046, 554, 32],
        6: ['datasets/hc.txt', 46, 46, 46, 45],
        7: ['datasets/customer.txt', 10021, 277, 25, 4184],
        8: ['datasets/americas_small.txt', 3477, 1587, 310, 2866],
        9: ['datasets/americas_large.txt', 3485, 101127, 733, 2812],
    }
    return catalogue[h]
def get_data_opt(h=6):
    """Return the optimal-solution descriptor for dataset *h*.

    Each entry is [dataset name, #roles, min-rpu, max-rpu, min-ppr, max-ppr].
    Dataset 7 (customer) has no known optimal cover, so its row echoes the
    UPA's values and a warning is printed.
    """
    if h == 7:
        print('WARNING: the optimal cover for customer dataset is missing - using UPA\'s values')
    optimal = {
        1: ['datasets/fire1.txt', 64, 1, 9, 1, 395],
        2: ['datasets/fire2.txt', 10, 1, 3, 2, 307],
        3: ['datasets/domino.txt', 20, 1, 9, 1, 201],
        4: ['datasets/apj.txt', 453, 1, 8, 1, 52],
        5: ['datasets/emea.txt', 34, 1, 1, 9, 554],
        6: ['datasets/hc.txt', 14, 1, 6, 1, 32],
        7: ['datasets/customer.txt', 0, 1, 25, 1, 25],  # not optimal solution
        8: ['datasets/americas_small.txt', 178, 1, 12, 1, 263],
        9: ['datasets/americas_large.txt', 398, 1, 4, 1, 733],
    }
    return optimal[h]
# generate mpr/mru values to test heuristics PRUCC1 and PRUCC2, see the paper for details
def get_test_sets(h=6, n_mpr=5, n_pru=5, fix='mpr', u_l='opt'):
#n_mpr number of values for the mpr parameter
#n_pru number of values for the mru parameter
dataset = get_data(h)
print(dataset)
#[dataset name, #roles, min-rpu, max-rpu, min-ppr, max-ppr]
dataset_opt = get_data_opt(h)
print(dataset_opt)
to_test = dict()
if u_l == 'opt':
upper_limit = dataset_opt[5] if fix == 'mpr' else dataset_opt[3]
else:
upper_limit = dataset[3]
upper_limit = upper_limit - 1
if fix == 'mpr':
fixed_constraint = n_mpr - 2
derived_constraint = n_pru - 2
opt_val = dataset_opt[5]
der_ul_val = dataset_opt[3] - 1
else:
fixed_constraint = n_pru - 2
derived_constraint = n_mpr - 2
opt_val = dataset_opt[3]
der_ul_val = dataset_opt[5] - 1
fixed_list = [2]
if upper_limit > 2:
for _ in range(fixed_constraint):
v = fixed_list[-1] + upper_limit // (fixed_constraint + 1)
if v not in fixed_list:
fixed_list.append(v)
if upper_limit not in fixed_list:
fixed_list.append(upper_limit)
print(fixed_list, opt_val)
for t in fixed_list:
derived_list = [math.ceil(dataset[3] / t)] # max#P/mpr or max#P/mru
if t != 1:
delta = (dataset[3] - derived_list[0]) // (derived_constraint + 1)
limit = dataset[3] - 1
for _ in range(derived_constraint):
tmp_val = derived_list[-1] + delta
if tmp_val not in derived_list:
derived_list.append(tmp_val)
if limit not in derived_list:
derived_list.append(limit)
#print(t, derived_list)
to_test[t] = derived_list
return dataset[0], fixed_list, to_test
# another way to generate mpr/mru values to test PRUCC1 and PRUCC2
def compute_test_sets(h=6, n_mpr=3, n_pru=3, fix='mpr'):
dataset = get_data(h)
to_test = dict()
fixed_constraint = n_mpr if fix == 'mpr' else n_pru
derived_constraint = n_pru if fix == 'mpr' else n_mpr
fixed_list = [1]
for _ in range(fixed_constraint):
fixed_list.append(fixed_list[-1] + dataset[3] // (fixed_constraint + 1))
fixed_list.append(dataset[3])
for t in fixed_list:
derived_list = [math.ceil(dataset[3] / t)]
if dataset[3] // t != dataset[3]:
for _ in range(derived_constraint):
tmp_val = derived_list[-1] + dataset[3] // (derived_constraint + 1)
if tmp_val not in derived_list:
derived_list.append(tmp_val)
derived_list.append(dataset[3])
to_test[t] = derived_list
return dataset[0], fixed_list, to_test
# Synthetic roleset: each role is a random subset of up to mpr permissions
# drawn from [1, np]; see the paper for details.
def generate_roleset(nr, np, mpr):
    """Generate *nr* distinct random roles over permissions 1..np.

    Returns (role_set, mapping, new_role_set, used_permissions) where
    mapping renumbers the permissions actually used to a dense 1..k range
    and new_role_set is role_set expressed in the renumbered ids.
    """
    perm_pool = list(range(1, np + 1))
    role_set = []
    created = 1
    while created <= nr:
        size = random.randint(1, mpr)  # random role size in [1, mpr]
        candidate = sorted(set(random.sample(perm_pool, size)))
        if candidate not in role_set:  # keep roles distinct
            role_set.append(candidate)
            created += 1
    # Renumber used permissions densely, in first-appearance order.
    renumber = dict()
    next_id = 1
    for role in role_set:
        for perm in role:
            if perm not in renumber:
                renumber[perm] = next_id
                next_id += 1
    new_role_set = [set(renumber[perm] for perm in role) for role in role_set]
    return role_set, renumber, new_role_set, list(range(1, next_id))
# generate synthetic datasets (represented by UA and PA), see the paper for details
def generate_dataset(nr, nu, np, mru, mpr):
    """Generate a synthetic UA/PA pair: nr roles, nu users, np permissions,
    at most mru roles per user and mpr permissions per role.

    Returns (ua, pa, used_roles, used_permissions); roles no user received
    are removed from pa.
    """
    ua = {} # dictionary (user, set of roles)
    pa = {} # dictionary (role, set of permissions)
    # NOTE(review): `permissions` and the initial `used_permissions`/`role_set`
    # values are immediately overwritten / never used below.
    permissions = list(range(1, np+1))
    roles = list(range(1, nr+1))
    used_roles = set()
    used_permissions = set()
    role_set = []
    # generate random roles
    # print('generate random roles')
    g_role_set, mapping, role_set, used_permissions = generate_roleset(nr, np, mpr)
    r = 1
    for role in role_set:
        pa[r] = role
        r += 1
    # assign roles to users: each user gets 1..mru random roles
    # print('assign roles to users')
    for u in range(1, nu+1):
        n_r_u = random.randint(1, mru)
        ua[u] = set(random.sample(roles, n_r_u))
        used_roles.update(ua[u])
        # print(u, ua[u], ' ', len(ua[u]))
    # remove from pa un-used roles
    unused_roles = set(roles).difference(used_roles)
    for u_r in unused_roles:
        del pa[u_r]
    #print('u_r', used_roles, len(used_roles), 'expected:', nr)
    #print('un_r', unused_roles)
    #print('u_p', len(used_permissions), 'expected:', np)
    return ua, pa, used_roles, used_permissions
# save the generated synthetic dataset (save the UPA matrix represented by UA and PA)
def save_dataset(ua, pa, nr, nu, np, mru, mpr, base='synthetic_datasets/'):
    """Write the UPA matrix (upa_<name>) and the role set (roles_<name>)
    under *base*, one right-aligned "user perm" / "role perm" pair per line.

    Returns the dataset file-name suffix.  Files are closed via 'with'
    (the original leaked both handles, so buffered lines could be lost).
    """
    ds_name = str(nr) + '_' + str(nu) + '_' + str(np) + '_' + str(mru) + '_' + str(mpr) + '.txt'
    # print(ds_name)
    with open(base + 'upa_' + ds_name, 'w') as upa:
        for (u, roles) in ua.items():
            # union of the permissions of every role the user holds
            permissions = set()
            for r in roles:
                permissions.update(pa[r])
            for p in sorted(permissions):
                upa.write("{:>6} {:>6}\n".format(u, p))
    with open(base + 'roles_' + ds_name, 'w') as roles_file:
        for (r, permissions) in pa.items():
            for p in sorted(permissions):
                roles_file.write("{:>6} {:>6}\n".format(r, p))
    return ds_name
# compute role-sets similarity
def compute_sim(roles_a, roles_b):
    """Average, over roles_a, of the best Jaccard similarity against roles_b."""
    total = 0
    for role_a in roles_a.values():
        best = 0
        for role_b in roles_b.values():
            jaccard = len(role_a.intersection(role_b)) / len(role_a.union(role_b))
            if jaccard > best:
                best = jaccard
        total += best
    return total / len(roles_a)
def compare_solutions(ua_orig, pa_orig, ua_mined, pa_mined):
    """Per user, report how many of the original roles the mined role set
    reproduces exactly; exits with status 1 if the user sets differ in size."""
    if len(ua_orig) != len(ua_mined):
        print('ERROR')
        exit(1)
    for u in ua_orig.keys():
        original_roles = [pa_orig[r] for r in ua_orig[u]]
        mined_roles = [pa_mined[r] for r in ua_mined[u]]
        recovered = sum(1 for role in original_roles if role in mined_roles)
        print('or:', len(original_roles), 'mr:', len(mined_roles), 'found:', recovered)
# compute role-set accuracy
def disc_roles(orig_roles, mined_roles):
    """Count original roles that appear verbatim among the mined roles."""
    mined = list(mined_roles.values())
    return sum(1 for role in orig_roles.values() if role in mined)
ece3c17cb0b0d26d9c003a491ead8caeacd0abde | Python | todd-demone/pong.py | /stopwatch.py | UTF-8 | 1,559 | 3.46875 | 3 | [] | no_license | # "Stopwatch: The Game" by Todd Demone
import simplegui
# define global variables
time = 0
is_running = False
total_stops = 0
successful_stops = 0
# define helper function format that converts time
# in tenths of seconds into formatted string A:BC.D
def format(t):
    """Convert *t* tenths of seconds into the string "A:BC.D".

    Uses floor division: under Python 3 the original '/' yields floats,
    producing output like "0.0:0.0.0".
    """
    minutes = t // 600
    seconds = t % 600 // 10
    tenths_of_second = t % 10
    # zero-pad seconds to two digits
    if seconds < 10:
        seconds = "0" + str(seconds)
    else:
        seconds = str(seconds)
    return str(minutes) + ":" + seconds + "." + str(tenths_of_second)
# define event handlers for buttons; "Start", "Stop", "Reset"
def start():
    # "Start" button handler: (re)start the 0.1 s timer.
    timer.start()
def stop():
    # "Stop" button handler: halt the timer and score the attempt.
    # NOTE(review): pressing Stop while already stopped still increments
    # total_stops -- confirm whether that is intended.
    global total_stops, successful_stops
    timer.stop()
    total_stops += 1
    # A stop exactly on a whole second (tenths digit == 0) is a success.
    if time % 10 == 0:
        successful_stops += 1
def reset():
    # "Reset" button handler: stop the timer and clear time and scores.
    global time, total_stops, successful_stops
    timer.stop()
    time = 0
    total_stops = 0
    successful_stops = 0
# define event handler for timer with 0.1 sec interval
def tick():
    # Fires every 100 ms: advance the clock by one tenth of a second.
    global time
    time += 1
# define draw handler
def draw(canvas):
    # Render the formatted clock and the success/total counter each frame.
    canvas.draw_text(format(time), (200,250), 40, "Red")
    canvas.draw_text(str(successful_stops) + "/" + str(total_stops), (350,50), 20, "Yellow")
# create frame
frame = simplegui.create_frame("Stop Watch", 500, 500)

# register event handlers
# timer interval of 100 ms matches the one-tenth-of-a-second clock unit
timer = simplegui.create_timer(100, tick)
frame.set_draw_handler(draw)
frame.add_button("Start", start, 100)
frame.add_button("Stop", stop, 100)
frame.add_button("Reset", reset, 100)

# start frame
frame.start()

# Please remember to review the grading rubric
a9a8124f9664ff61a3d2224f0429a7d962d2ec15 | Python | wy471x/learningNote | /python/excise/basic/chapter10/10-12.py | UTF-8 | 583 | 3.671875 | 4 | [] | no_license | #!/usr/bin/env python
# coding=utf-8
import json
def get_stored_number():
    """Return the number stored in number.json, or None if the file is
    missing, unreadable, or does not contain valid JSON."""
    filename = 'number.json'
    try:
        with open(filename) as f_obj:
            num = json.load(f_obj)
    except (IOError, ValueError):
        # IOError: file missing/unreadable.  ValueError covers
        # json.JSONDecodeError, so a corrupt file no longer crashes.
        return None
    else:
        return num
def store_number():
    # Show the stored favorite number, or prompt for one and save it.
    # NOTE(review): a stored 0 is falsy and would re-prompt; the value is
    # saved as the raw input string, not converted to int -- confirm intent.
    resnum = get_stored_number()
    if resnum:
        print("I know your favorite number!It's " + str(resnum) + ".")
    else:
        num = input("Please enter your favorite number: ")
        filename = 'number.json'
        with open(filename,'w') as f_obj:
            json.dump(num,f_obj)
store_number()
| true |
73044357a0c8288db203f67a79445680943db19a | Python | KProskuryakov/aoc2019 | /day09/day09.py | UTF-8 | 3,724 | 3.0625 | 3 | [
"MIT"
] | permissive | from copy import deepcopy
from itertools import permutations, chain
from asyncio import Queue
import asyncio
from typing import List, Tuple, cast
class IntCode:
def __init__(self, program_str: str, input_queue: Queue, output_queue: Queue):
self.mem = {i: int(v) for i, v in enumerate(program_str.split(","))}
self.cur = 0
self.input_queue: Queue = input_queue
self.output_queue: Queue = output_queue
self.rel_base = 0
async def process(self):
outputs = []
while True:
op = self.cur_op()
if op == 1:
self.add()
elif op == 2:
self.multiply()
elif op == 3:
await self.input()
elif op == 4:
self.output()
elif op == 5:
self.jump_if_true()
elif op == 6:
self.jump_if_false()
elif op == 7:
self.less_than()
elif op == 8:
self.equals()
elif op == 9:
self.adjust_rel_base()
elif op == 99:
return
else:
raise Exception(f"Op {op} is not valid.")
def add(self):
arg1 = self.arg_get(1)
arg2 = self.arg_get(2)
self.arg_set(3, arg1 + arg2)
self.cur += 4
def multiply(self):
arg1 = self.arg_get(1)
arg2 = self.arg_get(2)
self.arg_set(3, arg1 * arg2)
self.cur += 4
async def input(self):
val = await self.input_queue.get()
self.arg_set(1, val)
self.cur += 2
def output(self):
self.output_queue.put_nowait(self.arg_get(1))
self.cur += 2
def jump_if_true(self):
arg1 = self.arg_get(1)
arg2 = self.arg_get(2)
if arg1 != 0:
self.cur = arg2
else:
self.cur += 3
def jump_if_false(self):
arg1 = self.arg_get(1)
arg2 = self.arg_get(2)
if arg1 == 0:
self.cur = arg2
else:
self.cur += 3
def less_than(self):
arg1 = self.arg_get(1)
arg2 = self.arg_get(2)
if arg1 < arg2:
self.arg_set(3, 1)
else:
self.arg_set(3, 0)
self.cur += 4
def equals(self):
arg1 = self.arg_get(1)
arg2 = self.arg_get(2)
if arg1 == arg2:
self.arg_set(3, 1)
else:
self.arg_set(3, 0)
self.cur += 4
def adjust_rel_base(self):
arg1 = self.arg_get(1)
self.rel_base = self.rel_base + arg1
self.cur += 2
def arg_get(self, pos):
op_str = self.cur_op_str()
mode = int(op_str[-2 - pos])
immediate = self[self.cur + pos]
if mode == 0: # Position mode
return self[immediate]
elif mode == 1: # Immediate mode
return immediate
elif mode == 2: # Relative mode
return self[self.rel_base + immediate]
def arg_set(self, pos, val):
op_str = self.cur_op_str()
mode = int(op_str[-2 - pos])
write_loc = self[self.cur + pos]
if mode == 0:
self[write_loc] = val
elif mode == 1:
raise Exception("Invalid operation: Write to immediate location.")
elif mode == 2:
self[self.rel_base + write_loc] = val
def cur_op(self):
op_str = self.cur_op_str()
return int(op_str[-2:])
def cur_op_str(self):
return format(self[self.cur], "05d")
def __getitem__(self, key: int) -> int:
return self.mem.get(key, 0)
def __setitem__(self, key: int, value: int):
self.mem[key] = value
| true |
5eb96cfc06ddc73b8130009914c152a0697595f2 | Python | daikikuchi/Swiss-Tournament-Results | /tournament/tournament.py | UTF-8 | 4,237 | 3.625 | 4 | [] | no_license | #!/usr/bin/env python
#
# tournament.py -- implementation of a Swiss-system tournament
#
import psycopg2
def connect(db_name="tournament"):
    """Connect to the PostgreSQL database.

    Returns a (connection, cursor) tuple.

    Catches only psycopg2 errors -- the original bare ``except:`` swallowed
    everything and implicitly returned None, so callers later failed with
    an opaque unpacking TypeError.  Re-raises after reporting so the real
    cause is visible.
    """
    try:
        db = psycopg2.connect("dbname = {}".format(db_name))
        c = db.cursor()
        return db, c
    except psycopg2.Error:
        print("There was a connection error")
        raise
def deleteMatches():
    """Remove all the match records from the database."""
    conn, c = connect()
    c.execute("DELETE FROM matches")
    # Commit so the deletion is visible to subsequent connections.
    conn.commit()
    conn.close()
    return
def deletePlayers():
    """Remove all the player records from the database."""
    conn, c = connect()
    c.execute("DELETE FROM players")
    # Commit so the deletion is visible to subsequent connections.
    conn.commit()
    conn.close()
    return
def countPlayers():
    """Returns the number of players currently registered."""
    # Use [0] to get the number of player and return it.
    conn, c = connect()
    query = "SELECT count(players.pid) AS no_of_player FROM players;"
    c.execute(query)
    no_of_player = c.fetchone()[0]
    conn.close()
    return no_of_player
def registerPlayer(name):
    """Adds a player to the tournament database.

    The database assigns a unique serial id number for the player.  (This
    should be handled by your SQL database schema, not in your Python code.)

    Args:
      name: the player's full name (need not be unique).
    """
    # pid(player id) is automatically generated by db
    # USE %s and tuples to pass value of parameter (prevents SQL injection)
    conn, c = connect()
    query = "INSERT INTO players(name) VALUES(%s);"
    c.execute(query, (name,))
    conn.commit()
    conn.close()
def playerStandings():
    """Returns a list of the players and their win records, sorted by wins.

    The first entry in the list should be the player
    in first place, or a player
    tied for first place if there is currently a tie.

    Returns:
      A list of tuples, each of which contains (id, name, wins, matches):
        id: the player's unique id (assigned by the database)
        name: the player's full name (as registered)
        wins: the number of matches the player has won
        matches: the number of matches the player has played
    """
    # Since only a few columns and no other use of this query, no use of view
    # Use subqueries to get total number of wins and matches
    # Join the table of players with the results of subqueries.
    # results contains the data as a lists of tuples.
    # NOTE(review): relies on a 'playerstandings' view defined in the
    # database schema (tournament.sql) -- confirm it exists and is ordered.
    conn, c = connect()
    query = "SELECT * FROM playerstandings;"
    c.execute(query)
    results = c.fetchall()
    conn.close()
    return results
def reportMatch(winner, loser):
    """Records the outcome of a single match between two players.

    Args:
      winner:  the id number of the player who won
      loser:  the id number of the player who lost
    """
    # Parameterized query: values are passed as a tuple, not interpolated.
    conn, c = connect()
    query = "INSERT INTO matches(winner,loser) VALUES(%s,%s)"
    c.execute(query, (winner, loser))
    conn.commit()
    conn.close()
    return
def swissPairings():
    """Returns a list of pairs of players for the next round of a match.

    Assuming that there are an even number of players registered, each player
    appears exactly once in the pairings.  Each player is paired with another
    player with an equal or nearly-equal win record, that is, a player adjacent
    to him or her in the standings.

    Returns:
      A list of tuples, each of which contains (id1, name1, id2, name2)
        id1: the first player's unique id
        name1: the first player's name
        id2: the second player's unique id
        name2: the second player's name
    """
    # Standings are already sorted by wins, so adjacent rows form a pair.
    pair_list = playerStandings()
    pair_result = []
    if len(pair_list) % 2 == 0:
        for i in range(0, len(pair_list), 2):
            make_pairs = pair_list[i][0], pair_list[i][1], \
                pair_list[i + 1][0], pair_list[i + 1][1]
            pair_result.append(make_pairs)
    else:
        # NOTE(review): with an odd count this returns an empty list after
        # only printing a warning -- callers get no pairings at all.
        print "The number of the player is not even"
    """ print out pairing results to check """
    for i in range(0, len(pair_result), 1):
        print pair_result[i]
    return pair_result
| true |
c062f800717e2ed47656eb62573a2e37a269748d | Python | ievgenburdin/ml_course_hillel | /course/voting_classifier_preparation.py | UTF-8 | 1,512 | 2.859375 | 3 | [] | no_license | import numpy as np
import pandas as pd
import sklearn.model_selection as ms
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from sklearn.preprocessing import OneHotEncoder, StandardScaler
qualifies_by_double_grade = pd.read_csv("data/double_grade_reevaluated.csv")
print(qualifies_by_double_grade)
X = np.array(qualifies_by_double_grade[["technical_grade", "english_grade"]]).reshape(-1, 2)
y = np.array(qualifies_by_double_grade["qualifies"])
standart_scaler = StandardScaler()
X = standart_scaler.fit_transform(X)
k_folds = ms.KFold(n_splits=4, shuffle=True)
confusion_matrix = np.array([[0, 0], [0, 0]])
for train_index, test_index in k_folds.split(X):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
classification_model = RandomForestClassifier()
# classification_model = SVC(kernel="rbf", gamma="scale")
# classification_model = MLPClassifier(hidden_layer_sizes=(6,), max_iter=1000000)
classification_model.fit(X_train, y_train)
y_modeled = classification_model.predict(X_test)
test_confusion_matrix = metrics.confusion_matrix(y_test, y_modeled)
print(test_confusion_matrix)
confusion_matrix += test_confusion_matrix
print("Confusion matrix:")
print(confusion_matrix)
accuracy = (confusion_matrix[0][0] + confusion_matrix[1][1])/sum(confusion_matrix.ravel())
print("Accuracy: {}".format(accuracy))
| true |
f0e06891e3dfe6675f66a15c9bfd34e0b5aabbab | Python | FKSkinnerLab/CA1-hippo-Izhikevich-models | /CellMetrics/histrogrammer-byResult_STD.py | UTF-8 | 2,632 | 2.515625 | 3 | [] | no_license | #Created on June 26, 2017
#Modified on June 26, 2017
#Author: Anton Lunyov
#Pulls out specified data from 4D tensors generated for analyzing the effects of a,b,d,kLow on SFA, PIR and rheobase current
from pylab import *
##obtained from bubble_rheo_tensor.py, but the same for all (for consistency)
# a value iterations
aRes = 2.4E-4 # 1.0E-4
aUp = 2.4E-3
aLow = 0.0E-3
aVals = arange(aLow,aUp,aRes)
# klow iterations
kRes = 0.02 # 0.01
kLowUp = 0.2
kLowLower = 0.0
kVals = arange(kLowLower,kLowUp,kRes)
# b iters
bRes = 0.6 #0.2
bUp = 6.0
bLow = 0.0
bVals = arange(bLow,bUp,bRes)
# d iters
dRes = 2 # 0.5
dUp = 20
dLow = 0
dVals = arange(dLow,dUp,dRes)
#Load all relevant data
transitionCurrents = load('TC_across_4.npy')
adaptation_rat = load('adaptation_across_4_ratio.npy')
adaptation = load('adaptation_across_4.npy')
rheo = load('rheo_across_4.npy')
#Parameters to pull out a single value from
a = 0.0012
b = 3.0
d = 10.0
kLow = 0.10
#Calculate the indeces in 4D tensor
aInd = int((a/(aUp - aLow))*len(aVals)) - 1
bInd = int((b/(bUp - bLow))*len(bVals)) - 1
dInd = int((d/(dUp - dLow))*len(dVals)) - 1
kInd = int((kLow/(kLowUp - kLowLower))*len(kVals)) - 1
print(transitionCurrents[aInd,bInd,dInd,kInd])
print(adaptation[aInd,bInd,dInd,kInd])
print(adaptation_rat[aInd,bInd,dInd,kInd])
print(rheo[aInd,bInd,dInd,kInd])
std_SFA = std(adaptation)
std_TC = std(transitionCurrents)
std_rheo = std(rheo)
SFA_target = 0.46 #0.46 norm #5.96 rat
rheo_target = 3.5
TC_target = -6.0
SFA_up = SFA_target + 0.5*std_SFA
SFA_low = SFA_target - 0.5*std_SFA
rheo_up = rheo_target + 0.5*std_rheo
rheo_low = rheo_target - 0.5*std_rheo
TC_up = TC_target + 0.5*std_TC
TC_low = TC_target - 0.5*std_TC
inRange = (transitionCurrents < TC_up)*(transitionCurrents > TC_low)
inRange = inRange * (adaptation < SFA_up)*(adaptation > SFA_low)
inRange = inRange * (rheo < rheo_up)*(rheo > rheo_low)
#inRange = inRange*adaptation
#Get the parameter values within
aValsWithin = aVals[nonzero(inRange)[0]]
bValsWithin = bVals[nonzero(inRange)[1]]
dValsWithin = dVals[nonzero(inRange)[2]]
kValsWithin = kVals[nonzero(inRange)[3]]
suptitle(str(float(len(aValsWithin))*100/10000) + "% of the models were explored out of all")
subplot(221)
hist(aValsWithin)
xlabel('a')
ylabel('Frequency of parameter')
subplot(222)
hist(bValsWithin)
xlabel('b')
ylabel('Frequency of parameter')
subplot(223)
hist(dValsWithin)
xlabel('d')
ylabel('Frequency of parameter')
subplot(224)
hist(kValsWithin)
xlabel('k')
ylabel('Frequency of parameter')
show()
| true |
f19a16e321632fe8931fb99d3d7f7e54b0596b39 | Python | shahabmohammadi/Quera.Questions | /contest/3409/source.py | UTF-8 | 134 | 3.953125 | 4 | [] | no_license | target = int(input())
for i in range(1, (target + 1)):
for ii in range(1, target + 1):
print(i * ii, end=" ")
print()
| true |
6028fff0818b059b513024ce0bd5421361e88668 | Python | wagnerfilho1995/Processamento-de-Imagens-UFAL | /AULA 01/cores.py | UTF-8 | 854 | 2.703125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Spyder Editor
Este é um arquivo de script temporário.
"""
#%%
import cv2
import matplotlib.pyplot as plt
import numpy as np
#%%
img = cv2.imread('onepiece.jpeg', cv2.IMREAD_GRAYSCALE)
plt.imshow(img, cmap='gray')
#%%
img = cv2.imread('onepiece.jpeg', cv2.IMREAD_GRAYSCALE)
img = cv2.cvtColor(img, cv2.IMREAD_COLOR)
plt.imshow(img)
#%%
rgb = cv2.split(img)
plt.subplot(221), plt.imgshow(img)
plt.subplot(222), plt.title('R'), plt.imshow(rgb[0], cmap='gray')
plt.subplot(223), plt.title('G'), plt.imshow(rgb[1], cmap='gray')
plt.subplot(224), plt.title('B'), plt.imshow(rgb[2], cmap='gray')
plt.show()
#%%
img = cv2.imread('download.jpeg', cv2.IMREAD_GRAYSCALE)
plt.subplot(221), plt.title('Original'), plt.imshow(img, cmap='gray')
plt.subplot(222), plt.title('Histogram'), plt.hist(img.ravel(), 256, [0,256])
plt.show() | true |
6185f2f52f66ff219b21f110adeab6a0c336d290 | Python | digideskio/rjpres | /rjpres/http_server.py | UTF-8 | 11,452 | 2.953125 | 3 | [
"MIT"
] | permissive | """HTTP Server for rjpres
This module builds on SimpleHTTPServer by adding extra functionality
to deal with serving files from two directory trees (the user's dir
and a static data dir), dynamically providing wrapper pages, and
processing Markdown files that perhaps weren't orginally designed
for presentation with Reveal-JS.
"""
from _version import __version__
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import SimpleHTTPServer
import urllib
import cgi
import re
import sys
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from rjpres.md_munge import MdMunge
class RjpresHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
# Class variables used for a number of configurations used each
# time this handler is instantiated
server_version = "SimpleHTTP+rjpres/" + __version__
#protocol_version ... HTTP protocol, no need to override
#wrapper ... request wrapper
#base_dir ... base dir of files to serve
#data_dir ... dir of local data with RevealJS etc
allow_from = None #set to list of IP regexs to allow access from
def do_GET(self, is_head=False):
"""Serve a GET request (or HEAD by truncating)
The HEAD response is identical to GET except that no
content is sent, could likely be optimized.
"""
if (not self.check_access()):
return
path = self.check_path()
f = self.send_head(path)
if f:
if (not is_head):
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request
All this does is call do_GET with the flag is_head set to do
everything except actually sending content
"""
self.do_GET(is_head=True)
def check_access(self):
"""Should we answer this request? Send error and respond False if not
If we wanted to be more brutal and simply not answer then this
could be done via override of superclass verify_request(). However,
will answer this with a 403 so do it in HTTP.
FIXME - should implement something based on the dotted IP notation
FIXME - should work with numeric IPs (left partfirst) and resolved
IP (right part first, currently not implemented)
"""
if (self.allow_from is None):
return True
# have access control list
remote_host = self.client_address[0]
for pattern in self.allow_from:
if (re.match(pattern,remote_host)):
return True
# no match => not allowed
self.send_error(403)
return False
def check_path(self):
"""Check path requested and work out whether we'll make a substitution
If self.path corresponds with a file in the server context the
simply return that, else look for package data that matches the
path. In the case that neither match then return the unmodified
path so that when used it generates an understandable error.
"""
path = self.translate_path(self.path)
local_path = os.path.join(self.base_dir,path)
data_path = os.path.join(self.data_dir,path)
if (os.path.exists(local_path)):
# All good, serve file requested
return(local_path)
elif (os.path.exists(data_path)):
# We have this as part of module data
self.log_message("serving %s from module data" % (data_path))
return(data_path)
else:
# Fall back on usual 404 etc.
return(local_path)
def send_head(self,path):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
sio = None
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
# Is this a markdown file which needs to be processed first?
wurl = self.wrapper.wrapper_url(path)
if (wurl):
mdm = MdMunge()
sio = mdm.md_needs_munge(f)
if (sio):
self.log_message("have processed markdown for %s" % (path))
except IOError:
# Should we generate a dynamic wrapper?
surl = self.wrapper.source_url(path)
if (surl):
sio = self.wrapper.wrapper(surl)
self.log_message("have generated wrapped for %s" % (surl))
else:
self.send_error(404, "File not found")
return None
# Now expect to have either valid sio stream else valid f
if (sio):
self.send_head_from_stringio(sio)
return sio
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">\n')
f.write("<html>\n<head>\n")
f.write("<title>Directory listing for %s</title>\n" % displaypath)
f.write('<link rel="stylesheet" href="/css/rjpres.css">\n</head<\n>')
f.write("<body>\n")
presentations = []
files = []
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
wurl = self.wrapper.wrapper_url(name)
if (wurl):
files.append(
'<li class="markdown"><a href="%s" alt="raw markdown">%s</a></li>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
presentations.append(
'<li class="pres"><a href="%s" alt="presentation">Presentation of %s</a></li>\n'
% (urllib.quote(wurl), cgi.escape(displayname)))
else:
files.append(
'<li><a href="%s">%s</a></li>\n'
% (urllib.quote(linkname), cgi.escape(displayname)))
# Write sections with entries...
if (len(presentations)>0):
f.write("<h2>Presentations</h2>\n")
f.write("<ul>\n")
f.write(''.join(presentations))
f.write("</ul>\n\n")
if (len(files)>0):
f.write("<h2>Directory listing for %s</h2>\n" % displaypath)
f.write("<ul>\n")
f.write(''.join(files))
f.write("</ul>\n")
f.write("</body>\n</html>\n")
self.send_head_from_stringio(f)
return f
def send_head_from_stringio(self,f):
"""Write HTTP HEAD from StringIO, leaving f ready to copy contents
Note: expects f to be at end so that tell() works to get length
"""
length = f.tell()
f.seek(0)
encoding = sys.getfilesystemencoding()
self.send_response(200)
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
    def translate_path(self, path):
        """Translate a /-separated PATH to the local filename syntax.
        Components that mean special things to the local file system
        (e.g. drive or directory names) are ignored. (XXX They should
        probably be diagnosed.)
        Returns a relative path that should be interpretted in the
        server's context.
        """
        # abandon query parameters and fragments before decoding
        path = path.split('?',1)[0]
        path = path.split('#',1)[0]
        path = posixpath.normpath(urllib.unquote(path))
        words = path.split('/')
        # drop empty components produced by leading/duplicate slashes
        words = filter(None, words)
        path = '' #os.getcwd()
        for word in words:
            # strip any drive letter or directory part smuggled into a component
            drive, word = os.path.splitdrive(word)
            head, word = os.path.split(word)
            # refuse '.' and '..' so clients cannot escape the served tree
            if word in (os.curdir, os.pardir): continue
            if (path==''):
                path = word
            else:
                path = os.path.join(path, word)
        return path
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
-- note however that this the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
    # Class-level initialisation, run once at class definition time:
    # build the extension -> MIME type table used when serving files.
    if not mimetypes.inited:
        mimetypes.init() # try to read system mime.types
    extensions_map = mimetypes.types_map.copy()
    # Serve source files as plain text; unknown extensions as raw bytes.
    extensions_map.update({
        '': 'application/octet-stream', # Default
        '.py': 'text/plain',
        '.c': 'text/plain',
        '.h': 'text/plain',
        })
| true |
0a28c20ba4989fbc203900859da88d633ecaef33 | Python | thed3de/Selfstudy-note-for-advances-in-financial-machine-learning | /chapter_14/deriving_the_timing_of_bets_from_a_series_of_rarget_positions.py | UTF-8 | 342 | 2.625 | 3 | [] | no_license | # a bet takes place between flat positions or position flips
df0=tPos[tPos==0].index
df1=tPos.shift(1);df1=df1[df1!=0].index
bets=df0.intersection(df1)#flattening
df0=tPos.iloc[1:]*tPos.iloc[:-1].values
bets=bets.union(df0[df0<0].index).sort_values()#tPos flips
if tPos.index[-1] not in bets:bets=bets.append(tPos.index[-1:])#last bet
| true |
e100ceee3e7422266dda44e17af1c5c503275d3a | Python | Margu006/2019-hw2-hyphaltip | /open_shut.py | UTF-8 | 1,619 | 3.171875 | 3 | [] | no_license | #!/usr/bin/env python3
import os,csv,gzip,re
# write a script to create new file called closed.txt
outfile="closed.txt"
# on the HPCC cluster you can use the file
# NOTE(review): `file` shadows the Python 2 builtin of the same name.
file="/bigdata/gen220/shared/simple/title.basics.tsv.gz"
# or if you run this on your own computer use this
#file="title.basics.tsv.gz"
# Download the IMDB dataset into the CWD if the expected path is missing.
if not os.path.exists(file):
    os.system("curl -O https://datasets.imdbws.com/title.basics.tsv.gz")
# Counters over all title rows.
door_count = 0
door_word_count = 0
opened = 0
closed = 0
open_or_closed = 0
open_and_closed = 0
# Whitespace-delimited, case-sensitive word matches.
openre = re.compile(r'\sOpen\s')
closedre = re.compile(r'\sClosed\s')
#openclosedre = re.compile(r'\s(Open|Closed)\s')
with gzip.open(file,"r") as fh:
    # now use the csv reader to read in the file, delimiter is '\t'
    header = next(fh)
    for line in fh:
        # gzip in binary mode yields bytes; decode before splitting columns
        row = line.decode('utf-8').strip().split("\t")
        # column 2 is primaryTitle
        title = row[2]
        if "door" in title or "Door" in title:
            door_count += 1
        if " door " in title or " Door " in title:
            door_word_count += 1
        o = openre.search(title)
        c = closedre.search(title)
        if o:
            opened +=1
        if c:
            closed +=1
        if o or c:
            open_or_closed += 1
        if o and c:
            open_and_closed += 1
print("There are %d titles with door or Door"%(door_count))
print("There are %d titles with [Ddoor] as word"%(door_word_count))
print("There are %d titles with Open as a word and %d with Closed"%(
    opened,closed))
print("There are %d titles with Open or Closed"%(
    open_or_closed))
print("There are %d titles with Open and Closed"%(
    open_and_closed))
| true |
6a41a11be203cc3873c5e90b4472fca474aa7ed3 | Python | Gopichand184/python-basics | /bin/listtask1.py | UTF-8 | 854 | 3.578125 | 4 | [] | no_license | city_name = ['Bangalore', 'Kolkata', 'Chennai', 'Delhi', 'Jammu']
print(type(city_name),"length of city_name is" ,len(city_name))
city_name.remove("Delhi")
print(city_name)
for x in city_name:
if "indore" in x:
print("yes indore exists in list ")
else:
print("no indore is not in the list")
break
city_name.extend(["Kanpur","Chandigarh"])
print(city_name)
for x in city_name:
print(x[-2])
a = [1,2,3,4,5,6,7,8,9,10]
even = []
odd = []
for i in a:
if ( i % 2 == 0):
even.append(i)
else:
odd.append(i)
print("even_list" , even)
print("odd_list" , odd)
dict = dict(zip(odd, even))
print(dict)
print(dict.keys())
print(dict.values())
a =['abc', 'xyz', 'aba', '1221', '2342','samples']
total_length = len(a)
for i in a:
if len(i) >=2 and i[0] == i[-1]:
print(i)
print(total_length)
| true |
09a20ad7dc952feb5e3bdbd64d6f6194e7b2ea88 | Python | RIABYI/skillup_course | /Homework/lesson3.9_homework.py | UTF-8 | 2,943 | 4.46875 | 4 | [] | no_license | # lesson3.9
# Разработайте класс с "полной инкапсуляцией", доступ к атрибутам которого и
# изменение данных реализуются через вызовы методов. В объектно-ориентированном
# программировании принято имена методов для извлечения данных начинать со
# слова get (взять), а имена методов, в которых свойствам присваиваются значения,
# – со слова set (установить). Например, getField, setField
class Automobile:
    """Fully encapsulated car record: all access goes through get/set methods.

    `__setattr__` silently ignores any attribute outside the three private
    fields, and `__getattr__` returns an explanatory string for unknown
    attributes instead of raising -- the demo code below relies on both.
    """
    def __init__(self, manufacturer = str, model = str, price = int):
        # NOTE(review): the defaults are the *types* str/int themselves,
        # not sensible values -- presumably meant as documentation; confirm.
        self.__manufacturer = manufacturer
        self.__model = model
        self.__price = price
    def get_car_information(self) -> None:
        """Print the car's make, model and price to stdout."""
        print(f'Автомобиль: {self.__manufacturer} {self.__model}'
              f'\nЦЕНА: {self.__price}')
    def get_manufacturer(self) -> str:
        """Return car information: manufacturer."""
        return(self.__manufacturer)
    def get_model(self) -> str:
        """Return car information: model."""
        return(self.__model)
    def get_price(self) -> int:
        """Return car information: price."""
        return(self.__price)
    def set_car_information(self) -> None:
        """Interactively replace all car information via input() prompts."""
        self.__manufacturer = input('Введите марку автомобиля: ')
        self.__model = input('Введите модель автомобиля: ')
        self.__price = int(input('Введите стоимость автомобиля: '))
    def __setattr__(self, name, value): # Guards against creating 'stray' attributes
        if name in ['_Automobile__manufacturer', '_Automobile__model',
                    '_Automobile__price']:
            self.__dict__[name] = value
    def __getattr__(self, name): # Explains that the attribute is 'stray'
        if name not in ['_Automobile__manufacturer', '_Automobile__model',
                        '_Automobile__price']: # Could be replaced with self.__dict__
            return f'Атрибут "{name}" не существует!'
mycar = Automobile('Mazda', '6', 15000) # Create the object
mycar.get_car_information() # Show the object's information (read all data)
mycar.zet = 'VAZda' # Try to create a 'stray' attribute (silently ignored)
print(mycar.zet) # Try to read the 'stray' attribute (prints the error string)
# mycar.set_car_information() # Replace all information about the car
mycar.get_car_information()
print(mycar.get_price()) # Read the car's price
1276bd39d0c14681607dc3039f75c203523827b4 | Python | UWPCE-PythonCert-ClassRepos/Self_Paced-Online | /students/jared_mulholland/lesson_4/trigram.py | UTF-8 | 3,470 | 4.21875 | 4 | [] | no_license | """
Trigram analysis is very simple. Look at each set of three adjacent words in a document. Use the first two words of the set as a key, and remember the fact that the third word followed that key. Once you’ve finished, you know the list of individual words that can follow each two word sequence in the document. For example, given the input:
I wish I may I wish I might
You might generate:
"I wish" => ["I", "I"]
"wish I" => ["may", "might"]
"may I" => ["wish"]
"I may" => ["I"]
This says that the words “I wish” are twice followed by the word “I”, the words “wish I” are followed once by “may” and once by “might” and so on.
To generate new text from this analysis, choose an arbitrary word pair as a starting point. Use these to look up a random next word (using the table above) and append this new word to the text so far. This now gives you a new word pair at the end of the text, so look up a potential next word based on these. Add this to the list, and so on. In the previous example, we could start with “I may”. The only possible next word is “I”, so now we have:
I may I
The last two words are “may I”, so the next word is “wish”. We then look up “I wish”, and find our choice is constrained to another “I”.:
I may I wish I
Now we look up “wish I”, and find we have a choice. Let’s choose “may”:
I may I wish I may
Now we’re back where we started from, with “I may.” Following the same sequence, but choosing “might” this time, we get:
I may I wish I may I wish I might
At this point we stop, as no sequence starts with "I might".
"""
import random
tg_dict = {"I wish": ["I","I"],"wish I": ["may","might"],"may I": ["wish"], "I may": ["I"]}
def trigram(dict):
    """Generate text by a random walk over a bigram -> followers table.

    Starts from a random two-word key and repeatedly appends one of the
    recorded follower words until the current word pair has no entry.
    The parameter deliberately keeps its original name ``dict`` (shadowing
    the builtin) so existing callers are unaffected.
    """
    current_pair = random.choice(list(dict.keys()))
    words = current_pair.split()
    while current_pair in dict:
        followers = dict[current_pair]
        words.append(followers[random.randint(0, len(followers) - 1)])
        current_pair = " ".join(words[-2:])
    return " ".join(words)
#import sherlock and turn it to dictionary
import os
import random
import string
file_path = 'C:\\Users\\Jared\\Documents\\IntroToPython\\Self_Paced-Online\\students\\jared_mulholland\\lesson_4'
file_name = 'sherlock_short.txt'
def list_create(file_path, file_name):
    """Read a text file and return its words as a lowercase list.

    The punctuation characters ``-().?!,`` are replaced by spaces before
    splitting, so e.g. "Foo-bar!" yields ["foo", "bar"].

    Args:
        file_path: directory containing the file (the process still chdirs
            there, preserving the original side effect).
        file_name: name of the text file inside *file_path*.

    Returns:
        List of lowercase word tokens with punctuation stripped.
    """
    os.chdir(file_path)
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(file_name) as fh:
        text = fh.read()
    # Map each of the seven punctuation characters to a space in one pass.
    punct = str.maketrans("-().?!,", 7 * " ")
    words = text.translate(punct).lower().split()
    # The original also looped calling str.replace and discarding the result
    # -- a no-op, since strings are immutable -- so that loop was removed.
    return words
def dict_create(temp_list):
    """Build a trigram table: "word1 word2" -> list of observed next words.

    Bug fix: the original loop ran ``while i < len(temp_list) - 3`` and so
    silently dropped the final trigram of the text; every window of three
    consecutive words is now included.

    Args:
        temp_list: sequence of word tokens (e.g. from list_create).

    Returns:
        Dict mapping each adjacent word pair (space-joined) to the list of
        words that followed it, in order of appearance.
    """
    table = {}
    # Each i starts a full trigram (i, i+1, i+2); the last valid start
    # index is len(temp_list) - 3, hence range(len(temp_list) - 2).
    for i in range(len(temp_list) - 2):
        key = temp_list[i] + " " + temp_list[i + 1]
        table.setdefault(key, []).append(temp_list[i + 2])
    return table
def trigram_new(dict):
    """Generate trigram text starting from a random key of *dict*.

    This function was a line-for-line duplicate of :func:`trigram` (a
    redundant ``str()`` call on the already-str key was the only textual
    difference), so it now simply delegates to it.  Signature, random-number
    consumption and return value are unchanged.
    """
    return trigram(dict)
| true |
281d0002c9a705bd95db23c730b853d4141dc00c | Python | yi-fan-wang/bilby | /examples/gw_examples/injection_examples/sine_gaussian_example.py | UTF-8 | 2,581 | 2.703125 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
Tutorial to demonstrate running parameter estimation on a sine gaussian
injected signal.
"""
from __future__ import division, print_function
import bilby
import numpy as np
# Set the duration and sampling frequency of the data segment that we're going
# to inject the signal into
# Segment duration (s) and sampling rate (Hz) for the injected data.
duration = 4.
sampling_frequency = 2048.
# Specify the output directory and the name of the simulation.
outdir = 'outdir'
label = 'sine_gaussian'
bilby.core.utils.setup_logger(outdir=outdir, label=label)
# Set up a random seed for result reproducibility. This is optional!
np.random.seed(170801)
# We are going to inject a sine gaussian waveform. We first establish a
# dictionary of parameters that includes all of the different waveform
# parameters
injection_parameters = dict(
    hrss=1e-22, Q=5.0, frequency=200.0, ra=1.375, dec=-1.2108,
    geocent_time=1126259642.413, psi=2.659)
# Create the waveform_generator using a sine Gaussian source function
waveform_generator = bilby.gw.waveform_generator.WaveformGenerator(
    duration=duration, sampling_frequency=sampling_frequency,
    frequency_domain_source_model=bilby.gw.source.sinegaussian)
# Set up interferometers. In this case we'll use three interferometers
# (LIGO-Hanford (H1), LIGO-Livingston (L1), and Virgo (V1)). These default to
# their design sensitivity
ifos = bilby.gw.detector.InterferometerList(['H1', 'L1', 'V1'])
ifos.set_strain_data_from_power_spectral_densities(
    sampling_frequency=sampling_frequency, duration=duration,
    start_time=injection_parameters['geocent_time'] - 3)
ifos.inject_signal(waveform_generator=waveform_generator,
                   parameters=injection_parameters)
# Set up prior, which is a dictionary
priors = dict()
# Assigning a float fixes these parameters to their injected values
# (delta-function priors); only Q, frequency and hrss are sampled.
for key in ['psi', 'ra', 'dec', 'geocent_time']:
    priors[key] = injection_parameters[key]
priors['Q'] = bilby.core.prior.Uniform(2, 50, 'Q')
priors['frequency'] = bilby.core.prior.Uniform(30, 1000, 'frequency', unit='Hz')
priors['hrss'] = bilby.core.prior.Uniform(1e-23, 1e-21, 'hrss')
# Initialise the likelihood by passing in the interferometer data (IFOs) and
# the waveform generator
likelihood = bilby.gw.likelihood.GravitationalWaveTransient(
    interferometers=ifos, waveform_generator=waveform_generator)
# Run sampler. In this case we're going to use the `dynesty` sampler
result = bilby.core.sampler.run_sampler(
    likelihood=likelihood, priors=priors, sampler='dynesty', npoints=1000,
    injection_parameters=injection_parameters, outdir=outdir, label=label)
# make some plots of the outputs
result.plot_corner()
| true |
1ce8ca9bacb05695c1957a3ad6facfb2fd176edb | Python | ipetrushenko-softheme/codejam | /dp/44-580A.py | UTF-8 | 350 | 3.1875 | 3 | [] | no_license | def read_tokens():
return input().strip().split(' ')
def read_ints():
    """Read one line from stdin and return its tokens as ints."""
    return [int(s) for s in read_tokens()]
# Input format: first line holds n (the array length), second line the values.
n, = read_ints()
arr = read_ints()
def solve(arr: list) -> int:
    """Return the length of the longest nondecreasing contiguous run in arr.

    Raises ValueError for an empty list (max of an empty sequence), exactly
    like the original dp formulation.
    """
    run_lengths = [1] * len(arr)
    # Extend the run ending at each position whenever the order is kept.
    for idx, (prev, cur) in enumerate(zip(arr, arr[1:]), start=1):
        if cur >= prev:
            run_lengths[idx] = run_lengths[idx - 1] + 1
    return max(run_lengths)
print(solve(arr))
| true |
b530b92484ac0eb31ef2315702eb46fd00b50489 | Python | Noirebao/TJCS-Homework | /编译原理课程设计/source/ObjectCode.py | UTF-8 | 15,496 | 2.59375 | 3 | [
"MIT"
] | permissive | # This Python file uses the following encoding: utf-8
import copy
import sys
# 目标代码生成器
class ObjectCodeGenerator():
    """Compiler back end: translates three-address intermediate code into
    MIPS assembly with a simple register allocator.

    Temporaries are named t1, t2, ...; `regTable` maps register names
    ($7..$25) to the temporary they currently hold ('' = free), and
    `varStatus` records whether each temporary lives in a register or
    has been spilled to memory.
    """
    def __init__(self, middleCode, symbolTable, funcTable):
        # Deep-copy inputs so code generation cannot mutate the caller's data.
        self.middleCode = copy.deepcopy(middleCode)
        self.symbolTable = copy.deepcopy(symbolTable)
        self.funcNameTable = []
        for f in funcTable:
            self.funcNameTable.append(f.name)
        self.mipsCode = []
        # Registers $7..$25 are available for temporaries; '' means free.
        self.regTable = { '$' + str(i): '' for i in range(7, 26) }
        self.varStatus = {} # whether each variable currently lives in a register or in memory
        self.DATA_SEGMENT = 10010000
        self.STACK_OFFSET = 8000
        return
    def getRegister(self, identifier, codes):
        """Return a register holding *identifier*, allocating (and spilling
        via freeRegister) if needed.  Non-temporaries -- anything not
        starting with 't', e.g. literals -- are returned unchanged."""
        if identifier[0] != 't':
            return identifier
        # Already register-resident: find and reuse its register.
        if identifier in self.varStatus and self.varStatus[identifier] == 'reg':
            for key in self.regTable:
                if self.regTable[key] == identifier:
                    return key
        # print('---------------')
        # print(identifier + ' is requesting a register')
        # print(self.regTable)
        # print(self.varStatus)
        # Otherwise claim a free register, spilling until one is available.
        while True:
            for key in self.regTable:
                if self.regTable[key] == '':
                    self.regTable[key] = identifier
                    self.varStatus[identifier] = 'reg'
                    return key
            self.freeRegister(codes)
    # Free one register (spill) so allocation can proceed.
    def freeRegister(self, codes):
        """Release a register: prefer variables never used again in *codes*;
        otherwise spill one variable's value to the data segment."""
        # Collect the variables currently held in registers (t1, t2, ...).
        varRegUsed = list(filter(lambda x: x != '', self.regTable.values()))
        # Count how often each of those variables appears in the remaining code.
        varUsageCnts = {}
        for code in codes:
            # print(code)
            for item in code:
                # print(item)
                tmp = str(item)
                if tmp[0] == 't': # it is a temporary variable
                    if tmp in varRegUsed:
                        if tmp in varUsageCnts:
                            varUsageCnts[tmp] += 1
                        else:
                            varUsageCnts[tmp] = 1
        # print('===\n', 'varUsageCnts:', varUsageCnts, '\n===\n')
        sys.stdout.flush()
        flag = False
        # First free every register whose variable is never referenced again.
        for var in varRegUsed:
            if var not in varUsageCnts:
                for reg in self.regTable:
                    if self.regTable[reg] == var:
                        self.regTable[reg] = ''
                        self.varStatus[var] = 'memory'
                        flag = True
        if flag:
            return
        # Otherwise spill the least-used variable's register.
        # NOTE(review): sorted() returns a new list that is discarded here,
        # so varFreed is just the first insertion-ordered key, not the
        # least-used variable -- candidate bug to confirm.
        sorted(varUsageCnts.items(), key=lambda x: x[1])
        varFreed = list(varUsageCnts.keys())[0]
        for reg in self.regTable:
            if self.regTable[reg] == varFreed:
                # Store the value at its data-segment offset before freeing.
                for item in self.symbolTable:
                    if item.place == varFreed: # t1, t2, ...
                        self.mipsCode.append('addi $at, $zero, 0x{}'.format(self.DATA_SEGMENT))
                        self.mipsCode.append('sw {}, {}($at)'.format(reg, item.offset))
                self.regTable[reg] = ''
                self.varStatus[varFreed] = 'memory'
                return
        return
    def genMips(self):
        """Consume self.middleCode and emit MIPS assembly into self.mipsCode:
        a .data segment for arrays, then a .text segment translating each
        three-address quadruple in order."""
        mc = self.mipsCode
        dc = self.middleCode
        # Program prologue: call main, then jump to the terminating label.
        dc.insert(0, ('call', '_', '_', 'programEnd'))
        dc.insert(0, ('call', '_', '_', 'main'))
        mc.append('.data') # data segment: storage for arrays
        for s in self.symbolTable:
            if s.type == 'int array':
                size = 4 # element size in bytes
                for dim in s.dims:
                    size *= int(dim)
                mc.append('    ' + s.place + ': .space ' + str(size))
        mc.append('')
        mc.append('.text') # code segment
        # Initialise stack and frame pointers above the data segment.
        mc.append('    addiu $sp, $zero, 0x{}'.format(self.DATA_SEGMENT + self.STACK_OFFSET))
        mc.append('    or $fp, $sp, $zero')
        while dc:
            code = dc.pop(0)
            # Normalise the return-value pseudo-register name to $v0.
            tmp = []
            for item in code:
                if item == 'v0':
                    tmp.append('$v0')
                else:
                    tmp.append(item)
            code = tmp
            if code[0] == ':=':
                src = self.getRegister(code[1], dc)
                dst = self.getRegister(code[3], dc)
                mc.append('    add {},$zero,{}'.format(dst, src))
            elif code[0] == '[]=': # []=, t21, _, t17[t22]
                src = self.getRegister(code[1], dc)
                base = code[3][ : code[3].index('[')]
                offset = code[3][code[3].index('[') + 1 : -1]
                dst_offset = self.getRegister(offset, dc)
                # address = base label + 4 * index
                mc.append('    la $v1,{}'.format(base))
                mc.append('    mul {},{},4'.format(dst_offset,dst_offset))
                mc.append('    addu {},{},$v1'.format(dst_offset, dst_offset))
                mc.append('    sw {},'.format(src) + '0({})'.format(dst_offset))
            elif code[0] == '=[]': # =[], t17[t23], -, t24
                dst = self.getRegister(code[3], dc)
                base = code[1][ : code[1].index('[')]
                offset = code[1][code[1].index('[') + 1 : -1]
                src_offset = self.getRegister(offset, dc)
                # address = base label + 4 * index
                mc.append('    la $v1,{}'.format(base))
                mc.append('    mul {},{},4'.format(src_offset,src_offset))
                mc.append('    addu {},{},$v1'.format(src_offset, src_offset))
                mc.append('    lw {},'.format(dst) + '0({})'.format(src_offset))
            # function or label
            elif code[1] == ':':
                if code[0] in self.funcNameTable or code[0][0] == 'f': # is a function definition
                    mc.append('') # empty line
                mc.append('{}:'.format(code[0]))
            # jump to the function's label
            elif code[0] == 'call':
                mc.append('    jal {}'.format(code[3]))
            # actual arg of a function call
            elif code[0] == 'push':
                if code[3] == 'ra': # return addr
                    mc.append('    sw $ra, {}($fp)'.format(code[2]))
                else:
                    register = self.getRegister(code[3], dc)
                    # immediates are first materialised into $a0
                    if str(register)[0] != '$':
                        mc.append("    add $a0, $zero, {}".format(register))
                        register = '$a0'
                    mc.append('    sw {}, {}($fp)'.format(register, code[2]))
            # get args inside the function
            elif code[0] == 'pop':
                if code[3] == 'ra':
                    mc.append('    lw $ra, {}($fp)'.format(code[2]))
                else:
                    register = self.getRegister(code[3], dc)
                    mc.append('    lw {}, {}($fp)'.format(register, code[2]))
            # store var from reg to memory
            elif code[0] == 'store':
                if code[3] == 'ra':
                    mc.append('    sw $ra, {}($sp)'.format(code[2]))
                else:
                    register = self.getRegister(code[3], dc)
                    if str(register)[0] != '$':
                        mc.append("    add $a0,$zero,{}".format(register))
                        register = '$a0'
                    mc.append('    sw {}, {}($sp)'.format(register, code[2]))
            # load var from memory to reg
            elif code[0] == 'load':
                if code[3] == 'ra':
                    mc.append('    lw $ra, {}($sp)'.format(code[2]))
                else:
                    register = self.getRegister(code[3], dc)
                    mc.append('    lw {}, {}($sp)'.format(register, code[2]))
            # jump instruction
            elif code[0] == 'j':
                mc.append('    j {}'.format(code[3]))
            elif code[0] == 'j>':
                arg1 = self.getRegister(code[1], dc)
                mc.append('    bgt {},$zero,{}'.format(arg1, code[3]))
            elif code[0] == 'return':
                mc.append('    jr $ra')
            # arithmetic/relational operations with 3 operands; immediates
            # are materialised into $a1/$a2 before the register-only form
            else:
                if code[0] == '+':
                    if code[1] == 'fp':
                        mc.append("    add $fp,$fp,{}".format(code[2]))
                    elif code[1] == 'sp':
                        mc.append("    add $sp,$sp,{}".format(code[2]))
                    else:
                        arg1 = self.getRegister(code[1], dc)
                        arg2 = self.getRegister(code[2], dc)
                        arg3 = self.getRegister(code[3], dc)
                        if str(arg1)[0] != '$':
                            mc.append("    add $a1,$zero,{}".format(arg1))
                            arg1 = '$a1'
                        mc.append("    add {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '-':
                    if code[1] == 'fp':
                        mc.append("    sub $fp,$fp,{}".format(code[2]))
                    elif code[1] == 'sp':
                        mc.append("    sub $sp,$sp,{}".format(code[2]))
                    else:
                        arg1 = self.getRegister(code[1], dc)
                        arg2 = self.getRegister(code[2], dc)
                        arg3 = self.getRegister(code[3], dc)
                        if str(arg1)[0] != '$':
                            mc.append("    add $a1,$zero,{}".format(arg1))
                            arg1 = '$a1'
                        if str(arg2)[0] != '$':
                            mc.append("    add $a2,$zero,{}".format(arg2))
                            arg2 = '$a2'
                        mc.append("    sub {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '*':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    mul {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '/':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    div {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '%':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    div {},{},{}".format(arg3, arg1, arg2))
                    # remainder lives in HI after div
                    mc.append("    mfhi {}".format(arg3))
                elif code[0] == '<':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    slt {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '>':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    sgt {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '!=':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    sne {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '==':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    mc.append("    seq {},{},{}".format(arg3, arg1, arg2))
                elif code[0] == '<=':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    # a <= b computed as not (a > b)
                    mc.append("    sgt {},{},{}".format(arg3, arg1, arg2))
                    mc.append("    xori {},{},1".format(arg3, arg3))
                elif code[0] == '>=':
                    arg1 = self.getRegister(code[1], dc)
                    arg2 = self.getRegister(code[2], dc)
                    arg3 = self.getRegister(code[3], dc)
                    if str(arg1)[0] != '$':
                        mc.append("    add $a1,$zero,{}".format(arg1))
                        arg1 = '$a1'
                    if str(arg2)[0] != '$':
                        mc.append("    add $a2,$zero,{}".format(arg2))
                        arg2 = '$a2'
                    # a >= b computed as not (a < b)
                    mc.append("    slt {},{},{}".format(arg3, arg1, arg2))
                    mc.append("    xori {},{},1".format(arg3, arg3))
        mc.append('')
        mc.append('programEnd:')
        mc.append('    nop')
        # self.prtMips()
        sys.stdout.flush()
        return
    def prtMips(self):
        """Print the generated MIPS assembly to stdout, one line each."""
        for code in self.mipsCode:
            print(code)
        return
| true |
c4db689c5ac29aea145a172d6d09fc1a6d0fb998 | Python | adamcatto/dippy | /src/utils.py | UTF-8 | 1,664 | 2.78125 | 3 | [] | no_license | import os
import numpy as np
import cv2
from tqdm import tqdm
def pad_int(num, desired_length):
    """Left-pad the decimal representation of *num* with zeros.

    Args:
        num: integer to format.
        desired_length: total width of the result; must be at least as long
            as str(num) (enforced by the assert, as in the original).

    Returns:
        str(num) left-padded with '0' to *desired_length* characters.
    """
    num_str = str(num)
    assert len(num_str) <= desired_length
    # rjust with '0' reproduces the original manual zero-string loop exactly
    # (including its treatment of a leading minus sign).
    return num_str.rjust(desired_length, '0')
def remove_duplicates(in_dir, test_dir):
    """Copy frames from *in_dir* to *test_dir*, skipping consecutive duplicates.

    Frames are processed in sorted filename order; a frame is written (with a
    new zero-padded sequential name) only if it differs pixel-wise from the
    previously read frame.  Assumes all images share the same shape -- TODO
    confirm, since `!=` broadcasting would fail otherwise.
    """
    files = os.listdir(in_dir)
    filenames = list(sorted([os.path.join(in_dir, f) for f in files]))
    # The first frame is always kept, as 0000.png.
    previous_frame = cv2.imread(filenames[0])
    cv2.imwrite(os.path.join(test_dir, pad_int(0, 4) + '.png'), previous_frame)
    filenames = filenames[1:]
    # NOTE(review): zero_arr is never used below -- leftover from the
    # commented-out diff approach.
    zero_arr = np.zeros(previous_frame.shape)
    counter = 1
    for i, f in tqdm(enumerate(filenames)):
        img = cv2.imread(f)
        #diff_img = img - previous_frame
        # Keep the frame if any pixel differs from its predecessor.
        if (img != previous_frame).any():
            cv2.imwrite(os.path.join(test_dir, pad_int(counter, 4) + '.png') , img)
            counter += 1
        else:
            # Log the index of each dropped (duplicate) frame.
            print(pad_int(i, 4))
        previous_frame = img
    # Report input vs. deduplicated output counts.
    print(len(os.listdir(in_dir)))
    print(len(os.listdir(test_dir)))
    #if diff_img == zero_arr:
        #os.remove(f)
    """
    new_files = os.listdir(in_dir)
    new_filenames = [os.path.join(in_dir, f) for f in new_files]
    for i, f in tqdm(enumerate(new_filenames)):
        i = pad_int(i, 4)
        os.rename(f, os.path.join(test_dir, i + '.png'))
    """
def sliding_window(img, window):
    """Placeholder for a sliding-window traversal of *img*; not implemented yet."""
    pass
#remove_duplicates(in_dir='/Users/adamcatto/SRC/dippy/input_data/gray_tunnel_sequence/images', test_dir='/Users/adamcatto/SRC/dippy/input_data/cleaned_gray_tunnel_sequence')
| true |
657a9664a578d74515ca65274ed92caf97c79ce7 | Python | Poyx15/Tutorials | /Codecademy Practice/VisualizeData/Seaborn/PlottingBars.py | UTF-8 | 859 | 4.25 | 4 | [] | no_license | # The Seaborn function sns.barplot(), takes at least three keyword arguments:
#
# data: a Pandas DataFrame that contains the data (in this example, data=df)
# x: a string that tells Seaborn which column in the DataFrame contains otheur x-labels (in this case, x="Gender")
# y: a string that tells Seaborn which column in the DataFrame contains the heights we want to plot for each bar (in this case y="Mean Satisfaction")
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
# This is the dataframe
# Build a small demo DataFrame: one satisfaction score per gender category.
df = pd.DataFrame([
    ['Male', 7.2],
    ['Female', 8.1],
    ['Non-binary', 6.8]
    ],
    columns=['Gender','Mean Satisfaction']
)
# Dataframe until here ^^^^^^^^
print(df)
# One bar per Gender, bar height taken from Mean Satisfaction.
sns.barplot(
    data= df,
    x= 'Gender',
    y= 'Mean Satisfaction'
)
# Render the figure in a window (blocks until it is closed).
plt.show()
d9fa7c1e9a4e4e97c403b5b25a1de0298cf6bde7 | Python | adriano-pacheco/atividades-mentorama | /exe1.py | UTF-8 | 1,212 | 4.15625 | 4 | [
"MIT"
] | permissive | #Crie um programa que leia o nome, sexo e idade
#de varias pessoas, guardando os dados de cada
#pessoa em um dicionario e todos os dicionaris em uma lista.
#No final mostre:
# Read people records (name, sex, age) into dicts collected in a list.
pessoas = dict()
listaPessoas = list()
resposta = "s"
while resposta =="s":
    pessoas['nome'] = str(input('Insira o nome: '))
    pessoas['sexo'] = str(input('Qual o sexo: '))
    pessoas['idade'] = int(input('Qual a idade: '))
    # copy() so each appended record is independent of the reused dict
    listaPessoas.append(pessoas.copy())
    resposta = input('Deseja continuar s/n: ')
# a) Number of people registered
pessoasCadastradas = len(listaPessoas)
print(f'A quantidade de pessoas cadastradas são {pessoasCadastradas}')
# b) The average age
media = 0
for i in listaPessoas:
    media = media + i['idade']
media2 = media / pessoasCadastradas
print(f'A média de idade das pessoas cadastradas é {media2}')
# c) A list of the women (sex recorded as 'f')
listM = []
for i in listaPessoas:
    if i['sexo'] == 'f':
        listM.append(i['nome'])
# NOTE(review): 'lsita' is a typo in the user-facing message; left untouched
# because runtime strings are part of the program's output.
print(f'A lsita com as mulheres {listM}')
# d) A list of people older than the average
acMedia = []
for i in listaPessoas:
    if i['idade'] > media2:
        acMedia.append(i['nome'])
print(f'As pessoas acima da idade média são : {acMedia}')
| true |
bf87540de8819f896014e37d7ffe86b1f99e576a | Python | Mitchell-9/KivyProject | /structure.py | UTF-8 | 6,425 | 2.890625 | 3 | [] | no_license | from kivy.uix.widget import Widget
from kivy.clock import Clock
from kivy.properties import *
from kivy.core.window import Window
import random
import smooth
class Cell(Widget):
    """One square of the board: a logical centre position plus the smaller
    rectangle actually drawn (logical size minus a margin)."""

    # Kivy properties driving the canvas; real values are set in __init__.
    graphical_size = ListProperty([1, 1])
    graphical_pos = ListProperty([1, 1])
    color = ListProperty([1, 1, 1, 1])

    def __init__(self, x, y, size, margin=4):
        super().__init__()
        self.margin = margin
        self.actual_size = (size, size)
        self.graphical_size = (size - margin, size - margin)
        self.actual_pos = (x, y)
        self.graphical_pos_attach()

    def graphical_pos_attach(self, smooth_motion=None):
        """Sync the drawn rectangle with actual_pos, optionally animated.

        *smooth_motion* is a (smoother, duration) pair; when omitted the
        rectangle jumps immediately.
        """
        half_w = self.graphical_size[0] / 2
        half_h = self.graphical_size[1] / 2
        target_x = self.actual_pos[0] - half_w
        target_y = self.actual_pos[1] - half_h
        if smooth_motion is None:
            self.graphical_pos = target_x, target_y
        else:
            smoother, t = smooth_motion
            smoother.move_to(self, target_x, target_y, t)

    def move_to(self, x, y, **kwargs):
        """Place the cell's logical centre at (x, y) and re-attach graphics."""
        self.actual_pos = (x, y)
        self.graphical_pos_attach(**kwargs)

    def move_by(self, x, y, **kwargs):
        """Shift the cell by the offset (x, y)."""
        cur_x, cur_y = self.actual_pos
        self.move_to(cur_x + x, cur_y + y, **kwargs)

    def get_pos(self):
        """Return the cell's logical (x, y) position."""
        return self.actual_pos

    def step_by(self, direction, **kwargs):
        """Move one whole cell in the given grid *direction* (dx, dy in cells)."""
        step_x = self.actual_size[0] * direction[0]
        step_y = self.actual_size[1] * direction[1]
        self.move_by(step_x, step_y, **kwargs)
class Worm(Widget):
    """The snake: an ordered list of Cells with index 0 as the head."""

    def __init__(self, config):
        super().__init__()
        self.cells = []
        self.config = config
        self.cell_size = config.CELL_SIZE
        self.head_init((100, 100))
        for _ in range(config.DEFAULT_LENGTH):
            self.lengthen()

    def destroy(self):
        """Detach every cell widget and forget them all."""
        for cell in self.cells:
            self.remove_widget(cell)
        self.cells = []

    def lengthen(self, pos=None, direction=(0, 1)):
        """Append one cell, at *pos* or one step past the tail in *direction*."""
        if pos is None:
            tail_x, tail_y = self.cells[-1].get_pos()
            pos = (tail_x + direction[0] * self.cell_size,
                   tail_y + direction[1] * self.cell_size)
        new_cell = Cell(*pos, self.cell_size, margin=self.config.MARGIN)
        self.cells.append(new_cell)
        self.add_widget(new_cell)

    def head_init(self, pos):
        """Create the head cell at *pos*."""
        self.lengthen(pos=pos)

    def move(self, direction, **kwargs):
        """Advance one step: tail-to-head, each cell takes its leader's spot."""
        followers = self.cells[:0:-1]      # last .. second cell
        leaders = self.cells[-2::-1]       # second-to-last .. head
        for follower, leader in zip(followers, leaders):
            follower.move_to(*leader.get_pos(), **kwargs)
        self.cells[0].step_by(direction, **kwargs)

    def gather_positions(self):
        """Logical positions of every cell, head first."""
        return [cell.get_pos() for cell in self.cells]

    def head_intersect(self, cell):
        """True if the head shares its logical position with *cell*."""
        return self.cells[0].get_pos() == cell.get_pos()
class Form(Widget):
    """Game board: owns the worm and the fruit, handles keyboard input and
    drives the game loop via Kivy's Clock."""

    # Current worm length, bound to the score label (kv side).
    worm_len = NumericProperty(0)

    # Key name -> grid direction; WASD and the arrow keys are aliases.
    _KEY_DIRECTIONS = {
        's': (0, -1), 'down': (0, -1),
        'd': (1, 0), 'right': (1, 0),
        'a': (-1, 0), 'left': (-1, 0),
        'w': (0, 1), 'up': (0, 1),
    }

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.worm = None
        self.cur_dir = (0, 0)
        self.fruit = None
        self.game_on = True
        # Animates the cells' graphical positions between logical steps.
        self.smooth = smooth.XSmooth(["graphical_pos[0]", "graphical_pos[1]"])
        self._keyboard = Window.request_keyboard(self._keyboard_closed, self)
        self._keyboard.bind(on_key_down=self._on_keyboard_down)

    def _keyboard_closed(self):
        """Unbind and drop the keyboard when the system reclaims it."""
        self._keyboard.unbind(on_key_down=self._on_keyboard_down)
        self._keyboard = None

    def random_cell_location(self, offset):
        """Random grid coordinates at least *offset* cells from the border."""
        x_row = self.size[0] // self.config.CELL_SIZE
        x_col = self.size[1] // self.config.CELL_SIZE
        return random.randint(offset, x_row - offset), random.randint(offset, x_col - offset)

    def random_location(self, offset):
        """Random pixel coordinates aligned to the cell grid."""
        x_row, x_col = self.random_cell_location(offset)
        return self.config.CELL_SIZE * x_row, self.config.CELL_SIZE * x_col

    def fruit_dislocate(self):
        """Move the fruit to a random cell not occupied by the worm."""
        x, y = self.random_location(2)
        while (x, y) in self.worm.gather_positions():
            x, y = self.random_location(2)
        self.fruit.move_to(x, y)

    def start(self):
        """(Re)create the worm and fruit and start the update clock."""
        self.worm = Worm(self.config)
        self.add_widget(self.worm)
        if self.fruit is not None:
            self.remove_widget(self.fruit)
        self.fruit = Cell(0, 0, self.config.APPLE_SIZE)
        self.fruit_dislocate()
        self.add_widget(self.fruit)
        self.game_on = True
        self.cur_dir = (1, 0)
        Clock.schedule_interval(self.update, self.config.INTERVAL)
        self.popup_label.text = ""

    def stop(self, text=""):
        """Halt the game loop and show *text* in the popup label."""
        self.game_on = False
        self.popup_label.text = text
        Clock.unschedule(self.update)

    def game_over(self):
        """Stop the game with the game-over message."""
        self.stop("GAME OVER" + " " * 5 + "\ntap space to reset")

    def align_labels(self):
        """Centre the popup and score labels; the labels are expected to be
        provided from the kv file -- TODO confirm."""
        try:
            self.popup_label.pos = ((self.size[0] - self.popup_label.width) / 2, self.size[1] / 2-3)
            self.score_label.pos = ((self.size[0] - self.score_label.width) / 2, self.size[1] - 80)
        except Exception:
            # Dump state for debugging, then re-raise the original error
            # (the old `assert False` disappeared under `python -O`).
            print(self.__dict__)
            raise

    def update(self, _):
        """One tick of the game loop: move, eat, grow, detect self-collision."""
        if not self.game_on:
            return
        self.worm.move(self.cur_dir, smooth_motion=(self.smooth, self.config.INTERVAL))
        if self.worm.head_intersect(self.fruit):
            directions = [(0, 1), (0, -1), (1, 0), (-1, 0)]
            self.worm.lengthen(direction=random.choice(directions))
            self.fruit_dislocate()
        if self.worm_bite_self():
            self.game_over()
        self.worm_len = len(self.worm.cells)
        self.align_labels()

    def _on_keyboard_down(self, keyboard, keycode, text, modifiers):
        """Handle a key press: restart after game over, else steer the worm.

        Bug fix: the original left `cur_dir` unbound when an unmapped key
        was pressed, raising UnboundLocalError; unknown keys now simply
        keep the current direction.
        """
        if not self.game_on:
            self.worm.destroy()
            self.start()
            return
        self.cur_dir = self._KEY_DIRECTIONS.get(keycode[1], self.cur_dir)

    def worm_bite_self(self):
        """Return the body cell the head overlaps, or False if none."""
        for cell in self.worm.cells[1:]:
            if self.worm.head_intersect(cell):
                return cell
        return False
| true |
ba9f8c1cfd99500d3c26ce9cc08896f661082438 | Python | aimhigh53/AlgoWing | /JeongHyeon/SWEA/Advanced/06_그래프의 기본과 탐색/5248_그룹 나누기.py | UTF-8 | 1,074 | 3.234375 | 3 | [] | no_license | ## 5248_그룹 나누기
# https://swexpertacademy.com/main/learn/course/lectureProblemViewer.do
# https://hongsj36.github.io/2020/02/01/Ad_GraphBasic/ 참고했습니다
# 부모 노드 찾기
# Find the representative (root) of a node's set.
def find_set(x):
    """Return the root of the set containing node x.

    Improvement: path compression re-points every node visited directly
    onto the root, keeping later lookups near O(1) amortised.  The
    representative returned is identical to the uncompressed version.
    """
    if parent[x] != x:
        parent[x] = find_set(parent[x])
    return parent[x]
# 합집합 만들기
def union(x, y):
    """Merge the sets containing x and y using union by rank."""
    root_x = find_set(x)
    root_y = find_set(y)
    if rank[root_x] < rank[root_y]:
        # Attach the shallower tree under the deeper one.
        parent[root_x] = root_y
    else:
        parent[root_y] = root_x
        # Equal ranks: the merged tree just got one level deeper.
        if rank[root_x] == rank[root_y]:
            rank[root_x] += 1
# Driver: for each test case, read N nodes and M flattened pairs, union
# each consecutive pair, then count the distinct set representatives.
T = int(input())
for test_case in range(1, T + 1):
    N, M = map(int, input().split())
    paper = list(map(int, input().split()))
    parent = [i for i in range(N+1)] # index 0 unused; keeps nodes 1-based
    rank = [0] * (N + 1) # index 0 unused for the same 1-based alignment
    for i in range(M) : # pairs arrive flattened: merge each consecutive pair
        union(paper[2 * i], paper[2 * i + 1])
    groups = set() # collect each node's root; the set removes duplicates
    for j in range(1, N+1) :
        groups.add(find_set(j))
    print('#' + str(test_case), len(groups))
| true |
bf18e74a48ab3843ddfa5977b740f2f85aeb1da5 | Python | drh4kor/webformhax | /Alma1.py | UTF-8 | 2,158 | 2.75 | 3 | [
"MIT"
] | permissive | #run_local.py
#by drh4kor
#inception date:2019-2-14
#get reference to the libraries
from selenium import webdriver
from selenium.webdriver.firefox.firefox_binary import FirefoxBinary
#set variables
loginURL='http://10.10.1.20/cgi-bin/login.cgi'
binaryPath='/usr/bin/firefox'
#create object
binary = FirefoxBinary(binaryPath)
#create browser
driver = webdriver.Firefox(firefox_binary=binary)
driver.implicitly_wait(60)
#navigate to log in page
driver.get(loginURL)
#get the textbox to login
inputElement_id = driver.find_element_by_name('login_id')
#get the textbox for password
inputElement_psw = driver.find_element_by_name('login_pwd')
#set the path to the link element does not have id or name property.
inputElement_link=driver.find_element_by_link_text('Login')
#setup counter variables
counter = 0
#setup initial value to test
psw_value=0000
#setup total number of tries
max_value = 10000
#show the current page path
print driver.current_url
currentURL=driver.current_url
#loop start at 0000
#simulate keys into the textbox
#inputElement_id.send_keys('Alma')
for x in range(0, max_value):
#debug;show the counter
print "Loop count %04d" % (x)
#gets the page after refresh
driver.getURL(loginURL)
#clear box
inputElement_psw.clear()
#todo: format x into '%3d'
#simulate keys into the textbox
psw_value=format(x,"04d")
inputElement_psw.send_keys(psw_value)
#input username and psw
inputElement_id = driver.find_element_by_name('login_id')
inputElement_id.send_keys('Alma')
inputElement_psw = driver.find_element_by_name('login_pwd')
inputElement_link=driver.find_element_by_xpath("//*[@id='btn_login']")
#simulate click on the faux button
inputElement_link.click()
#get the url from the page loaded
# newURL = driver.current_url
driver.implicitly_wait(30)
print driver.current_url;
currentURL="http://10.10.1.20/cgi-bin/login_proc.cgi"
driver.get(currentURL)
# #check if it's NOT the same
# if currentURL != currentURL:
# #todo: write to log the test value that passed
# print "SCORE!"
# #gtfo
# break
#cleanup time
driver.close()
| true |
f8905a815b0737d01833562786f45405c031041d | Python | agotsis/skillshopcmu | /FlaskWebProject1/views.py | UTF-8 | 3,291 | 2.703125 | 3 | [
"BSD-3-Clause"
] | permissive | """
Routes and views for the flask application.
"""
from datetime import datetime
from directory import Directory
from flask import render_template, g, Flask, request
from FlaskWebProject1 import app
import json
###############################################################################
#                              Database Functions                             #
###############################################################################
# In-memory store: one dict per category, each mapping a skill name to the
# set of andrew IDs offering it.
database = [{},{},{}] #Convention: teach := 0 | practice := 1 | conversation := 2
# Intakes a post request with a name, a skill, and a list of categories.
@app.route('/db_add', methods = ['GET', 'POST'])
def db_add():
    """Register a user as offering a skill in the checked categories.

    POST form fields: ``andrewID``, ``skill``, plus the optional checkbox
    fields ``teach``, ``help_others`` and ``have_conversation``.  A GET
    request simply renders the redirect page.
    """
    if request.method == 'POST':
        name = request.form.get('andrewID')
        skill = request.form.get('skill')
        # Unchecked checkboxes arrive as None; map each checked box to its
        # category index (teach=0, practice=1, conversation=2).
        categories = []
        if request.form.get('teach') is not None:
            categories.append(0)
        if request.form.get('help_others') is not None:
            categories.append(1)
        if request.form.get('have_conversation') is not None:
            categories.append(2)
        for cat in categories:
            # setdefault replaces the original `!= None` check-then-create
            # dance and avoids looking the skill up twice.
            database[cat].setdefault(skill, set()).add(name)
        return redirect()
    else:
        return render_template(
            'redirect.html',
        )
# Intake a post request with a skill and a category, and return a list of
# andrewids that are offering that
@app.route('/db_lookup', methods = ['GET', 'POST'])
def db_lookup():
    """Look up users offering *skill* in a category and render the results.

    POST form fields: ``skill`` and ``category`` (one of ``teach``,
    ``prac`` or ``talk``).  Renders ``return_results.html`` either with a
    list of directory records or with no results when nothing matches.
    """
    if request.method == 'POST':
        skill = request.form.get('skill')
        # Translate the form value into the database index
        # (teach=0, practice=1, conversation=2).
        category_index = {'teach': 0, 'prac': 1, 'talk': 2}.get(
            request.form.get('category'))
        # Bug fix: the original fell through with cat = -1 for an unknown
        # category, which silently searched the *last* category's dict.
        # Unknown category or unknown skill now means "no matches".
        if category_index is None or database[category_index].get(skill) is None:
            return render_template(
                'return_results.html'
            )
        results = list(database[category_index].get(skill))
        results = [Directory(i).getInfo(["names",
                "department", "student_level"]) for i in results]
        return render_template(
            'return_results.html',
            results = results )
    else:
        return render_template(
            'return_results.html',
            results=None,
        )
##############################################################################
# Flask Page Runners #
##############################################################################
@app.route('/')
@app.route('/home')
def home():
    """Render the landing page."""
    context = {
        'title': 'Home Page',
        'year': datetime.now().year,
    }
    return render_template('index.html', **context)
@app.route('/contact')
def contact():
    """Render the contact page."""
    context = {
        'title': 'Contact',
        'year': datetime.now().year,
        'message': 'Your contact page.',
    }
    return render_template('contact.html', **context)
@app.route('/about')
def about():
    """Render the about page."""
    context = {
        'title': 'About',
        'year': datetime.now().year,
        'message': 'Your application description page.',
    }
    return render_template('about.html', **context)
def redirect():
    """Render the post-submission acknowledgement page.

    NOTE(review): this name shadows ``flask.redirect``; it does not issue
    an HTTP redirect, it just renders a template.
    """
    context = {
        'year': datetime.now().year,
        'message': 'Thank you for your submission!',
    }
    return render_template("redirect.html", **context)
| true |
5fb028e8cbadc197492f0902acd33bc6cf0487ab | Python | ubaral/SVM-Practice | /SpamDataset.py | UTF-8 | 1,876 | 2.953125 | 3 | [] | no_license | import scipy.io
import numpy as np
from sklearn import svm
import random
# k-fold cross-validation of a linear SVM on the spam dataset.
# load in the data files
mat = scipy.io.loadmat("data/spam-dataset/spam_data.mat")
training_data = mat["training_data"]
training_labels = mat["training_labels"]
testing_data = mat["test_data"]
y = training_labels[0]
X = training_data
k = 12
totalSamps = np.size(training_labels)
# NOTE(review): partitions are filled round-robin below, so when
# totalSamps % k != 0 some folds hold one extra sample while the scoring
# loop below only checks the first `partitionSize` samples -- confirm
# this truncation is intended.
partitionSize = int(totalSamps / k)
partitionDict = dict()
for i in range(k):
    partitionDict[i] = [[], []]
# Assign sample i to fold i % k; element [0] holds features, [1] labels.
for i in range(totalSamps):
    partitionKey = i % k
    partitionDict[partitionKey][0].append(training_data[i, :])
    partitionDict[partitionKey][1].append(training_labels[0][i])
# run k iterations and validate for kth partition and train on the rest.
C_Values = [10]
for c_val in C_Values:
    accuracyTotalSum = 0
    for kk in range(k):
        clf = svm.SVC(kernel='linear', C=c_val)
        flattened_images = []
        flattened_labels = []
        for j in range(k):
            if j != kk:
                flattened_images += partitionDict[j][0]
                flattened_labels += partitionDict[j][1]
        # train the classifier with all images except those in partition number kk
        flattened_images = np.array(flattened_images)
        flattened_labels = np.array(flattened_labels)
        # TRAIN!
        clf.fit(flattened_images, flattened_labels)
        # calculate the running total of accuracy's which will be used to calculate the average
        correctGuess = 0
        predictedLabels = clf.predict(partitionDict[kk][0])
        actualLabels = partitionDict[kk][1]
        for i in range(partitionSize):
            if predictedLabels[i] == actualLabels[i]:
                correctGuess += 1
        # assumes Python 3 true division; under Python 2 this would be
        # integer division and usually evaluate to 0 -- TODO confirm
        accuracyTotalSum += correctGuess / partitionSize
    averageAccuracy = accuracyTotalSum / k
    print("average accuracy for C = " + str(c_val) + " was " + str(averageAccuracy))
| true |
a809b3f1e99808524298d505e89e1e4514c6a3d4 | Python | jb2fn/adj_assignments | /Scrape.py | UTF-8 | 1,140 | 3.515625 | 4 | [] | no_license | import urllib2, csv
# Imports urllib2 and csv to visit websites with Python.
from bs4 import BeautifulSoup
# Imports the tool BeautifulSoup from bs4 package
# Open the output CSV first (as in the original flow) and wrap the whole
# scrape in try/finally.
# Fix: the original script never closed the file, so buffered rows could
# be lost if the interpreter exited abnormally.
outfile = open('jaildata.csv', 'w')
writer = csv.writer(outfile)
try:
    # Download the Boone County inmate roster page (first 500 rows).
    url = 'https://report.boonecountymo.org/mrcjava/servlet/SH01_MP.I00290s?max_rows=500'
    html = urllib2.urlopen(url).read()
    # Parse the html using BeautifulSoup and store the tree in 'soup'.
    soup = BeautifulSoup(html, "html.parser")
    # Locate the data table body: the tbody tag carrying class "stripe".
    tbody = soup.find('tbody', {'class': 'stripe'})
    # Collect every table row (tr) inside that body.
    rows = tbody.find_all('tr')
    for row in rows:
        # One CSV record per row: the utf-8 encoded text of each td cell.
        cells = row.find_all('td')
        data = [cell.text.encode('utf-8') for cell in cells]
        writer.writerow(data)
finally:
    outfile.close()
# Writes the row of data | true |
0dfbc3f3cdbd627e5f33b3d1ceede75c2db39584 | Python | SlipShabby/Hackerrank | /Python/debugging.py | UTF-8 | 1,101 | 3.890625 | 4 | [] | no_license | # word score
def is_vowel(letter):
    """Return True if *letter* is one of a, e, i, o, u or y."""
    # A set gives the same membership semantics as the original list
    # (exact single-character match) with O(1) lookup.
    return letter in {'a', 'e', 'i', 'o', 'u', 'y'}
def score_words(words):
    """Score a list of words and return the total.

    A word with an even number of vowels (y counts as a vowel) is worth
    2 points; any other word is worth 1 point.
    """
    total = 0
    for word in words:
        # Inlined vowel test: identical to is_vowel for single characters.
        vowel_count = sum(1 for ch in word if ch in 'aeiouy')
        total += 2 if vowel_count % 2 == 0 else 1
    return total
# default args
class EvenStream(object):
    """Infinite stream of even numbers: 0, 2, 4, ..."""

    def __init__(self):
        self.current = 0

    def get_next(self):
        """Return the next even number and advance the stream."""
        value = self.current
        self.current += 2
        return value
class OddStream(object):
    """Infinite stream of odd numbers: 1, 3, 5, ..."""

    def __init__(self):
        self.current = 1

    def get_next(self):
        """Return the next odd number and advance the stream."""
        value = self.current
        self.current += 2
        return value
def print_from_stream(n, stream=None):
    """Print the next *n* values of *stream* (a fresh EvenStream by default).

    Fix: the original used a mutable default argument
    (``stream=EvenStream()``) shared across calls, and papered over it by
    calling ``stream.__init__()`` on every invocation -- which also reset
    any stream the caller passed in.  A ``None`` sentinel gives every
    default call a fresh stream and leaves caller-supplied streams alone.
    """
    if stream is None:
        stream = EvenStream()
    for _ in range(n):
        print(stream.get_next())
# Driver: read the number of queries, then for each query print `n`
# values from the requested stream ("even" gets the default EvenStream,
# anything else a fresh OddStream).
queries = int(input())
for _ in range(queries):
    stream_name, n = input().split()
    n = int(n)
    if stream_name == "even":
        print_from_stream(n)
    else:
print_from_stream(n, OddStream()) | true |
833e78e6213f4051e3d53c007a6a3fc1f502f4ee | Python | MiguelYanes/AI_Labs | /practica3IA/Practica3/alphabeta.py | UTF-8 | 2,229 | 3.140625 | 3 | [] | no_license |
# AlphaBeta Partial Search
infinity = 1.0e400
def terminal_test(state, depth):
return depth <= 0 or state.is_terminal
def max_value(state, player, max_depth, alpha, beta, eval_function, generados):
"""
Completar con el codigo correspondiente a la funcion <max_value> de la
version del algoritmo minimax con poda alfa-beta
"""
#f = open("pruebasab.txt", "a");
#f.write("ab\n");
if terminal_test(state, max_depth):
value = eval_function(state, player)
return value
value = -infinity
for sucesor in state.successors():
value = max(value, min_value(sucesor[1], sucesor[1].current_player, max_depth-1, alpha, beta, eval_function, generados + 1))
if value >= beta:
return value
alpha = max(alpha, value)
return value
def min_value(state, player, max_depth, alpha, beta, eval_function, generados):
"""
Completar con el codigo correspondiente a la funcion <min_value> de la
version del algoritmo minimax con poda alfa-beta
"""
#f = open("pruebasab.txt", "a");
#f.write("ab\n");
if terminal_test(state, max_depth):
value = eval_function(state, player)
return value
value = infinity
for sucesor in state.successors():
value = min(value, max_value(sucesor[1], sucesor[1].current_player, max_depth - 1, alpha, beta, eval_function, generados + 1))
if value <= alpha:
return value
beta = min(beta, value)
return value
def alphabeta_search(game, max_depth, eval_function):
"""
Search game to determine best action; use alpha-beta pruning.
This version cuts off search and uses an evaluation function.
"""
player = game.current_player
# Searches for the action leading to the sucessor state with the highest min score
successors = game.successors()
best_score, best_action = -infinity, successors[0][0]
for (action, state) in successors:
score = min_value(state, player, max_depth, -infinity, infinity, eval_function, 0)
if score > best_score:
best_score, best_action = score, action
return best_action
| true |
331baf228094dc6c1ead74f0b44beb4026721e5e | Python | artemb/python-lessons-10 | /games/part3_loops/loops_demo.py | UTF-8 | 212 | 2.53125 | 3 | [] | no_license | # Repetitive tasks
# Loop / Iteration
# for loop <-- when you know how many iterations you need
# loop body
# using variables in range
# iteration counter
# while loops
# infinite loops
# break statemen
| true |
426bfa1c48711c09f3b5fe6f8b91d2ce73bc3bab | Python | LeeEdel/pylearn | /pylearn2/scripts/train_example/make_dataset.py | UTF-8 | 2,374 | 3.609375 | 4 | [] | no_license | #See README before reading this file
#This script creates a preprocessed version of a dataset using pylearn2
#It's not necessary to save preprocessed versions of your dataset to
#disk but this is an instructive example, because later we can show
#how to load your custom dataset in a yaml file.
#
#This is also a common use case because often you will want to preprocess
#your data once and then train several models on the preprocessed data.
#We'll need the serial module to save the dataset
from pylearn2.utils import serial
#Our raw dataset will be the CIFAR10 image dataset
from pylearn2.datasets import cifar10
#We'll need the preprocessing module to preprocess the dataset
from pylearn2.datasets import preprocessing
#Our raw training set is 32x32 color images
train = cifar10.CIFAR10(which_set="train")
#We'd like to do several operations on them so we'll set up a pipeline to do so
pipeline = preprocessing.Pipeline()
#First we want to pull out small patches of the images, since it's easier to
#train an RBM on these
pipeline.items.append(preprocessing.ExtractPatches(patch_shape=(8,8),num_patches=150000))
#Next we contrast normalize the patches. The default arguments use the same
#"regularization" parameters as those used in Adam Coates, Honglak Lee, and Andrew Ng's
#paper "An Analysis of Single-Layer Networks in Unsupervised Feature Learning"
pipeline.items.append(preprocessing.GlobalContrastNormalization())
#Finally we whiten the data using ZCA. Again, the default parameters to ZCA are set to
#the same values as those used in the previously mentioned paper.
pipeline.items.append(preprocessing.ZCA())
#Here we apply the preprocessing pipeline to the dataset. The can_fit argument
#indicates that data-driven preprocessing steps (such as the ZCA step in this
#example) are allowed to fit themselves to this dataset. Later we might want
#to run the same pipeline on the test set with the can_fit flag set to False,
#in order to make sure that the same whitening matrix was used on both datasets.
train.apply_preprocessor(preprocessor = pipeline, can_fit = True)
#Finally we save the dataset to the filesystem. We instruct the dataset to store
#its design matrix as a numpy file because this uses less memory. The dataset
#object itself is stored as a pickle file.
train.use_design_loc('train_design.npy')
serial.save('cifar10_preprocessed_train.pkl',train)
| true |
d4a1795683fc49b694d454bbd5d0ced9da04a0aa | Python | dinotuku/iapr-2020 | /project/iapr_project/detections.py | UTF-8 | 5,141 | 3.046875 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Object detection functions
"""
import cv2
import numpy as np
from utilities import compute_angle, compute_elongation, imshow, mask_image, preprocess
def find_red_arrow(image, show=False):
    """Detect the red arrow in the image.

    Args:
        image: The input image (presumably RGB, since red is masked on both
            ends of the HSV hue range -- TODO confirm the colour order).
        show: Whether to show the results.

    Return:
        tuple: Tip coordinates.
        tuple: Center coordinates.

    NOTE(review): if no contour survives the area/elongation filters,
    ``points`` and ``tip_idx`` are never assigned and the final return
    raises NameError -- callers appear to assume an arrow is always
    present; verify.
    """
    image_copy = image.copy()
    # Red wraps around hue 0 in HSV, so mask both [0, 20] and [160, 180].
    masked = mask_image(image_copy,
                        np.array([0, 100, 0]), np.array([20, 255, 255]),
                        np.array([160, 100, 0]), np.array([180, 255, 255]))
    preprocessed = preprocess(masked, 100)
    contours, _ = cv2.findContours(preprocessed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    for c in contours:
        # compute the centroid of the shapes
        M = cv2.moments(c)
        area = M['m00']
        elongation = compute_elongation(M)
        # these will not be the arrow (too small or too big)
        if area < 1000 or area > 10000 or elongation > 100: continue
        cX = int(M['m10'] / area)
        cY = int(M['m01'] / area)
        center = (cX, cY)
        # Not sure do we need this
        # if abs(M['mu20'] - M['mu02']) > 420000: continue
        # find the corners of the arrow (polygon approximation of the contour)
        points = cv2.approxPolyDP(c, 4.7, True).squeeze(1)
        tip_idx = 0
        cand_tips = []
        angles = []
        # find tip candidates: corners whose angle is near 100 degrees and
        # whose two adjacent edges are long enough
        for i in range(len(points)):
            # get the current point and the surrounding points (wrapping)
            x = points[i - 1] if i != 0 else points[-1]
            y = points[i]
            z = points[i + 1] if i != len(points) - 1 else points[0]
            # get the lengths between the current point and the surrounding points
            l1 = np.linalg.norm(np.array(x) - np.array(y))
            l2 = np.linalg.norm(np.array(y) - np.array(z))
            ang = compute_angle(x, y, z)
            angles.append(ang)
            # save candidates
            if abs(ang - 100) < 15 and (l1 + l2 > 30):
                cand_tips.append(len(angles) - 1)
        # choose the correct tip: its two neighbouring corner angles must
        # sum to between 200 and 300 degrees
        for i in cand_tips:
            pang = angles[i - 1] if i != 0 else angles[-1]
            nang = angles[i + 1] if i != len(angles) - 1 else angles[0]
            if pang + nang < 300 and pang + nang > 200:
                tip_idx = i
        # visualize the result on the image
        cv2.drawContours(image_copy, [c], 0, (214, 39, 40), 2)
        cv2.circle(image_copy, tuple(center), 5, (0, 255, 0), -1)
        cv2.circle(image_copy, tuple(points[tip_idx]), 5, (0, 0, 255), -1)
        # only the first contour passing the filters is treated as the arrow
        break
    if show:
        imshow(image_copy)
    return points[tip_idx], center
def find_math_elements(image, arrow_c, bound=20, show=False):
    """Detect math elements in the image.

    Args:
        image: The input image.
        arrow_c: Center of the arrow (covered with white so the arrow is
            not detected as an element).
        bound: Half-size of the crop box around each detected element.
        show: Whether to show the results.

    Return:
        list: Images of all math elements (each resized to 28x28).
        list: Center coordinates of math elements.
    """
    image_original = image.copy()
    image_copy = image.copy()
    # cover red arrow with white rectangle so it cannot match below
    cv2.rectangle(image_copy, (arrow_c[0] - 60, arrow_c[1] - 60), (arrow_c[0] + 60, arrow_c[1] + 60), (255, 255, 255), -1)
    # dark-pixel threshold: 90% of the image's mean brightness (HSV V channel)
    value_threshold = int(cv2.cvtColor(image, cv2.COLOR_RGB2HSV)[:, :, 2].mean() * 0.9)
    masked = mask_image(image_copy,
                        np.array([0, 0, 0]), np.array([180, 255, value_threshold]),
                        np.array([100, 100, 0]), np.array([140, 255, 255]))
    preprocessed = preprocess(masked, 10, False)
    # get the contours of all shapes
    contours, _ = cv2.findContours(preprocessed, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    centers = []
    elements = []
    for i, c in enumerate(contours):
        # compute the centroid of the shapes
        M = cv2.moments(c)
        area = M['m00']
        elongation = compute_elongation(M)
        # these are either too small or too big or too elongated
        if area < 40 or area > 400 or elongation > 3000: continue
        cY = int(M['m01'] / M['m00'])
        cX = int(M['m10'] / M['m00'])
        center = (cX, cY)
        # if it is too close to a known element (squared distance < 4000),
        # it is not a valid new element
        too_close = False
        for center_ in centers:
            d = (center_[0] - center[0]) ** 2 + (center_[1] - center[1]) ** 2
            if d < 4000:
                too_close = True
                break
        if too_close: continue
        # save element and center: crop a (2*bound)-sized box, resize to 28x28
        element = image[cY - bound: cY + bound, cX - bound:cX + bound]
        element = cv2.resize(element, (28, 28))
        elements.append(element)
        centers.append(center)
        # visualize the result on the image
        label_color = (214, 39, 40)
        cv2.rectangle(image_original, (cX - bound, cY - bound), (cX + bound, cY + bound), label_color, 2)
        cv2.putText(image_original, f'{len(elements) - 1}', (cX, cY + 40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, label_color, 2)
    if show:
        imshow(image_original)
    return elements, centers
| true |
9563bcb6f9dd49cf3d844d246c9e46076d206ce9 | Python | Grzegorz-Olszewski/dec_to_hex | /decimal_to_hex/api.py | UTF-8 | 416 | 2.859375 | 3 | [] | no_license | from rest_framework.views import APIView
from django.http import HttpResponse
from decimal_to_hex.utils import dec_to_hex, is_integer
class DecToHexView(APIView):
    """POST endpoint that converts a decimal number to hexadecimal."""

    def post(self, request):
        """Return the hex string for the ``number`` field.

        Responds 400 when the field is missing or not an integer.
        Fix: the original raised an unhandled KeyError (HTTP 500) when
        "number" was absent from the request body.
        """
        number = request.data.get('number')
        if number is None:
            return HttpResponse("Number field is required", status=400)
        if not is_integer(number):
            return HttpResponse("Number has to be an integer", status=400)
        result = dec_to_hex(number)
        return HttpResponse(result)
| true |
86fa00d110a2d9dba6d440f448801f1df027293d | Python | Highsir/Python3_stdlib | /Python3标准库/ten_text/thrending/10_36_threading_subclass_args.py | UTF-8 | 602 | 2.96875 | 3 | [
"MIT"
] | permissive | import threading
import logging
class MyThreadWithArgs(threading.Thread):
    """Thread subclass that stores its args/kwargs so run() can log them
    (the base Thread only forwards them to a target callable)."""

    def __init__(self, group=None, target=None, name=None,
                 args=(), kwargs=None, *, daemon=None):
        super().__init__(group=group, target=target, name=name, daemon=daemon)
        self.args = args
        self.kwargs = kwargs

    def run(self):
        """Log the stored arguments instead of invoking a target."""
        logging.debug('running with %s and %s', self.args, self.kwargs)
# Emit DEBUG-level records, tagging each with the originating thread name.
logging.basicConfig(
    level=logging.DEBUG,
    format='(%(threadName) - 10s) %(message)s',
)
# Launch five threads; each logs its own args/kwargs from run().
for i in range(5):
    t = MyThreadWithArgs(args=(i,),kwargs={'a': 'A', 'b': 'B'})
t.start() | true |
23613a810d3d35c65b99ec6d237f4b0efca5e0d5 | Python | jonakirke94/python-morsels | /normalize_sentences/normalize_sentences.py | UTF-8 | 270 | 3.046875 | 3 | [] | no_license | import re
def normalize_sentences(sentences):
    """Normalize sentence spacing.

    Any run of one or more spaces following '.', '?' or '!' is collapsed
    to exactly two spaces; the punctuation mark itself is preserved via
    the \\1 backreference to the capture group.
    """
    return re.sub(r"([.?!]) +", r"\1  ", sentences)
| true |
63250c8d65ad1181efc9ed56fb46329285014e63 | Python | snoplusuk/echidna | /echidna/core/fill_spectrum.py | UTF-8 | 11,185 | 2.703125 | 3 | [
"MIT"
] | permissive | """ Provides code for creating echidna spectra and populating with data
from RAT Root files/ntuples.
"""
from echidna.util import root_help
import rat
from ROOT import RAT
from ROOT import TChain
import echidna.core.spectra as spectra
from echidna.core.config import SpectraConfig
import echidna.core.dsextract as dsextract
def _bipo_ntuple(spectrum, chain, extractors):
    """ Apply the BiPo coincidence cuts to an ntuple chain and fill a spectrum.

    Because ROOT is crap, we have to loop through the entries first
    before using the LoadTree() method required for the bipo checks.
    If the chain is not looped first then all parameters return 0.

    Args:
      spectrum (:class:`spectra.Spectra`): The spectrum which is being
        filled.
      chain (ROOT.TChain): Chain containing the events.
      extractors (list): The extractors for each variable.
        NOTE(review): the original docstring said dict, but
        fill_from_ntuple passes a list and this function only iterates it.

    Returns:
      :class:`spectra.Spectra`: The filled spectrum.
    """
    check_next = False
    fill_kwargs = {}
    for entry in chain:
        # check_next means previous event has evIndex = 1 & passes fill checks
        # The fill is only performed once the following entry starts a new
        # MC event (evIndex < 1) -- i.e. the candidate was not retriggered.
        if check_next and entry.evIndex < 1:
            try:
                spectrum.fill(**fill_kwargs)
                spectrum._raw_events += 1
            except ValueError:
                # Extracted value fell outside the spectrum's binning.
                pass
            # Reset kwargs
            fill_kwargs = {}
        if entry.evIndex != 0:
            # Not the first triggered event of an MC event, so it cannot
            # start a new candidate.
            check_next = False
            continue
        for e in extractors:
            if e.get_valid_ntuple(chain):
                fill_kwargs[e._name] = e.get_value_ntuple(chain)
                check_next = True
            else:
                # One invalid variable invalidates the whole candidate.
                check_next = False
                break
    return spectrum
def _root_mix(spectrum, dsreader, extractors, bipo):
    """ Internal function for filling a spectrum whose config has a mixture of
    mc (and/or truth) and reco parameters.

    Args:
        spectrum (:class:`spectra.Spectra`): The spectrum which is being
            filled.
        dsreader (ROOT.RAT.DU.DSReader): rat's data structure reader
            for the root file.
        extractors (dict): Keys are the variable names and the values are
            their respective extractors.
        bipo (bool): Applies the bipo cut if set to True.
    """
    for entry in range(0, dsreader.GetEntryCount()):
        ds = dsreader.GetEntry(entry)
        fill_kwargs = {}
        # Note mc will be the same for all evs in loop below:
        mc = ds.GetMC()
        if bipo and ds.GetEVCount() != 1:
            # Only bipos with 1 ev survive bipo cut
            continue
        for ievent in range(0, ds.GetEVCount()):
            ev = ds.GetEV(ievent)
            fill = True
            # reco variables are extracted from the triggered event, all
            # others from the MC branch; one invalid value skips the event.
            for var, extractor in extractors.iteritems():
                var_type = var.split("_")[-1]
                if var_type == "reco":
                    if extractor.get_valid_root(ev, ds):
                        fill_kwargs[extractor._name] = \
                            extractor.get_value_root(ev)
                    else:
                        fill = False
                        break
                else:  # mc or truth
                    if extractor.get_valid_root(mc, ds):
                        fill_kwargs[extractor._name] = \
                            extractor.get_value_root(mc)
                    else:
                        fill = False
                        break
            if fill:
                try:
                    spectrum.fill(**fill_kwargs)
                    spectrum._raw_events += 1
                except ValueError:
                    # Value outside the spectrum's binning; ignore.
                    pass
def _root_ev(spectrum, dsreader, extractors, bipo):
    """ Internal function for filling a spectrum whose config only has
    reco parameters.

    Args:
        spectrum (:class:`spectra.Spectra`): The spectrum which is being
            filled.
        dsreader (ROOT.RAT.DU.DSReader): rat's data structure reader
            for the root file.
        extractors (dict): Keys are the variable names and the values are
            their respective extractors.
        bipo (bool): Applies the bipo cut if set to True.
    """
    for entry in range(0, dsreader.GetEntryCount()):
        ds = dsreader.GetEntry(entry)
        if bipo and ds.GetEVCount() != 1:
            # Only bipos with 1 ev survive bipo cut
            continue
        for ievent in range(0, ds.GetEVCount()):
            ev = ds.GetEV(ievent)
            fill_kwargs = {}
            fill = True
            # Every configured variable must yield a valid value from the
            # triggered event, otherwise the event is skipped.
            for var, extractor in extractors.iteritems():
                if extractor.get_valid_root(ev, ds):
                    fill_kwargs[extractor._name] = extractor.get_value_root(ev)
                else:
                    fill = False
                    break
            if fill:
                try:
                    spectrum.fill(**fill_kwargs)
                    spectrum._raw_events += 1
                except ValueError:
                    # Value outside the spectrum's binning; ignore.
                    pass
def _root_mc(spectrum, dsreader, extractors, bipo):
    """ Internal function for filling a spectrum whose config only has
    mc or truth parameters.

    Args:
        spectrum (:class:`spectra.Spectra`): The spectrum which is being
            filled.
        dsreader (ROOT.RAT.DU.DSReader): rat's data structure reader
            for the root file.
        extractors (dict): Keys are the variable names and the values are
            their respective extractors.
        bipo (bool): Applies the bipo cut if set to True.
    """
    for entry in range(0, dsreader.GetEntryCount()):
        ds = dsreader.GetEntry(entry)
        mc = ds.GetMC()
        fill = True
        fill_kwargs = {}
        if bipo and ds.GetEVCount() != 1:
            # Only bipos with 1 ev survive bipo cut
            continue
        # Every configured variable must yield a valid value from the MC
        # branch, otherwise the whole entry is skipped.
        for var, extractor in extractors.iteritems():
            if extractor.get_valid_root(mc, ds):
                fill_kwargs[extractor._name] = extractor.get_value_root(mc)
            else:
                fill = False
                break
        if fill:
            try:
                spectrum.fill(**fill_kwargs)
                spectrum._raw_events += 1
            except ValueError:
                # Value outside the spectrum's binning; ignore.
                pass
def fill_from_root(filename, spectrum_name="", config=None, spectrum=None,
                   bipo=False, **kwargs):
    """ This function fills in the ndarray (dimensions specified in the config)
    with weights. It takes the parameter specified in the config from the
    events in the root file.

    Args:
      filename (str): A root file to study
      spectrum_name (str, optional): A name of future spectrum. Not
        required when appending a spectrum.
      config (:class:`SpectraConfig` or string, optional): The config
        or directory to the config for the spectrum. Not required when
        appending a spectrum.
      spectrum (:class:`spectra.Spectra`, optional): Spectrum you wish
        to append. Not required when creating a new spectrum.
      bipo (bool, optional): Applies the bipo cut if set to True.
        Default is False.
      kwargs (dict): Passed to and checked by the dsextractor.

    Raises:
      ValueError: If spectrum_name is not set when creating a new
        spectrum.
      IndexError: Unknown dimension type (not mc, truth or reco).

    Returns:
      :class:`spectra.Spectra`: The filled spectrum.
    """
    if type(config) == str:
        # A path was supplied rather than a config object; load it.
        config = SpectraConfig.load_from_file(config)
    dsreader = RAT.DU.DSReader(filename)
    if spectrum is None:
        if spectrum_name == "" or not config:
            raise ValueError("Name not set when creating new spectra.")
        spectrum = spectra.Spectra(str(spectrum_name),
                                   dsreader.GetEntryCount(),
                                   config)
    else:
        # Appending: grow the decay count and reuse the existing name.
        spectrum._num_decays += dsreader.GetEntryCount()
        spectrum_name = spectrum._name
    print "Filling", spectrum_name, "with", filename
    extractors = {}
    mc_fill = False
    ev_fill = False
    # Build one extractor per configured parameter and record whether the
    # config mixes mc/truth and reco dimensions.
    for var in spectrum.get_config().get_pars():
        var_type = var.split("_")[-1]
        if var_type == "mc" or var_type == "truth":
            mc_fill = True
        elif var_type == "reco":
            ev_fill = True
        else:
            raise IndexError("Unknown paramer type %s" % var_type)
        extractors[var] = dsextract.function_factory(var, **kwargs)
    if bipo:
        spectrum._bipo = 1  # Flag to indicate bipo cuts are applied
    # Dispatch to the fill loop matching the mix of parameter types.
    if mc_fill and ev_fill:
        _root_mix(spectrum, dsreader, extractors, bipo)
    elif mc_fill:
        _root_mc(spectrum, dsreader, extractors, bipo)
    else:
        _root_ev(spectrum, dsreader, extractors, bipo)
    return spectrum
def fill_from_ntuple(filename, spectrum_name="", config=None, spectrum=None,
                     bipo=False, **kwargs):
    """ This function fills in the ndarray (dimensions specified in the config)
    with weights. It takes the parameters specified in the config from
    the events in the ntuple.

    Args:
      filename (str): The ntuple to study
      spectrum_name (str, optional): A name of future spectrum. Not
        required when appending a spectrum.
      config (:class:`SpectraConfig` or string, optional): The config
        or directory to the config for the spectrum
      spectrum (:class:`spectra.Spectra`, optional): Spectrum you wish
        to append. Not required when creating a new spectrum.
      bipo (bool, optional): Applies the bipo cut if set to True.
        Default is False.
      kwargs (dict): Passed to and checked by the dsextractor.

    Raises:
      ValueError: If spectrum_name is not set when creating a new
        spectrum.

    Returns:
      :class:`echidna.core.spectra.Spectra`: The filled spectrum.
    """
    chain = TChain("output")
    chain.Add(filename)
    if type(config) == str:
        # A path was supplied rather than a config object; load it.
        config = SpectraConfig.load_from_file(config)
    if spectrum is None:
        if spectrum_name == "" or not config:
            raise ValueError("Name not set when creating new spectra.")
        spectrum = spectra.Spectra(str(spectrum_name), chain.GetEntries(),
                                   config)
    else:
        # Appending: grow the decay count and reuse the existing name.
        spectrum._num_decays += chain.GetEntries()
        spectrum_name = spectrum._name
    print "Filling", spectrum_name, "with", filename
    extractors = []
    for var in spectrum.get_config().get_pars():
        extractors.append(dsextract.function_factory(var, **kwargs))
    if bipo:
        spectrum._bipo = 1  # Flag to indicate bipo cuts are applied
        return _bipo_ntuple(spectrum, chain, extractors)
    # No bipo so use standard loop:
    for entry in chain:
        fill = True
        fill_kwargs = {}
        # Check to see if all parameters are valid and extract values
        for e in extractors:
            if e.get_valid_ntuple(entry):
                fill_kwargs[e._name] = e.get_value_ntuple(entry)
            else:
                fill = False
                break
        # If all OK, fill the spectrum
        if fill:
            try:
                spectrum.fill(**fill_kwargs)
                spectrum._raw_events += 1
            except ValueError:
                # Value outside the spectrum's binning; skip the event.
                pass
    return spectrum
| true |
20507c27636ff84fe972e7589092e6a86026f615 | Python | kut-info-ase-2019/group1-sasaki | /photo_de_recipe/recv_temp.py | UTF-8 | 701 | 3.1875 | 3 | [] | no_license | # -*- coding:utf-8 -*-
import socket
host = "222.229.69.221" # サーバーのホスト名
port = 1234 # PORTを指定
def temperture():
    """Fetch the current temperature from the TCP server at (host, port).

    Returns the temperature as an int read as big-endian bytes, or 20 --
    a fallback value that does not affect the season decision -- when the
    server refuses the connection.

    Fix: the original leaked the client socket on every path; the
    with-block now closes it even on the early-return error path.
    """
    # create the client socket (closed automatically by the with-block)
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        try:
            client.connect((host, port))  # connect to the server
        except ConnectionRefusedError:
            print('\n')
            print('temperature: Unable to get the temperature')
            return 20  # season-neutral fallback value
        client.send(b"ok")  # send the request token
        response = client.recv(4096)  # receive the raw temperature bytes
        temp = int.from_bytes(response, 'big')
    print('\n')
    print('temperature: %d' % temp)
    return temp
| true |
dbc4c3d22f3166c941711f0d1b63a16e43275521 | Python | skilstak/code-dot-org-python | /solutions/stage07-artist2/s1level42.py | UTF-8 | 339 | 2.53125 | 3 | [
"Unlicense"
] | permissive | import sys
sys.path.append('../..')
import codestudio
a = codestudio.load('s1level42')
a.speed = 'faster'
# Draw 4 passes of 10 randomly-coloured squares; after each square the
# artist advances 20 and turns 80 degrees right before the next one.
for count in range(4):
    for count2 in range(10):
        a.color = a.random_color()
        # NOTE(review): this inner loop reuses the name `count`, shadowing
        # the outer loop variable; harmless only because the outer value
        # is never read again after the shadowing.
        for count in range(4):
            # trace one 20x20 square
            a.move_forward(20)
            a.turn_right(90)
        a.move_forward(20)
        a.right(80)
a.check()
| true |
3b4118fea08b0ad02849e870a58bd99484243826 | Python | subhosuper/Code | /version2.py | UTF-8 | 567 | 2.984375 | 3 | [] | no_license | final = []
sum = 0
def index_sum(indexi, length_input, input, x):
    """Accumulate (into the module-global `sum`) the indices of successive
    occurrences of x found via `list.index`, appending the running total to
    the global `final` when the recursion stops.

    NOTE(review): the parameter `input` and the global `sum` shadow the
    builtins of the same names.  The search window `length_input-1`
    excludes the final element, and the recursion condition tests x
    against a `range(...)` built from element *values* rather than
    indices, which looks suspect -- behaviour preserved as-is; verify
    against the intended specification.
    """
    global sum
    sum = sum + input.index(x, indexi, length_input-1)
    if x in range(input[input.index(x)+1], input[length_input-1]):
        index_sum(input.index(x)+1, length_input, input, x)
    else:
        final.append(sum)
# Driver: for each distinct value in `input`, reset the global accumulator
# and collect its summed indices into `final` (iterating values last-first).
# NOTE(review): `input` and `sum` shadow builtins throughout this script.
input = [1,2,1,2,3,2]
distinct_set = list(set(input))
count_distinct_set = len(distinct_set)
while count_distinct_set != 0:
    sum = 0
    index_sum(0, len(input), input, distinct_set[count_distinct_set-1])
    count_distinct_set -= 1
print(final)
print(max(final)) | true |
97829587a4bfa4100db0f9403d2986743f2b2a33 | Python | gaberosser/crime-fighter | /stats/random.py | UTF-8 | 618 | 3.203125 | 3 | [] | no_license | import numpy as np
def weighted_random_selection(weights, n=1, prng=None):
    """Pick the *indices* of ``n`` entries at random, with probability
    proportional to the supplied weights.

    :param weights: sequence of non-negative weights
    :param n: number of indices to draw
    :param prng: optional ``np.random.RandomState`` for reproducible draws
    :return: array of ``n`` indices into ``weights``, or a scalar index
        when ``n == 1``
    """
    rng = prng or np.random.RandomState()
    # Cumulative weight totals partition [0, total) into weighted bins.
    cumulative = np.cumsum(weights)
    # Throw n darts uniformly onto [0, total) ...
    darts = rng.rand(n) * cumulative[-1]
    # ... and map each dart to the bin (index) it landed in.
    picked = np.searchsorted(cumulative, darts)
    return picked[0] if n == 1 else picked
| true |
062c9f44c8b471c04cf0609a09d9d9c48f8f4d8e | Python | anirudnits/Spojy | /spojy/profile_info.py | UTF-8 | 1,847 | 3.5 | 4 | [
"MIT"
] | permissive | '''
This module takes as input:
i>username of the user, whom you want to gather information about.
and returns:
i> the list of problem codes of the solved problems by the user.
ii> the list of problem codes of the unsolved problems by the user.
iii> world_rank of the user
i> total points of the user
'''
import requests
from bs4 import BeautifulSoup
import re
def filter_func(s):
    '''
    A filter predicate used to drop empty or unnecessary problem names.

    Returns True for strings worth keeping; a string is rejected when it
    is empty or consists only of whitespace.
    '''
    # s.strip() is falsy exactly when s is empty or all-whitespace, which
    # matches the original `isspace() == True or len(s) < 1` checks while
    # avoiding the explicit comparison against True.
    return bool(s.strip())
def get_profile(profile_name):
    '''
    Scrape a SPOJ user's profile page.

    :param profile_name: the SPOJ username to look up
    :return: tuple (solved_problems, unsolved_problems, rank, points);
        the first two are lists of problem codes, rank is the world-rank
        string (e.g. '#123') and points is the points figure as a string.

    Exits the interpreter via exit(0) when no profile exists for the
    given name, matching the original behaviour.
    '''
    profile_name = str(profile_name)
    url = "https://www.spoj.com/users/" + profile_name + "/"
    print(url)
    res = requests.get(url)
    # Bug fix: raise_for_status was referenced without parentheses, so
    # HTTP errors were silently ignored; it must be called.
    res.raise_for_status()
    soup = BeautifulSoup(res.content, "html.parser")
    all_tables = soup.find_all("table")
    profile_div = soup.find("div", attrs={"id": "user-profile-left"})
    # If the username is invalid the profile div is missing: soup.find
    # returns None and .find_all raises AttributeError (narrowed here from
    # the original bare except).
    try:
        profile_paras = profile_div.find_all('p')
    except AttributeError:
        print("There's no profile by this name")
        exit(0)
    world_rank_para = profile_paras[2].text
    rank_pat = re.compile(r'#(\d)+')
    point_pat = re.compile(r'\((.)*\)')
    rank = rank_pat.search(world_rank_para).group(0)
    # The points figure appears inside parentheses in the same paragraph.
    point_line = point_pat.search(world_rank_para).group(0)
    points = point_line.split()[0][1:]
    # Table 0 lists solved problems, table 1 unsolved ones; filter_func
    # drops the empty/whitespace cells (replaces the original range(2) loop).
    solved_problems = list(filter(filter_func, all_tables[0].text.split('\n')))
    unsolved_problems = list(filter(filter_func, all_tables[1].text.split('\n')))
    return solved_problems, unsolved_problems, rank, points
| true |
74b5ceaa86c143bbcd705e328737ea385395bfc3 | Python | wenmengqiang/learn-python-the-hard-way | /ex15.py | UTF-8 | 914 | 3.71875 | 4 | [] | no_license | #encoding:utf-8
from sys import argv
# Import argv from the sys module so command-line arguments are available.
script,filename=argv
# Unpack the runtime argv contents into the script name and the filename.
txt=open(filename)
# Open the file named by `filename` and bind the file object to txt.
print "Here's your file %r:"%filename
print txt.read()
# Print out the contents returned by txt.read().
print "Typen the filename again:"
file_again=raw_input("-->")
txt_again=open(file_again)
# txt_again.close() -- if that call ran here, the read on the next line would fail.
print txt_again.read()
txt.close()
txt_again.close()
#function —— A series of statements which returns some value to a caller. It can also be passed zero or more arguments which may be used in the execution of the body
#method —— A function which is defined inside a class body. If called as an attribute of an instance of that class, the method will get the instance object as its first argument (which is usually called self).
#
750afb2903608a88523893515531002b3a9a06af | Python | msk20msk20/Python | /20191218_62.py | UTF-8 | 208 | 3.46875 | 3 | [] | no_license | def addition(n) :
if n > 1 :
result = addition(n-1) + n
else :
result = 1
return result
print (addition(10))
print (addition(100))
#print (addition(1000))
| true |
300bbdf612c9ba7955d031a795f1a66f1b7d1a0f | Python | emellars/euler-sol | /065.py | UTF-8 | 1,014 | 3.71875 | 4 | [] | no_license | #Runtime ~0.07s.
# Project Euler 65: sum of the digits in the numerator of the 100th
# convergent of the continued fraction for e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...].

def g(n):
    """nth partial quotient of e's fraction tail: 2k at positions 2, 5, 8, ...; otherwise 1."""
    return 2 * ((n + 1) // 3) if n % 3 == 2 else 1

# Partial quotients a_1..a_99, deepest term first, so the continued
# fraction can be folded from the innermost reciprocal outwards.
f = [g(k) for k in reversed(range(1, 100))]

old_numer, old_denom = f[0], 1
for term in f[1:]:
    # Fold in the next term: take the reciprocal of the running fraction
    # and add `term`, keeping a single numerator/denominator pair.
    old_numer, old_denom = term * old_numer + old_denom, old_numer

# One final reciprocal, then add the integer part (2) of e to get the
# numerator of the improper fraction.
numer, denom = old_denom, old_numer
e_numer = str(numer + 2 * denom)
print(sum(int(digit) for digit in e_numer))
| true |
0d06b2580644d392cc994f2806b2b01da97e9f0e | Python | Md-Mudassir/PracticeSnippets | /python snippets/lists.py | UTF-8 | 437 | 4.53125 | 5 | [] | no_license | # A List is a collection which is ordered and changeable. Allows duplicate members.
# Demonstration of common list operations (ordered, mutable, duplicates OK).

# Literal syntax.
numbers = [1, 2, 4, 3]
# The list() constructor, here converting a tuple.
num2 = list((1, 2, 3, 2))
print(num2, numbers)

fruits = ['apple', 'oranges', 'grapes']
# Grow the list at the end.
fruits.append('honey')
# Replace an element in place by index.
fruits[0] = 'Blueberries'
# Remove the element at index 2.
del fruits[2]
# Reverse in place, sort ascending, then sort descending.
fruits.reverse()
fruits.sort()
fruits.sort(reverse=True)
print(fruits)
| true |
8eb20decf0310212589d63081ca1ae0af4251e95 | Python | ktjell/WaterGrid | /1simulink6RPIs/shamir_scheme.py | UTF-8 | 995 | 3.015625 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 28 12:56:38 2018
@author: kst
"""
import FFArithmetic as field
import numpy as np
#np.random.seed(1)
# Creates shares of secrets using Shamir's secret sharing scheme.
def share(F, x, t, n):
    """Split secret x into n Shamir shares over the finite field F.

    A degree-t polynomial p with constant term x and t random coefficients
    from F is evaluated at the points 1..n; share i is p(i).

    :param F: finite-field class (FFArithmetic) providing random_element/power
    :param x: the secret, an element of F
    :param t: polynomial degree (reconstruction threshold minus one)
    :param n: number of shares to produce
    :return: numpy array containing the n shares
    """
    coeffs = [F.random_element() for _ in range(t)]
    shares = []
    for i in range(1, n + 1):
        # Evaluate p(i) = x + c_1*i + c_2*i^2 + ... + c_t*i^t.
        s = x
        for j in range(1, t + 1):
            s += coeffs[j - 1] * F.power(F(i), j)
        shares.append(s)
    # Bug fix: the original built np.array(shares) twice (the first result
    # was discarded); one construction suffices.
    return np.array(shares)
# Creates the "recombination"-vector used to reconstruct a secret from its shares.
def basispoly(F, n):
    """Lagrange "recombination" vector for share points 1..n.

    Returns [l_1(0), ..., l_n(0)], the Lagrange basis polynomials for the
    evaluation points 1..n evaluated at 0, computed in the field F; rec()
    uses these weights to interpolate the secret from its shares.
    """
    points = list(range(1, n + 1))
    coeffs = []
    for i in points:
        # l_i(0) = product over all other points k of (0 - k) / (i - k).
        prod = 1
        for k in points:
            if k == i:
                continue
            prod *= -F(k) / (F(i) - F(k))
        coeffs.append(prod)
    return coeffs
# reconstruct secret.
def rec(F, x):
    """Reconstruct the secret from the full list of shares x over field F.

    Computes the Lagrange interpolation at 0: sum_i x_i * l_i(0), using
    the recombination weights from basispoly().
    """
    weights = basispoly(F, len(x))
    total = F(0)
    for share_value, weight in zip(x, weights):
        total += share_value * weight
    return total
| true |
b6a7d52a0a0edf20aca676bd145fe2473c012a0a | Python | TaoChenOSU/quad_nn | /ros_ws/src/crazyswarm/scripts/follow_waypoints.py | UTF-8 | 966 | 2.859375 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
import numpy as np
import random
from pycrazyswarm import *
def randomWaypoints():
    """Build 100 random waypoints inside a small box around (0, 0, 1),
    followed by one fixed final waypoint at [0, 0, 1].

    Each random waypoint is [x, y, z] with x, y drawn from [-0.3, 0.3]
    and z from [0.8, 1.2].
    """
    points = [
        [random.uniform(-0.3, 0.3),
         random.uniform(-0.3, 0.3),
         random.uniform(0.8, 1.2)]
        for _ in range(100)
    ]
    points.append([0.0, 0.0, 1.0])
    return points
if __name__ == "__main__":
    # generate waypoints randomly
    waypoints = randomWaypoints()
    # execute waypoints: connect to the swarm and take off first
    swarm = Crazyswarm()
    timeHelper = swarm.timeHelper
    allcfs = swarm.allcfs
    height = 1.0  # NOTE(review): unused; the takeoff height is passed literally below
    allcfs.takeoff(targetHeight=1.0, duration=2.0)
    timeHelper.sleep(2.0)
    lastTime = 0.0  # NOTE(review): unused
    counter = 0
    # Send every crazyflie to each waypoint in turn, pausing between moves.
    for waypoint in waypoints:
        for cf in allcfs.crazyflies:
            print "Going to " + str(counter) +": " + str(waypoint)
            cf.goTo(waypoint, 0, 0.6)  # presumably (position, yaw, duration) -- TODO confirm against pycrazyswarm API
        timeHelper.sleep(0.6)
        counter += 1
    # Land near the ground and give the vehicles time to settle.
    allcfs.land(targetHeight=0.06, duration=2.0)
    timeHelper.sleep(3.0)
b79985b42563a651d7c28ad5f3ad54ebbd7cf424 | Python | willmacd/dl-malware-detection | /mainNN.py | UTF-8 | 2,096 | 2.78125 | 3 | [] | no_license | import os
import tensorflow as tf
from DataProcessing.LoadData import dat_to_train_test, to_tf_dataset, \
rm_unlabelled_samples, to_batch_dataset, normalize_data, dataset_pca_reduction
from Model.NNMalwareDetection import MalwareDetectionNN
# Expected number of labelled samples after filtering; used below for the
# 85/15 train/validation split.  NOTE(review): assumed to match the actual
# post-filter dataset size -- confirm against the data pipeline.
FILTERED_DATASET_SIZES = 600000
# Samples per training batch.
BATCH_SIZE = 1000
# Number of passes over the training set.
EPOCHS = 125
# CHECK: Ensure that this is the correct path to the dataset
DATA_DIR = './Data/dat/'
if __name__ == '__main__':
    # Load the raw .dat files and split them into train/test arrays.
    x_train, y_train, x_test, y_test = dat_to_train_test(DATA_DIR)

    # Normalize the data using robust scaler
    print("Normalizing data...")
    x_train_scaled = normalize_data(x_train)
    x_test_scaled = normalize_data(x_test)
    print("Data normalization complete...")

    # Apply PCA dimensionality reduction
    print("Computing PCA for dimensionality reduction...")
    x_train_pca, x_test_pca = dataset_pca_reduction(x_train_scaled, x_test_scaled)
    print("PCA dimensionality reduction complete...")

    # Number of components kept by PCA (width of one reduced sample row).
    num_components_pca = len(x_train_pca[1])
    print(num_components_pca)

    unfiltered_train_ds = to_tf_dataset(x_train_pca, y_train)
    unfiltered_test_ds = to_tf_dataset(x_test_pca, y_test)

    # Filter out the data with label '-1' (unlabeled)
    filtered_train_ds = rm_unlabelled_samples(unfiltered_train_ds)
    filtered_test_ds = rm_unlabelled_samples(unfiltered_test_ds)

    # 85/15 train/validation split of the filtered training data.
    train_ds = filtered_train_ds.take(int(0.85 * FILTERED_DATASET_SIZES))
    val_ds = filtered_train_ds.skip(int(0.85 * FILTERED_DATASET_SIZES))

    # Bug fix: the original batched `filtered_train_ds` here, silently
    # discarding the take() split above and leaking validation samples
    # into the training set.
    train_ds = to_batch_dataset(train_ds, BATCH_SIZE)
    val_ds = to_batch_dataset(val_ds, BATCH_SIZE)
    test_ds = to_batch_dataset(filtered_test_ds, BATCH_SIZE)

    # Create the instance of the MalwareDetection model
    model = MalwareDetectionNN(num_components_pca)
    model.summary()
    model.train(train_ds,
                validation_dataset=val_ds,
                epochs=EPOCHS,
                optimizer=tf.keras.optimizers.Adam,
                learning_rate=0.007)

    test_loss, test_acc = model.test(test_ds)
    print(test_loss)
    print(test_acc)
| true |
ecae58b1a85f63e140e4d22bfa1d0c2330c616c5 | Python | Ryder-987/python | /class/11/練習.py | UTF-8 | 56 | 3.15625 | 3 | [] | no_license | a=int(input('數值:'))
# b is the square of (the numeric input + 10), kept as a string so it can
# be concatenated into the output message below.
b=str((a+10)**2)
print('A:' + b)
1ace78f5eb693175fd01b6ffe7eac02adcda2d9e | Python | zuelzt/learning_of_tensorflw | /tensorboard_basic.py | UTF-8 | 2,495 | 2.703125 | 3 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 1 16:28:39 2018
@author: Rorschach
@mail: 188581221@qq.com
"""
import warnings
warnings.filterwarnings('ignore')
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST (downloaded to the path below on first run) with one-hot labels.
mnist = input_data.read_data_sets("/Users/zt/Desktop/Master File/practice of python/MNIST_data/",
                                  one_hot=True)
# training parameters
learning_rate = 0.01
training_epochs = 25
batch_size = 100
display_epoch = 1
# destination directory for TensorBoard event files
logs_path = "/Users/zt/Desktop/TensorFlow/logs/"
# network layer sizes
n_hidden_1 = 256  # units in the first hidden layer
n_hidden_2 = 100  # units in the second hidden layer
n_input = 28*28  # one flattened 28x28 MNIST image
n_output = 10  # one logit per digit class
# graph inputs: batches of flattened images and their one-hot labels
with tf.name_scope('Input'):
    X = tf.placeholder(tf.float32, [None, 28*28], name='InputData')
    y = tf.placeholder(tf.float32, [None, 10], name='LabelData')
# create model
def multlayer_perceptron(X, weights, biases):
    """Two-hidden-layer perceptron: relu(X.W1+b1) -> relu(.W2+b2) -> .W3+b3.

    :param X: input batch, shape [batch, n_input]
    :param weights: dict with 'w1' [n_input, n_hidden_1],
        'w2' [n_hidden_1, n_hidden_2], 'w3' [n_hidden_2, n_output]
    :param biases: dict with 'b1', 'b2', 'b3' bias vectors
    :return: unscaled logits, shape [batch, n_output]

    Bug fix: the original used tf.multiply (element-wise), which cannot
    combine a [batch, n_input] batch with an [n_input, n_hidden] weight
    matrix; a dense layer requires the matrix product tf.matmul.
    """
    # hidden layer 1
    layer_1 = tf.add(tf.matmul(X, weights['w1']), biases['b1'])
    layer_1 = tf.nn.relu(layer_1)
    tf.summary.histogram('relu1', layer_1)
    # hidden layer 2
    layer_2 = tf.add(tf.matmul(layer_1, weights['w2']), biases['b2'])
    layer_2 = tf.nn.relu(layer_2)
    tf.summary.histogram('relu2', layer_2)
    # output layer: raw logits (softmax is applied later inside the loss)
    out_layer = tf.add(tf.matmul(layer_2, weights['w3']), biases['b3'])
    return out_layer
# Trainable parameters.  Bug fix: the original created bare tf.random_normal
# tensors, which are re-sampled on every session run and are never registered
# in tf.trainable_variables(), leaving the SGD step below with no gradients
# to apply; wrapping them in tf.Variable fixes both problems.
weights = {
    'w1': tf.Variable(tf.random_normal([n_input, n_hidden_1]), name='w1'),
    'w2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2]), name='w2'),
    'w3': tf.Variable(tf.random_normal([n_hidden_2, n_output]), name='w3')}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1]), name='b1'),
    'b2': tf.Variable(tf.random_normal([n_hidden_2]), name='b2'),
    'b3': tf.Variable(tf.random_normal([n_output]), name='b3')}
# Build the graph: forward pass, loss, optimizer, accuracy.
with tf.name_scope('Model'):
    pred = multlayer_perceptron(X, weights, biases)
with tf.name_scope('Loss'):
    # softmax cross entropy over the raw logits
    loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))
with tf.name_scope('SGD'):
    # Gradients are computed explicitly so each (gradient, variable) pair
    # is available, e.g. for TensorBoard summaries.
    optimizer = tf.train.GradientDescentOptimizer(learning_rate)
    grads = tf.gradients(loss, tf.trainable_variables())
    grads = list(zip(grads, tf.trainable_variables()))
    apply_grads = optimizer.apply_gradients(grads_and_vars=grads)
with tf.name_scope('Accuracy'):
    # Fraction of predictions whose argmax matches the one-hot label.
    acc = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    acc = tf.reduce_mean(tf.cast(acc, dtype=tf.float32))
# Initialize all variables (now that weights/biases really are variables).
init = tf.global_variables_initializer()
| true |
838abeffe79656b91f03ab37524aeeb72fcd1319 | Python | Mark24Code/python_data_structures_and_algorithms | /剑指offer/40_NumbersAppearOnce(数组中只出现一次的数字).py | UTF-8 | 4,088 | 4.46875 | 4 | [
"MIT"
] | permissive | """
题目:一个整型数组里除了两个数字之外,其他的数字都出现了两次。请写程序找出这两个只出现一次的数字。要求时间复杂度是O(n),
空间复杂度是O(1)。
我们还是从头到尾依次异或数组中的每一个数字,那么最终得到的结果就是两个只出现一次的数字的异或结果。因为其他数字都出现了两次,
在异或中全部抵消了。由于这两个数字肯定不一样,那么异或的结果肯定不为0,也就是说在这个结果数字的二进制表示中至少就有一位为1。
我们在结果数字中找到第一个为1的位的位置,记为第n位。现在我们以第n位是不是1为标准把原数组中的数字分成两个子数组
,第一个子数组中每个数字的第n位都是1,而第二个子数组中每个数字的第n位都是0。由于我们分组的标准是数字中的某一位是1还是0,
那么出现了两次的数字肯定被分配到同一个子数组。因为两个相同的数字的任意一位都是相同的,我们不可能把两个相同的数字分配到两个子数组中去,
于是我们已经把原数组分成了两个子数组,每个子数组都包含一个只出现一次的数字,而其他数字都出现了两次。
我们已经知道如何在数组中找出唯一一个只出现一次数字,因此到此为止所有的问题都已经解决了。
https://leetcode.com/problems/single-number-iii/
Given an array of numbers nums, in which exactly two elements appear only once and all the other elements appear exactly twice. Find the two elements that appear only once.
Example:
Input: [1,2,1,3,2,5]
Output: [3,5]
Note:
The order of the result is not important. So in the above example, [5, 3] is also correct.
Your algorithm should run in linear runtime complexity. Could you implement it using only constant space complexity?
"""
class Solution1:
def singleNumber(self, nums):
"""
类似题:求只出现一次的数字,其他都出现两次。
:type nums: List[int]
:rtype: int
"""
first = 0
for num in nums:
first ^= num
return first
class Solution:
def singleNumber(self, nums):
"""
:type nums: List[int]
:rtype: List[int]
"""
def get_single_num(nums):
first = 0
for num in nums:
first ^= num
return first
single = get_single_num(nums)
print(single)
mask = 1
while single & mask == 0:
mask = mask << 1
print(mask, '||||||||||||||||||')
left = [i for i in nums if i & mask]
right = [i for i in nums if not(i & mask)]
return [get_single_num(left), get_single_num(right)]
class Solution(object):
def singleNumber(self, nums):
"""
思路:异或。
136 题做过只出现一次的一个数字,本题有两个只出现一次的数字。
核心在于把数组分成两个。怎么分组呢?
假设只出现一次的数字式 x1,x2,所有元素异或结果是 x。一定有 x=x1^x2。
x不能是0,否则x1==x2,就不是只出现一次的数字了。
*可以用位运算 x&-x 取出x 二进制位最低位的 1,设其实是第 L 位置。*
可以据此分类数组,一组是二进制第 L位置为 0 的数字,一组L 位置为 1的数字。
x1,x2会分别出现在两个组中。这样第一组全部异或得到x1, 第二组全部异或得到 x2
:type nums: List[int]
:rtype: List[int]
"""
x = 0
for num in nums:
x ^= num
low1pos = x & -x # 这里也可以用移位运算,x&-x 不太好想,类似上边那个解法
x1, x2 = 0, 0
for num in nums:
if num & low1pos:
x1 ^= num
else:
x2 ^= num
return x1, x2
def test():
s = Solution()
assert s.singleNumber([1, 2, 1, 3, 2, 5]) == [3, 5]
test()
| true |
1a05794696873af1f3d67c9160213e21c9c60ada | Python | pebsconsulting/bach | /_old/python/bach2latex-simple.py | UTF-8 | 5,467 | 3.359375 | 3 | [
"MIT"
] | permissive | """
Parses a Bach document and prints the result as a LaTeX document.
Partial implementation.
Usage:
cat examples/article.bach | python3 python/bach2latex-simple.py > out.latex
pdflatex out.latex
"""
import argparse
import bach
import datetime
import io
import sys
import textwrap
# w.r.t. https://en.wikibooks.org/wiki/LaTeX/Document_Structure#Document_classes
LATEX_DOCUMENT_CLASSES = (
'article', # for articles in scientific journals, presentations, short reports, program documentation, invitations, ...
'IEEEtran', # for articles with the IEEE Transactions format.
'proc', # a class for proceedings based on the article class
'report', # for longer reports containing several chapters, small books, thesis, ...
'book', # for real books
'slides', # presentations
'memoir', # based on book class
'letter', # for writing letters
'beamer', # presentations
)
# w.r.t. http://www.cespedes.org/blog/85/how-to-escape-latex-special-characters
LATEX_ESCAPES = [
('#', '\\#'),
('$', '\\$'),
('%', '\\%'),
('&', '\\&'),
('\\', '\\textbackslash{}'),
('^', '\\textasciicircum{}'),
('_', '\\_'),
('{', '\\{'),
('}', '\\}'),
('~', '\\textasciitilde{}'),
]
def LatexEscape(s):
for k,v in LATEX_ESCAPES:
s = s.replace(k, v)
return s
def NiceText(s):
# Nice 80-column text from input text that may be indented horribly
s = s.split()
s = ' '.join(s)
s = textwrap.wrap(s, 78)
return '\n'.join(s)
def OneAttribute(attributes, attribute, default=None):
# bach attributes are a dictionary mapping names to an array of zero or
# more values. Sometimes we only want exactly one value.
# If default is None and no value is given, raises a KeyError
value = attributes.get(attribute)
if value is not None:
if len(value) == 1:
value = value[0]
if value is not None:
return value
else:
raise ValueError("Expected a non-None value for %s" % attribute)
else:
raise ValueError("Expected exactly one value for %s" % attribute)
else:
if default is None:
raise KeyError("Expected attribute %s" % attribute)
return default
def StartDocument(doc):
# Top-level of document
# a bach document tree is a 3-tuple
label, attributes, subdocuments = doc
assert label in LATEX_DOCUMENT_CLASSES, \
"Document must begin with any valid LaTeX \documentclass: (%s)" % ', '.join(LATEX_DOCUMENT_CLASSES)
title = LatexEscape(OneAttribute(attributes, 'title', 'Untitled'))
authors = attributes.get('author', ['Anonymous'])
authors = list(map(LatexEscape, authors))
date = OneAttribute(attributes, 'date', '\\today')
print("""
% Automatically generated by {generator}
% at {now}
\\documentclass{{{docClass}}}
\\title{{{title}}}
\\author{{{authors}}}
\\date{{{date}}}
\\begin{{document}}
\\maketitle
""".format(
generator = sys.argv[0],
now = datetime.datetime.now(),
docClass = label,
title = title,
authors = ' \\and '.join(authors),
date = date
))
for x in StartSubdocuments(subdocuments):
print(x)
print ("\\end{document}")
def StartSubdocuments(subdocuments, sectionDepth=0, chapterDepth=0):
for i in subdocuments:
if isinstance(i, str):
yield i
else:
yield from StartSubdocument(i, sectionDepth, chapterDepth)
def StartSubdocument(subdocument, sectionDepth=-1, chapterDepth=-1):
# a bach subdocument is either a 3-tuple document tree or a single string
# (the preceeding function checks for string instances)
label, attributes, subdocuments = subdocument
if label == 'section':
assert sectionDepth >= 0, "section not allowed here"
title = LatexEscape(OneAttribute(attributes, 'title', 'Untitled Section'))
subs = 'sub'*sectionDepth
yield "\\%ssection{%s}" % (subs, title)
yield from StartSubdocuments(subdocuments, sectionDepth+1, chapterDepth)
elif label == 'chapter':
assert chapterDepth == 0, "chapter not allowed here"
title = LatexEscape(OneAttribute(attributes, 'title', 'Untitled Chapter'))
yield "\\chapter{%s}" % title
yield from StartSubdocuments(subdocuments, sectionDepth, chapterDepth+1)
elif label in ('b', 'p'):
text = ' '.join([x for x in StartSubdocuments(subdocuments)])
if label == 'b':
yield '\\textbf{%s}' % text.strip()
elif label == 'p':
yield '%s\n' % NiceText(text.strip())
else:
yield LatexEscape(text)
else:
raise Exception("Don't know what to do with: %s" % label)
ap = argparse.ArgumentParser(
description='Takes an "article style" bach document from stdin and writes a LaTeX document to stdout')
ap.add_argument('-e', '--encoding', default='utf-8',
help='specify the input character encoding (defaults to utf-8)')
args = ap.parse_args()
# Get the standard input binary buffer and wrap it in a file-object so that it
# decodes into a stream of Unicode characters from the specified encoding.
fp = io.TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
tree = bach.parse(fp)
StartDocument(tree)
| true |
993f1c63f9df335b07953d6a2f41c34623eb0183 | Python | Raj6713/rk_opt | /src/helper_function.py | UTF-8 | 718 | 2.90625 | 3 | [
"MIT"
] | permissive | import numpy as np
import pandas as pd
def calculate_enter_and_exit(dataframe, minimum_value, variable_name, index_value):
    """Mark one row as excluded and find the minimum ratio C / variable
    among the remaining rows (a simplex-style entering/leaving step).

    Mutates `dataframe` in place, adding a 'Calculation' flag column
    ('false' for the excluded `index_value` row, 'true' elsewhere) and an
    'Enter_or_Exit' column holding C / variable per row.

    :param dataframe: pandas DataFrame with a 'C' column and `variable_name` column
    :param minimum_value: unused; kept for interface compatibility
    :param variable_name: column to divide 'C' by
    :param index_value: index label of the row excluded from the minimum
    :return: (min_ratio, index_of_min) -- the original computed these but
        returned nothing and only printed them; the debug prints and dead
        commented-out code have been removed.
    """
    dataframe['Calculation'] = "true"
    dataframe.loc[index_value, "Calculation"] = "false"
    dataframe['Enter_or_Exit'] = dataframe['C'] / dataframe[variable_name]
    # Minimum ratio considering only the non-excluded rows.
    eligible = dataframe[dataframe['Calculation'] == 'true']['Enter_or_Exit']
    min_in_enter_or_exit = np.min(eligible)
    # NOTE(review): as in the original, the index lookup scans the FULL
    # frame, so if the excluded row ties the minimum it can be returned.
    index_where_value_is_min = dataframe[dataframe['Enter_or_Exit'] == min_in_enter_or_exit].index.values[0]
    return min_in_enter_or_exit, index_where_value_is_min
| true |
840a09f085a066f752de2831e3aa632d9b0a897f | Python | deivy311/mujoco_panda | /mujoco_panda/controllers/controller_base.py | UTF-8 | 3,630 | 2.640625 | 3 | [] | no_license | import abc
import threading
import logging
import time
import numpy as np
LOG_LEVEL = "DEBUG"
class ControllerBase(object):
    """
    Base class for joint controllers.

    On construction a background control thread is started; while the
    controller is active it repeatedly asks the subclass for a command
    (_compute_cmd), sends it to the robot and steps the simulation at the
    configured control rate.
    """

    def __init__(self, robot_object, config=None):
        """
        :param robot_object: robot interface exposing sim/model data and
            actuated_arm_joints (e.g. a PandaArm instance)
        :param config: optional dict of controller options; 'control_rate'
            overrides the default rate of one cycle per simulation timestep
        """
        self._robot = robot_object

        logging.basicConfig(format='\n{}: %(levelname)s: %(message)s\n'.format(
            self.__class__.__name__), level=LOG_LEVEL)
        self._logger = logging.getLogger(__name__)

        # Bug fix: the original signature used a mutable default argument
        # (config={}), a dict shared by every instance created without an
        # explicit config; None is used as the sentinel instead.
        self._config = {} if config is None else config

        if 'control_rate' in self._config:
            control_rate = self._config['control_rate']
        else:
            control_rate = 1. / self._robot.model.opt.timestep

        self._is_active = False

        # Start from the robot's current actuator commands.
        self._cmd = self._robot.sim.data.ctrl[self._robot.actuated_arm_joints].copy()

        # The mutex serialises command computation and stepping inside the
        # control thread.
        self._mutex = threading.Lock()
        self._ctrl_thread = threading.Thread(target=self._send_cmd, args=(control_rate,))
        self._is_running = True
        self._ctrl_thread.start()

        self._error = {'linear': np.zeros(3), 'angular': np.zeros(3)}

    @property
    def is_active(self):
        """
        Returns True if controller is active

        :return: State of controller
        :rtype: bool
        """
        return self._is_active

    def set_active(self, status=True):
        """
        Activate/deactivate controller

        :param status: To deactivate controller, set False. Defaults to True.
        :type status: bool, optional
        """
        self._is_active = status

    def toggle_activate(self):
        """
        Toggles controller state between active and inactive.
        """
        self.set_active(status=not self._is_active)

    @abc.abstractmethod
    def _compute_cmd(self):
        # Subclasses compute the next joint command (stored in self._cmd).
        raise NotImplementedError("Method must be implemented in child class!")

    @abc.abstractmethod
    def set_goal(self, *args, **kwargs):
        # Subclasses accept a controller-specific goal (pose, joint target, ...).
        raise NotImplementedError("Method must be implemented in child class!")

    def _send_cmd(self, control_rate):
        """
        Control-thread loop, started by __init__. If the controller is
        active, the command is computed and sent to the robot, and the
        simulation is stepped forward automatically.

        :param control_rate: rate of control loop, ideally same as
            simulation step rate.
        :type control_rate: float
        """
        while self._is_running:
            now_c = time.time()
            if self._is_active:
                # Robustness fix: hold the lock via a context manager so it
                # is released even if _compute_cmd or a robot call raises
                # (the original acquire/release pair could leak the lock).
                with self._mutex:
                    self._compute_cmd()
                    self._robot.set_joint_commands(
                        self._cmd, joints=self._robot.actuated_arm_joints, compensate_dynamics=False)
                    self._robot.step(render=False)
            # Sleep for whatever remains of this control period.
            elapsed_c = time.time() - now_c
            sleep_time_c = (1. / control_rate) - elapsed_c
            if sleep_time_c > 0.0:
                time.sleep(sleep_time_c)

    def stop_controller_cleanly(self):
        """
        Method to be called when stopping controller. Zeroes the actuator
        commands, stops the controller thread and exits.
        """
        self._is_active = False
        self._logger.info("Stopping controller commands; removing ctrl values.")
        self._robot.set_joint_commands(np.zeros_like(self._robot.actuated_arm_joints), self._robot.actuated_arm_joints)
        self._robot._ignore_grav_comp = False
        self._logger.info("Stopping controller thread. WARNING: PandaArm->step() method has to be called separately to continue simulation.")
        self._is_running = False
        self._ctrl_thread.join()
| true |