blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
464c7f25f8528a78138fc7a52e64f0839d193bd6 | Python | BurdenBear/kube-charts-mirror | /fetch.py | UTF-8 | 3,585 | 2.671875 | 3 | [] | no_license | #!/usr/bin/env python
# encoding: utf-8
import yaml
import requests
from contextlib import closing
import os
from urllib.parse import urlparse
# Adapted from https://www.zhihu.com/question/41132103/answer/93438156
def wget(url, file_name):
    """Stream ``url`` to the local file ``file_name``, printing a ProgressBar.

    NOTE(review): assumes the server always sends a Content-Length header;
    a missing header raises KeyError here -- TODO confirm acceptable.
    """
    with closing(requests.get(url, stream=True)) as response:
        chunk_size = 1024  # maximum size of a single read chunk
        content_size = int(response.headers['content-length'])  # total body size in bytes
        progress = ProgressBar(file_name, total=content_size,
                               unit="KB", chunk_size=chunk_size, run_status="正在下载", fin_status="下载完成")
        with open(file_name, "wb") as file:
            for data in response.iter_content(chunk_size=chunk_size):
                file.write(data)
                progress.refresh(count=len(data))
class ProgressBar(object):
    """Console progress bar that rewrites a single line with carriage returns.

    The line has the form ``[title] status current unit sep total unit``;
    once ``count`` reaches ``total`` the bar prints a final line ending in a
    newline and switches to ``fin_status``.
    """

    def __init__(self, title,
                 count=0.0,
                 run_status=None,
                 fin_status=None,
                 total=100.0,
                 unit='', sep='/',
                 chunk_size=1.0):
        """
        title: label shown in brackets at the start of the line.
        count: initial progress value (same unit as ``total``, e.g. bytes).
        run_status: status text while running (defaults to "").
        fin_status: status text when finished (defaults to blanks the same
            width as the running status, so it visually erases it).
        total: value at which the bar is considered complete.
        unit: unit label appended after the numbers.
        sep: separator printed between the current and total values.
        chunk_size: divisor applied to count/total for display (e.g. 1024
            to show KB while counting bytes).
        """
        super(ProgressBar, self).__init__()
        self.info = "【%s】%s %.2f %s %s %.2f %s"
        self.title = title
        self.total = total
        self.count = count
        self.chunk_size = chunk_size
        self.status = run_status or ""
        # Bug fix: this previously read ``self.statue`` (typo) and raised
        # AttributeError whenever fin_status was falsy (None or "").
        self.fin_status = fin_status or " " * len(self.status)
        self.unit = unit
        self.seq = sep

    def __get_info(self):
        # [title] status progress unit sep total unit
        _info = self.info % (self.title, self.status,
                             self.count / self.chunk_size, self.unit, self.seq, self.total / self.chunk_size, self.unit)
        return _info

    def refresh(self, count=1, status=None):
        """Advance progress by ``count`` and reprint the bar line.

        An explicit ``status`` overrides the displayed status text; when the
        bar completes, ``fin_status`` is shown instead (unless overridden).
        """
        self.count += count
        self.status = status or self.status
        end_str = "\r"
        if self.count >= self.total:
            # Finished: terminate the line and show the final status.
            end_str = '\n'
            self.status = status or self.fin_status
        print(self.__get_info(), end=end_str)
def main():
    """Mirror a Helm chart repository into a GitHub-Pages-served git repo.

    Downloads the upstream ``index.yaml`` (CHARTS_URL), downloads every chart
    tarball it references into ``/mnt/charts/docs``, and writes a rewritten
    ``index.yaml`` whose URLs point at the mirror (MIRROR_URL, defaulting to
    the GitHub Pages URL derived from GIT_REPO).

    Raises RuntimeError when the GIT_REPO environment variable is unset.
    """
    root = "/mnt/charts/docs"
    chart_url = os.environ.get(
        "CHARTS_URL", "https://kubernetes-charts.storage.googleapis.com/")
    repo_url = os.environ.get("GIT_REPO")
    if repo_url is None:
        raise RuntimeError("You must specify a git repo!")
    # Derive "https://<user>.github.io/<repo>/" from the git repo URL path.
    p = urlparse(repo_url)
    git_user = p.path.split("/")[-2]
    repo_name = p.path.split("/")[-1].split(".")[0]
    default_mirror = "https://%s.github.io/%s/" % (git_user.lower(), repo_name)
    mirror_url = os.environ.get("MIRROR_URL", default_mirror)
    index_file = "index.yaml"
    wget(chart_url + index_file, index_file)
    with open(index_file) as f:
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects from untrusted input -- consider safe_load.
        index = yaml.load(f)
    entries = index["entries"]
    new = index.copy()
    for name, charts in entries.items():
        # charts and new["entries"][name] are the same list objects (shallow
        # copy above), iterated in lockstep.
        for chart, new_chart in zip(charts, new["entries"][name]):
            url = chart["urls"][0]
            tar_name = url.split("/")[-1]
            target = os.path.join(root, tar_name)
            # Point the chart URL at the mirror instead of upstream.
            new_chart["urls"][0] = "/".join(
                [mirror_url[:-1] if mirror_url.endswith("/") else mirror_url, tar_name])
            # Re-serialize the datetime to avoid a YAML datetime format issue.
            new_chart["created"] = new_chart["created"].strftime('%Y-%m-%dT%H:%M:%S.%f000Z')
            # Skip tarballs already downloaded on a previous run.
            if os.path.exists(target):
                continue
            wget(url, target)
    new["generated"] = new["generated"].strftime('%Y-%m-%dT%H:%M:%S.%f000Z')
    with open(os.path.join(root, "index.yaml"), "w") as f:
        yaml.dump(new, stream=f)
if __name__ == "__main__":
    main()
| true |
93761af643fac46ce66ae6d74b97fea1d95329fc | Python | lajanki/alarmpi | /src/handlers/get_bbc_news.py | UTF-8 | 852 | 2.546875 | 3 | [
"MIT"
] | permissive | import logging
import feedparser
from src import apcontent
event_logger = logging.getLogger("eventLogger")
class NewsParser(apcontent.AlarmpiContent):
    """Build a spoken-news text snippet from the BBC World RSS feed."""

    def __init__(self, section_data):
        super().__init__(section_data)

    def build(self):
        """Fetch the feed and store the resulting text in self.content."""
        feed = feedparser.parse("https://feeds.bbci.co.uk/news/world/rss.xml")
        # bozo is set by feedparser when the fetch failed or the XML was malformed
        if feed.bozo or feed.status != 200:
            self.content = "Failed to reach BBC News"
            return
        parts = ["And now, The latest stories from the World section of the BBC News.\n\n"]
        # Speak only the first four stories.
        for item in feed.entries[:4]:
            parts.append("{}.\n{}\n\n".format(item["title"], item["description"]))
        self.content = "".join(parts)
| true |
01f3bb2949137561853700a79c279c1806870830 | Python | lapis42/boj | /boj16975_segment_tree_v2.py | UTF-8 | 685 | 3.03125 | 3 | [] | no_license | import sys
from operator import xor
input = sys.stdin.readline
def get(r):
    """Point query for 1-based position r.

    The tree ``t`` stores a difference array for pending range additions;
    this sums t over leaves [0, r) (an iterative segment-tree range sum)
    and adds it to the original value a[r-1].
    """
    ans = a[r - 1]
    l = n
    r += n
    # Standard bottom-up segment-tree sum over the half-open leaf range.
    while l < r:
        if l & 1:
            ans += t[l]
            l += 1
        if r & 1:
            r -= 1
            ans += t[r]
        l >>= 1
        r >>= 1
    return ans
def update(i, v):
    """Add v at 0-based leaf i and refresh every ancestor's sum."""
    node = i + n
    t[node] += v
    parent = node >> 1
    while parent:
        # A parent's value is always the sum of its two children.
        t[parent] = t[2 * parent] + t[2 * parent + 1]
        parent >>= 1
# Range-add / point-query driver (BOJ 16975): range updates are applied as
# a difference array (add v at l, subtract v past r), queries prefix-sum it.
n = int(input())
a = list(map(int, input().split()))
t = [0] * (2 * n)
for _ in range(int(input())):
    q, *x = map(int, input().split())
    if q == 1:
        # Query "1 l r v": add v on [l, r] via two point updates.
        update(x[0] - 1, x[2])
        if x[1] < n: update(x[1], -x[2])
    else:
        # Query "2 i": print the current value at position i.
        print(get(x[0]))
| true |
f68c61cd65bb03cdf90d0eb41e0c1cfcda7dc577 | Python | PabloRO07/SIUNTREF | /Dsp/punto1/RMS.py | UTF-8 | 195 | 2.875 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
'Esta funcion calcula el RMS de una señal'
def RMS(xn):
    """Return the root-mean-square value of the signal ``xn``."""
    num_samples = len(xn)
    mean_power = sum(np.square(abs(xn))) / num_samples
    return np.sqrt(mean_power)
| true |
ba35d9058fb9d08fa32844d37a2eb0fe259a01f1 | Python | Eastone/Seismograph | /lib/ParseWindow.py | UTF-8 | 2,744 | 2.984375 | 3 | [] | no_license | # utility class to handle config file (measurement windows)
import sys, string, time
import Global
class ParseWindow:
    """Loads measurement time windows for one monitor from a config file (Python 2).

    Each window is a (flag, start timestamp, end timestamp) triple, kept in
    three parallel lists.
    """
    def __init__ (self, monitor):
        self.window_flag = []   # per-window flag string (e.g. a filter name)
        self.window_start = []  # per-window start, as a Unix timestamp
        self.window_end = []    # per-window end, as a Unix timestamp
        # Parse window configure file
        self.__readConf(Global.window[monitor], Global.timeshift[monitor])
    # Read config file to set the time window information
    def __readConf(self, file_name, timeshift):
        """Parse ``flag=``, ``start=`` and ``end=`` lines into the three lists.

        Timestamps are written as YYYY-MM-DD-HH-MM-SS and shifted back by
        ``timeshift`` seconds after conversion to epoch time.
        """
        f = file(Global.root + file_name)
        line = f.readline()
        while(line):
            line = string.rstrip(line)
            # Empty or comment
            if len(line) == 0 or line.startswith('#'):
                pass
            # Flag, such as cpt_filter
            elif line.startswith('flag'):
                list = string.split(line, '=')
                self.window_flag.append(list[1])
            # Start of one window
            elif line.startswith('start'):
                list = string.split(line, '=')
                list = string.split(list[1], '-')
                ts = (int(list[0]), int(list[1]), int(list[2]), int(list[3]), int(list[4]), int(list[5]), 0, 0, 0)
                timestamp = int(time.mktime(ts)) - timeshift
                self.window_start.append(timestamp)
            # End of one window
            elif line.startswith('end'):
                list = string.split(line, '=')
                list = string.split(list[1], '-')
                ts = (int(list[0]), int(list[1]), int(list[2]), int(list[3]), int(list[4]), int(list[5]), 0, 0, 0)
                timestamp = int(time.mktime(ts)) - timeshift
                self.window_end.append(timestamp)
            # Others
            else:
                print "Unrecognized config: %s \n" % line
            line = f.readline()
        f.close()
        # Make sure parse window completely
        try:
            assert len(self.window_flag) == len(self.window_start)
            assert len(self.window_start) == len(self.window_end)
        except:
            print 'Problem with config file: non-matching number of items'
            sys.exit(1)
    def getWindows(self, timestamp):
        """Return the index of the window containing ``timestamp``, or -1."""
        # Check each window
        for k in range(len(self.window_start)):
            if timestamp >= self.window_start[k] and timestamp <= self.window_end[k]:
                return k
        # NOT in any window
        return -1
    def getFlag(self, timestamp):
        """Return the flag of the window containing ``timestamp``, or None."""
        # Check each window
        for k in range(len(self.window_start)):
            if timestamp >= self.window_start[k] and timestamp <= self.window_end[k]:
                return self.window_flag[k]
        # NOT in any window
        return None
| true |
856a904863f8c056de988471850027aa5d13c5ee | Python | Anujay-Saraf/fsdse-python-assignment-106 | /build.py | UTF-8 | 441 | 3.53125 | 4 | [] | no_license | def two_sum(inputList,Sum):
''' to get the two indices that sum to a specific value. '''
if(inputList == [] and Sum == 0):
raise ValueError
indicesList = []
for i in range (0,len(inputList)):
for j in range (i+1,len(inputList)):
if(inputList[i] + inputList[j] == Sum):
indicesList.extend((i,j))
print indicesList
return indicesList
two_sum([9,12,5,1,13,18,-12,20,7], 16)
| true |
a542a40e8450e6cf99d627a458aa4f9a1c720e1a | Python | isabella-df/bacteria-growth | /Tetscript 2.py | UTF-8 | 642 | 2.765625 | 3 | [] | no_license | import numpy as np
def dataLoad(filename):
    """Load whitespace-separated rows (temperature, growth rate, bacteria id)
    from ``filename`` and return only the valid rows as a 2-D array.

    Validity: 10 <= temperature <= 60, growth rate >= 0, 1 <= bacteria <= 4.
    A message naming the offending column and 1-based row is printed for
    every violation.
    """
    raw = np.loadtxt(filename)
    # Start from a sentinel row so vstack always has a 2-D base; the
    # sentinel is sliced off before returning.
    valid = np.array([-1, -1, -1])
    for line_no, row in enumerate(raw, start=1):
        row_ok = True
        if row[0] < 10 or row[0] > 60:
            row_ok = False
            print('Error in row', line_no, 'Temperature')
        if row[1] < 0:
            row_ok = False
            print('Error in row', line_no, 'Growth Rate')
        if row[2] < 1 or row[2] > 4:
            row_ok = False
            print('Error in row', line_no, 'Bacteria')
        if row_ok:
            valid = np.vstack((valid, row))
    return valid[1:, :]
# Smoke test: validate and print the bundled sample data file.
print(dataLoad('testforreals.txt'))
d53efa37e7e9dc9cfe2549c3e7ccb62b42ead5d3 | Python | carhackpils/Clonesite | /clonesite.py | UTF-8 | 10,098 | 2.65625 | 3 | [] | no_license | import json
import sys
import urllib2
import re
import os
from HTMLParser import HTMLParser
import argparse
class htmltagparser(HTMLParser):
    """HTMLParser subclass that captures the attribute list of the last
    start tag fed to it (used to rewrite <form> tags)."""
    def __init__(self):
        self.reset()
        self.NEWATTRS = []  # (name, value) pairs of the most recent start tag
    def handle_starttag(self, tag, attrs):
        # Remember the attributes of the tag just parsed.
        self.NEWATTRS = attrs
    def clean(self):
        # Forget the attributes from the previous feed() call.
        self.NEWATTRS = []
class Cloner(object):
    """Recursively mirrors a remote web site into a local directory (Python 2).

    Starting from ``url``, it crawls same-site links up to ``maxdepth``,
    rewrites links to be mirror-relative, rewrites form tags, and can
    optionally strip <script> tags and hidden inputs.
    """
    def __init__(self, url, path, remove_js,remove_hidden, maxdepth=3, proxies=''):
        self.start_url = url                     # root URL being cloned
        self.path = os.getcwd() + "/" + path     # local output directory
        self.maxdepth = maxdepth                 # recursion depth limit
        self.seenurls = []                       # URLs already processed
        self.user_agent="Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)"
        self.proxies = {'http': proxies,'https': proxies}
        self.remove_js=remove_js                 # strip <script> blocks when True
        self.remove_hidden=remove_hidden         # strip hidden inputs when True
    # ######################################3
    # Utility Functions
    # ######################################3
    # http get request
    def get_url(self, url):
        """Fetch ``url`` through the configured proxy; return the body, or "" on HTTP error."""
        headers = { 'User-Agent' : self.user_agent }
        proxy = urllib2.ProxyHandler(self.proxies)
        opener = urllib2.build_opener(proxy)
        urllib2.install_opener(opener)
        try:
            req = urllib2.Request(url, None, headers)
            return urllib2.urlopen(req).read()
        except urllib2.HTTPError, e:
            print 'We failed with error code - %s.' % e.code
            # NOTE(review): both branches return "" -- the 404 special case
            # is currently a no-op.
            if e.code == 404:
                return ""
            else:
                return ""
    # download a binary file
    def download_binary(self, url):
        """Download a same-site binary asset and write it under the output path."""
        filename = ""
        if url.startswith(self.start_url):
            filename = url[len(self.start_url):]
        else:
            # Off-site asset: skip it.
            return
        data = self.get_url(url)
        if (data == ""):
            return
        self.write_outfile(data, filename)
        return
    # writeout a file
    def write_outfile(self, data, filename):
        """Write ``data`` to ``filename`` under self.path, creating directories.

        NOTE(review): opens in append mode ('a'), so re-runs append rather
        than overwrite -- confirm this is intended.
        """
        print "DLf = %s" % (filename)
        if filename.startswith("/"):
            filename = filename[1:]
        fullfilename = self.path + "/" + filename
        if not os.path.exists(os.path.dirname(fullfilename)):
            os.makedirs(os.path.dirname(fullfilename))
        print "WRITING OUT FILE [%s]" % (filename)
        f = open(fullfilename, 'a')
        f.write(data)
        f.close()
    # unique a list
    def unique_list(self, old_list):
        """Return a copy of ``old_list`` with duplicates removed, order preserved."""
        new_list = []
        if old_list != []:
            for x in old_list:
                if x not in new_list:
                    new_list.append(x)
        return new_list
    # ######################################3
    # html and link processing functions
    # ######################################3
    def find_forms(self, html):
        """Return the unique <form ...> opening tags found in ``html``."""
        form_regex = re.compile('<form[^>]+>')
        return self.unique_list(form_regex.findall(html))
    # convert all forms to contain hooks
    def process_forms(self, html, method="post", action="index"):
        """Rewrite every form tag to use the given ``method`` and ``action``,
        preserving all other attributes."""
        # find all forms in page
        forms = self.find_forms(html)
        parser = htmltagparser()
        # loop over each form
        for form in forms:
            print "FOUND A FORM [%s]" % (form)
            # parse out parts of old form tag
            parser.feed(form)
            attrs = parser.NEWATTRS
            parser.clean()
            # build new form
            new_form = "<form method=\"%s\" action=\"%s\"" % (method, action)
            for (name, value) in attrs:
                if ((name.lower() != "method") and (name.lower() != "action")):
                    new_form += " %s=\"%s\"" % (name, value)
            new_form += ">"
            print "REWROTE FORM TO BE [%s]" % (new_form)
            # rewrite html with new form
            html = html.replace(form, new_form)
        return html
    def process_js(self,html):
        """Strip <script>...</script> blocks from ``html``."""
        html = re.sub('<script(.|\s)*</script>', '', html)
        return html
    def process_hidden(self,html):
        """Strip hidden <input> elements from ``html``."""
        html = re.sub('<input type=\"hidden\"([^>]+)>', '', html)
        return html
    # build new list of only the link types we are interested in
    def process_links(self, links):
        """Filter ``links`` down to the file extensions worth mirroring."""
        new_links = []
        for link in links:
            print link
            link = link.lower()
            if (link.endswith(".css") or
                link.endswith(".html") or
                link.endswith(".php") or
                link.endswith(".asp") or
                link.endswith(".aspx") or
                link.endswith(".js") or
                link.endswith(".ico") or
                link.endswith(".png") or
                link.endswith(".jpg") or
                link.endswith(".jpeg") or
                link.endswith(".bmp") or
                link.endswith(".gif") or
                link.endswith(".eot")
                # ("." not in os.path.basename(link))
                ):
                new_links.append(link)
        return new_links
    # primary recersive function used to clone and crawl the site
    def clone(self, depth=0, url="", base="", method="post", action="index"):
        """Fetch ``url``, rewrite its links and forms, save it locally, and
        recurse into same-site text assets (binary assets are just downloaded)."""
        # early out if max depth is reached
        if (depth > self.maxdepth):
            print "MAX URL DEPTH [%s]" % (url)
            return
        # if no url is specified, then assume the starting url
        if (url == ""):
            url = self.start_url
        # if no base is specified, then assume the starting url
        if (base == ""):
            base = self.start_url
        # check to see if we have processed this url before
        if (url in self.seenurls):
            print "ALREADY SEEN URL [%s]" % (url)
            return
        else:
            self.seenurls.append(url)
        # get the url and return if nothing was returned
        html = self.get_url(url)
        if (html == ""):
            return
        # determine the websites script/filename
        filename = ""
        # we are only interested in urls on the same site
        if url.startswith(base):
            filename = url[len(base):]
            # if filename is blank, assume index.html
            if (filename == ""):
                filename = "index.html"
        else:
            print "BAD URL [%s]" % (url)
            return
        print "CLONING URL [%s]" % (url)
        # find links
        links = re.findall(r"<link.*?\s*href=\"(.*?)\".*?>", html)
        links += re.findall(r"<script.*?\s*src=\"(.*?)\".*?>", html)
        links += re.findall(r"<img.*?\s*src=\"(.*?)\".*?>", html)
        links += re.findall(r"\"(.*?)\"", html)
        links += re.findall(r"url\(\"?(.*?)\"?\);", html)
        links = self.process_links(self.unique_list(links))
        # loop over the links
        for link in links:
            link = link.lower()
            new_link = link
            # Normalize the link to an absolute URL.
            if link.startswith("http"):
                new_link = link
            elif link.startswith("//"):
                new_link = "http:" + link
            elif link.startswith("/"):
                new_link = base + link
            elif link.startswith("../"):
                new_link = base + "/" + link[3:]
            else:
                new_link = base + "/" + link
            # good_link is the site-relative form written into the saved HTML.
            good_link = new_link
            if (new_link.startswith(self.start_url)):
                good_link = new_link[len(self.start_url):]
            print "FOUND A NEW LINK [%s]" % (new_link)
            print "FOUND A NEW LINK * [%s]" % (good_link)
            # switch out new_link for link
            html = html.replace("\"" + link + "\"", "\"" + good_link + "\"")
            # determine is we need to call Clone recursively
            if (link.endswith(".css") or
                link.endswith(".html") or
                link.endswith(".php") or
                link.endswith(".asp") or
                link.endswith(".aspx") or
                link.endswith(".js")
                # ("." not in os.path.basename(link))
                ):
                # recursively call process_html on each non-image link
                if base != self.start_url:
                    self.clone(url=new_link, base=os.path.dirname(url), depth=depth+1)
                else:
                    self.clone(url=new_link, depth=depth+1)
            else:
                # must be a binary file, so just download it
                print "downloading %s" % (new_link)
                self.download_binary(new_link)
        # update any forms within the page
        if self.remove_hidden:
            print "REMOVING HIDDEN INPUTS"
            html = self.process_hidden(html)
        if self.remove_js:
            print "REMOVING SCRIPT TAGS"
            html = self.process_js(html)
        html = self.process_forms(html, action=action)
        # write out the html for the page we have been processing
        self.write_outfile(html, filename)
        return
# Command-line entry point: parse options and run the cloner once.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Website cloner.')
    parser.add_argument('-p', action='store', dest='proxies', help='ip:port')
    parser.add_argument('-o', action='store', dest='folder',default='out', help='Output folder')
    parser.add_argument('-d', action='store', dest='maxdepth',default=3,type=int, help='Depth of url (default 3)')
    parser.add_argument('-a', action='store', dest='action',default='index', help='Default action of the found forms')
    parser.add_argument('-m', action='store', dest='method',default='post', help='Method used in the found forms')
    parser.add_argument('-x', action='store_true',dest='remove_hidden',default=False,help='Remove hidden inputs (default False)')
    parser.add_argument('-j', action='store_true',dest='remove_js',default=False,help='Remove scripts inputs (default False)')
    parser.add_argument('url',action='store')
    result = parser.parse_args()
    print result
    c = Cloner(result.url, result.folder, result.remove_js, result.remove_hidden, maxdepth=result.maxdepth,proxies=result.proxies)
    c.clone(method=result.method, action=result.action)
| true |
d68b03668f6c1ae8668b5c4c308912bcc09e0e55 | Python | danielcelin/nomina_helper | /main.py | UTF-8 | 1,361 | 2.640625 | 3 | [] | no_license | from PyQt4 import QtGui
import sys
from nomina_helper import NominaHelper
from modelo.configuracion_nomina import ConfiguracionNomina
def main():
    """Launch the payroll helper GUI with the default 2015 configuration."""
    app = QtGui.QApplication(sys.argv)
    # Constants valid for the year 2015
    smlv = int(644350)
    auxilio_transporte = int(74000)
    # Default configuration
    configuracion_por_defecto = ConfiguracionNomina(
        "errores.txt", # Name of the errors file
        "log.txt", # Name of the program's operation log file
        "liquidacion.liq", # Name of the file that will contain the payroll settlement
        2, # Minimum number of lines for a payroll file
        smlv, # Current Legal Minimum Wage (SMLV), 2015
        auxilio_transporte, # Transport allowance, 2015
        2 * smlv, # Transport allowance is only granted when earning at most 2 SMLV
        4 * smlv, # Those earning 4 SMLV or more contribute 1% to the pension solidarity fund
        0.01, # Deduction percentage for the pension solidarity fund contribution.
        0.00522 # Occupational risk (ARL) percentage. Adjust according to the risk class defined at: https://www.positiva.gov.co/ARL/Paginas/default.aspx
    )
    app_gui = NominaHelper(configuracion_por_defecto)
    app_gui.show()
    app.exec_()
if __name__ == "__main__":
    main()
| true |
8e48e29b24e3eab0c55e3d2f8ad32a50443b10ae | Python | guyuejia/douban | /movie/getmovieinfo.py | UTF-8 | 1,814 | 3.203125 | 3 | [] | no_license | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
def getMovieInfos(movieId):
    """Scrape director, cast and genre for a Douban movie page via headless Chrome.

    Returns a dict keyed by the Chinese labels 导演 (director), 演员 (actors)
    and 类型 (genres), each mapping to a list of strings.
    """
    infosDict={}
    movieURL = "https://movie.douban.com/subject/" + str(movieId)
    option = webdriver.ChromeOptions()
    option.add_argument('headless')
    browser = webdriver.Chrome("D:\\test\\chromedriver.exe",options=option)
    # Set the page-load and script-execution timeouts
    browser.set_page_load_timeout(20)
    browser.set_script_timeout(20)
    try:
        browser.get(movieURL)
    except TimeoutException:
        print("加载过慢")
        # Stop loading and work with whatever rendered so far.
        browser.execute_script('window.stop()')
    # Locate the page element holding the film's basic information
    infoElement = browser.find_element_by_id("info")
    # Extract the director
    director = infoElement.find_element_by_xpath("//span//a[@rel='v:directedBy']").text
    infosDict["导演"] = [director]
    # Extract the cast; the raw text looks like 'A / B / C / more...'
    actorStr = infoElement.find_element_by_xpath("//span[@class='actor']/span[@class='attrs']").text
    # Normalize the actor list
    actorList = []
    for actor in actorStr.split("/"):
        actorList.append(actor.strip())
    # Slice off the trailing "more" entry
    actorList = actorList[0:-1]
    infosDict["演员"] = actorList
    # Extract the genres (war, romance, drama, etc.)
    typeElement = infoElement.find_elements_by_xpath("//span[@property='v:genre']")
    types = []
    for type in typeElement:
        types.append(type.text)
    infosDict["类型"] = types
    browser.quit()
    return infosDict
# Demo: scrape one movie and print each field as "label:comma,separated,values".
if __name__ == "__main__":
    movieId = "26363254"
    infosDict = getMovieInfos(movieId)
    for key in infosDict.keys():
        print(key + ":" + ",".join(infosDict[key]))
| true |
766af3095c629fd0cb84d64f6628fd2a07d30608 | Python | aldeano19/databucket | /scrapers/ProductRepository.py | UTF-8 | 3,314 | 2.640625 | 3 | [
"Apache-2.0"
] | permissive | import requests
import json
import yaml
import urllib
import GlobalUtil
class ProductRepository():
    """HTTP client for the items REST service (Python 2, uses requests)."""
    def __init__(self, domain, port, base_path):
        self.domain = domain
        self.port = ":"+str(port)
        self.base_path = base_path
        # URL templates for the /items endpoints.
        self.items_base_url = "/items"
        self.update_items_url = "/%s"
        self.patch_items_availability_url = "/availability/%s"
        self.get_items_with_name_url = "/withName/%s"
        self.get_items_by_id_url = "/%s" # takes an item's mongodb id
        self.get_items_urls = "/getUrlsMap"
        self.patch_items_availability = "/%s" # takes an item's name
        self.filter_items_url = "/filter"
    def create_new_item(self, item):
        """POST a new item; ``item`` is sent as query parameters."""
        # item_dict = item.__dict__
        url = self.domain + \
            self.port + \
            self.base_path + \
            self.items_base_url
        return requests.post(url, params=item)
    def get_products_urls(self):
        """GET the map of product URLs."""
        url = self.domain + \
            self.port + \
            self.base_path + \
            self.items_base_url + \
            self.get_items_urls
        return requests.get(url)
    def filter_items(self, filters):
        """GET items matching ``filters`` (sent as query parameters)."""
        url = self.domain + \
            self.port + \
            self.base_path + \
            self.items_base_url + \
            self.filter_items_url
        return requests.get(url, params=filters)
    def get_items(self):
        """GET all items."""
        url = self.domain + \
            self.port + \
            self.base_path + \
            self.items_base_url
        return requests.get(url)
    def update_item(self, item):
        """PUT an update for ``item``; its 'id' selects the resource."""
        # item_dict = item.__dict__
        url = self.domain + \
            self.port + \
            self.base_path + \
            self.items_base_url + \
            self.update_items_url % (item["id"])
        return requests.put(url, params=item)
    def patch_availability(self, item_name, price_update_map):
        """PATCH price/availability data for the named item (JSON body)."""
        url = self.domain + \
            self.port + \
            self.base_path + \
            self.items_base_url
        headers = {
            "Content-Type": "application/json"
        }
        params={
            "itemName":item_name
        }
        price_update_map = json.dumps(price_update_map)
        print "url:",url
        print "prams:",params
        print "data:",price_update_map
        return requests.patch(url, params=params, data=price_update_map, headers=headers)
# UNIT TEST CLASS #
import unittest
class TestProductRepository(unittest.TestCase):
    """Integration test against the live REST endpoint from GlobalUtil config."""
    def setUp(self):
        self.rest_connection = GlobalUtil.get_rest_env()
        self.product_repository = ProductRepository(
            self.rest_connection["domain"],
            self.rest_connection["port"],
            self.rest_connection["base_path"])
    def test_filter_items(self):
        """Filtering by store should return fewer items than the full list."""
        filters = {"store":"BJS"}
        results = self.product_repository.filter_items(filters).json()
        bad_results = self.product_repository.get_items().json()
        print len(bad_results)
        print len(results)
        self.assertNotEqual( len(results), len(bad_results))
if __name__ == '__main__':
    unittest.main()
| true |
816ec79c423c5312b0cd38185b524e2c7fa8e480 | Python | shubhamrauniyar/angela | /libs/algorithms/maddpg_v2/training.py | UTF-8 | 3,141 | 2.8125 | 3 | [
"MIT"
] | permissive | """
Training loop.
"""
import numpy as np
import torch
import libs.statistics
def train(environment, agent, n_episodes=10000, max_t=1000,
          render=False,
          solve_score=0.5):
    """Multi-agent DDPG training loop with periodic checkpointing.

    Params
    ======
        environment: environment object (reset()/step() interface)
        agent: multi-agent object wrapping two DDPG agents
        n_episodes (int): maximum number of training episodes
        max_t (int): maximum number of timesteps per episode
        render (bool): accepted for interface compatibility; currently unused
        solve_score (float): criteria for considering the environment solved

    Checkpoints actor/critic weights every 100 episodes and once solved.
    """
    stats = libs.statistics.MultiAgentDDPGv2Stats()
    stats_format = 'Buffer: {:6} NoiseW: {:.4}'
    for i_episode in range(1, n_episodes+1):
        rewards = []
        state = environment.reset()
        # loop over steps
        for t in range(1, max_t+1):
            # select an action
            if agent.evaluation_only: # disable noise on evaluation
                action = agent.act(state, add_noise=False)
            else:
                action = agent.act(state)
            # take action in environment
            next_state, reward, done = environment.step(action)
            # update agent with returned information
            agent.step(state, action, reward, next_state, done)
            state = next_state
            rewards.append(reward)
            # Episode ends as soon as any agent reports done.
            if any(done):
                break
        # every episode
        buffer_len = len(agent.memory)
        per_agent_rewards = [] # calculate per agent rewards
        for i in range(agent.n_agents):
            per_agent_reward = 0
            for step in rewards:
                per_agent_reward += step[i]
            per_agent_rewards.append(per_agent_reward)
        stats.update(t, [np.max(per_agent_rewards)], i_episode) # use max over all agents as episode reward
        stats.print_episode(i_episode, t, stats_format, buffer_len, agent.noise_weight,
                            agent.agents[0].critic_loss, agent.agents[1].critic_loss,
                            agent.agents[0].actor_loss, agent.agents[1].actor_loss,
                            agent.agents[0].noise_val, agent.agents[1].noise_val,
                            per_agent_rewards[0], per_agent_rewards[1])
        # every epoch (100 episodes)
        if i_episode % 100 == 0:
            stats.print_epoch(i_episode, stats_format, buffer_len, agent.noise_weight)
            save_name = 'checkpoints/episode.{}.'.format(i_episode)
            for i, save_agent in enumerate(agent.agents):
                torch.save(save_agent.actor_local.state_dict(), save_name + str(i) + '.actor.pth')
                torch.save(save_agent.critic_local.state_dict(), save_name + str(i) + '.critic.pth')
        # if solved
        if stats.is_solved(i_episode, solve_score):
            stats.print_solve(i_episode, stats_format, buffer_len, agent.noise_weight)
            save_name = 'checkpoints/solved.'
            for i, save_agent in enumerate(agent.agents):
                torch.save(save_agent.actor_local.state_dict(), save_name + str(i) + '.actor.pth')
                torch.save(save_agent.critic_local.state_dict(), save_name + str(i) + '.critic.pth')
            break
| true |
42564ce9cb417a779322afdb42b62804d244f9da | Python | animeshroy/automate_cowin_appointment | /login.py | UTF-8 | 1,726 | 2.515625 | 3 | [
"MIT"
] | permissive | import requests,sys
import json, hashlib
from beneficiary import fetch_beneficiaries
MOBILE_NUMBER = ""
SECRET = "U2FsdGVkX19c2O9OPU***********t4L4r5Nm1qIQRL7y6JhEf3vf6NBZveF00tMfHnP/16Og==" #Check from browser network @https://cdn-api.co-vin.in/api/v2/auth/generateMobileOTP call
def generate_otp():
    """Request an SMS OTP for MOBILE_NUMBER from the CoWIN API.

    Returns the parsed JSON response (expected to contain the txnId
    needed to validate the OTP).
    """
    GENERATE_URL = "https://cdn-api.co-vin.in/api/v2/auth/generateMobileOTP"
    with requests.session() as session:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
        data = {'mobile':MOBILE_NUMBER, 'secret':SECRET}
        response = session.post(GENERATE_URL, headers=headers, data=json.dumps(data))
        response = response.json()
        print(response)
        return response
def confirm_otp(retry = 0):
    """Prompt for the received OTP and validate it against the CoWIN API.

    The OTP is SHA-256 hashed before submission, as the API requires.
    On HTTP 401 (wrong OTP) the whole flow retries recursively, up to
    3 attempts, then exits the process.
    """
    CONFIRM_OTP = "https://cdn-api.co-vin.in/api/v2/auth/validateMobileOtp"
    data = generate_otp()
    print("OTP send to your registered mobile number")
    otp_number = input("\nEnter OTP:: ")
    otp_number = hashlib.sha256(str(otp_number).encode())
    enrcypted_otp = otp_number.hexdigest()
    with requests.session() as session:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.76 Safari/537.36'}
        otp = {'otp':enrcypted_otp}
        # Merge the generate-OTP response (txnId etc.) with the hashed OTP.
        otp_data ={**data, **otp}
        response = session.post(CONFIRM_OTP, headers=headers, data=json.dumps(otp_data))
        if response.status_code == 401:
            print("incorrect OTP retry again..")
            if retry > 2:
                print("3 Retires exhuasted..try again later")
                sys.exit()
            response = confirm_otp(retry+1)
        else:
            response = response.json()
        print(response)
        return response
| true |
996011778054cc15b4102d2dff115357ca3fcb9f | Python | Tel-las/AABIO | /Class_2/Trie_complete.py | UTF-8 | 3,336 | 4.09375 | 4 | [] | no_license | # -*- coding: utf-8 -*-
class Trie:
    """Prefix tree over a set of patterns.

    Nodes are numbered; ``self.nodes`` maps each node id to a dict of
    outgoing edges (symbol -> child node id). Node 0 is the root, and a
    node with no outgoing edges is a leaf, i.e. the end of a pattern.
    """

    def __init__(self):
        self.nodes = {0: {}}  # node id -> {symbol: child id}; 0 is the root
        self.num = 0          # id of the most recently created node

    def print_trie(self):
        """Print every node id with its outgoing edges."""
        for node_id in self.nodes.keys():
            print(node_id, "->", self.nodes[node_id])

    def add_node(self, origin, symbol):
        """Create a fresh node reached from ``origin`` via ``symbol``."""
        self.num += 1
        self.nodes[origin][symbol] = self.num
        self.nodes[self.num] = {}

    def add_pattern(self, p):
        """Insert pattern ``p``, creating nodes only where the path is missing."""
        node = 0
        for symbol in p:
            if symbol not in self.nodes[node]:
                self.add_node(node, symbol)
            node = self.nodes[node][symbol]

    def trie_from_patterns(self, pats):
        """Build the trie by inserting every pattern in ``pats``."""
        for pattern in pats:
            self.add_pattern(pattern)

    def prefix_trie_match(self, text):
        """Return the pattern that is a prefix of ``text``, or None.

        A match is only reported when a leaf is reached, i.e. a complete
        pattern has been consumed.
        """
        node = 0
        match = ""
        for symbol in text:
            edges = self.nodes[node]
            if symbol not in edges:
                return None
            node = edges[symbol]
            match += symbol
            if self.nodes[node] == {}:  # leaf: a whole pattern matched
                return match
        return None

    def trie_matches(self, text):
        """Return (start, pattern) for every position of ``text`` where a
        pattern begins."""
        hits = []
        for start in range(len(text)):
            found = self.prefix_trie_match(text[start:])
            if found is not None:
                hits.append((start, found))
        return hits
def test():
    """Build a small trie from three patterns and dump its structure."""
    patterns = ["GAT", "CCT", "GAG"]
    t = Trie() #Inicializar a classe
    t.trie_from_patterns(patterns)
    t.print_trie()
def test2():
    """Build a larger trie and exercise prefix matching and full-text search."""
    patterns = ["AGAGAT", "AGC", "AGTCC", "CAGAT", "CCTA", "GAGAT", "GAT", "TC"]
    t = Trie() # initialize the class
    t.trie_from_patterns(patterns) # build the tree from the patterns
    print (t.prefix_trie_match("GAGATCCTA")) # test a pattern at the first position
    print (t.trie_matches("GAGATCCTA")) # test patterns at every start position
# Run both demos when the module is executed.
test()
print()
test2()
e7522d9a137a8bcbbbf3b15f2a1929dd54b2cc98 | Python | stevevista/nbla-go-zero | /nnabla/build-tools/code_generator/load_implements_rst.py | UTF-8 | 2,184 | 2.53125 | 3 | [
"Apache-2.0"
] | permissive | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import sys
import re
import os
class Implements:
    """Parses doc/implements.rst (an rst list-table) into ``self.info``:
    an OrderedDict mapping function name -> {implementation: status}.
    """
    def __init__(self):
        base = os.path.abspath(os.path.dirname(os.path.abspath(__file__)) + '/../..')
        filename = '{}/doc/implements.rst'.format(base)
        # NOTE(review): error_exists is never set to True anywhere below,
        # so the exit(-1) branch is currently unreachable -- confirm intent.
        error_exists = False
        info = collections.OrderedDict()
        with open(filename, 'r') as f:
            list_num = -1   # index of the current list-table row (row 0 = header)
            list_line = 0   # index of the current cell within the row
            implements = [] # column headers: the implementation names
            func_name = None
            for n, l in enumerate(f.readlines()):
                l = l.rstrip()
                # Match rst list-table cells: "  * - text" starts a row,
                # "    - text" continues it.
                m = re.match(r'^ ([\s\*]) -\s*(.*)$', l)
                if m:
                    if m.group(1) == '*':
                        # First cell of a new row.
                        list_num += 1
                        list_line = 0
                        if list_num > 0:
                            func_name = m.group(2)
                            info[func_name] = collections.OrderedDict()
                    else:
                        if list_num == 0:
                            # Header row: collect implementation column names.
                            implements.append(m.group(2))
                        else:
                            # Data row: non-empty cells record a status.
                            if m.group(2) != '':
                                info[func_name][implements[list_line - 1]] = m.group(2)
                    list_line += 1
        if error_exists:
            exit(-1)
        self.info = info
# Main command
# ===============
def main():
    """Parse implements.rst and print the extracted info for inspection."""
    import pprint
    pp = pprint.PrettyPrinter(indent=2)  # NOTE(review): pp is currently unused
    f = Implements()
    print('Info Keys = {}'.format(f.info.keys()))
    for k, v in f.info.items():
        print(k, v)
if __name__ == '__main__':
    main()
| true |
554bb3299891a64f509ea9c05d9617e67305d38c | Python | daniel-reich/ubiquitous-fiesta | /GPibesdkGf433vHBX_18.py | UTF-8 | 361 | 2.71875 | 3 | [] | no_license |
def goldbach_conjecture(n):
    """Return [p, q] with p + q == n and both prime, smallest p first.

    Returns [] for n <= 2, for odd n (the even Goldbach conjecture does
    not apply), and — defensively — if no pair is found.
    """
    def _is_prime(num):
        # Trial division up to sqrt(num); avoids float precision issues
        # by comparing d*d against num.
        if num < 2:
            return False
        d = 2
        while d * d <= num:
            if num % d == 0:
                return False
            d += 1
        return True

    if n <= 2:
        return []
    if n % 2 == 1:
        return []
    for p in range(2, n):
        if _is_prime(p) and _is_prime(n - p):
            return [p, n - p]
    # Unreachable for even n > 2 if Goldbach's conjecture holds; the
    # original fell through returning None implicitly.
    return []
def is_prime(num):
    """Return True iff ``num`` is prime.

    Improvement: trial division only up to sqrt(num) — the original
    iterated all the way to num, making the check O(n) instead of O(sqrt n).
    Behavior is identical for every integer input (num < 2 is not prime).
    """
    if num < 2:
        return False
    d = 2
    while d * d <= num:  # d*d <= num avoids float sqrt precision issues
        if num % d == 0:
            return False
        d += 1
    return True
| true |
c72766bfe635124b27f7b044f8d0ba03656c1b5b | Python | anveshduddu/hackerrank--30---days---of---code | /Day 7: Arrays | UTF-8 | 176 | 2.578125 | 3 | [] | no_license | #!/bin/python
import math
import os
import random
import re
import sys
# HackerRank Day 7 (Python 2): read n integers and print them in reverse,
# space-separated. NOTE(review): py2-only — raw_input, list-returning map,
# and the trailing-comma print statement.
n=int(input())
arr = map(int, raw_input().split())
arr.reverse()
for i in range(n):
    print (arr[i]),
| true |
94e93da45a48f546bfb60ea8f29504c85de4ea75 | Python | deckardmehdy/coursera | /Course1/Week3/clicks.py | UTF-8 | 220 | 2.828125 | 3 | [] | no_license | # Uses python3
import numpy as np
# Start of Function
# Read the count of ad slots, then two integer sequences from stdin.
ads = int(input())
A = np.sort([int(x) for x in input().split()])
B = np.sort([int(x) for x in input().split()])
# Reshape B into a column vector so np.dot(A, B) yields the scalar
# sum(A[i] * B[i]) over the sorted pairs.
B = np.reshape(B,(ads,1))
# Both sequences sorted ascending: pairing i-th with i-th maximizes the
# dot product (rearrangement inequality).
print(int(np.dot(A,B)))
| true |
6e75f6b36769d31ad47796d70d0dfb92e5039c60 | Python | SachinPitale/Python | /ex30.py | UTF-8 | 459 | 3.890625 | 4 | [] | no_license |
people = raw_input("how many people ")
cars = raw_input("how many cars ")
buses = raw_input("how many buses ")
#people = 30
#cars = 40
#buses = 15
if cars > people :
print "cars is greater than people"
elif cars < people:
print "cars is less than people"
else:
print "cars and buses are same number"
if buses > cars :
print "buses is greater than cars"
elif buses < cars :
print "buses is less than cars"
else :
print "bues is same as cars"
| true |
63469c2a2d1f88c71e206632d0d9aba8c314f84c | Python | danieljurczak/sushi_game_bot | /code.py | UTF-8 | 10,937 | 2.65625 | 3 | [] | no_license | import PIL.ImageGrab as ImageGrab
import os
import time
import win32api, win32con
import PIL.ImageOps as ImageOps
from numpy import *
"""
All coordinates assume a screen resolution of 2560x1080, and Chrome
maximized with the Bookmarks Toolbar disabled.
x_pad = 321
y_pad = 144
Play area = x_pad+1, y_pad+1, 959, 623
"""
# Globals
# ------------------
x_pad = 321
y_pad = 144
foodOnHand = {'shrimp':5,
'rice':10,
'nori':10,
'roe':10,
'salmon':5,
'unagi':5}
sushiTypes = {2670:'onigiri',
3143:'caliroll',
2677:'gunkan'}
class Blank:
seat_1 = 8119
seat_2 = 5986
seat_3 = 11596
seat_4 = 10613
seat_5 = 7286
seat_6 = 9119
class Cord:
f_shrimp = (33, 330)
f_rice = (91, 333)
f_nori = (22, 392)
f_roe = (91, 388)
f_salmon = (14, 441)
f_unagi = (99, 435)
#--------------------------------
phone = (589, 355)
menu_toppings = (556, 270)
t_shrimp = (507, 218)
t_nori = (577, 224)
t_roe = (491, 273)
t_salmon = (566, 274)
t_unagi = (498, 334)
t_exit = (587, 330)
menu_rice = (540, 292)
buy_rice = (546, 272)
delivery_norm = (498, 292)
def clear_tables():
    """Click each of the six table positions to clear finished plates."""
    table_spots = ((75, 207), (171, 204), (275, 208),
                   (370, 207), (480, 205), (580, 213))
    for spot in table_spots:
        mousePos(spot)
        leftClick()
    time.sleep(1)
def makeFood(food):
    """Assemble the requested sushi by clicking its ingredients and folding the mat.

    Decrements the global ``foodOnHand`` tally for every ingredient used.
    The three hand-unrolled branches of the original were collapsed into a
    recipe table; click order and timing are preserved.  (The original
    onigiri branch had one extra redundant ``time.sleep(.05)`` after
    folding, which is dropped here.)
    """
    # Ingredient click sequence for each dish the bot knows how to make.
    recipes = {
        'caliroll': ('rice', 'nori', 'roe'),
        'onigiri': ('rice', 'rice', 'nori'),
        'gunkan': ('rice', 'nori', 'roe', 'roe'),
    }
    ingredient_cords = {
        'rice': Cord.f_rice,
        'nori': Cord.f_nori,
        'roe': Cord.f_roe,
    }
    if food not in recipes:
        return
    print('Making a %s' % food)
    sequence = recipes[food]
    for i, ingredient in enumerate(sequence):
        foodOnHand[ingredient] -= 1
        mousePos(ingredient_cords[ingredient])
        leftClick()
        # short pause between ingredient clicks, slightly longer before folding
        time.sleep(.1 if i == len(sequence) - 1 else .05)
    foldMat()
    time.sleep(1.5)
def buyFood(food):
    """Order more of an ingredient over the in-game phone.

    Opens the phone, navigates to the item's menu, and inspects the buy
    button's pixel color to decide whether the supplier has stock:
      - in stock: place a normal delivery and credit foodOnHand[food] += 10;
      - sold out: close the menu, wait a second, and retry recursively.

    The three copy-pasted branches of the original were collapsed into a
    catalog table.  The stray debug ``print('test')`` in the nori branch was
    removed, and the short post-screengrab sleep is now applied uniformly
    (the original rice branch skipped it).
    """
    # food -> (menu coordinate, buy-button coordinate, RGB shown when sold out)
    catalog = {
        'rice': (Cord.menu_rice, Cord.buy_rice, (127, 127, 127)),
        'nori': (Cord.menu_toppings, Cord.t_nori, (108, 108, 79)),
        'roe': (Cord.menu_toppings, Cord.t_roe, (255, 252, 172)),
    }
    if food not in catalog:
        return
    menu_cord, item_cord, sold_out_color = catalog[food]
    mousePos(Cord.phone)
    time.sleep(.1)
    leftClick()
    mousePos(menu_cord)
    time.sleep(.05)
    leftClick()
    s = screenGrab()
    time.sleep(.1)
    if s.getpixel(item_cord) != sold_out_color:
        print('%s is available' % food)
        mousePos(item_cord)
        time.sleep(.1)
        leftClick()
        mousePos(Cord.delivery_norm)
        foodOnHand[food] += 10
        time.sleep(.1)
        leftClick()
        time.sleep(2.5)
    else:
        print('%s is NOT available' % food)
        mousePos(Cord.t_exit)
        leftClick()
        time.sleep(1)
        buyFood(food)  # retry until the supplier restocks
def checkFood():
    """Re-order any purchasable ingredient whose stock is down to 4 or less."""
    for name, amount in foodOnHand.items():
        if name in ('nori', 'rice', 'roe') and amount <= 4:
            print('%s is low and needs to be replenished' % name)
            buyFood(name)
def _read_seat(x_offset, label):
    """Grab one seat's order-bubble region and return its grayscale signature.

    The signature is the sum of the (count, color) histogram returned by
    ``im.getcolors()``; the cropped grab is also saved to disk for debugging,
    tagged with the seat label and a timestamp, exactly as the original
    per-seat functions did.
    """
    box = (x_pad + x_offset, y_pad + 60, x_pad + x_offset + 63, y_pad + 60 + 16)
    im = ImageOps.grayscale(ImageGrab.grab(box))
    a = array(im.getcolors())
    a = a.sum()
    print(a)
    im.save(os.getcwd() + '\\seat_' + label + '__' + str(int(time.time())) + '.png', 'PNG')
    return a

# The six seats are evenly spaced 101 px apart; each wrapper keeps its
# original public name so existing callers are unaffected.
def get_seat_one():
    return _read_seat(24, 'one')
def get_seat_two():
    return _read_seat(125, 'two')
def get_seat_three():
    return _read_seat(226, 'three')
def get_seat_four():
    return _read_seat(327, 'four')
def get_seat_five():
    return _read_seat(428, 'five')
def get_seat_six():
    return _read_seat(529, 'six')
def get_all_seats():
    """Sample every seat once (signatures are printed/saved, not returned)."""
    get_seat_one()
    get_seat_two()
    get_seat_three()
    get_seat_four()
    get_seat_five()
    get_seat_six()
def foldMat():
    # Click just to the right of the rice bin to trigger the mat-fold animation.
    mousePos((Cord.f_rice[0]+40,Cord.f_rice[1]))
    leftClick()
    time.sleep(.1)
def mousePos(cord):
    # Translate game-relative coordinates into absolute screen coordinates.
    win32api.SetCursorPos((x_pad + cord[0],y_pad + cord[1]))
def leftClick():
    # Press and release the left mouse button at the current cursor position.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
    time.sleep(.1)
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
    print("Click.") #completely optional. But nice for debugging purposes.
def leftDown():
    # Press (and hold) the left mouse button.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN,0,0)
    time.sleep(.1)
    print('left Down')
def leftUp():
    # Release the left mouse button.
    win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP,0,0)
    time.sleep(.1)
    print('left release')
def get_cords():
    # Print the cursor position in game-relative coordinates (calibration helper).
    x, y = win32api.GetCursorPos()
    x = x - x_pad
    y = y - y_pad
    print(x,y)
def screenGrab():
    # Capture the 640x480 play area and return it as a PIL image.
    box = (x_pad + 1,y_pad+1,x_pad+640,y_pad+480)
    im = ImageGrab.grab(box)
    ##im.save(os.getcwd() + '\\Snap__' + str(int(time.time())) + '.png', 'PNG')
    return im
def grab():
    # Capture the play area and return its grayscale color-histogram sum,
    # the same signature scheme used by the per-seat readers.
    box = (x_pad + 1,y_pad+1,x_pad+640,y_pad+480)
    im = ImageOps.grayscale(ImageGrab.grab(box))
    a = array(im.getcolors())
    a = a.sum()
    print(a)
    return a
def startGame():
    """Click through the four successive menu screens that precede a new game."""
    menu_clicks = ((321, 198), (304, 394), (588, 458), (302, 374))
    for cord in menu_clicks:
        mousePos(cord)
        leftClick()
        time.sleep(.1)
def check_bubs():
    """Scan every seat once and cook whatever each customer's bubble requests.

    Ingredient stock is checked before each seat, and tables are cleared
    after seats 1, 4 and 6 (the original cadence) so finished plates do not
    block new customers.  The six copy-pasted seat sections were collapsed
    into a loop; this also fixes the original bug where seat six printed
    'table 1 is occupied'.
    """
    seat_readers = (
        (1, get_seat_one, Blank.seat_1),
        (2, get_seat_two, Blank.seat_2),
        (3, get_seat_three, Blank.seat_3),
        (4, get_seat_four, Blank.seat_4),
        (5, get_seat_five, Blank.seat_5),
        (6, get_seat_six, Blank.seat_6),
    )
    for table_no, read_seat, empty_signature in seat_readers:
        checkFood()
        signature = read_seat()
        if signature != empty_signature:
            if signature in sushiTypes:
                print('table %i is occupied and needs %s'
                      % (table_no, sushiTypes[signature]))
                makeFood(sushiTypes[signature])
            else:
                print('sushi not found!\n sushiType = %i' % signature)
        else:
            print('Table %i unoccupied' % table_no)
        if table_no in (1, 4, 6):
            clear_tables()
def main():
    """Top-level bot loop: start a game, then serve customers forever."""
    startGame()
    while True:
        check_bubs()
if __name__ == '__main__':
main() | true |
b254dc355e8d60d871045af30b86f333e1ed2cf6 | Python | silky/bell-ppls | /env/lib/python2.7/site-packages/observations/r/marathon.py | UTF-8 | 1,758 | 2.609375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import numpy as np
import os
import sys
from observations.util import maybe_download_and_extract
def marathon(path):
  """Marathon

  Training records for a marathon runner

  A dataset with 1128 observations on the following 9 variables.

  `Date`
     Training date

  `Miles`
     Miles for training run

  `Time`
     Training time (in minutes:seconds:hundredths)

  `Pace`
     Running pace (in minutes:seconds:hundredths per mile)

  `ShoeBrand`
     `Addidas`, `Asics`, `Brooks`, `Izumi`, `Mizuno`, or
     `New Balance`

  `TimeMin`
     Training time (in minutes)

  `PaceMin`
     Running pace (in minutes per mile)

  `Short`
     `1`\ = 5 miles or less or `0`\ =more than 5 miles

  `After2004`
     `1`\ = for runs after 2004 or `0`\ =for earlier runs

  Args:
    path: str.
      Path to directory which either stores file or otherwise file will
      be downloaded and extracted there.
      Filename is `marathon.csv`.

  Returns:
    Tuple of np.ndarray `x_train` with 1127 rows and 9 columns and
    dictionary `metadata` of column headers (feature names).
  """
  import pandas as pd
  path = os.path.expanduser(path)
  filename = 'marathon.csv'
  # Download and cache the CSV on first use only.
  if not os.path.exists(os.path.join(path, filename)):
    url = 'http://dustintran.com/data/r/Stat2Data/Marathon.csv'
    maybe_download_and_extract(path, url,
                               save_file_name='marathon.csv',
                               resume=False)

  # First CSV column is the row index; date-like columns are parsed eagerly.
  data = pd.read_csv(os.path.join(path, filename), index_col=0,
                     parse_dates=True)
  x_train = data.values
  metadata = {'columns': data.columns}
  return x_train, metadata
| true |
46b57b1d4b50829b67472e9883c3f71881bcd343 | Python | vfxetc/uitools | /uitools/checkbox.py | UTF-8 | 999 | 2.53125 | 3 | [] | no_license | from .qt import QtGui
class CollapseToggle(QtGui.QCheckBox):
    """Checkbox that paints its indicator as a collapse/expand arrow:
    down-arrow when checked, right-arrow when unchecked."""

    def paintEvent(self, e):
        paint = QtGui.QStylePainter(self)
        option = QtGui.QStyleOptionButton()
        self.initStyleOption(option)

        # Paint the normal control.
        paint.drawControl(QtGui.QStyle.CE_CheckBox, option)

        # Re-use the style option, it contains enough info to make sure the
        # button is correctly checked
        option.rect = self.style().subElementRect(QtGui.QStyle.SE_CheckBoxIndicator, option, self)

        # Erase the checkbox...
        # (fill the indicator rect with a pixmap of the widget background so
        # the just-drawn box appears erased)
        paint.save();
        px = QtGui.QPixmap(option.rect.width(), option.rect.height())
        px.fill(self, option.rect.left(), option.rect.top())
        brush = QtGui.QBrush(px)
        paint.fillRect(option.rect, brush)
        paint.restore()

        # ... and replace it with an arrow button.
        paint.drawPrimitive(QtGui.QStyle.PE_IndicatorArrowDown if self.isChecked() else QtGui.QStyle.PE_IndicatorArrowRight, option)
| true |
132367c62db58663311947b9a5fe325371b21318 | Python | mfrasca/fibra | /tests/howto_receive_lines.py | UTF-8 | 533 | 2.671875 | 3 | [
"MIT"
] | permissive | from __future__ import print_function
import fibra
import fibra.net
def task(sock, address):
    # Connection handler installed for every accepted socket.
    # Create a task which yields complete lines read from the socket.
    line_receiver = fibra.net.recv_lines(sock)
    while True:
        # Yield control to the scheduler until the next line arrives.
        line = yield line_receiver
        print("Received:", line)

def main():
    s = fibra.schedule()
    # Install a listener that spawns a new `task` for each new connection.
    s.install(fibra.net.listen(("localhost", 2000), task))
    s.run()

if __name__ == '__main__':
    main()
| true |
2b37a665f43ffd06daa4af275776b329ba79d71f | Python | jiwon-0129/likelion_jw | /파이썬과제3_jiwon.py | UTF-8 | 556 | 3.828125 | 4 | [] | no_license | class char:
def __init__(self, name, age, sex):
self.name = name
self.age = age
self.sex = sex
def __str__(self):
sen = self.name +"는 "+str(self.age)+"세 이고, "+self.sex+" 입니다."
return sen
# Demo: build several well-known cartoon characters and print each profile.
zzanggu = char("짱구", 5, "남자")
print(zzanggu)
dora = char("도라에몽", 14, "남자")
print(dora)
konan = char("코난",8, "남자")
print(konan)
shoc = char("쇼콜라", 15, "여자")
print(shoc)
amu = char("아무", 12, "여자")
print(amu)
gayoung = char("가영", 16, "여자")
print(gayoung)
| true |
cf6322677a29ee08d3e1dd914cf0e44fdce94417 | Python | wenbinzhang-be/Gitpy | /Python/面向对象基础/带参数的__init__().py | UTF-8 | 748 | 4.28125 | 4 | [] | no_license | # 一个类可以创建多个对象,如何对不同的对象设置不同的初始化属性
# 答: 传参数。
# 1.定义类:带参数的init:宽度和高度, 定义方法:调用实例属性
class Washer(object):
    """Washing machine whose dimensions are supplied per instance."""

    def __init__(self, width, height):
        # Each object carries its own width/height instance attributes.
        self.width = width
        self.height = height

    def print_info(self):
        """Print this washer's width and height, one line each."""
        for line in (f'洗衣机的宽度为{self.width}', f'洗衣机的高度为{self.height}'):
            print(line)
# 2. Create multiple objects with different attribute values and call the
#    instance method on each.
haier1 = Washer(222, 333)
haier1.print_info()
haier2 = Washer(1, 2)
haier2.print_info()
# haier3 = Washer()
# haier3.print_info()  # omitting the arguments raises TypeError:
#                      # missing 2 required positional arguments: 'width' and 'height'
| true |
0481d9895346f0a6e751d96a6f5f40cc1ce1a9b4 | Python | quervernetzt/simple-blockchain | /main.py | UTF-8 | 1,017 | 3.359375 | 3 | [
"MIT"
] | permissive | from datetime import datetime
from solution.linked_list import LinkedList
from solution.block import Block
if __name__ == "__main__":

    ###################################
    # Demo
    ###################################

    # Build a three-block chain; each block stores the hash of its
    # predecessor (None for the genesis block), and prepend() puts the
    # newest block at the head of the linked list.
    block_chain: LinkedList = LinkedList()

    first_block: Block = Block(datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "first", None)
    block_chain.prepend(first_block)
    second_block: Block = Block(datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "second", first_block.hash)
    block_chain.prepend(second_block)
    third_block: Block = Block(datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ"), "third", second_block.hash)
    block_chain.prepend(third_block)

    print(block_chain.size()) # 3
    print("-----------")
    for block in block_chain.to_list():
        print(block.timestamp) # UTC time when the corresponding block has been created e.g. 2020-12-29T09:17:43.732627Z
        print(block.data) # first, second or third
        print("-----------")
| true |
65966f71a6796b0335626b3c80ecc75fceba4b1d | Python | jim-huang-nj/BeginningPython | /MagePython/Chapter5/5-3-8.py | UTF-8 | 778 | 3.421875 | 3 | [] | no_license | import datetime,time,functools
def logger(duration,func=lambda name,duration:print("{} took {}s".format(name,duration))):
    """Decorator factory that reports functions running longer than ``duration``.

    Args:
        duration: threshold in seconds; the callback fires only when the
            measured runtime exceeds it.
        func: callback invoked as ``func(function_name, elapsed_seconds)``.
            Defaults to printing a short message.
    """
    def _logger(fn):
        @functools.wraps(fn)
        def wrapper(*args,**kwargs):
            """I am wrapper"""
            start = datetime.datetime.now()
            ret = fn(*args,**kwargs)
            delta = (datetime.datetime.now() - start).total_seconds()
            if delta > duration:
                # BUG FIX: report the measured elapsed time (delta), not the
                # threshold -- the original passed `duration`, so the log
                # always showed the limit instead of the actual runtime.
                func(fn.__name__, delta)
            return ret
        return wrapper
    return _logger
@logger(5) # equivalent to: add = logger(5)(add)
def add(x,y):
    """Return x + y after a 1-second delay (demo payload for the decorator)."""
    time.sleep(1)
    return x + y
print(add(5,6),add.__name__,add.__wrapped__,add.__dict__,sep="\n")
#print("name={},doc={}".format(add.__name__,add.__doc__)) | true |
ab1379b9ff33a839b7ad17d521ef0bf597fbfffe | Python | alien-from-jupiter/AI-Project | /code.py | UTF-8 | 2,354 | 2.6875 | 3 | [] | no_license | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import accuracy_score #works
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, export_graphviz
import numpy as np
import matplotlib.pyplot as plt
import random
#from random import seed
#from random import randint
# seed random number generator
# Train two classifiers on the symptom matrix and predict the prognosis of a
# randomly chosen test row, then look up the matching doctor in doc.csv.
data = pd.read_csv("Training.csv")
df = pd.DataFrame(data)
cols = df.columns
cols = cols[:-1]          # all columns except the last ('prognosis') are symptom flags
x = df[cols]
y = df['prognosis']
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=42)

# Naive Bayes baseline (scores are computed but intentionally not printed,
# matching the original script's behavior).
mnb = MultinomialNB()
mnb = mnb.fit(x_train, y_train)
mnb.score(x_test, y_test)

test_data = pd.read_csv("Testing.csv")
testx = test_data[cols]
testy = test_data['prognosis']
mnb.score(testx, testy)

# Decision tree: the model actually used for the prediction below.
dt = DecisionTreeClassifier(criterion = "entropy", random_state = 42)
dt=dt.fit(x_train,y_train)
importances = dt.feature_importances_
indices = np.argsort(importances)[::-1]
features = cols
# Report the ten most informative symptoms.
for f in range(10):
    print("%d. feature %d - %s (%f)" % (f + 1, indices[f], features[indices[f]] ,importances[indices[f]]))

feature_dict = {}
for i,f in enumerate(features):
    feature_dict[f] = i

# BUG FIX: the original used randrange(0, len(testx)+1, 1), which could
# return len(testx) and make testx.iloc[r, :] raise IndexError.
r = random.randrange(0, len(testx))
# Presumably the +2 maps the 0-based frame index to the spreadsheet row
# (header + 1-based counting) -- TODO confirm against Testing.csv.
print(r+2)
sample_x = testx.iloc[r,:].values
sample_x = np.array(sample_x).reshape(1,len(sample_x))
ypred = dt.predict(sample_x)
print(ypred)

# doc.csv columns: disease name, doctor, consultation link.
d = pd.read_csv("doc.csv")
a = d.iloc[0:41,0].values
b = d.iloc[0:41,1].values
c = d.iloc[0:41,2].values
for i in range(0,41):
    if a[i] == ypred:
        print("Consult the doctor : ",b[i])
print("Link : ",c[i]) | true |
4f5079f17b1fe41a0352ea678a52fa8b2665cf66 | Python | adnappp/deeplabv3 | /cal_mean.py | UTF-8 | 548 | 2.53125 | 3 | [] | no_license | import cv2
import os
import numpy as np
R_sum =0
# Compute the per-channel mean pixel value over the whole training set.
# (The unused R_sum/G_sum/B_sum accumulators from the original were removed.)
path = "dataset/train/images/"
images = os.listdir(path)
per_image_Rmean = []
per_image_Gmean = []
per_image_Bmean = []
for image in images:
    # OpenCV loads images in BGR order: channel 0 is blue, 2 is red.
    img = cv2.imread(os.path.join(path,image))
    per_image_Bmean.append(np.mean(img[:, :, 0]))
    per_image_Gmean.append(np.mean(img[:, :, 1]))
    per_image_Rmean.append(np.mean(img[:, :, 2]))
R_mean = np.mean(per_image_Rmean)
G_mean = np.mean(per_image_Gmean)
B_mean = np.mean(per_image_Bmean)
print(R_mean)
print(G_mean)
print(B_mean) | true |
05054b8cd3dc3bb6f535901f271ea3c30d958eee | Python | kaiocp/uri-challenges | /python/1044.py | UTF-8 | 193 | 3.21875 | 3 | [] | no_license | a, b = map(int, input().split())
# Order the pair so the larger value is divided by the smaller one.
smaller, larger = sorted((a, b))
if larger % smaller == 0:
    print("Sao Multiplos")
else:
    print("Nao sao Multiplos")
| true |
aef9da156461f92b638598f472c2276957f9b3c6 | Python | abhimanyusrana/convert-RecordList-to-DictList | /recordsToDicts.py | UTF-8 | 936 | 3.1875 | 3 | [
"MIT"
] | permissive | # converts recordlist generated by Neo4j cypher to a list of dictionaries
# each dictionary contains all data from each record in property:value format
# and contains id(node) data in the node_id key pair
# it takes input in form : 'WHATEVERHAPPENS-IN-THE-QUERY return n, keys(n)'
def convertRecordListToDict(records):
    # Convert a Neo4j record list into a list of dicts, one dict per record,
    # mapping every property name (collected across all records) to its value,
    # plus the node id under the 'node_id' key.
    # Expects records shaped like the result of '... return n, keys(n)'.
    keySet=getPropertySet(records)
    one_record = {}
    all_records=[]
    for x in records:
        # NOTE(review): assumes each record's node object exposes a `.ref`
        # string whose first 5 characters are a fixed prefix before the id
        # (e.g. 'node/<id>') -- confirm against the driver in use.
        node_id = x[0].ref[5:]
        one_record['node_id'] = node_id
        for y in keySet:
            # Properties absent from this node resolve through the node's
            # own item lookup.
            one_record[y]=x[0][y]
        all_records.append(one_record)
        one_record={}
    return all_records
# inspects all records and extracts properties mentioned in them to ensure it
# gets all. Returns a set of property values.
# it takes input in form : 'WHATEVERHAPPENS-IN-THE-QUERY return n, keys(n)'
def getPropertySet(records):
keySet=set()
for x in records:
for y in x[1]:
keySet.add(y)
return keySet
| true |
f301578b46d59500b568501bddd6138c71c904b3 | Python | UmeshDeshmukh/DSP_Lab_ECE-GY_6183 | /Module 8/demo 22 - video _cv2_/demo 22 - video (cv2)/D1 - images/show_hsv.py | UTF-8 | 1,142 | 3.703125 | 4 | [] | no_license | # show_hsv.py
# Show the three components of the HSV representation of a color image.
# The HSV channels are: Hue, Saturation, Value.
# Value encodes brightness (Value = 0 gives black).
# Saturation encodes color level (Saturation = 0 gives gray, where the shade of gray depends on Value)
# Hue then encodes color informaiton.
import cv2
import numpy as np
img = cv2.imread('books.jpg', 1)
# 1 means import image in color (OpenCV loads as BGR)

# Convert to the HSV color space
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

print(type(img_hsv))
print(img_hsv.shape)
print(img_hsv.dtype)

cv2.imshow('Original image', img)
cv2.imshow('Channel 0 (Hue)', img_hsv[:,:,0])
cv2.imshow('Channel 1 (Saturation)', img_hsv[:,:,1])
cv2.imshow('Channel 2 (Value)', img_hsv[:,:,2])

print('Switch to images. Then press any key to quit')

# Write each HSV channel to a file.
# BUG FIX: the original indexed `img` (BGR) here, so the saved files were
# actually the blue/green/red channels despite the hue/saturation/value names.
cv2.imwrite('books - hue.jpg', img_hsv[:, :, 0])
cv2.imwrite('books - saturation.jpg', img_hsv[:, :, 1])
cv2.imwrite('books - value.jpg', img_hsv[:, :, 2])

cv2.waitKey(0) # wait until any keystroke
cv2.destroyAllWindows()
# Reference
# http://docs.opencv.org/3.2.0/df/d9d/tutorial_py_colorspaces.html
| true |
3599f31d8ae21a537411f6e3459fa01c1e92cb02 | Python | Katorea132/AssociateAnalgesic | /tests/test_mentor.py | UTF-8 | 7,233 | 3.203125 | 3 | [] | no_license | #!/usr/bin/python3
"""
Module containing unit tests for most of the methods
in the Mentor class
"""
from mentor import Mentor
import unittest
class TestMentor(unittest.TestCase):
    """Unit tests for the Mentor class: constructor validation, the weekly
    AM/PM time-frame grid, and the is_avaliable/occupy_hours scheduling API."""

    # --- constructor: every valid day/time combination ---
    def test_mentor_correct_init(self):
        """
        Tests the correct initialization and parametization of a mentor object
        """
        a = Mentor("pizza", "Monday", "AM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Monday", a.day)
        self.assertEqual("AM", a.time)
        a = Mentor("pizza", "Undefined", "Undefined")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Undefined", a.day)
        self.assertEqual("Undefined", a.time)
        a = Mentor("pizza", "Tuesday", "AM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Tuesday", a.day)
        self.assertEqual("AM", a.time)
        a = Mentor("pizza", "Wednesday", "AM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Wednesday", a.day)
        self.assertEqual("AM", a.time)
        a = Mentor("pizza", "Thursday", "AM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Thursday", a.day)
        self.assertEqual("AM", a.time)
        a = Mentor("pizza", "Friday", "AM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Friday", a.day)
        self.assertEqual("AM", a.time)
        a = Mentor("pizza", "Monday", "PM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Monday", a.day)
        self.assertEqual("PM", a.time)
        a = Mentor("pizza", "Tuesday", "PM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Tuesday", a.day)
        self.assertEqual("PM", a.time)
        a = Mentor("pizza", "Wednesday", "PM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Wednesday", a.day)
        self.assertEqual("PM", a.time)
        a = Mentor("pizza", "Thursday", "PM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Thursday", a.day)
        self.assertEqual("PM", a.time)
        a = Mentor("pizza", "Friday", "PM")
        self.assertTrue(a)
        self.assertEqual("pizza", a.name)
        self.assertEqual("Friday", a.day)
        self.assertEqual("PM", a.time)

    # --- constructor: missing / invalid arguments ---
    def test_mentor_missing_one_init(self):
        """
        Tests the correct raise of TypeError when missing 1 parameter of init
        """
        with self.assertRaises(TypeError):
            a = Mentor("pizza", "Monday")

    def test_mentor_missing_two_init(self):
        """
        Tests the correct raise of TypeError when missing 2 parameters of init
        """
        with self.assertRaises(TypeError):
            a = Mentor("pizza")

    def test_mentor_missing_three_init(self):
        """
        Tests the correct raise of TypeError when missing 3 parameters of init
        """
        with self.assertRaises(TypeError):
            a = Mentor()

    def test_mentor_wrong_day_init(self):
        """
        Tests the correct raise of ValueError on invalid day
        """
        self.assertRaises(ValueError, Mentor, "pizza", "Mondayayaya", "AM")
        self.assertRaises(ValueError, Mentor, "pizza", "", "AM")
        self.assertRaises(ValueError, Mentor, "pizza", "MMonday", "AM")
        self.assertRaises(ValueError, Mentor, "pizza", "MOnday", "AM")
        self.assertRaises(ValueError, Mentor, "pizza", "", "AM")
        self.assertRaises(ValueError, Mentor, "pizza", 3, "AM")
        self.assertRaises(ValueError, Mentor, "pizza", "NotPizza", "AM")

    def test_mentor_wrong_time_init(self):
        """
        Tests the correct raise of ValueError on invalid time frame
        """
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "AMM")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "AMM")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "AAM")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "Am")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "aM")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "ma")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", "")
        self.assertRaises(ValueError, Mentor, "pizza", "Monday", 3)

    # --- availability and time-frame grid ---
    def test_mentor_is_avaliable_correct_returns(self):
        """
        Tests for the appropiated return of the is avaliable function
        """
        a = Mentor("pizza", "Monday", "AM")
        self.assertTrue(a.is_avaliable("9:30"))
        # hours[slot][0] is the occupied flag for that time slot
        a.hours["9:30"][0] = True
        self.assertFalse(a.is_avaliable("9:30"))

    def test_mentor_correct_time_frame_assigned(self):
        """
        Tests that the correct frame and only that frame was assigned to\
        the mentor
        """
        a = Mentor("pizza", "Monday", "AM")
        self.assertTrue(a.is_avaliable("9:30"))
        self.assertRaises(KeyError, a.is_avaliable, "14:50")
        a = Mentor("pizza", "Monday", "PM")
        self.assertTrue(a.is_avaliable("14:50"))
        self.assertRaises(KeyError, a.is_avaliable, "9:50")

    def test_mentor_correct_resting_time(self):
        """
        Tests that no meeting is possible in the resting time (14:30-14:50)
        """
        a = Mentor("pizza", "Monday", "PM")
        self.assertRaises(KeyError, a.is_avaliable, "14:30")

    def test_mentor_correct_space_between_time_frames(self):
        """
        Tests that no meeting is possible in the time between time frames
        """
        a = Mentor("pizza", "Monday", "PM")
        self.assertRaises(KeyError, a.is_avaliable, "16:30")
        a = Mentor("pizza", "Monday", "AM")
        self.assertRaises(KeyError, a.is_avaliable, "12:50")

    # --- occupy_hours bookings ---
    def test_mentor_occupy_hours_non_undefined(self):
        """
        Tests that the Occupy hours function does populate correctly\
        the hour and company name when non undefined values are given
        """
        a = Mentor("pizza", "Monday", "AM")
        a.occupy_hours("9:30", "Pepsiman")
        self.assertTrue(a.hours["9:30"][0])
        self.assertEqual(a.hours["9:30"][1], "Pepsiman")
        self.assertFalse(a.is_avaliable("9:30"))

    def test_mentor_occupy_hours_on_undefined(self):
        """
        Tests that Occupy hours function does populate correctly\
        when undefined is present
        """
        # Undefined slots accumulate company names, '/'-terminated.
        a = Mentor("pizza", "Undefined", "Undefined")
        a.occupy_hours("Undefined", "Damedanedameyodamenanoyo")
        self.assertEqual(a.hours["Undefined"][1], "Damedanedameyodamenanoyo/")
        a.occupy_hours("Undefined", "Antagasugitesugisugite")
        self.assertEqual(a.hours["Undefined"][1],
                         "Damedanedameyodamenanoyo/Antagasugitesugisugite/")

    def test_mentor_occupy_hours_on_invalid_time(self):
        """
        Tests that Occupy does in fact throw a KeyError when\
        given an invalid hour
        """
        a = Mentor("pizza", "Monday", "AM")
        self.assertRaises(KeyError, a.occupy_hours, "14:30", "Lala")
5ec220619bf17af966ceb9091ad591747b357de3 | Python | SegFault2017/Leetcode2020 | /group_options.py | UTF-8 | 655 | 3.28125 | 3 | [] | no_license | class Solution(object):
def grouping_options(self, n, k):
"""
:type n: int
:type k: int
:rtype: int
"""
row_end = n
col_end = k
dp_list = [[0] * (col_end + 1) for _ in range(row_end + 1)]
dp_list[0][0] = 1
res = 0
for i in range(1, row_end + 1):
for j in range(1, min(col_end, i) + 1):
dp_list[i][j] = dp_list[i - j][j] + dp_list[i - 1][j - 1]
return dp_list[-1][-1]
def main():
    """Demo driver: partition 4 items into exactly 3 groups and print the count."""
    solver = Solution()
    answer = solver.grouping_options(4, 3)
    print(answer)
if __name__ == "__main__":
main()
| true |
23dbf3dc50628e79141d6f3d492ee66a60408106 | Python | StefPres/Fence-climber | /Climber.py | UTF-8 | 2,659 | 2.796875 | 3 | [] | no_license | import Adafruit_PCA9685
import time
from evdev import InputDevice, categorize
pwm = Adafruit_PCA9685.PCA9685()
pwm_frequency = 50
pwm.set_pwm_freq(pwm_frequency)
servo_min = (145 * pwm_frequency) // 50
servo_max = (580 * pwm_frequency) // 50
def servoSetting(angle):
    # Linearly map an angle in [0, 180] degrees onto the calibrated
    # [servo_min, servo_max] PWM tick range using integer arithmetic.
    span = servo_max - servo_min
    return span * angle // 180 + servo_min
def motorSetting(speed):
    """Convert a speed percentage (0-100) to a 12-bit PWM tick count (0-4095)."""
    full_scale = 4095.0
    return int(speed * full_scale / 100.0)
# Gamepad event source; /dev/input/event0 is assumed to be the controller.
gamepad = InputDevice('/dev/input/event0')
print(gamepad)

if __name__ == '__main__':
    try:
        # Home position: arms retracted, claws open.
        pwm.set_pwm(0, 0, servoSetting(180)) #arm 1
        pwm.set_pwm(1, 0, servoSetting(0)) #arm 2
        pwm.set_pwm(3, 0, servoSetting(180)) #claw 1
        pwm.set_pwm(4, 0, servoSetting(0)) #claw 2
        noError = True
        while noError:
            try:
                for event in gamepad.read():
                    eventinfo = categorize(event)
                    # type 1 = key/button event
                    if event.type == 1:
                        newbutton = True
                        codebutton = eventinfo.scancode
                        valuebutton = eventinfo.keystate
                        # Scancode 308 pressed triggers one full climb cycle.
                        # NOTE(review): 308 is presumably a specific gamepad
                        # button -- confirm against the controller's mapping.
                        if(codebutton == 308):
                            if(valuebutton == 1):
                                print("Arms out...")
                                pwm.set_pwm(0, 0, servoSetting(115)) #arm 1
                                pwm.set_pwm(1, 0, servoSetting(45)) #arm 2
                                time.sleep(1)
                                print("Latching fence...")
                                pwm.set_pwm(3, 0, servoSetting(80)) #claw 1
                                pwm.set_pwm(4, 0, servoSetting(100)) #claw 2
                                time.sleep(1)
                                print("Positive latch. \nPulling robot up...")
                                pwm.set_pwm(0, 0, servoSetting(180)) #arm 1
                                pwm.set_pwm(1, 0, servoSetting(0)) #arm 2
                                time.sleep(2)
                                print("Unlatching...")
                                pwm.set_pwm(3, 0, servoSetting(180)) #claw 1
                                pwm.set_pwm(4, 0, servoSetting(0)) #claw 2
                                time.sleep(1)
                                print("Unlatched")
            except:
                # NOTE(review): bare except silently swallows ALL errors from
                # gamepad.read() (including programming bugs); narrowing to
                # the expected BlockingIOError/OSError would be safer.
                pass
    # Reset by pressing CTRL + C
    except KeyboardInterrupt:
        # Return servos to the home position before exiting.
        pwm.set_pwm(0, 0, servoSetting(180))
        pwm.set_pwm(1, 0, servoSetting(0))
        pwm.set_pwm(3, 0, servoSetting(180))
        pwm.set_pwm(4, 0, servoSetting(0))
        time.sleep(1)
        print("Program stopped by User")
        #GPIO.cleanup()
| true |
4dc8e1bb00d410ce5f57710ee879a8037dfa2348 | Python | davidafayjr/RunningDog | /flyingDrones.py | UTF-8 | 5,589 | 3.015625 | 3 | [] | no_license | # from firebase import firebase
from time import sleep
import numpy as np
import pyrebase
import sys
import random
from websocket import create_connection
global DRONE_NAME
config = {
"apiKey": "AIzaSyCLWJMUXvEE8c-272i9RlNM07dLehvWIiY",
"authDomain": "myfirstmapboxapp.firebaseapp.com",
"databaseURL": "https://myfirstmapboxapp-11599.firebaseio.com/",
"storageBucket": "projectId.appspot.com"
}
firebase = pyrebase.initialize_app(config)
#mydatabase = firebase.FirebaseApplication('https://myfirstmapboxapp-11599.firebaseio.com/', None)
mydatabase = firebase.database()
def createSteps(a, b, number_of_steps):
    """Return the per-step increment that moves from a to b in the given number of steps."""
    return (b - a) / number_of_steps
def runLeg(start_lat, start_lng, stop_lat, stop_lng, number_of_steps):
    '''
    Simulate movement between two locations and transmit each intermediate
    position to Firebase.

    Calls isThisPointInANoFlyZone(lat, lng) before every update and skips
    the Firebase write while inside a no-fly zone (the position is still
    printed and the flight continues).

    Cleanup vs. the original: the unused loop index and the materialized
    `indexes` list are gone, as is the large commented-out websocket
    transmission block.  The loop still emits number_of_steps - 1 points,
    so the exact endpoint itself is never sent -- matching the original.
    '''
    lat_step = createSteps(start_lat, stop_lat, number_of_steps)
    lng_step = createSteps(start_lng, stop_lng, number_of_steps)
    lat = start_lat
    lng = start_lng
    for _ in range(number_of_steps - 1):
        lat += lat_step
        lng += lng_step
        print("%s [%s, %s]" % (DRONE_NAME, lat, lng))
        if not isThisPointInANoFlyZone(lat, lng):
            data = {
                "GeoFire/%s/l/" % DRONE_NAME: {
                    "0": lat,
                    "1": lng
                }
            }
            mydatabase.update(data)
        sleep(.01)
def isThisPointInANoFlyZone(lat, lng):
    '''
    Compare the current lat/lng against every polygon stored under
    NoFlyZones in the Firebase database.

    Returns True when the point lies inside at least one zone (and prints
    the zone name), False otherwise.  (The original docstring said "return
    false" for a hit, which contradicted the code.)
    '''
    allNoFlyZones = mydatabase.child("NoFlyZones").get()
    for each_noflyzone in allNoFlyZones.each():
        # "numberOfZones" is a counter entry, not a polygon -- skip it.
        if each_noflyzone.key() != "numberOfZones":
            # Zone value is a whitespace-separated list of "lng,lat,alt" vertices.
            polygon_list = each_noflyzone.val().split()
            if pointInsidePolygon(polygon_list, lat, lng):
                print("Flying in %s" % each_noflyzone.key())
                return True
    return False
def pointInsidePolygon(polygon_list, lat, lng):
    '''
    Helper for isThisPointInANoFlyZone(lat, lng).

    Ray-casting point-in-polygon test: walk every edge of the polygon
    (each vertex is a "lng,lat,alt" string) and let lineIntersect toggle
    the `inside` flag once per edge the cast ray crosses.
    '''
    inside = False
    vertex_count = len(polygon_list)
    for index, vertex in enumerate(polygon_list):
        # Pair each vertex with its successor, wrapping the last back to the first.
        neighbour = polygon_list[(index + 1) % vertex_count]
        v1_lng, v1_lat, _v1_alt = vertex.split(",")
        v2_lng, v2_lat, _v2_alt = neighbour.split(",")
        inside = lineIntersect(inside, float(lng), float(lat),
                               float(v1_lng), float(v1_lat),
                               float(v2_lng), float(v2_lat))
    return inside
def lineIntersect(inside, x, y, p1_x, p1_y, p2_x, p2_y):
    '''
    Helper for pointInsidePolygon().

    Returns the (possibly toggled) `inside` flag: it flips exactly when the
    horizontal ray from (x, y) towards -x crosses the edge (p1) -> (p2).
    Horizontal edges never toggle.
    '''
    # Guard clauses: the ray misses the edge entirely in these cases.
    if y <= min(p1_y, p2_y) or y > max(p1_y, p2_y):
        return inside
    if x > max(p1_x, p2_x):
        return inside
    if p1_y == p2_y:
        return inside
    # X coordinate where the edge crosses the ray's horizontal line.
    crossing_x = p1_x + (y - p1_y) * (p2_x - p1_x) / (p2_y - p1_y)
    if p1_x == p2_x or x <= crossing_x:
        return not inside
    return inside
if __name__ == "__main__":
global DRONE_NAME
#global OFFSETS
offsets = [0.0002, 0.0008, 0.0009, 0.0003, 0.0010, 0.0004]
if len(sys.argv) < 2:
print("not enough arguments")
sys.exit()
DRONE_NAME = sys.argv[1]
for i, each in enumerate(offsets):
offsets[i] = each+np.random.uniform(0.0001, 0.002)
offsets[i] = float("{0:.4f}".format(offsets[i]))
n = random.randrange(1,3)
if n%2 :
offsets[i] = offsets[i]*-1
# print(offsets[i])
offsets[i] = float("{0:.4f}".format(offsets[i]))
intial_lat = mydatabase.child("/GeoFire/%s/l/0" % DRONE_NAME).get()
intial_lng = mydatabase.child("/GeoFire/%s/l/1" % DRONE_NAME).get()
print(intial_lat.val(), intial_lng.val())
# sys.exit()
lat_a, lng_a = (float(intial_lat.val()), float(intial_lng.val()))
lat_b, lng_b = (lat_a+offsets[0], lng_a +offsets[1])
lat_c, lng_c = (lat_b+offsets[2], lng_b +offsets[3])
lat_d, lng_d = (lat_c+offsets[4], lng_c+offsets[5])
try:
while True:
runLeg(lat_a, lng_a, lat_b, lng_b, 45)
runLeg(lat_b, lng_b, lat_c, lng_c, 45)
runLeg(lat_c, lng_c, lat_d, lng_d, 45)
runLeg(lat_d, lng_d, lat_a, lng_a, 45)
except KeyboardInterrupt:
data = {
"GeoFire/%s/l/" % DRONE_NAME: {
"0": intial_lat.val(),
"1": intial_lng.val()
}
}
mydatabase.update(data)
except ConnectionError:
data = {
"GeoFire/%s/l/" % DRONE_NAME: {
"0": intial_lat.val(),
"1": intial_lng.val()
}
}
mydatabase.update(data)
| true |
75df20b73b155fa87b63746cbf7d34f73f1b1bcf | Python | jyotsnareddy/Assignments | /Dictionary.py | UTF-8 | 265 | 3.140625 | 3 | [] | no_license | dict1 = {1: "munny", 2: "sunny", 3: "tillu"}
dict2 = {"pi": 3.14}
dict2[4] = "rithu"
# print(dict1.viewitems())
# print(dict1.values())
# print(dict1.copy())
#
x=dict2.copy()
print(x)
y=dict2
print(y)
print dict1[2]
print(len(dict1))
if 2 in dict1:
print "yes"
| true |
06151dcda5098e796decd5b14a4ca51297e1e28e | Python | vaishnavichakravarthi/Practice-Problems | /key2val.py | UTF-8 | 183 | 3.3125 | 3 | [] | no_license | dict={'a':"Speckbit", 'b':"World", 'c':"Quiet"}
# Reverse lookup: print every key whose value equals the text the user types.
# (`dict` is the module-level dictionary defined above; it shadows the builtin
# but is declared outside this block, so it is read as-is here.)
target = input("input the value you want to search")  # fixed "the the" typo
for key, value in dict.items():  # no longer shadows the builtin `str`
    if value == target:
        print(key)
| true |
8ad7b22e13731c8409e74684115095d5f2137862 | Python | osm3000/text_generation_rnn_tutorial | /utils.py | UTF-8 | 1,284 | 2.9375 | 3 | [] | no_license | from sklearn.utils import shuffle
import numpy as np
def getbatch(*args, **kwargs):
    """
    Slice the same [i, i + batch_size) window out of every sequence.

    Keyword args:
        i: start index of the batch.
        batch_size: maximum number of elements per slice; the window is
            clipped at the end of the first sequence.

    Returns:
        A list containing one slice per positional argument.
    """
    start = kwargs['i']
    batch_size = kwargs['batch_size']
    assert len(args) > 0
    # Clip the window so it never runs past the end of the data.
    width = min(batch_size, len(args[0]) - start)
    return [sequence[start:start + width] for sequence in args]
def get_random_batch(*args, **kwargs):
    """
    Inspired by Dawood's idea, which seems very logical to me

    Shuffles the inputs, then returns the same [i, i + batch_size) window of
    each sequence (same keyword contract as getbatch: `i`, `batch_size`).

    NOTE(review): `shuffle(args)` (sklearn.utils.shuffle) shuffles the tuple
    of argument sequences as a whole, i.e. it permutes which sequence sits
    at which position rather than shuffling rows within each sequence;
    the intent was probably `shuffle(*args)` -- confirm against callers.
    """
    args_new = shuffle(args)
    i = kwargs['i']
    batch_size = kwargs['batch_size']
    assert len(args_new) > 0
    output_list = []
    # min_len = min(batch_size, len(args[0]) - 1 - i)
    # Clip the batch window at the end of the first sequence.
    min_len = min(batch_size, len(args_new[0]) - i)
    for argument in args_new:
        output_list.append(argument[i:i + min_len])
    return output_list
def sample(preds, temperature=1.0):
    """Draw one index from a softmax distribution over ``preds``.

    ``preds`` are treated as logits: they are divided by ``temperature``
    (higher temperature => flatter distribution), normalised with a softmax,
    and a single multinomial draw picks the returned index.
    """
    logits = np.asarray(preds).astype('float64') / temperature
    # Softmax: exponentiate, then normalise into a probability vector.
    weights = np.exp(logits)
    probabilities = weights / np.sum(weights)
    draw = np.random.multinomial(1, probabilities, 1)
    return np.argmax(draw)
c72e07e3eba248d08ec512f3c6d78a1733434785 | Python | linyihan2013/leetcode | /python/UglyNumber.py | UTF-8 | 774 | 3.203125 | 3 | [] | no_license | __author__ = 'yihan'
class Solution(object):
    """Ugly-number helpers.

    An ugly number has no prime factors other than 2, 3 and 5; by
    convention 1 is ugly.
    """

    def isUgly(self, num):
        """Return True if num is an ugly number.

        :type num: int
        :rtype: bool
        """
        # Strip out every allowed prime factor; an ugly number reduces to 1.
        while num >= 2:
            if num % 2 == 0:
                num //= 2  # floor division keeps num an int (original /= drifted to float)
            elif num % 3 == 0:
                num //= 3
            elif num % 5 == 0:
                num //= 5
            else:
                return False
        return num == 1

    def nthUglyNumber(self, n):
        """Return the n-th ugly number (1-indexed: the first is 1).

        :type n: int
        :rtype: int
        """
        q = [1]
        i2 = i3 = i5 = 0
        while len(q) < n:
            m2, m3, m5 = q[i2] * 2, q[i3] * 3, q[i5] * 5
            m = min(m2, m3, m5)
            # Advance every pointer that produced the minimum so duplicates
            # (e.g. 6 = 2*3 = 3*2) are emitted only once.
            if m == m2:
                i2 += 1
            if m == m3:
                i3 += 1
            if m == m5:
                i5 += 1
            # Bug fix: the original `q += m` raised TypeError because an int
            # is not iterable; append the single value instead.
            q.append(m)
        return q[-1]
| true |
25d6b0eabaf90fb3363aa3ba71b1d94743e849e8 | Python | priyakumar513/pkumar1 | /lab2ex1.py | UTF-8 | 131 | 2.796875 | 3 | [] | no_license | def hello_name ():
somebody_specified = input()
print('Hello world, from', somebody_specified)
hello_name()
| true |
3c5a0a63c63831ee273dbb0e87fce1ed7895f041 | Python | prabhupant/python-ds | /data_structures/binary_trees/continuous_tree.py | UTF-8 | 923 | 4 | 4 | [
"MIT"
] | permissive | # A continuous tree is such that the absolute difference between two
# adjacent nodes is 1
class Node:
def __init__(self, val):
self.val = val
self.left = None
self.right = None
def continuous(root):
# Can be continuous if
# 1. Root is none
# 2. Both left and right STs are none
# 3. If left ST is none, check for right
# 4. If right ST is none, check for left
# 5. Else check for everything
if root is None:
return True
if root.left == None and root.right == None:
return True
if root.left == None:
return (abs(root.val - root.right.val) == 1) and continuous(root.right)
if root.right == None:
return (abs(root.val - root.left.val) == 1) and continuous(root.left)
return (abs(root.val - root.right.val) == 1) and (abs(root.left.val - root.val) == 1) and continuous(root.left) and continuous(root.right)
| true |
c0bfee924151710f2e1c6fe7fc7bd988aea82e16 | Python | atekippe/SecDSM_April_2019_IOT_CTF_MQTT_Python | /pyqt5_ex.py | UTF-8 | 1,138 | 2.640625 | 3 | [] | no_license | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QApplication, QWidget, QLabel
class Window(QWidget):
    """Full-screen window showing two green-on-black labels on a grid."""
    def __init__(self, *args, **kwargs):
        """Build both labels, lay them out, and switch to full screen.

        NOTE(review): this reads the module-level variable `king`, which is
        only assigned *after* this class definition; it works because
        __init__ runs at instantiation time, but the ordering is fragile.
        """
        QWidget.__init__(self, *args, **kwargs)
        self.label = QLabel(king, self)
        self.label.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.label.setAlignment(Qt.AlignCenter)
        self.label.setStyleSheet("QLabel {background-color: black;color: rgb(32, 192, 14);}")
        self.label2 = QLabel("Label2", self)
        self.label2.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        self.label2.setAlignment(Qt.AlignCenter)
        self.label2.setStyleSheet("QLabel {background-color: black;color: rgb(32, 192, 14);}")
        # Grid layout: first label at (0, 0), second at row 1 / column 2.
        self.layout = QGridLayout()
        self.layout.addWidget(self.label, 0, 0)
        self.layout.addWidget(self.label2, 1, 2)
        self.setLayout(self.layout)
        self.showFullScreen()
        #self.show()
var1= "TEST"
var2 = "TEST2"
king = var1 + " Is the master of the domain"
app = QApplication(sys.argv)
win = Window()
sys.exit(app.exec_()) | true |
3097a38d62fab4e1371517be6ce483c464bb9b3c | Python | apple-open-source/macos | /pyobjc/pyobjc/pyobjc-framework-QTKit-2.5.1/PyObjCTest/test_qttimerange.py | UTF-8 | 1,142 | 2.75 | 3 | [
"MIT"
] | permissive |
from PyObjCTools.TestSupport import *
from QTKit import *
class TestQTTimeRange (TestCase):
    """PyObjC bridge tests for the QTKit QTTimeRange struct and helpers."""
    def testStructs(self):
        # A fresh QTTimeRange exposes its two struct fields.
        v = QTTimeRange()
        self.assertHasAttr(v, 'time')
        self.assertHasAttr(v, 'duration')
    def testFunctions(self):
        # Build two ranges from QTTime values and exercise every helper.
        v = QTMakeTimeWithTimeInterval(1500.0)
        w = QTMakeTimeWithTimeInterval(10.0)
        rng = QTMakeTimeRange(v, w)
        self.assertIsInstance(rng, QTTimeRange)
        rng2 = QTMakeTimeRange(w, v)
        self.assertIsInstance(rng2, QTTimeRange)
        # The bridge must map these C functions to Python bools.
        self.assertResultIsBOOL(QTTimeInTimeRange)
        self.assertResultIsBOOL(QTEqualTimeRanges)
        o = QTTimeInTimeRange(v, rng)
        self.assertTrue(o is True)
        o = QTTimeInTimeRange(w, rng)
        self.assertTrue(o is False)
        o = QTEqualTimeRanges(rng, rng)
        self.assertTrue(o is True)
        o = QTTimeRangeEnd(rng)
        self.assertIsInstance(o, QTTime)
        o = QTUnionTimeRange(rng, rng2)
        self.assertIsInstance(o, QTTimeRange)
        o = QTIntersectionTimeRange(rng, rng2)
        self.assertIsInstance(o, QTTimeRange)
if __name__ == "__main__":
main()
| true |
8907481c973c59e3a7b8628418b8a64df501a651 | Python | honghen15/checkio_ | /three-points-circle.py | UTF-8 | 1,486 | 3.15625 | 3 | [] | no_license | import re
import numpy as np
def checkio(data):
    """Return the equation '(x-a)^2+(y-b)^2=r^2' of the circle through the
    three points encoded in *data* as '(x1,y1),(x2,y2),(x3,y3)'.

    The centre is found by solving the two perpendicular-bisector equations
    of chords AB and BC; integral results are rendered without a decimal
    point, everything else is rounded to two decimals.
    """
    points = [(int(px), int(py))
              for px, py in re.findall(r'(\d+),(\d+)', data)]
    (ax, ay), (bx, by), (cx, cy) = points[:3]
    # Each bisector satisfies  dy*Y + dx*X = dy*Ymid + dx*Xmid.
    coeffs = np.array([[ay - by, ax - bx],
                       [by - cy, bx - cx]])
    rhs = np.array([(ay - by) * (ay + by) / 2 + (ax - bx) * (ax + bx) / 2,
                    (by - cy) * (by + cy) / 2 + (bx - cx) * (bx + cx) / 2])
    centre_y, centre_x = np.linalg.solve(coeffs, rhs)
    # Radius = distance from the centre to the first point, to 2 decimals.
    radius = round(((centre_x - ax) ** 2 + (centre_y - ay) ** 2) ** 0.5, 2)
    radius = round(radius, 2) if radius > int(radius) else int(radius)

    def tidy(value):
        # Render integral coordinates as ints, others rounded to 2 decimals.
        return int(value) if value == int(value) else round(value, 2)

    return '(x-{0})^2+(y-{1})^2={2}^2'.format(tidy(centre_x), tidy(centre_y), radius)
#These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
print(checkio("(2,2),(6,2),(2,6)"))# == "(x-4)^2+(y-4)^2=2.83^2"
print(checkio("(3,7),(6,9),(9,7)"))# == "(x-6)^2+(y-5.75)^2=3.25^2"
print(checkio("(7,7),(4,3),(1,8)"))# == "(x-6)^2+(y-5.75)^2=3.25^2"
| true |
82bee3e7185915933dafd7baab747d17c359bddb | Python | sudhakarchoudhari75/pythontraining | /varArgsDictDemo.py | UTF-8 | 254 | 3.078125 | 3 | [] | no_license | def varArgs(a,b,*args,**kwargs):
    # Positional parameters a and b are printed first.
    print(a)
    print(b)
    # *args collects the remaining positional arguments into a tuple.
    print(type(args))
    for x in args:
        print(x)
    # **kwargs collects keyword arguments into a dict; iterate its keys.
    print(type(kwargs))
    for x in kwargs:
        print(x, kwargs[x])
if __name__=='__main__':
    # Demo call: 30/40/50 land in *args; name/education land in **kwargs.
    varArgs(10,20,30,40,50,name="sudhakar", education="BE")
| true |
d488f5a5bf8c769dce68e9f4da292b0498291167 | Python | pandayo/aoc2018 | /20181214.py | UTF-8 | 2,718 | 3.1875 | 3 | [] | no_license | def printRecipes(recipes, indices):
output = ""
for ind, score in enumerate(recipes):
if not ind in indices:
output += " {0} ".format(score)
elif ind == indices[0]:
output += "({0})".format(score)
else:
output += "[{0}]".format(score)
print(output)
def printFlat(recipes):
    """Return the recipe scores concatenated into a single digit string."""
    return "".join(str(score) for score in recipes)
def solvePart(puzzle_input, debug = False):
    """Advent of Code 2018 day 14, part 1.

    Args:
        puzzle_input: number of recipes made before the answer window.
        debug: when True, print the scoreboard after every step
            (requires the printRecipes helper defined above).

    Returns:
        The scores of the ten recipes immediately after the first
        `puzzle_input` recipes, as a 10-character digit string.
    """
    recipes = [3, 7]
    indices = (0, 1)
    while len(recipes) < (puzzle_input + 10):
        newRecipe = recipes[indices[0]] + recipes[indices[1]]
        if newRecipe > 9:
            recipes.append(newRecipe // 10)  # tens digit goes first
            newRecipe = newRecipe % 10
        recipes.append(newRecipe)
        # Each elf moves forward 1 + (score of its current recipe), wrapping.
        indices = ((indices[0] + 1 + recipes[indices[0]]) % len(recipes),
                   (indices[1] + 1 + recipes[indices[1]]) % len(recipes))
        if debug:
            printRecipes(recipes, indices)
    # Bug fix: the original returned recipes[-10:], which is off by one
    # whenever the final step appends two digits and overshoots the target
    # length (e.g. solvePart(5) yielded "1245158916" instead of
    # "0124515891"). Slice by absolute position instead.
    return "".join(str(score) for score in recipes[puzzle_input:puzzle_input + 10])
def solvePart2(puzzle_input):
    """Advent of Code 2018 day 14, part 2.

    Returns the number of recipes to the left of the first appearance of
    `puzzle_input`'s digit sequence on the scoreboard.

    (Removed the original's unused `str_recipes = printFlat(recipes)`
    local, which did nothing but force a dependency on printFlat.)
    """
    recipes = [3, 7]
    indices = (0, 1)
    check = [int(c) for c in str(puzzle_input)]
    count = len(check)
    # Each step may append one or two digits, so the pattern can end either
    # at the very end of the list or one digit before it.
    while not (check == recipes[-count:] or
               check == recipes[-count - 1:-1]):
        newRecipe = recipes[indices[0]] + recipes[indices[1]]
        if newRecipe > 9:
            recipes.append(newRecipe // 10)  # tens digit goes first
            newRecipe = newRecipe % 10
        recipes.append(newRecipe)
        indices = ((indices[0] + 1 + recipes[indices[0]]) % len(recipes),
                   (indices[1] + 1 + recipes[indices[1]]) % len(recipes))
    if check == recipes[-count:]:
        return len(recipes) - count
    else:
        return len(recipes) - count - 1
def part2(inp):
    # NOTE(review): duplicate implementation of solvePart2 that prints the
    # answer instead of returning it; consider deleting one of the two.
    scores = [3, 7]
    blah = list(map(int, inp))  # target digit pattern
    a, b = 0, 1                 # the two elves' current positions
    while True:
        asd = str(scores[a] + scores[b])
        scores.extend(map(int, asd))  # appends one or two digits
        # Each elf advances 1 + its current recipe's score, wrapping.
        a += scores[a] + 1
        b += scores[b] + 1
        a %= len(scores)
        b %= len(scores)
        # Pattern may end at the tail or one digit before it (two-digit step).
        if scores[-len(blah):] == blah or scores[-len(blah)-1:-1] == blah:
            break
    if scores[-len(blah):] == blah:
        print(len(scores) - len(blah))
    else:
        print(len(scores) - len(blah) - 1)
# Ad-hoc smoke tests against the puzzle's published examples:
# part 1 expects 5158916779 / 9251071085 / 5941429882,
# part 2 expects 9 / 5 / 18 / 2018; the 909441 calls compute the real answers.
print(solvePart(9, True))
print(solvePart(18))
print(solvePart(2018))
print(solvePart(909441))
print(solvePart2("51589"))
print(solvePart2("01245"))
print(solvePart2("92510"))
print(solvePart2("59414"))
print(solvePart2("909441"))
d245da502709ed86273f935436b89c1c059bff39 | Python | lhern207/Python-NetCentric-Scripts | /Some_other_python_scripts/dateManipulation.py | UTF-8 | 1,815 | 3.15625 | 3 | [] | no_license | #!/usr/bin/python
"""This program helps visualize how to convert from date string to date object,
compare two different date objects, differentiate between naive and aware date
objectsand finally convert from date object to http rfc date string format.
This is necessary to implement the conditional get request header"""
from datetime import datetime
import pytz
from os import path
httpdate1 = "Sun, 06 Nov 2015 08:49:37 GMT"
httpdate2 = "Wed, 23 May 2017 08:49:37 GMT"
httpdate3 = "Mon, 07 Nov 2015 08:49:37 GMT"
httpdate4 = "Mon, 07 Nov 2015 14:49:37 GMT"
httpdate5 = "Sun, 06 Nov 1994 08:49:37 GMT"
dateobject1 = datetime.strptime(httpdate1, "%a, %d %b %Y %H:%M:%S %Z")
dateobject2 = datetime.strptime(httpdate2, "%a, %d %b %Y %H:%M:%S %Z")
dateobject3 = datetime.strptime(httpdate3, "%a, %d %b %Y %H:%M:%S %Z")
dateobject4 = datetime.strptime(httpdate4, "%a, %d %b %Y %H:%M:%S %Z")
dateobject5 = datetime.strptime(httpdate5, "%a, %d %b %Y %H:%M:%S %Z")
print "\n"
print "Date 1: " + httpdate1
print "Date 2: " + httpdate2
print "Date 3: " + httpdate3
print "Date 4: " + httpdate4
print "Date 5: " + httpdate5
print "\n"
print "Date 3 older than Date 1: " + str(dateobject3 < dateobject1)
print "Date 3 older than Date 2: " + str(dateobject3 < dateobject2)
print "Date 3 older than Date 4: " + str(dateobject3 < dateobject4)
print "Date 3 older than Date 5: " + str(dateobject3 < dateobject5)
gmt = pytz.timezone("GMT")
dateobject2 = gmt.localize(dateobject2)
seconds = path.getmtime("presentFile.txt")
filedateobject = datetime.fromtimestamp(seconds, pytz.utc)
filedate = datetime.strftime(filedateobject, "%a, %d %b %Y %H:%M:%S %Z")
print "\nFile 'present.txt' was last modified on: " + filedate
print "The file was last modified before Date 3: " + str(filedateobject < dateobject2) + "\n"
| true |
01d7acc6de04fa61c4cead6abbc7e79d284b7b0c | Python | dtran39/Programming_Preparation | /04_Linked_List/023_Merge_K_Sorted_List/MergeKSortedLists.py | UTF-8 | 3,143 | 3.984375 | 4 | [] | no_license | '''
Problem:
- Merge k sorted linked lists and return them as one sorted list. Analyze and describe the complexity.
----------------------------------------------------------------------------------------------------
Examples:
[1,4, 7]
[2, 3, 6]
[5, 8]
-> [1, 2, 3, 4, 5, 6, 7, 8]
----------------------------------------------------------------------------------------------------
Solution:
1. USE HEAP
- Use dummy node to catch empty list
+ Create a dummy, then starts appending to that dummy node
+ In the end, return dummy.next
By doing this, we do not need to handle empty lists as a separate case
- Use a heap to determine which is the next node to be added to output list
+ Heap sorted by value, but must also keep track of the node and the list it belongs to
+ Best shot: Use tuple
- Complexity:
+ Time: O(nklog(k)) Space: O(k)
2. Divide and conquer:
- Use merge two lists algorithm from 021
- Use Merge sort (Divide and conquer) approach:
+ First call: merge(lists, 0, len(lists) - 1)
+ merge(lists, begin, end):
if begin > end: return None
if begin == end: return lists[begin]
mergeTwoLists(merge(lists, begin, middle ), merge(lists, middle + 1, end))
'''
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """Heap-based k-way merge (approach 1 in the module docstring)."""

    def mergeKLists(self, lists):
        """Merge k sorted linked lists into one sorted list.

        :type lists: List[ListNode]
        :rtype: ListNode

        Time O(n*k*log k) for n nodes per list; space O(k) for the heap.
        """
        from heapq import heappush, heappop

        # Dummy head lets us append without special-casing an empty result.
        dummy = ListNode(0)
        cur = dummy
        heap = []
        # Bug fix: heap entries carry a strictly increasing sequence number
        # so ties on value never fall through to comparing ListNode objects
        # (which raises TypeError on Python 3).
        order = 0
        for head in lists:
            if head:
                heappush(heap, (head.val, order, head))
                order += 1
        while heap:
            smallest_val, _, smallest_node = heappop(heap)
            # Append a fresh node with the smallest remaining value.
            cur.next = ListNode(smallest_val)
            cur = cur.next
            # Refill the heap from the list that supplied the minimum.
            if smallest_node.next:
                heappush(heap, (smallest_node.next.val, order, smallest_node.next))
                order += 1
        return dummy.next
class Solution2:
    """Divide-and-conquer k-way merge (approach 2 in the module docstring)."""

    # @param a list of ListNode
    # @return a ListNode
    def mergeKLists(self, lists):
        """Merge k sorted linked lists by pairwise merging, merge-sort style."""
        def mergeTwoLists(l1, l2):
            # Standard two-list merge using a dummy head.
            curr = dummy = ListNode(0)
            while l1 and l2:
                if l1.val < l2.val:
                    curr.next = l1
                    l1 = l1.next
                else:
                    curr.next = l2
                    l2 = l2.next
                curr = curr.next
            curr.next = l1 or l2  # append whichever list still has nodes
            return dummy.next

        def mergeKListsHelper(lists, begin, end):
            if begin > end:
                return None
            if begin == end:
                return lists[begin]
            # Bug fix: use floor division -- `(begin + end) / 2` yields a
            # float on Python 3 and made lists[begin] raise TypeError.
            mid = (begin + end) // 2
            return mergeTwoLists(mergeKListsHelper(lists, begin, mid),
                                 mergeKListsHelper(lists, mid + 1, end))

        return mergeKListsHelper(lists, 0, len(lists) - 1)
| true |
16f157bb3e1c056dc977f128b5eb9f94e593fec8 | Python | google/pytype | /pytype/tests/test_protocols2.py | UTF-8 | 31,635 | 2.890625 | 3 | [
"Apache-2.0",
"MIT"
] | permissive | """Tests for matching against protocols.
Based on PEP 544 https://www.python.org/dev/peps/pep-0544/.
"""
from pytype.pytd import pytd_utils
from pytype.tests import test_base
from pytype.tests import test_utils
class ProtocolTest(test_base.BaseTest):
"""Tests for protocol implementation."""
  def test_check_protocol(self):
    """A class defining __len__ matches both protocols.Sized and typing.Sized."""
    self.Check("""
      import protocols
      from typing import Sized
      def f(x: protocols.Sized):
        return None
      def g(x: Sized):
        return None
      class Foo:
        def __len__(self):
          return 5
      f([])
      foo = Foo()
      f(foo)
      g([])
      g(foo)
    """)
  def test_check_protocol_error(self):
    """A List[str] has no __abs__, so passing it for SupportsAbs must error."""
    _, errors = self.InferWithErrors("""
      import protocols
      def f(x: protocols.SupportsAbs):
        return x.__abs__()
      f(["foo"]) # wrong-arg-types[e]
    """)
    self.assertErrorRegexes(
        errors, {"e": r"\(x: SupportsAbs\).*\(x: List\[str\]\)"})
def test_check_iterator_error(self):
_, errors = self.InferWithErrors("""
from typing import Iterator
def f(x: Iterator[int]):
return None
class Foo:
def next(self) -> str:
return ''
def __iter__(self):
return self
f(Foo()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Iterator\[int\].*Foo"})
def test_check_protocol_match_unknown(self):
self.Check("""
from typing import Sized
def f(x: Sized):
pass
class Foo:
pass
def g(x):
foo = Foo()
foo.__class__ = x
f(foo)
""")
def test_check_parameterized_protocol(self):
self.Check("""
from typing import Iterator, Iterable
class Foo:
def __iter__(self) -> Iterator[int]:
return iter([])
def f(x: Iterable[int]):
pass
foo = Foo()
f(foo)
f(iter([3]))
""")
def test_check_parameterized_protocol_error(self):
_, errors = self.InferWithErrors("""
from typing import Iterator, Iterable
class Foo:
def __iter__(self) -> Iterator[str]:
return iter([])
def f(x: Iterable[int]):
pass
foo = Foo()
f(foo) # wrong-arg-types[e]
""")
self.assertErrorRegexes(
errors, {"e": r"\(x: Iterable\[int\]\).*\(x: Foo\)"})
def test_check_parameterized_protocol_multi_signature(self):
self.Check("""
from typing import Sequence, Union
class Foo:
def __len__(self):
return 0
def __getitem__(self, x: Union[int, slice]) -> Union[int, Sequence[int]]:
return 0
def f(x: Sequence[int]):
pass
foo = Foo()
f(foo)
""")
def test_check_parameterized_protocol_error_multi_signature(self):
_, errors = self.InferWithErrors("""
from typing import Sequence, Union
class Foo:
def __len__(self):
return 0
def __getitem__(self, x: int) -> int:
return 0
def f(x: Sequence[int]):
pass
foo = Foo()
f(foo) # wrong-arg-types[e]
""")
self.assertErrorRegexes(
errors, {"e": r"\(x: Sequence\[int\]\).*\(x: Foo\)"})
def test_construct_dict_with_protocol(self):
self.Check("""
class Foo:
def __iter__(self):
pass
def f(x: Foo):
return dict(x)
""")
def test_method_on_superclass(self):
self.Check("""
class Foo:
def __iter__(self):
pass
class Bar(Foo):
pass
def f(x: Bar):
return iter(x)
""")
def test_method_on_parameterized_superclass(self):
self.Check("""
from typing import List
class Bar(List[int]):
pass
def f(x: Bar):
return iter(x)
""")
def test_any_superclass(self):
self.Check("""
class Bar(__any_object__):
pass
def f(x: Bar):
return iter(x)
""")
def test_multiple_options(self):
self.Check("""
class Bar:
if __random__:
def __iter__(self): return 1
else:
def __iter__(self): return 2
def f(x: Bar):
return iter(x)
""")
def test_iterable_getitem(self):
ty = self.Infer("""
from typing import Iterable, Iterator, TypeVar
T = TypeVar("T")
class Bar:
def __getitem__(self, i: T) -> T:
if i > 10:
raise IndexError()
return i
T2 = TypeVar("T2")
def f(s: Iterable[T2]) -> Iterator[T2]:
return iter(s)
next(f(Bar()))
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import Iterable, Iterator, TypeVar
T = TypeVar("T")
class Bar:
def __getitem__(self, i: T) -> T: ...
T2 = TypeVar("T2")
def f(s: Iterable[T2]) -> Iterator[T2]: ...
""")
def test_iterable_iter(self):
ty = self.Infer("""
from typing import Iterable, Iterator, TypeVar
class Bar:
def __iter__(self) -> Iterator:
return iter([])
T = TypeVar("T")
def f(s: Iterable[T]) -> Iterator[T]:
return iter(s)
next(f(Bar()))
""", deep=False)
self.assertTypesMatchPytd(ty, """
from typing import Iterable, Iterator, TypeVar
class Bar:
def __iter__(self) -> Iterator: ...
T = TypeVar("T")
def f(s: Iterable[T]) -> Iterator[T]: ...
""")
def test_pyi_iterable_getitem(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
T = TypeVar("T")
class Foo:
def __getitem__(self, i: T) -> T: ...
""")
self.Check("""
from typing import Iterable, TypeVar
import foo
T = TypeVar("T")
def f(s: Iterable[T]) -> T: ...
f(foo.Foo())
""", pythonpath=[d.path])
def test_pyi_iterable_iter(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Any
class Foo:
def __iter__(self) -> Any: ...
""")
self.Check("""
from typing import Iterable, TypeVar
import foo
T = TypeVar("T")
def f(s: Iterable[T]) -> T: ...
f(foo.Foo())
""", pythonpath=[d.path])
def test_inherited_abstract_method_error(self):
_, errors = self.InferWithErrors("""
from typing import Iterator
class Foo:
def __iter__(self) -> Iterator[str]:
return __any_object__
def next(self):
return __any_object__
def f(x: Iterator[int]):
pass
f(Foo()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Iterator\[int\].*Foo"})
def test_reversible(self):
self.Check("""
from typing import Reversible
class Foo:
def __reversed__(self):
pass
def f(x: Reversible):
pass
f(Foo())
""")
def test_collection(self):
self.Check("""
from typing import Collection
class Foo:
def __contains__(self, x):
pass
def __iter__(self):
pass
def __len__(self):
pass
def f(x: Collection):
pass
f(Foo())
""")
def test_list_against_collection(self):
self.Check("""
from typing import Collection
def f() -> Collection[str]:
return [""]
""")
def test_hashable(self):
self.Check("""
from typing import Hashable
class Foo:
def __hash__(self):
pass
def f(x: Hashable):
pass
f(Foo())
""")
def test_list_hash(self):
errors = self.CheckWithErrors("""
from typing import Hashable
def f(x: Hashable):
pass
f([]) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Hashable.*List.*__hash__"})
def test_hash_constant(self):
errors = self.CheckWithErrors("""
from typing import Hashable
class Foo:
__hash__ = None
def f(x: Hashable):
pass
f(Foo()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Hashable.*Foo.*__hash__"})
def test_hash_type(self):
self.Check("""
from typing import Hashable, Type
def f(x: Hashable):
pass
def g(x: Type[int]):
return f(x)
""")
def test_hash_module(self):
self.Check("""
import subprocess
from typing import Hashable
def f(x: Hashable):
pass
f(subprocess)
""")
def test_generic_callable(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Generic, TypeVar
T = TypeVar("T")
class Foo(Generic[T]):
def __init__(self, x: T):
self = Foo[T]
def __call__(self) -> T: ...
""")
errors = self.CheckWithErrors("""
from typing import Any, Callable
import foo
def f() -> Callable:
return foo.Foo("")
def g() -> Callable[[], str]:
return foo.Foo("")
def h() -> Callable[[Any], str]:
return foo.Foo("") # bad-return-type[e1]
def i() -> Callable[[], int]:
return foo.Foo("") # bad-return-type[e2]
""", pythonpath=[d.path])
# TODO(rechen): 'T' should be 'str'.
self.assertErrorSequences(errors, {
"e1": ["def <callable>(self, _0) -> str: ...",
"def __call__(self: foo.Foo[T]) -> T: ..."],
"e2": ["def <callable>(self) -> int: ...",
"def __call__(self: foo.Foo[T]) -> T: ..."]})
def test_staticmethod(self):
self.CheckWithErrors("""
from typing import Any, Callable, Protocol
class MyProtocol(Protocol):
@staticmethod
def __call__(a, b) -> int:
return 0
def f() -> MyProtocol:
return __any_object__
def g1(x: Callable[[Any, Any], int]):
pass
def g2(x: Callable[[Any], int]):
pass
def g3(x: Callable[[Any, Any, Any], int]):
pass
def g4(x: Callable[[Any, Any], str]):
pass
g1(f()) # ok
g2(f()) # wrong-arg-types # too few Callable args
g3(f()) # wrong-arg-types # too many Callable args
g3(f()) # wrong-arg-types # wrong Callable return
""")
def test_protocol_caching(self):
self.Check("""
import collections
from typing import Text
class _PortInterface:
def __init__(self):
self._flattened_ports = collections.OrderedDict()
def PortBundle(self, prefix: Text, bundle):
for name, port in bundle.ports.items():
full_name = prefix + "_" + name
self._flattened_ports[full_name] = port
def _GetPortsWithDirection(self):
return collections.OrderedDict(
(name, port) for name, port in self._flattened_ports.items())
""")
def test_custom_protocol(self):
self.Check("""
from typing_extensions import Protocol
class Appendable(Protocol):
def append(self):
pass
class MyAppendable:
def append(self):
pass
def f(x: Appendable):
pass
f([])
f(MyAppendable())
""")
def test_custom_protocol_error(self):
errors = self.CheckWithErrors("""
from typing_extensions import Protocol
class Appendable(Protocol):
def append(self):
pass
class NotAppendable:
pass
def f(x: Appendable):
pass
f(42) # wrong-arg-types[e1]
f(NotAppendable()) # wrong-arg-types[e2]
""")
self.assertErrorRegexes(errors, {
"e1": r"Appendable.*int.*append",
"e2": r"Appendable.*NotAppendable.*append"})
def test_reingest_custom_protocol(self):
ty = self.Infer("""
from typing_extensions import Protocol
class Appendable(Protocol):
def append(self) -> None:
pass
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(ty))
self.Check("""
import foo
class MyAppendable:
def append(self):
pass
def f(x: foo.Appendable):
pass
f([])
f(MyAppendable())
""", pythonpath=[d.path])
def test_reingest_custom_protocol_error(self):
ty = self.Infer("""
from typing_extensions import Protocol
class Appendable(Protocol):
def append(self) -> None:
pass
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(ty))
errors = self.CheckWithErrors("""
import foo
class NotAppendable:
pass
def f(x: foo.Appendable):
pass
f(42) # wrong-arg-types[e1]
f(NotAppendable()) # wrong-arg-types[e2]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {
"e1": r"Appendable.*int.*append",
"e2": r"Appendable.*NotAppendable.*append"})
def test_reingest_custom_protocol_inherit_method(self):
ty = self.Infer("""
from typing_extensions import Protocol
class Appendable(Protocol):
def append(self):
pass
class Mutable(Appendable, Protocol):
def remove(self):
pass
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(ty))
errors = self.CheckWithErrors("""
from foo import Mutable
class NotMutable:
def remove(self):
pass
def f(x: Mutable):
pass
f([]) # ok
f(NotMutable()) # wrong-arg-types[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"Mutable.*NotMutable.*append"})
def test_reingest_custom_protocol_implement_method(self):
ty = self.Infer("""
from typing_extensions import Protocol
class Appendable(Protocol):
def append(self):
pass
class Mixin:
def append(self):
pass
class Removable(Mixin, Appendable, Protocol):
def remove(self):
pass
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(ty))
self.Check("""
from foo import Removable
def f(x: Removable):
pass
class MyRemovable:
def remove(self):
pass
f(MyRemovable())
""", pythonpath=[d.path])
def test_ignore_method_body(self):
self.Check("""
from typing_extensions import Protocol
class Countable(Protocol):
def count(self) -> int:
...
""")
def test_check_method_body(self):
errors = self.CheckWithErrors("""
from typing_extensions import Protocol
class Countable(Protocol):
def count(self) -> int:
... # bad-return-type[e]
class MyCountable(Countable):
def count(self):
return super(MyCountable, self).count()
""")
self.assertErrorRegexes(errors, {"e": r"int.*None.*line 7"})
def test_callback_protocol(self):
self.CheckWithErrors("""
from typing_extensions import Protocol
class Foo(Protocol):
def __call__(self) -> int:
return 0
def f1() -> int:
return 0
def f2(x) -> int:
return x
def f3() -> str:
return ''
def accepts_foo(f: Foo):
pass
accepts_foo(f1)
accepts_foo(f2) # wrong-arg-types
accepts_foo(f3) # wrong-arg-types
""")
def test_callback_protocol_pyi(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Protocol
class Foo(Protocol):
def __call__(self, x: str) -> str: ...
def accepts_foo(f: Foo) -> None: ...
""")
self.CheckWithErrors("""
import foo
def f1(x: str) -> str:
return x
def f2() -> str:
return ''
def f3(x: int) -> str:
return str(x)
foo.accepts_foo(f1)
foo.accepts_foo(f2) # wrong-arg-types
foo.accepts_foo(f3) # wrong-arg-types
""", pythonpath=[d.path])
def test_class_matches_callback_protocol(self):
self.CheckWithErrors("""
from typing_extensions import Protocol
class Foo(Protocol):
def __call__(self) -> int:
return 0
def accepts_foo(f: Foo):
pass
accepts_foo(int)
accepts_foo(str) # wrong-arg-types
""")
def test_class_matches_callback_protocol_pyi(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Protocol
class Foo(Protocol):
def __call__(self) -> int: ...
def accepts_foo(f: Foo) -> None: ...
""")
self.CheckWithErrors("""
import foo
foo.accepts_foo(int)
foo.accepts_foo(str) # wrong-arg-types
""", pythonpath=[d.path])
def test_classmethod(self):
# TODO(rechen): An instance method shouldn't match a classmethod.
self.CheckWithErrors("""
from typing import Protocol
class Foo(Protocol):
@classmethod
def f(cls):
return cls()
class Bar:
@classmethod
def f(cls):
return cls()
class Baz:
def f(self):
return type(self)
class Qux:
pass
def f(x: Foo):
pass
f(Bar())
f(Baz())
f(Qux()) # wrong-arg-types
""")
def test_abstractmethod(self):
self.CheckWithErrors("""
import abc
from typing import Protocol
class Foo(Protocol):
@abc.abstractmethod
def f(self) -> int:
pass
class Bar:
def f(self):
pass
class Baz:
pass
def f(x: Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types
""")
def test_decorated_method(self):
self.Check("""
from typing import Callable
from typing_extensions import Protocol
class Foo(Protocol):
def foo(self):
pass
def decorate(f: Callable) -> Callable:
return f
class Bar:
@decorate
def foo(self):
pass
def accept(foo: Foo):
pass
accept(Bar())
""")
def test_len(self):
self.Check("""
from typing import Generic, Protocol, TypeVar
T = TypeVar('T')
class SupportsLen(Generic[T], Protocol):
def __len__(self) -> int: ...
def f() -> SupportsLen[int]:
return [1, 2, 3]
""")
def test_property(self):
self.Check("""
from typing_extensions import Protocol
class Foo(Protocol):
@property
def name(self) -> str: ...
def f(self) -> int: ...
""")
def test_has_dynamic_attributes(self):
self.Check("""
from typing import Protocol
class Foo(Protocol):
def f(self) -> int: ...
class Bar:
_HAS_DYNAMIC_ATTRIBUTES = True
def f(x: Foo):
pass
f(Bar())
""")
def test_empty(self):
self.Check("""
from typing import Protocol
class Foo(Protocol):
pass
class Bar:
pass
def f(foo: Foo):
pass
f(Bar())
""")
def test_empty_and_generic(self):
self.Check("""
from typing import Protocol, TypeVar
T = TypeVar('T')
class Foo(Protocol[T]):
pass
class Bar:
pass
def f(foo: Foo[int]):
pass
f(Bar())
""")
def test_deduplicate_error_message(self):
# Tests that the 'Attributes not implemented' line appears only once in the
# error message.
errors = self.CheckWithErrors("""
from typing import Callable, Iterable, Optional, Union
DistanceFunctionsType = Iterable[Union[Callable[[str, str], float], str]]
def f(x: DistanceFunctionsType) -> DistanceFunctionsType:
return (x,) # bad-return-type[e]
""")
self.assertErrorRegexes(
errors, {"e": r"Actually returned[^\n]*\nAttributes[^\n]*$"})
def test_annotated_classmethod(self):
self.Check("""
from typing import Protocol
class Foo(Protocol):
@classmethod
def f(cls) -> str: ...
""")
def test_typing_extensions_protocol(self):
self.Check("""
from typing_extensions import SupportsIndex
def f(x: SupportsIndex):
pass
f(0)
""")
def test_not_instantiable(self):
self.CheckWithErrors("""
import abc
from typing import Protocol
class MyProtocol(Protocol):
@abc.abstractmethod
def f(self): ...
class Child(MyProtocol):
pass
Child() # not-instantiable
""")
def test_substitute_typevar(self):
self.Check("""
from typing import Protocol, TypeVar
_T = TypeVar('_T')
_T_int = TypeVar('_T_int', bound=int)
class MyProtocol(Protocol[_T]):
def __getitem__(self, __k: int) -> _T: ...
def f(x: MyProtocol[_T_int]) -> _T_int:
return x[0]
f([0])
f([])
""")
class ProtocolsTestPython3Feature(test_base.BaseTest):
"""Tests for protocol implementation on a target using a Python 3 feature."""
def test_check_iterator(self):
self.Check("""
from typing import Iterator
def f(x: Iterator):
return None
class Foo:
def __next__(self):
return None
def __iter__(self):
return None
foo = Foo()
f(foo)
""")
def test_check_parameterized_iterator(self):
self.Check("""
from typing import Iterator
def f(x: Iterator[int]):
return None
class Foo:
def __next__(self):
return 42
def __iter__(self):
return self
f(Foo())
""")
def test_inherited_abstract_method(self):
self.Check("""
from typing import Iterator
class Foo:
def __iter__(self) -> Iterator[int]:
return __any_object__
def __next__(self):
return __any_object__
def f(x: Iterator[int]):
pass
f(Foo())
""")
def test_check_supports_bytes_protocol(self):
self.Check("""
import protocols
from typing import SupportsBytes
def f(x: protocols.SupportsBytes):
return None
def g(x: SupportsBytes):
return None
class Foo:
def __bytes__(self):
return b"foo"
foo = Foo()
f(foo)
g(foo)
""")
def test_metaclass_abstractness(self):
self.Check("""
import abc
from typing import Protocol
class Meta1(type(Protocol)):
pass
class Meta2(Protocol.__class__):
pass
class Foo(metaclass=Meta1):
@abc.abstractmethod
def foo(self):
pass
class Bar(metaclass=Meta2):
@abc.abstractmethod
def bar(self):
pass
""")
def test_module(self):
foo_ty = self.Infer("""
x: int
def f() -> str:
return 'hello world'
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
errors = self.CheckWithErrors("""
import foo
from typing import Protocol
class ShouldMatch(Protocol):
x: int
def f(self) -> str: ...
class ExtraAttribute(Protocol):
x: int
y: str
class ExtraMethod(Protocol):
def f(self) -> str: ...
def g(self) -> int: ...
class WrongType(Protocol):
x: str
def should_match(x: ShouldMatch):
pass
def extra_attribute(x: ExtraAttribute):
pass
def extra_method(x: ExtraMethod):
pass
def wrong_type(x: WrongType):
pass
should_match(foo)
extra_attribute(foo) # wrong-arg-types[e1]
extra_method(foo) # wrong-arg-types[e2]
wrong_type(foo) # wrong-arg-types[e3]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {
"e1": r"not implemented on module: y",
"e2": r"not implemented on module: g",
"e3": r"x.*expected str, got int",
})
class ProtocolAttributesTest(test_base.BaseTest):
"""Tests for non-method protocol attributes."""
def test_basic(self):
errors = self.CheckWithErrors("""
from typing import Protocol
class Foo(Protocol):
x: int
class Bar:
x: int
class Baz:
x: str
def f(foo: Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"x.*expected int, got str"})
def test_missing(self):
errors = self.CheckWithErrors("""
from typing import Protocol
class Foo(Protocol):
x: int
y: str
class Bar:
y = ''
def f(foo: Foo):
pass
f(Bar()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"Foo.*Bar.*x"})
def test_pyi(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Protocol
class Foo(Protocol):
x: int
""")
self.CheckWithErrors("""
import foo
class Bar:
x = 0
class Baz:
x = '1'
def f(x: foo.Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types
""", pythonpath=[d.path])
def test_pyi_inheritance(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
class Foo:
x: int
""")
self.CheckWithErrors("""
import foo
from typing import Protocol
class Bar(Protocol):
x: int
class Baz(Protocol):
x: str
class Foo2(foo.Foo):
pass
def f(bar: Bar):
pass
def g(baz: Baz):
pass
f(Foo2())
g(Foo2()) # wrong-arg-types
""", pythonpath=[d.path])
def test_instance_attribute(self):
self.CheckWithErrors("""
from typing import Protocol
class Foo(Protocol):
x: int
class Bar:
def __init__(self):
self.x = 0
class Baz:
def __init__(self):
self.x = ''
def f(foo: Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types
""")
def test_property(self):
errors = self.CheckWithErrors("""
from typing import Protocol
class Foo(Protocol):
@property
def x(self) -> int: ...
class Bar:
@property
def x(self):
return 0
class Baz:
@property
def x(self):
return ''
def f(foo: Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"x.*expected int, got str"})
def test_property_in_pyi_protocol(self):
foo_ty = self.Infer("""
from typing import Protocol
class Foo(Protocol):
@property
def x(self) -> int: ...
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
self.CheckWithErrors("""
import foo
class Bar:
@property
def x(self):
return 0
class Baz:
@property
def x(self):
return ''
def f(x: foo.Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types
""", pythonpath=[d.path])
def test_inherit_property(self):
foo_ty = self.Infer("""
class Foo:
@property
def x(self):
return 0
""")
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", pytd_utils.Print(foo_ty))
self.CheckWithErrors("""
import foo
from typing import Protocol
class Protocol1(Protocol):
@property
def x(self) -> int: ...
class Protocol2(Protocol):
@property
def x(self) -> str: ...
class Bar(foo.Foo):
pass
def f1(x: Protocol1):
pass
def f2(x: Protocol2):
pass
f1(Bar())
f2(Bar()) # wrong-arg-types
""", pythonpath=[d.path])
def test_optional(self):
errors = self.CheckWithErrors("""
from typing import Optional, Protocol
class Foo(Protocol):
x: Optional[int]
class Bar:
x = 0
class Baz:
x = ''
def f(x: Foo):
pass
f(Bar())
f(Baz()) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"expected Optional\[int\], got str"})
def test_match_optional_to_optional(self):
self.Check("""
from typing import Optional, Protocol
class Foo(Protocol):
x: Optional[int]
class Bar:
def __init__(self, x: Optional[int]):
self.x = x
def f(x: Foo):
pass
f(Bar(0))
""")
def test_generic(self):
errors = self.CheckWithErrors("""
from typing import Generic, Protocol, Type, TypeVar
T = TypeVar('T')
class Foo(Protocol[T]):
x: T
T2 = TypeVar('T2', bound=Foo[int])
def f(cls: Type[T2]) -> T2:
return cls()
class Bar:
x = 0
class Baz:
x = ''
f(Bar) # ok
f(Baz) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {"e": r"expected int, got str"})
def test_generic_from_pyi(self):
with test_utils.Tempdir() as d:
d.create_file("foo.pyi", """
from typing import Protocol, TypeVar
T = TypeVar('T')
class Foo(Protocol[T]):
x: T
""")
errors = self.CheckWithErrors("""
from typing import Type, TypeVar
import foo
T = TypeVar('T', bound=foo.Foo[int])
def f(cls: Type[T]) -> T:
return cls()
class Bar:
x = 0
class Baz:
x = ''
f(Bar) # ok
f(Baz) # wrong-arg-types[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {"e": r"expected int, got str"})
def test_generic_used_in_pyi(self):
with test_utils.Tempdir() as d:
d.create_file("protocol.pyi", """
from typing import Dict, List, Protocol, TypeVar
T = TypeVar('T')
class Foo(Protocol[T]):
x: Dict[str, List[T]]
""")
d.create_file("util.pyi", """
import protocol
from typing import Type, TypeVar
T = TypeVar('T', bound=protocol.Foo[int])
def f(x: Type[T]) -> T: ...
""")
errors = self.CheckWithErrors("""
from typing import Dict, List
import util
class Bar:
x: Dict[str, List[int]]
class Baz:
x: Dict[str, List[str]]
util.f(Bar) # ok
util.f(Baz) # wrong-arg-types[e]
""", pythonpath=[d.path])
self.assertErrorRegexes(errors, {
"e": (r"expected Dict\[str, List\[int\]\], "
r"got Dict\[str, List\[str\]\]")})
def test_match_multi_attributes_against_dataclass_protocol(self):
errors = self.CheckWithErrors("""
from typing import Dict, Protocol, TypeVar, Union
import dataclasses
T = TypeVar('T')
class Dataclass(Protocol[T]):
__dataclass_fields__: Dict[str, dataclasses.Field[T]]
def f(x: Dataclass[int]):
pass
@dataclasses.dataclass
class ShouldMatch:
x: int
y: int
@dataclasses.dataclass
class ShouldNotMatch:
x: int
y: str
f(ShouldMatch(0, 0))
f(ShouldNotMatch(0, '')) # wrong-arg-types[e]
""")
self.assertErrorRegexes(errors, {
"e": (r"expected Dict\[str, dataclasses\.Field\[int\]\], "
r"got Dict\[str, dataclasses\.Field\[Union\[int, str\]\]\]")})
if __name__ == "__main__":
test_base.main()
| true |
8f1205a98e51cab89d14b2778a86dc73384712e0 | Python | kbuczynski-nashpl/tibia.py | /tibiapy/abc.py | UTF-8 | 17,703 | 2.59375 | 3 | [
"Apache-2.0"
] | permissive | import abc
import datetime
import json
import urllib.parse
from collections import OrderedDict
from enum import Enum
from tibiapy.enums import HouseType, HouseStatus, HouseOrder
CHARACTER_URL = "https://www.tibia.com/community/?subtopic=characters&name=%s"
CHARACTER_URL_TIBIADATA = "https://api.tibiadata.com/v2/characters/%s.json"
HOUSE_URL = "https://www.tibia.com/community/?subtopic=houses&page=view&houseid=%d&world=%s"
HOUSE_URL_TIBIADATA = "https://api.tibiadata.com/v2/house/%s/%d.json"
HOUSE_LIST_URL = "https://www.tibia.com/community/?subtopic=houses&world=%s&town=%s&type=%s&status=%s&order=%s"
HOUSE_LIST_URL_TIBIADATA = "https://api.tibiadata.com/v2/houses/%s/%s/%s.json"
GUILD_URL = "https://www.tibia.com/community/?subtopic=guilds&page=view&GuildName=%s"
GUILD_URL_TIBIADATA = "https://api.tibiadata.com/v2/guild/%s.json"
GUILD_LIST_URL = "https://www.tibia.com/community/?subtopic=guilds&world="
GUILD_LIST_URL_TIBIADATA = "https://api.tibiadata.com/v2/guilds/%s.json"
NEWS_URL = "https://www.tibia.com/news/?subtopic=newsarchive&id=%d"
NEWS_SEARCH_URL = "https://www.tibia.com/news/?subtopic=newsarchive"
WORLD_URL = "https://www.tibia.com/community/?subtopic=worlds&world=%s"
WORLD_URL_TIBIADATA = "https://api.tibiadata.com/v2/world/%s.json"
class Serializable:
"""Contains methods to make a class convertible to JSON.
.. note::
| There's no way to convert JSON strings back to their original object.
| Attempting to do so may result in data loss.
"""
@classmethod
def __slots_inherited__(cls):
slots = []
for base in cls.__bases__:
try:
# noinspection PyUnresolvedReferences
slots.extend(base.__slots_inherited__())
except AttributeError:
continue
slots.extend(getattr(cls, "__slots__", []))
return tuple(OrderedDict.fromkeys(slots))
def keys(self):
return list(self.__slots_inherited__())
def __getitem__(self, item):
if item in self.keys():
try:
return getattr(self, item)
except AttributeError:
return None
else:
raise KeyError(item)
def __setitem__(self, key, value):
if key in self.keys():
setattr(self, key, value)
else:
raise KeyError(key)
@staticmethod
def _try_dict(obj):
try:
if isinstance(obj, datetime.datetime):
return obj.isoformat()
if isinstance(obj, Enum):
return obj.value
return {k: v for k, v in dict(obj).items() if v is not None}
except TypeError:
return str(obj)
def to_json(self, *, indent=None, sort_keys=False):
"""Gets the object's JSON representation.
Parameters
----------
indent: :class:`int`, optional
Number of spaces used as indentation, ``None`` will return the shortest possible string.
sort_keys: :class:`bool`, optional
Whether keys should be sorted alphabetically or preserve the order defined by the object.
Returns
-------
:class:`str`
JSON representation of the object.
"""
return json.dumps({k: v for k, v in dict(self).items() if v is not None}, indent=indent, sort_keys=sort_keys,
default=self._try_dict)
class BaseCharacter(Serializable, metaclass=abc.ABCMeta):
"""Base class for all character classes.
Implements common properties methods for characters.
The following implement this class:
- :class:`.Character`
- :class:`.GuildInvite`
- :class:`.GuildMember`
- :class:`.OnlineCharacter`
- :class:`.OtherCharacter`
Attributes
----------
name: :class:`str`
The name of the character.
"""
__slots__ = ("name", )
def __eq__(self, o: object) -> bool:
"""Two characters are considered equal if their names are equal."""
if isinstance(o, self.__class__):
return self.name.lower() == o.name.lower()
return False
def __repr__(self):
return "<{0.__class__.__name__} name={0.name!r}>".format(self,)
@property
def url(self):
"""
:class:`str`: The URL of the character's information page on Tibia.com
"""
return self.get_url(self.name)
@property
def url_tibiadata(self):
"""
:class:`str`: The URL of the character's information on TibiaData.com.
"""
return self.get_url_tibiadata(self.name)
@classmethod
def get_url(cls, name):
"""Gets the Tibia.com URL for a given character name.
Parameters
------------
name: :class:`str`
The name of the character.
Returns
--------
:class:`str`
The URL to the character's page."""
return CHARACTER_URL % urllib.parse.quote(name.encode('iso-8859-1'))
@classmethod
def get_url_tibiadata(cls, name):
"""Gets the TibiaData.com URL for a given character name.
Parameters
------------
name: :class:`str`
The name of the character.
Returns
--------
:class:`str`
The URL to the character's page on TibiaData.com."""
return CHARACTER_URL_TIBIADATA % urllib.parse.quote(name)
class BaseGuild(Serializable, metaclass=abc.ABCMeta):
"""Base class for Guild classes.
The following implement this class:
- :class:`.Guild`
- :class:`.GuildMembership`
Attributes
----------
name: :class:`str`
The name of the guild.
"""
__slots__ = ("name",)
def __repr__(self):
return "<{0.__class__.__name__} name={0.name!r}>".format(self)
@property
def url(self):
""":class:`str`: The URL to the guild's information page on Tibia.com."""
return self.get_url(self.name)
@property
def url_tibiadata(self):
""":class:`str`: The URL to the guild on TibiaData.com."""
return self.get_url_tibiadata(self.name)
@classmethod
def get_url(cls, name):
"""Gets the Tibia.com URL for a given guild name.
Parameters
------------
name: :class:`str`
The name of the guild.
Returns
--------
:class:`str`
The URL to the guild's page"""
return GUILD_URL % urllib.parse.quote(name.encode('iso-8859-1'))
@classmethod
def get_url_tibiadata(cls, name):
"""Gets the TibiaData.com URL for a given guild name.
Parameters
------------
name: :class:`str`
The name of the guild.
Returns
--------
:class:`str`
The URL to the guild's page on TibiaData.com."""
return GUILD_URL_TIBIADATA % urllib.parse.quote(name)
@classmethod
def get_world_list_url(cls, world):
"""Gets the Tibia.com URL for the guild section of a specific world.
Parameters
----------
world: :class:`str`
The name of the world.
Returns
-------
:class:`str`
The URL to the guild's page
"""
return GUILD_LIST_URL + urllib.parse.quote(world.title().encode('iso-8859-1'))
@classmethod
def get_world_list_url_tibiadata(cls, world):
"""Gets the TibiaData.com URL for the guild list of a specific world.
Parameters
----------
world: :class:`str`
The name of the world.
Returns
-------
:class:`str`
The URL to the guild's page.
"""
return GUILD_LIST_URL_TIBIADATA % urllib.parse.quote(world.title().encode('iso-8859-1'))
class BaseHouse(Serializable, metaclass=abc.ABCMeta):
"""Base class for all house classes
The following implement this class:
- :class:`.abc.BaseHouseWithId`
- :class:`.GuildHouse`
Attributes
----------
name: :class:`str`
The name of the house.
world: :class:`str`
The name of the world the house belongs to.
status: :class:`HouseStatus`
The current status of the house.
type: :class:`HouseType`
The type of the house.
"""
__slots__ = ("name", "world", "status", "type")
def __repr__(self):
return "<{0.__class__.__name__} name={0.name!r} world={0.world!r} status={0.status!r} type={0.type!r}>"\
.format(self,)
def __eq__(self, o: object) -> bool:
"""Two houses are considered equal if their names are equal."""
if isinstance(o, self.__class__):
return self.name.lower() == o.name.lower()
return False
@classmethod
def get_url(cls, house_id, world):
""" Gets the Tibia.com URL for a house with the given id and world.
Parameters
----------
house_id: :class:`int`
The internal id of the house.
world: :class:`str`
The world of the house.
Returns
-------
The URL to the house in Tibia.com
"""
return HOUSE_URL % (house_id, world)
@classmethod
def get_url_tibiadata(cls, house_id, world):
""" Gets the TibiaData.com URL for a house with the given id and world.
Parameters
----------
house_id: :class:`int`
The internal id of the house.
world: :class:`str`
The world of the house.
Returns
-------
The URL to the house in TibiaData.com
"""
return HOUSE_URL_TIBIADATA % (world, house_id)
@classmethod
def get_list_url(cls, world, town, house_type: HouseType = HouseType.HOUSE, status: HouseStatus = None,
order=HouseOrder.NAME):
"""
Gets the URL to the house list on Tibia.com with the specified parameters.
Parameters
----------
world: :class:`str`
The name of the world.
town: :class:`str`
The name of the town.
house_type: :class:`HouseType`
Whether to search for houses or guildhalls.
status: :class:`HouseStatus`, optional
The house status to filter results. By default no filters will be applied.
order: :class:`HouseOrder`, optional
The ordering to use for the results. By default they are sorted by name.
Returns
-------
:class:`str`
The URL to the list matching the parameters.
"""
house_type = "%ss" % house_type.value
status = "" if status is None else status.value
return HOUSE_LIST_URL % (urllib.parse.quote(world), urllib.parse.quote(town), house_type, status, order.value)
@classmethod
def get_list_url_tibiadata(cls, world, town, house_type: HouseType = HouseType.HOUSE):
"""
Gets the URL to the house list on Tibia.com with the specified parameters.
Parameters
----------
world: :class:`str`
The name of the world.
town: :class:`str`
The name of the town.
house_type: :class:`HouseType`
Whether to search for houses or guildhalls.
Returns
-------
:class:`str`
The URL to the list matching the parameters.
"""
house_type = "%ss" % house_type.value
return HOUSE_LIST_URL_TIBIADATA % (urllib.parse.quote(world), urllib.parse.quote(town), house_type)
class BaseHouseWithId(BaseHouse):
"""A derivative of :class:`BaseHouse`
Implements the :py:attr:`id` attribute and dependant functions and properties.
The following implement this class:
- :class:`.House`
- :class:`.CharacterHouse`
Attributes
----------
id: :class:`int`
The internal ID of the house. This is used on the website to identify houses.
name: :class:`str`
The name of the house.
world: :class:`str`
The name of the world the house belongs to.
status: :class:`HouseStatus`
The current status of the house.
type: :class:`HouseType`
The type of the house.
"""
__slots__ = ("id",)
def __eq__(self, o: object) -> bool:
"""Two houses are considered equal if their names or ids are equal."""
if isinstance(o, self.__class__):
return self.name.lower() == o.name.lower() or self.id == o.id
return False
@property
def url(self):
""":class:`str`: The URL to the Tibia.com page of the house."""
return self.get_url(self.id, self.world) if self.id and self.world else None
@property
def url_tibiadata(self):
""":class:`str`: The URL to the TibiaData.com page of the house."""
return self.get_url_tibiadata(self.id, self.world) if self.id and self.world else None
class BaseNews(Serializable, metaclass=abc.ABCMeta):
"""Base class for all news classes
Implements the :py:attr:`id` attribute and common properties.
The following implement this class:
- :class:`.News`
- :class:`.ListedNews`
Attributes
----------
id: :class:`int`
The internal ID of the news entry.
title: :class:`str`
The title of the news entry.
category: :class:`.NewsCategory`
The category this belongs to.
category_icon: :class:`str`
The URL of the icon corresponding to the category.
date: :class:`datetime.date`
The date when the news were published.
"""
__slots__ = (
"id",
"title",
"category",
"category_icon",
"date",
)
def __eq__(self, o: object) -> bool:
"""Two news articles are considered equal if their names or ids are equal."""
if isinstance(o, self.__class__):
return self.id == o.id
return False
@property
def url(self):
""":class:`str`: The URL to the Tibia.com page of the news entry."""
return self.get_url(self.id)
@classmethod
def get_url(cls, news_id):
"""Gets the Tibia.com URL for a news entry by its id.
Parameters
------------
news_id: :class:`int`
The id of the news entry.
Returns
--------
:class:`str`
The URL to the news' page"""
return NEWS_URL % news_id
@classmethod
def get_list_url(cls):
"""Gets the URL to Tibia.com's news archive page.
Notes
-----
It is not possible to perform a search using query parameters.
News searches can only be performed using POST requests sending the parameters as form-data.
Returns
-------
:class:`str`
The URL to the news archive page on Tibia.com.
"""
return NEWS_SEARCH_URL
class BaseWorld(Serializable):
"""Base class for all World classes.
The following implement this class:
- :class:`.ListedWorld`
- :class:`.World`
Attributes
----------
name: :class:`str`
The name of the world.
status: :class:`str`
The current status of the world.
online_count: :class:`int`
The number of currently online players in the world.
location: :class:`WorldLocation`
The physical location of the game servers.
pvp_type: :class:`PvpType`
The type of PvP in the world.
transfer_type: :class:`TransferType`
The type of transfer restrictions this world has.
battleye_protected: :class:`bool`
Whether the server is currently protected with BattlEye or not.
battleye_date: :class:`datetime.date`
The date when BattlEye was added to this world.
If this is ``None`` and the world is protected, it means the world was protected from the beginning.
experimental: :class:`bool`
Whether the world is experimental or not.
tournament_world_type: :class:`TournamentWorldType`
The type of tournament world. ``None`` if this is not a tournament world.
premium_only: :class:`bool`
Whether only premium account players are allowed to play in this server.
"""
__slots__ = (
"name",
"status",
"location",
"online_count",
"pvp_type",
"battleye_protected",
"battleye_date",
"experimental",
"premium_only",
"tournament_world_type",
"transfer_type"
)
def __repr__(self):
return "<{0.__class__.__name__} name={0.name!r} location={0.location!r} pvp_type={0.pvp_type!r}>".format(self)
@property
def url(self):
""":class:`str`: URL to the world's information page on Tibia.com."""
return self.get_url(self.name)
@property
def url_tibiadata(self):
""":class:`str`: URL to the world's information page on TibiaData.com."""
return self.get_url_tibiadata(self.name)
@classmethod
def get_url(cls, name):
"""Gets the URL to the World's information page on Tibia.com.
Parameters
----------
name: :class:`str`
The name of the world.
Returns
-------
:class:`str`
The URL to the world's information page.
"""
return WORLD_URL % name.title()
@classmethod
def get_url_tibiadata(cls, name):
"""Gets the URL to the World's information page on TibiaData.com.
Parameters
----------
name: :class:`str`
The name of the world.
Returns
-------
:class:`str`
The URL to the world's information page on TibiaData.com.
"""
return WORLD_URL_TIBIADATA % name.title()
| true |
c103d3d886c7f4dc966e6b4f2839f1930b0ba91b | Python | reinaaa05/python | /dotinstall/python_lessons/myapp_28.py | UTF-8 | 235 | 3.078125 | 3 | [] | no_license | #辞書型
sales = {"taguchi":200,"fkoji":400}
# print(sales["taguchi"])
# sales["taguchi"] = 300
# sales["dorinstall"] = 500
# del(sales["fkoji"])
# print(sales)
for key,value in sales.items():
print("{0}: {1}".format(key,value))
| true |
05cecd20a6f7c83634f2a7b8bf158272d1d81c62 | Python | Yobretaw/AlgorithmProblems | /Py_leetcode/121_bestTimeToBuyAndSellStock.py | UTF-8 | 639 | 3.671875 | 4 | [] | no_license | import sys
import os
import math
import imp
from collections import deque
"""
Say you have an array for which the ith element is the price of a given stock on day i.
If you were only permitted to complete at most one transaction (ie, buy one and sell one share
of the stock), design an algorithm to find the maximum profit.
"""
def maxProfit(prices):
max_profit = 0
lowest_price = sys.maxint
for i in range(0, len(prices)):
lowest_price = min(lowest_price, prices[i])
max_profit = max(max_profit, prices[i] - lowest_price)
return max_profit
#prices = [1, 2, 3, 4, 5]
#print maxProfit(prices)
| true |
e39de1bfcb11f3de2715a6ba8ec8e99a53a47b55 | Python | pratikkatkade/spam-filter | /script.py | UTF-8 | 1,912 | 3.015625 | 3 | [] | no_license | from spam_data import training_spam_docs, training_doc_tokens, training_labels
from sklearn.naive_bayes import MultinomialNB
from preprocessing import preprocess_text
# Add your email text to test_text between the triple quotes:
test_text = """
Dear Customer,
Greetings from Flipkart!
We've created a new Flipkart account for you. A Flipkart account gives you a personalized shopping experience.
Your account details are:
Email for login: pratikk@gmail.com
Activation link: https://www.flipkart.com/account/guestaccountsignup?v1=J1KiIAZGoSWtMd2OLHbKIrJ1jlvUzLefnKAfjn8f/1pH0=
Flipkart Account benefits:
? Enjoy a faster checkout
? Track / Cancel / Return orders online
? Print / email invoices
? Add items to your wishlist and share them with friends and family
? Save your contact details so that you don\'t have to retype every time you order
? And more...
"""
test_tokens = preprocess_text(test_text)
def create_features_dictionary(document_tokens):
features_dictionary = {}
index = 0
for token in document_tokens:
if token not in features_dictionary:
features_dictionary[token] = index
index += 1
return features_dictionary
def tokens_to_bow_vector(document_tokens, features_dictionary):
bow_vector = [0] * len(features_dictionary)
for token in document_tokens:
if token in features_dictionary:
feature_index = features_dictionary[token]
bow_vector[feature_index] += 1
return bow_vector
bow_sms_dictionary = create_features_dictionary(training_doc_tokens)
training_vectors = [tokens_to_bow_vector(training_doc, bow_sms_dictionary) for training_doc in training_spam_docs]
test_vectors = [tokens_to_bow_vector(test_tokens, bow_sms_dictionary)]
spam_classifier = MultinomialNB()
spam_classifier.fit(training_vectors, training_labels)
predictions = spam_classifier.predict(test_vectors)
print("Looks like a normal email!" if predictions[0] == 0 else "You've got spam!") | true |
764c1a0dba2d7b0abd992d25b82e296d9ec91c4c | Python | prathamesh2901/Flask_Corona_App_Read | /models/state.py | UTF-8 | 816 | 2.796875 | 3 | [] | no_license | from db import db
from sqlalchemy import text, desc
class StateModel(db.Model):
__tablename__= 'states'
name = db.Column(db.String(50), primary_key=True)
date = db.Column(db.String(50), primary_key=True)
cases = db.Column(db.Integer)
deaths = db.Column(db.Integer)
recoveries = db.Column(db.Integer)
def __init__(self, name, date, cases, deaths, recoveries):
self.name = name
self.date = date
self.cases = cases
self.deaths = deaths
self.recoveries = recoveries
def json(self):
return {'name': self.name, 'date': self.date, 'cases': self.cases, 'deaths': self.deaths, 'recoveries': self.recoveries}
@classmethod
def find_by_state(cls, name):
return cls.query.filter_by(name=name).order_by(desc(cls.date)).first()
| true |
f6c4881fbc86578da612dbde8ce98b726193146f | Python | inaheaven/Finance_Tool | /Finance_Data_Scraper/kosdaq_scraper.py | UTF-8 | 2,443 | 2.609375 | 3 | [] | no_license | import urllib.parse
import pandas as pd
import csv
import pandas_datareader.data as web
import datetime
MARKET_CODE_DICT = {
'kospi': 'stockMkt',
'kosdaq': 'kosdaqMkt',
'konex': 'konexMkt'
}
DOWNLOAD_URL = 'kind.krx.co.kr/corpgeneral/corpList.do'
PORTAL = 'yahoo'
start_date = datetime.datetime(2008, 1, 1)
end_date = datetime.datetime(2019, 12, 31)
def download_stock_codes(market=None, delisted=False):
try:
params = {'method': 'download'}
if market.lower() in MARKET_CODE_DICT:
params['marketType'] = MARKET_CODE_DICT[market]
if not delisted:
params['searchType'] = 13
params_string = urllib.parse.urlencode(params)
request_url = urllib.parse.urlunsplit(['http', DOWNLOAD_URL, '', params_string, ''])
df = pd.read_html(request_url, header=0)[0]
df.종목코드 = df.종목코드.map('{:06d}'.format)
df.to_csv('./KOSDAQ.csv', sep='\t', encoding='utf-8')
return df
except Exception as e:
print('Errors While Scraping Index Data: ' + str(e))
finally:
temp_for_sort = []
with open('./KOSDAQ.csv', 'rt', encoding='utf-8') as in_file:
for sort_line in in_file:
temp_for_sort.append(sort_line)
temp_for_sort.sort()
with open('./KOSDAQ.csv', 'w') as out_file:
seen = set()
for line in temp_for_sort:
if line in seen:
continue
else:
if not line == None:
seen.add(line)
sorted(seen)
out_file.write(line)
print("Scraping Stock Index Data Completed.")
def stock_price_data(stocks, start_date, end_date):
results = {}
for code in kosdaq_stocks.종목코드:
ticker = code+'.KQ'
try:
gs = web.DataReader(ticker, PORTAL, start_date, end_date)
print("ticker:", ticker)
gs['Adj Close'].to_csv('./kosdaq/{}.csv'.format(ticker), header=False)
except Exception as e:
print("Scarping Price Data of ", ticker, "is not accessible")
pass
# df = pd.concat(results, axis=1)
# df.loc[:, pd.IndexSlice[:, 'Adj Close']].tail()
# print(df)
kosdaq_stocks = download_stock_codes('kosdaq')
print(len(kosdaq_stocks))
# stock_price_data(kosdaq_stocks, start_date, end_date)
# print("KOSDAQ Scraping Completed.") | true |
d0840126d60d8ed4c697114400d94b6168f4772b | Python | zhaosir/Demo | /tornado/runon.py | UTF-8 | 477 | 2.5625 | 3 | [] | no_license | import tornado.ioloop
from tornado.gen import coroutine
from tornado.concurrent import Future
import time
from concurrent.futures import ThreadPoolExecutor
from tornado.concurrent import run_on_executor
class T(object):
executor = ThreadPoolExecutor(2)
@run_on_executor
def test(self, y):
time.sleep(1)
return 1 + y
class H(object):
@staticmethod
@coroutine
def add(y):
t = T()
ret = yield t.test(y)
print ret
| true |
20552d6df997a0219a14396c55a8c810c566b9f8 | Python | duhdoesk/gerenciador-estoque | /gerenciador.py | UTF-8 | 6,781 | 2.78125 | 3 | [] | no_license | import pymysql
import funcoes_produtos as fprod
import funcoes_materiais as fmat
import dicionario as dicio
host, user, pw, db, port = 'localhost', 'aplicacao', '123456', 'gerenciamento', 3306
con = pymysql.connect(host, user, pw, db, port)
c = con.cursor(pymysql.cursors.DictCursor)
def time():
from datetime import datetime
# localtime = time.asctime( time.localtime(time.time()) )
localtime = f"{datetime.now():%d/%m/%Y às %H:%M}"
return str(localtime)
def entrada_produtos(lista_id, lista_quantidade):
    """Register finished-product stock entries.

    lista_id / lista_quantidade are parallel lists (product id, quantity).
    For each pair: increase finished-goods stock, append the operation to the
    log files and write off the consumed raw materials.
    """
    # arguments arrive as parallel lists with int items
    i = 0
    while i < len(lista_id):
        # increase the finished-product stock
        fprod.movimenta_estoque(lista_id[i], lista_quantidade[i], 'soma')
        # append the operation to the log
        tempo = time()
        texto = str(tempo + ' - ' + str(lista_quantidade[i]) + ' unidades do produto ' + str(lista_id[i]) + ' foram inseridas no estoque de produto acabado.\n')
        registro(texto)
        # write off the raw materials consumed by this production
        fmat.baixa_materiais(lista_id[i], lista_quantidade[i])
        i += 1
def saida_produtos(lista_id, lista_quantidade, destino, data='', descricao=''):
    """Ship finished products out of stock and record the load.

    lista_id / lista_quantidade are parallel lists (product id, quantity);
    destino is the shipment destination and data an optional reference date.
    *descricao* is a new optional description (defaults to '' so existing
    positional callers keep working).
    """
    i = 0
    while i < len(lista_id):
        fprod.movimenta_estoque(lista_id[i], lista_quantidade[i])
        # Bug fix: the original referenced an undefined name `descricao`,
        # raising NameError on every call; it is now a keyword parameter.
        fprod.carga(lista_id[i], lista_quantidade[i], destino, descricao, data)
        # append the operation to the log
        tempo = time()
        texto = str(tempo + ' - ' + str(lista_quantidade[i]) + ' unidades do produto ' + str(lista_id[i]) + ' foram expedidas do estoque de produto acabado.\n')
        registro(texto)
        i += 1
def saida_produtos_inter(lista_id, lista_quantidade, lista_destino, lista_descricao, data=''):
    """Ship products with a per-item destination and description.

    All four list arguments are parallel; each item is moved out of stock,
    registered as a load and appended to the log files.
    """
    # arguments arrive as parallel lists with int items
    i = 0
    while i < len(lista_id):
        lista_quantidade[i] and None  # no-op removed: see loop body below
        i += 1
def entrada_materiais(lista_id, lista_quantidades):
    """Add raw materials/supplies to stock and log each entry.

    lista_id / lista_quantidades are parallel lists (material id, quantity).
    """
    # arguments arrive as parallel lists with str items
    i = 0
    while i < len(lista_id):
        fmat.entrada_material(lista_id[i], lista_quantidades[i])
        # append the operation to the log
        tempo = time()
        texto = str(tempo + ' - ' + str(lista_quantidades[i]) + ' unidades do material ' + str(lista_id[i]) + ' foram inseridas no estoque de materiais.\n')
        registro(texto)
        i += 1
def registro(texto):
    """Append *texto* to both persistent log files.

    The same text goes to the full history (registro.txt) and to the pending
    updates file (atualizacoes.txt).
    """
    caminhos = (
        'C:/Users/edusc/OneDrive/Área de Trabalho/gerenciador estoque/registro.txt',
        'C:/Users/edusc/OneDrive/Área de Trabalho/gerenciador estoque/atualizacoes.txt',
    )
    # `with` guarantees the handles are closed even if a write fails; the
    # original left files open on error and shadowed this function's name
    # with the local file-handle variable.
    for caminho in caminhos:
        with open(caminho, 'a') as arquivo:
            arquivo.write(texto)
def format_list(opcao, estoque, id_item, item, c01='', c02='', c03=''):
    """Format one listing row as fixed-width, left-justified columns.

    opcao '1' yields the wide product layout (6 columns); any other value
    yields the compact material layout (4 columns).
    """
    estoque, id_item, item, c01, c02, c03 = (
        str(v) for v in (estoque, id_item, item, c01, c02, c03)
    )
    if opcao == '1':
        return (estoque.ljust(8) + id_item.ljust(10) + item.ljust(24)
                + c01.ljust(14) + c02.ljust(10) + c03.ljust(10))
    return c01.ljust(8) + estoque.ljust(8) + id_item.ljust(10) + item.ljust(24)
def imprime_lista(tabela, opcao='1'):
    """Print a formatted stock listing for table 'produtos' or 'materiais'.

    opcao '1' limits the listing to items with positive stock; anything else
    lists every row.  Uses the module-level dict cursor `c`.
    """
    if opcao == '1':
        opcao = 'estoque > 0'
    else:
        opcao = 'estoque >= 0'
    # NOTE(review): the query is built by string concatenation; this is safe
    # only because both operands are internal literals — never pass user input.
    query = 'SELECT * FROM ' + tabela + ' WHERE ' + opcao
    c.execute(query)
    itens = []
    linhas = c.fetchall()
    for linha in linhas:
        itens.append(list(linha.values()))
    if tabela == 'materiais':
        print('\n' + ('#' * 80) + '\n', '\n' + 'Opção: Imprimir lista de materiais em estoque' + '\n')
        print(format_list('2', 'estoque', 'id', 'material', 'minimo') + '\n' + ('-' * 80))
        for item in itens:
            # item[-0] is simply item[0] (the id column).
            print(format_list('2', item[-2], item[-0], item[1], item[-1]))
        print('\n' + ('#' * 80) + '\n')
    else:
        print('\n' + ('#' * 80) + '\n', '\n' + 'Opção: Imprimir lista de produtos em estoque' + '\n')
        print(format_list('1', 'estoque', 'id', 'produto', 'condição', 'aba', 'cor') + '\n' + ('-' * 80))
        for item in itens:
            print(format_list('1', item[-1], item[0], item[1], item[2], item[3], item[4]))
        print('\n' + ('#' * 80) + '\n')
def main():
    """Interactive menu loop of the stock manager (runs until option 0)."""
    global c, con
    menu = '9'  # any value != '0' so the loop starts
    while menu != '0':
        print(
            'Bem vindo ao Gerenciador de Estoques ES 2020!\n\n'
            'Selecione a opção desejada:\n\n'
            '1 - Entrada de Produto Acabado.\n'
            '2 - Expedição de Produto Acabado.\n'
            '3 - Entrada de Materiais / Insumos.\n'
            '4 - Imprimir lista de Produtos em estoque.\n'
            '5 - Imprimir lista de Materiais em estoque.\n'
            '0 - Sair.\n'
        )
        menu = str(input())
        # Option 1: read (id, quantity) pairs until '0', then book the entry.
        if menu == '1':
            lista_id = []
            lista_quantidade = []
            produto = '1'
            while produto != '0':
                produto = input('Insira o id do produto (ou 0 para encerrar): ')
                if produto != '0':
                    quantidade = input('Insira a quantidade do produto a ser inserida: ')
                    lista_id.append(str(produto))
                    lista_quantidade.append(int(quantidade))
            print('')
            entrada_produtos(lista_id, lista_quantidade)
        # Option 2: same pair-reading loop, plus date/destination, then ship.
        if menu == '2':
            data = input('Insira a data referência (opcional): ')
            destino = input('Insira o destino da carga: ')
            lista_id = []
            lista_quantidade = []
            produto = '1'
            while produto != '0':
                produto = input('\nInsira o id do produto (ou 0 para encerrar): ')
                if produto != '0':
                    quantidade = input('Insira a quantidade do produto a ser expedida: ')
                    lista_id.append(str(produto))
                    lista_quantidade.append(int(quantidade))
            print('')
            saida_produtos(lista_id, lista_quantidade, destino, data)
        # Option 3: material entries (same reading pattern).
        if menu == '3':
            lista_id = []
            lista_quantidade = []
            produto = '1'
            while produto != '0':
                produto = input('\nInsira o id do material (ou 0 para encerrar): ')
                if produto != '0':
                    quantidade = input('Insira a quantidade de material a ser inserido: ')
                    lista_id.append(str(produto))
                    lista_quantidade.append(int(quantidade))
            print('')
            entrada_materiais(lista_id, lista_quantidade)
        # Options 4/5: listings, filtered or complete.
        if menu == '4':
            print(
                'Escolha a opção de visualização:\n\n'
                '1 - Somente produtos em estoque\n'
                '2 - Lista Completa\n'
            )
            opcao = str(input())
            print('')
            imprime_lista('produtos', opcao)
        if menu == '5':
            print(
                'Escolha a opção de visualização:\n\n'
                '1 - Somente materiais em estoque\n'
                '2 - Lista Completa\n'
            )
            opcao = str(input())
            print('')
            imprime_lista('materiais', opcao)
        if menu == '0':
            return
# main() | true |
e948723e17f4e62afd408ec9d032b8cf176db327 | Python | unitn-sml/CAN | /src/semantic_loss/script_constraints/parity_check_20.py | UTF-8 | 1,110 | 3.453125 | 3 | [] | no_license | """
Hastily put togheter to generate parity check constraints to use later for semantic loss,
change n and m to your liking.
"""
def get_row_odd_states(row_index, m):
    """Build the Xor(...) expression over row cells X.row_index.1..m-1."""
    cells = ",".join("X.%s.%s" % (row_index, col) for col in range(1, m))
    return "Xor(%s)" % cells
def get_column_odd_states(column_index, m):
    """Build the Xor(...) expression over column cells X.1..m-1.column_index."""
    cells = ",".join("X.%s.%s" % (row, column_index) for row in range(1, m))
    return "Xor(%s)" % cells
# what to write to file
# First line declares the grid shape expected by the semantic-loss parser.
# NOTE(review): the shape says [20, 20] while n = m = 10 and the output file
# is named parity_check_10_10.txt — confirm which dimensions are intended.
output = ["shape [20, 20]"]
# number of control pixels
n = 10
# number of pixels to check for parity for each row/column
m = 10
for i in range(1, n):
    # Row i's control pixel X.i.0 must equal the parity (Xor) of its row.
    row_check = "X.%s.0" % i
    row_OR = get_row_odd_states(i, m)
    row_check = "Equivalent(%s, %s)" % (row_check, row_OR)
    output.append(row_check)
    # Column i's control pixel X.0.i must equal the parity of its column.
    col_check = "X.0.%s" % i
    col_OR = get_column_odd_states(i, m)
    col_check = "Equivalent(%s, %s)" % (col_check, col_OR)
    output.append(col_check)
output = "\n".join(output)
with open("parity_check_10_10.txt", "w") as file:
    file.write(output)
| true |
4249adea520e61b12ed39f7feb4566e3c427eb24 | Python | ceto2016/python | /assignment/1/exper_assignmen8.py | UTF-8 | 419 | 3.125 | 3 | [] | no_license | std1 = int(input("please enter std 1 mark from 100 "))
# Read marks for students 2-5 (student 1 is read above), then report stats.
std2 = int(input("please enter std 2 mark from 100 "))
std3 = int(input("please enter std 3 mark from 100 "))
std4 = int(input("please enter std 4 mark from 100 "))
std5 = int(input("please enter std 5 mark from 100 "))
total = std1 + std2 + std3+std4+std5
avg = total / 5
# NOTE(review): (total/100)*100 simplifies to total itself; the percentage of
# the 500-mark maximum would be (total/500)*100 — confirm the intended formula.
prc = (total/100)*100
print("total : ", total)
print("avg : ", avg)
print("prc : ", prc)
| true |
b1de095e30d476bfcb9bb5de03283481a1377677 | Python | jaam102114/CTM2 | /aaScript.py | UTF-8 | 753 | 2.53125 | 3 | [] | no_license | import sys
def replace_all(text, dic):
    """Return *text* with every key of *dic* replaced by its value.

    Replacements are applied sequentially in dict iteration order, so an
    earlier substitution can affect later ones.
    """
    # Bug fix: dict.iteritems() is Python-2 only; .items() works on both
    # Python 2 and Python 3.
    for old, new in dic.items():
        text = text.replace(old, new)
    return text
# Character-level buffer: `stripping` is a *list*, so `+=` with a string
# extends it one character at a time.
stripping =[]
default = ''
# 3-letter -> 1-letter amino-acid codes; spaces are removed entirely.
aa = {'CYS': 'C', 'ASP': 'D', 'SER': 'S', 'GLN': 'Q', 'LYS': 'K',
      'ILE': 'I', 'PRO': 'P', 'THR': 'T', 'PHE': 'F', 'ASN': 'N',
      'GLY': 'G', 'HIS': 'H', 'LEU': 'L', 'ARG': 'R', 'TRP': 'W',
      'ALA': 'A', 'VAL':'V', 'GLU': 'E', 'TYR': 'Y', 'MET': 'M', ' ':''}
simpleText = ''
counter = 104
with open(sys.argv[1]) as f:
    for line in f:
        stripping+= line.strip() + ' '
        # Slice fixed-width windows from the accumulated characters: 7 chars
        # for the record id, 47 chars for the residue field.
        # NOTE(review): offsets assume exactly 104 characters per record —
        # confirm against the actual input layout.
        simpleText+= '>' + ''.join(stripping[-104+counter:-97+counter]) + '\n' + ''.join(stripping[-96+counter:-49+counter]) + '\n'
        counter = counter + 104
txt = replace_all(simpleText, aa)
print txt
| true |
af575e2e98481d5d705e5ce68f0f46ecb722f255 | Python | rohanneps/utilities_python | /flask_restplus/library/txt_generate_gram_matrix/gram_generator.py | UTF-8 | 7,033 | 2.703125 | 3 | [] | no_license | import csv
import re
import pandas as PythonPandas
import os
from utils.gram_generator_variables import *
def ReadGramPatternFilePaths(DataFileName, TextsToAppend):
    """Assemble the folder and per-size file names used by the gram pipeline.

    Returns a list of six lists:
    [cleaned gram names, cleaned pattern names, gram names, pattern names,
     dictionary csv names, correlation csv names]; the first element of each
    inner list is the folder the remaining names live in.
    """
    ChosenColumn = TextsToAppend
    # Output folders for the cleaned and the stop-word variants.
    CleanedDataGrams = FOLDER_TO_PROCESS + '/1.1.1.1 Stage Gram per Description/'
    CleanedGramPatterns = FOLDER_TO_PROCESS + '/1.1.1.2 Description Gram With Patterns/'
    GramWithStopWords = FOLDER_TO_PROCESS + '/1.2.1.1 Stage Gram per Description/'
    GramPatternsWithStopWords = FOLDER_TO_PROCESS + '/1.2.1.2 Description Gram With Patterns/'
    GramLists = [GramWithStopWords]
    PatternLists = [GramPatternsWithStopWords]
    for FileNumber in range(1, 7):
        # One gram/pattern file per n-gram size (1..6).
        GramFiles = ChosenColumn + '_' + DataFileName + '_Grams_' + str(FileNumber)
        GramLists.append(GramFiles)
        PatternFiles = ChosenColumn + '_' + DataFileName + '_Patterns_' + str(FileNumber)
        PatternLists.append(PatternFiles)
    CleanedGramsList = [CleanedDataGrams]
    CleanedPatternsList = [CleanedGramPatterns]
    for FileNumber in range(1, 7):
        # NOTE(review): these names are identical to the lists built above;
        # only the leading folder entry differs — confirm that is intentional.
        GramFiles = ChosenColumn + '_' + DataFileName + '_Grams_' + str(FileNumber)
        CleanedGramsList.append(GramFiles)
        PatternFiles = ChosenColumn + '_' + DataFileName + '_Patterns_' + str(FileNumber)
        CleanedPatternsList.append(PatternFiles)
    PathToDictionary = DICT_FOLDER
    PathToCorrelationData = CORREL_FOLDER
    DictionaryList = [PathToDictionary]
    CorrelationDataList = [PathToCorrelationData]
    # Collect every distinct .csv file name under the dictionary folder.
    for root, dirs, files in os.walk(PathToDictionary):
        for name in files:
            if (name.endswith('.' + 'csv')):
                if name not in DictionaryList:
                    DictionaryList.append(name)
    # Same for the correlation-data folder.
    for root, dirs, files in os.walk(PathToCorrelationData):
        for name in files:
            if (name.endswith('.' + 'csv')):
                if name not in CorrelationDataList:
                    CorrelationDataList.append(name)
    FinalList = [CleanedGramsList, CleanedPatternsList, GramLists, PatternLists,DictionaryList,CorrelationDataList]
    return FinalList
def GeneratePatternsFromGrams(InputFilePath, OutputFilePath):
    """Read grams from column 1 of the input CSV and write (gram, pattern) rows.

    Pattern encoding: digits -> '9', lowercase -> 'x', uppercase -> 'X', any
    other non-space character -> '#', spaces -> '_'.  If the gram contains a
    '%', the first masked '#' of the pattern is turned back into '%'.

    Improvements over the original: file handles are managed with `with`
    (closed even on error) and `'%' in s` replaces the direct
    `s.__contains__('%')` call; the transformation itself is unchanged.
    """
    with open(InputFilePath, 'r') as InFile, \
         open(OutputFilePath, 'w', newline='\n') as OutFile:
        Reader = csv.reader(InFile, delimiter=',')
        Writer = csv.writer(OutFile, delimiter=',', lineterminator='\n')
        Writer.writerow(['Grams', 'Patterns'])
        next(Reader, None)  # skip the input header row
        for Row in Reader:
            if not Row:  # skip completely empty lines
                continue
            Gram = str(Row[1]).strip(' ')
            # Build the shape pattern one character class at a time.
            Pattern = re.sub(r'[0-9]', r'9', Gram)
            Pattern = re.sub(r'[a-z]', r'x', Pattern)
            Pattern = re.sub(r'[A-Z]', r'X', Pattern)
            Pattern = re.sub('[^9xX ]', '#', Pattern)
            Pattern = re.sub(' ', '_', Pattern)
            GramData = str(Row[1])
            if '%' in GramData:
                # Re-expose the first wildcard '%' that was masked as '#'.
                Pattern = Pattern.replace('#', '%', 1)
            Writer.writerow([GramData, Pattern])
def PatternGenerator(Uploadpath, DownloadPath, DataFileName, TextsToAppend):
    """Generate one pattern file for each of the six gram files of a column."""
    base = TextsToAppend + '_' + DataFileName
    for size in range(1, 7):
        source = '{}{}_Grams_{}.csv'.format(Uploadpath, base, size)
        target = '{}{}_Patterns_{}.csv'.format(DownloadPath, base, size)
        GeneratePatternsFromGrams(source, target)
def NGramsGenerator(InputData, NumberPfGramsToProduce):
    """Return all n-grams (as token lists) over the space-split input string."""
    tokens = InputData.split(' ')
    size = NumberPfGramsToProduce
    return [tokens[start:start + size]
            for start in range(len(tokens) - size + 1)]
def GramFarmGenerator(InputFilePath, OutputFilePath, NumberOfGrams, KeysToAppend):
    """Write one (key, gram) row per n-gram of each description in the input CSV.

    The literal '_Grams' inside *OutputFilePath* is rewritten to
    '_Grams_<NumberOfGrams>' before the output file is created.
    """
    OpenCSVInput = open(InputFilePath, 'r')
    ReadCSVInput = csv.reader(OpenCSVInput, delimiter=',')
    Headers = next(ReadCSVInput, None)  # skip the input header row
    OutputFilePath = OutputFilePath.replace('_Grams', '_Grams_' + str(NumberOfGrams))
    OpenCSVOutput = open(OutputFilePath, 'w', newline='\n')
    CreateCSVWriter = csv.writer(OpenCSVOutput, delimiter=',', lineterminator='\n')
    CreateCSVWriter.writerow([KeysToAppend, 'Grams'])
    for DataRow in ReadCSVInput:
        for Grams in NGramsGenerator(DataRow[1], NumberOfGrams):
            if NumberOfGrams>1:
                # Multi-token grams are joined with '_'.
                RowGram = '_'.join(map(str, Grams))
                # NOTE(review): str.strip takes a *set* of characters, so this
                # also strips backslashes and '|' from the ends — confirm intent.
                RowGram = RowGram.strip(r'\"|\'|\,\#|\&|\.|\-|\*')
            else:
                RowGram = ''.join(map(str, Grams))
                RowGram = RowGram.strip(r'\"|\'|\,\#|\&|\.|\-|\*')
            if RowGram:
                AppendKeyValuePairs = [DataRow[0], RowGram]
                CreateCSVWriter.writerow(AppendKeyValuePairs)
    OpenCSVInput.close()
    OpenCSVOutput.close()
def GenerateGrams(Uploadpath, DownloadPath, DataFileName, Keys, TextsToAppend):
    """Produce 1- to 6-gram files from the cleaned source data."""
    source = Uploadpath + DataFileName + '_Cleaned.csv'
    target = DownloadPath + TextsToAppend + '_' + DataFileName + '_Grams' + '.csv'
    for size in range(1, 7):
        # GramFarmGenerator rewrites the '_Grams' suffix to '_Grams_<size>'.
        GramFarmGenerator(source, target, size, Keys)
def GenerateDirtyGrams(Uploadpath, DownloadPath, DataFileName, Keys, TextsToAppend):
    """Produce 1- to 6-gram files from the raw (uncleaned) data file.

    NOTE(review): *Uploadpath* is accepted but never used — the input is
    opened as DataFileName + '.csv' relative to the working directory;
    confirm whether the upload path should be prefixed.
    """
    for GramsNumbers in range(1, 7):
        csvOutputFile = DownloadPath + TextsToAppend + '_' + DataFileName + '_Grams' + '.' + 'csv'
        GramFarmGenerator(DataFileName + '.csv', csvOutputFile, GramsNumbers, Keys)
def KeysColumnsBreaker(Uploadpath, DownloadPath, DataFileName, Keys, TextsToAppend):
    """Load the source table, split off the (key, description) columns, return it.

    NOTE(review): the input name ends in '.csv' but is parsed with
    pandas.read_excel — confirm the files really are Excel workbooks carrying
    a .csv extension.
    """
    DataToSplit = PythonPandas.DataFrame(PythonPandas.read_excel(Uploadpath + DataFileName + '.' + 'csv'))
    print(DataToSplit)
    # Normalise every cell to str before splitting out the columns.
    DataToSplit = PythonPandas.DataFrame(DataToSplit.astype(str))
    DataToSplit = PythonPandas.DataFrame(BreakColumns(DownloadPath, DataFileName, DataToSplit, Keys, TextsToAppend))
    return DataToSplit
def BreakColumns(DownloadPath, DataFileName, PandasDataFrame, KeyNumbers, TextsToAppend):
    """Extract the key and description columns and dump them to an .xlsx file.

    Returns the original (unmodified) frame so callers can keep chaining.
    """
    SplittedDataFrame = PythonPandas.DataFrame(PandasDataFrame[[KeyNumbers, TextsToAppend]].copy())
    # Literal 'nan' strings (from astype(str)) become empty cells.
    SplittedDataFrame.replace('nan', value='', inplace=True)
    # Bug fix: the original concatenated '.' + '.xlsx', producing 'name..xlsx'.
    SplittedDataFrame.to_excel(DownloadPath + TextsToAppend + '_' + DataFileName + '.xlsx')
    return PandasDataFrame
| true |
205f797bc01b5d5f1e26ac167fdbb2574a896882 | Python | AnoujeanAJB/Python | /PY renomme/ClassAction.py | UTF-8 | 649 | 2.546875 | 3 | [] | no_license | # -- coding: utf-8 --
import os
class ACTION:
    """Renaming rule bound to a directory: holds the folder name and the rule."""

    def __init__(self, nomDuRépertoire, règle):
        self.nomDuRépertoire = nomDuRépertoire
        self.règle = règle

    def Get_nomDuRépertoire(self):
        return self.nomDuRépertoire

    def Set_nomDuRépertoire(self, nom):
        self.nomDuRépertoire = nom

    def Get_règle(self):
        return self.règle

    def Set_règle(self, règle):
        self.règle = règle

    def simule(self, repertoire, extension, prefixe, postfixe):
        """Print the would-be name of each matching file without renaming it."""
        # Bug fix: os.listdir() without an argument ignored *repertoire* and
        # always scanned the current working directory.
        for e in os.listdir(repertoire):
            if e.endswith(extension):
                # NOTE(review): *e* already ends with *extension*, so the
                # printed name carries the extension twice — confirm intent.
                print(str(prefixe) + e + str(postfixe) + str(extension))
| true |
84bf142e5037d94d328506a13ca44ed3dbf45a86 | Python | bkyileo/algorithm-practice | /python/Permutations.py | UTF-8 | 910 | 3.53125 | 4 | [] | no_license | __author__ = 'BK'
'''
Given a collection of distinct numbers, return all possible permutations.
For example,
[1,2,3] have the following permutations:
[1,2,3], [1,3,2], [2,1,3], [2,3,1], [3,1,2], and [3,2,1].
Subscribe to see which companies asked this question
'''
class Solution(object):
    def helper(self, nums, start, re):
        """Recursively permute nums[start:] in place, appending each distinct
        complete arrangement (as a copy) to *re*."""
        if start == len(nums):
            # Linear membership test keeps duplicates out, which also makes
            # the routine correct for inputs with repeated values.
            if nums not in re:
                re.append(nums[:])
            return
        for i in range(start, len(nums)):
            nums[i], nums[start] = nums[start], nums[i]
            self.helper(nums, start + 1, re)
            nums[i], nums[start] = nums[start], nums[i]

    def permute(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]
        """
        # Bug fix: the original used Python-2-only xrange and a per-call
        # `import copy`; range / nums[:] behave identically on Python 3.
        re = []
        self.helper(nums, 0, re)
        return re
# Demo run (Python 2 print syntax): permutations of a list with duplicates.
solu = Solution()
nums=[1,1,0,0,1,-1,-1,1]
print solu.permute(nums)
| true |
657bdfe9de349833cc2850ff841c3be2168481ae | Python | DendrouLab/sc_pipelines_PSC | /python/run_scrublet_scores.py | UTF-8 | 6,942 | 2.59375 | 3 | [] | no_license | '''
run scrublet on single channel
expects sample id and path to input data
'''
import scrublet as scr
import scipy.io
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import argparse
import sys
import scanpy as sc
# ---- Command-line interface ----------------------------------------------
# All values arrive as strings and are cast where used below.
parser = argparse.ArgumentParser()
parser.add_argument("--sample_id",
                    default="sampleID",
                    help="name of the sample, usually just he name of the cellranger filtered folder")
parser.add_argument("--inputpath",
                    default="/path/to/cellranger/filtered_folder",
                    help="path to the single channel worth of cells for the doublet estimation to be run on")
parser.add_argument("--filetype",
                    default="cellranger",
                    help="type of data, options = ['cellranger', 'csv_matrix','txt_matrix', 'h5ad', 'h5']")
parser.add_argument("--outdir",
                    default="/gpfs3/well/combat/projects/preprocess/citeseq_final/FC_annotation/dotplot_minimal/",
                    help="string, b or t cells?")
parser.add_argument("--expected_doublet_rate",
                    default=0.06,
                    help="the expected fraction of transcriptomes that are doublets, typically 0.05-0.1. Results are not particularly sensitive to this parameter")
parser.add_argument("--sim_doublet_ratio",
                    default=2,
                    help="the number of doublets to simulate, relative to the number of observed transcriptomes. Setting too high is computationally expensive. Min tested 0.5")
parser.add_argument("--n_neighbors",
                    default=20,
                    help="Number of neighbors used to construct the KNN classifier of observed transcriptomes and simulated doublets. The default value of round(0.5*sqrt(n_cells)) generally works well.")
parser.add_argument("--min_counts",
                    default=2,
                    help="Used for gene filtering prior to PCA. Genes expressed at fewer than `min_counts` in fewer than `min_cells` (see below) are excluded")
parser.add_argument("--min_cells",
                    default=3,
                    help="Used for gene filtering prior to PCA. Genes expressed at fewer than `min_counts` (see above) in fewer than `min_cells` are excluded.")
parser.add_argument("--min_gene_variability_pctl",
                    default=85,
                    help="Used for gene filtering prior to PCA. Keep the most highly variable genes (in the top min_gene_variability_pctl percentile), as measured by the v-statistic [Klein et al., Cell 2015]")
parser.add_argument("--n_prin_comps",
                    default=30,
                    help="Number of principal components used to embed the transcriptomes prior to k-nearest-neighbor graph construction")
parser.add_argument("--use_thr",
                    default=True,
                    help="use a user defined thr to define min doublet score to split true from false doublets? if false just use what the software produces")
parser.add_argument("--call_doublets_thr",
                    default=0.25,
                    help="if use_thr is True, this thr will be used to define doublets")
args = parser.parse_args()
# ---- Load the input -------------------------------------------------------
# Each branch fills: counts_matrix (cells x genes), genes, cellnames.
if args.filetype == "cellranger":
    if os.path.exists(args.inputpath):
        input_dir = args.inputpath
    else:
        sys.exit("the cells you're trying to load don't exist!")
    counts_matrix = scipy.io.mmread(input_dir + '/matrix.mtx.gz').T.tocsc()
    # NOTE(review): features.tsv is read uncompressed while matrix and
    # barcodes are .gz — confirm the folder actually contains both forms.
    genes = np.array(scr.load_genes(input_dir + '/features.tsv', delimiter='\t', column=1))
    cellnames = pd.read_csv(input_dir + '/barcodes.tsv.gz', sep='\t', header=None)
elif args.filetype == "h5ad":
    if os.path.exists(args.inputpath):
        input_path = args.inputpath
    else:
        sys.exit("the cells you're trying to load don't exist!")
    adata = sc.read_h5ad(input_path)
    counts_matrix = adata.X.copy()
    genes = list(adata.var_names)
    cellnames = list(adata.obs_names)
elif args.filetype == "csv_matrix":
    if os.path.exists(args.inputpath):
        input_path = args.inputpath
    else:
        sys.exit("the cells you're trying to load don't exist!")
    counts_matrix = pd.read_csv(input_path, sep=',',index_col=0)
    genes = list(counts_matrix.columns)
    cellnames = list(counts_matrix.index)
    counts_matrix = counts_matrix.to_numpy()
elif args.filetype == "txt_matrix":
    if os.path.exists(args.inputpath):
        input_path = args.inputpath
    else:
        sys.exit("the cells you're trying to load don't exist!")
    # Same as csv_matrix but tab-separated.
    counts_matrix = pd.read_csv(input_path, sep='\t',index_col=0)
    genes = list(counts_matrix.columns)
    cellnames = list(counts_matrix.index)
    counts_matrix = counts_matrix.to_numpy()
elif args.filetype == "h5":
    if os.path.exists(args.inputpath):
        input_path = args.inputpath
    else:
        sys.exit("the cells you're trying to load don't exist!")
    adata = sc.read_hdf(input_path)
    counts_matrix = adata.X.copy()
    genes = list(adata.var_names)
    cellnames = list(adata.obs_names)
# ---- Run Scrublet ---------------------------------------------------------
print('Counts matrix shape: {} rows, {} columns'.format(counts_matrix.shape[0], counts_matrix.shape[1]))
print('Number of genes in gene list: {}'.format(len(genes)))
print("now initializing the scrublet object with expected_doublet_rate:\n")
print(args.expected_doublet_rate)
scrub = scr.Scrublet(counts_matrix, expected_doublet_rate=float(args.expected_doublet_rate))
print("predicting doublets with params: \nmincells: %s \nmingenes: %s \nmin_gene_variabilty_pctl: %s\nn_prin_comps: %s\n" % (args.min_counts, args.min_cells, args.min_gene_variability_pctl, args.n_prin_comps))
print("predicting using:\n")
# Arguments arrive as strings from argparse, hence the explicit casts.
doublet_scores, predicted_doublets = scrub.scrub_doublets(min_counts=int(args.min_counts),
                                                          min_cells=int(args.min_cells),
                                                          min_gene_variability_pctl=float(args.min_gene_variability_pctl),
                                                          n_prin_comps=int(args.n_prin_comps))
# cgat pipelines will probably parse a string; the default is the *bool*
# True, so accept both the bool and its string form.  Bug fix: the original
# only assigned use_thr when the literal string "True" was passed, leaving
# the name undefined (NameError at the `if`) in every other case — including
# the default run.
use_thr = args.use_thr in (True, "True")
if use_thr:
    # Override Scrublet's automatically derived threshold with the CLI value.
    print("using default threshold to call doublets, instead of predicted %s, using default set to %s" %(round(scrub.threshold_, 2), args.call_doublets_thr))
    predicted_doublets = scrub.call_doublets(threshold=float(args.call_doublets_thr))
# ---- Save outputs ---------------------------------------------------------
print("saving plots to outdir")
# plot_histogram returns (fig, axes); save the figure itself.
fig = scrub.plot_histogram()
fig[0].savefig(args.outdir + '/' + args.sample_id + '_' + "doubletScore_histogram.png",
               bbox_inches='tight', dpi=120)
print("cellnames and doublet scores and prediction")
# One row per cell: score, boolean prediction and barcode.
data = pd.DataFrame({'doublet_scores': doublet_scores,
                     'predicted_doublets': predicted_doublets})
data['barcode'] = cellnames
data.to_csv(os.path.join(args.outdir + "/" + args.sample_id + "_scrublet_scores.txt"),
            sep="\t", index=False)
print('done')
| true |
a1880b2dac8e6624934cf19ee2b709fd088f5720 | Python | dpezzin/dpezzin.github.io | /test/Python/dataquest/dataframe_basics/math_with_multiple_columns_by_name_to_var.py | UTF-8 | 897 | 3 | 3 | [] | no_license | #!/usr/bin/env python
# Each arithmetic expression below operates element-wise on pandas Series
# columns of the pre-loaded `food_info` DataFrame (supplied by the course
# environment; not defined in this file).
# Adding up all of the fat columns.
total_fat = food_info["FA_Sat_(g)"] + food_info["FA_Mono_(g)"] + food_info["FA_Poly_(g)"]
# We can also divide.
grams_of_protein_per_calorie = food_info["Protein_(g)"] / food_info["Energ_Kcal"]
# We can also multiply
grams_of_protein_squared = food_info["Protein_(g)"] * food_info["Protein_(g)"]
# And subtract
non_sugar_carbs = food_info["Carbohydrt_(g)"] - food_info["Sugar_Tot_(g)"]
# Assign the number of grams of protein per gram of water ("Protein_(g)" column divided by "Water_(g)" column) to grams_of_protein_per_gram_of_water.
# Assign the total amount of calcium and iron("Calcium_(mg)" column plus "Iron_(mg)" column) to milligrams_of_calcium_and_iron.
grams_of_protein_per_gram_of_water = food_info["Protein_(g)"] / food_info["Water_(g)"]
milligrams_of_calcium_and_iron = food_info["Calcium_(mg)"] + food_info["Iron_(mg)"]
1888655950e13d97d4c297c94f9309a1ae27ad1d | Python | bellachp/TopicLabeling | /model/labels_reader.py | UTF-8 | 2,228 | 3.296875 | 3 | [
"MIT"
] | permissive | # labels_reader.py
import pandas as pd
# import jellyfish
labels_word_list = ["bag of labels"]
class Label:
    """A single node of the nested label hierarchy."""

    def __init__(self, level_in):
        # Child labels keyed by name.
        self.nodes = {}
        # Usage flag; flipped to 1 once the label is observed.
        self.value = 0
        # Depth of this node in the tree (0 = root).
        self.level = level_in
class SomeLabel:
    """
    example implementing some label structure
    """
    def __init__(self, idx):
        self.main_id = idx
        # Fixed demo hierarchy: root -> {subterm -> {subsub}, other}.
        self.label = Label(0)
        self.label.nodes["subterm"] = Label(1)
        self.label.nodes["other"] = Label(1)
        self.label.nodes["subterm"].nodes["subsub"] = Label(2)

    # various process loops
    def add_label(self, tag):
        """Mark *tag* as used (also flags its parent for level-2 matches).

        NOTE(review): compute_closest_string is a stub returning None, so an
        unknown tag recurses forever via add_label(None) — implement the
        matcher before relying on this fallback.
        """
        for k1, v1 in self.label.nodes.items():
            if k1 == tag:
                v1.value = 1
                return
            elif v1.nodes:
                for k2, v2 in v1.nodes.items():
                    if k2 == tag:
                        v2.value = 1
                        # Propagate usage to the parent level as well.
                        v1.value = 1
                        return
        # to reach here, no label. do string matching
        closest_str = self.compute_closest_string(tag)
        self.add_label(closest_str)

    # string matching function could go here
    def compute_closest_string(self, tag):
        pass
class LabelsReader:
    """
    parse excel files of labeled data into label
    heirarchy to capture usage for modeling
    """
    def __init__(self):
        # Accumulated label objects across every loaded sheet.
        self.labels = []

    # pandas excel loaders
    def load_xlsx(self, filename, sheetname=""):
        """Parse one sheet of *filename* (first sheet when no name is given).

        NOTE(review): the parsed sheet is not yet consumed — the parsing
        logic below the `# do parsing...` marker is still to be written.
        """
        xlfile = pd.ExcelFile(filename)
        shee1 = None  # NOTE(review): likely a typo for sheet1; it is unused.
        sheet1 = None
        if sheetname == "":
            sheet1 = xlfile.parse(0)
        else:
            sheet1 = xlfile.parse(sheetname)
        # do parsing...

    # convert labels to dataframe
    def labels_to_dataframe(self, label_level):
        """Placeholder: flatten labels at *label_level* into a DataFrame."""
        # get cols
        # labels
        # labels = [ll.get_labelrow(level=label_level) for ll in self.labels]
        # df = pd.DataFrame(labels, columns=cols)
        # return df
        pass
# testdev case
if __name__ == '__main__':
    # Smoke-run: load two sheets and flatten the level-2 labels.
    # NOTE(review): "file" and the sheet names are placeholders.
    ff = "file"
    lr = LabelsReader()
    lr.load_xlsx(ff, sheetname="bob")
    lr.load_xlsx(ff, sheetname="tony")
    df = lr.labels_to_dataframe(label_level=2)
| true |
108e77aa4ee8f9896c032f0775146ee72c18f2fa | Python | MizanMustakim/Daily-Coding-Problem-no.118 | /problem_118.py | UTF-8 | 106 | 3.359375 | 3 | [] | no_license | given = [-9, -2, 0, 2, 3]
# Square every element of `given`, then print the squares in ascending order.
x = [value * value for value in given]
x.sort()
print(x)
| true |
3b816c8b7333df9ba8bd0664a0029eb6fe304f9c | Python | agladstein/stdpopsim | /tests/test_pongo.py | UTF-8 | 623 | 2.53125 | 3 | [
"MIT"
] | permissive | import unittest
import io
import stdpopsim
from stdpopsim import pongo
class TestPongoIM(unittest.TestCase):
    """
    Basic tests for the LockeEtAlPongoIM model.
    """
    def test_simulation_runs(self):
        # Smoke test: a 2-sample simulation must yield a tree sequence with
        # the model's two populations.
        model = pongo.LockeEtAlPongoIM()
        contig = stdpopsim.Contig()
        samples = model.get_samples(2)
        ts = model.simulate(contig, samples)
        self.assertEqual(ts.num_populations, 2)

    def test_debug_runs(self):
        # The debug report should write a non-empty description to the stream.
        model = pongo.LockeEtAlPongoIM()
        output = io.StringIO()
        model.debug(output)
        s = output.getvalue()
        self.assertGreater(len(s), 0)
| true |
ffd72377d063f1bcfa0a1ccfec6198f5ec739023 | Python | CMad51/9th-30th | /9thday.py | UTF-8 | 505 | 3.953125 | 4 | [] | no_license | '''for number in range(11):
print(number)
_number=10
while _number>=0:
print(_number)
_number-=1
for item in range(11) :
str_of_item=str(item)
print(str_of_item,'x',str_of_item,'=',str(item**2))
#9
_sum=0
for number in range(0,101):
_sum+=number
print(_sum)'''
#12
# Exercise 12: report the list length, then print the fruits in reverse order.
fruit_list = ['banana', 'orange', 'mango', 'lemon']
length = len(fruit_list)
print(length)
reverse_fruit_list = list(reversed(fruit_list))
print(reverse_fruit_list)
| true |
283b6bb83e197900f06145be17fc64bb914eecf2 | Python | buihuubang/BTLINUX | /BTPYTHON/SinhVien/khoa.py | UTF-8 | 345 | 2.90625 | 3 | [] | no_license | class KHOA:
    def __init__ (self,makhoa,tenkhoa):
        # Department code and department name.
        self.makhoa = makhoa
        self.tenkhoa = tenkhoa
    def setMaKhoa(self,makhoa):
        # Replace the department code.
        self.makhoa = makhoa
def getMaKhoa(self):
self.makhoa
    def setTenKhoa(self,tenkhoa):
        # Replace the department name.
        self.tenkhoa = tenkhoa
def getTenKhoa(self):
self.tenkhoa
    def toString(self):
        # Print "code  name" to stdout (does not return a string).
        print(self.makhoa, ' ' , self.tenkhoa)
| true |
2c66b7d307bbcd02fea8768c27e6267569e48e11 | Python | luzhijun/Optimization | /CMA-ES/分段数对效果的影响/makeData.py | UTF-8 | 586 | 2.890625 | 3 | [
"Apache-2.0"
] | permissive | #!usr/bin/env python
#encoding: utf-8
__author__="luzhijun"
'''
make linear data sets
'''
import pickle
import numpy as np
import random as rd
import math
PN=300
def dumpData(x, filename):
    """Pickle *x* into *filename* using the highest available protocol.

    Bug fix: pickle writes bytes, so the file must be opened in binary mode;
    the original text mode 'w' raises TypeError on Python 3 (and corrupts
    binary protocols elsewhere).
    """
    with open(filename, 'wb') as f:
        pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)
def loadData(filename):
    """Unpickle and return the object stored in *filename*.

    Bug fix: pickle reads bytes, so the file must be opened in binary mode;
    the original text mode 'r' raises on Python 3 for binary protocols.
    """
    with open(filename, 'rb') as f:
        x = pickle.load(f)
    return x
def linefunc2(x):
return 10*math.sin(0.6*x)+rd.uniform(-1.5,1.5)*rd.gauss(0,5)
if __name__ == '__main__':
    # PN evenly spaced samples over [-7, 7); the final endpoint is dropped.
    X=np.linspace(-7,7,PN+1)
    X=np.delete(X,len(X)-1,0)
    # NOTE(review): on Python 3, map() returns a lazy iterator that pickle
    # cannot serialise; wrap in list(...) when running under Python 3.
    Y=map(linefunc2,X)
    dumpData([X,Y],'data1.tl')
d39334a99212c2c04e5cfa5fbbf1f41ad8c5c62b | Python | tech-learner123/leetcode_python | /3-medium.py | UTF-8 | 435 | 3.109375 | 3 | [] | no_license | class Solution:
def lengthOfLongestSubstring(self, s: str) -> int:
# here j indicate the next non duplicate position.
j = 0
mapping = {}
length = 0
for start, letter in enumerate(s):
if letter in mapping and mapping[letter] + 1 > j:
j = mapping[letter] + 1
mapping[letter] = start
length = max(length, start - j + 1)
return length
| true |
44b21b39c0239b15f99fe7cb8571e6c06c4068f9 | Python | rafaelperazzo/programacao-web | /moodledata/vpl_data/330/usersdata/296/93652/submittedfiles/lista1.py | UTF-8 | 465 | 3.328125 | 3 | [] | no_license | # -*- coding: utf-8 -*-
# Read n integers and report the count and sum of the even and odd values.
n = int(input("Digite o valor de n: "))
a = []
for i in range(0, n, 1):
    # Bug fix: the original append line was missing its closing parenthesis
    # (a syntax error that prevented the script from running at all).
    a.append(int(input("Digite um valor: ")))
spares = 0    # count of even values
simpares = 0  # count of odd values
qpares = 0    # sum of even values
qimpares = 0  # sum of odd values
for i in range(0, n, 1):
    if a[i] % 2 == 0:
        spares = spares + 1
        qpares = qpares + a[i]
    else:
        simpares = simpares + 1
        # Bug fix: the original overwrote the odd *counter* with the running
        # sum and never updated qimpares, which was printed below regardless.
        qimpares = qimpares + a[i]
print(spares)
print(qpares)
print(simpares)
print(qimpares)
print(a)
c0070ed43dc999f28731b0003a2fcd9f2c16407f | Python | AinoraZ/stack-trace-solver | /stack-trace-solver-be/src/fuzzy_generation.py | UTF-8 | 5,236 | 2.546875 | 3 | [
"MIT"
] | permissive | import difflib
import re
from fuzzywuzzy import process
from fuzzywuzzy import fuzz
from exception_extractor import retrieve_exception_dictionary, load_exceptions
from exception_template import ExceptionTemplate
from knowledge_base import KnowledgeBase
def show_diff(seqm):
    """Render a SequenceMatcher as its 'a' side with every change masked {*}.

    Equal spans are copied verbatim; inserted, deleted and replaced spans
    each collapse to a single "{*}" marker.
    """
    pieces = []
    for tag, a_lo, a_hi, _, _ in seqm.get_opcodes():
        if tag == 'equal':
            pieces.append(seqm.a[a_lo:a_hi])
        elif tag in ('insert', 'delete', 'replace'):
            pieces.append("{*}")
        else:
            raise RuntimeError("unexpected opcode")
    return ''.join(pieces)
def fuzzy_group(exception_list, scorer, threshold):
    """Greedily cluster messages by fuzzy similarity to the current head.

    Repeatedly takes the first remaining message, groups every other message
    whose *scorer* similarity to it is >= *threshold*, and continues with the
    leftovers.  Returns a list of (group, threshold) tuples.
    """
    groups = []
    while len(exception_list) >= 2:
        # Compare the head against everything else (limit is a soft cap).
        matches = process.extract(exception_list[0], exception_list[1:], limit=1000, scorer=scorer)
        current_group = [match[0] for match in matches if match[1] >= threshold]
        current_group.append(exception_list[0])
        groups.append((current_group, threshold))
        # Below-threshold messages form the work list for the next round.
        exception_list = [match[0] for match in matches if match[1] < threshold]
    if len(exception_list) == 1:
        groups.append(([exception_list[0]], threshold))
    return groups
def generate_template(type, messages):
    """Collapse *messages* into one template string with {*} wildcards.

    Pairwise-diffs each message against the running template, masking the
    differing spans as {*}; then merges wildcards that sit next to (or within
    5 characters of) each other.  Returns None when the result degenerates:
    a wildcarded template with at most 6 literal characters left.
    """
    if len(messages) == 1:
        return ExceptionTemplate(type, messages, messages[0])
    template = messages[0]
    for toCompareId in range(len(messages)):
        for other in messages[toCompareId+1:]:
            sm = difflib.SequenceMatcher(None, template, other)
            template = show_diff(sm)
    # Absorb single non-space characters glued to a wildcard, then merge
    # wildcards separated by up to 5 characters until none remain adjacent.
    between_two_wildcards = r"(\{\*\}.{0,5}\{\*\})";
    next_to_wildcard = r"(((?<=\s)\S)?\{\*\}(\S(?=\s))?)"
    template = re.sub(next_to_wildcard, "{*}", template)
    while re.search(between_two_wildcards, template):
        template = re.sub(between_two_wildcards, "{*}", template)
    if "{*}" in template and len(template.replace("{*}", "")) <= 6:
        return None  # too little literal text left to be useful
    return ExceptionTemplate(type, messages, template, "{*}" in template)
def generate_templates(filename, scorer):
    """Cluster similar exception messages per type and build their templates.

    For every exception type in *filename*, clusters the messages with
    fuzzy_group (starting threshold 70) and turns each cluster into an
    ExceptionTemplate.  Clusters whose template degenerates are re-split at
    progressively higher similarity thresholds (steps of 3, up to 97).
    """
    ex_dict = retrieve_exception_dictionary(filename)
    ex_dict_keys = list(ex_dict.keys())
    ex_dict_keys.sort()
    exception_templates = []
    for key in ex_dict_keys:
        values = ex_dict[key]
        if len(values) < 1:
            continue
        all_messages = [value.message for value in values]
        print("Generating type:", key)
        message_groups = fuzzy_group(all_messages, scorer, 70)
        while len(message_groups) != 0:
            messages, used_threshold = message_groups.pop(0)
            template = generate_template(key, messages)
            if template is not None:
                exception_templates.append(template)
            else:
                # Template rejected: try to split the cluster more finely.
                # NOTE(review): if the cluster never splits, its messages are
                # silently dropped without any template — confirm intent.
                regenerated_groups = fuzzy_group(messages, scorer, used_threshold + 3)
                while len(regenerated_groups) <= 1 and used_threshold <= 94:
                    _, used_threshold = regenerated_groups[0]
                    regenerated_groups = fuzzy_group(messages, scorer, used_threshold + 3)
                if len(regenerated_groups) > 1:
                    message_groups.extend(regenerated_groups)
    return exception_templates
def generate_with_debug(filename, scorer=fuzz.ratio):
    """Print every generated (wildcard-bearing) template together with the
    messages it was derived from, for manual inspection."""
    for tmpl in generate_templates(filename, scorer):
        if not tmpl.generated:
            continue
        print(tmpl)
        for message in tmpl.messages:
            print(f"\t{message}")
def generate_to_knowledge_base(filename, sqlite_filename="knowledge_base.sql", scorer=fuzz.ratio):
    """Generate templates from *filename* and persist the wildcard-bearing
    ones into the knowledge base at *sqlite_filename*."""
    templates = generate_templates(filename, scorer)
    knowledge_base = KnowledgeBase(sqlite_filename)
    for tmpl in templates:
        if tmpl.generated:
            knowledge_base.insert_exception_template(tmpl)
def load_to_knowledge_base(filename, sqlite_filename="knowledge_base.sql"):
    """Load pre-written exception templates from *filename* and store those
    that contain a "{*}" wildcard into the knowledge base."""
    templates = [
        ExceptionTemplate(exc.exception, [], exc.message)
        for exc in load_exceptions(filename)
    ]
    base = KnowledgeBase(sqlite_filename)
    for tmpl in templates:
        if "{*}" in tmpl.template:
            base.insert_exception_template(tmpl)
def generate_with_different_scorers():
    """Build a separate knowledge base per fuzzy scorer, fed from both the
    exceptions and the cas input files."""
    for scorer in (fuzz.ratio, fuzz.token_set_ratio):
        target = f"knowledge_base_{scorer.__name__}.sqlite"
        generate_to_knowledge_base("exceptions.txt", target, scorer)
        generate_to_knowledge_base("cas.txt", target, scorer)
def generate_single_with_different_scorers():
    """Build a single combined knowledge base and merge in the manually
    curated templates.

    Note: the original implementation looped over
    [fuzz.ratio, fuzz.token_set_ratio] with an empty (``pass``) body, so only
    the *last* scorer leaked out of the loop and was ever used.  The dead
    loop is replaced by an explicit assignment with identical behavior.
    """
    scorer = fuzz.token_set_ratio  # last element of the original scorer list
    generate_to_knowledge_base("exceptions.txt", "knowledge_base.sqlite", scorer)
    generate_to_knowledge_base("cas.txt", "knowledge_base.sqlite", scorer)
    load_to_knowledge_base("manual/sorted_finished_exceptions.txt", "knowledge_base.sqlite")
if __name__ == "__main__":
    # generate_with_different_scorers()
    # Build the single combined knowledge base from both input files.
    generate_single_with_different_scorers()
    # load_to_knowledge_base("manual/sorted_finished_exceptions.txt")
| true |
a5b276bee07789d17bb9e5cbe236e4ad98bd6919 | Python | juan230500/Agenda | /interfaz.py | UTF-8 | 1,061 | 3.5 | 4 | [] | no_license | from tkinter import *
class Movile():
    """Makes a Tk widget draggable with the left mouse button.

    rootx/rooty track the widget's current placement; while a drag is in
    progress the widget is continuously re-placed to follow the pointer.
    """

    def __init__(self, widget, rootx, rooty):
        self.widget = widget
        self.rootx = rootx
        self.rooty = rooty
        # Wire the drag lifecycle: press -> motion -> release.
        self.widget.bind("<ButtonPress-1>", self.on_start)
        self.widget.bind("<B1-Motion>", self.on_drag)
        self.widget.bind("<ButtonRelease-1>", self.on_drop)

    def on_start(self, event):
        # Remember where inside the widget the drag began.
        self.x = event.x
        self.y = event.y
        print("Incio", self.rootx, self.rooty)

    def on_drag(self, event):
        # Shift the stored position by the pointer delta and re-place.
        self.rootx += event.x - self.x
        self.rooty += event.y - self.y
        self.widget.place(x=self.rootx, y=self.rooty)

    def on_drop(self, event):
        print("Final", self.rootx, self.rooty)
# Demo: a 350x200 window containing two draggable Canvas widgets.
window = Tk()
window.title("app")
window.geometry('350x200')
# Blue canvas, draggable, starting at the top-left corner.
canvas1 = Canvas(window, width=200, height=100)
canvas1.configure(background="blue")
canvas1.place(x=0,y=0)
M1=Movile(canvas1,0,0)
# Red canvas, draggable, starting at (100, 100).
canvas2 = Canvas(window, width=200, height=100)
canvas2.configure(background="red")
canvas2.place(x=100,y=100)
M2=Movile(canvas2,100,100)
# Enter the Tk event loop (blocks until the window is closed).
window.mainloop()
| true |
e9f9cfb0b5dcc94e740e192941ea9fa300504e4a | Python | paulocesarcsdev/ExerciciosPython | /2-EstruturaDeDecisao/25.py | UTF-8 | 1,250 | 4.34375 | 4 | [] | no_license | '''
Faça um programa que faça 5 perguntas para uma pessoa sobre um crime. As perguntas são:
a) "Telefonou para a vítima?"
b) "Esteve no local do crime?"
c) "Mora perto da vítima?"
d) "Devia para a vítima?"
e) "Já trabalhou com a vítima?"
O programa deve no final emitir uma classificação sobre a participação da pessoa no crime.
Se a pessoa responder positivamente a 2 questões ela deve ser classificada como "Suspeita", entre 3 e 4 como "Cúmplice" e 5 como "Assassino".
Caso contrário, ele será classificado como "Inocente".
'''
# Crime questionnaire: five yes/no questions (1 = yes, 0 = no).
# The count of "yes" answers classifies the person:
#   2 -> suspect, 3-4 -> accomplice, 5 -> murderer, 0-1 -> innocent.
print('Responta as perguntas: ')
contador = 0
print("Telefonou para a vítima?")
a = int(input('Aperte 1 para sim 0 para não: '))
print("Esteve no local do crime?")
b = int(input('Aperte 1 para sim 0 para não: '))
print("Mora perto da vítima?")
c = int(input('Aperte 1 para sim 0 para não: '))
print("Devia para a vítima?")
d = int(input('Aperte 1 para sim 0 para não: '))
print("Já trabalhou com a vítima?")
e = int(input('Aperte 1 para sim 0 para não: '))
# Total number of positive answers (each answer is expected to be 0 or 1).
contador = (a + b + c + d + e)
if(contador == 2):
    print("Suspeita")
if(contador == 3 or contador == 4):
    print("Cúmplice")
if(contador == 5):
    print('"Assassino"')
if(contador == 1 or contador == 0):
    print("Inocente")
4b922dfe1ab6a6beaf3de6149ec43c607c527f49 | Python | souravs17031999/100dayscodingchallenge | /recursion and backtracking/knight_tour_problem.py | UTF-8 | 2,548 | 3.953125 | 4 | [] | no_license | # Program for checking if the knight can move such that we can move to all the cells once on the board.
# We are required to basically create a tour of all the moves that knight makes if succesfull in covering the entire board.
# Note : we are prohibited to move outside the given board area and can't traverse the already traversed path again.
# -----------------------------------------------------------------------------------------------------------------------
# Naive solution will be to generate all the possible configuration for the board and check with the constraints and output the configuration which
# sets correctly but this gives exponential time complexity.
# Knight tour problem can be efficeintly solved by backtracking which is one of the approaches for solving it but due to very high time
# complexity (exponential) 8^(n * n), we are not able to run it in python most of the times.
# we will see here backtracking method but There are other methods also.
# BOARD SIZE : 8 * 8
# POSSIBLE KNIGHT MOVES IN ALL 8 DIRECTIONS :
#
# x,y
# 1,-2 2,1
#
# -1,-2 1,2
#
# -2,-1 -1,2
# -2,1
#
#
# -----------------------------------------------------------------------------------------------------------
# # we store the positions by preprocessing the moves already known to us :
# x_moves = [2, 1, -1, -2, -2, -1, 1, 2]
# y_moves = [1, 2, 2, 1, -1, -2, -2, -1]
def pretty_print(board):
    """Print the 8x8 board, one row per line, each cell followed by a space."""
    for row in range(8):
        for col in range(8):
            print(board[row][col], end=" ")
        print()
def is_safe(board, curr_x, curr_y):
    """Return True when (curr_x, curr_y) lies on the 8x8 board and that
    square is still unvisited (visited squares hold a move number,
    unvisited ones hold -1)."""
    on_board = 0 <= curr_x < 8 and 0 <= curr_y < 8
    return on_board and board[curr_x][curr_y] == -1
def solve(board, curr_x, curr_y, x_moves, y_moves, pos):
    """Backtracking search for a knight's tour.

    Tries every knight move from (curr_x, curr_y); a legal target square is
    stamped with the move number *pos* and the search recurses.  On failure
    the square is reset to -1 (undo) and the next move is tried.  Returns
    True once all 64 squares carry a move number.
    """
    if pos == 64:  # every square has been visited
        return True
    for dx, dy in zip(x_moves, y_moves):
        nxt_x, nxt_y = curr_x + dx, curr_y + dy
        if not is_safe(board, nxt_x, nxt_y):
            continue
        board[nxt_x][nxt_y] = pos
        if solve(board, nxt_x, nxt_y, x_moves, y_moves, pos + 1):
            return True
        board[nxt_x][nxt_y] = -1  # backtrack
    return False
if __name__ == '__main__':
    # 8x8 board; -1 marks an unvisited square.
    board = [[-1 for _ in range(8)] for _ in range(8)]
    # The eight possible knight moves as paired x/y offsets.
    x_moves = [2, 1, -1, -2, -2, -1, 1, 2]
    y_moves = [1, 2, 2, 1, -1, -2, -2, -1]
    # Start the tour from the top-left corner as move 0.
    board[0][0] = 0
    pos = 1
    if not solve(board, 0, 0, x_moves, y_moves, pos):
        print("FALSE")
    else:
        print("TRUE")
| true |
a1d11eb12df885cf8be2579f449f51ece758a991 | Python | EddyMM/lipa-less | /POS/tests/user_management/login/test_login.py | UTF-8 | 1,929 | 2.53125 | 3 | [] | no_license | import unittest
from POS.models.base_model import AppDB
from POS.tests.base.base_test_case import BaseTestCase
class TestLogin(BaseTestCase):
    """Integration tests for the login view (GET form + POST credential checks).

    Each test runs against a freshly created — and afterwards dropped —
    database schema so no state leaks between tests.
    """
    def setUp(self):
        """Create a Flask test client and rebuild a clean database schema."""
        TestLogin.confirm_app_in_testing_mode()
        from POS import app
        app.testing = True
        app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
        self.test_app = app.test_client()
        # Start each test from an empty, freshly created schema.
        AppDB.db_session.commit()
        AppDB.BaseModel.metadata.drop_all()
        AppDB.BaseModel.metadata.create_all()
    def tearDown(self):
        """Drop the schema after each test."""
        AppDB.db_session.commit()
        AppDB.BaseModel.metadata.drop_all()
    def test_get(self):
        """GET /login renders the login form."""
        rv = self.test_app.get("/login")
        assert b"LOG IN" in rv.data
    def test_post(self):
        """POST /login: validation errors, wrong credentials and success.

        NOTE(review): self.login is presumably provided by BaseTestCase —
        confirm its signature (email, password) against the base class.
        """
        from POS.models.user_management.user import User
        test_name = "lipaless"
        test_email = "lipaless@gmail.com"
        test_password = "lipaless_pw"
        # Create a user to test against
        # TODO: Create user from an SQL file
        AppDB.db_session.add(User(
            name=test_name,
            email=test_email,
            password=test_password
        ))
        AppDB.db_session.commit()
        # Test for email being empty
        rv = self.login("", test_password)
        assert b"Fill in" in rv.data
        # Test for password being empty
        rv = self.login(test_email, "")
        assert b"Fill in" in rv.data
        # Test for an email that exists and password is correct
        rv = self.login(test_email, test_password)
        assert b"Successful login" in rv.data
        # Test for an email that exists and password is wrong
        rv = self.login(test_email, test_password + "extra")
        assert b"Wrong password" in rv.data
        # Test for an email that doesn't exist in the first place
        rv = self.login(test_email + "extra", test_password)
        assert b"email not found" in rv.data
if __name__ == "__main__":
    # Allow running this test module directly with the unittest runner.
    unittest.main()
| true |
be2a54a50243fbf5b14d0ea2ae0665499537b19e | Python | jmflorez/pymatgen | /pymatgen/util/io_utils.py | UTF-8 | 11,893 | 2.921875 | 3 | [
"MIT"
] | permissive | #!/usr/bin/env python
"""
This module provides utility classes for io operations.
"""
__author__ = "Shyue Ping Ong, Rickard Armiento, Anubhav Jain, G Matteo"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "Sep 23, 2011"
import re
import numpy
import os
import time
import errno
from bz2 import BZ2File
from gzip import GzipFile
def zopen(filename, *args, **kwargs):
    """
    Open *filename* transparently, selecting the opener from its extension:
    ".bz2" -> BZ2File, ".gz"/".z" -> GzipFile, anything else -> the built-in
    open().

    Args:
        filename:
            Path to open.
        args:
            Standard args for python open(..). E.g., 'r' for read, 'w' for
            write.
        kwargs:
            Standard kwargs for python open(..).

    Returns:
        File handler
    """
    ext = filename.split(".")[-1].upper()
    if ext == "BZ2":
        opener = BZ2File
    elif ext in ("GZ", "Z"):
        opener = GzipFile
    else:
        opener = open
    return opener(filename, *args, **kwargs)
def zpath(filename):
    """
    Return an existing (possibly compressed) variant of *filename*.

    Checks the bare name first, then each known compression suffix
    (.gz/.GZ/.bz2/.BZ2/.z/.Z) in order, returning the first path that exists
    on disk.  Falls back to *filename* unchanged when nothing exists.
    """
    for ext in ["", '.gz', '.GZ', '.bz2', '.BZ2', '.z', '.Z']:
        candidate = "{}{}".format(filename, ext)
        if os.path.exists(candidate):
            return candidate
    return filename
def clean_lines(string_list, remove_empty_lines=True):
    """
    Generator that strips '#' comments and surrounding whitespace from each
    string in *string_list*.

    Args:
        string_list:
            Iterable of strings.
        remove_empty_lines:
            When True (default), lines that are empty after stripping are
            skipped instead of yielded.

    Yields:
        Cleaned strings with no leading/trailing whitespace.
    """
    for raw in string_list:
        # partition() drops everything from the first '#' onwards; when no
        # '#' is present the whole string survives.
        stripped = raw.partition('#')[0].strip()
        if stripped or not remove_empty_lines:
            yield stripped
def micro_pyawk(filename, search, results=None, debug=None, postdebug=None):
    """
    Small awk-mimicking search routine.

    'filename' is the file to search through.

    'search' is the "search program", a list of lists/tuples with 3 elements;
    i.e. [[regex, test, run], [regex, test, run], ...]

    'results' is an object that your search program will have access to for
    storing results.

    Here regex is either a Regex object, or a string that we compile into a
    Regex. test and run are callable objects.

    This function goes through each line in filename, and if regex matches
    that line *and* test(results, line) == True (or test is None) we execute
    run(results, match), where match is the match object from running
    Regex.match.

    The default results is an empty dictionary. Passing a results object lets
    you interact with it in run() and test(). Hence, in many occasions it is
    thus clever to use results=self.

    Author: Rickard Armiento

    Returns:
        results
    """
    if results is None:
        results = {}

    # Compile string patterns into regex objects up front.
    for entry in search:
        if isinstance(entry[0], str):
            entry[0] = re.compile(entry[0])

    with zopen(filename) as f:
        for line in f:
            for i in range(len(search)):
                match = search[i][0].search(line)
                # Bug fix: the original condition was
                # "search[i][1] is not None or search[i][1](results, line)",
                # which ran the action unconditionally whenever a test
                # callable was supplied, and crashed (calling None) when it
                # was omitted.  The documented contract is: run when no test
                # is given, or when the test returns True.
                if match and (search[i][1] is None
                              or search[i][1](results, line)):
                    if debug is not None:
                        debug(results, match)
                    search[i][2](results, match)
                    if postdebug is not None:
                        postdebug(results, match)

    return results
def clean_json(input_json, strict=False):
    """
    Recursively sanitize a json-like object (list, tuple, numpy array or
    dict, nested or otherwise) so that it can be json-serialized: all
    dictionary keys are converted to strings and sequences become plain
    lists.

    Args:
        input_json:
            Input object (dict, list, tuple, numpy array, scalar, ...).
        strict:
            Sets the behavior when an object clean_json does not understand
            is encountered.  If strict is True, clean_json recurses into the
            object's to_dict attribute (raising AttributeError when absent).
            If strict is False, the object is simply converted with str().

    Returns:
        Sanitized structure that can be json serialized.
    """
    if isinstance(input_json, (list, numpy.ndarray, tuple)):
        return [clean_json(i, strict=strict) for i in input_json]
    elif isinstance(input_json, dict):
        return {str(k): clean_json(v, strict=strict)
                for k, v in input_json.items()}
    elif isinstance(input_json, (int, float)):
        return input_json
    else:
        if not strict:
            return str(input_json)
        else:
            # Bug fix: the original tested isinstance(input_json, basestring);
            # "basestring" does not exist on Python 3 and raised NameError.
            if isinstance(input_json, str):
                return str(input_json)
            elif input_json is None:
                return 'None'
            else:
                return clean_json(input_json.to_dict, strict=strict)
def which(program):
    """
    Return the full path to an executable.

    When *program* contains a directory component it is accepted only if it
    is an executable file; otherwise each directory on PATH is searched.
    Returns None when nothing matches.
    """
    def _is_executable(path):
        return os.path.isfile(path) and os.access(path, os.X_OK)

    head, _tail = os.path.split(program)
    if head:
        # Explicit path given: accept it as-is only if it is executable.
        return program if _is_executable(program) else None
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if _is_executable(candidate):
            return candidate
    return None
def reverse_readline(m_file, blk_size=4096, max_mem=4000000):
    """
    Generator method to read a file line-by-line, but backwards. This allows
    one to efficiently get data at the end of a file.

    Based on code by Peter Astrand <astrand@cendio.se>, using modifications by
    Raymond Hettinger and Kevin German.
    http://code.activestate.com/recipes/439045-read-a-text-file-backwards
    -yet-another-implementat/

    Reads file forwards and reverses in memory for files smaller than the
    max_mem parameter, or for gzip files where reverse seeks are not supported.
    Files larger than max_mem are dynamically read backwards.

    Args:
        m_file:
            File stream to read (backwards)
        blk_size:
            The buffer size. Defaults to 4096.
        max_mem:
            The maximum amount of memory to involve in this operation. This is
            used to determine when to reverse a file in-memory versus seeking
            portions of a file. For bz2 files, this sets the maximum block
            size.

    Returns:
        Generator that returns lines from the file. Similar behavior to the
        file.readline() method, except the lines are returned from the back
        of the file.
    """
    file_size = os.path.getsize(m_file.name)
    # If the file size is within our desired RAM use, just reverse it in memory
    # GZip files must use this method because there is no way to negative seek
    if file_size < max_mem or isinstance(m_file, GzipFile):
        for line in reversed(m_file.readlines()):
            yield line.rstrip()
    else:
        if isinstance(m_file, BZ2File):
            # for bz2 files, seeks are expensive. It is therefore in our best
            # interest to maximize the blk_size within limits of desired RAM
            # use.
            blk_size = min(max_mem, file_size)
        buf = ""
        # Position at end-of-file before reading blocks backwards.
        m_file.seek(0, 2)
        # NOTE(review): read(1) at EOF returns "" so trailing_newline looks
        # like it is always False; a seek(-1, 2) was probably intended —
        # confirm against the upstream recipe before relying on it.
        lastchar = m_file.read(1)
        trailing_newline = (lastchar == "\n")
        while 1:
            newline_pos = buf.rfind("\n")
            pos = m_file.tell()
            if newline_pos != -1:
                # Found a newline: emit everything after it as one line.
                line = buf[newline_pos + 1:]
                buf = buf[:newline_pos]
                if pos or newline_pos or trailing_newline:
                    line += "\n"
                yield line
            elif pos:
                # No newline in the buffer yet: prepend another block read
                # from just before the current position.
                toread = min(blk_size, pos)
                m_file.seek(pos - toread, 0)
                buf = m_file.read(toread) + buf
                m_file.seek(pos - toread, 0)
                if pos == toread:
                    # Reached the start: add a sentinel newline so the first
                    # line of the file is emitted by the branch above.
                    buf = "\n" + buf
            else:
                # Start-of-file
                return
class FileLockException(Exception):
    """Exception raised by FileLock (e.g. when lock acquisition times out)."""
class FileLock(object):
    """
    A file locking mechanism that has context-manager support so you can use
    it in a with statement. This should be relatively cross-compatible as it
    doesn't rely on msvcrt or fcntl for the locking.

    Taken from http://www.evanfosmark.com/2009/01/cross-platform-file-locking
    -support-in-python/
    """
    # Exposed so callers can catch FileLock.Error without importing the
    # exception class directly.
    Error = FileLockException
    def __init__(self, file_name, timeout=10, delay=.05):
        """
        Prepare the file locker. Specify the file to lock and optionally
        the maximum timeout and the delay between each attempt to lock.

        Args:
            file_name:
                Name of file to lock.
            timeout:
                Maximum timeout for locking. Defaults to 10.
            delay:
                Delay between each attempt to lock. Defaults to 0.05.
        """
        self.file_name = os.path.abspath(file_name)
        # The lock is represented by a sibling ".lock" file on disk.
        self.lockfile = os.path.abspath(file_name) + ".lock"
        self.timeout = float(timeout)
        self.delay = float(delay)
        self.is_locked = False
        if self.delay > self.timeout or self.delay <= 0 or self.timeout <= 0:
            raise ValueError("delay and timeout must be positive with delay "
                             "<= timeout")
    def acquire(self):
        """
        Acquire the lock, if possible. If the lock is in use, it check again
        every `delay` seconds. It does this until it either gets the lock or
        exceeds `timeout` number of seconds, in which case it throws
        an exception.
        """
        start_time = time.time()
        while True:
            try:
                # O_CREAT | O_EXCL makes the creation atomic: it fails with
                # EEXIST if another process already holds the lockfile.
                self.fd = os.open(self.lockfile,
                                  os.O_CREAT | os.O_EXCL | os.O_RDWR)
                break
            except (OSError,) as e:
                if e.errno != errno.EEXIST:
                    raise
                # Lockfile exists: keep polling until we time out.
                if (time.time() - start_time) >= self.timeout:
                    raise FileLockException("%s: Timeout occured." %
                                            self.lockfile)
                time.sleep(self.delay)
        self.is_locked = True
    def release(self):
        """ Get rid of the lock by deleting the lockfile.
            When working in a `with` statement, this gets automatically
            called at the end.
        """
        if self.is_locked:
            os.close(self.fd)
            os.unlink(self.lockfile)
            self.is_locked = False
    def __enter__(self):
        """
        Activated when used in the with statement. Should automatically
        acquire a lock to be used in the with block.
        """
        if not self.is_locked:
            self.acquire()
        return self
    def __exit__(self, type, value, traceback):
        """
        Activated at the end of the with statement. It automatically releases
        the lock if it isn't locked.
        """
        if self.is_locked:
            self.release()
    def __del__(self):
        """
        Make sure that the FileLock instance doesn't leave a lockfile
        lying around.
        """
        self.release()
| true |
2fc912d23c2c7baa58fe095cb1b6a5936a7c6bd0 | Python | ThanosPapas/diakrita | /HavelHakimi.py | UTF-8 | 2,327 | 3.53125 | 4 | [] | no_license | import networkx as nx
import matplotlib.pyplot as plt
from random import choice
def test(lst, vathmos):
    """Realize a graphical degree sequence as a graph and draw it.

    *lst* holds the node ids; *vathmos* holds each node's remaining degree
    (mutated in place).  Nodes with remaining stubs are picked at random and
    wired to the highest-remaining-degree nodes until all degrees reach zero.
    """
    G = nx.Graph()
    for i in range(len(lst)):
        # Pick a random node that still has unconnected edge stubs.
        x = choice(lst)
        while vathmos[x] == 0:
            x = choice(lst)
        tmp = vathmos[:]
        tmp[x] = 0 #so the node does not get connected to itself
        for j in range(vathmos[x]):
            # Always attach to the node with the largest remaining degree.
            ind = tmp.index(max(tmp))
            G.add_edge(x, ind)
            tmp[ind] -=1
            vathmos[ind] = tmp[ind] #copy the change back to the original array
            tmp[ind] = 0 #so the node is not connected to the same neighbour twice (this one was tricky!)
        vathmos[x] = 0
        if all(v==0 for v in vathmos):
            nx.draw_networkx(G)
            break
def check(lst):
    """Havel-Hakimi test: return True iff *lst* is a graphical degree sequence.

    The list is consumed in place: repeatedly remove the largest degree d and
    decrement the d next-largest entries; the sequence is graphical exactly
    when this process ends with every entry at zero.
    """
    while True:
        lst.sort(reverse=True)
        largest = lst.pop(0)
        if largest > len(lst):
            # Fewer remaining nodes than edge stubs to place.
            return False
        for idx in range(largest):
            lst[idx] -= 1
        if any(d < 0 for d in lst):
            return False
        if all(d == 0 for d in lst):
            return True
def insert(n):
    """Prompt for the degree of each of the *n* nodes; if the sequence is
    graphical, build and draw a realization of it."""
    lst = [i for i in range(n)]
    vathmos =[]
    for i in range(n):
        x = int(input(f"Εισάγετε αριθμό δεσμών για τον {i + 1}ο κόμβο: "))
        vathmos.append(x)
    if check(vathmos[:]): #check graphicality on a copy, since check() consumes its list
        test(lst, vathmos)
    else:
        print("Αυτή η ακολουθία δεν είναι γραφική.")
def main():
    """Ask for the node count (an integer > 1) until the input is valid,
    then run insert() and show the drawn graph."""
    while True:
        x = input("Εισάγετε αριθμό κόμβων: ")
        try:
            n = int(x)
            if n <=1:
                print("Ο αριθμός πρέπει να είναι μεγαλύτερος του 1. ", end='')
                continue
            insert(n)
            plt.show()
            break
        except ValueError:
            # Non-integer input: re-prompt.
            print("Απαιτείται ακέραιος αριθμός. ", end='')
if __name__ == '__main__':
    main()
4dcc5d1eb392569dab6791a91cadb0352b01ade3 | Python | lvybupt/PyLearn | /pytest/findthenum/finderthenum.py | UTF-8 | 723 | 3.515625 | 4 | [] | no_license | '''
python 算法 习题3-4
'''
def ftm1(num,q):
upnum=1
while upnum <= num:
upnum = upnum * (2**q)
for i in range(upnum):
if num == i:
return i
def ftm2(num,q):
upnum = 1
while upnum <= num:
upnum = upnum * (2 ** q)
##i = upnum
downnum = 0
while True:
if (upnum + downnum) > num * 2:
upnum = (upnum + downnum)//2
elif(upnum + downnum) < num * 2:
downnum = (upnum + downnum)//2
else:
return (upnum + downnum)//2
if __name__ == '__main__':
from random import randrange
n = 10 ** 8
num = randrange(n)
print("ftp1",ftm1(num,10))
print("ftp2",ftm2(num,10))
print(num)
| true |
4301c77ac84fd5762f5377dbbb1b889650e560bf | Python | aatapa/RLScore | /rlscore/learner/rankrls_with_pairwise_preferences.py | UTF-8 | 6,333 | 2.734375 | 3 | [
"MIT"
] | permissive | #
# The MIT License (MIT)
#
# This file is part of RLScore
#
# Copyright (c) 2015 - 2016 Tapio Pahikkala, Antti Airola
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import coo_matrix
from rlscore.utilities import adapter
from rlscore.utilities import linalg
from rlscore.predictor import PredictorInterface
class PPRankRLS(PredictorInterface):
"""Regularized least-squares ranking (RankRLS) with pairwise preferences
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Data matrix
pairs_start_inds : {array-like}, shape = [n_preferences]
pairwise preferences: pairs_start_inds[i] > pairs_end_inds[i]
pairs_end_inds : {array-like}, shape = [n_preferences]
pairwise preferences: pairs_start_inds[i] > pairs_end_inds[i]
regparam : float, optional
regularization parameter, regparam > 0 (default=1.0)
kernel : {'LinearKernel', 'GaussianKernel', 'PolynomialKernel', 'PrecomputedKernel', ...}
kernel function name, imported dynamically from rlscore.kernel
basis_vectors : {array-like, sparse matrix}, shape = [n_bvectors, n_features], optional
basis vectors (typically a randomly chosen subset of the training data)
Other Parameters
----------------
bias : float, optional
LinearKernel: the model is w*x + bias*w0, (default=1.0)
gamma : float, optional
GaussianKernel: k(xi,xj) = e^(-gamma*<xi-xj,xi-xj>) (default=1.0)
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=1.0)
coef0 : float, optional
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=0.)
degree : int, optional
PolynomialKernel: k(xi,xj) = (gamma * <xi, xj> + coef0)**degree (default=2)
Attributes
-----------
predictor : {LinearPredictor, KernelPredictor}
trained predictor
Notes
-----
Computational complexity of training:
m = n_samples, d = n_features, p = n_preferences, b = n_bvectors
O(m^3 + dm^2 + p): basic case
O(dm^2 + p): Linear Kernel, d < m
O(bm^2 + p): Sparse approximation with basis vectors
RankRLS algorithm was generalized in [1] to learning directly from pairwise preferences.
References
----------
[1] Tapio Pahikkala, Evgeni Tsivtsivadze, Antti Airola, Jouni Jarvinen, and Jorma Boberg.
An efficient algorithm for learning to rank from preference graphs.
Machine Learning, 75(1):129-165, 2009.
"""
def __init__(self, X, pairs_start_inds, pairs_end_inds, regparam = 1.0, kernel='LinearKernel', basis_vectors = None, **kwargs):
kwargs['kernel'] = kernel
kwargs['X'] = X
if basis_vectors is not None:
kwargs["basis_vectors"] = basis_vectors
self.regparam = regparam
self.pairs = np.vstack([pairs_start_inds, pairs_end_inds]).T
self.svdad = adapter.createSVDAdapter(**kwargs)
self.svals = np.mat(self.svdad.svals)
self.svecs = self.svdad.rsvecs
self.results = {}
self.X = csc_matrix(X)
self.bias = 0.
self.results = {}
self.solve(regparam)
def solve(self, regparam):
"""Re-trains RankRLS for the given regparam.
Parameters
----------
regparam : float, optional
regularization parameter, regparam > 0 (default=1.0)
Notes
-----
"""
size = self.svecs.shape[0]
if not hasattr(self, "multipleright"):
vals = np.concatenate([np.ones((self.pairs.shape[0]), dtype=np.float64), -np.ones((self.pairs.shape[0]), dtype = np.float64)])
row = np.concatenate([np.arange(self.pairs.shape[0]), np.arange(self.pairs.shape[0])])
col = np.concatenate([self.pairs[:, 0], self.pairs[:, 1]])
coo = coo_matrix((vals, (row, col)), shape = (self.pairs.shape[0], size))
self.L = (coo.T * coo)#.todense()
#Eigenvalues of the kernel matrix
evals = np.multiply(self.svals, self.svals)
#Temporary variables
ssvecs = np.multiply(self.svecs, self.svals)
#These are cached for later use in solve and computeHO functions
ssvecsTLssvecs = ssvecs.T * self.L * ssvecs
LRsvals, LRevecs = linalg.eig_psd(ssvecsTLssvecs)
LRsvals = np.mat(LRsvals)
LRevals = np.multiply(LRsvals, LRsvals)
LY = coo.T * np.mat(np.ones((self.pairs.shape[0], 1)))
self.multipleright = LRevecs.T * (ssvecs.T * LY)
self.multipleleft = ssvecs * LRevecs
self.LRevals = LRevals
self.LRevecs = LRevecs
self.regparam = regparam
#Compute the eigenvalues determined by the given regularization parameter
self.neweigvals = 1. / (self.LRevals + regparam)
self.A = self.svecs * np.multiply(1. / self.svals.T, (self.LRevecs * np.multiply(self.neweigvals.T, self.multipleright)))
self.predictor = self.svdad.createModel(self)
| true |
4bb28f37ab04ef6511a29a2c26ff0162604edc5a | Python | NikkiNgNguyen/PythonProjects | /CECS100/NikkiNguyen_GeometryCalculator_CECS100.py | UTF-8 | 1,628 | 3.96875 | 4 | [] | no_license | '''
Student Name: Nikki Nguyen
Professor Name: Elleni Wolde
Class: CECS 100
Date: 10/13/2015
'''
import sys
#declare constants
PI = 3.14159
HALF = .5
#The main menu for the geometry calculator
def displayMenu():
selection = 0
print(" Geometry Calculator")
print()
print("----------------------------------------")
print("| 1. Calculate the Area of a Circle |")
print("| 2. Calculate the Area of a Rectancle |")
print("| 3. Calculate the Area of a Triangle |")
print("| 4. Quit |")
print("| Enter your choice (1-4) |")
print("----------------------------------------")
print()
selection = int(input("Please choose calculate type or 4 to quit:"))
return selection
#if else
def userChoice(selection):
r = 0
length = 0
width = 0
base = 0
height = 0
if selection == 1:
r = int(input("Enter radius of circle: "))
print("Area of circle is: ", PI*(r**2))
elif selection == 2:
length = int(input("Enter length of rectangle: "))
width = int(input("Enter width of rectangle: "))
print("Area of rectangle is: ", length*width)
elif selection == 3:
base = int(input("Enter length of triangle's base: "))
height = int(input("Enter height of triangle: "))
print("Area of triangle is: ", base*height*half)
elif selection == 4:
sys.exit()
else:
print("Error: Enter values 1-4 only")
displayMenu()
#main function definition
def main():
selection = displayMenu()
userChoice(selection)
main()
| true |
69a26b4bc7b422ab84289b822beb88e9d1937fb7 | Python | ksrntheja/08-Python-Core | /venv/functions/09Factorial.py | UTF-8 | 278 | 4.34375 | 4 | [] | no_license | def factorial(number):
factorial = 1
while number > 1:
factorial *= number
number -= 1
return factorial
number = int(input('Enter a number: '))
print('{} factorial = {}'.format(number, factorial(number)))
# Enter a number: 8
# 8 factorial = 40320
| true |
c34997fe3c91ebc9d6c402d2913cf0e22385e073 | Python | Jhonata-Hudson/dojo-puzzles | /cash-machine/test_cash_machine.py | UTF-8 | 1,725 | 3.90625 | 4 | [] | no_license | # -*- coding: utf-8 -*-
from cash_machine import cash_machine
import types
def test_should_be_a_function():
assert isinstance(cash_machine, types.FunctionType)
def test_should_return_an_error_message_if_value_is_negative():
assert cash_machine(-10) == 'Informe um valor positivo'
def test_should_return_an_error_message_if_value_is_less_than_10():
assert cash_machine(9) == 'Saque um valor acima de 10 reais'
def test_should_return_an_error_message_if_number_is_not_divisible_by_10():
assert (cash_machine(11)) == 'Informe um valor válido para o saque'
def test_should_return_a_list_with_an_item():
assert len(cash_machine(10)) == 1
def test_should_return_a_note_of_10():
cash = cash_machine(10)
assert len(cash) == 1 and cash.count(10) == 1
def test_should_return_a_note_of_20():
assert cash_machine(20) == [20]
def test_should_return_a_note_of_20_and_a_note_of_10():
assert cash_machine(30) == [20, 10]
def test_should_return_a_note_of_50_and_a_note_of_10():
assert cash_machine(60) == [50, 10]
def test_should_return_a_note_of_50_and_a_note_of_20_and_a_note_of_10():
assert cash_machine(80) == [50, 20, 10]
def test_should_return_a_note_of_100_and_a_note_of_20():
assert cash_machine(120) == [100, 20]
def test_should_return_3_notes():
assert cash_machine(170) == [100, 50, 20]
def test_should_return_2_notes_of_100():
assert cash_machine(200) == [100, 100]
def test_should_return_10_notes_of_100():
cash = cash_machine(1000)
assert len(cash) == 10 and cash.count(100) == 10
def test_should_return_18_notes():
cash = cash_machine(1590)
assert len(cash) == 18 and cash.count(100) == 15 and cash.count(50) == 1 and cash.count(20) == 2
| true |
6d4d4f96d21bc7e708a316c5fcd8e7cd43159c9b | Python | anonymousr007/Deep-Learning-for-Human-Activity-Recognition | /src/data_prep/create_features.py | UTF-8 | 12,434 | 2.78125 | 3 | [
"MIT"
] | permissive | from typing import List
import numpy as np
import pandas as pd
from src.data_prep.preprocessing import Preprocess # Load class for obtaining features
def create_features(acc_raw: pd.DataFrame, gyro_raw: pd.DataFrame) -> np.ndarray:
    """Create features from raw acceleration and gyroscope sensor data.

    Pipeline: denoise -> window -> split body/gravity -> derive jerk and
    magnitude signals -> FFT amplitude/phase -> per-window statistics.
    The order in which values are stacked into each feature vector MUST stay
    in sync with the name order produced by get_feature_names().

    NOTE(review): every ``of.obtain_*`` / ``of.apply_filter`` / ``of.segment_signal``
    helper is project-local (``Preprocess``); their exact semantics are assumed
    from their names — confirm in src/data_prep/preprocessing.py.

    Args:
        acc_raw (pd.DataFrame): Raw 3-axial accelerometer signals; must contain
            columns "x", "y", "z" (indexed by name for the angle features below).
        gyro_raw (pd.DataFrame): Raw 3-axial gyroscope signals with columns denoting axes.
    Returns:
        features (np.ndarray): One row per sliding window, one column per feature.
    """
    of = Preprocess(fs=50)  # Create an instance (sampling frequency 50 Hz).
    # Remove noises by median filter & Butterworth filter
    acc_raw = of.apply_filter(signal=acc_raw, filter="median", window=5)
    acc_raw = of.apply_filter(signal=acc_raw, filter="butterworth")
    gyro_raw = of.apply_filter(signal=gyro_raw, filter="median", window=5)
    gyro_raw = of.apply_filter(signal=gyro_raw, filter="butterworth")
    # Sample signals in fixed-width sliding windows
    # (128 samples = 2.56 s at fs=50, with 50 % overlap between windows)
    tAccXYZ = of.segment_signal(acc_raw, window_size=128, overlap_rate=0.5, res_type="dataframe")
    tBodyGyroXYZ = of.segment_signal(
        gyro_raw, window_size=128, overlap_rate=0.5, res_type="dataframe"
    )
    # Separate acceleration signal into body and gravity acceleration signal
    tBodyAccXYZ, tGravityAccXYZ = [], []
    for acc in tAccXYZ:
        body_acc, grav_acc = of.separate_gravity(acc.copy())
        tBodyAccXYZ.append(body_acc)
        tGravityAccXYZ.append(grav_acc)
    # Obtain Jerk signals of body linear acceleration and angular velocity
    tBodyAccJerkXYZ, tBodyGyroJerkXYZ = [], []
    for body_acc, gyro in zip(tBodyAccXYZ, tBodyGyroXYZ):
        body_acc_jerk = of.obtain_jerk_signal(body_acc.copy())
        gyro_jerk = of.obtain_jerk_signal(gyro.copy())
        tBodyAccJerkXYZ.append(body_acc_jerk)
        tBodyGyroJerkXYZ.append(gyro_jerk)
    # Calculate the magnitude of three-dimensional signals using the Euclidean norm
    tBodyAccMag, tGravityAccMag, tBodyAccJerkMag, tBodyGyroMag, tBodyGyroJerkMag = (
        [],
        [],
        [],
        [],
        [],
    )
    for body_acc, grav_acc, body_acc_jerk, gyro, gyro_jerk in zip(
        tBodyAccXYZ, tGravityAccXYZ, tBodyAccJerkXYZ, tBodyGyroXYZ, tBodyGyroJerkXYZ
    ):
        body_acc_mag = of.obtain_magnitude(body_acc.copy())
        grav_acc_mag = of.obtain_magnitude(grav_acc.copy())
        body_acc_jerk_mag = of.obtain_magnitude(body_acc_jerk.copy())
        gyro_mag = of.obtain_magnitude(gyro.copy())
        gyro_jerk_mag = of.obtain_magnitude(gyro_jerk.copy())
        tBodyAccMag.append(body_acc_mag)
        tGravityAccMag.append(grav_acc_mag)
        tBodyAccJerkMag.append(body_acc_jerk_mag)
        tBodyGyroMag.append(gyro_mag)
        tBodyGyroJerkMag.append(gyro_jerk_mag)
    # Obtain amplitude spectrum using Fast Fourier Transform (FFT).
    # Note: tGravityAccXYZ and tBodyGyroJerkXYZ intentionally have no
    # per-axis spectrum here; only their stats/magnitudes are used.
    (
        fBodyAccXYZAmp,
        fBodyAccJerkXYZAmp,
        fBodyGyroXYZAmp,
        fBodyAccMagAmp,
        fBodyAccJerkMagAmp,
        fBodyGyroMagAmp,
        fBodyGyroJerkMagAmp,
    ) = ([], [], [], [], [], [], [])
    (
        fBodyAccXYZPhs,
        fBodyAccJerkXYZPhs,
        fBodyGyroXYZPhs,
        fBodyAccMagPhs,
        fBodyAccJerkMagPhs,
        fBodyGyroMagPhs,
        fBodyGyroJerkMagPhs,
    ) = ([], [], [], [], [], [], [])
    for (
        body_acc,
        body_acc_jerk,
        gyro,
        body_acc_mag,
        body_acc_jerk_mag,
        gyro_mag,
        gyro_jerk_mag,
    ) in zip(
        tBodyAccXYZ,
        tBodyAccJerkXYZ,
        tBodyGyroXYZ,
        tBodyAccMag,
        tBodyAccJerkMag,
        tBodyGyroMag,
        tBodyGyroJerkMag,
    ):
        # obtain_spectrum returns an (amplitude, phase) pair per signal
        body_acc_amp, body_acc_phase = of.obtain_spectrum(body_acc.copy())
        body_acc_jerk_amp, body_acc_jerk_phase = of.obtain_spectrum(body_acc_jerk.copy())
        gyro_amp, gyro_phase = of.obtain_spectrum(gyro.copy())
        body_acc_mag_amp, body_acc_mag_phase = of.obtain_spectrum(body_acc_mag.copy())
        body_acc_jerk_mag_amp, body_acc_jerk_mag_phase = of.obtain_spectrum(
            body_acc_jerk_mag.copy()
        )
        gyro_mag_amp, gyro_mag_phase = of.obtain_spectrum(gyro_mag.copy())
        gyro_jerk_mag_amp, gyro_jerk_mag_phase = of.obtain_spectrum(gyro_jerk_mag.copy())
        fBodyAccXYZAmp.append(body_acc_amp)
        fBodyAccJerkXYZAmp.append(body_acc_jerk_amp)
        fBodyGyroXYZAmp.append(gyro_amp)
        fBodyAccMagAmp.append(body_acc_mag_amp)
        fBodyAccJerkMagAmp.append(body_acc_jerk_mag_amp)
        fBodyGyroMagAmp.append(gyro_mag_amp)
        fBodyGyroJerkMagAmp.append(gyro_jerk_mag_amp)
        fBodyAccXYZPhs.append(body_acc_phase)
        fBodyAccJerkXYZPhs.append(body_acc_jerk_phase)
        fBodyGyroXYZPhs.append(gyro_phase)
        fBodyAccMagPhs.append(body_acc_mag_phase)
        fBodyAccJerkMagPhs.append(body_acc_jerk_mag_phase)
        fBodyGyroMagPhs.append(gyro_mag_phase)
        fBodyGyroJerkMagPhs.append(gyro_jerk_mag_phase)
    # Following signals are obtained by implementing above functions.
    # The ordering of these two lists fixes the feature order below.
    time_signals = [
        tBodyAccXYZ,
        tGravityAccXYZ,
        tBodyAccJerkXYZ,
        tBodyGyroXYZ,
        tBodyGyroJerkXYZ,
        tBodyAccMag,
        tGravityAccMag,
        tBodyAccJerkMag,
        tBodyGyroMag,
        tBodyGyroJerkMag,
    ]
    freq_signals = [
        fBodyAccXYZAmp,
        fBodyAccJerkXYZAmp,
        fBodyGyroXYZAmp,
        fBodyAccMagAmp,
        fBodyAccJerkMagAmp,
        fBodyGyroMagAmp,
        fBodyGyroJerkMagAmp,
        fBodyAccXYZPhs,
        fBodyAccJerkXYZPhs,
        fBodyGyroXYZPhs,
        fBodyAccMagPhs,
        fBodyAccJerkMagPhs,
        fBodyGyroMagPhs,
        fBodyGyroJerkMagPhs,
    ]
    all_signals = time_signals + freq_signals
    # Calculate feature vectors by using signals (one vector per window i)
    features = []
    for i in range(len(tBodyAccXYZ)):
        feature_vector = np.array([])
        # mean, std, mad, max, min, sma, energy, iqr, entropy
        for t_signal in all_signals:
            sig = t_signal[i].copy()
            mean = of.obtain_mean(sig)
            std = of.obtain_std(sig)
            mad = of.obtain_mad(sig)
            max_val = of.obtain_max(sig)
            min_val = of.obtain_min(sig)
            sma = of.obtain_sma(sig)
            energy = of.obtain_energy(sig)
            iqr = of.obtain_iqr(sig)
            entropy = of.obtain_entropy(sig)
            feature_vector = np.hstack(
                (feature_vector, mean, std, mad, max_val, min_val, sma, energy, iqr, entropy)
            )
        # arCoeff (autoregression coefficients, time-domain signals only)
        for t_signal in time_signals:
            sig = t_signal[i].copy()
            arCoeff = of.obtain_arCoeff(sig)
            feature_vector = np.hstack((feature_vector, arCoeff))
        # correlation between axes (3-axial time-domain signals only)
        for t_signal in [
            tBodyAccXYZ,
            tGravityAccXYZ,
            tBodyAccJerkXYZ,
            tBodyGyroXYZ,
            tBodyGyroJerkXYZ,
        ]:
            sig = t_signal[i].copy()
            correlation = of.obtain_correlation(sig)
            feature_vector = np.hstack((feature_vector, correlation))
        # maxInds, meanFreq, skewness, kurtosis (frequency-domain signals)
        for t_signal in freq_signals:
            sig = t_signal[i].copy()
            maxInds = of.obtain_maxInds(sig)
            meanFreq = of.obtain_meanFreq(sig)
            skewness = of.obtain_skewness(sig)
            kurtosis = of.obtain_kurtosis(sig)
            feature_vector = np.hstack((feature_vector, maxInds, meanFreq, skewness, kurtosis))
        # bandsEnergy (3-axial body signals only)
        for t_signal in [tBodyAccXYZ, tBodyAccJerkXYZ, tBodyGyroXYZ]:
            sig = t_signal[i].copy()
            bandsEnergy = of.obtain_bandsEnergy(sig)
            feature_vector = np.hstack((feature_vector, bandsEnergy))
        # angle between mean signal vectors and the gravity vector,
        # plus per-axis acceleration vs. per-axis gravity
        gravityMean = tGravityAccXYZ[i].mean()
        tBodyAccMean = tBodyAccXYZ[i].mean()
        tBodyAccJerkMean = tBodyAccJerkXYZ[i].mean()
        tBodyGyroMean = tBodyGyroXYZ[i].mean()
        tBodyGyroJerkMean = tBodyGyroJerkXYZ[i].mean()
        tXAxisAcc = tAccXYZ[i]["x"]
        tXAxisGravity = tGravityAccXYZ[i]["x"]
        tYAxisAcc = tAccXYZ[i]["y"]
        tYAxisGravity = tGravityAccXYZ[i]["y"]
        tZAxisAcc = tAccXYZ[i]["z"]
        tZAxisGravity = tGravityAccXYZ[i]["z"]
        tBodyAccWRTGravity = of.obtain_angle(tBodyAccMean, gravityMean)
        tBodyAccJerkWRTGravity = of.obtain_angle(tBodyAccJerkMean, gravityMean)
        tBodyGyroWRTGravity = of.obtain_angle(tBodyGyroMean, gravityMean)
        tBodyGyroJerkWRTGravity = of.obtain_angle(tBodyGyroJerkMean, gravityMean)
        tXAxisAccWRTGravity = of.obtain_angle(tXAxisAcc, tXAxisGravity)
        tYAxisAccWRTGravity = of.obtain_angle(tYAxisAcc, tYAxisGravity)
        tZAxisAccWRTGravity = of.obtain_angle(tZAxisAcc, tZAxisGravity)
        feature_vector = np.hstack(
            (
                feature_vector,
                tBodyAccWRTGravity,
                tBodyAccJerkWRTGravity,
                tBodyGyroWRTGravity,
                tBodyGyroJerkWRTGravity,
                tXAxisAccWRTGravity,
                tYAxisAccWRTGravity,
                tZAxisAccWRTGravity,
            )
        )
        # ECDF (empirical CDF percentiles of the raw body signals)
        for t_signal in [tBodyAccXYZ, tBodyGyroXYZ]:
            sig = t_signal[i].copy()
            ecdf = of.obtain_ecdf_percentile(sig)
            feature_vector = np.hstack((feature_vector, ecdf))
        features.append(feature_vector)
    return np.array(features)
def get_feature_names() -> List[str]:
    """Return the feature titles, in the exact order produced by create_features().

    Returns:
        List[str]: One name per feature column.
    """
    axes = ["X", "Y", "Z"]
    time_names = [
        "tBodyAccXYZ",
        "tGravityAccXYZ",
        "tBodyAccJerkXYZ",
        "tBodyGyroXYZ",
        "tBodyGyroJerkXYZ",
        "tBodyAccMag",
        "tGravityAccMag",
        "tBodyAccJerkMag",
        "tBodyGyroMag",
        "tBodyGyroJerkMag",
    ]
    freq_names = [
        "fBodyAccXYZAmp",
        "fBodyAccJerkXYZAmp",
        "fBodyGyroXYZAmp",
        "fBodyAccMagAmp",
        "fBodyAccJerkMagAmp",
        "fBodyGyroMagAmp",
        "fBodyGyroJerkMagAmp",
        "fBodyAccXYZPhs",
        "fBodyAccJerkXYZPhs",
        "fBodyGyroXYZPhs",
        "fBodyAccMagPhs",
        "fBodyAccJerkMagPhs",
        "fBodyGyroMagPhs",
        "fBodyGyroJerkMagPhs",
    ]

    def per_axis(base: str, stat: str) -> List[str]:
        # A name containing "XYZ" expands to one entry per axis (XYZ stripped);
        # any other name yields a single entry.
        if "XYZ" in base:
            stem = base.replace("XYZ", "")
            return [f"{stem}{stat}-{ax}" for ax in axes]
        return [f"{base}{stat}"]

    names: List[str] = []
    # 1) basic statistics over every time- and frequency-domain signal
    for base in time_names + freq_names:
        for stat in ("Mean", "Std", "Mad", "Max", "Min", "Sma", "Energy", "Iqr", "Entropy"):
            if stat == "Sma":
                # sma is a single scalar per signal; the base name is kept
                # verbatim (including "XYZ") to match create_features()
                names.append(f"{base}{stat}")
            else:
                names.extend(per_axis(base, stat))
    # 2) autoregression coefficients (time-domain signals only, 4 per axis)
    for base in time_names:
        if "XYZ" in base:
            stem = base.replace("XYZ", "")
            names.extend(f"{stem}ArCoeff-{ax}{i}" for ax in axes for i in range(4))
        else:
            names.extend(f"{base}ArCoeff{i}" for i in range(4))
    # 3) inter-axis correlation for the five 3-axial time signals
    for base in time_names[:5]:
        stem = base.replace("XYZ", "")
        names.extend(f"{stem}Correlation-{ax}" for ax in axes)
    # 4) spectral statistics (frequency-domain signals)
    for base in freq_names:
        for stat in ("MaxInds", "MeanFreq", "Skewness", "Kurtosis"):
            names.extend(per_axis(base, stat))
    # 5) energy of 14 frequency bands for the 3-axial body signals
    for stem in ("tBodyAcc", "tBodyAccJerk", "tBodyGyro"):
        names.extend(f"{stem}BandsEnergy-{ax}{i}" for i in range(14) for ax in axes)
    # 6) angles between the mean signal vectors / axes and gravity
    names.extend(
        [
            "tBodyAccWRTGravity",
            "tBodyAccJerkWRTGravity",
            "tBodyGyroWRTGravity",
            "tBodyGyroJerkWRTGravity",
            "tXAxisAccWRTGravity",
            "tYAxisAccWRTGravity",
            "tZAxisAccWRTGravity",
        ]
    )
    # 7) ECDF percentiles (10 per axis for body acceleration and gyroscope)
    names.extend(
        f"tBody{sensor}ECDF-{axis}{i}"
        for sensor in ("Acc", "Gyro")
        for axis in axes
        for i in range(10)
    )
    return names
| true |
297f776ee6be0612518af87392d11184b11667f7 | Python | moonorange/GoF_design_patterns | /code/adapter.py | UTF-8 | 700 | 3.578125 | 4 | [] | no_license | from abc import ABCMeta, abstractmethod
def main():
    """Demo: drive the adapter through both styles of the Print interface."""
    banner = PrintBanner("Hello")
    banner.print_weak()
    banner.print_strong()
class Banner:
    """Adaptee in the Adapter pattern: renders a string with simple decorations."""

    def __init__(self, string):
        # Name-mangled attribute keeps the text private to Banner.
        self.__text = string

    def show_with_paren(self):
        """Print the text wrapped in parentheses."""
        print(f"({self.__text})")

    def show_with_aster(self):
        """Print the text wrapped in asterisks."""
        print(f"*{self.__text}*")
class Print(metaclass=ABCMeta):
    """Target interface of the Adapter pattern: what client code programs against."""

    @abstractmethod
    def print_weak(self):
        """Print the banner in the weak (parenthesised) style."""
        ...

    @abstractmethod
    def print_strong(self):
        """Print the banner in the strong (asterisked) style."""
        ...
class PrintBanner(Banner, Print):
    # Adapter (class-adapter variant via multiple inheritance): satisfies the
    # Print interface by delegating to the inherited Banner rendering methods.
    def print_weak(self):
        # weak style -> parenthesised rendering
        self.show_with_paren()
    def print_strong(self):
        # strong style -> asterisk rendering
        self.show_with_aster()
if __name__ == '__main__':
    # Run the demo only when executed as a script (not on import).
    main()
| true |
72be36b8b047add080095e69eb7882f122255f73 | Python | shicks255/CodeWars_Python | /Code_Wars/Kata_Next_Bigger_Number_With_Same_Digits/solution.py | UTF-8 | 916 | 3.65625 | 4 | [] | no_license | # !python3
from unittest import TestCase
def next_bigger(n):
numbers = list(str(n))
i = len(numbers)-1
temp = numbers[i]
numbers[i] = numbers[i-1]
numbers[i-1] = temp
if numbers <= list(str(n)):
i = len(numbers)-2
temp = numbers[i]
numbers[i] = numbers[i-1]
numbers[i-1] = temp
return int(''.join(numbers[:i]) + ''.join(sorted(numbers[i:])))
return int(''.join(numbers))
# Removed: stray module-level debug call `next_bigger(2074)` that ran on every
# import and discarded its result.
class Tests(TestCase):
    """Regression test for next_bigger.

    The original file carried a list of commented-out kata cases; only the
    single assertion below was active, so only it is kept.
    """

    def testAll(self):
        value, expected = 1234567890, 1234567908
        self.assertEqual(next_bigger(value), expected)
2e94bfd66c65b6f89e41694d2441717265e9531f | Python | Luca96/imgann | /error.py | UTF-8 | 5,193 | 2.75 | 3 | [
"MIT"
] | permissive | # -----------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2018 Luca Anzalone
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -----------------------------------------------------------------------------
# -- Error: provide functions to analize the shape_predictor accuracy
# -----------------------------------------------------------------------------
import math
import utils
from utils import dlib
from utils import Colors
def __distance(p1, p2):
'''returns the distance between two points'''
dx = p2[0] - p1[0]
dy = p2[1] - p1[1]
return math.sqrt(dx * dx + dy * dy)
def normalized_root_mean_square(truth, measured):
'''returns the NRMSE across the ground truth and the measure points'''
assert(len(truth) == len(measured))
dist = 0
# inter-ocular distance
iod = __distance(truth[36], truth[45])
for i in range(0, len(truth)):
dist += __distance(truth[i], measured[i])
return dist / iod
def point_to_point(truth, measured):
'''returns the point-to-point error across truth and measured points'''
assert(len(truth) == len(measured))
Min = 2 ^ 31
Max = -2 ^ 31
Avg = 0
iod = __distance(truth[36], truth[45])
num = len(truth)
for i in range(0, num):
err = __distance(truth[i], measured[i]) / iod
Avg += err
if err > Max:
Max = err
if err < Min:
Min = err
return Min, Max, Avg / num
def test_shape_predictor(xml, model):
    '''wraps dlib.test_shape_predictor: measures the accuracy of [model]
    on the annotations described in the [xml] file and prints the result'''
    error = dlib.test_shape_predictor(xml, model)
    print(f"model error: {error} on {xml}")
'''measure the error across the given dataset,
it compares the measured points with the annotated ground truth,
optionally you can [view] the results'''
assert(model)
# load face and landmark detectors
utils.load_shape_predictor(model)
# utils.init_face_detector(True, 150)
# init average-error
err = 0
num = 0
for img, lmarks, path in utils.ibug_dataset(folder):
# detections
face = utils.prominent_face(utils.detect_faces(img, detector="dlib"))
measured = utils.detect_landmarks(img, face)
# get error
num += 1
err += normalized_root_mean_square(lmarks, measured)
# results:
if view is True:
utils.draw_rect(img, face, color=Colors.yellow)
utils.draw_points(img, lmarks, color=Colors.green)
utils.draw_points(img, measured, color=Colors.red)
utils.show_image(utils.show_properly(utils.crop_image(img, face)))
print(err, num, err / num)
print("average NRMS Error for {} is {}".format(folder, err / num))
def compare_models(folder="testset", m1=None, m2=None, view=False):
'''compare the [m1] shape_predictor aganist the [m2] model,
optionally you can [view] the results'''
assert(m1 and m2)
utils.init_face_detector(True, 150)
# load models
utils.load_shape_predictor(m2)
sp_m2 = utils.shape_predictor
utils.load_shape_predictor(m1)
sp_m1 = utils.shape_predictor
# init error
err = 0
num = 0
for face, region in utils.faces_inside(folder):
h, w = face.shape[:2]
if h == 0 or w == 0:
continue
box = utils.Region(0, 0, region.width, region.height)
# detect landmarks
utils.shape_predictor = sp_m1
lmarks_m1 = utils.detect_landmarks(face, box)
utils.shape_predictor = sp_m2
lmarks_m2 = utils.detect_landmarks(face, box)
# update error:
num += 1
# err += normalized_root_mean_square(lmarks_m1, lmarks_m2)
# results:
if view is True:
utils.draw_points(face, lmarks_m1, color=Colors.green)
utils.draw_points(face, lmarks_m2, color=Colors.red)
utils.show_image(utils.show_properly(face))
if num != 0:
err /= num
print("the NRMSE of m1 aganist m2 is {}".format(err))
| true |
7ced9cc459718407ba7f39a7d8b46bfb76d9bc1b | Python | georgiev-ventsi/my-python | /Basics/Average Words Length/average-words-length.py | UTF-8 | 475 | 4.40625 | 4 | [] | no_license | # For a given sentence, return the average word length.
sentence1 = "Don't let another somber pariah consume your soul"
sentence2 = "Good things come to people who wait, but better things come to those who go out and get them."
def average_word_length(text):
for n in "!?',;.\"":
text = text.replace(n, '')
words = text.split(' ')
print(round(sum(len(word) for word in words)/len(words),2))
average_word_length(sentence1)
average_word_length(sentence2) | true |
bbe2653baae9d6e6e36342d44a01eb62d28cfea7 | Python | phoenix9373/Algorithm | /2020/기초(D2~D3)/D2/5515.py | UTF-8 | 452 | 3.03125 | 3 | [] | no_license | # 31: 1, 3, 5, 7, 8, 10, 12
# 30: 4, 6, 9, 11
for c in range(1, int(input())+1):
cnt = 4
m, d = map(int, input().split())
dic = {}
for k in range(1, 13):
if k == 2:
dic[k] = 29
elif k in [4, 6, 9, 11]:
dic[k] = 30
else:
dic[k] = 31
if m > 1:
for i in range(1, m):
cnt += dic[i]
cnt += d-1
else:
cnt += d-1
print(f'#{c} {cnt%7}') | true |
f81eca3f794f092899ce0b69254bad42d437cc7e | Python | mjginzo/ndusc | /data/model_S1.py | UTF-8 | 894 | 2.59375 | 3 | [] | no_license | # _________________________________________________________________________
#
# Example: stochastic problem from the thesis of Lee.
# First stage model.
# _________________________________________________________________________
#
# Imports
#
from pyomo.environ import *
def model_S1(data):
#
# Model
#
model = ConcreteModel()
#
# Sets
#
model.Resources = Set(initialize=data['sets']['Resources'])
#
# Parameters
#
model.P = Param(model.Resources,
initialize=data['params']['P'],
within=PositiveReals)
#
# Variables
#
model.Z = Var(model.Resources, within=Binary)
#
# Objective
#
def Obj_rule(model):
return + sum(model.P[i]*model.Z[i] for i in model.Resources)
model.Obj = Objective(rule=Obj_rule, sense=minimize)
return model
| true |
9198c2fba4175e2d13b5d009465c544b8c8615df | Python | Al153/PartIIProject | /src/main/resources/imdb/prep.py | UTF-8 | 3,254 | 2.875 | 3 | [] | no_license | import json
import csv
import sys
# process data from here https://www.kaggle.com/tmdb/tmdb-movie-metadata
class Movie(object):
"""Movie object"""
def __init__(self, id, title, cast, directors):
self.id = id
self.title = title
self.cast = cast
self.directors = directors
def toNode(self):
return {
"title": self.title,
"_key": str(self.id),
"language": "english",
"type": "Movie"
}
class Person(object):
"""actor object"""
def __init__(self, name, id):
self.name = name
self.id = id
def __eq__(self, that):
return isinstance(that, self.__class__) and self.name == that.name and self.id == that.id
def __hash__(self):
return hash(self.name) ^ (hash(self.id) << 1)
def toNode(self):
return {
"name": self.name,
"_key": str(self.id),
"type": "Person"
}
class ActsIn(object):
"""docstring for ActsIn"""
def __init__(self, person, movie):
self.person = person
self.movie = movie
def toNode(self):
return {
"_key": str(0),
"_from": str(self.person.id),
"_to": str(self.movie.id),
"$label": "ACTS_IN"
}
class Directed(object):
"""docstring for ActsIn"""
def __init__(self, person, movie):
self.person = person
self.movie = movie
def toNode(self):
return {
"_key": str(0),
"_from": str(self.person.id),
"_to": str(self.movie.id),
"$label": "DIRECTED"
}
def getNMovies(file, n, lowDensity):
r = csv.reader(file)
columns = r.next()
movies = []
for row in r:
movies.append(Movie(row[0], row[1], getActors(json.loads(row[2])), getDirectors(json.loads(row[3]))))
return sorted(movies, key = lambda m: len(m.cast), reverse = not(lowDensity))[:n]
def getActors(cast):
actors = []
for actor in cast:
newRole = Person(actor['name'], actor['id'])
actors.append(newRole)
return actors
def getDirectors(crew):
directors = []
for director in (filter(lambda c: c['job'] == "Director", crew)):
newDirector = Person(director['name'], director['id'])
directors.append(newDirector)
return directors
def getActedIn(movies):
actedIn = []
for movie in movies:
actedIn += map(lambda a: ActsIn(a, movie), movie.cast)
return actedIn
def getDirected(movies):
directed = []
for movie in movies:
directed += map(lambda d: Directed(d, movie), movie.directors)
return directed
def multilineJson(jString):
return jString.replace("}, ", "},\n")
def doMain(sourceFile, n, destinationFolderPath, lowDensity = False):
movies = getNMovies(sourceFile, n, lowDensity)
print "Got movies"
people = list(set([person for movie in movies for person in (movie.cast + movie.directors)]))
print "Got People"
actedIn = getActedIn(movies)
print "got acted in"
directed = getDirected(movies)
print "got directed"
nodes = json.dumps([p.toNode() for p in people] + [m.toNode() for m in movies])
print "got nodes json"
edges = json.dumps([r.toNode() for r in actedIn] + [r.toNode() for r in directed])
print "got edges json"
open(destinationFolderPath + "/nodes.json", "w").write(multilineJson(nodes))
open(destinationFolderPath + "/edges.json", "w").write(multilineJson(edges))
if __name__ == "__main__":
# Usage: prep.py <dest> <n>
doMain(open("tmdb_5000_credits.csv"), int(sys.argv[2]), sys.argv[1], len(sys.argv) > 3)
| true |
6d98ba054fb69b22e9e1704a969c8a99d3f275d2 | Python | achntj/2-semesters-of-python-in-HS | /swapping.py | UTF-8 | 224 | 4.0625 | 4 | [] | no_license | #swap three numbers
a=int(input('Enter number 1: '))
b=int(input('Enter number 2: '))
c=int(input('Enter number 3: '))
print('Numbers before swapping are: ',a,b,c)
a,b,c=b,c,a
print('Numbers after swapping are: ' , a,b,c)
| true |
d4077bbbaff7848585be684873d8d90c786db872 | Python | LYXalex/Leetcode-PythonSolution | /扫描线/56. Merge Intervals.py | UTF-8 | 461 | 2.921875 | 3 | [] | no_license | class Solution:
def merge(self, intervals):
res = []
if not intervals:
return res
intervals.sort()
first = intervals[0]
for i in range(1, len(intervals)):
if intervals[i][0] > first[1]:
res.append(first)
first = intervals[i]
else:
first = [first[0], max(first[1], intervals[i][1])]
res.append(first)
return res
| true |
3be9d4081d2c9e172efacd74f78135ed072615a8 | Python | jpclark6/advent_of_code_2020 | /09_xmas/solution.py | UTF-8 | 1,005 | 3.765625 | 4 | [] | no_license | """
Advent of code
Day 9
Brute force but works in a few seconds
"""
from itertools import combinations
def parse_input(filename):
text = open(filename)
lines = text.read().split('\n')
code = [int(line) for line in lines]
return code
def part_1(data, preamble):
for i in range(preamble, len(data)):
arr = data[i - preamble:i]
combos = list(combinations(set(arr), 2))
combos = [combo[0] + combo[1] for combo in combos]
if data[i] not in combos:
print("Part 1:", data[i])
return data[i]
def part_2(data, value):
for i in range(len(data)):
for j in range(len(data) - i):
if value == sum(data[i:i+j]) and j > 1:
chunk = data[i:i+j]
chunk.sort()
answer = chunk[0] + chunk[-1]
print("Part 2:", answer)
if __name__ == "__main__":
filename = 'input.txt'
data = parse_input(filename)
value = part_1(data, 25)
part_2(data, value) | true |
f533f0e572b7b2c5ccd0e114095e43af8893be99 | Python | frrad/sudoku | /index.py | UTF-8 | 1,532 | 3.171875 | 3 | [] | no_license | def cells(a,b,x,y,template):
ans = []
for i in xrange(a,b):
acc = []
for j in xrange(x,y):
acc.append(template % (i,j))
ans.append(acc)
return ans
def table(rows, border=False):
b = ""
if border:
b = ' border="1"'
ans =""
ans += "<table%s>\n" %b
for row in rows:
ans += " <tr>\n"
for col in row:
ans+= " <td>%s</td>\n" % col
ans += " </tr>\n"
ans+= "</table>\n"
return ans
def sudoku(template):
tbl = []
for x in xrange(3):
acc = []
for y in xrange(3):
acc.append(table(cells(3*x,3*(x+1),3*y,3*(y+1),template)))
tbl.append(acc)
return table(tbl, True)
prepasta = '''
<html>
<head><title>Sudoku Solver</title></head>
<body>
<meta charset="utf-8">
<script src="wasm_exec.js"></script>
<script>
const go = new Go();
let mod, inst;
WebAssembly.instantiateStreaming(fetch("main.wasm"), go.importObject).then(async (result) => {
mod = result.module;
inst = result.instance;
await go.run(inst)
});
</script>
'''
postpasta = '''
</body>
</html>
'''
print prepasta
print sudoku('<input type="text" size="1" id="input-%d-%d">')
print '<button onClick="solveSudoku();">Solve</button>'
print '<br><br>'
print '<p id="messageP"></p>'
print sudoku('<p id="ans-%d-%d"> </p>')
print postpasta
| true |
52221f9432a7ed85c43e61927883eb57e2e60c63 | Python | 5eo1ab/Crawler_googleplayKR | /GooglePlayApp.py | UTF-8 | 1,071 | 2.71875 | 3 | [] | no_license | ########################
# GooglePlayApp.py
# 2017.1.4
########################
import googleplaylib as lib
class GooglePlayApp :
def __init__(self, url) :
soup, app_id = lib.url_connect(url)
dic_p = lib.get_primary_info(soup)
dic_s = lib.get_secondary_info(soup)
self.appid = app_id
self.title = dic_p['title']
self.developer = dic_p['developer']
self.category = dic_p['category']
self.description = dic_p['description']
self.score = dic_s['score']
self.review_num = dic_s['review_num']
self.updated_date = dic_s['updated_date']
self.download_volumn = dic_s['download_volumn']
self.contents_rating = dic_s['contents_rating']
def __call__(self) :
print("AppID:\t{0}\nTitle:\t{1}\ndeveloper:\t{2}\ncategory:\t{3}\n".format(self.appid, self.title, self.developer, self.category))
print("score:\t{0}\nreview_num:\t{1}\nupdated_date:\t{2}\ndownload_volumn:\t{3}\ncontents_rating:\t{4}\n".format(self.score, self.review_num,self.updated_date,self.download_volumn,self.contents_rating))
print("description:\n{0}".format(self.description))
| true |
4b991850674b2e3f917b0514f45de7e15c7a7925 | Python | programmingwithcaptain/Notepad-Python | /PythonNotepad.py | UTF-8 | 2,978 | 3.25 | 3 | [] | no_license | import os
import tkinter
from tkinter import *
from tkinter.messagebox import *
from tkinter.filedialog import *
class Notepad:
__root = Tk()
# WIndow
__thisWidth = 300
__thisHeight = 300
__thisTextArea = Text(__root)
__thisMenuBar = Menu(__root)
# ScrollBar
__thisScrollBar = Scrollbar(__thisTextArea)
__file = None
def __init__(self,**kwargs):
# Window Size
self.__thisWidth = kwargs['width']
self.__thisHeight = kwargs['height']
# Window Title
self.__root.title("Untitled - Notepad")
# WIndow Alignment
screenWidth = self.__root.winfo_screenwidth()
screenHeight = self.__root.winfo_screenheight()
left = (screenWidth / 2) - (self.__thisWidth / 2)
top = (screenHeight / 2) - (self.__thisHeight / 2)
self.__root.geometry('%dx%d+%d+%d' % (self.__thisWidth,
self.__thisHeight,
left, top))
# Resizable
self.__root.grid_rowconfigure(0, weight=1)
self.__root.grid_columnconfigure(0, weight=1)
self.__thisTextArea.grid(sticky= N + E + S + W)
# New File
self.__thisMenuBar.add_command(label="New",
command=self.__newFile)
# Existing File
self.__thisMenuBar.add_command(label="Open",
command=self.__openFile)
# Save File
self.__thisMenuBar.add_command(label="Save",
command=self.__saveFile)
# Exit
self.__thisMenuBar.add_command(label="Exit",
command=self.__quitApplication)
self.__root.config(menu=self.__thisMenuBar)
self.__thisScrollBar.pack(side=RIGHT,fill=Y)
# ScrollBar Adjustment
self.__thisScrollBar.config(command=self.__thisTextArea.yview)
self.__thisTextArea.config(yscrollcommand=self.__thisScrollBar.set)
def __quitApplication(self):
self.__root.destroy()
def __openFile(self):
self.__file = askopenfilename(defaultextension=".txt",
filetypes=[("All Files","*.*"),
("Text Documents","*.txt")])
if self.__file == "":
self.__file = None
else:
self.__root.title(os.path.basename(self.__file) + " - Notepad")
self.__thisTextArea.delete(1.0,END)
file = open(self.__file,"r")
self.__thisTextArea.insert(1.0,file.read())
file.close()
def __newFile(self):
self.__root.title("Untitled - Notepad")
self.__file = None
self.__thisTextArea.delete(1.0,END)
def __saveFile(self):
if self.__file == None:
self.__file = asksaveasfilename(initialfile="Untitled.txt",
defaultextension=".txt",
filetypes=[("All Files","*.*"),
("Text Document","*.txt")])
if self.__file == "":
self.__file = None
else:
file = open(self.__file,"w")
file.write(self.__thisTextArea.get(1.0,END))
file.close()
self.__root.title(os.path.basename(self.__file) + " - Notepad")
else:
file = open(self.__file,"w")
file.write(self.__thisTextArea.get(1.0,END))
file.close()
def run(self):
# Main Application
self.__root.mainloop()
# Run Main Application
notepad = Notepad(width=600,height=400)
notepad.run()
| true |