blob_id stringlengths 40 40 | language stringclasses 1
value | repo_name stringlengths 5 133 | path stringlengths 2 333 | src_encoding stringclasses 30
values | length_bytes int64 18 5.47M | score float64 2.52 5.81 | int_score int64 3 5 | detected_licenses listlengths 0 67 | license_type stringclasses 2
values | text stringlengths 12 5.47M | download_success bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|
cb714c1d96ffd19de107d3ca3f43cae6181ade61 | Python | Humanamateur/eplogin | /epConfig.py | UTF-8 | 4,881 | 2.515625 | 3 | [] | no_license |
#!/usr/bin/env python3
from tkinter import *
import os
import platform
from tkinter import font as tkFont
def getEthMac():
    """Return the MAC address of the eth0 interface as a 17-char string.

    Falls back to the all-zero placeholder MAC when the sysfs entry does
    not exist (e.g. no wired interface on this board).
    """
    try:
        # 'with' ensures the sysfs handle is closed; readline() includes a
        # trailing newline, trimmed by the [0:17] slice below.
        with open('/sys/class/net/eth0/address') as f:
            mac = f.readline()
    except OSError:
        # Narrowed from a bare 'except': only file-system errors are expected here.
        mac = "00:00:00:00:00:00"
    return mac[0:17]
def getWifiMac():
    """Return the MAC address of the wlan0 interface as a 17-char string.

    Falls back to the all-zero placeholder MAC when the sysfs entry does
    not exist (e.g. no wireless interface on this board).
    """
    try:
        # 'with' ensures the sysfs handle is closed; readline() includes a
        # trailing newline, trimmed by the [0:17] slice below.
        with open('/sys/class/net/wlan0/address') as f:
            mac = f.readline()
    except OSError:
        # Narrowed from a bare 'except': only file-system errors are expected here.
        mac = "00:00:00:00:00:00"
    return mac[0:17]
def hostNameSet(hostname):
    """Write the chosen host name to /etc/hostname.

    hostname -- tkinter StringVar holding the new host name.

    Bug fix: the original wrote 'hostname.get' (the bound method object)
    instead of calling it, which raised TypeError inside file.write().
    """
    with open("/etc/hostname", "w") as initFile:
        initFile.write(hostname.get())
    return
def switch(mode):
    """Map a numeric mode index to its mode name.

    Reconstructed: the original 'def switch(mode.get):' was a SyntaxError
    and looked up an undefined name 'i'.  Returns 'Invalid' for unknown
    indices.

    NOTE(review): the radio buttons in init() store the mode *names* in a
    StringVar, not these integer keys -- confirm which representation the
    eventual configurate handler passes in.
    """
    switcher = {
        0: "RD_STATION_MODE",
        1: "KIOSK_MODE",
        2: "COMANDLINE_MODE",
        3: "CREATIVE_MODE",
    }
    return switcher.get(mode, 'Invalid')
def init():
    """Build and run the ELECTROPOLI_RPI_CONFIG Tk window.

    Lays out a title, the device's eth0/wlan0 MAC addresses, a hostname
    entry, four operating-mode radio buttons and a configure button, then
    enters the Tk main loop (blocks until the window is closed).

    NOTE(review): the configure button's command is commented out, so
    hostNameSet()/switch() are never invoked from the UI; also the radio
    buttons store mode *names* in the StringVar while switch() expects
    integer keys -- confirm the intended wiring.
    """
    #colors
    bgColor = "#E34432"
    fgColor = "#F5B111"
    bgEntryColor = "#381610"
    fgEntryColor = "#D1D1C2"
    #window -- sized to fill the whole screen
    mainWindow = Tk()
    screenW = mainWindow.winfo_screenwidth()
    screenH = mainWindow.winfo_screenheight()
    mainWindow.geometry(str(screenW) + 'x' + str(screenH))
    mainWindow.title('ELECTROPOLI_RPI_CONFIG')
    mainWindow['bg'] = fgEntryColor
    #mainWindow.overrideredirect(True)
    #fonts -- point sizes scale with the screen height
    Gothic7B = tkFont.Font(family="Gothic", size=int(screenH/15), weight="bold")
    Gothic54 = tkFont.Font(family="Gothic", size=int(screenH/54))
    Gothic30B = tkFont.Font(family="Gothic", size=int(screenH/30), weight="bold")
    #Title
    titleLabel = Label(mainWindow, text="ELECTROPOLI_RPI_CONFIG")
    titleLabel.place(relx=0.2, rely=0.1)
    titleLabel['font'] = Gothic7B
    titleLabel['bg'] = fgEntryColor
    titleLabel['fg'] = bgEntryColor
    #Mac label and text entry box -- MACs are read once at window creation
    lMacNameLabelHnd = Label(mainWindow, text="eth0 : " + getEthMac() )
    lMacNameLabelHnd.place(relx=0.75, rely=0.6)
    lMacNameLabelHnd['font'] = Gothic54
    lMacNameLabelHnd['bg'] = fgEntryColor
    lMacNameLabelHnd['fg'] = bgEntryColor
    wMacNameLabelHnd = Label(mainWindow, text="wlan0: " + getWifiMac() )
    wMacNameLabelHnd.place(relx=0.75, rely=0.65)
    wMacNameLabelHnd['font'] = Gothic54
    wMacNameLabelHnd['bg'] = fgEntryColor
    wMacNameLabelHnd['fg'] = bgEntryColor
    #hostNeme label and text entry box
    hostNemeLabelHnd = Label(mainWindow, text="HOSTNAME:", )
    hostNemeLabelHnd.place(relx=0.35, rely=0.4)
    hostNemeLabelHnd['font'] = Gothic30B
    hostNemeLabelHnd['bg'] = fgEntryColor
    hostNemeLabelHnd['fg'] = bgEntryColor
    hostname = StringVar()
    hostNemeEntryHnd = Entry(mainWindow, textvariable=hostname)
    hostNemeEntryHnd.place(relx=0.35, rely=0.47)
    hostNemeEntryHnd['font'] = Gothic30B
    hostNemeEntryHnd['bg'] = bgEntryColor
    hostNemeEntryHnd['fg'] = fgEntryColor
    #Mode Switch button -- four mutually exclusive radio buttons sharing 'mode'
    mode = StringVar()
    modeSwitchButtonHnd = Radiobutton(mainWindow, text="RD_STATION_MODE", variable=mode, value ="RD_STATION_MODE")
    modeSwitchButtonHnd.place(relx=0.35, rely=0.53)
    modeSwitchButtonHnd['font'] = Gothic54
    modeSwitchButtonHnd['bg'] = bgColor
    modeSwitchButtonHnd['fg'] = fgColor
    modeSwitchButtonHnd['activebackground'] = bgColor
    modeSwitchButtonHnd['activeforeground'] = fgColor
    modeSwitchButtonHnd2 = Radiobutton(mainWindow, text="LINUX_KIOSK_MODE", variable=mode, value ="LINUX_KIOSK_MODE")
    modeSwitchButtonHnd2.place(relx=0.35, rely=0.57)
    modeSwitchButtonHnd2['font'] = Gothic54
    modeSwitchButtonHnd2['bg'] = bgColor
    modeSwitchButtonHnd2['fg'] = fgColor
    modeSwitchButtonHnd2['activebackground'] = bgColor
    modeSwitchButtonHnd2['activeforeground'] = fgColor
    modeSwitchButtonHnd3 = Radiobutton(mainWindow, text="COMANDLINE_MODE", variable=mode, value ="COMANDLINE_MODE")
    modeSwitchButtonHnd3.place(relx=0.35, rely=0.61)
    modeSwitchButtonHnd3['font'] = Gothic54
    modeSwitchButtonHnd3['bg'] = bgColor
    modeSwitchButtonHnd3['fg'] = fgColor
    modeSwitchButtonHnd3['activebackground'] = bgColor
    modeSwitchButtonHnd3['activeforeground'] = fgColor
    modeSwitchButtonHnd4 = Radiobutton(mainWindow, text="CREATIVE_MODE", variable=mode, value ="CREATIVE_MODE")
    modeSwitchButtonHnd4.place(relx=0.35, rely=0.65)
    modeSwitchButtonHnd4['font'] = Gothic54
    modeSwitchButtonHnd4['bg'] = bgColor
    modeSwitchButtonHnd4['fg'] = fgColor
    modeSwitchButtonHnd4['activebackground'] = bgColor
    modeSwitchButtonHnd4['activeforeground'] = fgColor
    #Konfig button -- the action handler is not wired up yet
    konfigButtonHnd = Button(mainWindow, text="KONFIGUROVAT" )# command=configurate)
    konfigButtonHnd.place(relx=0.4, rely=0.8)
    konfigButtonHnd['font'] = Gothic30B
    konfigButtonHnd['bg'] = bgEntryColor
    konfigButtonHnd['fg'] = fgEntryColor
    #mainLoop -- blocks here until the window is closed
    mainWindow.mainloop()
    return
init() | true |
263c11d5182b6f9807d89f17a36f499eb43b2f28 | Python | pangruitao/nt_py | /day05/day05_3.py | UTF-8 | 810 | 3.515625 | 4 | [] | no_license | a=int(input('请输入正方形的宽和高:'))
# Print an a-by-a grid of numbers: row i holds i, i+1, ..., i+a-1
# (requires 'a' to be read from input on the preceding line).
for i in range(1,a+1):
    for j in range(0,a):
        print(i+j,end=' ')
        if j == a-1:
            print()  # newline after the last column of each row
# a=int(input('请输入正方形的宽和高:'))
# n=0
# for i in range(1,a+1):
# n+=1
# for j in range(n,n+a):
# print(j,end =' ')
# print()
# a=int(input('请输入正方形的宽和高:'))
# n=0
# for i in range(1,a+1):
# n+=i
# for j in range(n,n+a):
# print(j,end =' ')
# n=0
# print()
# n=int(input('请输入正方形的宽和高:'))
# for x in range(1,n+1):
# for y in range(x,x+n):
# print(y,end=' ')
# print()
# n=int(input("请输入一个数:"))
# j=0
# while j<n:
# i=j
# while i<=j+n:
# print(i+1,end=' ')
# i+=1
# print()
# j+=1
| true |
b4f4c02c620e48991e58d3c6dee136b9715be7e0 | Python | ryuryukke/atcoder_contests | /AtCoder/ABC130/d.py | UTF-8 | 872 | 3.28125 | 3 | [] | no_license | # しゃくとり法
# Two-pointer (sliding window) solution: for each left endpoint, advance
# `right` until the window sum reaches k; every longer window starting at
# `left` then also qualifies.
n, k = map(int, input().split())
a = tuple(map(int, input().split()))
right = 0
part_sum, ans = 0, 0
for left in range(n):
    while part_sum < k and right < n:  # both advance conditions for `right` combined with `and`
        part_sum += a[right]
        right += 1
    # guard needed because the while above may have stopped at the array end
    # without the window sum ever reaching k
    if part_sum >= k:
        ans += n-(right-1)
    part_sum -= a[left]  # `left` moves forward next iteration, so drop its element now
print(ans)
# prefix sums + binary search (alternative solution)
# NOTE(review): this re-reads n, k and the array from stdin, so the two
# solutions in this file must be run separately, not as one script.
from bisect import bisect_right
n, k = map(int, input().split())
a = tuple(map(int, input().split()))
ans = 0
acc = [0]  # acc[i] = sum of the first i elements
for i in range(n):
    acc.append(acc[-1]+a[i])
for part in acc:
    if part < k:
        continue
    # count prefixes acc[j] with part - acc[j] >= k, i.e. acc[j] <= part - k
    ans += bisect_right(acc, part-k)
print(ans)
ca681b17381e3c8051097d2de8087111db18d2d0 | Python | yuliang123456/p1804ll | /sb/shenfen.py | UTF-8 | 158 | 2.59375 | 3 | [] | no_license | import random
# Reconstructed: the original lines were not valid Python (the echo line was
# missing 'print(' and its opening quote, the 'if' lacked a colon, and its
# body was not indented).  Intent inferred from the strings: read a number,
# echo it, and report the remaining guesses when it is positive.
a = int(input('请输入一个数字'))
print('你输入的数字是 %d :' % a)
if a > 0:
    print('你还有九次机会')
    a = 10 - 1
print('谢谢')
| true |
624ced22c735051f3b333657c314d848e7d905c5 | Python | fengges/leetcode | /1001-1050/1005. K 次取反后最大化的数组和.py | UTF-8 | 502 | 2.546875 | 3 | [] | no_license | class Solution:
def largestSumAfterKNegations(self, A, K):
fu=[i for i in A if i<0 ]
fu.sort()
zheng=[i for i in A if i>=0 ]
zheng.sort()
if len(fu)>=K:
for i in range(K):
fu[i]=-fu[i]
zheng.extend(fu)
else:
left=K-len(fu)
fu = [-n for n in fu]
zheng.extend(fu)
zheng.sort()
if left%2!=0:
zheng[0]=-zheng[0]
return sum(zheng)
| true |
3e17c28cc35a3216ffe6ef2c5f411ed4529ce114 | Python | supai-red/wealth_top_decile | /pages/predictions_rh_gapminder_eg.py | UTF-8 | 2,584 | 2.71875 | 3 | [
"MIT"
] | permissive | # Import from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import dash_daq as daq
import pandas as pd
# Imports from this app
from app import app
# Load pipeline
from joblib import load
pipeline = load('assets/pipeline.joblib')
column1 = dbc.Col(
[
dcc.Markdown('##P Predictions', className='mb-5'),
dcc.Markdown('#### Year'),
dcc.Slider(
id='year',
min=1955,
max=2055,
step=5,
value=2020,
marks={n: str(n) for n in range(1960,2060,20)},
className='mb-5',
),
dcc.Markdown('#### Continent'),
dcc.Dropdown(
id='continent',
options = [
{'label': 'Africa', 'value': 'Africa'},
{'label': 'Americas', 'value': 'Americas'},
{'label': 'Asia', 'value': 'Asia'},
{'label': 'Europe', 'value': 'Europe'},
{'label': 'Oceania', 'value': 'Oceania'},
],
value = 'Africa',
className='mb-5',
),
# daq.Slider(
# id='slider1',
# targets={"25": {"label": "TARGET"}},
# min=0, max=100, value=100,
# marks={'0': '0',
# '25': '25',
# '50': '50',
# '75': '75',
# '100': '100'},
# className='mb=10',
# ),
#
# dcc.Markdown(id='out1')
# #("here's some more text and a link to [google](google.com)", id='out1')
],
md=4,
)
column2 = dbc.Col(
[
html.H2('Expected Lifespan', className='mb-5'),
html.Div(id='prediction-content', className='lead')
# daq.Gauge(
# id='my-daq-gauge',
# max=100,
# value=20,
# min=0
# )
]
)
layout = dbc.Row([column1, column2])
@app.callback(
    Output('prediction-content', 'children'),
    [Input('year', 'value'), Input('continent', 'value')],
)
def predict(year, continent):
    """Dash callback: predict life expectancy for (year, continent).

    Builds a single-row DataFrame with the column names the fitted
    pipeline was trained on and formats the prediction as e.g. "72 years".
    """
    df = pd.DataFrame(
        columns=['year', 'continent'],
        data=[[year, continent]]
    )
    y_pred = pipeline.predict(df)[0]
    return f'{y_pred:.0f} years'
# @app.callback(
# Output(component_id='my-daq-gauge', component_property='value'),
# [Input(component_id='slider1', component_property='value')]
# )
# def update_output_div(input_value):
# return input_value
| true |
bb981067400952557476729bf652fd95075b4a12 | Python | willie-lin/DoubanSpiders | /doubanspider/spiders/movie.py | UTF-8 | 2,399 | 2.640625 | 3 | [] | no_license | #!/usr/bin/python3
# --*-- coding:utf-8 --*--
# @Author : YuAn
# @Site :
# @File : movie.py
# @Time : 2018/6/15 9:34
# @software : PyCharm
import scrapy
from scrapy.selector import Selector
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from doubanspider.items import DoubanspiderItem
class MovieSpider(CrawlSpider):
    """Crawl douban.com's Top-250 movie list and scrape one item per film.

    The first rule (no callback) merely follows the paginated list pages;
    the second extracts links to individual film subject pages and routes
    each response to parse_item.
    """
    name = 'movie'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/top250']
    rules = (
        Rule(LinkExtractor(allow=(r'https://movie.douban.com/top250\?start=\d+.*'))),
        Rule(LinkExtractor(allow=(r'https://movie.douban.com/subject/\d+')),callback='parse_item')
    )
    # def parse_item(self, response):
    #     sel = Selector(response)
    #     item = DoubanspiderItem
    #     item['name'] = sel.xpath('//*[@id="content"]/h1/span[1]/text()').extract()
    #     item['year'] = sel.xpath('//*[@id="content"]/h1/span[2]/text()').re(r'\((\d+)\)')
    #     item['score'] = sel.xpath('//*[@id="interest_sectl"]/div/p[1]/strong/text()').extract()
    #     item['director'] = sel.xpath('//*[id="info"]/span[1]/a/text()').extract()
    #     item['classification'] = sel.xpath('//span[@property="v:genre"]/text()').extract()
    #     item['actor'] = sel.xpath('//*[@id="info"]/span[3]/a[1]/text()').extract()
    #     return item
    def parse_item(self, response):
        """Extract one DoubanspiderItem from a film's subject page.

        Each field comes straight from an XPath query; .extract() returns
        a list of strings, so downstream pipelines receive lists.
        """
        sel = Selector(response)
        item = DoubanspiderItem()
        item['name'] = sel.xpath('//*[@id="content"]/h1/span[1]/text()').extract()
        item['year'] = sel.xpath('//*[@id="content"]/h1/span[2]/text()').re(r'\((\d+)\)')
        # item['score'] = sel.xpath('//*[@id="interest_sectl"]/div/p[1]/strong/text()').extract()
        item['score'] = sel.xpath('//*[@id="interest_sectl"]/div/div[2]/strong/text()').extract()
        item['director'] = sel.xpath('//*[@id="info"]/span[1]/span/a/text()').extract()
        item['celebrity'] = sel.xpath('//*[@id="info"]/span[2]/span/a/text()').extract()
        item['classification'] = sel.xpath('//span[@property="v:genre"]/text()').extract()
        item['actor'] = sel.xpath('//*[@id="info"]/span[3]//span/a/text()').extract()
        item['date'] = sel.xpath('//span[@property="v:initialReleaseDate"]/text()').extract()
        item['len_time'] = sel.xpath('//span[@property="v:runtime"]/text()').extract()
        return item
7d5c38b8257e8d726eec8af66f83c70e7cb99581 | Python | DanielWiest/6_902_Skill | /APIHitterEvents.py | UTF-8 | 1,559 | 3.109375 | 3 | [] | no_license | import requests
import json
from dateutil import parser
import datetime
def fetchUpcomingMITEvents(numberOfEvents):
    """Fetch the MIT mobile calendar feed and return a spoken-friendly
    English summary of the next `numberOfEvents` future events.

    Scans at most the first 100 feed entries, skips events that already
    started, and phrases the start time in minutes (<= 45 min) or hours.
    """
    # Characters text-to-speech engines stumble over, mapped to speakable
    # replacements; applied to the final string in one translate() pass
    # instead of the original per-character loop.
    badCharacters = {'@':' at ','&':' and ','%':' percent ','#':' number ','*':' ','(':' ',')':' ','/':' '}
    url = "http://m.mit.edu/apis/calendars/events_calendar/events"
    r = requests.get(url)
    data = json.loads(r.text)
    timeNow = datetime.datetime.now().timestamp()
    outputText = "The next "+str(numberOfEvents)+" upcoming MIT events are "
    options = list(data)
    eventsAdded = 0
    for event in options[:100]:
        if eventsAdded >= numberOfEvents:
            break
        formattedTime = parser.parse(event['start_at'])
        minTill = (formattedTime.timestamp() - timeNow) / 60
        if minTill > 0:
            if minTill > 45:
                outputText += event['title']+", which starts in "+str(round(minTill/60,1))+" hours, "
            else:
                outputText += event['title']+", which starts in "+str(round(minTill,1))+" minutes, "
            eventsAdded += 1
    # Drop the trailing ", ", terminate the sentence, then make every
    # awkward character speakable.
    finalText = outputText[:-2] + "."
    return finalText.translate(str.maketrans(badCharacters))
#print(fetchUpcomingMITEvents(5))
| true |
9cc94eb5bdd3ad1b2eed48f9b73d8f31481a87c5 | Python | Dulal13/python-practice | /Edabit_code/day1-10/day-07/08_outNumber.py | UTF-8 | 1,090 | 3.515625 | 4 | [] | no_license | # def outlier_number(lst):
# even = []
# odd = []
# for item in lst:
# if(item%2 == 0):
# even.append(item)
# else:
# odd.append(item)
# if(len(even) == 1):
# return even[0]
# else:
# return odd[0]
a = '''I am Md.Dulal Miah. This time I am studying at University. I love math and programming. My friend suggested me. You can try as a data analyst. I searched data analyst course. One day, I found this course.
But I can not buy this course. Because I earned very poor. So, I am applying for Financial Aid. I work as a private tutor. I teach math, chemistry, physics. As a student, I can't bear the tuition fee of my university. I get a waiver from my university.
My father died in 1999. My mother is working in a garments factory. Her income was used for food, medicine, etc. My income is used for tuition fees. At last, we had no money. so, I can't pay for this course.
If you guys will help for access this course, I will try my best. In this time, I learned python, SQL, Excel for data analysis.'''
print(len(a)) | true |
aebbbe1ad9805d0bb9e33cf1bb894c6a8d872190 | Python | Sachin-Ramesh10/Social-Network-Analysis | /P3/random graph/random_graph.py | UTF-8 | 1,131 | 3.078125 | 3 | [] | no_license | import networkx as nx
import math
import matplotlib.pyplot as plt
def avg_degree(A):
    """Return the mean node degree of graph A.

    The degree mapping is wrapped in dict() so this works with both
    networkx 1.x (graph.degree() returns a dict) and 2.x+ (returns a
    DegreeView, which has no .values() dict semantics until converted).
    """
    degrees = dict(A.degree())
    return sum(degrees.values()) / float(len(A))
# Build an Erdos-Renyi random graph S with the same node count as the real
# graph G and edge probability p matched to G's average degree, then compare
# clustering / path-length statistics and plot S's degree distribution.
S = nx.Graph()
G = nx.read_edgelist("edges.txt", delimiter=",")
k = avg_degree(G)
print("The average degree of original graph is \t",k)
#propabilty of edge creation
p = k/(G.number_of_nodes() - 1)
S.add_edges_from((nx.fast_gnp_random_graph(G.number_of_nodes(), p, seed=None, directed=False)).edges())
c = avg_degree(S)
avg_clusco = nx.average_clustering(S)
print("Average local Clustering\t" + str(avg_clusco))
#average path length -- ln(n)/ln(<k>) approximation for random graphs
apl = math.log(S.number_of_nodes())/float(math.log(c))
print("Average path length of simulated model \t", apl)
# Degree histogram: degs[d] = number of nodes with degree d.
degs = {}
for n in S.nodes():
    deg = S.degree( n )
    if deg not in degs:
        degs[deg] = 0
    degs[deg] += 1
items = sorted(degs.items())
fig = plt.figure()
ax = fig.add_subplot(111)
# NOTE(review): plotting lists of (value, 2) tuples looks unintended --
# possibly math.log(k, 2) / math.log(v, 2) was meant for a log-log plot.
ax.plot([(k,2) for (k , v ) in items ] , [(v,2) for (k ,v ) in items])
plt.title ("Degree Distribution of random graph")
fig.savefig ("degree_distribution_randomg1.png")
fig.show()
8bd4720c641eef4a158f7e727413d3416d60c190 | Python | aeDeaf/scibernetes | /matrixMul/matrixmul_app.py | UTF-8 | 439 | 2.5625 | 3 | [] | no_license | from flask import Flask, jsonify, request
import numpy
app = Flask("__name__")
@app.route("/", methods=['POST'])
def matrix_mul():
    """Flask endpoint: multiply JSON 'matrix' by JSON 'vector'.

    Expects a JSON body {"matrix": [[...], ...], "vector": [...]} and
    returns {"res": [...]} with integer-truncated components.
    """
    data = request.json
    matrix = numpy.array(data['matrix'])
    vector = numpy.array(data['vector'])
    res = numpy.matmul(matrix, vector)
    # Iterate the result directly instead of indexing by position; int()
    # keeps the values JSON-serializable (numpy scalars are not).
    res_list = [int(v) for v in res]
    return jsonify({'res': res_list})
if __name__ == "__main__":
app.run(host="0.0.0.0", port=5000)
| true |
5b20d529af5491a829aae7df418b486edbc04ad9 | Python | kms0524/MondaySinchon | /1주차-자료구조/자료구조&정렬-직접구현/dongjun/deque.py | UTF-8 | 1,458 | 3.546875 | 4 | [] | no_license | import time
import sys
class Node:
    """One element of the doubly linked deque."""
    def __init__(self, data):
        self.data = data  # stored payload
        self.next = None  # neighbour towards the rear end
        self.pre = None   # neighbour towards the front end


class LinkedDeque:
    """Double-ended queue backed by a doubly linked list of Node objects.

    Fixes over the original implementation:
      * pop operations detach the removed node (the surviving neighbour's
        next/pre pointer is cleared instead of left dangling), and
      * both ends are reset to None when the last element is removed,
        keeping front and rear consistent.
    Pop methods return None when the deque is empty.
    """
    def __init__(self):
        self.front = None
        self.rear = None

    def isEmpty(self):
        # Both ends are kept in sync, but check both defensively.
        return self.front is None or self.rear is None

    def pushBack(self, data):
        """Append data at the rear end."""
        newNode = Node(data)
        if self.isEmpty():
            self.front = newNode
            self.rear = newNode
        else:
            newNode.pre = self.rear
            self.rear.next = newNode
            self.rear = newNode

    def pushFront(self, data):
        """Prepend data at the front end."""
        newNode = Node(data)
        if self.isEmpty():
            self.front = newNode
            self.rear = newNode
        else:
            newNode.next = self.front
            self.front.pre = newNode
            self.front = newNode

    def popBack(self):
        """Remove and return the rear element, or None if empty."""
        if self.isEmpty():
            return None
        node = self.rear
        self.rear = node.pre
        if self.rear is None:
            self.front = None      # deque became empty
        else:
            self.rear.next = None  # detach the removed node
        return node.data

    def popFront(self):
        """Remove and return the front element, or None if empty."""
        if self.isEmpty():
            return None
        node = self.front
        self.front = node.next
        if self.front is None:
            self.rear = None       # deque became empty
        else:
            self.front.pre = None  # detach the removed node
        return node.data
# Smoke test: push 0..9 at the front (front ends up 9, rear stays 0), then
# drain from the back, printing "0 1 2 ... 9", and report the elapsed time.
Q = LinkedDeque()
start = time.time()
for i in range(10):
    Q.pushFront(i)
while not Q.isEmpty():
    print(Q.popBack(),end=" ")
print()
ms = (time.time() - start)*1000
print(f'{round(ms,2)}ms')
75af2e115cb8bf6acc23b12777c45e4b3cdeb6b4 | Python | MysteriousSonOfGod/redirector | /tests/test_non_database_flask_ext.py | UTF-8 | 1,861 | 2.53125 | 3 | [
"MIT"
] | permissive | import json
import pytest
from flask import Flask
from redirector.non_database import FlaskNonDatabase, NonDatabase
def test_default_initialization():
    """An extension created without an app must raise on property access."""
    ext = FlaskNonDatabase()
    with pytest.raises(RuntimeError):
        # try to access property
        ext.non_db()
def test_empty_file_default_filename_nonexisting_file(tmpdir, monkeypatch):
    """No configured path and no redirects.json in cwd: warns but still builds."""
    with monkeypatch.context():
        monkeypatch.chdir(tmpdir)  # empty tmpdir guarantees redirects.json is absent
        with pytest.warns(UserWarning, match="no redirections database path specified and redirects.json not found"):
            app = Flask(__name__)
            ext = FlaskNonDatabase(app)
            assert isinstance(ext.non_db, NonDatabase)
def test_empty_file_default_filename_empty_map(tmpdir, monkeypatch):
    """A redirects.json containing {} in cwd initializes without warning."""
    with monkeypatch.context():
        monkeypatch.chdir(tmpdir)
        with open(tmpdir.join("redirects.json"), "w") as f:
            json.dump({}, f)
        app = Flask(__name__)
        ext = FlaskNonDatabase(app)
        assert isinstance(ext.non_db, NonDatabase)
def test_empty_file_custom_filename_nonexisting_file(tmpdir, monkeypatch):
    """An explicitly configured but missing map path must raise FileNotFoundError."""
    # use a tempdir to ensure the file does not exist
    with monkeypatch.context():
        monkeypatch.chdir(tmpdir)
        app = Flask(__name__)
        app.config["REDIRECTIONS_MAP_PATH"] = str(tmpdir.join("test.json"))
        with pytest.raises(FileNotFoundError):
            FlaskNonDatabase(app)
def test_empty_file_custom_filename_empty_map_file(tmpdir, monkeypatch):
    """A configured path to an empty ({}) map file initializes successfully."""
    with monkeypatch.context():
        # create a file in the tmpdir but do _not_ chdir there
        tmpfile = tmpdir.join("test.json")
        with open(tmpfile, "w") as f:
            json.dump({}, f)
        app = Flask(__name__)
        app.config["REDIRECTIONS_MAP_PATH"] = str(tmpfile)
        ext = FlaskNonDatabase(app)
        assert isinstance(ext.non_db, NonDatabase)
| true |
0b00fc166c2c13208f1747fdf586edfb33659c9e | Python | wylu/pyoffer | /数组中重复的数字.py | UTF-8 | 2,017 | 4.0625 | 4 | [] | no_license | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 数组中重复的数字.py
@Time : 2020/04/02 23:34:04
@Author : wylu
@Version : 1.0
@Contact : 15wylu@gmail.com
@License : (C)Copyright 2020, wylu-CHINA-SHENZHEN
@Desc : 在一个长度为n的数组里的所有数字都在0到n-1的范围内。数组中某些数字是重复的,
但不知道有几个数字是重复的。也不知道每个数字重复几次。请找出数组中任意一个
重复的数字。例如,如果输入长度为7的数组{2,3,1,0,2,5,3},那么对应的输出
是第一个重复的数字2。
"""
# https://www.nowcoder.com/practice/623a5ac0ea5b4e5f95552655361ae0a8
class Solution:
    """Find a duplicated value in an array of n integers drawn from 0..n-1.

    In-place bucket strategy: repeatedly swap each value v towards index v.
    When a value v is found already sitting at index v while also occupying
    another slot, it is a duplicate.  O(n) time, O(1) extra space; note the
    input list is permuted as a side effect.
    """

    def duplicate(self, numbers, duplication):
        """Store one duplicated value in duplication[0]; return True/False."""
        if not numbers or len(numbers) == 1:
            return False
        for idx in range(len(numbers)):
            while numbers[idx] != idx:
                val = numbers[idx]
                if numbers[val] == val:
                    # val occurs both here and at its home slot -> duplicate
                    duplication[0] = val
                    return True
                # Send val to its home slot and continue with the swapped-in value.
                numbers[idx], numbers[val] = numbers[val], numbers[idx]
        return False
if __name__ == '__main__':
print(Solution().duplicate([2, 3, 1, 0, 2, 5, 3], [-1]))
| true |
25be104ad22497d8856b882137957d9fbf6556fa | Python | fgzip/lista_exercicios_python_brasil | /01_estrutura_sequencial/11_operacoes_diversas.py | UTF-8 | 1,021 | 4.84375 | 5 | [] | no_license | '''
Faça um Programa que peça 2 números inteiros e um número real. Calcule e mostre:
o produto do dobro do primeiro com metade do segundo .
a soma do triplo do primeiro com o terceiro.
o terceiro elevado ao cubo.
x------------------------------------------------------------------------------x
Fonte: Lista de Exercícios PythonBrasil
Estrutura Sequencial
Data: 06.05.2020
x------------------------------------------------------------------------------x
'''
# Read two integers and one real number, then report three derived values
# (PythonBrasil sequential-structure exercise #11).
print("\n* * * * * NÚMEROS * * * * *\n")
n1 = int(input("\nDigite o primeiro número [inteiro]: "))
n2 = int(input("Digite o segundo número [inteiro]: "))
n3 = float(input("Digite o terceiro número [real]: "))
res_a = (n1 * 2) * (n2 / 2)  # product of double of n1 and half of n2
res_b = (n1 * 3) + n3        # triple of n1 plus n3
res_c = (n3 ** 3)            # n3 cubed
print("\n\n* * * * RESULTADOS * * * *\n\n")
print("a. Produto do dobro do primeiro com a metade do segundo: {:.2f}".format(res_a))
print("b. Soma do triplo do primeiro com o terceiro: {:.2f}".format(res_b))
print("c. Terceiro elevado ao cubo: {:.2f}\n".format(res_c))
| true |
1b623ae1225f53e661f6521465566a201a371f30 | Python | gamikun/unebus-python | /unebus/factory.py | UTF-8 | 904 | 2.5625 | 3 | [] | no_license | from core import Route, Vehicle
from time import strptime
def route_from_dict(d):
    """Build a core.Route from a raw API dictionary; missing keys get defaults."""
    route = Route()
    route.id = d.get('id_ruta')
    route.name = d.get('nombre')
    route.duration = d.get('duracion', 0)
    route.route_length = d.get('longitud', 0.0)
    # Service hours arrive as "HH:MM:SS" strings; store parsed struct_times.
    opens = d.get('inicio_servicio', '00:00:00')
    closes = d.get('fin_servicio', '00:00:00')
    route.schedule = (
        strptime(opens, '%H:%M:%S'),
        strptime(closes, '%H:%M:%S'),
    )
    route.units = d.get('unidades_servicio', 0)
    return route
def vehicle_from_dict(d):
    """Build a core.Vehicle from a raw API dictionary.

    Raises ValueError when 'latitud'/'longitud' are missing or not floats
    (the strict isinstance check of the original is preserved, so integer
    coordinates are rejected too).
    """
    vehicle = Vehicle()
    vehicle.id = d.get('vehiculo_id')
    vehicle.imei = d.get('vehiculo_imei')
    lat = d.get('latitud')
    lng = d.get('longitud')
    for value, label in ((lat, 'lat'), (lng, 'lng')):
        if not isinstance(value, float):
            raise ValueError('invalid ' + label)
    vehicle.location = (lat, lng,)
    return vehicle
ea7c328a0ee726753d822b14bb5c96df811604e0 | Python | 7676444908/Python_Plots | /ReadnPlot_CourtCaseData_Barplt.py | UTF-8 | 641 | 3.1875 | 3 | [] | no_license | import matplotlib.pyplot as plt
import csv

# Read ministry serial number (col 0) and total case count (col 4) from the
# LIMBS export, then draw and save a bar chart.
x = []
y = []
# Opening the csvfile
with open('LIMBS_Ministry_Wise_Court_Case_Data_2021.csv','r') as csvfile:
    plots = csv.reader(csvfile, delimiter = ',')
    # This skips the header row of the CSV file.
    next(plots)
    for row in plots:
        x.append(row[0])
        y.append(int(row[4]))

plt.bar(x, y, color = 'g', width = 0.72, label = "Cases")
plt.xlabel('S.No of Ministry')
plt.ylabel('Total Number of cases')
plt.title('Ministry Vs Total Number of cases')
plt.legend()
# Bug fix: save *before* show().  show() blocks and clears the current
# figure when the window closes, so the original savefig-after-show wrote
# an empty image; the duplicated show() call was dropped as well.
plt.savefig('MinistryVsTotalNumberOfcases_barplt.png')
plt.show()
8c52519ce78e3acdae5d57a346dd4de86c195654 | Python | celclow/azik-romaji-table | /romaji_table_maker.py | UTF-8 | 1,667 | 2.90625 | 3 | [] | no_license | #/usr/bin/env python3
# -*- coding: utf-8 -*-
DATA = './data/data.csv'
SPECIAL = './data/special.txt'
import csv
import sys
class romajiTable:
    """Ordered key -> value romaji mapping.

    keyList preserves insertion order; re-adding an existing key moves it
    to the end, so the last definition wins and is emitted last.
    """

    def __init__(self):
        self.keyList = []    # keys in (re-)insertion order
        self.valueList = {}  # key -> romaji value

    def getKeyList(self):
        return self.keyList

    def getValueList(self):
        return self.valueList

    def getValue(self, key):
        return self.valueList[key]

    def addValue(self, key, value):
        """Insert or update; an existing key is repositioned at the end.

        The original duplicated the append/assign statements in both
        branches -- only the removal step depends on prior membership.
        """
        if key in self.keyList:
            self.keyList.remove(key)
        self.keyList.append(key)
        self.valueList[key] = value

    def addRomajiTable(self, filename):
        """Load tab-separated "key<TAB>value" lines; malformed lines are skipped.

        The file is now closed deterministically via 'with' (the original
        iterated an unclosed open() handle).
        """
        with open(filename) as table_file:
            for line in table_file:
                row = line.rstrip().split('\t')
                if len(row) == 2:
                    key, value = row
                    self.addValue(key, value)
if __name__ == '__main__':
    # command-line arguments: extra per-user table files may follow argv[0]
    argv = sys.argv
    argc = len(argv)  # NOTE(review): computed but never used
    romajiData = romajiTable()
    csvdata = []
    # Keep only well-formed 16-column rows of the base consonant/vowel grid.
    for line in csv.reader(open(DATA), delimiter=','):
        if len(line) == 16:
            csvdata.append(line)
    # Cross the row header (consonant, col 0) with the column headers
    # (vowels, header row 0) to form each romaji key.
    for i, row in enumerate(csvdata[1:], 1):
        for j, value in enumerate(row[1:], 1):
            key = '{0}{1}'.format(csvdata[i][0], csvdata[0][j])
            if value:
                romajiData.addValue(key, value)
    # Special-case entries, then user-supplied overrides (later wins).
    romajiData.addRomajiTable(SPECIAL)
    for uniqueFile in argv[1:]:
        romajiData.addRomajiTable(uniqueFile)
    # Emit the final table as "key<TAB>value" lines in insertion order.
    for key in romajiData.getKeyList():
        print("{0}\t{1}".format(key, romajiData.getValue(key)))
| true |
6b315ab0de5e173e546d39a18b031d4877d5de21 | Python | LexLippi/GraphLibrary | /task/graph_library/priority_queue.py | UTF-8 | 297 | 3.65625 | 4 | [] | no_license | import heapq
class PriorityQueue:
    """Min-priority queue: get() returns the entry with the smallest priority.

    Bug fix: the original pushed (item, priority) tuples onto the heap, so
    ordering was by *item* instead of priority.  Entries are now keyed by
    (priority, insertion counter); the counter keeps ordering FIFO-stable
    for equal priorities and avoids ever comparing items, which may not be
    comparable.  get() still returns an (item, priority) tuple.
    """
    def __init__(self):
        self.items = []
        self._counter = 0  # monotonically increasing tie-breaker

    def is_empty(self):
        return len(self.items) == 0

    def push(self, item, priority):
        heapq.heappush(self.items, (priority, self._counter, item))
        self._counter += 1

    def get(self):
        """Pop and return (item, priority) for the smallest priority."""
        priority, _, item = heapq.heappop(self.items)
        return (item, priority)
fffc8b4a943555a2ccd3f6f824a2194fa9c5e341 | Python | inzva/gcn_text_categorization | /common.py | UTF-8 | 700 | 3.046875 | 3 | [
"MIT"
] | permissive | from collections import Counter
from typing import Iterable, Any
from typing import List
def extract_word_counts(docs_of_words: List[List[str]]) -> Counter:
    """Count occurrences of every word across all documents."""
    return Counter(word for words in docs_of_words for word in words)
def check_data_set(data_set_name: str, all_data_set_names: List[str]) -> None:
    """Raise AttributeError when data_set_name is not a known data-set name."""
    if data_set_name in all_data_set_names:
        return
    raise AttributeError("Wrong data-set name, given:%r, however expected:%r" % (data_set_name, all_data_set_names))
def flatten_nested_iterables(iterables_of_iterables: Iterable[Iterable[Any]]) -> Iterable[Any]:
    """Concatenate the inner iterables into one flat list."""
    flat = []
    for inner in iterables_of_iterables:
        flat.extend(inner)
    return flat
| true |
8d4f7d92da76fa18c59a2c710aca011116862a14 | Python | cchanzl/dc-federated | /src/dc_federated/examples/example_dcf_model/local_model.py | UTF-8 | 3,087 | 2.71875 | 3 | [
"Apache-2.0"
] | permissive | """
Contains a single class illustrating the use of the classes in
dc_federated.backend.DCFWorker.
"""
import io
from datetime import datetime
import logging
import torch
from dc_federated.utils import get_host_ip
from dc_federated.examples.example_dcf_model.torch_nn_class import ExampleModelClass
from dc_federated.backend import DCFWorker, GLOBAL_MODEL_VERSION, GLOBAL_MODEL, WID_LEN
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
class ExampleLocalModel(object):
    """
    This is a simple class that illustrates how the DCFWorker class may be used to
    implement a federated local model. This talks to an ExampleGlobalModel
    object via a running DCFServer instance. For testing purposes, it writes all the
    models it creates and receives to disk.

    Parameters
    ----------
    server_protocol: str, optional
        'http' or 'https'; defaults to 'http' when omitted.
    server_host_ip: str, optional
        IP of the DCFServer; defaults to this host's IP.
    server_port: int, optional
        Port of the DCFServer; defaults to 8080.
    """
    def __init__(self, server_protocol=None, server_host_ip=None, server_port=None):
        self.local_model = ExampleModelClass()
        self.last_update_time = datetime(2017, 1, 1)
        # Bug fix: the original read "'http' if ... is None else 'https'",
        # which silently turned an explicitly passed 'http' into 'https'.
        server_protocol = 'http' if server_protocol is None else server_protocol
        server_host_ip = get_host_ip() if server_host_ip is None else server_host_ip
        server_port = 8080 if server_port is None else server_port
        print(f"Server host ip {server_host_ip}")
        self.worker = DCFWorker(
            server_protocol=server_protocol,
            server_host_ip=server_host_ip,
            server_port=server_port,
            global_model_version_changed_callback=self.global_model_status_changed_callback,
            get_worker_version_of_global_model=lambda : self.worker_version_of_global_model,
            private_key_file=None
        )
        self.global_model = None
        self.worker_version_of_global_model = -1
        # register the worker with the server and persist the initial model
        self.worker_id = self.worker.register_worker()
        with open(f"elm_worker_update_{self.worker_id}.torch", 'wb') as f:
            torch.save(self.local_model, f)
        # send the (serialized) model update to the server
        self.worker.send_model_update(self.serialize_model())

    def serialize_model(self):
        """
        Serializes the local model so that it can be sent over to the global
        model.

        Returns
        -------
        byte-string:
            A serialized version of the model.
        """
        model_data = io.BytesIO()
        torch.save(self.local_model, model_data)
        return model_data.getvalue()

    def global_model_status_changed_callback(self, model_dict):
        """
        Example showing a callback for change to the global model status:
        deserializes the received global model and writes it to disk.
        """
        logger.info(f"I got the global model version {model_dict[GLOBAL_MODEL_VERSION]}"
                    f"!! -- transforming...")
        self.global_model = torch.load(io.BytesIO(model_dict[GLOBAL_MODEL]))
        with open("elm_global_model.torch", 'wb') as f:
            torch.save(self.global_model, f)
        logger.info(self.global_model)

    def start(self):
        """
        Example showing how to start the worker - simply calls the
        DCFWorker run().
        """
        self.worker.run()
| true |
8f888d711e29486d00748e515fadbc1900886463 | Python | tallerverticalisc-12/Edison | /ParkSystem.py | UTF-8 | 2,255 | 2.8125 | 3 | [] | no_license | #ParkSystem.py
#Copyright (C) 2016 Smart Parking
#TThis program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
##This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
#Libraries used
import requests
import time
import json
import pyupm_grove as grove
import pyupm_i2clcd as lcd
import pyupm_ttp223 as ttp223
#Grove Touch Sensor variable
touchSensor = ttp223.TTP223(2)
#Grove Button variable
button = grove.GroveButton(3)
#Grove LCD Blacklight display variable
lcdDisplay = lcd.Jhd1313m1(0, 0x3E, 0x62)
lcdDisplay.setCursor(0,0)
#Variable to store the json from the server
r = requests.get('http://45.40.137.37:88/bestZone')
parsedJSON = json.loads(r.text)
#Variable to store a formatted json to print
global zone
zone = parsedJSON['zone']
#Requests the best place to parking and refreshes the zone shown on the LCD
def requestPlace():
    """Fetch the current best parking zone and update the global 'zone'.

    Bug fix: the original parsed the server response into a local variable
    and discarded it, so the zone displayed on the LCD never changed after
    start-up.
    """
    global zone
    r = requests.get('http://45.40.137.37:88/bestZone')
    print (r.text)
    parsedJSON = json.loads(r.text)
    zone = parsedJSON['zone']
#Request to the server that has the information about the parking zones
def updateInfo(info):
    """POST a status update for zone 1 / spot 1 to the parking server.

    info -- status value forwarded as-is (callers pass 1 when the touch
    sensor detects a car and 0 when the entry button fires).

    Note: the original declared 'global dispPlaces', a name that is never
    defined or used anywhere in this script; the dead declaration was dropped.
    """
    s = requests.post('http://45.40.137.37:88/sensor', {"zone":1,"parkID":1,"status":info})
    print (s.text)
#Updates if a car leaves or enters to a parking zone
def parkInfo():
    """Poll the touch sensor and the entry button once and react.

    NOTE(review): 'dispPlaces' is declared global but never defined or
    used anywhere in this script -- the declaration is dead code.
    """
    global dispPlaces
    #Detects when a car is in the a determinate spot
    if touchSensor.isPressed():
        updateInfo(1)
    #Detects when a car enters to the parking lot
    if button.value() == 1:
        lcdDisplay.write('Welcome!!!')
        time.sleep(2)
        lcdDisplay.clear()
        lcdDisplay.write('Go to: ' + str(zone))
        updateInfo(0)
        time.sleep(5)
        requestPlace()
        lcdDisplay.clear()
while True:
parkInfo()
| true |
f6d22bd697f11f78c4a6e04ec1e0c0de8871ce42 | Python | Venkatesh0519/Christmas-Tree | /christTree4.py | UTF-8 | 457 | 3.234375 | 3 | [] | no_license | a=40
# Crown of the tree: each row prints 'a' leading spaces followed by b+1
# stars, narrowing the margin while widening the star run.  Requires 'a'
# to be defined on the preceding (data-fused) line of this dump.
b=0
space=40
while b<a-1 and a>0:
    print(' '*a+'*'+'*'*b)
    a-=1
    b+=2
# Trunk: four rows of '|||', then the decorated base line.
for _ in range(4):
    print(' '*(space-1)+'|||')
print(' '*(space-5),'\_@_@_@_/')
####within a function()
def tree(a, b, space, n):
    """Print an ASCII Christmas tree: a triangular crown of rows with
    'a' leading spaces and b+1 stars (a shrinks while b grows by two),
    then n trunk rows and a decorated base line."""
    while a > 0 and b < a - 1:
        row = ' ' * a + '*' * (b + 1)
        print(row)
        a -= 1
        b += 2
    for _ in range(n):
        print(' ' * (space - 1) + '|||')
    print(' ' * (space - 5), '\_@_@_@_/')
a=40
b=0
space=40
n=4
tree(a,b,space, n) | true |
8641595473d7b40f29964a2eb234c1e0e71433d9 | Python | alexandersantosdev/exercicios-python | /functional.py | UTF-8 | 728 | 3.84375 | 4 | [] | no_license | from functools import reduce
# Functional-programming exercises: reduce, comprehension, filter and map.
lista = [1,2,3,4,5]
soma = reduce(lambda x, y: x + y, lista)     # sum via reduce
fatorial = reduce(lambda x,y: x * y, lista)  # product (5!) via reduce
print(f'Soma de {lista} = {soma}')
print(f'Fatorial de {lista} = {fatorial}')
numerosPares = [x for x in lista if x % 2 == 0]
print(f'Numeros pares na lista: {numerosPares}')
numImpares = list(filter(lambda x: x % 2 != 0, lista))
print(f'Numeros impares {numImpares}')
numMulti = list(map(lambda x: x * 2, lista))
print(f'Numeros {lista} em dobro {numMulti}')
idades = [7, 8, 8, 7]
novo = min(idades)   # youngest age
velho = max(idades)  # oldest age
media = sum(idades) / len(idades)
media2 = lambda x,y : x / y  # mean expressed as total / count
print(novo)
print(velho)
print(media)
print(media2(reduce(lambda x,y : x + y, idades), len(idades)))
| true |
39ef5960a894ce82c5198fa4306f2897b680d26f | Python | chertoGUN/HSE-University-projects | /Course-3/IoT-analytics/Task_7/src/1_parcing_and_filling.py | UTF-8 | 2,669 | 2.828125 | 3 | [] | no_license | # -*- coding: utf-8 cp1251 -*-
"""
Created on 0 0:00:00 0000
@author: Bloodies
"""
# !pip install influxdb
from influxdb import InfluxDBClient
import pandas as pd
import numpy as np
# Excel column names (Russian): measurement date, well name, operation
# method, operating mode, and four pressure measurement columns.
data = 'Дата замера'
hole = 'Скважина'
x1 = 'Способ эксплуатации'
x2 = 'Режим'
y1 = 'Рпр(ТМ)'
y2 = 'Рзаб(Рпр)'
y3 = 'Рзаб(Нд)'
y4 = 'Рзаб(иссл)'
# Input workbook read by read_all_sheets() below.
file_name = 'data.xlsx'
def read_all_sheets(file_name_excel):
    """Read every sheet of an Excel workbook into a single DataFrame.

    Each sheet is parsed with the measurement-date column (module-level
    ``data``) as a datetime index; the sheets are concatenated row-wise.
    """
    xls = pd.ExcelFile(file_name_excel)
    # DataFrame.append was deprecated and removed in pandas 2.0; collect
    # the per-sheet frames and concatenate once instead (also O(n) rather
    # than quadratic re-copying).
    frames = [
        pd.read_excel(xls, sheet, parse_dates=[data], index_col=data)
        for sheet in xls.sheet_names
    ]
    return pd.concat(frames) if frames else pd.DataFrame()
# Load every sheet and order the combined frame by its date index.
df = read_all_sheets(file_name)
df.sort_index(inplace=True)
def replace_text_values_in_x(df, nameX):
    """Encode the text column ``nameX`` of ``df`` in place as integers.

    Each distinct non-null value is replaced by a 1-based integer code;
    null values (if any) are mapped to the next free code.  Returns a
    dict mapping each code back to the original value so the encoding can
    be reversed.
    """
    dict_changes = {}
    uniques = pd.unique(df[nameX]).tolist()
    code = 1
    for value in uniques:
        # pd.isna() is the reliable null test; the former
        # ``str(value) != str(np.NaN)`` comparison also misclassified the
        # literal string 'nan' as missing.  (np.NaN itself was removed in
        # NumPy 2.0; np.nan is the supported spelling.)
        if not pd.isna(value):
            df.loc[df[nameX] == value, nameX] = code
            dict_changes[code] = value
            code += 1
    # Only add a dedicated "missing" code when the column actually has
    # nulls; previously the last real code was silently overwritten with
    # NaN even for fully populated columns.
    if len(uniques) > len(dict_changes):
        df[nameX] = df[nameX].fillna(len(uniques))
        dict_changes[len(uniques)] = np.nan
    return dict_changes
# Encode the categorical columns and remember the code->value mappings.
what_replaced_x1 = replace_text_values_in_x(df, x1)
what_replaced_x2 = replace_text_values_in_x(df, x2)
all_data = df.copy()
# Normalise well names: strip surrounding whitespace.
cleaning_map = lambda x: str(x).strip()
all_data[hole] = all_data[hole].map(cleaning_map)
all_df_to_influx = all_data.copy()[[hole, x1, x2, y1, y2, y3, y4]]
influx_file_name = '_data_2018_01-07'
list_of_holes = pd.unique(all_data[hole]).tolist()
count_empty_data = 0
fill_method = 'bfill'
list_empty_data = []
cleaned_data = pd.DataFrame()
# NOTE(review): the slice starts at index 1, so the first well is
# skipped -- confirm whether that is intentional.
for _hole in list_of_holes[1:100]:
    # After the first well without data, switch the (currently unused)
    # fill method from back-fill to forward-fill.
    if count_empty_data > 0:
        fill_method = 'ffill'
    df_to_influx = all_df_to_influx[all_df_to_influx[hole] == _hole][[x1, x2, y1, y2, y3, y4]]
    df_to_influx.insert(loc=0, column='Время', value=df_to_influx.index.time[0])
    df_to_influx.insert(loc=0, column='Скважина', value=_hole)
    # Keep the well only if at least one measurement column has data.
    temp_df = df_to_influx[[y1, y2, y3, y4]].dropna(axis=1, how='all')
    if (not temp_df.empty):
        # NOTE(review): DataFrame.append was removed in pandas 2.0, and
        # the CSV is rewritten on every iteration -- consider collecting
        # frames and writing once after the loop.
        cleaned_data = cleaned_data.append(df_to_influx)
        #cleaned_data[y1].fillna(method=fill_method, inplace=True)
        #cleaned_data[y2].fillna(method=fill_method, inplace=True)
        #cleaned_data[y2].fillna(method=fill_method, inplace=True)
        #cleaned_data[y3].fillna(method=fill_method, inplace=True)
        #cleaned_data[y4].fillna(method=fill_method, inplace=True)
        cleaned_data.to_csv('output\\' + 'holes'+'.csv', encoding='cp1251', sep=';')
    else:
        list_empty_data.append(_hole)
        count_empty_data += 1
print('Сохранение в файл ".csv" выполнено!')
4cc5279d45daa890c1f7f8eabc293cb74e06f9bc | Python | nesvera/travis | /travis_image_processing/src/homography/homography.py | UTF-8 | 6,024 | 2.828125 | 3 | [] | no_license | import numpy as np
import cv2
import pickle
import os
import sys
class Parameters:
    """Calibration state for the bird-view homography.

    Holds the pixel/world correspondence points, the derived transform
    matrices and the tunable offsets used when building the transform.
    """

    def __init__(self):
        # Corresponding calibration points: pixels vs. centimetres.
        self.screen_points = []
        self.world_points = []
        # cm per pixel in the bird view (e.g. 400 cm mapped onto 1000 px
        # gives 0.4 cm/px).
        self.scale_ratio = 1
        # Transform matrices; filled in once the four points are known.
        self.homography_matrix = None
        self.inverse_homography_matrix = None
        self.matrix_to_plane = None
        # Transform tuning parameters.
        self.offset_x = 500
        self.offset_y = 500
        self.afastamento = 500
        self.afastamento_scale = 500
        self.image_size = np.array((1280, 960))
        self.output_size = np.array((400, 400))

    def set(self, dict):
        """Restore the calibration from a plain dict (see Homography.save_file)."""
        # Scalars and point lists are taken as-is...
        for key in ('screen_points', 'world_points', 'scale_ratio',
                    'offset_x', 'offset_y', 'afastamento',
                    'afastamento_scale'):
            setattr(self, key, dict[key])
        # ...while matrices and sizes are rebuilt as numpy arrays.
        self.homography_matrix = np.array(dict['homography_matrix'])
        self.inverse_homography_matrix = np.array(dict['inverse_homography_matrix'])
        self.image_size = np.array(dict['image_size'])
class Homography:
    """Bird-view homography calibration backed by a pickle file.

    Wraps a Parameters instance, lets a UI collect the four screen
    calibration points, computes the perspective matrices with OpenCV and
    persists/restores the calibration from ``file_path``.
    """

    def __init__(self, file_path):
        self.parameters = Parameters()
        self.file_path = file_path
        # Restore a previously saved calibration if one exists.
        if os.path.exists(self.file_path):
            self.load_file()

    def add_screen_point(self, x, y):
        """Register one pixel calibration point (up to four are kept)."""
        if len(self.parameters.screen_points) < 4:
            self.parameters.screen_points.append((x, y))
        # The transform is fully determined once all four points exist.
        if len(self.parameters.screen_points) == 4:
            self.calculate_perspective_transform()

    def delete_last_screen_point(self):
        """Drop the most recent screen point and invalidate the matrix."""
        if len(self.parameters.screen_points) > 0:
            self.parameters.screen_points.pop()
            self.parameters.homography_matrix = None

    def reset_screen_points(self):
        """Remove every screen point and invalidate the matrix."""
        del self.parameters.screen_points[:]
        self.parameters.homography_matrix = None

    def get_screen_points(self):
        """Return the (mutable) list of collected screen points."""
        return self.parameters.screen_points

    def set_world_points(self, points):
        """Set the four corresponding real-world points (centimetres)."""
        self.parameters.world_points = points

    def set_parameters(self, offset_x, offset_y, ratio):
        """Update the transform offsets/scale and recompute if possible."""
        self.parameters.offset_x = offset_x
        self.parameters.offset_y = offset_y
        self.parameters.afastamento = ratio
        self.parameters.scale_ratio = float(ratio)/self.parameters.afastamento_scale

        # If the matrix was already computed, redo it with the new values.
        if self.parameters.homography_matrix is not None:
            self.calculate_perspective_transform()

    def load_file(self):
        """Load the pickled calibration dict from ``file_path``.

        Pickle is a binary format, so the file must be opened in binary
        mode ('rb'); the previous text-mode open made every load fail and
        leaked the file handle on error.
        """
        try:
            with open(self.file_path, 'rb') as file_obj:
                parameters_dict = pickle.load(file_obj)
        except Exception:
            print("Failed to open pickle file!")
            return
        self.parameters.set(parameters_dict)

    def save_file(self):
        """Persist the calibration dict to ``file_path`` (binary pickle)."""
        with open(self.file_path, 'wb') as file_obj:
            pickle.dump(self.parameters.__dict__, file_obj,
                        pickle.HIGHEST_PROTOCOL)

    def get_parameters_dict(self):
        """Expose the raw parameter dict (the form persisted by save_file)."""
        return self.parameters.__dict__

    def calculate_perspective_transform(self):
        """Compute the forward/inverse bird-view matrices from the points.

        Also derives ``matrix_bird_to_plane``, which maps a bird-view
        pixel straight to plane (world) coordinates.
        """
        screen_points_np = np.asarray(self.parameters.screen_points, dtype="float32")
        world_points_np = np.asarray(self.parameters.world_points, dtype="float32")

        # World points are given in cm; convert them to bird-view pixels
        # and shift them by the configured offsets.
        world_points_np *= self.parameters.scale_ratio
        world_points_np += [self.parameters.offset_x,
                            self.parameters.offset_y]

        # Matrix for the bird-view transformation and its inverse.
        self.parameters.homography_matrix = cv2.getPerspectiveTransform(
            screen_points_np,
            world_points_np
        )
        self.parameters.inverse_homography_matrix = np.linalg.inv(self.parameters.homography_matrix)

        # Matrix mapping a screen pixel to a position on the plane; the
        # world points are used in reverse order here.
        backward_world_points = np.zeros((4, 2), dtype="float32")
        backward_world_points[0] = self.parameters.world_points[3]
        backward_world_points[1] = self.parameters.world_points[2]
        backward_world_points[2] = self.parameters.world_points[1]
        backward_world_points[3] = self.parameters.world_points[0]

        self.matrix_to_plane = cv2.getPerspectiveTransform(
            screen_points_np,
            backward_world_points
        )

        self.matrix_bird_to_plane = np.matmul(
            self.matrix_to_plane,
            self.parameters.inverse_homography_matrix
        )

    def get_homography_matrix(self):
        """Return the bird-view matrix (None until calibrated)."""
        return self.parameters.homography_matrix

    def get_inverse_homography_matrix(self):
        """Return the inverse bird-view matrix."""
        return self.parameters.inverse_homography_matrix

    def get_real_coordinate(self, x, y):
        """Map bird-view pixel (x, y) to plane coordinates.

        Returns a length-2 numpy array.  The original version computed
        the value but forgot to return it, so callers always got None.
        """
        bird_coord = np.array([x, y, 1])
        world_trans = np.matmul(
            self.matrix_bird_to_plane,
            bird_coord
        )
        world_coord = np.array([world_trans[0]/world_trans[2],
                                world_trans[1]/world_trans[2]])
        return world_coord
| true |
0a54afbbcd31285e2a574c73394a69f819fba204 | Python | Slickness/SudokuSolver | /sudoku.py | UTF-8 | 6,969 | 2.828125 | 3 | [] | no_license | #!/usr/bin/env python
import time
def getPuzzle():
    """Return the starting grid as a flat list of 81 digit strings.

    "0" marks an empty cell; the grid is stored row-major, nine cells per
    row.  (The commented-out mask-based generator that used to live here
    was dead code and has been removed.)
    """
    puzzle = ["8","0","0","0","0","0","0","0","0","0","0","3","6","0","0","0","0","0","0","7","0","0","9","0","2","0","0","0","5","0","0","0","7","0","0","0","0","0","0","0","4","5","7","0","0","0","0","0","1","0","0","0","3","0","0","0","1","0","0","0","0","6","8","0","0","8","5","0","0","0","1","0","0","9","0","0","0","0","4","0","0"]
    return puzzle
def displayPuzzle(puzzle):
    """Print the 81-cell puzzle as a 9x9 grid with 3x3 block separators.

    Uses print() calls so the module runs under Python 3 as well as
    Python 2 (the original bare ``print`` statements were Python-2-only);
    the layout logic itself is unchanged.
    """
    line = ""
    x = 0   # cells accumulated in the current row
    y = 1   # rows emitted so far; drives the ---+---+--- separators
    for item in range(81):
        x = x + 1
        # A full row (9 digits + 2 '|' separators) is 11 characters long.
        if len(line)%11 == 0:
            y = y + 1
            if y > 3:
                if y%3 == 0:
                    print("---+---+---")
                print(line)
                x = 0
                line = ""
        if x == 3:
            line = line + "|"
        if x == 6:
            line = line + "|"
        line = line + str(puzzle[item])
    print(line)
def Check(puzzle, loc, value):
    """Return True if writing ``value`` at cell ``loc`` is legal.

    Legal means the value does not already appear in the cell's 3x3
    block, row or column.  Integer division (``//``) is used so the
    routine works on both Python 2 and Python 3 (the original ``/``
    relied on Python-2 integer division), and the nine hard-coded block
    and row tables are computed instead.
    """
    # --- 3x3 block -----------------------------------------------------
    block_row = loc // 27          # which horizontal band of three rows
    block_col = (loc % 9) // 3     # which vertical stack of three columns
    first = block_row * 27 + block_col * 3
    for r in range(3):
        for c in range(3):
            if puzzle[first + r * 9 + c] == value:
                return False
    # --- row -----------------------------------------------------------
    row_start = (loc // 9) * 9
    for cell in range(row_start, row_start + 9):
        if puzzle[cell] == value:
            return False
    # --- column --------------------------------------------------------
    for cell in range(loc % 9, 81, 9):
        if puzzle[cell] == value:
            return False
    return True
def Solve2(puzzle):
    """Solve the puzzle in place by backtracking over the empty cells.

    NeedSolve holds [cell_index, last_tried_value] for every empty cell;
    ToSolve is the position of the cell currently being filled.  Returns
    the (mutated) puzzle list on success.
    """
    #load empty values
    NeedSolve = []
    for x in range(81):
        if puzzle[x] == "0":
            NeedSolve.append([x,"0"])
    #print len(NeedSolve)
    for x in NeedSolve:
        #print x
        ToSolve = 0
        values = ["1","2","3","4","5","6","7","8","9","10"]
        while True:
            ToCheck = NeedSolve[ToSolve]
            #print ToCheck
            loc = ToCheck[0]
            value=ToCheck[1]
            start = value
            # Resume trying digits after the last value tried at this cell.
            for x in range(int(value),len(values)):
                #if x ==int(value) or value == "0":
                #print x
                #print values[x]
                value = values[x]
                # Sentinel "10": all nine digits failed here -- reset this
                # cell and backtrack to the previous one.
                if value == "10":
                    ToCheck[0] = loc
                    ToCheck[1] = "0"
                    NeedSolve[ToSolve] = ToCheck
                    ToSolve = ToSolve -1
                    #print ToSolve
                    ToCheck = NeedSolve[ToSolve]
                    loc = ToCheck[0]
                    value=ToCheck[1]
                    puzzle[loc] = "0"
                    break
                # Digit fits: record it and advance to the next empty cell.
                if Check(puzzle,loc,value):
                    puzzle[loc] = value
                    ToCheck = [loc,value]
                    NeedSolve[ToSolve]=ToCheck
                    #print ToCheck
                    #displayPuzzle(puzzle)
                    ToSolve = ToSolve + 1
                    #displayPuzzle(puzzle)
                    # Solved when no "0" cells remain anywhere.
                    counter = 0
                    for x in range(81):
                        if puzzle[x] == "0":
                            counter = counter + 1
                    if counter == 0:
                        return puzzle
                    break
                #print values[x] + " fail"
            if ToSolve == len(NeedSolve)-1:
                break
def Solve(puzzle):
    # NOTE(review): legacy solver kept for reference only -- __main__ uses
    # Solve2.  It relies on Python-2 print statements, and its control
    # flow below (duplicated else/continue branches) looks damaged; do not
    # call it.
    #load empty values
    NeedSolve = []
    for x in range(81):
        if puzzle[x] == "0":
            NeedSolve.append([x,"0"])
    print len(NeedSolve)
    z = 0
    while z < (len(NeedSolve)-1):
        out = NeedSolve[z]
        value = out[1]
        loc = out[0]
        values = ["1","2","3","4","5","6","7","8","9","10"]
        print "trying " + str(out[0]) + " " + str(out[1])
        for a in range(len(values)):
            if values[a] == "10":
                z = z - 1
                out = NeedSolve[z]
                value = out[1]
                loc = out[0]
                puzzle[loc] = "0"
                break
            else:
                continue
            if values[a-1] == value or value == "0":
                value = values[a]
                #if values[a] = "10":
                #    z = z - 1
                #    out = NeedSolve[z]
                #    value = out[1]
                #    loc = out[0]
                #    puzzle[loc] = "0"
                #    break
                if Check(puzzle,loc,value):
                    puzzle[loc] = value
                    #displayPuzzle(puzzle)
                    #print out
                    out = [loc,value]
                    NeedSolve[z] = out
                    z = z + 1
                    displayPuzzle(puzzle)
                    break
                else:
                    continue
                break
            else:
                continue
            else:
                continue
            #break
            #break
        #elif values[a] == "9":
        #    z = z - 1
        #    out = NeedSolve[z]
        #    value = out[1]
        #    loc = out[0]
        #puzzle[loc] = "0"
        #oldout = NeedSolve[z]
        #value = oldout[1]
        #break
        #print z
#x = 61
#for grid in grids:
# for y in grid:
# if y == x:
# print grid
if __name__ == '__main__':
    # Time the solve: show the starting grid, run the backtracking solver
    # (Solve2) and show the result.
    start = time.time()
    puzzle = getPuzzle()
    displayPuzzle(puzzle)
    if Solve2(puzzle):
        displayPuzzle(puzzle)
    end = time.time()
    elapsed = end - start
    # print() works under both Python 2 (single argument) and Python 3;
    # the original bare print statement was Python-2-only.
    print(elapsed)
| true |
b96a5ec72b999ecda3d2eb9d29bd5409eb6b58bc | Python | jerryduncan/Cartoonize | /app/initialize_image.py | UTF-8 | 1,205 | 2.75 | 3 | [] | no_license | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
#import libraries
import numpy as np
import cv2
import matplotlib.pyplot as plt
img_dir = '/tmp/images/ezekiel.jpg.jpg'  # path of the image to process

# cv2.resize/cv2.pyrDown operate on image arrays, not on path strings, so
# the file has to be decoded first (the original passed the path directly
# and filtered an undefined 'img_small', both of which fail at runtime).
img_small = cv2.pyrDown(cv2.imread(img_dir))
num_iter = 5
# Several small bilateral filters approximate one strong edge-preserving blur.
for _ in range(num_iter):
    img_small = cv2.bilateralFilter(img_small, d=9, sigmaColor=9, sigmaSpace=7)
# Upsample the filtered image back.  Kept under its own name: rebinding
# img_dir (as the original did) breaks the img_read(img_dir) call below,
# which still needs the file path.
img_filtered = cv2.pyrUp(img_small)
def rgb_img(img_dir):
    """Load the image file at *img_dir* and return it as an RGB array."""
    # OpenCV decodes files in BGR channel order; flip to RGB.
    return cv2.cvtColor(cv2.imread(img_dir), cv2.COLOR_BGR2RGB)
def hsv_img(img_dir):
    #reads from a RGB image file to a HSV array
    # NOTE(review): cv2.imread() returns BGR data, but the conversion
    # below assumes RGB input (COLOR_RGB2HSV), so the hue channel will be
    # wrong for red/blue content -- confirm whether COLOR_BGR2HSV was
    # intended.
    hsv = cv2.imread(img_dir)
    return cv2.cvtColor(hsv, cv2.COLOR_RGB2HSV)
def img_read(img_dir):
    """Return (rgb, hsv, gray) arrays for the image file at *img_dir*."""
    rgb_arr = rgb_img(img_dir)
    hsv_arr = hsv_img(img_dir)
    # Grayscale is derived from the RGB copy rather than re-read from disk.
    return rgb_arr, hsv_arr, cv2.cvtColor(rgb_arr, cv2.COLOR_RGB2GRAY)
#to display the img from the different color spaces
# NOTE(review): make sure img_dir still holds the *file path* here --
# earlier versions of this script rebound it to an image array, which
# breaks the cv2.imread call inside img_read().
eze_rgb, eze_hsv, eze_gray = img_read(img_dir)
plt.imshow(eze_rgb)
| true |
c94ede17b16824ab497185d9818b8bf60ee2d856 | Python | Shawnjoseph2001/EE381-Project2 | /Project3/Project3.py | UTF-8 | 278 | 3.390625 | 3 | [] | no_license | """
EE 381 Project 3 part 1
Shawn Joseph 025671644
"""
import random
p = float(input("Enter probability of success"))
T = int(input("How many trials? "))
# Print the outcome of each Bernoulli trial: '1' for success, '0' otherwise.
for _ in range(T):
    print('1' if random.uniform(0, 1) < p else '0', end=' ')
| true |
283d8282813110832fd851820c3594e1996c97c3 | Python | aneettasara/Python-Developer | /Python Programs/first_recurring_character.py | UTF-8 | 341 | 3.96875 | 4 | [] | no_license | # FIRST RECURRING CHARACTER
def firstRecurringCharacter(ipStr):
    """Return the first character that appears twice in ipStr, else None.

    Runs in O(len(ipStr)) time using a set of characters already seen.
    The leftover debug print of the intermediate state was removed.
    """
    seen = set()
    for char in ipStr:
        if char in seen:
            return char
        seen.add(char)
    return None
# Demo: find and report the first recurring character of a sample string.
ip = "ABBA"
print("INPUT STRING: "+str(ip))
op = firstRecurringCharacter(ip)
print("FIRST RECURRING CHARACTER: "+str(op))
| true |
6e13e074f2e7de19e8a591f2b9f1da2f22cc99eb | Python | atkabyss/1G_python | /sample.py | UTF-8 | 694 | 4 | 4 | [] | no_license | """class Person:
def __init__(self, name, age): # __init__メソッドの定義
self.__name = name # 引数をインスタンス変数に代入。名称のPrefixに__あり
self.__age = age # 引数をインスタンス変数に代入。名称のPrefixに__あり
def print_profile(self): # クラスメソッドの定義
print(self.__name + 'さんは' + str(self.__age) + '歳です。')
taro = Person('蒲田太郎', 21) # オブジェクトとしてインスタントを生成
taro.print_profile() # インスタンスメソッドの呼び出し。
# => クラス内部からは__name, __ageを利用できる
"""
value = (yield(x+1) for in range(100))
| true |
7a2537d2d386a3e83088f88cd1af8faf036266ad | Python | hcoffey1/boatshantiesapi | /src/globot/globot/SignalValue.py | UTF-8 | 2,037 | 3.3125 | 3 | [] | no_license | from random import random
class SignalValue:
    """An RGB signal intensity; each channel is a float clamped to [0, 2]."""

    def __init__(self, total:float=None, r:float=None, g:float=None, b:float=None):
        if total is not None:
            # A single value initialises all three channels.
            self.r = float(total)
            self.g = float(total)
            self.b = float(total)
        else:
            # Fixed: g and b previously read the *r* argument, so
            # per-channel construction (and build_from_json) lost the
            # green/blue values.
            self.r = float(r) if r else 0.0
            self.g = float(g) if g else 0.0
            self.b = float(b) if b else 0.0

    @staticmethod
    def build_from_json(json_data):
        """Build from either a scalar (grey) or an [r, g, b] triple."""
        if type(json_data) in (int, float):
            return SignalValue(total=float(json_data))
        else:
            return SignalValue(r=float(json_data[0]), g=float(json_data[1]), b=float(json_data[2]))

    @staticmethod
    def random_low():
        # Returns a SignalValue with values of RGB all randomly selected
        # in the range [0, 0.1).  (@staticmethod added for consistency
        # with the other factory helpers; callers are unchanged.)
        return SignalValue(r=(random() / 10), g=(random() / 10), b=(random() / 10))

    @staticmethod
    def distance(a, b):
        """Euclidean distance between two signal values in RGB space."""
        return ((a.r - b.r) ** 2 + (a.g - b.g) ** 2 + (a.b - b.b) ** 2) ** 0.5

    def __add__(self, other):
        """Channel-wise sum, clamped to [0, 2] by alter()."""
        new_signal_value = SignalValue()
        new_signal_value.alter(self.r, self.g, self.b)
        new_signal_value.alter(other.r, other.g, other.b)
        return new_signal_value

    def __sub__(self, other):
        """Channel-wise difference, clamped to [0, 2] by alter()."""
        new_signal_value = SignalValue()
        new_signal_value.alter(self.r, self.g, self.b)
        new_signal_value.alter(-other.r, -other.g, -other.b)
        return new_signal_value

    def __str__(self):
        return "({:0.2}, {:0.2}, {:0.2})".format(float(self.r), float(self.g), float(self.b))

    def alter(self, r_delta=0, g_delta=0, b_delta=0):
        """Shift each channel by the given delta, clamping to [0, 2]."""
        self.r = min(max(self.r + r_delta, 0), 2)
        self.g = min(max(self.g + g_delta, 0), 2)
        self.b = min(max(self.b + b_delta, 0), 2)

    def total(self):
        """Mean of the three channels."""
        return (self.r + self.g + self.b) / 3
| true |
98518f83a78d560602dfd5e83b1a185c4c6249d0 | Python | cocpy/raspberrypi4 | /第8章/6/hx711.py | UTF-8 | 1,191 | 2.921875 | 3 | [] | no_license | import time
import RPi.GPIO as GPIO
# GPIO pins used for the sensor (BCM numbering, see init()):
# DT is the data line, SCK the clock line.
DT = 5
SCK = 6
def init():
    """Initialise the GPIO layer for talking to the sensor."""
    # Suppress the channel-already-in-use warnings.
    GPIO.setwarnings(False)
    # Use Broadcom (BCM) pin numbering.
    GPIO.setmode(GPIO.BCM)
    # The clock line is driven by us, so configure it as an output.
    GPIO.setup(SCK, GPIO.OUT)
def get_count():
    """Bit-bang one 24-bit raw reading from the HX711 load-cell chip.

    Returns the raw count with bit 24 flipped (see below).
    """
    count = 0
    # Briefly drive DT high with the clock held low, then turn DT around
    # to listen for the sensor.
    GPIO.setup(DT, GPIO.OUT)
    GPIO.output(DT, 1)
    GPIO.output(SCK, 0)
    GPIO.setup(DT, GPIO.IN)
    # Busy-wait until the sensor pulls DT low (conversion ready).
    while GPIO.input(DT) == 1:
        continue
    # Clock out 24 data bits, most significant bit first.  A low DT is
    # counted as a 1 bit here -- NOTE(review): this is inverted relative
    # to the usual HX711 convention; verify against the wiring.
    for i in range(24):
        GPIO.output(SCK, 1)
        count = count << 1
        GPIO.output(SCK, 0)
        time.sleep(0.001)
        if GPIO.input(DT) == 0:
            count = count + 1
    # One extra clock pulse ends the read (presumably selecting the
    # gain/channel for the next conversion, per the HX711 protocol).
    GPIO.output(SCK, 1)
    # Flip bit 24 (the sign bit of the 24-bit two's-complement value).
    count = count ^ 0x800000
    GPIO.output(SCK, 0)
    return count
def main_loop():
    """Initialise the sensor, then print one weight reading every second."""
    init()
    while True:
        print("重量为:", get_count())
        time.sleep(1)
if __name__ == '__main__':
    try:
        main_loop()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the loop.
        print("程序结束!")
    finally:
        # Always release the GPIO pins on exit.
        GPIO.cleanup()
| true |
d548d0c0bcb764d6abb00445ed99fa7018ca5ff7 | Python | maximilianogomez/Progra1 | /Practica 5/Hechos en clase/4-customexception.py | UTF-8 | 1,033 | 4.03125 | 4 | [] | no_license | class LoginError(Exception):
pass
def login(usuario, passwd):
    """Validate a username/password pair.

    Returns True when the combination is correct; otherwise raises
    LoginError (unknown user, or wrong password).
    """
    pwdUsr = buscarPwdUsr(usuario)
    # `is None` is the idiomatic identity test; `== None` can be fooled by
    # objects overriding __eq__.
    if pwdUsr is None:
        raise LoginError("El usuario no existe")
    if pwdUsr != passwd:
        raise LoginError("La contraseña es incorrecta")
    return True
def buscarPwdUsr(usr):
    """Return the stored password for *usr*, or None when unknown."""
    credenciales = {
        "fer": "1234",
        "vero": "pa$$w0rd",
        "eli": "12345679",
        "fede": "",
        "rama": "rama",
    }
    return credenciales.get(usr)
def main():
    # Interactive entry point: prompt for credentials, report the result.
    usr = input("Ingrese nombre de usuario: ")
    pwd = input("Ingrese su contraseña: ")
    try:
        if login(usr, pwd):
            print("Se autentico correctamente")
    except LoginError as msg:
        # The exception message already explains which check failed.
        print(msg)
if __name__ == "__main__":
    main()
21803bcd2faa90768c11d6415bfac2c4f9fe7efa | Python | cre8tion/AoC2020 | /day5/second.py | UTF-8 | 1,489 | 3.46875 | 3 | [] | no_license | def get_seat_ids():
    # Read one boarding pass per line from input.txt.
    with open('input.txt') as fin:
        input_list = fin.readlines()
    # Strip the trailing newline from every pass.
    text_lst = [i.replace("\n", "") for i in input_list]
    seat_ids_lst = []
    for i in text_lst:
        # The first 7 characters encode the row, the last 3 the column.
        row = get_row(i[:7])
        col = get_col(i[7:])
        # Seat id = row * 8 + column (AoC 2020 day 5 rule).
        seat_ids_lst.append(int((row * 8) + col))
    return seat_ids_lst
def get_row(inp):
    """Binary-partition the 7-character F/B code into a row 0..127."""
    low, high = 0, 127
    for letter in inp:
        if high - low != 1:
            # Narrow the range: 'F' keeps the lower half, 'B' the upper.
            if letter == "F":
                high = ((high + low + 1)/2) - 1
            elif letter == "B":
                low += ((high - low + 1)/2)
        elif letter == "F":
            return low
        elif letter == "B":
            return high
def get_col(inp):
    """Binary-partition the 3-character L/R code into a column 0..7."""
    low, high = 0, 7
    for letter in inp:
        if high - low != 1:
            # Narrow the range: 'L' keeps the lower half, 'R' the upper.
            if letter == "L":
                high = ((high + low + 1)/2) - 1
            elif letter == "R":
                low += ((high - low + 1)/2)
        elif letter == "L":
            return low
        elif letter == "R":
            return high
def find_missing_id(lst):
    """Return the id missing between two consecutive entries, else None.

    ``lst`` must already be sorted; a gap of exactly 2 between neighbours
    brackets the missing seat.
    """
    for lower, upper in zip(lst, lst[1:]):
        if upper - lower == 2:
            return upper - 1
# Read all passes, sort the ids and locate the single gap -- our seat.
seat_ids = get_seat_ids()
seat_ids.sort()
your_id = find_missing_id(seat_ids)
print(your_id)
f115abda2b3c77ac0227864f10ab370a0ec075e4 | Python | GuhanSGCIT/Trees-and-Graphs-problem | /Maximum Anagram.py | UTF-8 | 1,100 | 4.0625 | 4 | [] | no_license | """
Beware! You might lose time on this. Try others and jump back here.
So the deal is, you need to find the lexicographically maximum anagram of the given string. Can you do this?
timing:1sec
level:3
Input
First line contains T, the number of test cases.
Next T lines contain a string S.
Output
Print the lexicographically maximum anagram of S.
Constraints
1 <= T <= 100
1 <= |S| <= 100000
Input:
2
cabnrc
bjprss
Output:
rnccba
ssrpjb
input:
3
vbhjghgvgv
jhhmbjhbhh
jhbjj
output:
vvvjhhgggb
mjjhhhhhbb
jjjhb
input:
2
guvi
geek
output:
vuig
kgee
input:
3
adobe
forty five
evil
output:
oedba
yvtroiffe
vlie
input:
1
I am a weakish speller
output:
wssrpmllkiheeeaaaI
hint:
Two strings are anagrams if the frequency of each character is same in both strings. In question, lexicographically maximum substring is asked. It will be the reverse of the given string after sorting.
"""
# For each test case, the lexicographically largest anagram of a string is
# simply its characters sorted in descending order.
t = int(input())
for _ in range(t):
    word = input()
    print("".join(sorted(word, reverse=True)))
| true |
ef89f821fcda222c36c96f5651118056fa184336 | Python | nomihadar/Anastomotic-Leak-Prediction | /Code/parseData/parseData1.py | UTF-8 | 2,966 | 3.015625 | 3 | [] | no_license |
'''
- Drop duplicate rows.
- Drop rows where patient id is null.
- Drop empty columns.
- Drop irrelevant columns.
'''
import sys, os
sys.path.append(os.path.dirname(sys.path[0])) #to change to environment
from utils.constants import *
import pandas as pd
# File names (paths are resolved against DPATH_DATA in __main__ below).
INPUT_FILE = "data0.csv"
OUT_FILE = "data1.csv"
def reorderColumns(df):
    """Return *df* with its columns arranged in the canonical order.

    The original version assigned the reordered frame to the *local* name
    only and returned nothing, so calling it had no effect; the frame is
    now returned so callers can write ``df = reorderColumns(df)``.
    """
    cols_ordered = ["pid", "admissionId", "eventName",
                    "eventStartDate", "eventEndDate",
                    "bValue", "dValue", "iValue", "sValue",
                    "eventDesc", "unitOfMeasure",
                    "orderNumber", "organismId",
                    "eventCode", "eventCodeOrg",
                    "eventType", "eventTypeOrg", "sourceName"]
    return df[cols_ordered]
def dropColumns(df):
    """Remove all-NaN columns and the known irrelevant columns, in place."""
    # Columns that contain no data at all carry no information.
    # (The printed label says "rows", but the count is of all-null columns.)
    print("\nnum rows where all values are null:",
          df.isna().all(axis=0).sum(), "\n")
    df.dropna(axis=1, how='all', inplace=True)
    # Administrative columns that are not needed downstream.
    irrelevant = ["rowId", "id", "altPid", "bed",
                  "cancelled", "converted",
                  "messageId", "parentId", "tValue",
                  "transferrable", "careGiver", "Time_Stamp"]
    print("num irrelevant columnas:", len(irrelevant), "\n")
    df.drop(columns=irrelevant, inplace=True)
def dropRows(df):
    """Drop all-NaN rows and duplicates, then reset the index, in place."""
    print("num rows where all values are null:", df.isna().all(axis=1).sum())
    df.dropna(axis=0, how='all', inplace=True)
    # Exact duplicate rows carry no extra information.
    print("\nNum duplicate rows:", df.duplicated().sum())
    df.drop_duplicates(inplace=True)
    # Rows without a patient id are only *counted* here, not removed.
    print("\nNum rows with missing patient id:", df["pid"].isna().sum(), "\n")
    df.reset_index(drop=True, inplace=True)
def sortRows(df):
    """Parse the event date columns and sort by patient/start date, in place."""
    # Both date columns share the same ISO day format.
    for col in ('eventStartDate', 'eventEndDate'):
        df[col] = pd.to_datetime(df[col], format='%Y-%m-%d')
    df.sort_values(["pid", "eventStartDate"], inplace=True)
if __name__ == "__main__":
    #read input file
    input_path = os.path.join(DPATH_DATA, INPUT_FILE)
    df = pd.read_csv(input_path)
    print("input file shape:", df.shape)
    #print info
    print("\n\nINFO:\n\n", df.info())
    #print percentage of missing values in each column.
    percent_missing = df.isna().mean().round(5).mul(100)\
                        .to_frame("% missing values")
    print("\n", percent_missing)
    #sort rows
    sortRows(df)
    #drop null and irrelevant columns. Num left columns: 18.
    dropColumns(df)
    #drop rows
    dropRows(df)
    #print info
    print("\n\nINFO:\n\n", df.info())
    #reorder columns
    # NOTE(review): the result of reorderColumns is not captured; if it
    # returns a new frame rather than mutating df, this call has no
    # effect -- verify and assign the result if needed.
    reorderColumns(df)
    #print resulted shape
    print("Resulted file shape:", df.shape)
    #write output -- never overwrite an existing file.
    output_path = os.path.join(DPATH_DATA, OUT_FILE)
    if not os.path.exists(output_path):
        df.to_csv(output_path, index=False)
| true |
d472c1c0fe23cdabd21bb1cabfb612cf8ecfec2d | Python | ktodorov/historical-ocr | /services/experiments/process/neighbourhood_similarity_process_service.py | UTF-8 | 2,969 | 2.703125 | 3 | [
"MIT"
] | permissive | import os
from services.file_service import FileService
from services.arguments.ocr_evaluation_arguments_service import OCREvaluationArgumentsService
from services.tagging_service import TaggingService
from services.log_service import LogService
from enums.part_of_speech import PartOfSpeech
from typing import Dict, List, Tuple
class NeighbourhoodSimilarityProcessService:
    """Selects target tokens for the neighbourhood-similarity experiment.

    Filters per-word cosine distances by part of speech, then narrows the
    candidates to at most ten words, optionally restricted to a
    per-language ``preferred-tokens-<lang>.txt`` file in the experiments
    folder.
    """
    def __init__(
            self,
            arguments_service: OCREvaluationArgumentsService,
            file_service: FileService,
            log_service: LogService,
            tagging_service: TaggingService):
        # Injected collaborators; no work happens at construction time.
        self._arguments_service = arguments_service
        self._file_service = file_service
        self._log_service = log_service
        self._tagging_service = tagging_service
    def get_target_tokens(
            self,
            cosine_distances: Dict[str, float],
            pos_tags: List[PartOfSpeech] = [PartOfSpeech.Noun, PartOfSpeech.Verb, PartOfSpeech.Adjective]) -> List[str]:
        """Return up to ten target words chosen from ``cosine_distances``.

        NOTE(review): the mutable default for ``pos_tags`` is shared
        between calls; it is never mutated here so this is currently
        harmless, but a tuple would be safer.
        """
        # Keep only words whose POS tag is one of ``pos_tags``.
        metric_results = [(word, distance)
                          for word, distance in cosine_distances.items()
                          if self._tagging_service.word_is_specific_tag(word, pos_tags)]
        # Sorted descending by distance, so the [-100:] slice below takes
        # the *smallest* distances.  NOTE(review): these are logged as
        # "most changed" -- confirm the intended direction.
        metric_results.sort(key=lambda x: x[1], reverse=True)
        most_changed_100 = [result[0] for result in metric_results[-100:]]
        most_changed_100_string = ', '.join(most_changed_100)
        self._log_service.log_debug(
            f'Most changed 100 words: [{most_changed_100_string}]')
        most_changed = self._map_target_tokens(
            metric_results,
            targets_count=10)
        log_message = f'Target words to be used: [' + \
            ', '.join(most_changed) + ']'
        self._log_service.log_info(log_message)
        return most_changed
    def _map_target_tokens(
            self,
            ordered_tuples: List[Tuple[str, float]],
            targets_count: int) -> List[str]:
        """Collect up to ``targets_count`` words from ``ordered_tuples``,
        keeping only words from the preferred list when one exists."""
        result_tuples = []
        preferred_tokens = self._get_preferred_target_tokens()
        # NOTE(review): the loop variable shadows the builtin ``tuple``.
        for tuple in ordered_tuples:
            if preferred_tokens is None or tuple[0] in preferred_tokens:
                result_tuples.append(tuple[0])
            if len(result_tuples) == targets_count:
                return result_tuples
        return result_tuples
    def _get_preferred_target_tokens(self) -> List[str]:
        """Load the per-language preferred-token list, or return None when
        the file is absent or empty."""
        preferred_tokens_path = os.path.join(
            self._file_service.get_experiments_path(),
            f'preferred-tokens-{self._arguments_service.language.value}.txt')
        if not os.path.exists(preferred_tokens_path):
            return None
        preferred_tokens = []
        with open(preferred_tokens_path, 'r', encoding='utf-8') as tokens_file:
            file_lines = tokens_file.readlines()
            if file_lines is None or len(file_lines) == 0:
                return None
            # Tokens are compared case-insensitively elsewhere, hence lower().
            preferred_tokens = [x.strip().lower() for x in file_lines]
        return preferred_tokens
| true |
7a9498708463cb3c525da749da6fd6e6dc921304 | Python | jdgsmallwood/ProjectEuler | /project_euler_solutions/problem_15.py | UTF-8 | 362 | 3.203125 | 3 | [] | no_license | #It require 40 such moves to get to the bottom right corner starting at the top left, and so the
#number of possible paths is {40 choose 20}
#Can python do this automatically?
n = 20
# Compute C(2n, n) = (n+1)(n+2)...(2n) / n! with exact integer arithmetic.
num = 1
den = 1
for m in range(1,n+1):
    num *= (m+n)
    den *= m
# Floor division keeps the exact integer result (num is always an exact
# multiple of den); '/' would print the float 137846528820.0 instead.
print(num//den)
#I think the answer is 137846528820
#I could try to write a combinations function for this | true |
0d56543ef3a1b0d16525fe7b7ad2a31b1b8a174c | Python | feng-hsueh/bear-faultdetection | /core/cwru_cnn.py | UTF-8 | 2,488 | 2.5625 | 3 | [] | no_license | import keras
from keras.layers import *
from keras.models import *
from keras.optimizers import *
from cwru.core import cwru_input
# Number of training epochs.
EPOCHS = 50
# Mini-batch size used during training.
BATCH_SIZE = 20
# Build the Sequential CNN model.
def build_model(input_shape=(cwru_input.TIME_PERIODS,), num_classes=cwru_input.LABEL_SIZE):
    """Build the 1-D CNN classifier for the CWRU bearing-fault signals.

    Note: the defaults are evaluated once at import time from cwru_input.
    """
    model_inference = Sequential()
    # Reshape the flat signal into a (TIME_PERIODS, 1) matrix for Conv1D.
    model_inference.add(Reshape((cwru_input.TIME_PERIODS, 1), input_shape=input_shape))
    model_inference.add(Conv1D(16, 8, strides=2, activation='relu', input_shape=(cwru_input.TIME_PERIODS, 1)))
    model_inference.add(Conv1D(16, 8, strides=2, activation='relu', padding="same"))
    model_inference.add(MaxPooling1D(2))
    model_inference.add(Conv1D(32, 4, strides=2, activation='relu', padding="same"))
    model_inference.add(Conv1D(32, 4, strides=2, activation='relu', padding="same"))
    model_inference.add(MaxPooling1D(2))
    model_inference.add(Conv1D(256, 4, strides=2, activation='relu', padding="same"))
    model_inference.add(Conv1D(256, 4, strides=2, activation='relu', padding="same"))
    model_inference.add(MaxPooling1D(2))
    model_inference.add(Conv1D(512, 2, strides=1, activation='relu', padding="same"))
    model_inference.add(Conv1D(512, 2, strides=1, activation='relu', padding="same"))
    model_inference.add(MaxPooling1D(2))
    model_inference.add(GlobalAveragePooling1D())
    model_inference.add(Dropout(0.3))
    # Disabled alternative head kept for reference:
    """
    model_inference.add(Flatten())
    model_inference.add(Dropout(0.3))
    """
    model_inference.add(Dense(256, activation='relu'))
    # Standard fully connected output layer with softmax over the classes.
    model_inference.add(Dense(num_classes, activation='softmax'))
    return model_inference
if __name__ == "__main__":
x_train, y_train, x_test, y_test = cwru_input.read_data()
# 傅里叶变换
x_train, x_test = cwru_input.x_fft(x_train, x_test)
print(x_train.shape)
ckpt = keras.callbacks.ModelCheckpoint(
filepath='../model/best_model.{epoch:02d}-{val_loss:.4f}.h5',
monitor='val_loss', save_best_only=True, verbose=1)
model = build_model()
opt = Adam(0.0002)
model.compile(loss='categorical_crossentropy', optimizer=opt, metrics=['accuracy'])
print(model.summary())
model.fit(
x=x_train,
y=y_train,
batch_size=BATCH_SIZE,
epochs=EPOCHS,
validation_split=0.3,
callbacks=[ckpt],
)
model.save("finishModel.h5")
| true |
10289dde1e17fa05374cb2e6a095265e59549771 | Python | kissghosts/data-mining-2013 | /problem 1/get_student_num.py | UTF-8 | 687 | 3.3125 | 3 | [] | no_license | #!/usr/bin/python
from pylab import *
openpath = 'course-text.txt'

# Count, for each number of courses, how many students take that many
# courses.  One input line per student; the tokens on a line are that
# student's courses.
student_count = {}
students = 0
# A context manager guarantees the file is closed (the original handle was
# never closed), and `in` replaces the Python-2-only dict.has_key().
with open(openpath, 'r') as openf:
    for line in openf:
        students += 1
        count = len(line.strip().split())
        if count not in student_count:
            student_count[count] = 1
        else:
            student_count[count] += 1

# Total course enrolments and total students (n should equal students).
course_sum = 0
n = 0
for key in student_count.keys():
    course_sum += key * student_count[key]
    n += student_count[key]
# print() + '//' keep the original Python-2 semantics (integer average)
# while also running under Python 3.
print("avg: %s, students: %s" % (course_sum // students, n))

# x = student_count.keys()
# y = student_count.values()
# bar(x, y, facecolor='#9999ff', edgecolor='white')
# plt.xlabel('Course number')
# plt.ylabel('student number')
# show()
2f40a1631c732934cb883a250a6693e8d2bcfa51 | Python | bwhmather/python-validation | /validation/tests/test_datetime.py | UTF-8 | 1,504 | 3 | 3 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | import unittest
from datetime import date, datetime
import pytz
from validation import validate_datetime
class ValidateDateTimeTestCase(unittest.TestCase):
    """Unit tests for ``validation.validate_datetime``.

    These tests exercise both direct validation calls and the closure
    form (``validate_datetime()`` with no value returns a reusable
    validator).
    """
    def test_valid(self): # type: () -> None
        # A timezone-aware datetime passes without error.
        validate_datetime(datetime.now(pytz.utc))
    def test_no_timezone(self): # type: () -> None
        # Naive datetimes (no tzinfo) are rejected as invalid values.
        with self.assertRaises(ValueError):
            validate_datetime(datetime.now())
    def test_date(self):
        # A plain date is the wrong *type*, not merely an invalid value.
        with self.assertRaises(TypeError):
            validate_datetime(date.today())
    def test_invalid_type(self):
        # ISO strings are not parsed -- only datetime instances pass.
        with self.assertRaises(TypeError):
            validate_datetime("1970-01-01T12:00:00+00:00")
    def test_not_required(self): # type: () -> None
        # None is allowed when the value is explicitly optional.
        validate_datetime(None, required=False)
    def test_required(self):
        # By default a value is required, so None raises.
        with self.assertRaises(TypeError):
            validate_datetime(None)
    def test_closure_valid(self): # type: () -> None
        validator = validate_datetime()
        validator(datetime.now(pytz.utc))
    def test_closure_date(self):
        validator = validate_datetime()
        with self.assertRaises(TypeError):
            validator(date.today())
    def test_repr(self): # type: () -> None
        # repr() round-trips the construction arguments.
        validator = validate_datetime()
        self.assertEqual(
            repr(validator),
            'validate_datetime()',
        )
        validator = validate_datetime(required=False)
        self.assertEqual(
            repr(validator),
            'validate_datetime(required=False)',
        )
| true |
89686d4678e266ceebe9a78bbf51d8b68acb952f | Python | mforbes/readme_renderer | /readme_renderer/__main__.py | UTF-8 | 1,053 | 2.625 | 3 | [
"Apache-2.0"
] | permissive | from __future__ import absolute_import, print_function
import argparse
import sys
if __name__ == "__main__":
    # CLI entry point: render a README (reST / Markdown / plain text) to HTML.
    parser = argparse.ArgumentParser(
        description="Renders a .rst or .md README to HTML",
    )
    parser.add_argument("input", help="Input README file", type=argparse.FileType("r"))
    parser.add_argument(
        "-o",
        "--output",
        help="Output file (default: stdout)",
        type=argparse.FileType("w"),
        default="-",
    )
    args = parser.parse_args()

    # Pick the renderer from the input file's extension.
    if args.input.name.split(".")[-1].lower() == "md":
        # Delay import in case user has not installed with the [md] extra
        from readme_renderer.markdown import render
    elif args.input.name.split(".")[-1].lower() == "txt":
        from readme_renderer.txt import render
    else:
        # Default is rst to preserve backwards compatibility.
        from readme_renderer.rst import render

    # render() is given sys.stderr as its message stream; a None result
    # signals a rendering failure, reported via exit code 1.
    rendered = render(args.input.read(), stream=sys.stderr)
    if rendered is None:
        sys.exit(1)
    print(rendered, file=args.output)
| true |
14241bc20b766d135d93a358496a54a926abeaa5 | Python | carlosrgomes/curso-opencv | /exemplo1.py | UTF-8 | 460 | 2.703125 | 3 | [] | no_license | import cv2
# Haar-cascade face detection demo: find faces in a static photo and draw
# a red bounding box around each one.
classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_default.xml')
imagem = cv2.imread('pessoas/pessoas1.jpg')
# The cascade classifier works on single-channel input, so convert to grayscale.
imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
facesDetectadas = classificador.detectMultiScale(imagemCinza)
print(len(facesDetectadas))
for (x, y, l, a) in facesDetectadas:
    print(x, y, l, a)
    # (x, y) is the top-left corner; l/a are width/height. BGR (0,0,255) = red.
    cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)
cv2.imshow("faces", imagem)
cv2.waitKey()
7b132075ec14c95193817962f5275f1cd70eec3a | Python | peterschwaller/pybench-mongodb | /pybench/stats.py | UTF-8 | 5,630 | 2.859375 | 3 | [] | no_license | """
Stats for pybench-mongodb
"""
import logging
import multiprocessing
import queue
import sys
import threading
import time
STATS_DUMP_DELAY = 2.0
class Timer:
    """Context manager that measures elapsed time in seconds.

    Usage::

        with Timer() as t:
            do_work()
        print(t.interval)

    Attributes set on exit: ``start``, ``end`` and ``interval`` (seconds).
    """
    # pylint: disable=attribute-defined-outside-init,too-few-public-methods

    def __enter__(self):
        # Bug fix: time.clock() was deprecated in 3.3 and removed in
        # Python 3.8; time.perf_counter() is the documented replacement
        # for high-resolution interval timing.
        self.start = time.perf_counter()
        return self

    def __exit__(self, *args):
        self.end = time.perf_counter()
        self.interval = self.end - self.start
class Stats(object):
    """Aggregates per-interval insert counters and prints throughput rows.

    Producers call log() (the queue is a multiprocessing.Queue, so items
    may come from other processes); a monitor thread drains it, folds the
    counters into per-interval buckets and prints one formatted row per
    completed interval. ``done`` is set once either ``max_iterations``
    inserts were seen or ``max_time_seconds`` elapsed.
    """
    # pylint: disable=too-many-instance-attributes

    # Column header and row template for the printed table.
    # NOTE(review): data_format has no comma between the first two numeric
    # fields ({:10d}{:10d}), so the "Elapsed" and "Int" columns run
    # together in the output -- compare with header_format; likely a typo.
    header_format = "Time, Elapsed (s), Int, Int/s, Total, Total/s"
    data_format = "{},{:10d}{:10d},{:10.1f},{:10d},{:10.1f}"

    def __init__(self, max_iterations, max_time_seconds):
        self.max_iterations = max_iterations      # stop after this many inserts
        self.max_time_seconds = max_time_seconds  # ... or after this much time
        self.interval = 5                         # reporting bucket width (s)
        self.done = multiprocessing.Event()       # signals the run is over
        self.start_time = 0
        self.end_time = 0
        self.total_inserts = 0
        # Producers push [timestamp, instance, counters] items through here.
        self.queue = multiprocessing.Queue(maxsize=500)
        self.data = {}     # time_index -> instance -> aggregated counters
        self.results = []  # result dicts kept for save()
        self.lock = threading.Lock()

    def set_interval(self, interval):
        """Set the reporting interval in seconds."""
        self.interval = interval

    def process_item(self, current_time, instance, counters):
        """Fold one logged item into the per-interval aggregates."""
        # Bucket index: wall-clock seconds divided by the interval width.
        time_index = int(int(current_time) / self.interval)
        if time_index not in self.data:
            self.data[time_index] = {}
        if instance not in self.data[time_index]:
            self.data[time_index][instance] = {"count": 0}
        for key in counters:
            self.data[time_index][instance][key] = \
                (self.data[time_index][instance].get(key, 0) +
                 counters[key])
        self.data[time_index][instance]["count"] += 1
        if instance == "insert" and "inserts" in counters:
            self.total_inserts += counters["inserts"]
            if self.total_inserts >= self.max_iterations:
                self.done.set()
        # The time-based stop condition is re-checked on every item.
        if time.time() - self.start_time > self.max_time_seconds:
            self.done.set()

    def log(self, instance, counters):
        """Queue one measurement for the monitor thread to aggregate."""
        self.queue.put([time.time(), instance, counters])

    def start(self, interval=5):
        """Record the start time and launch the monitor thread."""
        self.start_time = time.time()
        self.interval = interval
        # NOTE(review): this acquire/release pair has no visible effect --
        # possibly a leftover synchronisation point; confirm before removing.
        self.lock.acquire()
        self.lock.release()
        thread = threading.Thread(target=self.stats_monitor)
        thread.start()

    def end(self):
        """Record the end time (only the first call has an effect)."""
        if self.end_time == 0:
            self.end_time = time.time()

    def save(self, file):
        """Write the header plus every recorded result row to *file*."""
        print(Stats.header_format, file=file)
        for item in self.results:
            self.show_result(item, file)

    def stats_monitor(self):
        """Monitor loop: drain the queue and print each finished interval.

        Runs in its own thread until ``done`` is set. STATS_DUMP_DELAY
        keeps an interval "open" a little longer so late-arriving items
        still land in the right bucket before it is printed.
        """
        last_shown_index = 0
        logging.info("Starting stats monitor")
        output_count = 0
        while not self.done.is_set():
            # Most recent interval index that is safely complete.
            time_index = (int(int(time.time() - self.interval - STATS_DUMP_DELAY) /
                          self.interval))
            self.lock.acquire()
            try:
                if time_index not in self.data:
                    self.data[time_index] = {}
            finally:
                self.lock.release()
            if time_index > last_shown_index:
                # Re-print the header every 10 rows for readability.
                if output_count % 10 == 0:
                    print(Stats.header_format)
                self.show_record(time_index)
                output_count += 1
                last_shown_index = time_index
            if self.queue.full():
                print("full")
            try:
                # Short timeout keeps the loop responsive to ``done``.
                item = self.queue.get(True, 0.1)
            except queue.Empty:
                continue
            self.process_item(item[0], item[1], item[2])
        # Flush the interval that was still open when we were told to stop.
        if last_shown_index + 1 in self.data:
            self.show_record(last_shown_index + 1)
        logging.info("Ending stats monitor")

    def show_record(self, time_index, file=sys.stdout):
        """Format and print the aggregate row for one interval index."""
        # pylint: disable=too-many-locals,too-many-branches
        # Ignore buckets that predate start() entirely.
        if ((time_index+1) * self.interval) < self.start_time:
            return
        time_string = time.strftime(
            "%Y-%m-%d %H:%M:%S",
            time.localtime((time_index+1) * self.interval))
        if len(self.data[time_index]) == 0:
            # Nothing logged in this interval: print an all-zero row.
            print(Stats.data_format.format(
                time_string, int(time.time() - self.start_time), 0, 0, 0, 0),
                file=file)
        else:
            inserts = 0
            for instance in sorted(self.data[time_index]):
                counters = self.data[time_index][instance]
                # NOTE(review): .get("inserts") returns None when the key is
                # missing, which would make this "+=" raise TypeError --
                # confirm every logged counters dict carries "inserts".
                inserts += counters.get("inserts")
            # The first interval is truncated...
            interval = min(self.interval, ((time_index+1) * self.interval) - self.start_time)
            result = {
                "time-string": time_string,
                "elapsed": int(time.time() - self.start_time),
                "inserts": inserts,
                "insert-rate": inserts / interval,
                "total": self.total_inserts,
                "total-rate": self.total_inserts / (time.time() - self.start_time),
            }
            self.show_result(result, file)
            self.results.append(result)

    def show_result(self, result, file):
        """Print one result dict using the shared row template."""
        print(Stats.data_format.format(
            result["time-string"],
            result["elapsed"],
            result["inserts"],
            result["insert-rate"],
            result["total"],
            result["total-rate"]),
            file=file)
| true |
94205f709c5fd69e2e13625ef25bb0971bdfb2b0 | Python | tarvitz/udlg | /udlg/structure/utils.py | UTF-8 | 2,216 | 2.859375 | 3 | [] | no_license | # -*- coding: utf-8 -*-
"""
.. module:: udlg.structure.utils
:synopsis: Utilities
:platform: Linux, Unix, Windows
.. moduleauthor:: Nickolas Fox <tarvitz@blacklibary.ru>
.. sectionauthor:: Nickolas Fox <tarvitz@blacklibary.ru>
"""
from struct import unpack, calcsize
from ctypes import resize, sizeof, addressof, cast, c_void_p
from . constants import (
BYTE_SIZE, PrimitiveTypeConversionSet, PrimitiveTypeCTypesConversionSet
)
def read_record_type(stream, seek_back=True):
    """
    Read the record type byte from the stream.

    :param stream: stream object, file for example
    :param bool seek_back: seek backwards after reading, True by default;
        if False no seek operation is performed
    :rtype: udlg.structure.constants.RecordTypeEnum
    :return: record type
    """
    raw = stream.read(BYTE_SIZE)
    (record_type,) = unpack('b', raw)
    if seek_back:
        # rewind one byte so the caller sees the record type again
        stream.seek(-1, 1)
    return record_type
def resize_array(array, size):
    """
    Grow a ctypes array and return a view with the new length.

    :param array: ctypes array
    :param int size: new size (should be more than current size)
    :return: pointer on new array
    """
    element_type = array._type_
    # enlarge the underlying buffer, then re-type it at the same address
    resize(array, sizeof(element_type) * size)
    return (element_type * size).from_address(addressof(array))
#: todo implement timespan, decimal, datetime
def read_primitive_type_from_stream(stream, primitive_type):
    """
    Read a single primitive value of the given type from the stream.

    :param stream: stream object, for example file stream
    :param int primitive_type: type (PrimitiveTypeEnumeration based type)
    :rtype: int | float | bool | datetime | char | decimal.Decimal
    :return: the decoded value
    """
    fmt = PrimitiveTypeConversionSet[primitive_type]
    payload = stream.read(calcsize(fmt))
    (value,) = unpack(fmt, payload)
    return value
def make_primitive_type_elements_array_pointer(primitive_type, elements):
    """
    Create a void pointer to an array of primitive-type elements.

    :param PrimitiveTypeEnum primitive_type: primitive type
    :param list | tuple elements: primitive type elements (values)
    :rtype: ctypes.c_void_p
    :return: void pointer on array

    NOTE(review): the backing array is a local; the returned pointer can
    dangle once it is garbage collected -- confirm callers keep a
    reference to the original data alive.
    """
    ctype = PrimitiveTypeCTypesConversionSet[primitive_type]
    # build a ctypes array holding every element, then erase its type
    backing = (ctype * len(elements))(*elements)
    return cast(backing, c_void_p)
| true |
67ed0ab01a5ae4fbe21fb925c8d75e7631c3e8fe | Python | ateneva/python-oop-2020-06 | /workshop-customlist/tests/customlist_tests/customlist_overbound_tests.py | UTF-8 | 1,609 | 2.9375 | 3 | [
"MIT"
] | permissive | import unittest
from tests.customlist_tests.base.CustomListTestsBase import CustomListTestsBase
class CustomList_OverboundTests(CustomListTestsBase):
    """Tests for CustomList.overbound().

    The expectations suggest overbound() returns the index of the largest
    element, with numbers compared by value and sized objects
    (str/list/tuple/set) presumably compared by len() -- confirm against
    the CustomList implementation.
    """

    def test_customListOverbound_whenListHasMultipleNumbersAndFloatIsBigger_shouldReturnTheIndexOfTheBiggest(self):
        values = [1, 3.14]
        cl = self.setup_list(*values)
        # the position of the maximum value is the expected result
        expected = values.index(max(values))
        actual = cl.overbound()
        self.assertEqual(expected, actual)

    def test_customListOverbound_whenListHasMultipleNumbersAndIntIsBigger_shouldReturnTheIndexOfTheBiggest(self):
        values = [1, 3.14, 5]
        cl = self.setup_list(*values)
        expected = values.index(max(values))
        actual = cl.overbound()
        self.assertEqual(expected, actual)

    def test_customListOverbound_whenListHasNumbersAndLenObjectsAndNumberIsBigger_shouldReturnTheIndexOfTheNumber(
            self):
        numbers = [1, 5.14]
        len_objects = ['123', [1, 2], (3, 4), {1, 2, 3}, {1, 2}]
        cl = self.setup_list(*numbers, *len_objects)
        # 5.14 (overall index 1) beats every other element
        expected = 1
        actual = cl.overbound()
        self.assertEqual(expected, actual)

    def test_customListOverbound_whenListHasNumbersAndLenObjectsAndLenObjectIsBigger_shouldReturnTheIndexOfTheNumber(
            self):
        numbers = [1, 3.14]
        len_objects = ['123', [1, 2], (3, 4), {1, 2, 3, 6}, {1, 2}]
        cl = self.setup_list(*numbers, *len_objects)
        # the four-element set at overall index 5 is the largest element
        expected = 5
        actual = cl.overbound()
        self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
| true |
03ef410fbcfe11ee9ec563e07e378a8863913a4f | Python | JYleekr847/python_summer | /Chapter12/3.py | UTF-8 | 790 | 4 | 4 | [] | no_license | #분수모듈
import math
import fractions
a = fractions.Fraction(4, 16)
print(a)
b = fractions.Fraction(-6,21)
print(b)
c = fractions.Fraction(3)
print(c)
a2 = fractions.Fraction(a)
print(a2)
print(fractions.Fraction('6/21'))
print(fractions.Fraction('3.14'))
print(fractions.Fraction("-0.34"))
s = """
-0.34
"""
print(fractions.Fraction(s))
from fractions import Fraction
print(Fraction.from_float(0.5)) # 실수를 받아 Fraction 객체 생성
print(Fraction.from_decimal(4)) # 10진수를 받아 Fraction 객체생성
from math import pi,cos
from fractions import Fraction
print(Fraction.from_float(pi))
print(Fraction.from_float(pi).limit_denominator(100))
# 최대공약수를 반환하는 gcd() 클래스 메서드
print(fractions.gcd(120,180))
print(fractions.gcd(0.5,6))
| true |
336574e1d401a0584247ef7f040f72131a27ad42 | Python | quincy-deng/pathogen-in-BGI | /09.已完成/01.不同采样部位病原菌水平分析/02.Huashan_AiJingwen/05.check_file.py | UTF-8 | 847 | 2.609375 | 3 | [] | no_license | # import pandas as pd
# import os
# import numpy as np
# import sys
# file_path =r'/hwfssz1/ST_PRECISION/USER/zhuzhongyi/04.CDC_data/05.CDC_20180518/CDC_20180518.total.txt'
# lines = 0
# for lines,line in enumerate(open(file_path)):
# pass
# print(lines)
# Sample [start, end] interval pairs used to exercise the merge loop below.
data = [
    [1, 2],
    [2, 3],
    [1, 6],
    [1, 3],
    [3, 7],
    [3, 6],
    [3, 8],
    [3, 7],
    [10, 12],
    [110, 290],
    [50, 60],
    [49, 55],
]
def sortsss(a, b):
    """Old-style cmp comparator for [start, end] pairs.

    Orders primarily by the first element, then by the second; returns
    -1 / 0 / 1 like a classic cmp() function.

    Bug fix: the original returned 1 for equal pairs, violating the
    comparator contract (cmp(x, x) must be 0) and making any sort that
    used it unstable/inconsistent.
    """
    if a[0] != b[0]:
        return 1 if a[0] > b[0] else -1
    if a[1] != b[1]:
        return 1 if a[1] > b[1] else -1
    return 0
# NOTE(review): list.sort() sorts in place and returns None, so this call
# passes None as the only argument to the two-parameter sortsss() and
# raises TypeError at runtime -- the line was probably meant to be just
# data.sort(). Confirm the intent.
sortsss(data.sort())
print(data)
# exit() here means the interval-merging loop below is dead code.
exit()
# Merge overlapping [start, end] intervals (assumes sorted input).
result = []
for a, b in data:
    if not result:
        # first interval seeds the result
        result.append([a, b])
        continue
    la, lb = result[-1]
    # extend the last merged interval when the new one overlaps it
    if la <= a <= lb and b > lb:
        result[-1][1] = b
    # disjoint interval: start a new entry
    if a > lb:
        result.append([a, b])
# print result
3d9d8809994a0ec877b1611edf896749fc0634dc | Python | revathi2001/Cryptohack-and-cryptopals-set1 | /cryptopals/aes_in_ECB_mode.py | UTF-8 | 251 | 2.734375 | 3 | [] | no_license | from Crypto.Cipher import AES
# Cryptopals set 1, challenge 7: decrypt a base64-encoded file that was
# encrypted with AES-128 in ECB mode under the key "YELLOW SUBMARINE".
# (AES itself comes from Crypto.Cipher, imported at the top of the file.)
import binascii  # NOTE(review): unused in this script
import codecs    # NOTE(review): unused in this script
import base64

key = 'YELLOW SUBMARINE'
msg = open('7.txt', 'r')
# the file stores the ciphertext as one base64 blob
ciphertext = base64.b64decode(msg.read())
msg.close()
decipher = AES.new(key, AES.MODE_ECB)
# ECB decrypts each 16-byte block independently; prints raw plaintext bytes
print(decipher.decrypt(ciphertext))
| true |
72aba83205c8cc6f469cad21f465ec586730adc6 | Python | atsuhisa-i/Python_study1 | /comp_cond4.py | UTF-8 | 80 | 2.546875 | 3 | [] | no_license | print(['Buzz' if x%5 == 0 else 'Fizz' if x%3 == 0 else x for x in range(1, 16)]) | true |
cff4b800a7ff184af0a6969e3ce47a209faa4895 | Python | BlainWu/TrafficLight | /analyse_process.py | UTF-8 | 4,430 | 2.703125 | 3 | [] | no_license | #------------------------------------------------
# Project: paddle-Traffic
# Author:Peilin Wu - Najing Normal University
# File name :analyse_process.py.py
# Created time :2020/05
#------------------------------------------------
import json
import os
import numpy as np
# Summarise a single result file (count empty / multi-detection entries).
def file_result(file_path):
    """Print how many entries of a result JSON have zero or multiple detections.

    The file is expected to hold a list of ``[image, detections]`` pairs;
    an entry is "empty" when its detection list has no items and "multi"
    when it has more than one.

    :param file_path: path of the JSON result file to inspect

    Cleanups vs. the original: the loop variable no longer shadows the
    loaded ``data`` object, the unused enumerate index is gone, and the
    two counts use ``elif`` (an entry cannot be both empty and multi).
    """
    count_empty = 0
    count_multy = 0
    with open(file_path, 'r', encoding='utf-8') as file:
        entries = json.load(file)
    for entry in entries:
        if len(entry[1]) == 0:
            count_empty += 1
        elif len(entry[1]) > 1:
            count_multy += 1
    print(file_path, "中包含{0}个空结果,{1}个多结果".format(count_empty, count_multy))
# Summarise every result file found in a folder.
def fold_results(fold_path):
    """Run file_result() on each file directly inside ``fold_path``."""
    for entry in os.listdir(fold_path):
        file_result(os.path.join(fold_path, entry))
# One-off fix for a training run whose class labels were inverted.
# Do not reuse casually.
def invert_result_label(origin_path, save_path):
    """Rewrite every JSON result file with its detection labels flipped.

    Each entry looks like ``[image, [[label, ...], ...]]``; label 0.0
    becomes 1.0 and anything else becomes 0.0.

    :param origin_path: folder with the original result JSON files
    :param save_path: folder the corrected files are written to (same names)

    Bug fixes vs. the original:
    * ``new_data`` was created once outside the file loop, so every output
      file also contained all entries of the files processed before it.
    * the output file was opened without ever being closed; use ``with``.
    """
    for file_list in os.listdir(origin_path):
        filename = os.path.join(origin_path, file_list)
        with open(filename, 'r', encoding='utf-8') as file:
            datas = json.load(file)
        new_data = []
        for data in datas:
            for detection in data[1]:
                detection[0] = 1.0 if detection[0] == 0.0 else 0.0
            new_data.append(data)
        print("处理完成文件:{}".format(filename))
        with open(os.path.join(save_path, file_list), 'w') as out_file:
            json.dump(new_data, out_file)
# Truncate the bbox coordinates in every result file to integers
# (experimental; the submission score is unaffected).
def bbox_float_to_int(origin_path, save_path):
    """Rewrite every JSON result file with integer bounding-box coordinates.

    Each entry looks like ``[image, [[label, score, x1, y1, x2, y2], ...]]``;
    the four coordinates (indices 2..5) are truncated with int().

    :param origin_path: folder with the original result JSON files
    :param save_path: folder the converted files are written to (same names)

    Bug fixes vs. the original:
    * files were opened from the hard-coded "./results" folder instead of
      ``origin_path``, so the parameter was silently ignored.
    * ``new_data`` was created once outside the file loop, so every output
      file also contained the entries of the files processed before it.
    """
    for file_list in os.listdir(origin_path):
        filename = os.path.join(origin_path, file_list)
        with open(filename, 'r', encoding='utf-8') as file:
            datas = json.load(file)
        new_data = []
        for data in datas:
            for detection in data[1]:
                for idx in range(2, 6):
                    detection[idx] = int(detection[idx])
            new_data.append(data)
        print("处理完成文件:{}".format(filename))
        with open(os.path.join(save_path, file_list), 'w') as out_file:
            json.dump(new_data, out_file)
# Drop all but the highest-scoring detection for images with several results.
def de_multi(origin_file_path):
    """Keep only the single best-scoring detection per image.

    Reads a result JSON, keeps the detection with the highest score
    (index 1 of each detection) for every entry that has several, and
    writes the reduced list next to the input as "[de_multi]_<name>".
    Prints the empty/multi statistics before and after.
    """
    new_datas = []
    file_name = origin_file_path.split('/')[-1]  # take the file name
    save_name = "[de_multi]_" + file_name  # name of the output file
    save_path = origin_file_path.replace(file_name, save_name)  # output path
    print("原结果统计:")
    file_result(origin_file_path)
    with open(origin_file_path, 'r', encoding='utf-8') as file:
        datas = json.load(file)
        for i, data in enumerate(datas):
            if len(data[1]) > 1:  # more than one detection for this image
                scores = []
                inds = []
                buffer_data = []
                saved_data = []
                # NOTE(review): this inner loop reuses the outer loop
                # variable ``i``; harmless here but confusing.
                for i, multi_resul in enumerate(data[1]):
                    scores.append(multi_resul[1])  # collect scores in order
                inds = np.argsort(scores)  # indices in ascending score order
                buffer_data.append(data[0])
                # keep the detection with the highest score
                saved_data.append(data[1][inds[-1]])
                buffer_data.append(saved_data)
                # extra nesting level keeps the output format consistent
                new_datas.append(buffer_data)
            else:
                new_datas.append(data)
    with open(save_path, 'w', encoding='utf-8') as f:
        json.dump(new_datas, f)
    file_result(save_path)
# Ad-hoc invocations kept for reference; only the fold_results call is live.
#de_multi('./uploaded/90.8809765epoch-valid22-nms05.json')
#fold_results('./result_histories')
fold_results("./uploaded")
#fold_results('./results')
#file_result('./results/65epoch-valid2-nms05.json')
#invert_result_label('./result_buffer','./correct_result')
1a9ed9fa10edb55a434cf7c5990c56f9e0151832 | Python | daniel-reich/ubiquitous-fiesta | /pmYNSpKyijrq2i5nu_23.py | UTF-8 | 363 | 2.84375 | 3 | [] | no_license |
from itertools import combinations_with_replacement
def darts_solver(sections, darts, target):
    """Find every multiset of dart throws that sums to the target score.

    :param sections: iterable of section values available on the board
    :param darts: number of darts thrown
    :param target: required total score
    :return: list of solutions formatted as "a-b-c" strings, in the order
        produced by itertools.combinations_with_replacement

    Bug fix: the original built each solution by str()-ing the tuple and
    stripping "()" then replacing ", " with "-", which left a trailing
    comma on single-dart solutions (e.g. "5," instead of "5").
    """
    return [
        '-'.join(str(section) for section in combo)
        for combo in combinations_with_replacement(sections, darts)
        if sum(combo) == target
    ]
| true |
0e60b5db336943f613f76e6d6098991c220be684 | Python | mylbrinkerhoff/NegShifting | /UnicodeTesting.py | UTF-8 | 1,121 | 4.21875 | 4 | [
"MIT"
] | permissive | # alphabet = 'αβγδεζηθικλμνξοπρςστυφχψ'
# print(alphabet)
filename = 'test.txt'
# Reading an entire file at once'
with open(filename) as f_obj:
contents = f_obj.read()
# Reading line by line
# with open(filename) as f_obj:
# for line in f_obj:
# print(line.rstrip())
# Opening the file one line at a time
# with open(filename) as f_obj:
# lines = f_obj.readlines()
# for line in lines:
# print(line.rstrip())
# Testing lists
# squares = []
# for x in range(1,11):
# squares.append(x**2)
# print(squares)
# Writing to an output
output = 'output.txt'
with open(output, 'w') as f:
f.write(contents.rstrip())
# age = input("What is your age? ")
# age = int(age)
# if age < 16:
# print("You are to young to drive in Utah!")
# elif age >= 70:
# print("You should probably not drive anymore.")
# elif age == 16:
# print("You can now drive in Utah!")
# elif age > 16:
# print("You can drive in Utah!")
# else:
# print("You didn't give me an age!")
# bikes = ['trek', 'redline', 'giant']
# print(bikes)
# for bike in bikes:
# print(bike) | true |
9b6282cf8e4dc36b523e046745247844e5251f03 | Python | halida/code_example | /python/marcotest.py | UTF-8 | 287 | 3.234375 | 3 | [] | no_license | #!/usr/bin/env python
#-*- coding:utf-8 -*-
"""
module: marcotest
"""
# marco example
defmacro NAME(name):
name
me = 12
print NAME(me) # me
# foreach
defmacro FOREACH(var, n):
for var in range(`n)
for i in range(12):
print i
FOREACH(i, 12):
print i
def is():
=
| true |
92da84d09b476c57c1950a3591f86ca94fb94ab2 | Python | mateusoro/bolsa-replit | /Buscar/modulo_mysql.py | UTF-8 | 1,665 | 2.640625 | 3 | [] | no_license | import mysql.connector
import time
from threading import Thread
def conectar():
    """Open the MySQL connection and keep it alive with a reconnect loop.

    Meant to run forever inside a background thread: every 10 seconds it
    checks the connection and re-creates it if it dropped. The connection
    and cursor are published through the module globals ``mydb`` and
    ``mycursor`` for select() to use.

    NOTE(review): database credentials are hard-coded in source; they
    should live in environment variables or a config file.
    """
    global mycursor
    global mydb
    mydb = mysql.connector.connect(
        host="sql434.main-hosting.eu",
        user="u888071488_root",
        password="Ss161514",
        database="u888071488_stremio_db"
    )
    mycursor = mydb.cursor()
    while(True):
        time.sleep(10)
        try:
            if not mydb.is_connected():
                print('Conectando')
                mydb = mysql.connector.connect(
                    host="sql434.main-hosting.eu",
                    user="u888071488_root",
                    password="Ss161514",
                    database="u888071488_stremio_db"
                )
                mycursor = mydb.cursor()
        except Exception as e:
            # keep the watchdog alive no matter what the connector raises
            print('Erro conectando')
def select(sql):
    """Run a SQL statement on the shared connection, retrying until it works.

    Waits (recursing every 2 s) while the global connection is down. For
    statements containing "delete" the change is committed and "" is
    returned; otherwise the fetched rows are returned. On errors the call
    retries itself recursively.

    NOTE(review): ``sql`` is built by callers, not parameterised here --
    confirm no untrusted input ever reaches this function. The
    retry-by-recursion also grows the call stack during long outages.
    """
    global mycursor
    global mydb
    if not mydb.is_connected():
        print('Esperando conexão')
        time.sleep(2)
        return select(sql)
    else:
        try:
            #print(sql)
            mycursor.execute(sql)
            if "delete" in sql:
                mydb.commit()
                return ""
            else:
                myresult = mycursor.fetchall()
                mydb.commit()
                return myresult
        except Exception as e:
            #print(e)
            # "no result set" simply means the statement returned nothing
            if "No result set to fetch from" in str(e):
                return []
            else:
                print(str(e))
                print('Esperando conexão')
                time.sleep(2)
                return select(sql)
t1 = Thread(target=conectar,args=[])
t1.start()
| true |
790f2ef4b8a5bbcea2b986b4ab821e6b5da4c3a9 | Python | 6306022610113/INEPython | /week6/pass.py | UTF-8 | 244 | 4.0625 | 4 | [] | no_license | value = int(input('number : '))
def main():
    # ``value`` is read at module level via input() when the script starts.
    show_double(value)

#The show_double function accepts an argument
#and displays double its value
def show_double(number):
    result = number * 2
    print(result)

#call the main function.
main()
a3e433ddf81749bcdcc6833fef7fafdbab7cc189 | Python | RiddhiRex/Leetcode | /Missing Element in Sorted Array.py | UTF-8 | 1,488 | 4.03125 | 4 | [] | no_license | '''
Given a sorted array A of unique numbers, find the K-th missing number starting from the leftmost number of the array.
Example 1: Input: A = [4,7,9,10], K = 1 Output: 5 Explanation: The first missing number is 5.
Example 2: Input: A = [4,7,9,10], K = 3 Output: 8 Explanation: The missing numbers are [5,6,8,…], hence the third missing number is 8. Example 3:
Input: A = [1,2,4], K = 3 Output: 6 Explanation: The missing numbers are [3,5,6,7,…], hence the third missing number is 6.
Link:https://strstr.io/Leetcode1060-Missing-Element-in-Sorted-Array/
'''
class Solution(object):
    def missingElement(self, nums, k):
        """Return the k-th missing number counting from nums[0].

        ``nums`` is sorted and strictly increasing. ``missing[i]`` holds
        how many numbers are absent between nums[0] and nums[i]; a binary
        search then locates the gap containing the k-th missing value.
        O(n) time for the prefix counts, O(log n) for the search.

        :type nums: List[int]
        :type k: int
        :rtype: int

        Cleanup: removed the leftover debug print() calls that polluted
        stdout on every invocation.
        """
        # missing[i] = count of numbers missing up to and including nums[i]
        missing = [0] * len(nums)
        for i in range(1, len(nums)):
            missing[i] = missing[i - 1] + nums[i] - nums[i - 1] - 1
        # The k-th missing number lies beyond the end of the array.
        if k > missing[-1]:
            return nums[-1] + k - missing[-1]
        # Binary search for the first index whose prefix count reaches k.
        lo, hi = 0, len(nums) - 1
        while lo != hi:
            mid = (lo + hi) // 2
            if missing[mid] < k:
                lo = mid + 1
            else:
                hi = mid
        # The answer sits in the gap just before nums[lo].
        return nums[lo - 1] + k - missing[lo - 1]
| true |
39ffa18d4c74b58a45265c8173756ae92b1da27a | Python | wangyiyao2016/Mypystudy | /tool_modules/parallel_tasks/task_mapper.py | UTF-8 | 604 | 2.78125 | 3 | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from multiprocessing.managers import BaseManager
import random
class QueueManager(BaseManager):
    """Client-side manager used to reach the task queue served remotely."""
    pass

# Only the getter is registered here; the server side owns the queue.
QueueManager.register('get_task_queue')

server_addr = '127.0.0.1'
# authkey must match the server's; 5000 is the server's listen port.
m = QueueManager(address=(server_addr, 5000), authkey=b'123')
m.connect()
print('Connect to server %s...' % server_addr)

def main():
    """Push 100 random work items (ints in [5000, 10000]) onto the shared queue."""
    task_queue = m.get_task_queue()
    for _ in range(100):
        n = random.randint(5000, 10000)
        print('Put task %d...' % n)
        task_queue.put_nowait(n)
    print('mapper exit.')

if __name__ == '__main__':
    main()
    pass
| true |
ae4dc80701bee61411a3ae245215d2dd8cf73817 | Python | cech92/test-drf-with-react-redux | /backend/apis/models.py | UTF-8 | 1,945 | 2.609375 | 3 | [] | no_license | from django.contrib.auth.models import AbstractUser, BaseUserManager
from django.db import models
class UserProfileManager(BaseUserManager):
    """Manager that creates users keyed by e-mail instead of username."""

    def create_user(self, email, first_name, last_name, password=None):
        """Create and persist a regular user; the username mirrors the e-mail."""
        # if not email:
        #     raise ValueError('Email address not valid')
        email = self.normalize_email(email)
        # AbstractUser still requires a username, so reuse the e-mail for it
        user = self.model(email=email, username=email, first_name=first_name, last_name=last_name)
        user.set_password(password)
        user.save(using=self._db)
        return user

    def create_superuser(self, email, first_name, last_name, password):
        """Create a regular user, then promote it to staff/superuser."""
        user = self.create_user(email, first_name, last_name, password)
        user.is_superuser = True
        user.is_staff = True
        user.save(using=self._db)
        return user
class User(AbstractUser):
    """Custom user model that authenticates by e-mail."""

    # unique e-mail used as the login identifier (see USERNAME_FIELD)
    email = models.EmailField(max_length=255, unique=True)
    # refreshed automatically on every save()
    updated_at = models.DateTimeField(auto_now=True)

    objects = UserProfileManager()

    USERNAME_FIELD = 'email'
    REQUIRED_FIELDS = ['first_name', 'last_name']

    @property
    def show_updated_at(self):
        """updated_at formatted as 'YYYY/MM/DD HH:MM' ('' when unset)."""
        if not self.updated_at:
            return ""
        _date = self.updated_at.strftime('%Y/%m/%d %H:%M')
        return u"%s" % _date

    class Meta:
        db_table = 'user'
class Usage(models.Model):
    """One consumption record of a UsageType by a user."""

    id = models.BigAutoField(primary_key=True)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    usage_type = models.ForeignKey('UsageType', on_delete=models.CASCADE)
    # when the consumption happened
    usage_at = models.DateTimeField()
    # raw amount, in the usage type's unit
    amount = models.FloatField()

    @property
    def show_usage_at(self):
        """usage_at formatted as 'YYYY/MM/DD HH:MM' ('' when unset)."""
        if not self.usage_at:
            return ""
        return '%s' % self.usage_at.strftime('%Y/%m/%d %H:%M')

    @property
    def total(self):
        """amount scaled by the usage type's factor, rounded to 3 decimals."""
        if not self.usage_type:
            return self.amount
        return round(self.amount * self.usage_type.factor, 3)

    class Meta:
        db_table = 'usage'
class UsageType(models.Model):
    """Kind of consumption: a name, its unit and a conversion factor."""

    id = models.BigAutoField(primary_key=True)
    name = models.CharField(max_length=50)
    unit = models.CharField(max_length=10)
    # multiplier applied to Usage.amount when computing Usage.total
    factor = models.FloatField()

    class Meta:
        db_table = 'usage_type'
| true |
37748b891641446c5132d8c0350abf1146a7c3f7 | Python | while-dante/Proyectos-Academia | /Python/quickSort.py | UTF-8 | 2,408 | 3.171875 | 3 | [] | no_license | import numpy as np
from matplotlib import pyplot as plt
import random as rn
class QuickSort:
    """In-place Lomuto-partition quicksort that counts comparisons and swaps.

    ``sort()`` sorts the list passed to the constructor and returns
    ``[sorted_list, comparisons, swaps]``.
    """

    def __init__(self, group):
        self.group = group
        # Bug fix: the original located the bounds with list.index() on the
        # first/last *values*; index() returns the first occurrence, so any
        # list whose last value also appears earlier (e.g. [2, 1, 2]) got a
        # wrong upper bound and was never sorted, and an empty list crashed.
        self.first = 0
        self.last = len(group) - 1
        self.comparisons = 0
        self.swaps = 0

    def swap(self, i, j):
        """Exchange elements i and j and count the swap."""
        self.group[i], self.group[j] = self.group[j], self.group[i]
        self.swaps += 1
        return True

    def partition(self, first, last):
        """Lomuto partition around group[last]; returns the pivot's final index."""
        i = first - 1
        pivot = self.group[last]
        for j in range(first, last):
            self.comparisons += 1
            if self.group[j] < pivot:
                i += 1
                self.swap(i, j)
        self.swap(i + 1, last)
        return i + 1

    def quickSort(self, first, last):
        """Recursively sort group[first..last] in place."""
        if first < last:
            pivot_index = self.partition(first, last)
            self.quickSort(first, pivot_index - 1)
            self.quickSort(pivot_index + 1, last)
        return True

    def sort(self):
        """Sort the whole list; return [sorted_list, comparisons, swaps]."""
        self.quickSort(self.first, self.last)
        return [self.group, self.comparisons, self.swaps]
def func(x):
    """Model of the n*log(n) complexity curve used in the plot."""
    return np.log(x) * x
size = list(range(1,1051,50))
rawResultsSwaps = []
resultsSwaps = []
rawResultsComps = []
resultsComps = []
model = []
raw = 0
"""startTime = tm.time()"""
for quant in size:
model.append(func(quant))
group = list(range(quant))
rawResultsSwaps.append([])
rawResultsComps.append([])
repeats = 0
while repeats < 50:
rn.shuffle(group)
qSort = QuickSort(group)
info = qSort.sort()
comps = info[1]
swaps = info[2]
rawResultsComps[raw].append(comps)
rawResultsSwaps[raw].append(swaps)
repeats = repeats +1
resultsSwaps.append(np.average(rawResultsSwaps[raw]))
resultsComps.append(np.average(rawResultsComps[raw]))
raw = raw +1
"""elapsedTime = tm.time() - startTime"""
"""print(tm.strftime("%H:%M:%S", tm.gmtime(elapsedTime)))"""
plt.figure()
plt.grid()
plt.plot(size,model,'b--',linewidth=2,label='Model of complexity')
plt.plot(size,resultsSwaps,'*g',markersize=4,label='Number of swaps')
plt.plot(size,resultsComps,'or',markersize=4,label='Number of comparisons')
plt.legend()
plt.xlim(0,1100)
plt.xlabel("n (number of distinct elements)")
| true |
b3eef19f5f01af5d0ffe9710e6d5c8366e8ae5ef | Python | jun0811/TIL | /algo/그래프/연산.py | UTF-8 | 828 | 2.8125 | 3 | [] | no_license | def mul(n):
return n*2
# After each operation, recurse with the new value and try all four moves again.
def bfs(s, cnt):
    """Depth-first search for the minimum operation count turning s into M.

    Despite its name this is DFS with pruning: branches already longer
    than the best known answer (global ``min_cnt``) are cut off. Moves
    come from the global ``cal`` list (+1, -1, *2 via mul(), -10); values
    outside [1, 1000000) are rejected.
    """
    global min_cnt
    if (cnt >= min_cnt):
        # already worse than the best answer found so far -- prune
        return
    for c in cal:
        if c == 2:
            # c == 2 is treated as "multiply by two", not "add two"
            n = mul(s)
            if n == M:
                if cnt + 1 < min_cnt:
                    min_cnt = cnt + 1
                return
            bfs(n, cnt + 1)
            # NOTE(review): there is no ``continue`` here, so the additive
            # branch below also runs with c == 2 (i.e. tries s + 2), which
            # is not one of the four intended operations -- confirm.
        s += c
        if s >= 1000000 or (s < 1):
            # out of range: undo the move and try the next operation
            s -= c
            continue
        if s == M:
            if cnt + 1 < min_cnt:
                min_cnt = cnt + 1
            return
        bfs(s, cnt + 1)
        # undo the move before trying the next operation
        s -= c
# Contest-style driver: read T test cases of (N, M) and print the minimum
# number of +1 / -1 / *2 / -10 operations needed to reach M from N.
T = int(input())
for tc in range(1, T + 1):
    N, M = map(int, input().split())
    # operations: +1, -1, *2, -10 (2 is special-cased as multiply in bfs)
    cal = [1, -1, 2, -10]
    visited = [0] * 4  # NOTE(review): never read by bfs() -- leftover?
    min_cnt = 0XFFFF  # sentinel "infinity" for the pruning search
    bfs(N, 0)
    print('#{} {}'.format(tc, min_cnt))
bfe7a03204b4772e77fea4d7abac4d1f115aedc5 | Python | spoiler0/backtrader | /data.py | UTF-8 | 1,235 | 2.78125 | 3 | [] | no_license | import backtrader as bt
from datetime import date, time, datetime
from backtrader.utils import date2num
class CustomCSVData(bt.CSVDataBase):
    """backtrader CSV feed for Binance_BTCUSDT_minute.csv-style files.

    Expected row layout:
    [unixtime, "YYYY-MM-DD HH:MM:SS", symbol, open, high, low, close,
     VolumeBTC, VolumeUSDT, tradecount]
    The ``params`` tuple maps each OHLCV line to its column index.
    """
    params = (
        ("open", 3),
        ("high", 4),
        ("low", 5),
        ("close", 6),
        ("volume", 7),
    )

    def start(self):
        super(CustomCSVData, self).start()

    def stop(self):
        super(CustomCSVData, self).stop()

    def _loadline(self, linetokens):
        """Parse one CSV row into the feed's datetime/OHLCV lines."""
        # For Binance_BTCUSDT_minute.csv
        # [unixtime, date time, symbol, open, high, low, close, VolumeBTC, VolumeUSDT, tradecount]
        # Split "YYYY-MM-DD HH:MM:SS" into its six numeric components.
        year, month, day, hour, minute, second = linetokens[1].replace("-", " ").replace(":", " ").split(" ")
        row_date = date(int(year), int(month), int(day))
        row_time = time(int(hour), int(minute), int(second))
        # backtrader stores datetimes as floats; date2num converts for us.
        self.lines.datetime[0] = date2num(datetime.combine(row_date, row_time))
        self.lines.open[0] = float(linetokens[self.params.open])
        self.lines.high[0] = float(linetokens[self.params.high])
        self.lines.low[0] = float(linetokens[self.params.low])
        self.lines.close[0] = float(linetokens[self.params.close])
        self.lines.volume[0] = float(linetokens[self.params.volume])
        return True
26943d8039098999b85f92dd8156847252f96113 | Python | Negaentropy/scheduler_frame | /src/frame/regular_into_db.py | UTF-8 | 3,926 | 2.65625 | 3 | [
"Apache-2.0"
] | permissive | import re
import sys
import json
import ConfigParser
from mysql_manager import mysql_manager
from loggingex import LOG_WARNING
class regualer_into_db:
def __init__(self, conf_path):
self._path = conf_path
self._cp = ConfigParser.SafeConfigParser()
self._cp.read(conf_path)
def walks(self, data):
section_name = "strategy"
if False == self._cp.has_section(section_name):
LOG_WARNING("%s has no %s" % (self._path, section_name))
return
if len(self._get_regular(0)) == 0 :
LOG_WARNING("%s has no regular" % (self._path))
return
data_array = []
self._recursion_regular(data, 0, data_array)
if len(data_array):
self._insert_db(data_array)
def _get_regular(self, deep):
section_name = "strategy"
regular_name_pre = "regular_expression_"
regular_name = regular_name_pre + str(deep)
if False == self._cp.has_option(section_name, regular_name):
return ""
regular_str = self._cp.get(section_name, regular_name)
return regular_str
def _recursion_regular(self, data, deep, data_array):
regular_str = self._get_regular(deep)
split_data = re.findall(regular_str, data)
regualer_next_str = self._get_regular(deep + 1)
split_array = []
if len(regualer_next_str) > 0:
for item in split_data:
self._recursion_regular(item, deep + 1, data_array)
else:
for item in split_data:
split_array.append(item)
if len(split_array) > 0:
data_array.append(split_array)
def _insert_db(self, data_array):
section_name = "strategy"
conn_name_name = "conn_name"
if False == self._cp.has_option(section_name, conn_name_name):
LOG_WARNING("%s has no %s %s" % (self._path, section_name, conn_name_name))
return False
conn_name = self._cp.get(section_name, conn_name_name)
table_name_name = "table_name"
if False == self._cp.has_option(section_name, table_name_name):
LOG_WARNING("%s has no %s %s" % (self._path, section_name, table_name_name))
return False
table_name = self._cp.get(section_name, table_name_name)
keys_info_name = "keys_info"
if False == self._cp.has_option(section_name, keys_info_name):
LOG_WARNING("%s has no %s %s" % (self._path, section_name, keys_info_name))
return False
keys_info = self._cp.get(section_name, keys_info_name)
db_columns_info = json.loads(keys_info)
into_db_columns = db_columns_info.keys()
into_db_index = db_columns_info.values()
into_db_values_array = []
for data in data_array:
values = []
for index in into_db_index:
values.append(data[index])
into_db_values_array.append(values)
db_manager = mysql_manager()
conn = db_manager.get_mysql_conn(conn_name)
if None == conn:
LOG_WARNING("%s get db connect %s error" % (self._path, conn_name))
return False
conn.insert_data(table_name, into_db_columns, into_db_values_array)
return True
if __name__ == "__main__":
a = mysql_manager()
test_data_1 = {"stock_db":{"host":"127.0.0.1", "port":3306, "user":"root", "passwd":"fangliang", "db":"stock", "charset":"utf8"}}
a.add_conns(test_data_1)
a = regualer_into_db("../../conf/market_maker_strategy.conf")
data = 'var SXcUovEK={pages:1646,date:"2014-10-22",data:["2,000858,DDDD,35.90,3.58,29113.49,17.77,16218.12,9.90,12895.37,7.87,-11230.50,-6.86,-17882.99,-10.92,2017-01-04 15:00:00","1,603298,ABCD,29.38,10.00,25382.35,29.62,15561.30,18.16,9821.05,11.46,12111.83,14.13,-37494.18,-43.75,2017-01-04 15:00:00"]}'
a.walks(data)
pass
| true |
bda7873784122bcb94a265590f7cb1bd78aa0c1e | Python | quangthao321/Crawl_Vietstock | /Thao6sql.py | UTF-8 | 1,143 | 2.875 | 3 | [] | no_license | import csv
import mysql.connector
from mysql.connector import Error
db = {
'host' : 'localhost',
'user' : 'root',
'database' : 'vietstock',
'password' : ''
}
def conn(db):
    '''Load Vietstock.csv and bulk-insert its rows into the datasql table.

    db: dict of mysql.connector connection keyword arguments.
    Each CSV row becomes (id, title, content); the surrounding ["..."]
    artifacts left by the crawler are stripped from columns 1 and 2.
    '''
    conn = None
    try:
        conn = mysql.connector.connect(**db);
        if conn.is_connected():
            print('Connect to mySQL database')
            data = []
            mycursor = conn.cursor()
            # parameterised query -- values are escaped by the connector
            # NOTE(review): "tiltle" looks like a typo for "title"; confirm
            # it matches the actual column name in the schema.
            sql = "INSERT INTO datasql (id, tiltle, content) VALUES (%s, %s ,%s)"
            with open("Vietstock.csv") as my_file:
                csv_reader = csv.reader(my_file, delimiter=',')
                next(csv_reader)  # skip the header row
                for row in csv_reader:
                    # data.append(tuple(row))
                    data.append(tuple((row[0], row[1].strip("[']\""), row[2].strip("[']\""))))
            mycursor.executemany(sql, data)
    except Error as e:
        print(e)
    finally:
        # commit and close only if the connection was actually opened
        if conn is not None and conn.is_connected():
            print("Closing database")
            conn.commit()
            conn.close()
def main():
    """Entry point: import the crawled CSV using the module-level settings."""
    conn(db)

if __name__ == '__main__':
    main()
82316d3677cadaf619423d3799853d2d38712158 | Python | cirosantilli/python-cheat | /logging_cheat.py | UTF-8 | 2,873 | 3.15625 | 3 | [] | no_license | #!/usr/bin/env python
"""
## logging
Standard way to output error messages.
Advantages:
- has many useful built-in error formats
- has a level system
- easy to change where logs go, e.g. a file.
http://docs.python.org/2/howto/logging.html
TODO log all at once
"""
import logging
import sys
import time
# NOTE: each `if '## ...':` guard tests a non-empty string and is therefore
# always true; the pattern only serves as a searchable section header.
if '## Default logger':
    # basicConfig configures the root logger; it is a no-op once the root
    # logger has handlers, so it must run before any logging call.
    logging.basicConfig(
        # Log to a file. Default is sys.stderr.
        # This can only take file path strings.
        # To log to stdout, use:
        #filename = 'example.log',
        # Mode defaults to `a`, which appends to old log.
        #filemode = 'w'
        # Minimum log level that will get printed.
        # Often taken as a CLI parameter.
        level = logging.DEBUG,
        #level = logging.INFO,
        #level = logging.WARNING,
        #level = logging.ERROR,
        #level = logging.CRITICAL,
        # Default does not contain time, so you very likely want to override this.
        format = ' %(levelname)s %(asctime)s %(message)s',
        # Format for asctime
        datefmt = '%m/%d/%Y %I:%M:%S %p',
    )
    sys.stderr.write("logging:\n")
    # One call per severity level, least to most severe.
    logging.debug('debug')
    logging.info('info')
    logging.warning('warning')
    logging.error('error')
    logging.critical('critical')
    try:
        raise Exception
    except:
        # logging.exception logs at ERROR level and appends the traceback.
        logging.exception('inside exception. also prints exception stack')
if '## Custom loggers':
    # Create logger
    logger = logging.getLogger('logger_name')
    logger.setLevel(logging.DEBUG)
    # Create console handler and set level to debug
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    # Create formatter
    formatter = logging.Formatter(' %(asctime)s - %(name)s - %(levelname)s - %(message)s')
    # Add formatter to ch
    ch.setFormatter(formatter)
    # Add ch to logger
    logger.addHandler(ch)
    # Usage:
    sys.stderr.write("custom logger:\n")
    logger.debug('debug')
    logger.info('info')
    # logger.warn is a deprecated alias of logger.warning.
    logger.warn('warn')
    logger.error('error')
    logger.critical('critical')
# TODO: log all / a certain level to stdout
if '## Align':
    """
    http://stackoverflow.com/questions/7771912/how-to-right-align-level-field-in-python-logging-formatter
    http://stackoverflow.com/questions/20618570/python-logging-formatter-is-there-any-way-to-fix-the-width-of-a-field-and-jus
    For the level name, use: `%(levelname)8s`
    """
if '## UTC time':
    # http://stackoverflow.com/questions/6321160/python-logging-how-to-set-time-to-gmt
    logging.debug('not UTC')
    # Formatter.converter is a class attribute: this switches asctime to
    # UTC for every formatter from this point on.
    logging.Formatter.converter = time.gmtime
    logging.debug('UTC')
if '## Threading':
    """
    logging is thread safe:
    http://stackoverflow.com/questions/2973900/is-pythons-logging-module-thread-safe?lq=1
    You will likely want to log the thread name on every log write:
    http://stackoverflow.com/a/2357652/895245
    """
| true |
4e078d086e4bf8a4675e91f35e5fbbffb1fc8f49 | Python | mayuriindalkar/loop | /q11.py | UTF-8 | 535 | 3.828125 | 4 | [] | no_license | import random
# Number-guessing game: pick a secret in the advertised 1-10 range and keep
# prompting until the player finds it, with higher/lower hints.
# Fixes over the original: the secret was drawn from 0-3 although the prompt
# promised 1-10; the loop condition (`b <= 5` with `b` never changed) only
# masqueraded as a bound; and a second, hint-less prompt ran every round.
secret = random.randint(1, 10)
attempts = 0
while True:
    guess = int(input("Enter Guees Number Between 1 and 10 : "))
    attempts += 1
    print("Your Gueesing Number Was :", guess)
    if guess == secret:
        print("You Win")
        break
    print("Not Correct Try Again")
    if secret > guess:
        print("You Losse Your Guess Number Was Small")
    else:
        print("Your Guess Number Was So High")
| true |
84f76429fefd47e45329a047b3e9851fa0772ee9 | Python | derekbatoyon/adventofcode2020 | /day07/part2.py | UTF-8 | 1,737 | 3.484375 | 3 | [] | no_license | import fileinput
import re
class Edge(object):
    """Directed containment rule: `count` bags of colour `color` are required."""

    def __init__(self, color, count):
        self.color = color  # colour of the contained bag
        self.count = count  # how many bags of that colour are required

    def __repr__(self):
        # Added for debuggability (Graph.dump / interactive inspection);
        # does not affect identity-based hashing used by the edge sets.
        return 'Edge(color={!r}, count={!r})'.format(self.color, self.count)
class Graph(object):
    """Adjacency-list digraph of bag-containment rules (tail contains head)."""

    def __init__(self):
        self.graph = dict()

    def add_edge(self, tail, head, count):
        # setdefault creates the adjacency set on first use.
        self.graph.setdefault(tail, set()).add(Edge(head, count))

    def edges(self, tail):
        """All bag colours reachable from `tail` (transitively)."""
        return {edge.color for edge in self.edges_helper(tail)}

    def edges_helper(self, tail):
        # Depth-first generator over every edge reachable from `tail`.
        for edge in self.graph.get(tail, ()):
            yield edge
            yield from self.edges_helper(edge.color)

    def count(self, tail):
        """Total number of bags contained inside one `tail` bag."""
        total = 0
        for edge in self.graph.get(tail, ()):
            # Each contained bag counts itself plus everything inside it.
            total += edge.count * (1 + self.count(edge.color))
        return total

    def dump(self):
        for tail, edge_set in self.graph.items():
            print(tail, '=>', [e.color for e in edge_set])
def main():
    """Parse bag rules from stdin/argv files and answer both puzzle parts."""
    container_re = re.compile('(?P<color>.*?) bags contain')
    containee_re = re.compile('(?P<count>\d+) (?P<color>.*?) bags?')
    graph = Graph()
    for line in fileinput.input():
        container_match = container_re.match(line)
        container = container_match.group('color')
        # Scan the remainder of the rule for every "<n> <color> bag(s)" clause.
        for clause in containee_re.finditer(line, container_match.end()):
            graph.add_edge(container, clause.group('color'), int(clause.group('count')))
    print(len(graph.edges('shiny gold')))
    print(graph.count('shiny gold'))
# Run only when executed as a script (fileinput reads argv paths or stdin).
if __name__ == "__main__":
    main()
| true |
9978ce6908d894dccff5dec6d145157d2dcd4ea3 | Python | srikanthpragada/PYTHON_02_MAR_2021 | /demo/funs/wish.py | UTF-8 | 167 | 3.71875 | 4 | [] | no_license | def wish(name, message):
print(f"{message}, {name}")
# Demo: the same function invoked positionally and with keyword arguments.
wish('Andy', 'Hello') # Positional params
wish(message="Good Morning", name="Scott") # keyword parameters
| true |
91c88b2d0bdfa5ad82133153d492d95075897e09 | Python | shimakaze-git/algorithm-program | /marubatu/classbase_marubatu.py | UTF-8 | 5,485 | 3.375 | 3 | [] | no_license | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 参考源
# https://qiita.com/r34b26/items/317cc82568845d6bbbcb
class Marubatu:
    """Two-player console tic-tac-toe (maru-batsu) game.

    Cells are numbered 1-9 and mapped to bit flags 2**0 .. 2**8, so a
    player's claimed cells can be treated as a bitmask when checking
    whether a winning line is complete.
    """

    def __init__(self):
        # Printable board; digits are replaced by 'o'/'x' as cells are taken.
        self.candidates_text = """
        1|2|3
        -----
        4|5|6
        -----
        7|8|9
        """
        # Cell labels still accepted as input; taken cells hold the mark instead.
        self.coordinate_list = [str(i) for i in range(1, 10)]
        # Bit flag for each cell: cell n <-> 2**(n - 1).
        self.candidates = [2**i for i in range(9)]
        self.pre_user_operations = []   # bit flags claimed by player 'o' (user0)
        self.pos_user_operations = []   # bit flags claimed by player 'x' (user1)
        self.turn_user = 0              # 0 -> pre_user ('o'), 1 -> pos_user ('x')
        self.turn_count = 0             # completed moves; 9 moves with no win -> draw
        self.err_message = '正しい座標を入力する必要があります。'

    def game_start(self):
        """Run the interactive game loop until a player wins or the board fills."""
        print('座標リスト\n')
        print(self.candidates_text)
        while True:
            if self.turn_user == 0:
                won = self._take_turn('pre_userの座標を入力', 'o',
                                      self.pre_user_operations, 'user0の勝ち')
                if won is None:
                    continue  # invalid input: same player tries again
                if won:
                    break
                self.turn_user = 1
            else:
                won = self._take_turn('pos_userの座標を入力', 'x',
                                      self.pos_user_operations, 'user1の勝ち')
                if won is None:
                    continue
                if won:
                    break
                self.turn_user = 0
            self.turn_count += 1
            if self.turn_count == 9:
                print("引き分けです")
                break

    def _take_turn(self, prompt, mark, operations, win_message):
        """Play one move for the current player.

        Returns True if this move wins the game, False for a normal move,
        and None when the input was invalid (the turn is not consumed).
        """
        try:
            user_input = input(prompt)
        except Exception:
            print(self.err_message)
            return None
        if user_input not in self.coordinate_list:
            # Unknown label, or a cell whose label was already replaced by a mark.
            print(self.err_message)
            return None
        self.operation(operations, user_input, mark)
        if self.check_decision(operations):
            print(win_message)
            return True
        return False

    def operation(self, user_operations: list, user_input: str, mark: str):
        """Claim cell `user_input` ('1'-'9') for `mark` ('o' or 'x').

        Updates the printable board, overwrites the free-cell label with the
        mark, records the cell's bit flag in `user_operations`, prints the
        board, and returns the updated list.
        """
        self.candidates_text = self.candidates_text.replace(str(user_input), mark)
        idx = self.coordinate_list.index(user_input)
        self.coordinate_list[idx] = mark
        user_operations.append(self.candidates[idx])
        print(self.candidates_text)
        return user_operations

    def check_decision(self, coordinate_map: list):
        """Return True when the marks in `coordinate_map` cover a winning line.

        Each entry is a distinct power of two, so the sum of the entries is
        the player's board bitmask.  Bug fix: the original compared that sum
        for *equality* against each line's total, which missed any win once a
        player held a fourth, unrelated cell; a line is won whenever all of
        its bits are set, i.e. `mask & line == line`.
        """
        c = self.candidates
        winning_lines = []
        for i in range(3):
            winning_lines.append(c[3 * i] + c[3 * i + 1] + c[3 * i + 2])  # row i
            winning_lines.append(c[i] + c[i + 3] + c[i + 6])              # column i
        winning_lines.append(c[0] + c[4] + c[8])  # diagonal 1-5-9
        winning_lines.append(c[2] + c[4] + c[6])  # anti-diagonal 3-5-7
        # Flags never repeat, so the sum doubles as the bitwise OR.
        mask = sum(int(flag) for flag in coordinate_map)
        return any(mask & line == line for line in winning_lines)
# Script entry: note this runs the interactive loop on *import* as well;
# consider wrapping in an `if __name__ == "__main__":` guard.
marubatu = Marubatu()
marubatu.game_start()
| true |
f044b9c53791c54274230208758b5c4e28fe0fe4 | Python | beautifulNow1992/evolute | /evolute/evaluation/fitness.py | UTF-8 | 2,401 | 3.140625 | 3 | [
"MIT"
] | permissive | import numpy as np
from .grade import SumGrader
class FitnessBase:
    """Base class for fitness evaluators; subclasses implement __call__."""
    def __init__(self, no_fitnesses):
        # Number of fitness values the evaluator yields per phenotype.
        self.no_fitnesses = no_fitnesses
    def __call__(self, phenotype):
        # Subclass responsibility: evaluate `phenotype` and return fitness value(s).
        raise NotImplementedError
class SimpleFitness(FitnessBase):
    """A single fitness function with fixed keyword constants.

    `constants` and any extra keyword arguments are merged once and passed
    to the fitness function on every call, alongside per-call `variables`.
    """
    def __init__(self, fitness_function, constants: dict=None, **kw):
        super().__init__(no_fitnesses=1)
        self.function = fitness_function
        # Bug fix: the original stored the caller's dict and then mutated it
        # with `update(kw)`, silently altering the argument object.  Copy it
        # so the caller's dict is left untouched.
        self.constants = {} if constants is None else dict(constants)
        self.constants.update(kw)
    def __call__(self, phenotype, **variables):
        return self.function(phenotype, **self.constants, **variables)
class MultipleFitnesses(FitnessBase):
    """Several named fitness functions combined into one grade by `grader`."""
    def __init__(self, functions_by_name, constants_by_function_name=None, order_by_name=None, grader=None):
        super().__init__(no_fitnesses=len(functions_by_name))
        if len(functions_by_name) < 2:
            raise ValueError("MultipleFitnesses needs more than one fitness!")
        self.functions = functions_by_name
        # Defaults: evaluation order follows insertion order; empty constants.
        self.order = order_by_name if order_by_name else list(self.functions)
        self.constants = (constants_by_function_name if constants_by_function_name
                          else {name: {} for name in self.order})
        self.grader = grader if grader else SumGrader()
        bad_order = (len(self.order) != len(self.functions)
                     or any(name not in self.functions for name in self.order))
        if bad_order:
            raise ValueError("The specified order is wrong: {}".format(self.order))
        bad_constants = (len(self.constants) != len(self.functions)
                         or any(name not in self.functions for name in self.constants))
        if bad_constants:
            raise ValueError("The specified constants are wrong: {}".format(self.constants))
    def __call__(self, phenotype, **variables_by_function):
        # Evaluate every fitness in the configured order, then grade the vector.
        scores = []
        for name in self.order:
            evaluate = self.functions[name]
            scores.append(evaluate(phenotype, **self.constants[name], **variables_by_function[name]))
        return self.grader(np.array(scores))
class MultiReturnFitness(FitnessBase):
    """One fitness function that returns several values in a single call."""
    def __init__(self, fitness_function, number_of_return_values, constants: dict=None, grader=None):
        super().__init__(no_fitnesses=number_of_return_values)
        self.function = fitness_function
        self.constants = constants if constants is not None else {}
        self.grader = grader if grader is not None else SumGrader()
    def __call__(self, phenotype, **variables):
        # Collect the raw return values and fold them into one grade.
        raw_values = self.function(phenotype, **self.constants, **variables)
        return self.grader(np.array(raw_values))
| true |
5a6e82bda771ce61bf9bbba70fab4af512eda7a2 | Python | KilluaKukuroo/GPA_Calculator | /GPA_calculator.py | UTF-8 | 6,863 | 3.546875 | 4 | [] | no_license | import os
import time
'''
V0
2020-3-30
功能:通过读取键盘输入的课程百分制或者等级制成绩和对应课程学分,计算百分制平均成绩和GPA绩点;
学校代码:西电:0 安大:1 南开:2 兰大:3
'''
print("西电er请输入0;安大er请输入1;南开er请输入2;兰大儿请输入3; 郑大儿请输入4;浙大儿请输入5;北大er请输入6;")
university = input()
gpa_count = {0:4,1:5}
score_sum = 0
credit_sum = 0
course_credit_sum = 0
'''
draw a welcome circle;
function: (x**2 + y**2 - 1)**3 - x**2*y**3 = 0
'''
def draw_circle():
    # Render an ASCII heart: a point (x, y) is inside the heart curve
    # (x^2 + y^2 - 1)^3 - x^2 * y^3 <= 0, sampled on a 60x30 grid with
    # scale factors 0.05 / 0.1 per character cell.
    line = []
    for y in range(15, -15, -1):
        temp = []
        for x in range(-30, 30, 1):
            temp.append('*' if ((x*0.05)**2 + (y*0.1)**2 - 1)**3 - (x*0.05)**2*(y*0.1)**3 <= 0 else ' ')
        line.append(''.join(temp))
    for i in line:
        print(i)
        time.sleep(0.1)  # slow reveal, one row at a time
    # Draw a 5x50 box with the word "welcome!" centred on the middle row.
    for i in range(5):
        for j in range(50):
            if(i == 0 or i == 4):
                print('*', end='')
            else:
                if(i == 2):
                    if(j == 0 or j == 49):
                        print("*", end="")
                    else:
                        # Columns 20-27 spell 'welcome!'; (j+4) % 8 indexes the word.
                        print('welcome!'[(j+4)%8] if (20 <= j <= 27) else " ", end="")
                else:
                    print('*' if j == 0 or j == 49 else ' ', end='')
        print("\n")
# Show the welcome banner, then explain the per-line input format.
draw_circle()
print("请按行输入分数或等级,和对应的学分,用空格分隔。例如:90 3; 优秀 2;\n输入#结束并打印百分制GPA和平均绩点;")
'''
西电GPA计算;
2018年修订,4分制:”https://info.xidian.edu.cn/info/1015/16588.htm“
'''
def case0(score):
    """Xidian University (2018, 4.0 scale): map a score or letter grade to (percent, GPA point)."""
    rank = {"优秀":95, "通过":75, "不通过":0, "良好":85, "中等":75, "及格":65, "不及格":0}
    try:
        s = int(score)
    except ValueError:
        # Letter grade: look up its representative percent score.
        s = float(rank[score])
    # (inclusive lower bound, grade point), scanned from the top band down.
    bands = ((95, 4.0), (90, 3.9), (84, 3.8), (80, 3.6), (76, 3.4), (73, 3.2),
             (70, 3.0), (67, 2.7), (64, 2.4), (62, 2.2), (60, 2.0))
    c = 0
    if 60 <= s <= 100:
        for lower, point in bands:
            if s >= lower:
                c = point
                break
    return s, c
'''
安徽大学GPA计算;
2017年更新,5分制:”http://jwc.ahu.edu.cn/main/show.asp?id=4802“
'''
def case1(score):
    """Anhui University (2017, 5-point scale): linear mapping c = s/10 - 5."""
    rank = {"优秀":95, "良好":85, "中等":75, "及格":60, "不及格":0}
    try:
        s = int(score)
    except ValueError:
        # Letter grade: look up its representative percent score.
        s = float(rank[score])
    return s, (s/10) - 5
'''
南开大学GPA计算
2019年征求意见稿:“https://chem.nankai.edu.cn/res/bk/2019/南开大学本科课程成绩绩点制管理办法(征求意见稿).pdf”
备注:南开的优秀、良好等都对应了好几等百分制成绩和绩点,这里都取最高的;通过和不通过没有给绩点只给了百分制范围,这里分别按照80和0计算;
'''
def case2(score):
    """Nankai University (2019 draft): banded 4.0-scale GPA (highest value per letter grade)."""
    rank = {"优秀":100, "良好":89, "一般":79, "及格":69, "不及格":0, "通过":80, "不通过":0}
    try:
        s = int(score)
    except ValueError:
        s = float(rank[score])
    # (inclusive lower bound, grade point), scanned from the top band down.
    bands = ((94, 4.0), (90, 3.7), (87, 3.3), (83, 3.0), (80, 2.7),
             (77, 2.3), (73, 2.0), (70, 1.7), (67, 1.3), (60, 1.0))
    c = 0
    if 60 <= s <= 100:
        for lower, point in bands:
            if s >= lower:
                c = point
                break
    return s, c
'''
兰州大学本科生GPA:”http://archives.lzu.edu.cn/pub/search/pub_default.asp?fmt=&fopen=&showtitle=&showbtn=&fpub=1&fid=320&id=29“
'''
def case3(score):
    """Lanzhou University: GPA = (s - decade_base)/10 + base_point within each 10-point band."""
    rank = {"优":95, "良":85, "中":75, "及格":65, "不及格":50}
    try:
        s = int(score)
    except ValueError:
        s = float(rank[score])
    c = 0
    if 60 <= s <= 100:
        # (band lower bound, grade point at that bound); 90 covers 90-100.
        for base, offset in ((90, 4.0), (80, 3.0), (70, 2.0), (60, 1.0)):
            if s >= base:
                c = (s - base) / 10 + offset
                break
    return s, c
'''
郑州大学2017年GPA计算:”http://www5.zzu.edu.cn/flfg/info/1027/1378.htm“
'''
def case4(score):
    """Zhengzhou University (2017): banded 4.0-scale GPA."""
    # Letter grades mapped to representative percent scores.  Fix: the
    # original mapped 良 (good) to 75 — identical to 中 (average) — while
    # every other rank table in this file uses 85 for 良; 75 was a typo.
    rank = {"优":95, "良":85, "中":75, "及格":65, "差":0}
    try:
        s = int(score)
    except ValueError:
        s = float(rank[score])
    # (inclusive lower bound, grade point), scanned from the top band down.
    c = 0
    if 60 <= s <= 100:
        for lower, point in ((90, 4.0), (85, 3.7), (80, 3.2), (75, 2.7),
                             (70, 2.2), (65, 1.7), (60, 1.2)):
            if s >= lower:
                c = point
                break
    return s, c
'''
浙大GPA算法
2009年:”http://kyjs.zju.edu.cn/chinese_old/redir.php?catalog_id=711168&object_id=712935“
'''
def case5(score):
    """Zhejiang University (2009): 4.0 cap at 85 and above, linear below."""
    rank = {"优":100, "良":80, "中":70, "及格":60, "不及格":0, "合格":75, "不合格":0}
    try:
        s = int(score)
    except ValueError:
        s = float(rank[score])
    # Guard clauses: out-of-range first, then the capped band.
    if s < 60 or s > 100:
        c = 0
    elif s >= 85:
        c = 4.0
    else:
        c = (s - 60) / 10 + 1.5
    return s, c
'''
北大GPA算法:课程绩点=4-3(100-X)^2/1600(60≤X≤100)
非百分制课程成绩,综合性考试、毕业论文成绩等均不参与平均学分绩点(GPA)计算。EX、I、IP、P、NP、W 均不参与平均学分绩点(GPA)计算。
2019年,”http://www.dean.pku.edu.cn/web/rules_info.php?id=12“
'''
def case6(score):
    """Peking University (2019): GPA = 4 - 3*(100-s)^2/1600 for 60 <= s <= 100."""
    try:
        s = float(score)
    except ValueError:
        # Non-percentage grades are excluded from PKU GPA; (-1, -1) signals
        # the caller to skip this entry.
        print("北大非百分制成绩不参与GPA计算,请继续输入百分制成绩;")
        return -1, -1
    c = 4 - 3 * (100-s)**2 / 1600 if 60 <= s <= 100 else 0
    return s, c
# Dispatch table: university code (as entered) -> its conversion function.
switch = {'0':case0,'1':case1, '2':case2, '3':case3, '4':case4, '5':case5, '6':case6}
while(True):
    line = input()
    if(line == '#'): #input "#" to exit
        break
    try:
        # Each line is "<score-or-grade> <credits>".
        score = line.split(" ")[0]
        credit_full = int(line.split(" ")[1])
    except IndexError:
        # NOTE(review): a non-numeric credit raises ValueError, which is not
        # caught here — only a missing field (IndexError) is handled.
        print("IndexError; 请按行输入分数或等级,和对应的学分,用空格分隔。例如:90 3; 优秀 2")
        continue
    s, c = switch[university](score)
    # Workaround so PKU's (-1, -1) sentinel (letter grades) is not counted.
    # NOTE(review): this also drops a genuine zero-score fail (s == 0, c == 0)
    # from both sums — confirm that is intended.
    if(s >0 or c > 0):
        course_credit_sum += credit_full
        score_sum += s * credit_full
        credit_sum += c * credit_full
# Credit-weighted averages; raises ZeroDivisionError if no course was entered.
print("您的百分制GPA为:", score_sum / course_credit_sum)
print("您的平均绩点GPA为:", credit_sum / course_credit_sum)
os.system("pause")
ea6f5e48ea8f7167120cb5e9b1f9079c4ae12b57 | Python | zbqq/libwave | /scripts/test/statistics.py | UTF-8 | 633 | 2.640625 | 3 | [
"MIT"
] | permissive | import pandas as pd
from pathlib import Path
import sys
if __name__ == "__main__":
    # Usage: python statistics.py <directory>
    if len(sys.argv) != 2:
        sys.exit()
    # Recursively collect whitespace-delimited error tables.
    error_files = Path(sys.argv[1]).glob('**/*.txt')
    for err in error_files:
        data = pd.read_csv(err, delim_whitespace=True)
        # Column-wise means over all rows of this file.
        mean_data = data.mean(axis=0)
        filename = err.stem
        savename = filename + "_avg.txt"
        savepath = Path(sys.argv[1]) / savename
        # 57.29577951 = 180/pi, i.e. radians -> degrees; column 1 is
        # presumably a rotation error in radians — TODO confirm with writer.
        degrees = mean_data[1] * 57.29577951
        with open(str(savepath), 'w') as file:
            file.write("%.17f" % degrees)
            file.write(" ")
            file.write("%.17f" % mean_data[2])
| true |
ce1ded1c188ff877541e9ec9f76fbe11db07178b | Python | Netharria/Imperialist | /bot.py | UTF-8 | 6,389 | 3 | 3 | [] | no_license | import random
import discord
import typing
from discord.ext import commands
import length
import temperature
import volume
import weight
bot = commands.Bot(command_prefix="imp.")
@bot.event
async def on_ready():
    # Fired once the gateway connection is ready; set the "Watching ..." status.
    await bot.change_presence(
        activity=discord.Activity(
            type=discord.ActivityType.watching, name="over her vast colonies."
        )
    )
    print("Bot is ready.")
@bot.command()
async def ping(ctx):
    """Report the websocket latency in milliseconds."""
    embed = discord.Embed(
        title="**Ping**", description=f"Pong! {round(bot.latency * 1000)}ms"
    )
    embed.set_author(name=f"{bot.user.display_name}", icon_url=bot.user.avatar_url)
    await ctx.send(embed=embed)
# 8ball random answer
def is_in_channel():
    """Command check: only allow the hard-coded 8-ball channel."""
    return commands.check(lambda ctx: ctx.channel.id == 390318225440768000)
def has_permissions():
    """Command check: require the Manage Messages permission."""
    return commands.check(lambda ctx: ctx.author.guild_permissions.manage_messages is True)
@bot.command(aliases=["8ball"])
@commands.check_any(is_in_channel(), has_permissions())
async def _8ball(ctx, *, question):
    """Answer `question` with a random Magic 8-Ball response.

    Allowed in the dedicated channel, or anywhere for users who can manage
    messages (see the two checks above).
    """
    # The twenty classic Magic 8-Ball answers.
    responses = [
        "It is certain.",
        "It is decidedly so.",
        "Without a doubt.",
        "Yes - definitely.",
        "You may rely on it.",
        "As I see it, yes.",
        "Most likely.",
        "Outlook good.",
        "Yes.",
        "Signs point to yes.",
        "Reply hazy, try again.",
        "Ask again later.",
        "Better not tell you now.",
        "Cannot predict now.",
        "Concentrate and ask again.",
        "Don't count on it.",
        "My reply is no.",
        "My sources say no.",
        "Outlook not so good.",
        "Very doubtful.",
    ]
    # Random profession for the tongue-in-cheek disclaimer footer.
    professions = [
        "healthcare professional",
        "artist",
        "business professional",
        "transport engineer",
        "military officer",
        "accountant",
        "social worker",
        "ship captain",
        "vocalist",
        "linguist",
        "musician",
        "HR representative",
        "scientist",
        "home inspector",
        "cable technician",
        "plumber",
        "electrian",
        "structural engineer",
        "arborist",
    ]
    embed = discord.Embed(
        title=f"**Magic 8Ball**",
        description=f"**Question: {question}\n\nAnswer: {random.choice(responses)}**",
        color=0x8000FF,
    )
    embed.set_author(name=f"{ctx.author.display_name}", icon_url=ctx.author.avatar_url)
    embed.set_footer(
        text=f"This is not intended to give actual advice. | "
        f"For actual advice, please consult a trained {random.choice(professions)}."
    )
    await ctx.send(embed=embed)
@bot.command()
async def roll(ctx, *, limit: int = 100):
    """Roll a random number between 1 and `limit` (default 100)."""
    rolled = random.randint(1, int(limit))
    embed = discord.Embed(title=f"**Roll**", description=f"**{rolled}/{limit}**")
    embed.set_author(name=f"{bot.user.display_name}", icon_url=bot.user.avatar_url)
    await ctx.send(embed=embed)
# temp Commands
@bot.command()
async def temp(ctx, start_temp: float, start_unit: str, word, destination_unit: str):
    """Convert a temperature, e.g. `imp.temp 10 F to C` (`word` swallows the literal 'to')."""
    # convert_temp returns False when the unit pair is not recognised.
    result = temperature.convert_temp(start_temp, start_unit, destination_unit)
    # Pretty display names for the units.
    start_unit = temperature.convert_units(start_unit)
    end_unit = temperature.convert_units(destination_unit)
    if result is False:
        await ctx.send(f"The Correct format is Exp:`imp.temp 10 F to C`")
    else:
        embed = discord.Embed(
            title=f"**Temperature Conversions**",
            description=f"{start_temp} {start_unit} is {result:.2f} {end_unit}",
        )
        embed.set_author(name=f"{bot.user.display_name}", icon_url=bot.user.avatar_url)
        await ctx.send(embed=embed)
# Length command
@bot.command(aliases=["len"])
async def _len(ctx, start_len: float, start_unit: str, word, destination_unit: str):
    """Convert a length, e.g. `imp.len 10 km to mi` (`word` swallows the literal 'to')."""
    # convert_length returns False when the unit pair is not recognised.
    result = length.convert_length(start_len, start_unit, destination_unit)
    start_unit = length.convert_unit(start_unit)
    end_unit = length.convert_unit(destination_unit)
    if result is False:
        await ctx.send(
            f"The Correct format is Exp:`imp.len 10 km to mi` Valid units are km m cm mm mi yd ft in"
        )
    else:
        embed = discord.Embed(
            title=f"**Length Conversions**",
            description=f"{start_len} {start_unit} is {result:.4f} {end_unit}",
        )
        embed.set_author(name=f"{bot.user.display_name}", icon_url=bot.user.avatar_url)
        await ctx.send(embed=embed)
# Length command
@bot.command()
async def wgt(ctx, start_wgt: float, start_unit, word, destination_unit):
    """Convert a weight, e.g. `imp.wgt 180 lb to kg` (`word` swallows the literal 'to')."""
    # convert_wgt returns False when the unit pair is not recognised.
    result = weight.convert_wgt(start_wgt, start_unit, destination_unit)
    start_unit = weight.convert_unit(start_unit)
    end_unit = weight.convert_unit(destination_unit)
    if result is False:
        await ctx.send(
            f"The Correct format is Exp:`imp.wgt 180 lb to kg` Valid units are kg g lb oz"
        )
    else:
        embed = discord.Embed(
            title=f"Weight Conversions",
            description=f"{start_wgt} {start_unit} is {result:.2f} {end_unit}",
        )
        embed.set_author(name=f"{bot.user.display_name}", icon_url=bot.user.avatar_url)
        await ctx.send(embed=embed)
@bot.command()
async def vol(ctx, start_vol: float, start_unit, word, destination_unit):
    """Convert a volume, e.g. `imp.vol 180 L to gal` (`word` swallows the literal 'to')."""
    # convert_vol returns False when the unit pair is not recognised.
    result = volume.convert_vol(start_vol, start_unit, destination_unit)
    start_unit = volume.convert_units(start_unit)
    end_unit = volume.convert_units(destination_unit)
    if result is False:
        await ctx.send(
            f"The Correct format is Exp:`imp.vol 180 L to gal` Valid units are L mL gal qt pt c oz tbsp tsp"
        )
    else:
        embed = discord.Embed(
            title=f"Volume Conversions",
            description=f"{start_vol} {start_unit} is {result:.2f} {end_unit}",
        )
        embed.set_author(name=f"{bot.user.display_name}", icon_url=bot.user.avatar_url)
        await ctx.send(embed=embed)
@roll.error
@temp.error
@_len.error
@wgt.error
@vol.error
async def temp_error(ctx, error):
    # Shared error handler for all numeric commands: bad argument conversion
    # (e.g. a non-numeric value for a float/int parameter).
    if isinstance(error, commands.BadArgument):
        await ctx.send("Please enter a valid number.")
@bot.command()
async def hey(ctx, user: typing.Union[discord.Member, discord.TextChannel] = "me"):
    """Greet a member or channel; with no argument ("me" sentinel), greet the invoker."""
    if user == "me":
        user = ctx.author
    await ctx.send(f"Hi {user.mention}")
# Read the bot token from the first line of the local `token` file and start the bot.
with open("token", "r") as f:
    bot.run(f.readline().strip())
| true |
bcb093d8b0cda65d1a7653d73ebd196d8081aaeb | Python | Suraj-KD/AbsentiaVR_Task | /app.py | UTF-8 | 3,463 | 3.15625 | 3 | [] | no_license | from facebook_imp import page_id
from campaigns import createCamp
from campaigns import readCampaigns
from campaigns import deleteCampaigns
from campaigns import pauseCamp
from campaigns import startCamp
from adsets import createAdset
from adsets import readAdsets
from adsets import deleteAdset
from adsets import pauseAdset
from adsets import startAdset
from ads import adcreate
from ads import readADS
from ads import deleteAd
from ads import pauseAD
from ads import startAd
x = 'yes'
while(x == 'y' or x == 'yes'):
print('Starting the App:-')
print('What would you like to do?')
print('1. Create a Campaign')
print('2. Create Adset within a Campaign')
print('3. Create Ad within a Adset')
print('4. Read Campaign')
print('5. Read Adset')
print('6. Read Ad')
print('7. Delete Campaign')
print('8. Delete Adset')
print('9. Delete Ad')
print('10. Turn Off Campaign')
print('11. Turn Off Adset')
print('12. Turn Off Ad')
print('13. Activate a Campaign')
print('14. Activate a Adset')
print('15. Activate a Ad')
userInput = input('Your Input: ')
if (userInput == '1' or userInput == 'campaign' or userInput == 'Campaign'):
print('Creating Campaign')
campID = createCamp()
print('Campaign with ID: ', campID, ' created!')
elif(userInput == '2' or userInput == 'adset' or userInput == 'Adset'):
campID = input("Enter CampID : ")
print('Creating Adset')
print('campID: ', campID)
adSetID = createAdset(campID)
print('Adset with ID: ', adSetID, 'created!')
elif(userInput == '3' or userInput == 'Ad' or userInput == 'ad'):
adSetID = input("Enter AdSetID : ")
print('Creating Ad')
print('Adset ID: ',adSetID)
adID = adcreate(adSetID, page_id)
print('Ad with ID: ', adID,' created!')
elif(userInput == '4'):
print('Printing Campaign')
readCampaigns()
elif(userInput == '5'):
print('Printing Adset')
readAdsets()
elif(userInput == '6'):
print('Previewing Ad')
readADS()
elif(userInput == '7'):
print('Deleting Campaign')
campIDD = input('Please enter the campaign ID: ')
deleteCampaigns(campIDD)
elif(userInput == '8'):
print('Deleting Adset')
adSetID = input('Please enter the Adset ID: ')
deleteAdset(adSetID)
elif(userInput == '9'):
print('Deleting Ad')
adIDD = input('Please enter Ad ID you wish to delete: ')
deleteAd(adIDD)
elif(userInput == '10'):
print('Turning Off Campaign')
campIDD = input('Please enter the Campaign ID you wish to pause: ')
pauseCamp(campIDD)
elif(userInput == '11'):
print('Turning Off Adset')
adSetID = input('Please enter the Adset ID you wish to pause: ')
pauseAdset(adSetID)
elif(userInput == '12'):
print('Turning Off Ad')
adID = input('Please enter the Ad ID you wish to pause: ')
pauseAD(adID)
elif(userInput == '13'):
campID = input('CampID: ')
startCamp(campID)
elif(userInput == '14'):
adSetID = input('Adset ID: ')
startAdset(adSetID)
elif(userInput == '15'):
adIDD = input('Ad ID: ')
startAd(adIDD)
else:
print('Please select valid options')
x = input('Would you like to continue(y/n)?')
| true |
a77c46f8cf2e2f3630d91e484f7c6b57119300b9 | Python | trytonus/trytond-async | /tasks.py | UTF-8 | 3,145 | 2.578125 | 3 | [] | permissive | # -*- coding: UTF-8 -*-
"""
trytond_async.tasks
Implements the actual task runners.
Usual celery projects would have the method/functions which have the code
to run as tasks. However, the tryton inheritance and majority of tryton
code being in class and instance methods makes it hard for the pattern to
be followed. Read more about the design on the getting started
documentation of this module.
"""
from trytond import backend
from trytond.transaction import Transaction
from trytond.pool import Pool
from trytond.cache import Cache
from trytond_async.app import app
class RetryWithDelay(Exception):
"""
A special case of exception meant to be used by Tryton models to
indicate to the worker that the task needs to be retried. This is
needed because Tryton models itself are ignorant to the invocation from
regular model code and asynchronously through workers!
:param delay: Delay in seconds after which the task should be retried
"""
def __init__(self, delay=5, *args, **kwargs):
super(RetryWithDelay, self).__init__(*args, **kwargs)
self.delay = delay
def _execute(app, database, user, payload_json):
"""
Execute the task identified by the given payload in the given database
as `user`.
"""
if database not in Pool.database_list():
# Initialise the database if this is the first time we see the
# database being used.
with Transaction().start(database, 0, readonly=True):
Pool(database).init()
with Transaction().start(database, 0):
Cache.clean(database)
with Transaction().start(database, user) as transaction:
Async = Pool().get('async.async')
DatabaseOperationalError = backend.get('DatabaseOperationalError')
# De-serialize the payload in the transaction context so that
# active records are constructed in the same transaction cache and
# context.
payload = Async.deserialize_payload(payload_json)
try:
with Transaction().set_context(payload['context']):
results = Async.execute_payload(payload)
except RetryWithDelay as exc:
# A special error that would be raised by Tryton models to
# retry the task after a certain delay. Useful when the task
# got triggered before the record is ready and similar cases.
transaction.connection.rollback()
raise app.retry(exc=exc, countdown=exc.delay)
except DatabaseOperationalError as exc:
# Strict transaction handling may cause this.
# Rollback and Retry the whole transaction if within
# max retries, or raise exception and quit.
transaction.connection.rollback()
raise app.retry(exc=exc)
except Exception:
transaction.connection.rollback()
raise
else:
transaction.connection.commit()
return results
@app.task(bind=True, default_retry_delay=2)
def execute(app, database, user, payload_json):
return _execute(app, database, user, payload_json)
| true |
ddd21e3c933a703a122f9d4e307de4da7aea521f | Python | learlinian/Python-Leetcode-Solution | /40. Combination Sum II.py | UTF-8 | 1,113 | 3.21875 | 3 | [] | no_license | class Solution(object):
def combinationSum2(self, candidates, target):
candidates.sort()
candidates.reverse()
self.results = []
def test(candidates, target, combination):
for i in range(len(candidates)):
if candidates[i] > target:
continue
# print(combination, target)
combination.append(candidates[i])
if candidates[i] == target:
temp = list(combination)
temp.sort()
# print('temp: ' + str(temp))
if temp not in self.results:
self.results.append(temp)
elif candidates[i] < target:
test(candidates[i+1:], target-candidates[i], combination)
if combination:
del combination[-1]
test(candidates, target, [])
return self.results
if __name__ == '__main__':
candidates = [2,5,2,1,2]
target = 5
print(Solution().combinationSum2(candidates, target))
| true |
c330b69044089459b1d483c5466d7fc3b9a63acd | Python | AK-1121/code_extraction | /python/python_16087.py | UTF-8 | 163 | 2.546875 | 3 | [] | no_license | # Remove ASCII control characters from text file Python
>>> import string
>>> filter(string.printable.__contains__, '\x00\x01XYZ\x00\x10')
'XYZ'
| true |
00ae74f2ff29196c6e2990aced32736ddba1f7e8 | Python | ikhwan1366/Datacamp | /Data Engineer with Python Track/16. Database Design/Chapter/04. Database Management/06-Creating vertical partitions.py | UTF-8 | 1,851 | 4.28125 | 4 | [] | no_license | '''
Creating vertical partitions
In the video, you learned about vertical partitioning and saw an example.
For vertical partitioning, there is no specific syntax in PostgreSQL. You have to create a new table with particular columns and copy the data there. Afterward, you can drop the columns you want in the separate partition. If you need to access the full table, you can do so by using a JOIN clause.
In this exercise and the next one, you'll be working with the example database called pagila. It's a database that is often used to showcase PostgreSQL features. The database contains several tables. We'll be working with the film table. In this exercise, we'll use the following columns:
- film_id: the unique identifier of the film
- long_description: a lengthy description of the film
Instructions 1/2
50 XP
-Create a new table film_descriptions containing 2 fields: film_id, which is of type INT, and long_description, which is of type TEXT.
-Occupy the new table with values from the film table.
'''
-- Create a new table called film_descriptions
CREATE TABLE film_descriptions(
film_id int,
long_description text
)
-- Copy the descriptions from the film table
INSERT INTO film_descriptions
SELECT film_id, long_description FROM film
'''
Instructions 2/2
50 XP
- Drop the field long_description from the film table.
- Join the two resulting tables to view the original table.
'''
-- Create a new table called film_descriptions
CREATE TABLE film_descriptions(
film_id INT,
long_description TEXT
)
-- Copy the descriptions from the film table
INSERT INTO film_descriptions
SELECT film_id, long_description FROM film
-- Drop the descriptions from the original table
ALTER TABLE film DROP COLUMN long_description
-- Join to view the original table
SELECT * FROM film
JOIN film_descriptions USING(film_id)
| true |
ba5bdb29792f4b28c85017d6a398442ca4ff7786 | Python | bcc008/metroinsight | /steps/experiments.py | UTF-8 | 2,177 | 2.921875 | 3 | [] | no_license | import itertools
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.cluster import KMeans
import xgboost as xgb
import keras
"""
Modify these functions to experiment with different machine learning models.
Input = Train and test data and (if classification) their labels
Output = Predictions after fit
Cluster experiment: Clustering algorithm used in cluster class.
Binary_clf_experiment: Classification algorithm used in binary classification stage.
Multi_clf_experiment: Classification algorithm used in multi classification stage.
"""
def version():
    """Return the experiment-definition version number."""
    return 0
def cluster(X_train, X_test):
    """Fit KMeans (k=2) on the training split and return labels for both splits."""
    model = KMeans(n_clusters=2, init='k-means++', n_jobs=30, random_state=1337)
    model.fit(X_train)
    return model.predict(X_train), model.predict(X_test)
# use function to provide LIST of parameters to try
def binary_clf_parameters():
    """Return the list of parameter dicts to try for the binary classifier."""
    n_estimators = [128]
    criterion = ['gini']
    # One dict per (n_estimators, criterion) combination.
    return [{'n_estimators': trees, 'criterion': crit, 'n_jobs': 30, 'random_state': 1337}
            for trees, crit in itertools.product(n_estimators, criterion)]
def binary_clf(X_train, X_test, y_train, y_test, params):
    # Fit a RandomForest with `params` and predict the test split.
    clf = RandomForestClassifier()
    # NOTE(review): `model` is captured *before* set_params, so the string
    # describes the default hyper-parameters, not `params` — confirm intended.
    model = str(clf)
    clf.set_params(**params)
    clf.fit(X_train, y_train)
    predictions = clf.predict(X_test)
    # y_test is unused; kept for a uniform experiment signature.
    # Returns (test predictions, model description string, fitted estimator).
    return predictions, model, clf
# use function to provide LIST of parameters to try
def multi_clf_parameters():
    """Return the list of parameter dicts to try for the multi-class classifier."""
    estimator_counts = [128]
    split_criteria = ['gini']
    combos = itertools.product(estimator_counts, split_criteria)
    # One dict per (n_estimators, criterion) combination.
    return [{'n_estimators': n, 'criterion': c, 'n_jobs': 30, 'random_state': 1337}
            for n, c in combos]
def multi_clf(X_train, X_test, y_train, y_test, params):
    """Fit a RandomForest for the multi-class stage and predict ``X_test``.

    Returns (predictions, model_description, fitted_classifier); the
    description string reflects the classifier before ``params`` are set.
    """
    forest = RandomForestClassifier()
    description = str(forest)
    forest.set_params(**params)
    forest.fit(X_train, y_train)
    return forest.predict(X_test), description, forest
d02fa4ce98566470cca960b13e8dbbc8925b0507 | Python | jim58/moose | /numeric.py | UTF-8 | 1,859 | 2.90625 | 3 | [] | no_license | #!/usr/bin/python3
# dsky.py - a DiSplay KeYboard emulator
#
# Recreates the Apollo DSKY numeric panel with tkinter Labels: a COMP ACTY
# indicator, PROG/VERB/NOUN two-digit registers, three signed five-digit
# data rows and a QUIT button.
import tkinter as tk

root = tk.Tk()
root.geometry("200x400+30+30")

# Indicator plus one name/value label pair per register.
comp = tk.Label(root, fg='Black', bg='lightgrey', text="COMP\nACTY")
prog_name = tk.Label(root, fg='Black', bg='lightgrey', text="PROG")
prog_value = tk.Label(root, fg='Black', bg='lightgrey', text="13")
verb_name = tk.Label(root, fg='Black', bg='lightgrey', text="VERB")
verb_value = tk.Label(root, fg='Black', bg='lightgrey', text="33")
noun_name = tk.Label(root, fg='Black', bg='lightgrey', text="NOUN")
noun_value = tk.Label(root, fg='Black', bg='lightgrey', text="13")

# Three signed five-digit display rows.
num1 = tk.Label(root, fg='Black', bg='lightgrey', text="-92311")
num2 = tk.Label(root, fg='Black', bg='lightgrey', text="+13270")
num3 = tk.Label(root, fg='Black', bg='lightgrey', text="-46514")

# Large font for the data rows, small bold for register names, medium bold
# for register values.
num1['font'] = "terminal 36 bold"
num2['font'] = "terminal 36 bold"
num3['font'] = "terminal 36 bold"
verb_name['font'] = "terminal 12 bold"
verb_value['font'] = "terminal 26 bold"
noun_name['font'] = "terminal 12 bold"
noun_value['font'] = "terminal 26 bold"
# Bug fix: this line previously re-set noun_name's font a second time,
# leaving prog_name without the small bold font the other name labels get.
prog_name['font'] = "terminal 12 bold"
prog_value['font'] = "terminal 26 bold"

# Renamed from ``quit`` so the builtin is not shadowed.
quit_button = tk.Button(root, text="QUIT", fg="red",
                        command=root.destroy)

# Absolute layout mirroring the DSKY face.
comp.place(x = 10, y = 10, width=100, height=50)
prog_name.place(x = 115, y = 10, width=100, height=15)
prog_value.place(x = 115, y = 25, width=100, height=35)
verb_name.place(x = 10, y = 70, width=100, height=15)
verb_value.place(x = 10, y = 85, width=100, height=35)
noun_name.place(x = 115, y = 70, width=100, height=15)
noun_value.place(x = 115, y = 85, width=100, height=35)
num1.place(x = 10, y = 120, width=205, height=50)
num2.place(x = 10, y = 170, width=205, height=50)
num3.place(x = 10, y = 220, width=205, height=50)
quit_button.place(x=100, y = 300)

root.title("DSKY Numeric")
root.minsize(280, 400)
root.mainloop()
7b3cae6ff583faec24c27970d37285e327aca61c | Python | catapult-project/catapult | /dashboard/dashboard/common/clustering_change_detector.py | UTF-8 | 9,968 | 2.75 | 3 | [
"BSD-3-Clause"
] | permissive | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""An approximation of the E-Divisive change detection algorithm.
This module implements the constituent functions and components for a change
detection module for time-series data. It derives heavily from the paper [0] on
E-Divisive using hierarchical significance testing and the Euclidean
distance-based divergence estimator.
[0]: https://arxiv.org/abs/1306.4933
"""
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import itertools
import logging
import random
from dashboard.common import math_utils
# TODO(dberris): Remove this dependency if/when we are able to depend on SciPy
# instead.
from dashboard.pinpoint.models.compare import compare as pinpoint_compare
# This number controls the maximum number of iterations we perform when doing
# permutation testing to identify potential change-points hidden in the
# sub-clustering of values. The higher the number, the more CPU time we're
# likely to spend finding these potential hidden change-points.
_PERMUTATION_TESTING_ITERATIONS = 199
# This is the threshold percentage that permutation testing must meet for us to
# consider the sub-range that might contain a potential change-point.
_MIN_SIGNIFICANCE = 0.95
# The subsampling length is the maximum length we're letting the permutation
# testing use to find potential rearrangements of the underlying data.
_MAX_SUBSAMPLING_LENGTH = 20
# Extend the change range based on the estimate value in the range of tolerance.
_CHANGE_RANGE_TOLERANCE = 0.90
class Error(Exception):
  """Base class for all errors raised by this module."""
  pass
class InsufficientData(Error):
  """Raised when a sequence is too short to detect change points."""
  pass
def Cluster(sequence, partition_point):
  """Split *sequence* into (left, right); ``partition_point`` starts right."""
  left = sequence[:partition_point]
  right = sequence[partition_point:]
  return (left, right)
def Midpoint(sequence):
  """Return the midpoint index (the lower middle for even-length input)."""
  last_index = len(sequence) - 1
  return last_index // 2
def ClusterAndCompare(sequence, partition_point):
  """Split *sequence* at *partition_point* and statistically compare the halves.

  Returns a tuple (comparison_result, left_cluster, right_cluster); the
  element at *partition_point* belongs to the right cluster.
  """
  left, right = Cluster(sequence, partition_point)
  # Use the mean inter-quartile range of the two halves as the comparison
  # magnitude when both are large enough to have a meaningful spread.
  if len(left) > 2 and len(right) > 2:
    magnitude = float(math_utils.Iqr(left) + math_utils.Iqr(right)) / 2
  else:
    magnitude = 1
  attempt_count = (len(left) + len(right)) // 2
  result = pinpoint_compare.Compare(left, right, attempt_count, 'performance',
                                    magnitude)
  return (result, left, right)
def PermutationTest(sequence, change_point, rand=None):
  """Estimate the statistical significance of a proposed change point.

  A window of at most _MAX_SUBSAMPLING_LENGTH values centred on
  *change_point* is repeatedly shuffled; the significance is the fraction
  of shuffles whose best change-point estimate is weaker than the estimate
  for the proposed point.

  Arguments:
    - sequence: an iterable of values to perform permutation testing on.
    - change_point: the possible change point calculated by Estimator.
    - rand: an implementation of a pseudo-random generator (see
      random.Random); a fresh default-seeded generator is used when None.

  Returns the significance (in [0, 1]) of the change point we are testing.
  """
  if len(sequence) < 3:
    return 0.0
  if rand is None:
    rand = random.Random()
  # Restrict testing to a bounded window around the change point so the cost
  # of repeatedly re-running the quadratic estimator stays manageable.
  segment_start = max(change_point - (_MAX_SUBSAMPLING_LENGTH // 2), 0)
  segment_end = min(change_point + (_MAX_SUBSAMPLING_LENGTH // 2),
                    len(sequence))
  segment = list(sequence[segment_start:segment_end])
  change_point_q = Estimator(segment, change_point - segment_start)
  # Significance is defined by how many change points in random permutations
  # are less significant than the one we chose. This works because a genuine
  # change point should become weaker once the left and right parts separated
  # by the point are mixed together.
  significance = 0
  for _ in range(_PERMUTATION_TESTING_ITERATIONS):
    rand.shuffle(segment)
    _, q, _ = ChangePointEstimator(segment)
    if q < change_point_q:
      significance += 1
  return float(significance) / (_PERMUTATION_TESTING_ITERATIONS + 1)
def Estimator(sequence, index):
  """E-Divisive divergence estimate for splitting *sequence* at *index*.

  Based on squared Euclidean distances: the scaled inter-cluster distance
  minus each cluster's mean intra-cluster distance, weighted by the cluster
  sizes. Returns NaN when either side of the split is empty.
  """
  left, right = sequence[:index], sequence[index:]
  if not left or not right:
    return float('NaN')
  # Mean squared pairwise distance within each cluster (0 for singletons).
  intra_left = [abs(p - q) ** 2 for p, q in itertools.combinations(left, 2)] or [0.]
  intra_right = [abs(p - q) ** 2 for p, q in itertools.combinations(right, 2)] or [0.]
  # Total squared distance across the two clusters.
  inter = sum(abs(p - q) ** 2 for p, q in itertools.product(left, right))
  n_left = len(left)
  n_right = len(right)
  inter_term = (2.0 / (n_left * n_right)) * inter
  left_term = sum(intra_left) / len(intra_left)
  right_term = sum(intra_right) / len(intra_right)
  e = inter_term - left_term - right_term
  return (e * n_left * n_right) / (n_left + n_right)
def ChangePointEstimator(sequence):
  """Pick the most likely change point in *sequence*.

  Evaluates the Euclidean-distance divergence estimator at every split
  index from 1 to len(sequence) - 1 and returns:

    (index of the first maximum estimate, that estimate, found)

  where ``found`` is False only when the sequence is too short to produce
  any estimate (fewer than two elements).
  """
  margin = 1
  scores = [Estimator(sequence, split)
            for split in range(margin, len(sequence))]
  if not scores:
    return (0, 0, False)
  # Keep the FIRST occurrence of the maximum (strict '>' below), matching
  # the historical tie-breaking behaviour.
  best_offset = 0
  best_score = scores[0]
  for offset, score in enumerate(scores):
    if score > best_score:
      best_offset = offset
      best_score = score
  return (best_offset + margin, best_score, True)
def ExtendChangePointRange(change_point, sequence):
  """Widen a change point into the index range (left, right) around it.

  Neighbouring split indices whose divergence estimate stays within
  _CHANGE_RANGE_TOLERANCE of the peak estimate are treated as part of the
  same change range.
  """
  max_estimate = Estimator(sequence, change_point)
  # Defaults cover the case where the estimate never drops below the
  # tolerance threshold while scanning a given side.
  left, right = 1, len(sequence) - 1
  # Scan leftwards; stop at the first index whose estimate falls below
  # tolerance, keeping the last in-tolerance index.
  # NOTE(review): index 0 is never tested (range stops before 0) —
  # presumably intentional since Estimator(sequence, 0) is NaN; confirm.
  for index in range(change_point, 0, -1):
    if Estimator(sequence, index) < _CHANGE_RANGE_TOLERANCE * max_estimate:
      left = index + 1
      break
  # Scan rightwards symmetrically.
  for index in range(change_point, len(sequence) - 1):
    if Estimator(sequence, index) < _CHANGE_RANGE_TOLERANCE * max_estimate:
      right = index - 1
      break
  return (left, right)
def ClusterAndFindSplit(values, rand=None):
  """Finds a list of indices where we can detect significant changes.

  This algorithm looks for the point at which clusterings of the "left" and
  "right" datapoints show a significant difference. We understand that this
  algorithm is working on potentially already-aggregated data (means, etc.) and
  it would work better if we had access to all the underlying data points, but
  for now we can do our best with the points we have access to.

  In the E-Divisive paper, this is a two-step process: first estimate potential
  change points, then test whether the clusters partitioned by the proposed
  change point internally have potentially hidden change-points through random
  permutation testing. Because the current implementation only returns a single
  change-point, we do the change point estimation through bisection, and use the
  permutation testing to identify whether we should continue the bisection, not
  to find all potential change points.

  Arguments:
    - values: a sequence of values in time series order.
    - rand: a callable which produces a value used for subsequence permutation
      testing.

  Returns:
    - A list of indices into values where we can detect potential split points.
      NOTE(review): each element is actually a tuple
      (index, (range_start, range_end)) — confirm against callers.

  Raises:
    - InsufficientData when the algorithm cannot find potential change points
      with statistical significance testing.
  """
  logging.debug('Starting change point detection.')
  length = len(values)
  if length <= 3:
    raise InsufficientData(
        'Sequence is not larger than minimum length (%s <= %s)' % (length, 3))
  candidate_indices = set()
  # Breadth-first exploration over [start, end) sub-ranges of the sequence.
  exploration_queue = [(0, length)]
  while exploration_queue:
    # Find the most likely change point in the whole range, only excluding the
    # first and last elements. We're doing this because we want to still be able
    # to pick a candidate within the margins (excluding the ends) if we have
    # enough confidence that it is a change point.
    start, end = exploration_queue.pop(0)
    logging.debug('Exploring range seq[%s:%s]', start, end)
    segment = values[start:end]
    partition_point, _, _ = ChangePointEstimator(segment)
    probability = PermutationTest(segment, partition_point, rand)
    logging.debug(
        'Permutation testing change point %d at seq[%s:%s]: %s;'
        ' probability = %.4f', partition_point, start, end,
        probability >= _MIN_SIGNIFICANCE, probability)
    # Sub-ranges whose best split is not significant are abandoned entirely.
    if probability < _MIN_SIGNIFICANCE:
      continue
    lower, upper = ExtendChangePointRange(partition_point, segment)
    if lower != partition_point or upper != partition_point:
      logging.debug('Extending change range from %d to %d-%d.',
                    partition_point, lower, upper)
    # Record the candidate (as an absolute index plus its extended range) and
    # keep bisecting on both sides of the split.
    candidate_indices.add(
        (start + partition_point, (start + lower, start + upper)))
    exploration_queue.append((start, start + partition_point))
    exploration_queue.append((start + partition_point, end))
  if not candidate_indices:
    raise InsufficientData('Not enough data to suggest a change point.')
  return [c for c in sorted(candidate_indices)]
| true |
c1415ba85f4c9c341f0838110ea900de72d6faed | Python | ajila123/security-surveillance | /numphy.py | UTF-8 | 1,608 | 3.265625 | 3 | [] | no_license | import numpy as np
def numpybasics():
    """Demonstrate numpy array creation, slicing, copying and concatenation."""
    floats_a = np.array([1.45, 7.8])
    print(floats_a)
    floats_b = np.array([2.5, 55.7])
    print(floats_b)
    chars = np.array(['a', 'c'])
    print(chars)
    # Mixed input is coerced to a single (string) dtype by numpy.
    mixed = np.array([2, 4.5, 'g'])
    print(mixed)
    middle = mixed[1:2]
    print(middle)
    prefix = mixed[:2]
    print(prefix)
    numbers = np.array([1, 3, 4, 6, 7, 8, 9, 99, 77, 888, 900])
    print(numbers)
    leading = numbers[0:4]
    print(leading)
    plain_list = [1, 2, 3, 4]
    plain_list.append(6)
    print(plain_list)
    from_list = np.array(plain_list)
    print(from_list)
    duplicate = from_list.copy()
    print(duplicate)
    joined = np.concatenate((mixed, prefix))
    print(joined)
    matches = np.where(leading > 2)
    print(matches)
    leading[2] = 7
    print(leading)
def numpysort():
    """Demonstrate np.sort on integer, string and float arrays."""
    for raw in ([6, 5, 9, 2, 43, 21],
                ['ami', 'aji', 'anu', 'adhi'],
                [1.25, 1.05, 1.00, 1.75, 1.95]):
        values = np.array(raw)
        print(values)
        print(np.sort(values))
#numpysort()
def numpyfilter():
    """Demonstrate boolean-mask filtering of a numpy array.

    Prints the source array, the boolean mask ``values > 5`` and the
    filtered result. The mask variable was renamed from ``filter`` to
    avoid shadowing the builtin of the same name.
    """
    values = np.array([2, 5, 7, 8, 1])
    print(values)
    mask = values > 5
    print(mask)
    selected = values[mask]
    print(selected)
#numpyfilter()
def numpymultdmntnarry():
    """Demonstrate element-wise addition of two 2-D numpy arrays."""
    first = np.array([[1, 2], [2, 3], [1, 4]])
    print(first)
    second = np.array([[1, 6], [2, 7], [6, 5]])
    print(second)
    print(np.add(first, second))
#numpymultdmntnarry()
| true |
0371c074c7997f6bd607d439fa6633adb0d75c6a | Python | phhhhhj/python_vision | /house/product_crawling.py | UTF-8 | 743 | 2.546875 | 3 | [] | no_license | import requests
from bs4 import BeautifulSoup
import pymysql as my

# Scraper prototype for product listings on ohou.se (store category pages).
# Category ids (translated from the original Korean comment):
# storage/organization 11, household goods 4, kitchen 5, DIY installation 6,
# installation/services 7, pets 8
category_num = [11, 4, 5, 6, 7, 8, ]

# id column: auto-increment (not scraped)
name_list = []
# category: taken from category_num above
price_list = []
brand_list = []
photo_list = []
discount_list = []
# keyword: TODO — field not collected yet (no list defined)

# Fetch one category page and dump every <div> while selector work is in
# progress; the commented loop below is the eventual per-category crawl.
page = requests.get('https://ohou.se/store/category?affect_type=StoreHamburger&category=11')
soup = BeautifulSoup(page.content, 'html.parser')
name = soup.select('div')
print(name)
for x in name:
    print(x)

# for i in category_num:
#     page = requests.get('https://ohou.se/store/category?affect_type=StoreHamburger&category=' + str(i))
#     soup = BeautifulSoup(page.content, 'html.parser')
| true |
09bb1f0f67435e06e5a90f01916f8bafce92a9f9 | Python | gchaturvedi/tweethealth | /tweethealth/tests/basic_functional_test.py | UTF-8 | 1,811 | 2.578125 | 3 | [
"MIT"
] | permissive | """
These are basic functional unit tests site-wide. They
test things such as 404s being displayed, general homepage checks.
"""
from django.test import TestCase
from django.test.client import Client
from django.test.client import RequestFactory
from tweethealth import views
class BasicFunctionalTest(TestCase):
    """Site-wide smoke tests: homepage variants, about page and 404 routing."""

    def setUp(self):
        # A fresh test client and request factory for every test.
        self.client = Client()
        self.factory = RequestFactory()

    def test_homepage_no_twitter(self):
        """Homepage shows a Twitter login button before sign-in."""
        resp = self.client.get('/')
        self.assertTrue('Login' in resp.content)
        self.assertEqual(resp.templates[0].name, 'homepage.html')

    def test_homepage_with_twitter(self):
        """Homepage shows Twitter-related content after sign-in.

        The session is populated directly with oauth tokens, mocking a user
        returning from the Twitter sign-in flow.
        """
        req = self.factory.get('/')
        req.session = {'twitter_info': {'oauth_token': '22452978-i85AzdfGeHh5s5mbAVcV2EpnC0jz02TxOcC0ZZN6J',
                                        'oauth_token_secret': 'Zdlk4CkrLpMmDDBGsxWjhWSpqgLQEpIegWREB5NOMqw'}}
        resp = views.homepage(req)
        self.assertEqual(resp.status_code, 200)
        self.assertTrue('Your score' in resp.content)

    def test_about_page(self):
        """About page renders its template."""
        resp = self.client.get('/about/')
        self.assertEqual(resp.templates[0].name, 'about.html')

    def test_404_page(self):
        """Unrouted URLs are forwarded to the 404 page."""
        resp = self.client.get('/random-page/')
        self.assertEqual(resp.status_code, 404)
| true |
373e019b0acacc2abcc1def83c5c23cbe11e5e74 | Python | bailarinbonita/exercises-mit-ocw | /ps0.py | UTF-8 | 624 | 3.796875 | 4 | [] | no_license | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 16 15:08:42 2019

@author: tamar

Prompt the user for two numbers x and y, then print x**y and log2(x).
Each input loop re-prompts until the user enters something float() accepts.
"""
import math

# Prompt until a valid number is entered for x.
while True:
    try:
        x = float(input('Enter a number x: '));
    except ValueError:
        continue
    else:
        break

# Prompt until a valid number is entered for y.
while True:
    try:
        y = float(input('Enter a number y: '))
    except ValueError:
        continue
    else:
        break

print('x**y: ',
      x**y);
# Label fixed: the value printed is the base-2 logarithm (math.log2), but the
# label previously read 'log(x): '.
print('log2(x): ',
      math.log2(x))
# the longer blocks prompt the user to enter a number
# used a while loop to account for the possibility
# that the user might type something other than a
# number
8fb26a8467ba19213cffa7e992ff8b1765d90cad | Python | HongSungRae/DeepLearningPaper | /Codes/SetFunctionForTimeSeries/dataprocess/tfrecord.py | UTF-8 | 1,657 | 3.03125 | 3 | [
"ODC-By-1.0"
] | permissive | """ https://github.com/vahidk/tfrecord
Parse (generic) TFRecords dataset into `IterableDataset` object,
which contain `np.ndarrays`s.
Params:
-------
data_path: str
The path to the tfrecords file.
index_path: str or None
The path to the index file.
description: list or dict of str, optional, default=None
List of keys or dict of (key, value) pairs to extract from each
record. The keys represent the name of the features and the
values ("byte", "float", or "int") correspond to the data type.
If dtypes are provided, then they are verified against the
inferred type for compatibility purposes. If None (default),
then all features contained in the file are extracted.
shuffle_queue_size: int, optional, default=None
Length of buffer. Determines how many records are queued to
sample from.
transform : a callable, default = None
A function that takes in the input `features` i.e the dict
provided in the description, transforms it and returns a
desirable output.
"""
import torch
from tfrecord.torch.dataset import TFRecordDataset
# Path to the PhysioNet-2012 "test" split, serialized as a single TFRecord
# file by tensorflow_datasets.
tfrecord_path = "/daintlab/data/sr/paper/setfunction/tensorflow_datasets/root/tensorflow_datasets/physionet2012/1.0.10/physionet2012-test.tfrecord"
#Z:\daintlab\data\sr\paper\setfunction\tensorflow_datasets\root\tensorflow_datasets\physionet2012\1.0.10
# No index file; with description=None, TFRecordDataset extracts every
# feature contained in each record (see the docstring above).
index_path = None
description = None
def trans(x):
    # Identity transform; currently unused (never passed to TFRecordDataset).
    return x
# Wrap the TFRecord stream in a PyTorch DataLoader and print one batch.
dataset = TFRecordDataset(tfrecord_path, index_path, description)
loader = torch.utils.data.DataLoader(dataset, batch_size=32)
data = next(iter(loader))
print(data)
b17429459a87fb3c8779f3a19e0dbce3d3bae0f8 | Python | patrickoliveras/lpthw | /ex3a.py | UTF-8 | 112 | 3.53125 | 4 | [] | no_license | print("Calculating the determinant of:")
print("⌈0 1⌉")
print("⌊1 1⌋")
print("It is:", 0 * 1 - 1 * 1)
| true |
283945b7b3491fdc9a5718300d5616feff8f088f | Python | bhushanhegde/AlgoVisual | /VisualMergeSort.py | UTF-8 | 7,227 | 2.921875 | 3 | [] | no_license | import pygame
pygame.init()
import random
import time
#colors
white=(255,255,255)
black=(0,0,0)
blue=(0,0,255)
red=(255,0,0)
green=(0,255,0)
width=1200
hight=600
win=pygame.display.set_mode((width,hight))
pygame.display.set_caption("MergeSort")
win.fill(black)
pygame.display.update()
#clock=pygame.time.Clock()
font=pygame.font.SysFont('comicsansms',100)
font_small=pygame.font.SysFont('comicsansms',50)
def preview():
    """Show the title/complexity splash screen until any key is pressed."""
    seen=False
    # Render the title and the algorithm's complexity facts.
    text1=font.render("MERGE SORT",True,white)
    win.blit(text1,[300,100])
    text2=font.render("Time Complexity:O(n log n)",True,white)
    win.blit(text2,[200,300])
    text3=font.render("Space Complexity:O(n)",True,white)
    win.blit(text3,[200,450])
    text4=font_small.render('press any key to continue',True,white)
    win.blit(text4,[300,550])
    pygame.display.update()
    # Block until a key press; honour the window close button meanwhile.
    while not seen:
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                pygame.quit()
                quit()
            if event.type==pygame.KEYDOWN:
                # Clear the splash screen and fall through to the caller.
                win.fill(black)
                pygame.display.update()
                seen=True
# One bar per poleW-pixel column. Each entry of poleH is the y-coordinate of
# a bar's top edge (1..599); on the 600px-high window a SMALLER value means a
# TALLER bar.
poleH=[]
poleW=5
#FPS=20
for x in range(width//poleW):
    poleH.append(random.randint(1,599))
def display(h,w):
    """Draw the initial bars for heights ``h`` and wait for a key press.

    ``w`` is accepted for symmetry with the call site but unused; the module
    global ``poleW`` is used for the bar width instead.
    """
    x=0
    # Draw each bar from its top y-coordinate down to the bottom of the window.
    for hi in h:
        pygame.draw.rect(win,green,[x,hi,poleW,hight])
        pygame.display.update()
        x+=poleW
    text=font_small.render('press any key to start merge sort',True,white)
    win.blit(text,[300,550])
    pygame.display.update()
    # Block until any key is pressed.
    pressed=False
    while not pressed:
        for event in pygame.event.get():
            if event.type==pygame.KEYDOWN:
                pressed=True
    # Re-render the prompt in green (over the same spot) to mark the start.
    text=font_small.render('press any key to start merge sort',True,green)
    win.blit(text,[300,550])
    pygame.display.update()
def MergeSort(arr, start=0):
    """Merge-sort ``arr`` in place while animating each write as a bar.

    ``start`` is the index of ``arr[0]`` within the full pole list and is
    used to draw each merged element at its absolute screen column. The
    previous code drew at ``k*poleW``, so every recursive call redrew the
    LEFTMOST columns of the window instead of the sub-range actually being
    merged; passing the offset down fixes the animation. Existing callers
    (``MergeSort(poleH)``) are unaffected by the new defaulted parameter.

    The ``>`` comparison merges larger y-values first (descending numeric
    order); since a larger y means a shorter bar, the bars end up drawn
    shortest-to-tallest — presumably the intended visual.
    """
    if len(arr)>1:
        mid=len(arr)//2
        L=arr[:mid]
        R=arr[mid:]
        MergeSort(L, start)
        MergeSort(R, start + mid)
        i=j=k=0

        def draw(height):
            # Erase the old bar at this absolute column, draw the new one.
            x = (start + k) * poleW
            pygame.draw.rect(win, black, [x, 0, poleW, hight])
            pygame.draw.rect(win, green, [x, height, poleW, hight])
            pygame.display.update()

        # Standard merge of L and R back into arr, animating each write.
        while i<len(L) and j<len(R):
            if L[i]>R[j]:
                draw(L[i])
                arr[k]=L[i]
                i+=1
            else:
                draw(R[j])
                arr[k]=R[j]
                j+=1
            k+=1
        # Drain whichever side still has elements.
        while i<len(L):
            draw(L[i])
            arr[k]=L[i]
            i+=1
            k+=1
        while j<len(R):
            draw(R[j])
            arr[k]=R[j]
            j+=1
            k+=1
def gameloop():
    """Top-level flow: splash screen, draw bars, run the sort, wait to quit."""
    #print(poleH)
    pygame.display.set_caption("MERGE SORT")
    preview()
    display(poleH,poleW)
    MergeSort(poleH)
    text=font.render('DONE!!!',True,white)
    win.blit(text,[400,300])
    pygame.display.update()
    # NOTE(review): ``exit`` shadows the builtin of the same name; renaming
    # would change code, so it is only flagged here.
    exit=False
    # Stay on the DONE screen until the window is closed or a key is pressed.
    while not exit:
        for event in pygame.event.get():
            if event.type==pygame.QUIT:
                pygame.quit()
                quit()
            if event.type==pygame.KEYDOWN:
                pygame.quit()
                quit()
        pygame.display.update()
#clock.tick(FPS)
#gameloop()
#start for event
'''
import pygame
pygame.init()
import random
import time
#colors
white=(255,255,255)
black=(0,0,0)
blue=(0,0,255)
red=(255,0,0)
green=(0,255,0)
width=1200
hight=600
win=pygame.display.set_mode((width,hight))
pygame.display.set_caption("MergeSort")
win.fill(black)
pygame.display.update()
#clock=pygame.time.Clock()
font=pygame.font.SysFont('comicsansms',100)
font_small=pygame.font.SysFont('comicsansms',50)
def preview():
seen=False
text1=font.render("MERGE SORT",True,white)
win.blit(text1,[300,100])
text2=font.render("Time Complexity:O(n log n)",True,white)
win.blit(text2,[200,300])
text3=font.render("Space Complexity:O(n)",True,white)
win.blit(text3,[200,450])
text4=font_small.render('press any key to continue',True,white)
win.blit(text4,[300,550])
pygame.display.update()
while not seen:
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
quit()
if event.type==pygame.KEYDOWN:
win.fill(black)
pygame.display.update()
seen=True
poleH=[]
poleW=5
#FPS=20
for x in range(width//poleW):
poleH.append(random.randint(1,599))
def display(h,w):
x=0
for hi in h:
pygame.draw.rect(win,green,[x,hi,poleW,hight])
pygame.display.update()
x+=poleW
text=font_small.render('press any key to start merge sort',True,white)
win.blit(text,[300,550])
pygame.display.update()
pressed=False
while not pressed:
for event in pygame.event.get():
if event.type==pygame.KEYDOWN:
pressed=True
text=font_small.render('press any key to start merge sort',True,green)
win.blit(text,[300,550])
pygame.display.update()
def MergeSort(arr):
if len(arr)>1:
mid=len(arr)//2
L=arr[:mid]
R=arr[mid:]
MergeSort(L)
MergeSort(R)
i=j=k=0
while i<len(L) and j<len(R):
if L[i]>R[j]:
pygame.draw.rect(win,black,[k*poleW,0,poleW,hight])
pygame.draw.rect(win,green,[k*poleW,L[i],poleW,hight])
pygame.display.update()
arr[k]=L[i]
i+=1
k+=1
else:
pygame.draw.rect(win,black,[k*poleW,0,poleW,hight])
pygame.draw.rect(win,green,[k*poleW,R[j],poleW,hight])
pygame.display.update()
arr[k]=R[j]
j+=1
k+=1
while i<len(L):
pygame.draw.rect(win,black,[k*poleW,0,poleW,hight])
pygame.draw.rect(win,green,[k*poleW,L[i],poleW,hight])
pygame.display.update()
arr[k]=L[i]
i+=1
k+=1
while j<len(R):
pygame.draw.rect(win,black,[k*poleW,0,poleW,hight])
pygame.draw.rect(win,green,[k*poleW,R[j],poleW,hight])
pygame.display.update()
arr[k]=R[j]
j+=1
k+=1
def gameloop():
#print(poleH)
pygame.display.set_caption("MERGE SORT")
preview()
display(poleH,poleW)
MergeSort(poleH)
text=font.render('DONE!!!',True,white)
win.blit(text,[400,300])
pygame.display.update()
exit=False
while not exit:
for event in pygame.event.get():
if event.type==pygame.QUIT:
pygame.quit()
quit()
if event.type==pygame.KEYDOWN:
pygame.quit()
quit()
pygame.display.update()
#clock.tick(FPS)
#gameloop()
'''
| true |
3372581475816129a6dd140601b80de66a5a17c3 | Python | czkyyz/scrapy-003 | /shiyanlou/shiyanlou/spiders/courses.py | UTF-8 | 1,436 | 2.6875 | 3 | [] | no_license | # -*- coding: utf-8 -*-
import scrapy
from shiyanlou.items import CourseItem
class CoursesSpider(scrapy.Spider):
    """Crawl the shiyanlou GitHub profile and scrape per-repository stats."""
    name = 'courses'
    @property
    def start_urls(self):
        # Pages 1-4 of the account's repository listing.
        url1 = 'https://github.com/shiyanlou?page={}&tab=repositories'
        return (url1.format(i) for i in range(1,5))
    def parse(self, response):
        """Extract one CourseItem per repository and follow its detail page."""
        for i in response.css('li.col-12'):
            item = CourseItem({
                "name": i.css('a::text').re_first('\s*(\w*)'),
                "update_time":i.css('relative-time::attr(datetime)').extract_first()
            })
            course_url = response.urljoin(i.xpath('.//a/@href').extract_first())
            #print(course_url)
            # Pass the partially-filled item along to parse_code via meta.
            request = scrapy.Request(url=course_url,callback=self.parse_code)
            request.meta['item'] = item
            yield request
    def parse_code(self,response):
        """Fill in commit/branch/release counts from the repository page.

        When the stat spans are missing the item is yielded without those
        fields (the if-branch deliberately does nothing).
        """
        item = response.meta['item']
        # print('--------------------------------------')
        # print(item)
        if not response.xpath('//span[contains(@class,"num")]/text()').extract():
            pass
        else:
            # The three "num" spans are commits, branches, releases in order.
            item['commits'] = response.xpath('//span[contains(@class,"num")]/text()').extract()[0].strip()
            item['branches'] = response.xpath('//span[contains(@class,"num")]/text()').extract()[1].strip()
            item['releases'] = response.xpath('//span[contains(@class,"num")]/text()').extract()[2].strip()
        yield item
| true |
53f331791270c587aac44f1a2995db237f10cee7 | Python | raresmihai/Numerical-Calculus | /Tema2/norms.py | UTF-8 | 478 | 2.984375 | 3 | [] | no_license | import numpy
from math import sqrt
def getSecondNorm(A, x, b):
    """Return ||A_sym @ x - b||_2 where A_sym mirrors A's upper triangle.

    ``A`` stores a symmetric matrix in its upper triangle: for row > col the
    entry A[col, row] is read instead of A[row, col].
    """
    n = A[0].size
    product = numpy.zeros(n)
    for row in range(n):
        for col in range(n):
            # Mirror the upper triangle across the diagonal.
            entry = A[col, row] if row > col else A[row, col]
            product[row] += entry * x[col]
    residual = numpy.subtract(product, b)
    # Euclidean norm of the residual.
    squared_norm = sum(component ** 2 for component in residual)
    return sqrt(squared_norm)
| true |
c77d9e4971aed33c9c6a5a2e65c9db9b46b4da96 | Python | heathzhang35/CCDLUtil | /Communications/DepreciatedCommunications/Receive.py | UTF-8 | 1,993 | 3.640625 | 4 | [] | no_license | """
This method is for receiving messages between computers on the same network.
Messages are sent as strings. If sending a nonstring object, it will be converted
to a string.
"""
import os
import socket
import Queue
import threading
import warnings
class Receive(object):
def __init__(self, port, receive_message_queue, host="", buf=1024):
"""
Sends messages to the specified host and port for computers on the same network.
:param host: Defaults to ""
:param port: Set to desired port. Only one Send object can be opened on a given port. Example: 13000
:param send_message_queue: Place messages on this queue to have them sent.
"""
host = host
port = port
self.buf = buf
self.addr = (host, port)
self.UDPSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.UDPSock.bind(self.addr)
self.receive_message_queue = receive_message_queue
warnings.warn(DeprecationWarning)
def receive_from_queue(self):
"""
Receives messages and passes them to the receive_message_queue passed during initialization
Received data in accordance with the host and port set during initialization
Pass 'exit' to close the socket.
All data passed is of string format -- meaning all data placed on the receive_message_queue will
be a string.
"""
while True:
message, addr = self.UDPSock.recvfrom(self.buf)
self.receive_message_queue.put(message)
if message == "exit":
self.UDPSock.close()
if __name__ == '__main__':
RECEIVE_MESSAGE_QUEUE = Queue.Queue()
RECEIVE_OBJECT = Receive(port=13000, receive_message_queue=RECEIVE_MESSAGE_QUEUE)
threading.Thread(target=RECEIVE_OBJECT.receive_from_queue).start()
print "Waiting to Receive"
while True:
data = RECEIVE_MESSAGE_QUEUE.get()
print "Received message: " + data
| true |
a48b21d14920d4d2b0a806466350822d9b48f0c6 | Python | realhardik18/songs-downloader | /youtube/youtube_downloader.py | UTF-8 | 453 | 2.578125 | 3 | [] | no_license | from pytube import Playlist
from pytube import YouTube
import os
playlist = Playlist("your youtube playlist")
urls = []
i = 1
for url in playlist.video_urls:
yt = YouTube(url)
video = yt.streams.filter(only_audio=True).first()
destination = r"download destination here"
out_file = video.download(output_path=destination)
base, ext = os.path.splitext(out_file)
new_file = f"{i}.mp3"
os.rename(out_file, new_file)
i += 1
| true |
0c9ac1d7f039fa21180368cbe154cc9044dd38cf | Python | Capidunord/texlab | /imports/python/linear_algebra/linear_system.py | UTF-8 | 2,386 | 2.859375 | 3 | [] | no_license | from sympy import latex, Matrix, zeros
from general import expr
from .render_row import render_row
class System:
def __init__(self, A, X, Y):
self.A = A.copy()
self.X = X.copy()
self.Y = Y.copy()
def to_latex(self):
result = '\left\{\\begin{array}{' + \
'rc' * (self.A.cols - 1) + 'r' + 'cr' + '}\n'
for i in range(0, self.A.rows):
row = [self.A[i, j] for j in range(0, self.A.cols)]
line = render_row(row, self.X)
line += " & = & " + expr.platex(self.Y[i, 0])
if i != self.A.rows - 1:
line += ' \\\\'
line += "\n"
result += line
result += "\end{array}\\right."
return result
def transvection(self, row1, row2, factor=1):
self.A[row1 - 1, :] += factor * self.A[row2 - 1, :]
self.Y[row1 - 1, :] += factor * self.Y[row2 - 1, :]
def multiply(self, row, l):
self.A[row - 1, :] = self.A[row - 1, :] * l
self.Y[row - 1, :] = self.Y[row - 1, :] * l
def switch(self, row1, row2):
temp = self.A[row1 - 1, :]
self.A[row1 - 1, :] = self.A[row2 - 1, :]
self.A[row2 - 1, :] = temp
temp = self.Y[row1 - 1, :]
self.Y[row1 - 1, :] = self.Y[row2 - 1, :]
self.Y[row2 - 1, :] = temp
@staticmethod
def _remove_line_from_matrix(M: Matrix, line: int) -> Matrix:
newM = zeros(M.rows - 1, M.cols)
for i in range(1, M.rows):
for j in range(1, M.cols + 1):
if i < line:
newM[i-1,j-1] = M[i-1,j-1]
else:
newM[i-1,j-1] = M[i, j-1]
return newM
def remove_line(self, line):
self.A = System._remove_line_from_matrix(self.A, line)
self.Y = System._remove_line_from_matrix(self.Y, line)
@staticmethod
def _remove_col_from_matrix(M: Matrix, col: int) -> Matrix:
newM = zeros(M.rows, M.cols - 1)
for i in range(1, M.rows + 1):
for j in range(1, M.cols):
if j < col:
newM[i-1,j-1] = M[i-1,j-1]
else:
newM[i-1,j-1] = M[i-1, j]
return newM
def remove_col(self, col):
self.A = System._remove_col_from_matrix(self.A, col)
self.X = System._remove_line_from_matrix(self.X, col)
| true |
3aec30a919e1b07bfd9360a8aa9ab7247694f5fc | Python | mishrakeshav/Tkinter-Tutorials | /15DropDownMenu.py | UTF-8 | 409 | 3 | 3 | [] | no_license | from tkinter import *
root = Tk()
root.geometry("400x400")
def check():
global var
lbl = Label(root,text = var.get()).pack()
var = StringVar()
options = [
"Monday",
"Tuesday",
"Wednesday",
"Thursday",
"Friday",
"Saturday"
]
var.set(options[0])
drop = OptionMenu(root,var,*options)
drop.pack()
btn = Button(root,text = "Click here", command = check).pack()
root.mainloop() | true |
6b0286c916400b0309b9c5a890b663c9c3a3fd16 | Python | regisb/braingeyser | /braingeyser.py | UTF-8 | 3,917 | 2.546875 | 3 | [] | no_license | #! /usr/bin/env python
import argparse
import os
import chardet
from flask import Flask, render_template, redirect, url_for, send_from_directory
import pycaption
app = Flask(__name__)
# TODO
# - Convert mkv videos to mp4: avconv -i file.mkv -vcodec copy -acodec mp3 file.mp4
# - Convert srt files to vtt? (not sure it's necessary)
class Videos:
ROOT_DIR = os.environ.get('VIDEO_ROOT_DIR', '/tmp')
SUPPORTED_EXTENSIONS = ['.mp4', '.avi', '.mkv']
SUPPORTED_SUBTITLES_EXTENSIONS = ['.srt', '.vtt']
@app.route('/')
def home():
return redirect(url_for('navigate', root=''))
@app.route('/navigate/')
@app.route('/navigate/<path:root>')
def navigate(root=''):
absolute_root = os.path.join(Videos.ROOT_DIR, root)
directories = []
videos = []
for path in sorted(os.listdir(absolute_root)):
absolute_path = os.path.join(absolute_root, path)
relative_path = os.path.join(root, path)
if os.path.isdir(absolute_path):
directories.append({
'name': path,
'path': relative_path
})
elif path[-4:] in Videos.SUPPORTED_EXTENSIONS:
videos.append({
'name': path,
'path': relative_path
})
return render_template('navigate.html', directories=directories, videos=videos)
@app.route('/video/<path:src>')
def video(src):
    """Render the player page for the video at `src`.

    Sibling subtitle files (same directory, supported extension) are attached
    as tracks; any subtitle whose file name contains the video's base name is
    flagged as the default track.

    :param src: video path relative to Videos.ROOT_DIR
    :return: rendered video.html with the video and its subtitle tracks
    """
    root = os.path.dirname(src)
    absolute_root = os.path.join(Videos.ROOT_DIR, root)
    target = {
        'name': os.path.basename(src),
        'path': src,
    }
    # Find subtitles
    # (we don't use glob because it does not work with '[' characters in the path name)
    subtitles = []
    video_name = os.path.splitext(os.path.basename(src))[0]
    for subtitle in sorted(os.listdir(absolute_root)):
        ext = os.path.splitext(subtitle)[1]
        if ext not in Videos.SUPPORTED_SUBTITLES_EXTENSIONS:
            continue
        # os.listdir already yields bare file names, so `subtitle` IS the name;
        # the original re-derived it via basename, shadowing the splitext result.
        subtitles.append({
            'name': subtitle,
            'path': os.path.join(root, subtitle),
            'default': video_name in subtitle,
        })
    return render_template('video.html', video=target, subtitles=subtitles)
@app.route('/data/<path:src>')
def data(src):
    """Serve the raw media file at `src` (relative to the video root).

    Surprisingly, Flask handles mp4 file streaming magnificently, so no
    nginx-style media server is needed for mp4 delivery.
    """
    parent_dir = os.path.join(Videos.ROOT_DIR, os.path.dirname(src))
    return send_from_directory(parent_dir, os.path.basename(src))
@app.route('/track/<path:src>')
def track(src):
    """Serve the subtitle file at `src`, converted to WebVTT for videojs."""
    subtitle_path = os.path.join(Videos.ROOT_DIR, src)
    return convert_to_vtt(subtitle_path)
def convert_to_vtt(path):
    """
    Convert a subtitles file from any format (e.g: srt) to vtt. This is
    necessary for use with videojs, which supports only vtt subtitles.

    :param path: absolute path to the subtitle file
    :return: WebVTT caption text, or None when the format is unrecognized
    """
    # Read via a context manager so the file handle is always released
    # (the original leaked the open file object).
    with open(path, 'rb') as caps_file:
        caps = caps_file.read()
    try:
        caps = caps.decode('utf8')
    except UnicodeDecodeError:
        # Attempt to read with custom encoding
        detected = chardet.detect(caps)
        caps = caps.decode(detected['encoding'])
    # Strip a leading BOM and stray newlines before format sniffing.
    caps = caps.strip("\ufeff").strip("\n").strip("\r")
    sub_reader = pycaption.detect_format(caps)
    if sub_reader is None:
        return None
    if sub_reader != pycaption.WebVTTReader:
        read_caps = sub_reader().read(caps)
        caps = pycaption.WebVTTWriter().write(read_caps)
    return caps
if __name__ == '__main__':
    # Parse the CLI, point the library at the given directory, and serve.
    cli = argparse.ArgumentParser(description="A video streaming server")
    cli.add_argument('--host', default='127.0.0.1')
    cli.add_argument('-p', '--port', type=int, default=8000)
    cli.add_argument('path', help='Source directory of video files')
    opts = cli.parse_args()
    Videos.ROOT_DIR = os.path.abspath(opts.path)
    app.run(host=opts.host, port=opts.port, debug=True)
| true |
35cfe5178500b14942890415ba9b2dd08458788a | Python | lucifer443/adaptive_quantize_training | /quantize_utils.py | UTF-8 | 3,336 | 2.640625 | 3 | [] | no_license | # 量化相关基础函数
# =========================================================
import tensorflow as tf
import os.path as osp
import sys
from importlib import import_module
def compute_quant_param(data, bitnum, ifscale=False, ifoffset=False, ifchannel=False):
    """
    Compute quantization parameters for a tensor.

    :param data: tensor to be quantized
    :param bitnum: quantization bit width
    :param ifscale: whether to apply a scale factor
    :param ifoffset: whether to use asymmetric quantization (non-zero offset)
    :param ifchannel: whether to compute parameters per channel
    :return: quantization parameters shift, scale (f), offset (o)
    """
    with tf.variable_scope(data.name[:-2] + "/quantize", reuse=tf.AUTO_REUSE):
        # Per-channel stats reduce over the first three axes; otherwise global.
        axis = (0, 1, 2) if ifchannel else None
        if ifoffset:
            data_max = tf.reduce_max(data, axis=axis)
            data_min = tf.reduce_min(data, axis=axis)
        else:
            # Symmetric quantization: range centered on zero.
            data_max = tf.reduce_max(tf.abs(data), axis=axis)
            data_min = -data_max
        # Z = half-range (epsilon keeps log() finite); o = range midpoint.
        Z = (data_max - data_min) / 2 + 1e-9
        o = (data_max + data_min) / 2
        # Smallest power-of-two shift so the half-range fits the signed grid.
        shift = tf.ceil(tf.log(Z / (2 ** (bitnum - 1) - 1)) / tf.log(2))
        f = Z / (tf.pow(2.0, shift) * (tf.pow(2.0, (bitnum - 1)) - 1)) if ifscale else tf.ones_like(shift)
        return shift, f, o
def float2fix(data, shift, f=1.0, o=0.0, bitnum=8):
    """
    Quantize `data` onto a fixed-point integer grid and dequantize it back
    to float, i.e. simulate quantization error in the graph.

    :param data: tensor to be quantized
    :param shift: power-of-two exponent of the quantization step
    :param f: scale factor
    :param o: offset (asymmetric quantization)
    :param bitnum: quantization bit width
    :return: float tensor after the quantize/dequantize round trip
    """
    with tf.variable_scope(data.name[:-2] + "/dequantize", reuse=tf.AUTO_REUSE):
        step = tf.pow(2.0, shift) * f
        # Map to the integer grid (tf.round rounds half to even).
        quantized = tf.round((data - o) / step)
        # Clamp to the signed bitnum-bit range [-2^(b-1), 2^(b-1) - 1].
        limit = tf.pow(2.0, tf.cast(bitnum - 1, tf.float32))
        quantized = tf.clip_by_value(quantized, -limit, limit - 1.0)
        # Dequantize back to float.
        return quantized * step + o
class Config:
    """
    Load a quantization configuration from a Python config file.

    The file is imported as a module and every non-dunder top-level name
    becomes an entry of ``cfg_dict``.
    """
    def __init__(self, filename):
        # Normalize the path; the file is imported as a regular module below,
        # so its stem must be a valid module name (hence the dot check).
        filename = osp.abspath(osp.expanduser(filename))
        module_name = osp.basename(filename)[:-3]
        if '.' in module_name:
            raise ValueError('Dots are not allowed in config file path.')
        config_dir = osp.dirname(filename)
        # Temporarily put the config's directory on sys.path so import works.
        sys.path.insert(0, config_dir)
        mod = import_module(module_name)
        sys.path.pop(0)
        self.cfg_dict = {
            name: value
            for name, value in mod.__dict__.items()
            if not name.startswith('__')
        }
        self.is_training = self.cfg_dict["is_training"]
        self.forward_quant_ops = self.cfg_dict["forward_quant_ops"]
        self.backward_quant_ops = self.cfg_dict["backward_quant_ops"]
    def get_config(self, op_type, op_name, data_type):
        """
        Build the quantization settings for one op/tensor combination.

        :param op_type: operator type used for per-op overrides
        :param op_name: operator name in the graph (used in "save name")
        :param data_type: which tensor of the op the settings apply to
        :return: dict of merged quantization settings
        """
        config = dict()
        config["save name"] = "%s/%s" %(op_name, data_type)
        # Select the quantization method: defaults first, then
        # per-"op_type/data_type" overrides from the config file.
        config.update(self.cfg_dict["quantize method"]["default"])
        config.update(self.cfg_dict.get("%s/%s" % (op_type, data_type), dict()))
        if self.is_training:
            config["adaptive strategy"] = self.cfg_dict["training setting"]["adaptive strategy"][data_type]
            config.update(self.cfg_dict["training setting"]["hyper-parameters"])
        else:
            config.update(self.cfg_dict["inference setting"])
        return config
| true |
03a7b4898159f636ddf0b32c8102b5c46d23e362 | Python | chunche95/ProgramacionModernaPython | /Proyectos/Practicas_Reciclaje/funciones.py | UTF-8 | 2,321 | 4.09375 | 4 | [] | no_license | def operaciones():
lista= [];
resultado = 0;
try:
operacion = int(input("¿Que operacion desea realizar [número]? "));
except ValueError:
print("El valor introducido no es válido, vuelva a intentarlo de nuevo");
operaciones();
while True:
try:
lista.append(input ("Escriba valor. "))
valor=lista;
print(valor);
if("F" in lista):
lista.remove("F");
break;
except ValueError:
print("ERROR, no se puede operar la cadena introducida")
except Exception as valor:
print(type(valor).__name__)
print(lista)
art = len(lista)
if (operacion == 1):
# Recorro la longitud de la lista de argumentos de entrada y opero
for i in lista:
resultado += int(i);
elif (operacion == 2):
for i in lista:
resultado -= int(i);
elif (operacion == 3):
for i in lista:
resultado *= int(i);
elif (operacion == 4):
try:
for i in lista:
resultado /= int(i);
except ZeroDivisionError:
print("No se puede dividir los valores entre 0.")
return "Operacion no válida"
except Exception as i:
print(type(i).__name__)
elif (operacion == 5):
for i in lista:
resultado += int(i);
resultado = (resultado*art)/100;
else:
print("Algo salió mal...");
print(resultado);
exit();
def anuncio():
print ('''Operaciones válidas.
1- Sumar copmpra
2- Resta cartera
3- Multiplicacion articulos
4- Division pedidos (yoq sé)
5- Ahorro BlackDays
_________________________________________________
"Escriba los valores, cuando termine escriba F
_________________________________________________
''')
operaciones()
def main():
anuncio()
main() | true |
0270f440724e5a8184f1b3cff17c9aa90eedd2f6 | Python | zrlram/leds | /shows/white_snow.py | UTF-8 | 3,706 | 3.40625 | 3 | [] | no_license | #
# White Snow
#
# Show draws vertically failing trails
#
# Snow color is always white
# Background is black
#
from random import randint
import looping_show
from color import morph_color
class WhiteSnow(looping_show.LoopingShow):
is_show = True
name = "White Snow"
def __init__(self, geometry):
looping_show.LoopingShow.__init__(self, geometry)
self.paths = [] # List that holds paths objects
self.geometry = geometry
self.max_paths = 8
self.decay = 1.0 / 3
self.background = (0,0,0) # Always Black
self.foreground = (255,255,255) # Always White
self.duration = 1
self.last_update = 0
"""
def set_param(self, name, val):
# name will be 'colorR', 'colorG', 'colorB'
rgb255 = int(val * 0xff)
if name == 'colorR':
self.max_paths = (rgb255 * 8 / 255) + 2
elif name == 'colorG':
self.decay = 1.0 / ((rgb255 * 4) + 1)
"""
def update_at_progress(self, progress, new_loop, loop_instance):
if len(self.paths) < self.max_paths:
over = randint(0, len(self.geometry.VERT_RINGS) - 1)
new_path = Path(self.geometry, self.geometry.VERT_RINGS[over], self.decay)
self.paths.append(new_path)
# Set background cells
#self.sheep.set_all_cells(self.background)
# Draw paths
if (new_loop): self.last_update = 0
if (progress - self.last_update) > 0.05:
for p in self.paths:
p.draw_path(self.foreground, self.background)
p.move_path()
self.last_update = progress
for p in self.paths:
if p.path_alive() == False:
self.paths.remove(p)
self.geometry.draw()
class Fader(object):
def __init__(self, model, pixel_number, decay):
self.geometry = model
self.pixel = pixel_number
self.decay = decay
self.life = 1.0
def draw_fader(self, fore_color, back_color):
adj_color = morph_color(back_color, fore_color, self.life)
self.geometry.set_pixel(self.pixel, adj_color)
def age_fader(self):
self.life -= self.decay
if self.life > 0:
return True # Still alive
else:
return False # Life less than zero -> Kill
class Path(object):
def __init__(self, geometry, over, decay):
self.geometry = geometry
self.faders = [] # List that holds fader objects
self.down = 0 # how far down did we fall already
self.decay = decay
self.over = over # where the snow will fall
self.length = len(self.over) # how many rows there are total
new_fader = Fader(self.geometry, self.over[0], self.decay)
self.faders.append(new_fader)
def draw_path(self, foreground, background):
for f in self.faders:
f.draw_fader(foreground, background)
for f in self.faders:
if f.age_fader() == False:
self.faders.remove(f)
def path_alive(self):
if len(self.faders) > 0:
return True
else:
return False
def move_path(self):
if self.down < (self.length - 1):
self.down += 1
new_fader = Fader(self.geometry, self.over[self.down], self.decay)
self.faders.append(new_fader)
__shows__ = [
(WhiteSnow.name, WhiteSnow)
]
| true |
f4e99e516b69b815a3bda177a04ef54d7e91e7fc | Python | SysSynBio/PyFBA | /PyFBA/gapfill/subsystem.py | UTF-8 | 5,155 | 2.65625 | 3 | [
"MIT"
] | permissive | import os
import sys
import io
import PyFBA
# We want to find the path to the Biochemistry/SEED/ files. This is a relative path and is two levels above us
pyfbadir, tail = os.path.split(__file__)
pyfbadir, tail = os.path.split(pyfbadir)
SS_FILE_PATH = os.path.join(pyfbadir, "Biochemistry/SEED/Subsystems/SS_functions.txt")
def suggest_reactions_from_subsystems(reactions, reactions2run, ssfile=SS_FILE_PATH, threshold=0, verbose=False):
"""
Identify a set of reactions that you should add to your model for growth based on the subsystems that are present
in your model and their coverage.
Read roles and subsystems from the subsystems file (which has role, subsystem, classification 1, classification 2)
and make suggestions for missing reactions based on the subsystems that only have partial reaction coverage.
:param threshold: The minimum fraction of the genes that are already in the subsystem for it to be added (default=0)
:type threshold: float
:param reactions: our reactions dictionary from parsing the model seed
:type reactions: dict
:param reactions2run: set of reactions that we are going to run
:type reactions2run: set
:param ssfile: a subsystem file (really the output of dump_functions.pl on the seed machines)
:type ssfile: str
:param verbose: add additional output
:type verbose: bool
:return: A set of proposed reactions that should be added to your model to see if it grows
:rtype: set
"""
if not os.path.exists(ssfile):
sys.stderr.write("FATAL: The subsystems file {} does not exist from working directory {}.".format(ssfile, os.getcwd()) +
" Please provide a path to that file\n")
return set()
# read the ss file
subsys_to_roles = {}
roles_to_subsys = {}
with io.open(ssfile, 'r', encoding="utf-8", errors='replace') as sin:
for l in sin:
# If using Python2, must convert unicode object to str object
if sys.version_info.major == 2:
l = l.encode('utf-8', 'replace')
if l.startswith('#'):
continue
p = l.strip().split("\t")
if len(p) < 2:
if verbose:
sys.stderr.write("Too few columns in subsystem file at line: {}\n".format(l.strip()))
continue
if p[1] not in subsys_to_roles:
subsys_to_roles[p[1]] = set()
for role in PyFBA.parse.roles_of_function(p[0]):
if role not in roles_to_subsys:
roles_to_subsys[role] = set()
subsys_to_roles[p[1]].add(role)
roles_to_subsys[role].add(p[1])
# now convert our reaction ids in reactions2run into roles
# we have a hash with keys = reactions and values = set of roles
reacts = PyFBA.filters.reactions_to_roles(reactions2run)
# foreach subsystem we need to know the fraction of roles that are present
# this is complicated by multifunctional enzymes, as if one function is present they all should be
# but for the moment (??) we are going to assume that each peg has the multi-functional annotation
ss_present = {}
ss_roles = {}
for r in reacts:
for rl in reacts[r]:
if rl in roles_to_subsys:
for s in roles_to_subsys[rl]:
if s not in ss_present:
ss_present[s] = set()
ss_roles[s] = set()
ss_present[s].add(rl)
ss_roles[s].add(r)
ss_fraction = {}
for s in ss_present:
ss_fraction[s] = 1.0 * len(ss_present[s]) / len(subsys_to_roles[s])
if verbose:
for s in ss_roles:
print("{}\t{}\t{}".format(s, ss_fraction[s], ss_roles[s], "; ".join(ss_present)))
# now we can suggest the roles that should be added to complete subsystems.
suggested_ss = set()
for s in ss_fraction:
if ss_fraction[s] >= threshold:
suggested_ss.add(s)
if verbose:
sys.stderr.write("Suggesting " + str(len(suggested_ss)) + " subsystems\n")
# suggested_ss = {s for s, f in ss_fraction.items() if f>0}
suggested_roles = set()
for s in suggested_ss:
for r in subsys_to_roles[s]:
if r not in reactions2run:
suggested_roles.add(r)
if verbose:
sys.stderr.write("Suggesting " + str(len(suggested_roles)) + " roles\n")
# finally, convert the roles to reactions
new_reactions = PyFBA.filters.roles_to_reactions(suggested_roles)
if verbose:
sys.stderr.write("Found " + str(len(new_reactions)) + " reactions\n")
suggested_reactions = set()
for rl in new_reactions:
suggested_reactions.update(new_reactions[rl])
if verbose:
sys.stderr.write("Suggested reactions is " + str(len(suggested_reactions)) + "\n")
suggested_reactions = {r for r in suggested_reactions if r in reactions and r not in reactions2run}
if verbose:
sys.stderr.write("Suggested reactions is " + str(len(suggested_reactions)) + "\n")
return suggested_reactions
| true |