seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
40122267120 | # Chenyue Hu u1275460
# Assignment 1 Animate shape
from graphics import *
def eyes(x, y, win):
    """Draw a single white eye: a circle of radius 20 centered at (x, y)."""
    eye = Circle(Point(x, y), 20)
    eye.setFill("white")
    eye.draw(win)
def draw_a_line(x, y, win):
    """Draw a horizontal line segment of length 20 starting at (x, y)."""
    segment = Line(Point(x, y), Point(x + 20, y))
    segment.draw(win)
def shape_body(x, y, win):
    """Draw the pink oval body whose bounding box starts at (x, y)."""
    body = Oval(Point(x, y), Point(x + 200, y + 150))
    body.setFill("pink")
    body.draw(win)
def tail(x, y, win):
    """Draw one purple rectangular tail segment anchored at (x, y)."""
    segment = Rectangle(Point(x, y), Point(x + 100, y + 30))
    segment.setFill("purple")
    segment.draw(win)
def draw_my_shape(x, y, win):
    # Compose the whole figure at (x, y): the body is drawn first so the
    # mouth line, the eye and the two tail rectangles render on top of it.
    shape_body(x,y,win)
    draw_a_line(x+10,y+100,win)
    eyes(x+50, y+50, win)
    tail(x+170,y+100,win)
    tail(x+170,y+50,win)
def main():
    # Double-buffered animation window: autoflush=False defers drawing
    # until update() is called at the end of each frame.
    win = GraphWin('Drawing', 700, 500, autoflush=False)
    win.setBackground('blue')
    while True:
        # NOTE(review): clear_win() and getMousePosition() are not part of
        # the standard Zelle graphics.py API -- presumably a locally
        # extended version of the module; confirm against the local library.
        win.clear_win()
        point = win.getMousePosition()
        x = point.getX()
        y = point.getY()
        # Redraw the shape at the current mouse position, then flush.
        draw_my_shape(x, y, win)
        win.update()
if __name__ == "__main__":
main() | HetonDH/PythonCodeAssignment | A1/AnimateShape.py | AnimateShape.py | py | 1,059 | python | en | code | 0 | github-code | 90 |
36600228448 | import requests
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import time
import pyautogui
import re
import csv
# Configure webdriver options.
options = webdriver.ChromeOptions()
# Keep the browser window open after the script finishes.
options.add_experimental_option("detach", True)
# # Run headless (hidden window)
# options.headless = True
# # Fix the window size
# options.add_argument("window-size=1920x1080")
# Open the browser.
browser = webdriver.Chrome(options=options)
# Maximize the window.
browser.maximize_window()
# Open the CSV output file (utf-8-sig so Excel detects the encoding).
filename = "yanolja.csv"
f = open(filename, "w", encoding="utf-8-sig", newline="")
writer = csv.writer(f)
# Split on "," to get the list of column headers.
title = "숙소,평점,가격,링크".split(",")
# Write the header row.
writer.writerow(title)
url = "https://www.yanolja.com/"
browser.get(url)
# time.sleep(2)
# Click the search button.
# NOTE: find_element_by_* was removed in Selenium 4; use find_element(By...).
browser.find_element(By.XPATH, '//*[@id="__next"]/section/header/div/a[2]').click()
time.sleep(2)
# Open the date-selection widget.
browser.find_element(By.XPATH, '//*[@id="__next"]/div[1]/header/nav/div[2]/form/div[2]/button[1]').click()
time.sleep(1)
# Check-in date.
browser.find_element(By.XPATH, '/html/body/div[3]/div/div/section/section[3]/div/div/div/div[2]/div/div[2]/div[2]/div/table/tbody/tr[4]/td[6]').click()
# Check-out date.
browser.find_element(By.XPATH, '/html/body/div[3]/div/div/section/section[3]/div/div/div/div[2]/div/div[2]/div[2]/div/table/tbody/tr[4]/td[7]').click()
# Confirm.
browser.find_element(By.XPATH, '/html/body/div[3]/div/div/section/section[4]/button').click()
# Type the query into the search box and submit.
elem = browser.find_element(By.CLASS_NAME, 'SearchInput_input__342U2')
elem.send_keys('제주리조트')
elem.send_keys(Keys.ENTER)
# Wait until the result list is rendered.
WebDriverWait(browser, 10).until(EC.presence_of_element_located((By.XPATH, '//*[@id="__next"]/div[1]/main/div/section[2]/div/div/div[1]')))
# time.sleep(5)
# Scroll to the bottom of the page so every result is loaded.
prev_height = browser.execute_script("return document.body.scrollHeight")
time.sleep(2)
while True:
    # browser.execute_script("window.scroll(0,document.body.scrollHeight)")
    # Move the mouse to the middle of the screen.
    pyautogui.moveTo(500, 500)
    # Scroll down.
    pyautogui.scroll(-prev_height)
    time.sleep(2)
    curr_height = browser.execute_script("return document.body.scrollHeight")
    if curr_height == prev_height:
        break
    prev_height = curr_height
# Screenshot of the fully loaded page.
browser.get_screenshot_as_file("yanolja.jpg")
soup = BeautifulSoup(browser.page_source, "lxml")
# One entry per accommodation.
items = soup.find_all("div", {"class": "PlaceListItemText_container__fUIgA text-unit"})
for i, item in enumerate(items):
    title = item.find("strong", {"class": "PlaceListTitle_text__2511B small"}).get_text()
    # Skip entries without a rating.
    if not item.find("span", {"class": "PlaceListScore_rating__3Glxf"}):
        continue
    rate = float(item.find("span", {"class": "PlaceListScore_rating__3Glxf"}).get_text())
    # Skip sold-out entries.
    if item.find("span", {"class": "PlacePriceInfo_salePrice__28VZD"}).get_text() == "예약마감":
        continue
    price = item.find("span", {"class": "PlacePriceInfo_salePrice__28VZD"}).get_text()
    item_url = item.find("a", {"class": "common_clearfix__M6urU"})["href"]
    if rate < 4.0:
        continue
    item_url = "https://www.yanolja.com" + item_url
    # Report the entry.
    print("숙소명 : ", title)
    print("평점 : ", rate)
    print("가격 : ", price)
    print("링크 : ", item_url)
    print("-" * 50)
    # Append the entry to the CSV file.
    data = [title, rate, price, item_url]
    writer.writerow(data)
    # Save the thumbnail image; the URL is embedded in an inline style.
    img_item = item.find("div", {"class": "PlaceListImage_imageText__2XEMn"})["style"]
    # Locate the start of the https URL inside the style attribute.
    temp = img_item.find("https://")
    img_item = img_item[temp:-3]
    print("파일 위치 : ", img_item)
    res_img = requests.get(img_item)
    # BUG FIX: use a distinct name for the image handle -- the original
    # reused `f`, clobbering the still-open CSV file handle.
    with open("item_{}.jpg".format(i), "wb") as img_file:
        img_file.write(res_img.content)
# BUG FIX: close the CSV file so buffered rows are flushed to disk.
f.close()
# print(soup.prettify()) | onulee/https---github.com-onulee-kdigital1 | 05.web/web0426/w0426_02_야놀자.py | w0426_02_야놀자.py | py | 4,304 | python | en | code | 0 | github-code | 90 |
35383452674 | # -*- coding: utf-8 -*-
from sqlalchemy import desc
from application import session_scope
from application.model.use_case import UseCase, UseCaseInterfaceRelation, UseCaseParameterRelation
from application.model.batch import BatchUseCaseRelation
def add_use_case(**kwargs):
    """Create a UseCase row from the given column values and return its new id."""
    with session_scope() as session:
        use_case = UseCase(**kwargs)
        session.add(use_case)
        # flush so the auto-generated primary key is populated before commit
        session.flush()
        return use_case.id
def get_use_case(**kwargs):
    """Return active (status == 1) use cases matching the given filters
    as dicts, most recently updated first."""
    with session_scope() as session:
        query = session.query(UseCase).filter_by(**kwargs).filter_by(status=1).order_by(UseCase.update_time.desc())
        use_case_list = [use_case.to_dict() for use_case in query]
        return use_case_list
def get_use_case_by_name(use_case_name):
    """Return active use cases whose name contains *use_case_name*
    (SQL LIKE substring match), newest first, as dicts."""
    with session_scope() as session:
        query = session.query(UseCase).filter(UseCase.use_case_name.like('%{0}%'.format(use_case_name))).\
            filter_by(status=1).order_by(UseCase.create_time.desc())
        use_case_list = [use_case.to_dict() for use_case in query]
        return use_case_list
def get_multi_use_case(use_case_list):
    """Fetch the use cases whose ids appear in *use_case_list*, newest
    first, and return a mapping of use-case id -> dict representation."""
    with session_scope() as session:
        rows = (session.query(UseCase)
                .filter(UseCase.id.in_(use_case_list))
                .order_by(UseCase.create_time.desc()))
        by_id = {}
        for row in rows:
            info = row.to_dict()
            by_id[info['id']] = info
        return by_id
def get_use_case_with_function_id(function_line_list):
    """For every function entry (dicts with an 'id' key), collect its active
    use cases; returns {function_id: [use_case_dict, ...]}."""
    with session_scope() as session:
        use_case_with_function_line_dict = {}
        for function_info in function_line_list:
            function_id = function_info.get('id')
            query = session.query(UseCase).filter_by(function_id=function_id).filter_by(status=1).\
                order_by(UseCase.create_time.desc())
            use_case_list = [use_case.to_dict() for use_case in query]
            use_case_with_function_line_dict[function_id] = use_case_list
        return use_case_with_function_line_dict
def get_single_use_case(use_case_id):
    """Return the use case with the given id as a dict.

    NOTE: raises IndexError when no row matches (the [0] on an empty list).
    """
    with session_scope() as session:
        query = session.query(UseCase).filter_by(id=use_case_id)
        use_case_info = [use_case.to_dict() for use_case in query][0]
        return use_case_info
def query_use_case_count(**kwargs):
    """Count active (status == 1) use cases matching the given filters."""
    with session_scope() as session:
        use_case_count = session.query(UseCase).filter_by(status=1).filter_by(**kwargs).count()
        return use_case_count
def modify_use_case(**kwargs):
    """Update a UseCase row.

    kwargs must contain 'id' (the row to update) plus the columns to change.
    Returns the id of the updated row.
    """
    with session_scope() as session:
        # use a descriptive name rather than shadowing the builtin `id`
        use_case_id = kwargs.pop('id')
        session.query(UseCase).filter_by(id=use_case_id).update(kwargs)
        return use_case_id
def del_use_case(**kwargs):
    """
    Soft-delete a use case (set status to 0).
    Also deactivates every batch and interface relation of that use case.
    :param kwargs: must contain 'id', the use-case id
    :return:
    """
    with session_scope() as session:
        id = kwargs.pop('id')
        session.query(UseCase).filter_by(id=id).update({'status': 0})
        session.query(BatchUseCaseRelation).filter_by(use_case_id=id).update({'status': 0})
        session.query(UseCaseInterfaceRelation).filter_by(use_case_id=id).update({'status': 0})
def get_max_order(use_case_id):
    """
    Return the largest interface `order` under the given use case.
    :param use_case_id: use_case id
    :return: the maximum order, or 0 when no active interface is linked
    """
    with session_scope() as session:
        max_order_relation = session\
            .query(UseCaseInterfaceRelation)\
            .filter_by(use_case_id=use_case_id, status=1)\
            .order_by(desc(UseCaseInterfaceRelation.order))\
            .first()
        if not max_order_relation:
            return 0
        else:
            return max_order_relation.order
def add_relation(use_case_id, interface_id, order=None):
    """
    Link an interface to a use case.
    When no order is given, the interface is appended after the current
    maximum; otherwise every relation at or beyond `order` is shifted up
    by one to make room.
    :param use_case_id: use_case id
    :param interface_id: interface id
    :param order: desired position; appended last when falsy
    :return: id of the newly created relation row
    """
    with session_scope() as session:
        if not order:
            order = get_max_order(use_case_id) + 1
        else:
            session\
                .query(UseCaseInterfaceRelation)\
                .filter_by(use_case_id=use_case_id)\
                .filter(UseCaseInterfaceRelation.order >= order)\
                .update({'order': UseCaseInterfaceRelation.order + 1})
        relation = UseCaseInterfaceRelation(use_case_id=use_case_id, interface_id=interface_id, order=order)
        session.add(relation)
        # flush so the auto-generated id is available before commit
        session.flush()
        return relation.id
def update_eval_relation(id, eval_string):
    """
    Store a new eval_string on the given use-case/interface relation.
    :param id: relation id
    :param eval_string: expression text to save
    :return:
    """
    with session_scope() as session:
        session.query(UseCaseInterfaceRelation).filter_by(id=id).update({'eval_string': eval_string})
def get_relation(**kwargs):
    """
    Look up use-case/interface relations; behaviour depends on the filter:
    use_case_id: the interfaces contained in a use case
    interface_id: the use cases an interface belongs to
    :param: use_case_id or interface_id
    :return: list of active relation dicts ordered by `order`
    """
    with session_scope() as session:
        query = session\
            .query(UseCaseInterfaceRelation)\
            .filter_by(**kwargs)\
            .filter_by(status=1).order_by(UseCaseInterfaceRelation.order)
        # detach results so they remain usable after the session closes
        session.expunge_all()
        relation_list = [s_relation.to_dict() for s_relation in query]
        return relation_list
def del_relation(relation_id):
    """
    Soft-delete a use-case/interface relation.
    Every relation with a larger order in the same use case is shifted
    down by one so the ordering stays contiguous.
    :param relation_id: relation id
    :return:
    """
    with session_scope() as session:
        relation = session.query(UseCaseInterfaceRelation).filter_by(id=relation_id).one()
        current_order = relation.order
        use_case_id = relation.use_case_id
        session.query(UseCaseInterfaceRelation).filter_by(id=relation_id).update({'status': 0})
        session\
            .query(UseCaseInterfaceRelation)\
            .filter_by(use_case_id=use_case_id)\
            .filter(UseCaseInterfaceRelation.order > current_order)\
            .update({'order': UseCaseInterfaceRelation.order - 1})
def modify_interface_delay_relation(relation_id, interface_delay):
    """
    Update interface_delay on a relation row.
    :param relation_id: relation id
    :param interface_delay: new delay value
    :return:
    """
    with session_scope() as session:
        session.query(UseCaseInterfaceRelation).filter_by(id=relation_id).update({'interface_delay': interface_delay})
def reorder_relation(relation_id, new_order):
    """
    Move an existing interface to a new position within its use case.
    Every relation between the old and new positions is shifted by one
    (down when moving forward, up when moving backward).
    :return:
    """
    with session_scope() as session:
        relation = session.query(UseCaseInterfaceRelation).filter_by(id=relation_id).one()
        current_order = relation.order
        use_case_id = relation.use_case_id
        if current_order == new_order:
            return
        elif current_order < new_order:
            # moving forward: pull the in-between relations one step back
            session\
                .query(UseCaseInterfaceRelation)\
                .filter_by(use_case_id=use_case_id)\
                .filter(UseCaseInterfaceRelation.order > current_order)\
                .filter(UseCaseInterfaceRelation.order <= new_order)\
                .update({'order': UseCaseInterfaceRelation.order - 1})
        elif current_order > new_order:
            # moving backward: push the in-between relations one step forward
            session\
                .query(UseCaseInterfaceRelation)\
                .filter_by(use_case_id=use_case_id)\
                .filter(UseCaseInterfaceRelation.order < current_order)\
                .filter(UseCaseInterfaceRelation.order >= new_order)\
                .update({'order': UseCaseInterfaceRelation.order + 1})
        session\
            .query(UseCaseInterfaceRelation)\
            .filter_by(id=relation_id)\
            .update({'order': new_order})
def add_case_parameter_relation(**kwargs):
    """
    Attach a parameter record to a use case.
    :param kwargs: UseCaseParameterRelation column values
    :return: id of the new row
    """
    with session_scope() as session:
        use_case_parameter = UseCaseParameterRelation(**kwargs)
        session.add(use_case_parameter)
        # flush so the auto-generated id is available before commit
        session.flush()
        return use_case_parameter.id
def get_case_parameter_relation(**kwargs):
    """
    Query active parameters linked to a use case.
    :param kwargs: filter columns
    :return: list of parameter dicts
    """
    with session_scope() as session:
        query = session.query(UseCaseParameterRelation).filter_by(**kwargs).filter_by(status=1)
        # detach results so they remain usable after the session closes
        session.expunge_all()
        parameter_list = [s_param.to_dict() for s_param in query]
        return parameter_list
def modify_case_parameter_relation(**kwargs):
    """
    Update a use-case parameter record.
    :param kwargs: must contain 'id' plus the columns to change
    :return:
    """
    with session_scope() as session:
        id = kwargs.pop('id')
        session.query(UseCaseParameterRelation).filter_by(id=id).update(kwargs)
def del_case_parameter_relation(**kwargs):
    """
    Soft-delete use-case parameters matching the filters (set status to 0).
    :param kwargs: filter columns
    :return:
    """
    with session_scope() as session:
        session.query(UseCaseParameterRelation).filter_by(**kwargs).update({'status': 0})
| cnboysliber/AutoTest | application/api/use_case.py | use_case.py | py | 9,261 | python | en | code | 4 | github-code | 90 |
23902072839 | # -*- coding: utf-8 -*-
from django.contrib.auth.models import User
from django.shortcuts import redirect
from projeto.models import Projeto
from desenvolvedor.models import Trabalha_em
from django.contrib import messages
from django.contrib.auth.forms import UserCreationForm
class Desenvolvedor_helper:
    """Static helpers around the developer <-> project relationship."""

    @staticmethod
    def trabalha(u_id, p_id):
        """Return True when the project exists and user u_id works on it.

        BUG FIX: a missing project used to raise an uncaught
        Projeto.DoesNotExist instead of returning False.
        """
        try:
            Projeto.objects.get(id=p_id)
            Trabalha_em.objects.get(user=u_id, projeto=p_id)
        except (Projeto.DoesNotExist, Trabalha_em.DoesNotExist):
            return False
        return True

    @staticmethod
    def get_working_projects(u_id):
        """Projects user u_id works on (raw SQL, parameterized)."""
        return Trabalha_em.objects.raw(
            "SELECT p.* FROM desenvolvedor_trabalha_em as t, projeto_projeto as p "
            "WHERE t.user_id=%s and p.id=t.projeto_id", [u_id])

    @staticmethod
    def get_users_project(p_id):
        """Users working on project p_id (raw SQL, parameterized)."""
        return Trabalha_em.objects.raw(
            "SELECT a.* FROM desenvolvedor_trabalha_em as t, auth_user as a "
            "WHERE t.projeto_id = %s and a.id = t.user_id", [p_id])

    @staticmethod
    def must_work_at(func):
        """View decorator: redirect with an error message unless the
        requesting user works on the project given by kwarg p_id."""
        def new_func(request, *args, **options):
            if Desenvolvedor_helper.trabalha(request.user.id, options.get("p_id")):
                return func(request, *args, **options)
            else:
                messages.error(request, "Você não trabalha nesse projeto e/ou este projeto não existe.")
                return redirect("/")
        return new_func
class CreateUserForm(UserCreationForm):
    """User-registration form with extra profile fields and Portuguese labels."""
    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email')
    def __init__(self, rqt=None):
        # Forward the bound POST data (rqt) when present;
        # otherwise build an unbound (empty) form.
        if rqt != None:
            super(CreateUserForm, self).__init__(rqt)
        else:
            super(CreateUserForm, self).__init__()
        # Relabel the form fields in Portuguese for the UI.
        self.fields['username'].label = 'Nome de usuário'
        self.fields['first_name'].label = 'Nome'
        self.fields['last_name'].label = 'Sobrenome'
        self.fields['email'].label = 'E-mail'
        self.fields['password1'].label = 'Senha'
        self.fields['password2'].label = 'Confirmar Senha'
| mvsousa/university | database_systems/sgrs/desenvolvedor/helper.py | helper.py | py | 2,071 | python | pt | code | 0 | github-code | 90 |
class Solution(object):
    # Which column of each [type, color, name] item a rule key refers to.
    _KEY_INDEX = {"type": 0, "color": 1, "name": 2}

    def countMatches(self, items, ruleKey, ruleValue):
        """
        :type items: List[List[str]]
        :type ruleKey: str
        :type ruleValue: str
        :rtype: int
        """
        # Replace the if/elif chain with a single index lookup.
        idx = self._KEY_INDEX[ruleKey]
        c = sum(1 for item in items if item[idx] == ruleValue)
        # Keep the original diagnostic print, but also return the count so
        # the method honours its documented ":rtype: int" contract.
        print(c)
        return c
# Quick manual check: expect 2 items whose type is "phone" (printed).
Solution().countMatches([["phone","blue","pixel"],["computer","silver","phone"],["phone","gold","iphone"]],"type","phone")
| AaroneGeorge/LEETCODE | string/1773/Solution.py | Solution.py | py | 697 | python | en | code | 0 | github-code | 90 |
23190145358 | from bed.apnmc import APNMC, IS_Proposal
from bed.vnmc import VNMC
from simulator.toy2 import Toy
import torch
import numpy as np
import pickle
import matplotlib.pyplot as plt
def compute_ground_truth_eig(args):
    """Estimate the EIG for one design point.

    args is a 5-tuple (design1, design2, noise_std, n_out, n_in); the two
    design coordinates are concatenated into the simulator's initial design.
    """
    design1, design2, noise_std, n_out, n_in = args
    init_design = torch.cat((design1,design2))
    simulator = Toy(init_design, noise_std=noise_std)
    vnmc = VNMC(simulator)
    # NOTE(review): pce presumably returns a scalar PCE estimate of the EIG
    # using n_out outer / n_in inner samples -- confirm against bed.vnmc.
    return vnmc.pce( n_out, n_in)
if __name__ == "__main__":
# results for noise_std = 0.01
n_grids = 41
x = torch.linspace(0,1,n_grids)
y = torch.linspace(0,1,n_grids)
X, Y = torch.meshgrid(x, y)
noise_std = 0.01
n_out = 10000
n_in = 10000
zipper = zip(
X.reshape(n_grids**2,1),
Y.reshape(n_grids**2,1),
n_grids**2*[noise_std],
n_grids**2*[n_out],
n_grids**2*[n_in])
results = np.zeros(n_grids**2)
for i, args in enumerate(zipper):
results[i] = compute_ground_truth_eig(args)
if i % n_grids == 1:
with open("results/toy/ground_truth_noise_std_{0}.pkl".format(noise_std), "wb") as file:
pickle.dump(results.reshape(n_grids, n_grids), file)
| ziq-ao/GradEIG | scripts/experiments_toy_ground_truth.py | experiments_toy_ground_truth.py | py | 1,177 | python | en | code | 2 | github-code | 90 |
17959219774 | import random
# Draw the secret number between 1 and 100.
numero_secreto = random.randint(1, 100)
# Number of guesses taken so far.
tentativas = 0
print("Bem-vindo ao jogo Quente e Frio!")
print("Tente adivinhar o número secreto entre 1 e 100.")
while True:
    # Ask the player for a guess.
    palpite = int(input("Digite o seu palpite: "))
    # Count this attempt.
    tentativas += 1
    # A correct guess ends the game.
    if palpite == numero_secreto:
        print(f"Parabéns! Você acertou o número secreto ({numero_secreto}) em {tentativas} tentativas.")
        break
    else:
        # Distance between the guess and the secret number.
        diferenca = abs(numero_secreto - palpite)
        # Within 10 counts as "hot" (Quente), otherwise "cold" (Frio).
        if diferenca <= 10:
            print("Quente!")
        else:
            print("Frio!")
| AnaMedlyn/Python | Desafios/brincadeira quente e frio.py | brincadeira quente e frio.py | py | 964 | python | pt | code | 0 | github-code | 90 |
30507055715 | '''
@auther: Samaneh
'''
from Bio import SeqIO
from pandas import ExcelWriter
import numpy as np
import pandas as pd
# Reference fasta files to scan.
# sprot
SprotRef1 = "/home/samaneh/AHRD/data/reference/non_red_sprot_batch_3_references.fasta"
SprotRef2 = "/home/samaneh/AHRD/data/reference/non_red_sprot_batch_4_references.fasta"
# Tair
TairRef1 = "/home/samaneh/AHRD/data/reference/nrTair/non_red_tair10_batch_1_references.fasta"
TairRef2 = "/home/samaneh/AHRD/data/reference/nrTair/non_red_tair10_batch_2_references.fasta"
# Blumeria
BlumeriaRef = "/home/samaneh/AHRD/data/reference/blumeria_graminis_references.fasta"

# Load the blacklist of description tokens, normalising each line
# (drop the regex prefix, trailing newline, escaped spaces and '+').
with open("/home/samaneh/AHRD/data/blacklist_descline.txt", "r") as des_blacklist:
    blacklist = des_blacklist.readlines()
desList = []
for token in blacklist:
    desList.append(token.lstrip("(?i)^").rstrip("\r\n ").replace("\s", " ").replace("+", ""))

refFiltered = ExcelWriter("/home/samaneh/AHRD/outputs/5references_filtered.xlsx")

def _collect_filtered(fasta_paths, strip_extra=""):
    """Scan fasta files and return (record ids, unique descriptions) for
    every record whose description contains a blacklisted token
    (case-insensitive).

    The record-name prefix is removed from each description; `strip_extra`
    lists additional characters stripped from both ends (e.g. Tair markup).
    """
    rec_ids, descriptions = [], []
    for path in fasta_paths:
        for record in SeqIO.parse(path, "fasta"):
            for token in desList:
                if token.lower() in record.description.lower():
                    # BUG FIX: the original never populated its dedup list,
                    # so ids were appended once per matching token.
                    if record.id not in rec_ids:
                        rec_ids.append(record.id)
                    # BUG FIX: str.strip(record.name) strips any run of
                    # characters drawn from the name, not the name itself;
                    # remove the prefix explicitly instead.
                    desc = record.description
                    if desc.startswith(record.name):
                        desc = desc[len(record.name):]
                    desc = desc.strip(" " + strip_extra)
                    if desc not in descriptions:
                        descriptions.append(desc)
    return rec_ids, descriptions

# One pass per reference set (was five near-identical copy-pasted loops).
sprotRecList, sprotFilteredList = _collect_filtered([SprotRef1, SprotRef2])
tairRecList, tairFilteredList = _collect_filtered([TairRef1, TairRef2], strip_extra="|Symbols:")
blumeriaRecList, blumeriaFilteredList = _collect_filtered([BlumeriaRef])

# Write one sheet per reference set.
sprotFilteredDf = pd.DataFrame(sprotFilteredList)
sprotFilteredDf.to_excel(refFiltered, "sprot")
tairFilteredDf = pd.DataFrame(tairFilteredList)
tairFilteredDf.to_excel(refFiltered, "tair")
blumeriaFilteredDf = pd.DataFrame(blumeriaFilteredList)
blumeriaFilteredDf.to_excel(refFiltered, "blumeria")
refFiltered.save()
| samiscoding/blacklistsTest | Sprot_Tair_Blumeria_ReferenceFilteredByBlacklist.py | Sprot_Tair_Blumeria_ReferenceFilteredByBlacklist.py | py | 3,079 | python | en | code | 0 | github-code | 90 |
n = int(input())
s = [input() for _ in range(n)]
# Count how many names start with each of the five MARCH initials.
cnt = [0] * 5
for name in s:
    for pos, letter in enumerate("MARCH"):
        if name[0] == letter:
            cnt[pos] += 1
# Fewer than three non-empty groups -> no valid triple exists.
if cnt.count(0) > 2:
    print(0)
    exit()
# Sum the products over every unordered triple of distinct initials.
ans = 0
for i in range(3):
    for j in range(i + 1, 4):
        for k in range(j + 1, 5):
            ans += cnt[i] * cnt[j] * cnt[k]
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03425/s203902911.py | s203902911.py | py | 497 | python | en | code | 0 | github-code | 90 |
35643357780 | # python3
class Process:
    # Simulated worker: its index, a busy flag, and when it becomes free.
    def __init__(self, number, processing, seconds):
        self.number = number  # worker index
        self.processing = processing  # True while running a job
        self.seconds = seconds  # time at which the worker is next free
class Data:
    # One job: its duration (`number`) and a scheduled flag.
    def __init__(self, number, taken):
        self.number = number  # job duration in seconds
        self.taken = taken  # note: never mutated anywhere in this file
def parallel_processing(n, m, data):
    """Simulate n identical workers consuming the m job durations in `data`
    in order; return [[worker_index, start_second], ...] one entry per job.

    Each job goes to the lowest-indexed idle worker at the earliest time one
    is available.
    """
    output = []
    processes = [Process(index, False, 0) for index in range(n)]
    dataList = [Data(duration, False) for duration in data]
    seconds = 0
    for da in dataList:
        taken = False
        while not taken:
            for process in processes:
                # A worker whose free-time has passed is idle again.
                if process.seconds <= seconds:
                    process.processing = False
                    if not taken:
                        taken = add_job(process, seconds, da, output)
            if not taken:
                # PERF: jump straight to the earliest time a worker frees
                # up instead of busy-waiting one second at a time; this
                # yields the same start times as the original +1 loop.
                seconds = max(seconds + 1, min(p.seconds for p in processes))
    return output
def add_job(process, seconds, da, output):
    """Try to assign job `da` to `process` at time `seconds`.

    Succeeds only when the process is idle and already free: it is marked
    busy, its free-time advances by the job duration, [worker index, start
    second] is appended to `output`, and True is returned.  Otherwise
    nothing changes and False is returned.
    """
    if process.processing or process.seconds > seconds:
        return False
    process.processing = True
    process.seconds = process.seconds + da.number
    output.append([process.number, seconds])
    return True
def main():
    # Read "n m" (workers, jobs), then the m job durations, run the
    # simulation, and print one "worker_index start_time" line per job.
    n = 0
    m = 0
    data = []
    firstLine = input()
    inputArray = firstLine.split()
    n = int(inputArray[0])
    m = int(inputArray[1])
    secondLine = input()
    inputArray = secondLine.split()
    for i in range(0, m):
        ele = int(inputArray[i])
        data.append(ele)
    result = parallel_processing(n,m,data)
    for i, j in result:
        print(i, j)
if __name__ == "__main__":
main() | DA-testa/parallel-processing-ElinaNorenberga | main.py | main.py | py | 1,682 | python | en | code | 0 | github-code | 90 |
3816692875 | #######################################################
### Fahimeh Baftizadeh: ###############################
#######################################################
# How to run this code from the command line:
# Make sure, you have a directory for output such as "new_keras_models/"
#python -m Keras_imbalanced_trainer --batch_size 500 --epochs 10 --run_iter 0 --n_features 4020 --n_celltypes 93 --n_hidden 10 --n_test_cells 10 --dropout 0.6
#######################################################
### Loading libraries: ################################
#######################################################
import csv
import timeit
import random
import argparse
import datetime
import itertools
import numpy as np
import pandas as pd
import tensorflow as tf
from pathlib import Path
from scipy.sparse import issparse
from sklearn.base import clone
from sklearn.utils import safe_indexing
from sklearn.metrics import confusion_matrix
from sklearn.utils import check_random_state
from sklearn.utils.testing import set_random_state
from sklearn.model_selection import train_test_split
from keras import regularizers
from keras.optimizers import Adam
from keras.utils import Sequence
from keras.models import Sequential
from keras.callbacks import CSVLogger
from keras.layers import Activation, Dense, Dropout, BatchNormalization, Flatten
from imblearn.utils import Substitution
from imblearn.utils._docstring import _random_state_docstring
from imblearn.tensorflow import balanced_batch_generator as tf_bbg
from imblearn.keras import BalancedBatchGenerator
#######################################################
### Reading inputs from the command line: #############
#######################################################
print(tf.__version__)
# Command-line interface: training hyperparameters, data layout and outputs.
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", default=500, type=int, help="Batch size")
parser.add_argument("--epochs", default=10, type=int, help="Number of training epochs")
parser.add_argument("--run_iter", default=0, type=int, help="Run iteration")
parser.add_argument("--data_dir", default='Manuscript_patchseq_2019', help='parent dir')
parser.add_argument("--output_dir", default='Keras_models', help='model dir')
parser.add_argument("--n_features", default=4020, type=int, help="Number of features")
parser.add_argument("--n_celltypes",default=93, type=int, help="Number of celltypes or labels")
parser.add_argument("--n_hidden", default=10, type=int, help="size of hidden layer")
parser.add_argument("--n_test_cells", default=10, type=int, help="N test cells")
parser.add_argument("--dropout", default=0.6, type=float, help="drop out")
#######################################################
### Functions: ########################################
#######################################################
def read_data(path):
    """Load a genes-x-cells CSV and return it transposed as cells-x-genes.

    The CSV's first (unnamed) column holds the gene names; after
    transposing, those names become the column labels and the row that
    held them is dropped.
    """
    raw = pd.read_csv(path)
    gene_names = list(raw['Unnamed: 0'])
    transposed = raw.T
    transposed.columns = gene_names
    return transposed.drop(axis=0, labels='Unnamed: 0')
def read_labels(path, select_cl):
    """Load the label CSV (indexed by cell name) and re-factor `cl`.

    Cluster labels found in `select_cl` are renumbered 0..len(select_cl)-1
    by their position in `select_cl`; all others become NaN.  The previous
    factor codes are preserved in a new `old_factor_cl` column.
    """
    labels = pd.read_csv(path)
    labels.index = list(labels['Unnamed: 0'])
    labels = labels.drop(axis=1, labels='Unnamed: 0')
    new_codes = np.arange(len(select_cl)).tolist()
    remapped = []
    for cl_value in labels.cl:
        if cl_value in select_cl:
            remapped.append(new_codes[select_cl.index(cl_value)])
        else:
            remapped.append(np.nan)
    labels["old_factor_cl"] = labels["factor_cl"]
    labels["factor_cl"] = remapped
    return labels
def split_data_intwo(data, labels, test_size, cvset):
    """Random train/test split seeded by `cvset`.

    Returns (train_data, test_data, train_labels, test_labels).
    """
    split = train_test_split(data, labels, test_size=test_size, random_state=cvset)
    train_data, test_data, train_labels, test_labels = split
    return train_data, test_data, train_labels, test_labels
def make_model(n_features, HL, FL, dropout):
    """Build the classifier: input dropout -> Dense(HL, L2-regularised) ->
    batch-norm -> ReLU -> softmax over FL classes; compiled with Adam and
    sparse categorical cross-entropy."""
    model = Sequential()
    model.add(Dropout(dropout, input_shape=(n_features,)))
    model.add(Dense(HL,
                    kernel_regularizer= regularizers.l2(0.01),
                    bias_regularizer=regularizers.l2(0.01)))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(Dense(FL, activation=tf.nn.softmax))
    model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
    return model
#######################################################
### Keras trainer: ####################################
#######################################################
def main(batch_size=500, epochs=10, run_iter=0, data_dir='/Manuscript_Patchseq_2019',
         output_dir = "/new_Keras_models", n_features = 4020, n_celltypes = 93,
         n_hidden=10, dropout = 0.6, n_test_cells = 1464):
    """Train the classifier on a random FACS split and persist the outputs.

    Writes the per-test-cell class-membership CSV, the trained Keras model
    (.h5), and the list of held-out test cells.
    """
    model_id = '{0}_{1}'.format(epochs, batch_size)
    fileid = '{0}_ri_{1}'.format(model_id, run_iter)
    facs_output_id = 'facs_membership_{0}'.format(run_iter)
    cells_output_id = '{0}_testcells_{1}'.format(model_id, run_iter)

    # Cell-type labels kept for classification, plus normalised expression.
    V1_cl = pd.read_csv(data_dir + "/select_cl.csv")['x'].tolist()
    FACs_data = read_data(data_dir + "/FACs_norm.csv")
    FACs_labels = read_labels(data_dir + "/FACs_correct_labels.csv", V1_cl)['factor_cl']
    FACs_cells = FACs_data.index.tolist()
    FACs_membership = pd.DataFrame(0, index=FACs_cells, columns=V1_cl)

    # Random held-out split; the seed changes on every invocation.
    train_data, test_data, train_labels, test_labels = split_data_intwo(
        FACs_data, FACs_labels, test_size=n_test_cells, cvset=random.randint(0, 10000))
    test_cells = test_data.index.tolist()

    start_time = timeit.default_timer()
    model = make_model(n_features, n_hidden, n_celltypes, dropout)
    results = model.fit(train_data, train_labels, epochs=epochs, batch_size=batch_size, verbose=0)

    # Predicted class-membership probabilities for the held-out cells.
    facs_memb = pd.DataFrame(model.predict(test_data), index=test_cells, columns=V1_cl)
    FACs_membership.loc[test_cells] = FACs_membership.loc[test_cells] + facs_memb.loc[test_cells]

    print(datetime.datetime.now())
    elapsed = timeit.default_timer() - start_time
    score, acc = model.evaluate(test_data, test_labels,
                                batch_size=batch_size, verbose=0)
    print('Test accuracy:', acc)
    print('-------------------------------')
    print('Training time:', elapsed)
    print('-------------------------------')

    FACs_membership.to_csv(output_dir + facs_output_id + '.csv')
    model.save(output_dir + fileid + '.h5')
    with open(output_dir + cells_output_id + '.csv', 'w') as myfile:
        wr = csv.writer(myfile)
        wr.writerow(test_cells)
if __name__ == "__main__":
args = parser.parse_args()
main(**vars(args))
| Fahimeh1983/NN_patchseq_FACs_mapping | Keras_imbalanced_trainer.py | Keras_imbalanced_trainer.py | py | 6,981 | python | en | code | 0 | github-code | 90 |
74305925415 | '''
BFS + DFS on graph
1. BFS from end to start, count each node's distance to end, saving a distance hashmap
2. DFS from start to end, ensure every step make the distance to end closer!
3. finding next_words: every ward has L char, each char has 25 changes, new word shall in dict
errors
1. how to import deque
2. wrong indentation when defining the function
3. deep copy in dfs
logic
1. in bfs, if next_word is already in distance hashmap, skip it, since it's going back in graph
- direction of deque for FIFO? Both work: Left in right out; right in left out
- use distance hashmap = {}: 1. avoid go back in graph, 2. track the steps to target
- in Word Latter 1, used visted = set() to avoid go back in the graph
- not level order traversal required
2. in dfs, would the distance[next_word] != distance[start] - 1 garantee there is no turning back in graph?
3. what's the purpose of dfs?
review
why not use BFS directly? - try use it directly, shall work, but slow
- will get into the paths that won't lead to the shortest path, in bfs, each node is a paths
- get the distance to the end with BFS, helps avoid picking longer path in DFS (2 BFS should also work)
- BFS 在树上,在图上还是不熟, 什么时候可以不用level order traversal,deque怎么使用,怎么根据当前层找出下一层,避免往回走
- DFS 在图上,需要哪些参数?(cur, end), dict, distance, (path, result)如何避免往回走?什么时候需要deepcopy?
'''
from collections import deque
class Solution:
    """
    @param: start: a string
    @param: end: a string
    @param: dict: a set of string
    @return: a list of lists of string
    """
    def findLadders(self, start, end, dict):
        # Both endpoints must be valid transformation words.
        dict.add(start)
        dict.add(end)
        # Hop distance from every reachable word to `end`
        # (BFS is run from the target backwards).
        distance = self.bfs_getDistance(end, start, dict)
        results = []
        self.dfs_findPath(start, end, dict, distance, [start], results)
        return results

    def bfs_getDistance(self, start, end, dict):
        """BFS from `start`, recording each word's hop distance from it."""
        distance = {start: 0}
        queue = deque([start])
        while queue:
            for _ in range(len(queue)):
                word = queue.popleft()
                for neighbour in self.find_nextWords(word, dict):
                    if neighbour in distance:
                        continue  # already reached at an equal or smaller depth
                    distance[neighbour] = distance[word] + 1
                    queue.append(neighbour)
        return distance

    def find_nextWords(self, word, dict):
        """All dictionary words differing from `word` in exactly one letter."""
        candidates = []
        for position in range(len(word)):
            for letter in 'abcdefghijklmnopqrstuvwxyz':
                candidate = word[:position] + letter + word[position + 1:]
                if candidate != word and candidate in dict:
                    candidates.append(candidate)
        return candidates

    def dfs_findPath(self, curt, end, dict, distance, path, results):
        """Extend `path` from `curt`, stepping only strictly closer to `end`;
        complete shortest paths are appended to `results`."""
        if curt == end:
            results.append(list(path))  # deep copy of the current path
            return
        for neighbour in self.find_nextWords(curt, dict):
            # Only follow edges that reduce the remaining distance by one --
            # this keeps every completed path a shortest path.
            if neighbour in distance and distance[neighbour] == distance[curt] - 1:
                path.append(neighbour)
                self.dfs_findPath(neighbour, end, dict, distance, path, results)
                path.pop()
| fudigit/Basic_Algorithm | 7.Permutation-based&Graph-based_DFS/121.Word_Ladder_II.py | 121.Word_Ladder_II.py | py | 3,962 | python | en | code | 0 | github-code | 90 |
39819246607 | import os
import streamlit as st
from pymongo import MongoClient
import pandas as pd
from bson import ObjectId
class helper:
def __init__(_self):
"""
Constructor for the helper class.
Contains helper fuctions for this streamlit application.
"""
_self.gpt_api_key = st.secrets['chatgpt']['api_key']
_self.db_conn = _self.mongo_connection()
@st.experimental_singleton(suppress_st_warning=True)
def mongo_connection(_self):
"""
Creates a connection to the LLM mongo database.
Returns: A connection to the LLM mongo database.
"""
user = st.secrets['mongodb']['username']
pwd = st.secrets['mongodb']['password']
cluster = st.secrets['mongodb']['cluster']
server = st.secrets['mongodb']['server']
connect = 'mongodb+srv://' + user + ':'
connect += pwd + '@' + cluster
connect += server
connect += '?retryWrites=true&w=majority'
_self.mongo_db = MongoClient(connect)
return _self.mongo_db
def session_variable_exists(variable):
"""
Checks if a session variable exists.
Returns: True if the session variable exists, and contains a value.
"""
exists = variable in st.session_state
if not exists:
return False
else:
return st.session_state[variable] is not None
| mattwibboweaver/CGPT4 | helper.py | helper.py | py | 1,462 | python | en | code | 0 | github-code | 90 |
18004000359 | import sys,copy
from itertools import accumulate
input=sys.stdin.readline
N=int(input())
A=list(map(int,input().split()))
A=list(accumulate(A))
B=copy.copy(A)
test1=0
s=0
for i in range(0,N):
A[i]+=s
if i%2==0:
if 0>=A[i]:
test1+=1-A[i]
s+=1-A[i]
else:
if A[i]>=0:
test1+=A[i]+1
s-=A[i]+1
test2=0
s=0
for i in range(0,N):
B[i]+=s
if i%2==0:
if B[i]>=0:
test2+=B[i]+1
s-=B[i]+1
else:
if 0>=B[i]:
test2+=1-B[i]
s+=1-B[i]
print(min(test1,test2)) | Aasthaengg/IBMdataset | Python_codes/p03739/s470455929.py | s470455929.py | py | 600 | python | en | code | 0 | github-code | 90 |
18378824959 | # A - Security
# https://atcoder.jp/contests/abc131/tasks/abc131_a
s = input()
result = 'Good'
i = 0
j = 1
while j < len(s):
if s[i] == s[j]:
result = 'Bad'
break
i += 1
j += 1
print(result)
| Aasthaengg/IBMdataset | Python_codes/p02993/s816511498.py | s816511498.py | py | 223 | python | en | code | 0 | github-code | 90 |
44140998001 | class Solution:
def leafSimilar(self, root1: Optional[TreeNode], root2: Optional[TreeNode]) -> bool:
"""
Time: O(n+m)
Space: O(max(log(n), log(m))) to O(max(n, m))
"""
seq = []
def dfs(node: TreeNode):
if not node.left and not node.right:
seq.append(node.val)
if node.left:
dfs(node.left)
if node.right:
dfs(node.right)
dfs(root1)
dfs(root2)
print(seq)
return seq[:len(seq)//2] == seq[len(seq)//2:]
| rajrkane/daileetcode | 872_leafSimilar.py | 872_leafSimilar.py | py | 572 | python | en | code | 0 | github-code | 90 |
30767350958 | import math
import random
import numpy as np
import pandas as pd
import folium
import os
import matplotlib.pyplot as plt
from matplotlib.pylab import mpl
mpl.rcParams['font.sans-serif'] = ['SimHei'] # 添加这条可以让图形显示中文
#####对一系列装卸货点进行适当的路径规划,在满足约束条件(客户需求、车辆载重和容积、车型、车辆行驶里程、配送时间窗、配送中心数量等限制)
# 和目标最优化(路程最短、成本最低、使用车辆数最少、配送时间最快等)下,将客户的配送需求从配送中心送达客户点,或从客户点送回配送中心。
# def calDistance(CityCoordinates):
# '''
# 计算城市间距离
# 输入:CityCoordinates-城市坐标;
# 输出:城市间距离矩阵-dis_matrix
# '''
# dis_matrix = pd.DataFrame(data=None, columns=range(len(CityCoordinates)), index=range(len(CityCoordinates)))
# for i in range(len(CityCoordinates)):
# xi, yi = CityCoordinates[i][0], CityCoordinates[i][1]
# for j in range(len(CityCoordinates)):
# xj, yj = CityCoordinates[j][0], CityCoordinates[j][1]
# dis_matrix.iloc[i, j] = round(math.sqrt((xi - xj) ** 2 + (yi - yj) ** 2), 2)
# return dis_matrix
def car_type_data(car_type):
# 车辆参数
CAPACITY = 0 # 车辆最大容量
C0 = 0 # 车辆启动成本
C1 = 0 # 车辆单位距离行驶成本
Z0 = 0 # 装货效率 单位 方/min
Z1 = 0 #卸货效率
if car_type == 4.2:
CAPACITY = 16
C0 = 200
C1 = 1.686
Z0 = 0.227
Z1 = 0.544
if car_type == 5.2:
CAPACITY = 23
C0 = 220.8
C1 = 2.314
Z0 = 0.326
Z1 = 0.782
if car_type == 6.8:
CAPACITY = 38
C0 = 268.9666667
C1 = 3
Z0 = 0.538
Z1 = 1.292
return CAPACITY, C0, C1, Z0, Z1
def greedy(CityCoordinates, dis_matrix):
'''
贪婪策略构造初始解,初始化时将VRP简化为TSP进行构造。
输入:CityCoordinates-节点坐标,dis_matrix-距离矩阵
输出:初始解-line
'''
# 修改dis_matrix以适应求解需要
dis_matrix = dis_matrix.astype('float64')
for i in range(len(CityCoordinates)):
dis_matrix.loc[i, i] = math.pow(10, 10)
dis_matrix.loc[:, 0] = math.pow(10, 10) # 0不在编码内
line = [] # 初始化
now_city = random.randint(1, len(CityCoordinates) - 1) # 随机生成出发城市
line.append(now_city) # 添加当前城市到路径
dis_matrix.loc[:, now_city] = math.pow(10, 10) # 更新距离矩阵,已经过城市不再被取出
for i in range(1, len(CityCoordinates) - 1):
next_city = dis_matrix.loc[now_city, :].idxmin() # 距离最近的城市
line.append(next_city) # 添加进路径
dis_matrix.loc[:, next_city] = math.pow(10, 10) # 更新距离矩阵
now_city = next_city # 更新当前城市
return line
def calFitness(birdPop, Demand, dis_matrix, DISTABCE, V):
'''
贪婪策略分配车辆(解码),计算路径距离(评价函数)
输入:birdPop-路径,Demand-客户需求,dis_matrix-城市间距离矩阵,CAPACITY-车辆最大载重,DISTABCE-车辆最大行驶距离,C0-车辆启动成本,C1-车辆单位距离行驶成本;
输出:birdPop_car-分车后路径,fits-适应度
'''
birdPop_car, fits , birdPop_car_type, birdPop_car_dis = [], [], [], [] # 初始化
times, moneys = [], []
for j in range(len(birdPop)):
bird = birdPop[j]
lines = [] # 存储线路分车
line = [0] # 每辆车服务客户点
car_types = [] # 存储线路分车的车型
dis_sum = [] # 线路上的路径距离
time_car = [] # 线路上各路径所需时间
dis, d = 0, 0 # 当前客户距离前一个客户的距离、当前客户需求量
i = 0 # 指向配送中心
time_point = 0
car_type = 0 # 所需车型
# 车辆参数
CAPACITY = 0 # 车辆最大容量
C0 = 0 # 车辆启动成本
C1 = 0 # 车辆单位距离行驶成本
Z0 = 0 # 装货效率 单位 方/min
Z1 = 0 # 卸货效率
money = [] # 分车后的费用
while i < len(bird):
if (Demand[bird[i]] <= 16) & (dis == 0):
car_type = 4.2
elif (Demand[bird[i]] <= 16) & (dis != 0) & (car_type == 4.2):
car_type = 4.2
elif (Demand[bird[i]] <= 16) & (dis != 0) & (car_type == 5.2):
car_type = 5.2
else:
if Demand[bird[i]] > 16:
if (car_type == 4.2) & (dis != 0):
dis += dis_matrix.loc[line[-1], 0] # 当前车辆装满
time_point += dis_matrix.loc[line[-1], 0] / V
time_point += d / Z0 / 60
line.append(0)
dis_sum.append(dis)
lines.append(line)
car_types.append(car_type)
time_car.append(round(time_point, 3))
money.append(round(C1 * dis + C0, 1))
# 下一辆车
dis, d = 0, 0
time_point = 0
line = [0]
car_type = 5.2
elif (car_type == 4.2) & (dis == 0):
dis, d = 0, 0
time_point = 0
line = [0]
car_type = 5.2
else:
car_type = 5.2
if car_type == 4.2:
CAPACITY, C0, C1, Z0, Z1 = car_type_data(car_type)
if line == [0]: # 车辆未分配客户点
dis += dis_matrix.loc[0, bird[i]] # 记录距离
line.append(bird[i]) # 为客户点分车
d += Demand[bird[i]] # 记录需求量
time_point += dis_matrix.loc[0, bird[i]] / V
time_point += Demand[bird[i]] / Z1 / 60
i += 1 # 指向下一个客户点
else: # 已分配客户点则需判断车辆载重和行驶距离
if (dis_matrix.loc[line[-1], bird[i]] + dis_matrix.loc[bird[i], 0] + dis <= DISTABCE) & (
d + Demand[bird[i]] <= CAPACITY):
dis += dis_matrix.loc[line[-1], bird[i]]
line.append(bird[i])
d += Demand[bird[i]]
time_point += dis_matrix.loc[line[-1], bird[i]] / V
time_point += Demand[bird[i]] / Z1 / 60
i += 1
else:
dis += dis_matrix.loc[line[-1], 0] # 当前车辆装满
time_point += dis_matrix.loc[line[-1], 0] / V
time_point += d / Z0 / 60
line.append(0)
dis_sum.append(dis)
lines.append(line)
car_types.append(car_type)
time_car.append(round(time_point, 3))
money.append(round(C1 * dis + C0, 1))
# 下一辆车
dis, d = 0, 0
time_point = 0
line = [0]
if car_type == 5.2:
CAPACITY, C0, C1, Z0, Z1 = car_type_data(car_type)
if line == [0]: # 车辆未分配客户点
dis += dis_matrix.loc[0, bird[i]] # 记录距离
line.append(bird[i]) # 为客户点分车
d += Demand[bird[i]] # 记录需求量
time_point += dis_matrix.loc[0, bird[i]] / V
time_point += Demand[bird[i]] / Z1 / 60
i += 1 # 指向下一个客户点
else: # 已分配客户点则需判断车辆载重和行驶距离
if (dis_matrix.loc[line[-1], bird[i]] + dis_matrix.loc[bird[i], 0] + dis <= DISTABCE) & (
d + Demand[bird[i]] <= CAPACITY):
dis += dis_matrix.loc[line[-1], bird[i]]
line.append(bird[i])
d += Demand[bird[i]]
time_point += dis_matrix.loc[line[-1], bird[i]] / V
time_point += Demand[bird[i]] / Z1 / 60
i += 1
else:
dis += dis_matrix.loc[line[-1], 0] # 当前车辆装满
time_point += dis_matrix.loc[line[-1], 0] / V
time_point += d / Z0 / 60
line.append(0)
dis_sum.append(dis)
lines.append(line)
car_types.append(car_type)
time_car.append(round(time_point, 3))
money.append(round(C1 * dis + C0, 1))
# 下一辆车
dis, d = 0, 0
time_point = 0
line = [0]
# 最后一辆车
dis += dis_matrix.loc[line[-1], 0]
time_point += dis_matrix.loc[line[-1], 0] / V
time_point += d / Z0 / 60
line.append(0)
dis_sum.append(round(dis, 3))
lines.append(line)
car_types.append(car_type)
time_car.append(round(time_point, 3))
money.append(round(C1 * dis + C0, 1))
birdPop_car.append(lines)
fits.append(round(sum(money), 1))
birdPop_car_type.append(car_types)
birdPop_car_dis.append(dis_sum)
times.append(time_car)
moneys.append(money)
return birdPop_car, fits, birdPop_car_type, birdPop_car_dis, times, moneys
def crossover(bird, pLine, gLine, w, c1, c2):
'''
采用顺序交叉方式;交叉的parent1为粒子本身,分别以w/(w+c1+c2),c1/(w+c1+c2),c2/(w+c1+c2)
的概率接受粒子本身逆序、当前最优解、全局最优解作为parent2,只选择其中一个作为parent2;
输入:bird-粒子,pLine-当前最优解,gLine-全局最优解,w-惯性因子,c1-自我认知因子,c2-社会认知因子;
输出:交叉后的粒子-croBird;
'''
croBird = [None] * len(bird) # 初始化
parent1 = bird # 选择parent1
# 选择parent2(轮盘赌操作)
randNum = random.uniform(0, sum([w, c1, c2]))
if randNum <= w:
parent2 = [bird[i] for i in range(len(bird) - 1, -1, -1)] # bird的逆序
elif randNum <= w + c1:
parent2 = pLine
else:
parent2 = gLine
# parent1-> croBird
start_pos = random.randint(0, len(parent1) - 1)
end_pos = random.randint(0, len(parent1) - 1)
if start_pos > end_pos: start_pos, end_pos = end_pos, start_pos
croBird[start_pos:end_pos + 1] = parent1[start_pos:end_pos + 1].copy()
# parent2 -> croBird
list2 = list(range(0, start_pos))
list1 = list(range(end_pos + 1, len(parent2)))
list_index = list1 + list2 # croBird从后往前填充
j = -1
for i in list_index:
for j in range(j + 1, len(parent2) + 1):
if parent2[j] not in croBird:
croBird[i] = parent2[j]
break
return croBird
def draw_path(car_routes, CityCoordinates):
'''
#画路径图
输入:line-路径,CityCoordinates-城市坐标;
输出:路径图
'''
# for route in car_routes:
# x, y = [], []
# for i in route:
# Coordinate = CityCoordinates[i]
# x.append(Coordinate[0])
# y.append(Coordinate[1])
# x.append(x[0])
# y.append(y[0])
# plt.plot(x, y, 'o-', alpha=0.8, linewidth=0.8)
# plt.xlabel('x')
# plt.ylabel('y')
# plt.show()
m = folium.Map(location=[30.29924, 120.809512], zoom_start=10,
tiles='http://webrd02.is.autonavi.com/appmaptile?lang=zh_cn&size=1&scale=1&style=7&x={x}&y={y}&z={z}',
attr='default'
) # 中心区域的确定
color = ['red', 'blue', 'green', 'purple', 'orange', 'darkred', 'gray', 'black', 'pink']
l = []
location = []
p = car_routes
for point in p:
for i in point:
a = CityCoordinates[i]
l.append([a[1], a[0]])
location.append(l)
l = []
# print(location)
for i in range(len(location)):
loca = location[i]
point = p[i]
# print(loca)
# print(len(loca))
# print(point)
# print(len(point))
for j in range(len(loca)):
# print("第{}号点的坐标为{}".format(point[j], loca[j]))
folium.CircleMarker(location=[loca[j][0], loca[j][1]],
radius=4, popup=point[j],
color='black', fill=True,
fill_color='orange').add_to(m)
for i in range(len(location)):
route = folium.PolyLine( # polyline方法为将坐标用线段形式连接起来
location[i], # 将坐标点连接起来
weight=3, # 线的大小为3
color=color[i], # 线的颜色为橙色
popup=i,
opacity=0.8 # 线的透明度
).add_to(m)
m.save(os.path.join(r'C:\Users\22041210\PycharmProjects\pso_rode', 'cvrp.html')) # 将结果以HTML形式保存到桌面上
if __name__ == '__main__':
# 车辆参数
# CAPACITY = 23 # 车辆最大容量
DISTABCE = 1000 # 车辆最大行驶距离
V = 60 # 速度,km/h
# C0 = 220.8 # 车辆启动成本
# C1 = 2.314 # 车辆单位距离行驶成本
# PSO参数
birdNum = 50 # 粒子数量
w = 0.2 # 惯性因子
c1 = 0.4 # 自我认知因子
c2 = 0.4 # 社会认知因子
pBest, pLine = 0, [] # 当前最优值、当前最优解,(自我认知部分)
gBest, gLine = 0, [] # 全局最优值、全局最优解,(社会认知部分)
# 其他参数
iterMax = 10000 # 迭代次数
iterI = 1 # 当前迭代次数
bestfit = [] # 记录每代最优值
# 读入数据,
# DistributionCenter = #配送中心
# Customer = [(50, 50), (96, 24), (40, 5), (49, 8), (13, 7), (29, 89), (48, 30), (84, 39), (14, 47), (2, 24),
# (3, 82), (65, 10), (98, 52), (84, 25), (41, 69), (1, 65),
# (51, 71), (75, 83), (29, 32), (83, 3), (50, 93), (80, 94), (5, 42), (62, 70), (31, 62), (19, 97),
# (91, 75), (27, 49), (23, 15), (20, 70), (85, 60), (98, 85)] #客户坐标点
Customer = [(120.809512, 30.29924), (119.788449, 30.769), (119.782318, 30.777408), (119.712324, 30.665222),
(119.623712, 30.829714), (119.384080, 30.571701), (119.680676, 30.659247), (119.670816, 30.665036),
(119.680353, 30.638675), (119.686617, 30.631837), (119.782318, 30.777408), (120.258767, 30.843226),
(119.676461, 30.656651), (119.782318, 30.777408), (119.782318, 30.777408), (119.389073, 30.575829),
(119.566473, 30.598395), (119.569681, 30.582537), (119.698264, 30.617036), (119.645151, 30.774609),
(119.782318, 30.777408), (119.566420, 30.598355), (119.678924, 30.657969), (119.697415, 30.63885),
(119.659394, 30.632242), (119.621393, 30.519815), (119.697276, 30.604234), (119.686536, 30.63874),
(119.690617, 30.637546)] # 客户坐标点
# Demand = [0, 16, 11, 6, 10, 7, 12, 16, 6, 16, 8, 14, 7, 16, 3, 22, 18, 19, 1, 14, 8, 12, 4, 8, 24, 24, 2, 10, 15, 2,
# 14, 9] #客户需求量
# 客户需求量
Demand = [0, 2.5589375, 3.738, 1.067967742, 3.427, 6.6375, 0.063, 22.21635484, 2.3545, 7.4145, 5.3415, 3.38,
8.2278, 3.848125, 0.021, 2.4405, 10.7832, 3.26, 0.1295, 9.175, 3.10475, 3.0318, 10.5865, 3.833,
2.538461538, 3.228, 5.507928571, 0.5388, 0.576]
data_matrix = pd.read_csv('data_matrix.csv', delimiter=",")
data_matrix = np.array(data_matrix)
print(data_matrix)
print(data_matrix.shape)
dis_matrix = pd.DataFrame(data=None, columns=range(len(Customer)), index=range(len(Customer)))
for i in range(len(Customer)):
for j in range(len(Customer)):
dis_matrix.iloc[i, j] = round(data_matrix[i][j]/1000, 3)
# dis_matrix = calDistance(Customer) # 计算城市间距离
print(dis_matrix)
birdPop = [greedy(Customer, dis_matrix) for i in range(birdNum)] # 贪婪算法构造初始解
# birdPop = [random.sample(range(1,len(Customer)),len(Customer)-1) for i in range(birdNum)]#客户点编码,随机初始化生成种群
birdPop_car, fits, birdPop_car_type, birdPop_car_dis, times, moneys = calFitness(birdPop, Demand, dis_matrix, DISTABCE, V) # 分配车辆,计算种群适应度
gBest = pBest = min(fits) # 全局最优值、当前最优值
gLine = pLine = birdPop[fits.index(min(fits))] # 全局最优解、当前最优解
gLine_car = pLine_car = birdPop_car[fits.index(min(fits))]
gLine_car_type = pLine_car_type = birdPop_car_type[fits.index(min(fits))]
gLine_car_dis = pLine_car_dis = birdPop_car_dis[fits.index(min(fits))]
gTimes = pTimes = times[fits.index(min(fits))]
gMoneys = pMoneys = moneys[fits.index(min(fits))]
bestfit.append(gBest)
print("初始化时的全局最优解为:", bestfit[0])
print("初始化的最优总时间为:", sum(gTimes))
flag = 0
while iterI <= iterMax: # 迭代开始
for i in range(birdNum):
birdPop[i] = crossover(birdPop[i], pLine, gLine, w, c1, c2)
birdPop_car, fits, birdPop_car_type, birdPop_car_dis, times, moneys = calFitness(birdPop, Demand, dis_matrix, DISTABCE, V) # 分配车辆,计算种群适应度
pBest, pLine, pLine_car, pLine_car_type, pLine_car_dis = min(fits), birdPop[fits.index(min(fits))],\
birdPop_car[fits.index(min(fits))],\
birdPop_car_type[fits.index(min(fits))], birdPop_car_dis[fits.index(min(fits))]
pTimes, pMoneys = times[fits.index(min(fits))], moneys[fits.index(min(fits))]
if min(fits) == gBest:
flag = 1
if min(fits) < gBest:
gBest, gLine, gLine_car, gLine_car_type, gLine_car_dis = min(fits), birdPop[fits.index(min(fits))], birdPop_car[fits.index(min(fits))], \
birdPop_car_type[fits.index(min(fits))], birdPop_car_dis[fits.index(min(fits))]
gTimes, gMoneys = times[fits.index(min(fits))], moneys[fits.index(min(fits))]
if min(fits) <= gBest:
gBest, gLine, gLine_car, gLine_car_type, gLine_car_dis = min(fits), birdPop[fits.index(min(fits))], birdPop_car[fits.index(min(fits))], \
birdPop_car_type[fits.index(min(fits))], birdPop_car_dis[fits.index(min(fits))]
gMoneys = moneys[fits.index(min(fits))]
if sum(pTimes) <= sum(gTimes):
gTimes = times[fits.index(min(fits))]
bestfit.append(gBest)
print("第{}次迭代,当前最优解为{},全局最优解为{},当前代的全局最优解为{},当前代的全局最优总时间为{}".
format(iterI, pBest, gBest, bestfit[iterI], round(sum(gTimes), 3))) # 打印当前代数和最佳适应度值
iterI += 1 # 迭代计数加一
print()
print("分车后的路径数据:", gLine_car) # 路径顺序
print("路径分车的车型:", gLine_car_type) #路径分车的车型
print("分车后单个车的行驶距离: ", gLine_car_dis) # 分车后单个车的行驶距离
print("分车后单个车的行驶时间: ", gTimes) # 分车后单个车的行驶时间
print("分车后单个车的行驶费用: ", gMoneys) # 分车后单个车的行驶费用
for i in range(0, len(gLine_car)):
print("第{}辆车,型号为{},路径距离长度为: {}km,所需时间为: {}h,所需费用为: {}元,路径为: {}".format(i+1, gLine_car_type[i], round(gLine_car_dis[i], 3), gTimes[i], gMoneys[i], gLine_car[i]))
print("全局最优的总费用为: {}, 全局最优的总时间为: {} ".format(gBest, round(sum(gTimes), 3)))
print()
draw_path(gLine_car, Customer) # 画路径图
| Mrk298/PSO_routes | my_cvrp.py | my_cvrp.py | py | 21,022 | python | en | code | 13 | github-code | 90 |
19790762841 | from collections import deque
def can_construct(original, seqs):
sorted_order = []
if len(original) <= 0:
return False
#1. initialize graph
in_degree = {}
graph = {}
for seq in seqs:
for num in seq:
in_degree[num] = 0
graph[num] = []
#2. build graph
for seq in seqs:
for i in range(1, len(seq)):
parent, child = seq[i-1], seq[i]
graph[parent].append(child)
in_degree[child] += 1
#if we don't have ordering rules for all numbers we won't be able to construct the sequence
if len(in_degree) != len(original):
return False
#3 identify sources
sources = deque()
for key in in_degree:
if in_degree[key] == 0:
sources.append(key)
#4. go through sources and decrement their children's in_degrees
while sources:
if len(sources) > 1:
return False #more than one source menas there are more ways than one to reconstruct
if original[len(sorted_order)] != sources[0]:
return False #next source/number is different from the original sequence
vertex = sources.popleft()
sorted_order.append(vertex)
for child in graph[vertex]:
in_degree[child] -= 1
if in_degree[child] == 0:
sources.append(child)
return len(sorted_order) == len(original)
# return sorted_order == original
def main():
print("Can construct: " +
str(can_construct([1, 2, 3, 4], [[1, 2], [2, 3], [3, 4]])))
print("Can construct: " +
str(can_construct([1, 2, 3, 4], [[1, 2], [2, 3], [2, 4]])))
print("Can construct: " +
str(can_construct([3, 1, 4, 2, 5], [[3, 1, 5], [1, 4, 2, 5]])))
main() | willgorick/grokking-coding | topological_sort/reconstructing_a_sequence.py | reconstructing_a_sequence.py | py | 1,628 | python | en | code | 0 | github-code | 90 |
25599764034 | #!/usr/bin/env python
import math
import pylink
import matplotlib.pyplot as plt
import os
import pprint
import income as myMeager
import position as anAwkward
import taxes as deathAnd
import report as explosive
import private
if float(pylink.__version__) < 0.9:
s = """
===============================================================================
=== WARNING ===
===============================================================================
TL;DR
If you aren't the type to write python code, you'll be fine, but we'd
still prefer you upgrade by running the following command:
pip install -U pylink-satcom
The full story:
Your version of pylink has a bug that includes a workaround. We
recommend upgrading to the latest version (0.9) by issuing the
following command:
pip install -U pylink-satcom
The bug pertains to initialization of the system before using the
solve_for method in the DAGModel. Bug 47 was fixed in pull request
48, and is reflected in version 0.9 of pylink.
if (!willing_to_upgrade && want_to_c0d3)
{
/*
* WORKAROUND and DESCRIPTION
*
* Follow the example provided in amt_free_iso where we override the
* thing we're solving for to be the lowest value in the search range
* (really just any value in the search range). Otherwise, what can
* happen in versions before 0.9 is that you can accidentally leave
* that value close to the correct value, but outside the search
* range, then when you run the search, it computes the initial
* best-position as being the uninitialized value it starts with
* (i.e. whatever you had it set to before running solve_for). If the
* value you have it initially set to is closer to the right number
* than any other position within the search range then it will
* improperly attribute that diff to the first value in the search
* range and return the wrong thing.
*/
}
"""
print(s)
input('Press <enter> to continue.')
from report import comma
class Investigator(object):
"""Executes a number of queries against the financial model."""
def __init__(self, model):
self.m = model
self.e = model.enum
self.rep = explosive.Report(model)
def _qst(self, num, msg):
print()
print('/%s/'%('*'*78))
print('/* %-75s*/'%('Question #%d'%num))
print('/* %-75s*/'%(msg))
print('/%s/'%('*'*78))
print()
print()
def question_1(self):
self._qst(1, "How many RSUs will be automatically withheld?")
print(
"""These numbers are for shares vesting throughout the
year, since those will affect taxes as well. Shares
available on the big day will NOT be affected by RSU
withholdings that will happen afterwards.
""")
held = m.shares_withheld_rsu_n
vested = m.shares_vested_rsu_eoy_n
if vested:
rate = round(100.0 * held / float(vested), 1)
else:
rate = -1.0
print("Withholding: %s / %s ( %5.1f %% )" % (
comma(held, dec=False, white=False),
comma(vested, dec=False, white=False),
rate))
def question_2(self):
self._qst(2, "What is the outstanding tax burden?")
self.rep.print_tax_summary()
self.rep.print_grants()
def question_3(self):
self._qst(3, "How many RSUs need to be sold to cover tax burden?")
needed = int(math.ceil(m.outstanding_taxes_usd /
float(m.ipo_price_usd)))
print("Share Price: $ %.2f" % m.ipo_price_usd)
print("Owed: $ %s" % comma(m.outstanding_taxes_usd,
dec=False, white=False))
if m.shares_vested_rsu_n:
pct = int((needed * 100) / m.shares_vested_rsu_n)
else:
pct = -1
print("Sell: %s ( %5.1f %% )" % (
comma(needed, dec=False, white=False),
pct))
def question_4(self):
self._qst(4, "How much cash if we sell it all (starting with the expensive NSOs)?")
orders = myMeager.sales_orders_all(self.m,
nso_first=True,
cheap_first=False)
self.m.override(self.e.sales_orders, orders)
self.rep.print_grants()
self.rep.print_tax_summary()
print()
print("Share Price: $ %.2f" % m.ipo_price_usd)
print("NSOs Sold: %s" % comma(m.shares_sold_nso_n,
dec=False, white=False))
print("NSO Income: $ %s" % comma(m.nso_income_usd,
dec=False, white=False))
print("ISOs Sold: %s" % comma(m.shares_sold_iso_n,
dec=False, white=False))
print("ISO Income: $ %s" % comma(m.iso_sales_income_usd,
dec=False, white=False))
print("RSUs Sold: %s" % comma(m.shares_sold_rsu_n,
dec=False, white=False))
print("RSU Income: $ %s" % comma(m.rsu_income_usd,
dec=False, white=False))
print("Sale Income: $ %s" % comma(m.rsu_income_usd
+ m.nso_income_usd
+ m.iso_sales_income_usd,
dec=False, white=False))
print("Owed: $ %s" % comma(m.outstanding_taxes_usd,
dec=False, white=False))
print("We Clear: $ %s"%comma(m.cleared_from_sale_usd, dec=False, white=False))
print()
print("This is AFTER paying all outstanding taxes.")
def question_5(self):
self._qst(5, "How much cash if we sell the RSUs?")
orders = myMeager.sales_orders_rsu(self.m)
self.m.override(self.e.sales_orders, orders)
self.rep.print_grants()
self.rep.print_tax_summary()
print()
print("We Clear: %s"%comma(m.cleared_from_sale_usd))
def amt_free_iso(self, strike=None):
m = self.m
e = m.enum
if strike is None:
g = [g for g in m.grants_lst if g.vehicle=='iso'][0]
strike = g.strike_usd
dollars = 0
amti_gap = 0
n_shares = 0
max_val = m.shares_available_iso_n*(m.ipo_price_usd-strike)
### BUG IN PYLINK ###
# https://github.com/harrison-caudill/pylink/issues/47
m.override(e.iso_exercise_income_usd, 0)
### BUG IN PYLINK ###
iso_in = m.solve_for(e.iso_exercise_income_usd,
e.amt_taxes_usd, m.fed_reg_income_taxes_usd,
0, max_val, max_val/10.0, rounds=5)
n_shares = iso_in / float(strike)
n_shares = int(min(n_shares, m.shares_vested_outstanding_iso_n))
cost = n_shares * strike
return (iso_in, n_shares, strike, cost)
def question_6(self):
self._qst(6, "If we sell it all, how many ISOs can we buy w/o AMT?")
orders = myMeager.sales_orders_all(self.m)
self.m.override(self.e.sales_orders, orders)
self.rep.print_grants()
self.rep.print_tax_summary()
print()
print("Just using the first ISO strike price we find for starters.")
cleared = m.cleared_from_sale_usd
(amt, n, strike, cost) = self.amt_free_iso()
remaining = cleared - cost
print("AMT Income Gap: $ %s" % comma(amt, dec=False, white=False))
print("Exercisable shares: %s" % comma(n, dec=False, white=False))
print("Strike Price: $ %s" % comma(strike,
dec=True, white=False))
print("Exercise Cost: $ %s" % comma(cost, dec=False, white=False))
print("Cash Cleared: $ %s" % comma(cleared,
dec=False, white=False))
print("Cash Remaining: $ %s" % comma(remaining,
dec=False, white=False))
def question_7(self):
self._qst(7, "If we sell all RSUs, how many ISOs can we buy w/o AMT?")
# Place an order for all RSUs
orders = myMeager.sales_orders_rsu(self.m)
self.m.override(self.e.sales_orders, orders)
orig = m.iso_exercise_income_usd
self.rep.print_grants()
print()
print("Just using the first ISO strike price we find for starters.")
cleared = m.cleared_from_sale_usd
(amt, n, strike, cost) = self.amt_free_iso()
remaining = cleared - cost
m.override(self.e.iso_exercise_income_usd, n*(m.ipo_price_usd-strike))
self.rep.print_tax_summary()
print("AMT Income Gap: $ %s" % comma(amt, dec=False, white=False))
print("Exercisable shares: %s" % comma(n, dec=False, white=False))
print("Strike Price: $ %s" % comma(strike,
dec=True, white=False))
print("Exercise Cost: $ %s" % comma(cost, dec=False, white=False))
print("Cash Cleared: $ %s" % comma(cleared,
dec=False, white=False))
print("Cash Remaining: $ %s" % comma(remaining,
dec=False, white=False))
m.override(self.e.iso_exercise_income_usd, orig)
def question_8(self, rsu_only=False):
augment = "RSU + NSO"
if rsu_only: augment = "RSU Only"
self._qst(8, "Basic financials vs share price (%s)" % augment)
m = self.m
e = m.enum
# Set up the loop
lo = 5.0 # low fmv
hi = 25.0 # high fmv
up = 100
orig = m.ipo_price_usd
x = []
y_gross = []
y_net = []
y_tax = []
y_amti = []
amt_exemption_rolloff = -1
amt_exemption_gone = -1
for i in range(up+1):
fmv = (i*(hi-lo)/up)+lo
x.append(fmv)
m.override(e.ipo_price_usd, fmv)
orders = myMeager.sales_orders_all(m,
nso_first=True,
cheap_first=False,
prefer_exercise=True,
restricted=True,
price=fmv)
if rsu_only:
orders = myMeager.sales_orders_rsu(self.m, price=fmv)
self.m.override(self.e.sales_orders, orders)
if (0 > amt_exemption_rolloff
and m.amt_base_income_usd > m.amt_exemption_rolloff_threshhold_usd):
amt_exemption_rolloff = fmv
if (0 > amt_exemption_gone
and m.amt_exemption_usd == 0):
amt_exemption_gone = fmv
m.override(e.iso_exercise_income_usd, 0)
m.override(e.ext_amt_income_usd, 0)
y_gross.append(m.total_income_usd-m.reg_income_usd)
y_tax.append(m.outstanding_taxes_usd)
y_amti.append(m.amt_taxable_income_usd)
y_net.append(m.cleared_from_sale_usd)
# Reset our state
m.override(e.ipo_price_usd, orig)
# Let's make our plots
fig, ax = plt.subplots()
ax.set_ylabel('Value (USD)')
ax.plot(x, y_gross, label='Gross Sales Income (post Withholding)')
ax.plot(x, y_tax, label='Outstanding Tax Bill')
ax.plot(x, y_amti, label='AMTI')
ax.plot(x, y_net, label='Net Income from Sale')
if 0 < amt_exemption_rolloff:
ax.axvline(amt_exemption_rolloff)
ax.text(amt_exemption_rolloff,
m.amt_exemption_rolloff_threshhold_usd*1.1,
'AMT Rolloff',
rotation=45)
if 0 < amt_exemption_gone:
ax.axvline(amt_exemption_gone)
ax.text(amt_exemption_gone,
m.amt_exemption_rolloff_threshhold_usd*1.1,
'AMT Exemp. Gone',
rotation=45)
ax.grid()
fig.suptitle('Financials vs FMV (%s)'%augment)
ax.legend()
fname = 'fin_all.png'
if rsu_only: fname = 'fin_rsu.png'
path = os.path.join('.', fname)
fig.savefig(path, transparent=False, dpi=600)
def question_9(self):
self._qst(9, "What does exercisable ISOs look like vs IPO price?")
m = self.m
e = m.enum
# Set up the loop
lo = 12.0 # low fmv
hi = 15.0 # high fmv
up = 100
orig = m.ipo_price_usd
x = []
y_iso_n = []
y_gross = []
y_tax = []
y_cleared = []
y_ex_cost = []
y_rem = []
y_test = []
y_amt_diff = []
m.override(self.e.query_date, '3/15/21')
#m.override(self.e.query_date, '12/31/35')
# triggers for vertical lines
amt_exemption_rolloff = -1
amt_exemption_gone = -1
iso_saturated = -1
rolloff_val = -1
gone_val = -1
for i in range(up):
fmv = (i*(hi-lo)/up)+lo
x.append(fmv)
m.override(e.ipo_price_usd, fmv)
orders_all = myMeager.sales_orders_all(m,
nso_first=True,
cheap_first=False,
prefer_exercise=True,
restricted=False,
price=fmv)
orders_opts = myMeager.sales_orders_options(m,
nso_first=True,
cheap_first=False,
prefer_exercise=True,
restricted=False,
price=fmv)
orders_rsu = myMeager.sales_orders_rsu(m,price=fmv)
self.m.override(self.e.sales_orders, orders_all)
(amt, iso, strike, cost) = self.amt_free_iso()
if (0 > amt_exemption_rolloff
and m.amt_base_income_usd > m.amt_exemption_rolloff_threshhold_usd-10*m.ipo_price_usd
and m.amt_base_income_usd < m.amt_exemption_rolloff_threshhold_usd+10*m.ipo_price_usd):
amt_exemption_rolloff = fmv
rolloff_val = iso
if (0 > amt_exemption_gone
and 0 < amt_exemption_rolloff
and m.amt_exemption_usd == 0):
amt_exemption_gone = fmv
gone_val = iso
if ((0 > iso_saturated)
and (iso >= m.shares_vested_outstanding_iso_n)):
iso_saturated = fmv
m.override(self.e.iso_exercise_income_usd, amt)
a = m.fed_reg_income_taxes_usd
b = m.amt_taxes_usd
y_iso_n.append(iso)
cleared = m.cleared_from_sale_usd
y_gross.append(m.total_income_usd)
# y_tax.append(m.outstanding_taxes_usd)
y_cleared.append(cleared)
y_ex_cost.append(cost)
# y_rem.append(cleared - cost)
m.override(e.ipo_price_usd, orig)
# Let's make our plots
fig, ax_shares = plt.subplots()
ax_shares.set_xlabel('Share Price (USD)')
ax_shares.set_ylabel('ISOs (n)')
ax_shares.plot(x, y_iso_n,
label='AMT Free ISO Exercises',
color='tab:purple')
color = 'tab:green'
ax_dollars = ax_shares.twinx()
ax_dollars.set_ylabel('Value ($k)')
y_cleared = [x/1000 for x in y_cleared]
y_gross = [x/1000 for x in y_gross]
y_ex_cost = [x/1000 for x in y_ex_cost]
y_rem = [x/1000 for x in y_rem]
ax_dollars.set_ylim(0, y_gross[-1]*1.1)
ax_dollars.plot(x, y_gross, label='Pre-Tax Income')
ax_dollars.plot(x, y_cleared, label='Post-Tax Cash from Sale')
# ax_dollars.plot(x, y_tax, label='Taxes Owed')
# ax_dollars.plot(x, y_cleared, label='Post-Tax Income')
#ax_dollars.plot(x, y_ex_cost, label='Cost to Exercise')
# ax_dollars.plot(x, y_rem, label='Remaining')
if 0 < amt_exemption_rolloff:
ax_shares.axvline(amt_exemption_rolloff)
ax_shares.text(amt_exemption_rolloff,
rolloff_val*1.05,
'AMT Rolloff',
rotation=45)
if 0 < amt_exemption_gone:
ax_shares.axvline(amt_exemption_gone)
ax_shares.text(amt_exemption_gone,
gone_val*.95,
'AMT Exemp. Gone',
rotation=45)
if 0 < iso_saturated:
ax_shares.axvline(iso_saturated)
ax_shares.text(iso_saturated,
m.shares_vested_outstanding_iso_n*.95,
"All ISO's Available",
rotation=0)
fig.suptitle('ISO Outlook vs FMV')
ax_shares.legend(loc=3)
ax_dollars.legend()
fname = 'iso.png'
path = os.path.join('.', fname)
fig.savefig(path, transparent=False, dpi=600)
def go(self):
self.question_1()
self.question_2()
self.question_3()
self.question_4()
self.question_5()
self.question_6()
self.question_7()
self.question_8()
self.question_8(rsu_only=True)
self.question_9()
print()
if __name__ == '__main__':
m = private.MODEL
dixon_hill = Investigator(m)
dixon_hill.go()
| harrison-caudill/ipo-sim | investigator.py | investigator.py | py | 18,030 | python | en | code | 4 | github-code | 90 |
22346779245 | """Unit tests for pysetl.utils module."""
import inspect
import logging
import time
from typing import Generic, TypeVar
import pytest
from pyspark.sql import SparkSession
from pysetl.utils import BenchmarkModifier, BenchmarkResult, pretty
from pysetl.utils.exceptions import PySparkException
from pysetl.utils.get_signature import get_signature
from pysetl.utils.mixins import (
HasDiagram, HasLogger, HasRegistry, IsIdentifiable,
HasSparkSession
)
class DummyFactory:
    """Simple object to be benchmarked.

    Each pipeline stage sleeps for a fixed, known duration so that the
    benchmark test below can assert the exact rounded timings
    (read: 0.2 s, process: 0.3 s, write: 0.1 s).

    NOTE(review): BenchmarkModifier discovers this class's callables via
    ``inspect.getmembers`` — keep the method set exactly read/process/
    write/get, or the expected report in the test will change.
    """

    def read(self):
        """Simulate a 0.2 s read stage; return self for chaining."""
        time.sleep(0.2)
        return self

    def process(self):
        """Simulate a 0.3 s processing stage; return self for chaining."""
        time.sleep(0.3)
        return self

    def write(self):
        """Simulate a 0.1 s write stage; return self for chaining."""
        time.sleep(0.1)
        return self

    def get(self):
        """Return the pipeline's final payload string."""
        return "benchmarked"
def test_benchmark_modifier_get():
"""Test BenchmarkModifier."""
benchmarked = BenchmarkModifier[DummyFactory](DummyFactory()).get()
methods = dict(inspect.getmembers(benchmarked, callable))
assert benchmarked.read().process().write().get() == "benchmarked"
times = {
k: round(v, 1)
for k, v
in benchmarked.times.items() # type: ignore
}
results = BenchmarkResult(**times | {
"cls": "DummyFactory",
"total": sum(times.values())
})
assert "read" in methods
assert "process" in methods
assert "write" in methods
assert "get" in methods
assert isinstance(times, dict)
assert str(results) == "\n".join([
"Benchmark class: DummyFactory",
"Total elapsed time: 0.6 s",
"read: 0.2 s",
"process: 0.3 s",
"write: 0.1 s",
"================="
])
def test_get_signature():
"""Test get_signature utility."""
def _sum(x, y):
"""For testing purposes."""
return x + y
assert get_signature(_sum) == ["x", "y"]
def test_pretty():
"""Test pretty utility."""
T = TypeVar("T")
class GenericTest(Generic[T]):
"""Simple object to be formatted."""
assert pretty(type("")) == "str"
assert pretty(None) == ""
assert pretty(type(GenericTest[int])) == "_GenericAlias"
assert pretty(GenericTest[int]) == "GenericTest[int]"
assert pretty(list[int]) == "list[int]"
assert pretty("module.type") == "type"
with pytest.raises(NotImplementedError) as error:
pretty(1)
assert error
def test_has_diagram():
"""Test HasDiagram mixin."""
class WithDiagram(HasDiagram):
"""Class for test purposes."""
def to_diagram(self) -> str:
return "A mermaid diagram"
@property
def diagram_id(self) -> str:
return "id"
format_diagram_str = (
WithDiagram()
.format_diagram_id(
name="hola",
diagram_id="adios",
suffix="12g"
)
)
assert WithDiagram().get_signature() == ["args", "kwargs"]
assert format_diagram_str == "holaAdios12g"
def test_has_logger():
"""Test logging functionality."""
assert HasLogger().log_info("hola") == logging.info("hola")
assert HasLogger().log_debug("hola") == logging.debug("hola")
assert HasLogger().log_debug("hola") == logging.debug("hola")
assert HasLogger().log_warning("hola") == logging.warning("hola")
assert HasLogger().log_error("hola") == logging.error("hola")
def test_has_registry():
"""Test Registry."""
class Id(IsIdentifiable):
"""Identifiable object."""
obj1 = Id()
obj2 = Id()
obj3 = Id()
registry = (
HasRegistry[Id]()
.register_items([obj2, obj3])
.register(obj1)
)
assert registry.last_registered_item == obj1
assert registry.get_item(obj2.uuid) == obj2
assert len(registry.get_registry()) == 3
assert registry.clear_registry().size == 0
assert not registry.last_registered_item
def test_has_spark_session_exceptions():
"""Throw PySparkException if no spark session found."""
SparkSession.builder.getOrCreate().stop()
with pytest.raises(PySparkException) as error:
_ = HasSparkSession().spark
assert str(error.value) == "No active Spark session"
| JhossePaul/pysetl | tests/test_utils.py | test_utils.py | py | 4,252 | python | en | code | 0 | github-code | 90 |
10425382579 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import time
import zlib
from lib.logger import logger
from models import Area, Device_log, User_log, User
class Feature(object):
def get_feature(self, area, devices_request, timestamp, mac_address, add_del_done=0, limit=50, ttl=0):
timestamp = int(timestamp if timestamp else time.time())
last_record_time = -1 # 查询到的最后一条记录的时间
result = []
update_feature_done = 0
user_msg_num = 0
logger.info("get data start, area: %s, devices_request: %s", str(area), str(devices_request))
area_record = Area.query.filter_by(area=area).first()
if not area_record:
logger.info("not area %s info", area)
return json.dumps(result)
data = []
uls = area_record.user_log.filter(User_log.timestamp >= timestamp).order_by(User_log.timestamp).limit(
limit).all()
for ul in uls:
last_record_time = ul.timestamp
u = User.query.filter_by(uid=ul.uid).first()
if u:
# 根据传入的device过滤,不传时取全部数据
if devices_request:
u_devices = [d.device for d in u.devices]
if set(u_devices) & set(devices_request) == set():
logger.warn("user %s filtered by request device %s", u, devices_request)
continue
for f in u.feature.all():
data.append(
{'uid': ul.uid, 'pic_md5': u.pic_md5, 'feature_id': f.feature_id, 'feature': f.feature})
if not uls or len(uls) < limit:
update_feature_done = 1
if data:
user_msg_num += len(data)
result.append({'type': 'update_feature', 'timestamp': last_record_time, 'data': data,
'update_feature_done': update_feature_done})
else:
result.append({'timestamp': last_record_time, 'data': [], 'update_feature_done': update_feature_done})
logger.info("get feature data end, area:%s, user_msg_num:%d" % (area, user_msg_num))
device_msg_num = 0
if not add_del_done:
result.append({'timestamp': last_record_time, 'add_del_done': 1})
for cmd in ['add_user', 'del_user']:
data = {}
for dl in area_record.device_log.filter(
(Device_log.timestamp > timestamp) & Device_log.cmd.__eq__(cmd)):
if devices_request and dl.device not in devices_request:
# print("device_log %s filtered by request device %s" %
# (dl, devices_request))
continue
uid = dl.uid
if ttl: # 需要同步有效期
relation = {
"uid": uid,
"start_time": dl.start_time,
"end_time": dl.end_time
}
data.setdefault(dl.device, []).append(relation)
else: # 不需要同步有效期
data.setdefault(dl.device, []).append(uid)
if data:
device_msg_num += 1
result.append({'type': cmd, 'timestamp': last_record_time, 'data': data, 'add_del_done': 1})
logger.info("get user data end, area:%s, device_msg_num:%d", area, device_msg_num)
# return zlib.compress(json.dumps(result))
return json.dumps(result)
| anstones/py-collection | RabbitMQ/rabbitmq_kafka/face_center_read_server/process-4010/core/feature.py | feature.py | py | 3,630 | python | en | code | 3 | github-code | 90 |
37359483711 | # Все 4-буквенные слова, составленные из букв А, И, О, У, записаны в алфавитном порядке и пронумерованы.
# Вот начало списка:
# АААА
# АААИ
# АААО
# АААУ
# ААИА
# Запишите слово, которое стоит под номером 250.
from itertools import product
a, cnt = 'АИОУ', 0
for w in product(a, repeat=4):
cnt += 1
if cnt == 250:
for c in w:
print(c, end='')
break
| sosnovskiim/Informatics | school.sgu.ru/lap/10.29-11.04/22.py | 22.py | py | 540 | python | ru | code | 0 | github-code | 90 |
20388954359 | from research.grammatical_gender.bundle import GGTrainIndexBuilder
from tg.grammar_ru.corpus import CorpusBuilder
from tg.grammar_ru.features import PyMorphyFeaturizer, SlovnetFeaturizer, SyntaxTreeFeaturizer, SyntaxStatsFeaturizer
from tg.grammar_ru.common import Loc
from yo_fluq_ds import *
# VOCAB_FILE = Path(__file__).parent / 'words.json'
#
# def read_data():
# return CorpusReader(Loc.corpus_path / 'lenta.base.zip').get_frames().feed(fluq.with_progress_bar())
def build_index():
gg_index_builder = GGTrainIndexBuilder()
CorpusBuilder.transfuse_corpus(
[Loc.corpus_path / 'lenta.base100.zip'],
Loc.bundles_path / 'grammatical_gender/prepare/lenta100/raw/raw.zip',
selector=gg_index_builder.build_train_index
)
def featurize_index():
CorpusBuilder.featurize_corpus(
Loc.bundles_path / 'grammatical_gender/prepare/lenta100/raw/raw.zip',
Loc.bundles_path / 'grammatical_gender/prepare/lenta100/feat/feat.zip',
[
PyMorphyFeaturizer(),
SlovnetFeaturizer(),
SyntaxTreeFeaturizer(),
SyntaxStatsFeaturizer()
],
2,
True,
)
def assemble(name, limit):
bundle_path = Loc.bundles_path / f'grammatical_gender/{name}'
CorpusBuilder.assemble(
Loc.bundles_path / 'grammatical_gender/prepare/lenta100/feat/feat.zip',
bundle_path,
limit
)
src = pd.read_parquet(bundle_path / 'src.parquet')
index = GGTrainIndexBuilder.build_index_from_src(src)
index.to_parquet(bundle_path / 'index.parquet')
print(index.groupby('split').size())
# def upload_bundle(name):
# bundle_path = Loc.bundles_path / f'tsa/{name}'
# S3Handler.upload_folder(
# 'ps-data-science-sandbox',
# 'sagemaker/tsa/datasets/' + name,
# bundle_path)
if __name__ == '__main__':
build_index()
featurize_index()
# assemble('tiny3featdist', 5)
assemble('toy3featdist', 5)
# # assemble('toy3', 5)
assemble('mid3featdist', 20)
assemble('big3featdist', 50)
# assemble('tiny', None)
# upload_bundle('big')
| okulovsky/grammar_ru | archive/SergeyPishchulov/from_research/build_bundle.py | build_bundle.py | py | 2,125 | python | en | code | 11 | github-code | 90 |
7260132210 | #encoding=utf-8
from django.test import TestCase, Client, SimpleTestCase
import json
from django.contrib.auth.models import User
# Create your tests here.
'''
class View_registTestCase(SimpleTestCase):
def setUp(self):
self.client=Client()
def test_regist(self):
request_data1 = {
'email':"535091412@qq.com",
'password1':'123456',
'password2':'123456',
'username':'654321'
}
response1 = self.client.post('/accounts/register/', json.JSONEncoder().encode(request_data1), 'application/json')
import pdb; pdb.set_trace()
print("************")
print response1
print("************")
content1 = {}
data1=json.loads(response1.content.decode('utf-8'))
self.assertDictEqual(content1, data1)
class View_loginTestCase(SimpleTestCase):
def setUp(self):
self.client=Client()
def test_login(self):
user=User(username="123",password="321")
user.save()
request_data1 = {
'username':"123",
'password':"321",
}
response1 = self.client.post('/accounts/login/', json.JSONEncoder().encode(request_data1), 'application/json')
print("************")
print response1
print("************")
content1 = {}
data1=json.loads(response1.content.decode('utf-8'))
self.assertDictEqual(content1, data1)
'''
class View_weiboTestCase(SimpleTestCase):
def setUp(self):
self.client=Client()
def test_regist(self):
response = self.client.get('/verification/weiboLogin/')
code=response.context['code']
response1 = self.client.get('/verificationn/weibo_check/', {'code':code})
username=response1.context['username']
name='aa'
self.assertEqual(username, name)
class View_QQTestCase(SimpleTestCase):
def setUp(self):
self.client=Client()
def test_regist(self):
response = self.client.get('/verification/QQLogin/')
code=response.context['code']
response1 = self.client.get('/verificationn/QQ_check/', {'code':code})
username=response1.context['username']
name='aa'
self.assertEqual(username, name)
class View_WEIXINTestCase(SimpleTestCase):
def setUp(self):
self.client=Client()
def test_regist(self):
response= self.client.get('/verification/weixinLogin/')
code=response.context['code']
response1 = self.client.get('/verificationn/weixin_check/', {'code':code})
username=response1.context['username']
name='aa'
self.assertEqual(username, name)
| Luxun0535/Forum | verification/tests.py | tests.py | py | 2,674 | python | en | code | 0 | github-code | 90 |
18279738809 | #!usr/bin/env python3
import math
import sys
import bisect
def LI(): return [int(x) for x in sys.stdin.buffer.readline().split()]
def I(): return int(sys.stdin.buffer.readline())
def LIR(n):
return [LI() for i in range(n)]
class SegmentTree:
def __init__(self, size, f=lambda x,y : x+y, default=0):
self.size = 2**(size-1).bit_length()
self.default = default
self.dat = [default]*(self.size*2)
self.f = f
def update(self, i, x):
i += self.size
self.dat[i] = x
while i > 0:
i >>= 1
self.dat[i] = self.f(self.dat[i*2], self.dat[i*2+1])
def get(self, a, b=None):
if b is None:
b = a + 1
l, r = a + self.size, b + self.size
lres, rres = self.default, self.default
while l < r:
if l & 1:
lres = self.f(lres, self.dat[l])
l += 1
if r & 1:
r -= 1
rres = self.f(self.dat[r], rres)
l >>= 1
r >>= 1
res = self.f(lres, rres)
return res
def solve():
n,d,a = LI()
g = LIR(n)
g.sort()
X = [i for (i,_) in g]
s = SegmentTree(n)
ans = 0
for i in range(n):
x = g[i][0]
j = bisect.bisect_left(X,x-2*d)
h = g[i][1]-s.get(j,i)
if h <= 0:
continue
k = math.ceil(h/a)
ans += k
s.update(i,k*a)
print(ans)
return
#Solve
if __name__ == "__main__":
solve()
| Aasthaengg/IBMdataset | Python_codes/p02788/s085716639.py | s085716639.py | py | 1,513 | python | en | code | 0 | github-code | 90 |
42679034104 | #Concious life (version 2.0) (can input and edit to-do items)
from time import *
from openpyxl import Workbook, load_workbook
from openpyxl.utils import get_column_letter
def input_topic(): #2 uses /input items into a list
while True:
try:
hmi = int(input('how many item to add:'))
do = []
for i in range(hmi):
item_name = input('item name:')
do.append(item_name)
return do
except:
print('please input a number')
def Topic(dol):
while True: #while loop within def:>
topic = input('wt r u going to do\n"add" to add item:')
if topic == 'add': #add item with checking repeated item
dol1 = input_topic()
print (dol1)
re = 0
for col in range(1,ws.max_column): #check if row 1 has the item
chara = get_column_letter(col+1) #from column2 'B' to
if ws[chara + '1'].value in dol1: #check if row has new item
print('repeated item')
re = 'repeat'
break
else:
pass
if re != 'repeat':
for i in range(len(dol1)): #if upper no break,here will run
ws[get_column_letter(ws.max_column + 1) + '1'] = dol1[i]
break
elif topic not in dol:
print('type again {} or add item'.format(dol))
continue
else:
return topic
return 0
def po():
while True:
proo = input('timer/ stop:') # process operation
if proo == 'stop':
op_time = time()
tt = op_time-st_time
print('accumulated time = {}min'.format( int((tt) / 60)))
return tt/60**2
if proo == 'timer':
op_time = time()
tt = op_time-st_time
print('accumulated time = {}min'.format(int((tt) / 60)))
continue
else:
print('type timer/stop')
continue
def open_excel(excel_name):
try:
wb = load_workbook(excel_name)
ws = wb.active
except:
wb = Workbook()
ws = wb.active
return wb, ws
def according_topic1(topic,row,tt):
for i in range(1, ws.max_column): #locate column for selected item
chara = get_column_letter(i+1)
if ws[chara + '1'].value == topic: #locate column for selected item
if ws[chara + str(row)].value != None: # check if any record before
ws[chara + str(row)] = ws[chara + str(row)].value + '+' + tt
else:
ws[chara + str(row)] = '=' + tt
else:
pass
while True:
wb, ws = open_excel('conscious life1.xlsx')
if ws['a1'].value == None:
dol = input_topic()
dol = ['date DD MM YYYY'] + dol
for i in range(1,len(dol)+1):
chara = get_column_letter(i)
ws[chara + '1'] = dol[i-1] #checked
else:
dol =[]
for col in range(1,ws.max_column):
chara = get_column_letter(col+1) #col 1 is date
dol.append(ws[chara + '1'].value)
topic = Topic(dol) #dol stopped so no need to save dol2
st_time = time()
if topic != 0:
tt = str(po())
else:
tt = 0
#save to excel
t = gmtime() #date would be the day you open the program
td = str(t[2])+' '+str(t[1])+' '+str(t[0])
for row in range(1, ws.max_row+1):
if ws['a' + str(row)].value == td: #step 1:check date (check if there is, if not then add)
according_topic1(topic,row,tt) #select column (default: study to column b)
elif ws['a' + str(ws.max_row)].value != td:
ws.append([td])
according_topic1(topic,ws.max_row,tt)
else:
pass
wb.save('conscious life1.xlsx')
| SYY-YYS/consciouslife | conscious life 2.0.py | conscious life 2.0.py | py | 3,993 | python | en | code | 0 | github-code | 90 |
73868774376 | import numpy as np
import cv2
import os
import detect_face
import shutil
import tensorflow as tf
from tqdm import tqdm
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
def get_boundingbox(bb, width, height, scale=1.3, minsize=None):
"""
Expects a dlib face to generate a quadratic bounding box.
:param face: dlib face class
:param width: frame width
:param height: frame height
:param scale: bounding box size multiplier to get a bigger face region
:param minsize: set minimum bounding box size
:return: x, y, bounding_box_size in opencv form
"""
x1 = bb[0]
y1 = bb[1]
x2 = np.minimum(bb[2], width)
y2 = np.minimum(bb[3], height)
size_bb = int(max(x2 - x1, y2 - y1) * scale)
if minsize:
if size_bb < minsize:
size_bb = minsize
center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2
# Check for out of bounds, x-y top left corner
scale_x1 = max(int(center_x - size_bb // 2), 0)
scale_y1 = max(int(center_y - size_bb // 2), 0)
# Check for too big bb size for given x, y
size_bb = min(width - scale_x1, size_bb)
size_bb = min(height - scale_y1, size_bb)
x1 = abs(scale_x1 - x1)
x2 = abs(x2 - scale_x1)
y1 = abs(scale_y1 - y1)
y2 = abs(y2 - scale_y1)
return scale_x1, scale_y1, size_bb, x1, x2, y1, y2
def extract_frames(videos_path, frame_subsample_count=30, output_path=None,
pnet=None, rnet=None, onet=None):
reader = cv2.VideoCapture(videos_path)
# fps = video.get(cv2.CAP_PROP_FPS)
frame_num = 0
while reader.isOpened():
success, whole_image = reader.read()
if not success:
break
if frame_num % frame_subsample_count == 0:
cropped_face = get_face(whole_image, pnet=pnet, rnet=rnet, onet=onet)
if cropped_face == []:
continue
# plt.imshow(cropped_face)
# plt.show()
save_path = os.path.join(output_path, '{:04d}.png'.format(frame_num))
cv2.imwrite(save_path, cropped_face)
frame_num += 1
reader.release()
def get_face(whole_image, pnet=None, rnet=None, onet=None):
# Face detector
minsize = 20 # minimum size of face
threshold = [ 0.6, 0.7, 0.7 ] # three steps's threshold
factor = 0.709 # scale factor
# Image size
height, width = whole_image.shape[:2]
image = cv2.cvtColor(whole_image, cv2.COLOR_BGR2RGB)
bounding_boxes, points = detect_face.detect_face(image, minsize, pnet, rnet, onet, threshold, factor)
cropped_face = []
if bounding_boxes.shape[0]:
# For now only take biggest face
prob_index = np.argsort(bounding_boxes[:, 4])
bb = bounding_boxes[prob_index[-1]][0:4].astype(np.int32)
scale_x1, scale_y1, size_bb, x1, x2, y1, y2 = \
get_boundingbox(bb, width, height, scale=1.2)
cropped_face = whole_image[scale_y1:scale_y1 + size_bb,
scale_x1:scale_x1 + size_bb]
# cropped_face = whole_image[bb[1]:bb[3],
# bb[0]:bb[2]]
return cropped_face
def main():
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_level_1'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_level_1'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_level_2'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_level_2'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_level_3'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_level_3'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_level_4'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_level_4'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_level_5'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_level_5'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_mix_2_distortions'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_mix_2_distortions'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_mix_3_distortions'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_mix_3_distortions'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_mix_4_distortions'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_mix_4_distortions'
# video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/end_to_end_random_level'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/end_to_end_random_level'
video_path = '/data1/cby/dataset/DeepForensic/videos/manipulated_videos/reenact_postprocess'
face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/reenact_postprocess'
model_path = 'mtcnn_models'
if not os.path.isdir(face_path):
os.mkdir(face_path)
print(face_path)
start = 0
end = start+1
with tf.Graph().as_default():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, model_path)
video_file_path = video_path
face_file_path = face_path
if not os.path.isdir(face_file_path):
os.mkdir(face_file_path)
for name in tqdm(os.listdir(video_file_path)):
input_path = os.path.join(video_file_path, name)
if name.find('.mp4') == -1:
try:
shutil.copy(input_path, face_file_path)
continue
except:
continue
output_path = os.path.join(face_file_path, name)
if not os.path.isdir(output_path):
os.mkdir(output_path)
if len(os.listdir(output_path)) != 0:
continue
extract_frames(input_path, frame_subsample_count=20, output_path=output_path,
pnet=pnet, rnet=rnet, onet=onet)
def getFile(path, format='mp4'):
files = os.listdir(path) # 得到文件夹下的所有文件,包含文件夹名称
FileList = []
for name in files:
if os.path.isdir(os.path.join(path, name)):
FileList.extend(getFile(os.path.join(path, name), format)) #回调函数,对所有子文件夹进行搜索
elif os.path.isfile(os.path.join(path, name)):
if format.lower() in name.lower():
FileList.append(os.path.join(path, name))
else:
print("未知文件:%s", name)
return FileList
def main_real(vid):
video_path = '/data1/cby/dataset/DeepForensic/videos/source_videos/' + vid
face_path = '/data1/cby/dataset/DeepForensic/face_images/source_images/' + vid
model_path = 'mtcnn_models'
if not os.path.isdir(face_path):
os.mkdir(face_path)
print(face_path)
start = 0
end = start+1
with tf.Graph().as_default():
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
with sess.as_default():
pnet, rnet, onet = detect_face.create_mtcnn(sess, model_path)
video_file_path = video_path
face_file_path = face_path
if not os.path.isdir(face_file_path):
os.mkdir(face_file_path)
for input_path in tqdm(getFile(video_file_path, format='mp4')):
# output_path = os.path.join(face_file_path, input_path)
output_path = input_path.replace(video_path, face_path)
if not os.path.isdir(output_path):
os.makedirs(output_path)
if len(os.listdir(output_path)) != 0:
continue
extract_frames(input_path, frame_subsample_count=50, output_path=output_path,
pnet=pnet, rnet=rnet, onet=onet)
if __name__ == "__main__":
# main_real()
# video_path = '/data1/cby/dataset/DeepForensic/videos/source_videos/M004'
# face_path = '/data1/cby/dataset/DeepForensic/face_images/manipulated_images/M004'
# file_list = getFile(path=video_path, format='mp4')
# print(file_list[0], '\n', file_list[0].replace(video_path, face_path))
# print(len(file_list))
vids = os.listdir('/data1/cby/dataset/DeepForensic/videos/source_videos')
print('vids total lenght:', len(vids))
start = 70
end = start + 10
print(vids[start:end], start, end)
for i, vid in enumerate(vids[start:end]):
print(i, 'Start extract face in', vid)
main_real(vid)
print(i, 'Extract face in', vid, 'Finished!')
| beibuwandeluori/DeeperForensicsChallengeSolution | data/generate_face.py | generate_face.py | py | 9,120 | python | en | code | 27 | github-code | 90 |
7584807063 | #Libraries
import os
from bs4 import BeautifulSoup as BS
def HocrSplit(file, pattern):
#Load document
with open(file, "r", encoding = "ISO-8859-1") as f:
doc = BS(f.read(), "html.parser")
#Get pages
pages = doc.findAll("div", {"class": "ocr_page"})
#Divide them
for i, page in enumerate(pages, 1):
#Clear doc
doc.body.clear()
#Append text to doc
doc.body.append(page)
#Save it
with open(pattern % i, "w", encoding = "utf-8") as f:
f.write(str(doc))
| documenti-aperti/sw-offline | hocrsplit.py | hocrsplit.py | py | 492 | python | en | code | 0 | github-code | 90 |
38945671878 | import argparse
import io
from PIL import Image
import torch
from flask import Flask, request, jsonify, render_template
app = Flask(__name__)
DETECTION_URL = "/v1/object-detection/yolov5"
@app.route(DETECTION_URL, methods=["POST", "GET"])
def predict():
if request.method == "POST":
if request.files.get("image"):
image_file = request.files["image"]
image_bytes = image_file.read()
img = Image.open(io.BytesIO(image_bytes))
results = model(img, size=640) # reduce size=320 for faster inference
cropped_images = []
for result in results.pred:
for *box, _, cls in result:
x1, y1, x2, y2 = map(int, box)
cropped_image = img.crop((x1, y1, x2, y2))
cropped_images.append(cropped_image)
# Store cropped images and image URLs in session
for i, cropped_image in enumerate(cropped_images):
image_url = f"cropped_image_{i}.jpg"
cropped_image.save(f"static/{image_url}")
session[image_url] = cropped_image
return render_template("index.html", image_urls=session.keys())
elif request.method == "GET":
return render_template("index.html", image_urls=session.keys())
return
@app.route("/crop/<image_url>", methods=["GET"])
def crop(image_url):
cropped_image = session.get(image_url)
if cropped_image:
cropped_image.save("static/cropped.jpg")
return
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Flask API exposing YOLOv5 model")
parser.add_argument("--port", default=5000, type=int, help="port number")
parser.add_argument('--model', default='yolov5s', help='model to run, i.e. --model yolov5s')
args = parser.parse_args()
model = torch.hub.load('ultralytics/yolov5', args.model)
session = {}
app.run(host="0.0.0.0", port=args.port)
| mohansharma077/object-detection-flaskapp | restapi.py | restapi.py | py | 1,971 | python | en | code | 0 | github-code | 90 |
18547243083 | import logging
from typing import Dict, List
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential
from azure.storage.filedatalake import DataLakeServiceClient
from msgraph.core import GraphClient
from ..config import Configuration
from ..storage import StorageHelper
from ..keyvault.client import ClientHelper
from .data_security_common import DataSecurityCommon
class DataSecurityStorage:
"""Contains methods for managing data security in storage"""
_datalake_service_client: DataLakeServiceClient
def __init__(self, configuration: Configuration):
self.logger = logging.getLogger(__name__)
self._configuration = configuration
self._credentials = DefaultAzureCredential()
self._datalake_service_client = DataLakeServiceClient(
account_url=(
f"https://{configuration.storage_account_name}.dfs.core.windows.net"
),
credential=self._credentials,
)
self._storage_helper = StorageHelper(configuration.storage_account_name)
self._security_common = DataSecurityCommon(configuration)
self.logger.info("DataSecurityStorage initialized")
def _get_ad_group_id(self, group_name):
"""
Get the AD Group ID based on its name
"""
app_client = GraphClient(
credential=self._credentials,
scopes=["https://graph.microsoft.com/.default"],
)
endpoint = "/groups"
request_url = f"{endpoint}?$filter=displayName eq '{group_name}'"
groups_response = app_client.get(request_url)
groups = groups_response.json()
if len(groups.get("value", {})) > 0:
return groups["value"][0]["id"]
else:
return None
def _flatten_list(self, list_to_flatten: List):
"""
Flatten a list composed of strings and sublists
"""
result = []
for item in list_to_flatten:
if isinstance(item, list):
result.extend(self._flatten_list(item))
else:
result.append(item)
return result
def get_security_groups_acl(self, security_groups: List) -> Dict[str, str]:
"""
Creates mapping of security group names and ACL statements, based on object ids.
"""
security_groups_acl = {}
# if the list contains lists, then we flatten it
security_group_flatten_list = self._flatten_list(security_groups)
unique_group_names = set(val for val in security_group_flatten_list)
for group_name in unique_group_names:
object_id = self._get_ad_group_id(group_name)
if object_id is not None:
acl = f"group:{object_id}:r-x"
security_groups_acl[group_name] = acl
else:
self.logger.warning(
f"AD Group {group_name} not found - using variables from keyvault."
)
keyvault_client = ClientHelper(config=self._configuration)
objid_secret_name = f"{self._configuration.objid_prefix}-{group_name}"
env_group_id = keyvault_client.get_secret(secret_name=objid_secret_name)
acl = f"group:{env_group_id}:r-x"
security_groups_acl[group_name] = acl
return security_groups_acl
def get_directory_client(self, file_system_path: str, directory_name: str):
"""
Returns ADLS directory client.
"""
file_system_client = self._datalake_service_client.get_file_system_client(
file_system=f"{file_system_path}"
)
return file_system_client.get_directory_client(directory_name)
def apply_acl_to_view_recursively(
self, path_to_view_dir: str, view_dir: str, acl: str
):
"""
Apply ACL to a view directory (and subdirectories/files) in ADLS2
"""
self.logger.info(f"Applying permissions to the view {view_dir}")
try:
directory_client = self.get_directory_client(
f"{path_to_view_dir}", view_dir
)
directory_client.update_access_control_recursive(acl=acl)
self.logger.info(
f"Successfully applied ACL: [{acl}] to the view {view_dir}"
)
except ResourceNotFoundError as e:
self.logger.warning(f"Resource not found. Skipping. {e.reason}")
except Exception as e:
self.logger.error(f"Error applying permissions to the view {view_dir}: {e}")
def apply_acl_to_directory(self, file_system_path: str, directory: str, acl: str):
"""
Apply ACL to a directory in ADLS2
"""
self.logger.info(f"Applying permissions to the directory {directory}")
try:
directory_client = self.get_directory_client(file_system_path, directory)
directory_client.set_access_control(acl=acl)
self.logger.info(
f"Successfully applied acl: [{acl}] to the directory {directory}"
)
except ResourceNotFoundError as e:
self.logger.warning(f"The resource not found. Skipping. {e.reason}")
except Exception as e:
self.logger.error(
f"Error applying permissions to the directory {directory}: {e}"
)
def apply_acl_to_all_views(
self,
assigned_security_groups: dict,
security_groups_acls: dict,
container: str,
path_to_views: str,
):
"""
Apply ACL to all views based on the assigned security group(s)
"""
# get the schema form env variable just to compose the folder
schema = self._configuration.synapse_database_schema
for view in assigned_security_groups.keys():
group = assigned_security_groups.get(view)
# if the value is a string, then we apply ACL to a single group
if isinstance(group, str):
print(f"Applying ACL - {group} should have access to {view}")
self.apply_acl_to_view_recursively(
path_to_view_dir=f"{container}/{path_to_views}",
view_dir=f"{schema}_{view}",
acl=security_groups_acls.get(group),
)
# if the value is a list, then we apply ACL to all groups.
else:
for item in group:
print(f"Applying ACL - {item} should have access to {view}")
self.apply_acl_to_view_recursively(
path_to_view_dir=f"{container}/{path_to_views}",
view_dir=f"{schema}_{view}",
acl=security_groups_acls.get(item),
)
def apply_acl_to_parent_directories(
self,
container: str,
path_to_views: str,
security_groups_acls: dict,
):
"""
Apply ACL to all parent directories
"""
# get security groups ACLs as single string.
# This is required to use the 'set' method to apply ACL to directories
comma_separated_acl_list = ",".join(security_groups_acls.values())
directories = path_to_views.split("/")
path = "/"
for item in directories:
directory = f"{path}{item}"
self.apply_acl_to_directory(
file_system_path=container,
directory=directory,
acl=comma_separated_acl_list,
)
self.logger.info(f"Assigned ACL to folder: {directory}")
print(f"Assigned ACL to folder: {directory}")
path = path + f"{item}/"
    def apply_acl_to_storage_for_security(
        self, container: str, path: str, assigned_security_groups: dict
    ) -> dict:
        """
        Based on the Security Groups gathered from Purview,
        apply ACL to all View folders/subfolders/files and parent folders

        Parameters
        --------
        container: str
            The container where to apply ACL
        path: str
            The folder structure within the container
        assigned_security_groups: dict(str: str | dict)
            The dictionary of security information to be used to apply ACL.
            key: view name
            value: security group in case of table level security
                   dict(column_name: security group) in case of column level security

        Returns
        --------
        dict(str: str)
            The dictionary with unique security groups and corresponding ACLs applied.
        """
        # Remove column names and duplicate security groups applied to different columns
        # in the same view, unnecessary information to apply ACL.
        # NOTE: this mutates assigned_security_groups in place, replacing each
        # column-level dict value with a de-duplicated list of group names.
        for item in assigned_security_groups:
            if isinstance(assigned_security_groups.get(item), dict):
                # dict.fromkeys de-duplicates while preserving insertion order
                unique_security_groups_in_view = list(
                    dict.fromkeys(assigned_security_groups.get(item).values())
                )
                # NOTE(review): assumes every column-level dict contains a
                # "Not Assigned" entry — list.remove raises ValueError
                # otherwise; confirm upstream guarantees this.
                unique_security_groups_in_view.remove("Not Assigned")
                assigned_security_groups[item] = unique_security_groups_in_view
        self.logger.info(
            "Removed unnecessary column information. Assigned security groups: "
            f"{assigned_security_groups}"
        )
        # get ACL for the assigned groups
        security_groups_acls = self.get_security_groups_acl(
            security_groups=assigned_security_groups.values()
        )
        # apply ACL to all Views in the metadata file
        self.apply_acl_to_all_views(
            assigned_security_groups=assigned_security_groups,
            security_groups_acls=security_groups_acls,
            container=container,
            path_to_views=path,
        )
        self.logger.info(f"ACL applied to all Views in {container}/{path}/")
        # apply ACL to all parent directories
        self.apply_acl_to_parent_directories(
            security_groups_acls=security_groups_acls,
            container=container,
            path_to_views=path,
        )
        self.logger.info(f"ACL applied to parent directories: {path}")
        return security_groups_acls
| Azure-Samples/enterprise-data-sharing | 3-data-sharing-exploration/src/helpers/datasecurity/data_security_storage.py | data_security_storage.py | py | 10,250 | python | en | code | 2 | github-code | 90 |
70611694058 | class NerTagger():
"""命名实体识别包装类的基类,用于定义标准接口
"""
    @classmethod
    def load_model(cls, model_dir):
        """Load a tagger instance from *model_dir* (implemented by subclasses)."""
        raise NotImplementedError

    def predict(self, text):
        """Predict the tag sequence for a single text via the batch API."""
        return self.predict_batch([text])[0]

    def predict_batch(self, texts):
        """Predict tag sequences for a batch of texts (implemented by subclasses)."""
        raise NotImplementedError
    def recognize_nes(self, text):
        """Recognize named entities in *text*.

        Runs the tagger and decodes the predicted BIOES-style tag sequence
        into entity spans.

        Args:
            text (str): input text

        Returns:
            list: recognized entities as (entity_text, entity_type,
                (start, end)) tuples, where end is exclusive.
        """
        if not text:
            return []
        tags = self.predict(text)
        nes = []
        ne = []          # characters of the entity currently being built
        start = None     # start index of the current entity
        type_ = None     # type of the current entity (None = no open entity)
        for i, (chr, tag) in enumerate(zip(text, tags)):
            if tag == 'O':
                # Outside any entity: flush whatever was being built.
                ne, type_ = self._handle_current_entity(nes, ne, start, i, type_)
            elif tag.startswith('S'):
                # Single-char entity: close the previous entity, open a new
                # one (it is flushed by the next close).
                ne, type_ = self._handle_current_entity(nes, ne, start, i, type_)
                start = i
                type_ = tag[2:]
                ne.append(chr)
            elif tag.startswith('B'):
                # Begin tag: flush the previous entity, start a new one.
                ne, type_ = self._handle_current_entity(nes, ne, start, i, type_)
                start = i
                type_ = tag[2:]
                ne.append(chr)
            elif tag.startswith('E'):
                # End tag: include this char, then flush with end = i + 1.
                ne.append(chr)
                ne, type_ = self._handle_current_entity(nes, ne, start, i+1, type_)
            else:
                # Inside tag ('I-*'): keep accumulating characters.
                ne.append(chr)
        # Flush a trailing entity that was never explicitly closed.
        if ne and type_ is not None:
            nes.append(("".join(ne), type_, (start, len(text))))
        return nes
def _handle_current_entity(self, nes, ne, start, end, type_):
if ne and type_ is not None:
nes.append(("".join(ne), type_, (start, end)))
ne = []
type_ = None
return ne, type_
| lawRossi/ner | ner/base.py | base.py | py | 1,805 | python | en | code | 0 | github-code | 90 |
11029373583 | import os
import time
from test.fixtures import *
import pytest
from mock import patch
from app import export_action_logs_queue
from endpoints.api.logs import ExportOrgLogs, OrgLogs, _validate_logs_arguments
from endpoints.api.test.shared import conduct_api_call
from endpoints.test.shared import client_with_identity
@pytest.mark.skipif(
    os.environ.get("TEST_DATABASE_URI", "").find("mysql") >= 0,
    reason="Queue code is very sensitive to times on MySQL, making this flaky",
)
def test_export_logs(app):
    """Exporting org logs via the API should enqueue an export work item."""
    timecode = time.time()

    def get_time():
        # Report "now" as 2 seconds in the past so the queued item's
        # availability time has already passed when we poll the queue.
        return timecode - 2

    with patch("time.time", get_time):
        with client_with_identity("devtable", app) as cl:
            # Queue starts empty.
            assert export_action_logs_queue.get() is None

            # Call to export logs.
            body = {
                "callback_url": "http://some/url",
                "callback_email": "a@b.com",
            }

            conduct_api_call(
                cl, ExportOrgLogs, "POST", {"orgname": "buynlarge"}, body, expected_code=200
            )

            # Ensure the request was queued.
            assert export_action_logs_queue.get() is not None
def test_invalid_date_range(app):
    """The logs endpoint must reject a query whose start date is after its end date."""
    starttime = "02/02/2020"
    endtime = "01/01/2020"

    # Sanity-check the fixture: the parsed start really is not before the end.
    parsed_starttime, parsed_endtime = _validate_logs_arguments(starttime, endtime)
    assert parsed_starttime >= parsed_endtime
    with client_with_identity("devtable", app) as cl:
        # The API should respond with 400 Bad Request for the inverted range.
        conduct_api_call(
            cl,
            OrgLogs,
            "GET",
            {"orgname": "buynlarge", "starttime": starttime, "endtime": endtime},
            {},
            expected_code=400,
        )
| quay/quay | endpoints/api/test/test_logs.py | test_logs.py | py | 1,658 | python | en | code | 2,281 | github-code | 90 |
14950350021 | from retdec.analysis import Analysis
from retdec.exceptions import MissingParameterError
from retdec.file import File
from retdec.service import Service
class Fileinfo(Service):
    """Access to the file-analyzing service."""

    def start_analysis(self, **kwargs):
        """Starts an analysis with the given parameters.

        :param input_file: File to be analyzed (**required**).
        :type input_file: str or file-like object
        :param output_format: Format of the output from the analysis.
        :type output_format: str
        :param verbose: Should the analysis produce a detailed output?
        :type verbose: bool

        :returns: Started analysis (:class:`~retdec.analysis.Analysis`).
        """
        conn = self._create_new_api_connection('/fileinfo/analyses')
        analysis_id = self._start_analysis(conn, kwargs)
        return Analysis(analysis_id, conn)

    def _start_analysis(self, conn, kwargs):
        """Starts an analysis with the given parameters.

        :param retdec.conn.APIConnection conn: Connection to the API to be used
                                               for sending API requests.
        :param dict kwargs: Parameters for the analysis.

        :returns: Unique identifier of the analysis.
        """
        files = {'input': self._get_input_file(kwargs)}
        params = {}
        # Forward only the optional parameters the caller actually supplied.
        for name in ('output_format', 'verbose'):
            self._add_param_when_given(name, params, kwargs)
        response = conn.send_post_request(files=files, params=params)
        return response['id']

    def _get_input_file(self, kwargs):
        """Returns the input file to be analyzed."""
        if 'input_file' not in kwargs:
            raise MissingParameterError('input_file')
        return File(kwargs['input_file'])

    def __repr__(self):
        qualified_name = '{}.{}'.format(__name__, self.__class__.__name__)
        return '<{} api_url={!r}>'.format(qualified_name, self.api_url)
| s3rvac/retdec-python | retdec/fileinfo.py | fileinfo.py | py | 1,941 | python | en | code | 92 | github-code | 90 |
18406026239 | from collections import defaultdict
import sys
sys.setrecursionlimit(1000000)
class edge:
    """A weighted edge pointing at node *to* with weight *cost*."""

    def __init__(self, to, cost):
        self.to = to
        self.cost = cost
def main():
    """Color tree vertices by parity of their weighted distance from vertex 1.

    Vertices an even distance from vertex 1 get 1, odd distance get 0
    (printed one per line).
    """
    N = int(input())
    g = defaultdict(list)
    for i in range(N-1):
        u, v, w = (int(_) for _ in input().split())
        g[u].append(edge(v, w))
        g[v].append(edge(u, w))

    # color[v] in {1, -1}: parity class of v; 0 means not yet visited.
    color = [0] * (N+1)
    color[1] = 1
    # Iterative DFS: avoids RecursionError on deep, path-like trees that the
    # recursive version could hit even with a raised recursion limit.
    stack = [1]
    while stack:
        ver = stack.pop()
        for e in g[ver]:
            if color[e.to] == 0:
                # An even edge keeps the parity class, an odd edge flips it.
                color[e.to] = color[ver] if e.cost % 2 == 0 else -color[ver]
                stack.append(e.to)

    # Map the -1 class to 0 for output.
    for i in range(1, N+1):
        if color[i] == -1: color[i] = 0
    print(*color[1:], sep='\n')
    return


if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p03044/s559113336.py | s559113336.py | py | 856 | python | en | code | 0 | github-code | 90 |
2938940419 | import asyncio
import threading
import time
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass
from datetime import timedelta
from temporalio import activity, workflow
from temporalio.client import Client
from temporalio.worker import Worker
@dataclass
class ComposeGreetingInput:
    """Input payload for the compose_greeting activity."""

    # greeting: the salutation (e.g. "Hello"); name: who to greet.
    greeting: str
    name: str
@activity.defn
def compose_greeting(input: ComposeGreetingInput) -> str:
    """Compose a greeting, heartbeating once per second while it 'works'."""
    # Simulate 3 seconds of work, heartbeating in between, as all
    # long-running activities should, then return the greeting.
    for _ in range(3):
        print(f"Heartbeating activity on thread {threading.get_ident()}")
        activity.heartbeat()
        time.sleep(1)
    return f"{input.greeting}, {input.name}!"
@workflow.defn
class GreetingWorkflow:
    """Workflow that greets *name* by delegating to the compose_greeting activity."""

    @workflow.run
    async def run(self, name: str) -> str:
        return await workflow.execute_activity(
            compose_greeting,
            ComposeGreetingInput("Hello", name),
            start_to_close_timeout=timedelta(seconds=10),
            # Always set a heartbeat timeout for long-running activities
            heartbeat_timeout=timedelta(seconds=2),
        )
async def main():
    """Run a worker for the sample workflow and execute it once."""
    # Start client
    client = await Client.connect("localhost:7233")

    # Run a worker for the workflow
    async with Worker(
        client,
        task_queue="hello-activity-threaded-task-queue",
        workflows=[GreetingWorkflow],
        activities=[compose_greeting],
        # Synchronous activities are not allowed unless we provide some kind of
        # executor. This same thread pool could be passed to multiple workers if
        # desired.
        activity_executor=ThreadPoolExecutor(5),
    ):

        # While the worker is running, use the client to run the workflow and
        # print out its result. Note, in many production setups, the client
        # would be in a completely separate process from the worker.
        result = await client.execute_workflow(
            GreetingWorkflow.run,
            "World",
            id="hello-activity-threaded-workflow-id",
            task_queue="hello-activity-threaded-task-queue",
        )
        print(f"Result on thread {threading.get_ident()}: {result}")


if __name__ == "__main__":
    asyncio.run(main())
| temporalio/samples-python | hello/hello_activity_threaded.py | hello_activity_threaded.py | py | 2,287 | python | en | code | 68 | github-code | 90 |
13117270959 | """This is my solution to Leetcode problem 1047: Remove All Adjacent Duplicates In String."""
class Solution:
    def removeDuplicates(self, S: str) -> str:
        """Repeatedly remove adjacent equal character pairs; return the rest.

        Classic stack scan: each character cancels the stack top when they
        match, otherwise it is pushed.
        """
        remaining = []
        for ch in S:
            if remaining and remaining[-1] == ch:
                remaining.pop()
            else:
                remaining.append(ch)
        return "".join(remaining)
#Overall time complexity: O(n), where n is the length of the input string
#Explanation: You have to iterate over all n characters in the input string,
#and all operations performed during each loop take O(1) time. Thus this
#leads to O(n) time complexity.
#Overall space complexity: O(n), where n is the length of the input string
#Explanation: In the worst case your stack will contain all characters in the input
#string if there are no duplicates, meaning that the size of the stack is upper bounded
#by n, where n is the length of the input string.
10416099314 | from django.http import HttpResponse
from django.core.mail import send_mail
from django.template.loader import render_to_string
from django.conf import settings
from .models import Order, OrderLineItem
from products.models import Product
from profiles.models import UserProfile
import json
import time
class StripeWH_Handler:
    """Handle Stripe Webhooks"""

    def __init__(self, request):
        # Keep the incoming Django request so handlers can access it if needed.
        self.request = request
    def _send_confirmation_email(self, order):
        """
        Send the user a confirmation email
        """
        cust_email = order.email
        # Render the subject and body from templates so the wording lives
        # alongside the other checkout templates instead of in code.
        subject = render_to_string(
            'checkout/confirmation_emails/confirmation_email_subject.txt',
            {'order': order})
        body = render_to_string(
            'checkout/confirmation_emails/confirmation_email_body.txt',
            {'order': order, 'contact_email': settings.DEFAULT_FROM_EMAIL})

        send_mail(
            subject,
            body,
            settings.DEFAULT_FROM_EMAIL,
            [cust_email]
        )
    def handle_event(self, event):
        """
        Handle a generic/unknown/unexpected webhook event
        """
        # Acknowledge with 200 so Stripe does not keep retrying event types
        # this handler does not explicitly support.
        return HttpResponse(
            content=f'Unhandled webhook received: {event["type"]}',
            status=200)
def handle_payment_intent_succeeded(self, event):
"""
Handle payment_intent.succeeded webhook from Stripe
Ensure orders are entered in database
- even if there is a user error
"""
# intended event - checkout
intent = event.data.object
# payment intent id
pid = intent.id
# shopping bag
bag = intent.metadata.bag
# check whether save info is checked
save_info = intent.metadata.save_info
# order information to use:
billing_details = intent.charges.data[0].billing_details
shipping_details = intent.shipping
grand_total = round(intent.charges.data[0].amount / 100, 2)
# Clean data in the shipping details
# replace empty strings in shipping details with none
# avoid storing as blank string, not null value
for field, value in shipping_details.address.items():
if value == "":
shipping_details.address[field] = None
# Update profile info if save_info is checked
profile = None
username = intent.metadata.username
if username != 'AnonymousUser':
profile = UserProfile.objects.get(user__username=username)
if save_info:
# if save_info is checked, add the data to profile
profile.defult_phone_number__iexact=shipping_details.phone
profile.defult_country__iexact=shipping_details.address.country
profile.defult_postcode__iexact=shipping_details.address.postal_code
profile.defult_town_or_city__iexact=shipping_details.address.city
profile.defult_street_address1__iexact=shipping_details.address.line1
profile.defult_street_address2__iexact=shipping_details.address.line2
profile.defult_county__iexact=shipping_details.address.state
profile.save()
# Check if order exists
# if exists - return response
# if does not - create it in the webhook
order_exists = False
# introduce delay to order creation
# for when it isn't found in
attempt = 1
while attempt <= 5:
try:
# get order info from payment intent
# iexact lookup field makes sure it is an exact match
order = Order.objects.get(
full_name__iexact=shipping_details.name,
email__iexact=billing_details.email,
phone_number__iexact=shipping_details.phone,
country__iexact=shipping_details.address.country,
postcode__iexact=shipping_details.address.postal_code,
town_or_city__iexact=shipping_details.address.city,
street_address1__iexact=shipping_details.address.line1,
street_address2__iexact=shipping_details.address.line2,
county__iexact=shipping_details.address.state,
grand_total=grand_total,
original_bag=bag,
stripe_pid=pid,
)
# if order is found:
order_exists = True
# if the order is found, break the loop
break
except Order.DoesNotExist:
# increment attempt by 1
attempt += 1
# python time module to sleep for one second
# webhook searchs for order five time in five seconds
time.sleep(1)
if order_exists:
self._send_confirmation_email(order)
return HttpResponse(
content=f'Webhook received: {event["type"]} | SUCCESS: Verified order already in database',
status=200)
else:
order = None
try:
# creates form to save in webhook to create order
# objects.create useing data from payment intent
order = Order.objects.create(
full_name=shipping_details.name,
user_profile=profile,
email=billing_details.email,
phone_number=shipping_details.phone,
country=shipping_details.address.country,
postcode=shipping_details.address.postal_code,
town_or_city=shipping_details.address.city,
street_address1=shipping_details.address.line1,
street_address2=shipping_details.address.line2,
county=shipping_details.address.state,
original_bag=bag,
stripe_pid=pid,
)
# load bag from json verious in payment intent
for item_id, item_data in json.loads(bag).items():
# get product id out of bag
product = Product.objects.get(id=item_id)
if isinstance(item_data, int):
# if product value is integer, there are no sizes
order_line_item = OrderLineItem(
order=order,
product=product,
quantity=item_data,
)
order_line_item.save()
else:
# else, if product has size
for size, quantity in item_data['items_by_size'].items():
order_line_item = OrderLineItem(
order=order,
product=product,
quantity=quantity,
product_size=size,
)
order_line_item.save()
except Exception as e:
if order:
order.delete()
return HttpResponse(content=f'Webhook received: {event["type"]} | Error: {e}',
status=500)
self._send_confirmation_email(order)
return HttpResponse(
content=f'Webhook received: {event["type"]} | SUCCESS: Created order in webhook',
status=200)
    def handle_payment_intent_payment_failed(self, event):
        """
        Handle the payment_intent.payment_failed webhook from Stripe
        """
        # Acknowledge receipt; a failed payment needs no server-side action.
        return HttpResponse(
            content=f'Webhook received: {event["type"]}',
            status=200)
| rebeccatraceyt/boutique_ado | checkout/webhook_handler.py | webhook_handler.py | py | 7,838 | python | en | code | 0 | github-code | 90 |
27647324864 | from itertools import combinations
from collections import OrderedDict
# Scikit Data Access imports
from .image_util import convertBinCentersToEdges
# 3rd part imports
import pandas as pd
import numpy as np
from netCDF4 import Dataset, num2date
def averageDates(dates, round_nearest_day=False):
    '''
    Compute the average of a pandas series of timestamps

    @param dates: Pandas series of pandas datetime objects
    @param round_nearest_day: Round to the nearest day

    @return Average of dates
    '''
    # Average the offsets from the earliest date, then shift back; this keeps
    # the arithmetic in timedeltas rather than raw timestamps.
    earliest = dates.min()
    mean_date = earliest + (dates - earliest).mean()
    return mean_date.round('D') if round_nearest_day else mean_date
def dateMismatch(dates, days=10):
    '''
    Check if dates are not within a certain number of days of each other

    @param dates: Iterable container of pandas timestamps
    @param days: Number of days

    @return True if any pair is further apart than *days* days, False otherwise
    '''
    threshold = pd.to_timedelta(days, 'D')
    # Compare every pair; any single violation means a mismatch.
    return any(abs(a - b) > threshold for a, b in combinations(dates, 2))
def computeEWD(grace_data, scale_factor, round_nearest_day=False):
    '''
    Compute scale corrected equivalent water depth

    Equivalent water depth by averaging results from
    GFZ, CSR, and JPL, and then applying the scale factor

    @param grace_data: Data frame containing grace data (CSR, GFZ, JPL columns)
    @param scale_factor: Scale factor to apply
    @param round_nearest_day: Round dates to nearest day

    @return Equivalent water depth determined by applying the scale factor to
            the average GFZ, JPL and CSR.
    '''

    def cutMissingData(in_data, reverse=False):
        '''
        Removes data from the beginning (or ending if reverse=True) so that
        data exists for all 3 sources (GFZ, JPL, and CSR).

        This function is necessary as not all sources may get cut when
        a starting and ending date is specified.

        @param in_data: Input grace data
        @param reverse: Remove data from end instead of beginning
        @return Tuple containing modified in_data, the last cut date
        '''
        last_cut_date = None

        if reverse == True:
            index = in_data.index[::-1]
        else:
            index = in_data.index

        for date in index:
            # Look in a +/- 10 day window around this date; if any source has
            # no data at all in the window, this row cannot be matched up.
            cut = in_data.loc[date-pd.to_timedelta('10D'):date+pd.to_timedelta('10D')]
            if min(len(cut['CSR'].dropna()), len(cut['GFZ'].dropna()), len(cut['JPL'].dropna())) == 0:
                if reverse:
                    in_data = in_data.iloc[:-1]
                else:
                    in_data = in_data.iloc[1:]
                last_cut_date = date
            else:
                break

        return in_data, last_cut_date

    # Check if there is no valid data at all
    if len(grace_data['CSR'].dropna()) + len(grace_data['GFZ'].dropna()) + len(grace_data['JPL'].dropna()) == 0:
        if round_nearest_day == True:
            return pd.Series(np.nan, index=grace_data.index.round('D'))
        else:
            return pd.Series(np.nan, index=grace_data.index)

    # Find all months that have different dates supplied by GFZ, JPL, and CSR
    offsets = grace_data[grace_data.isnull().any(axis=1)]

    # Trim starting and ending months if they don't have valid data for all 3 data sets
    offsets, cut_date1 = cutMissingData(offsets)
    offsets, cut_date2 = cutMissingData(offsets, reverse=True)

    # If beginning data has been cut, update data accordingly
    if cut_date1 != None:
        index_location = np.argwhere(grace_data.index == cut_date1)[0][0]
        new_index = grace_data.index[index_location+1]
        grace_data = grace_data.loc[new_index:]

    # If ending data has been cut, update data accordingly
    if cut_date2 != None:
        index_location = np.argwhere(grace_data.index == cut_date2)[0][0]
        new_index = grace_data.index[index_location-1]
        grace_data = grace_data.loc[:new_index]

    # Get all valid data for JPL, GFZ, and CSR
    csr = offsets['CSR'].dropna()
    gfz = offsets['GFZ'].dropna()
    jpl = offsets['JPL'].dropna()

    new_index = []
    new_measurements = []

    # Iterate over all data with offset dates and combine them
    for (c_i, c_v), (g_i, g_v), (j_i, j_v) in zip(csr.iteritems(), gfz.iteritems(), jpl.iteritems()):
        # Check if the dates are within 10 days of each other
        dates = pd.Series([c_i, g_i, j_i])
        if dateMismatch(dates):
            raise ValueError('Different dates are not within 10 days of each other')

        # Determine new index and average value of data
        new_index.append(averageDates(dates, round_nearest_day))
        new_measurements.append(np.mean([c_v, g_v, j_v]))

    # Create series from averaged results
    fixed_means = pd.Series(data=new_measurements, index=new_index)
    fixed_means.index.name = 'Date'

    # Averaging results from non mismatched days
    ewt = grace_data.dropna().mean(axis=1)

    # If requested, round dates to nearest day
    if round_nearest_day:
        ewt_index = ewt.index.round('D')
    else:
        ewt_index = ewt.index

    # Reset ewt index.
    # BUGFIX: Series.as_matrix() was removed in pandas 1.0; use to_numpy().
    ewt = pd.Series(ewt.to_numpy(), index=ewt_index)

    # Combine data with mismatched days with data
    # without mismatched days
    ewt = pd.concat([ewt, fixed_means])
    ewt.sort_index(inplace=True)

    # Apply scale factor
    ewt = ewt * scale_factor

    # Return results
    return ewt
def readTellusData(filename, lat_lon_list, lat_name, lon_name, data_name, data_label=None,
                   time_name=None, lat_bounds_name=None, lon_bounds_name=None,
                   uncertainty_name = None, lat_bounds=None, lon_bounds = None):
    '''
    This function reads in netcdf data provided by GRACE Tellus

    @param filename: Name of file to read in
    @param lat_lon_list: List of latitude, longitude tuples that are to be read
    @param data_label: Label for data
    @param lat_name: Name of latitude data
    @param lon_name: Name of longitude data
    @param data_name: Name of data product
    @param time_name: Name of time data
    @param lat_bounds_name: Name of latitude boundaries
    @param lon_bounds_name: Name of longitude boundaries
    @param uncertainty_name: Name of uncertainty in data set
    @param lat_bounds: Latitude bounds
    @param lon_bounds: Longitude bounds

    @return dictionary containing data and dictionary containing latitude and longitude
    '''

    def findBin(in_value, in_bounds):
        # Locate the single bin whose [low, high) range contains in_value;
        # the last bin's upper edge is treated as inclusive.
        search = np.logical_and(in_value >= in_bounds[:,0], in_value < in_bounds[:,1])
        if np.sum(search) == 1:
            return np.argmax(search)
        elif in_value == in_bounds[-1]:
            return len(in_bounds)-1
        else:
            raise RuntimeError("Value not found")


    if data_label == None and time_name != None:
        raise RuntimeError("Need to specify data label when time data is used")

    if lat_bounds is None and lon_bounds is not None or \
       lat_bounds is not None and lon_bounds is None:

        raise ValueError('Must specify both lat_bounds and lon_bounds, or neither of them')

    nc = Dataset(filename, 'r')

    lat_data = nc[lat_name][:]
    lon_data = nc[lon_name][:]
    data = nc[data_name][:]

    if lat_bounds is None:
        # Derive cell boundaries either from the file's bounds variables or,
        # failing that, from the bin centers themselves.
        if lat_bounds_name == None and lon_bounds_name == None:

            lat_edges = convertBinCentersToEdges(lat_data)
            lon_edges = convertBinCentersToEdges(lon_data)

            lat_bounds = np.stack([lat_edges[:-1], lat_edges[1:]], axis=1)
            lon_bounds = np.stack([lon_edges[:-1], lon_edges[1:]], axis=1)

        else:
            lat_bounds = nc[lat_bounds_name][:]
            lon_bounds = nc[lon_bounds_name][:]

    if time_name != None:
        time = nc[time_name]
        date_index = pd.to_datetime(num2date(time[:],units=time.units,calendar=time.calendar))


    if uncertainty_name != None:
        uncertainty = nc[uncertainty_name][:]

    data_dict = OrderedDict()
    meta_dict = OrderedDict()
    for lat, lon in lat_lon_list:

        # Convert longitude to the 0-360 convention used by the data grid
        orig_lon = lon
        if lon < 0:
            lon += 360.

        lat_bin = findBin(lat, lat_bounds)
        lon_bin = findBin(lon, lon_bounds)

        label = str(lat) + ', ' + str(orig_lon)

        # Shape of the returned value depends on which optional variables
        # were requested: a DataFrame (with or without uncertainty) when a
        # time axis exists, otherwise a scalar grid value.
        if time_name != None and uncertainty_name != None:
            frame_data_dict = OrderedDict()
            frame_data_dict[data_label] = data[:,lat_bin, lon_bin]
            frame_data_dict['Uncertainty'] = uncertainty[:,lat_bin, lon_bin]
            data_dict[label] = pd.DataFrame(frame_data_dict, index=date_index)

        elif time_name != None and uncertainty_name == None:
            data_dict[label] = pd.DataFrame({data_label : data[:, lat_bin, lon_bin]}, index=date_index)

        else:
            data_dict[label] = data[lat_bin, lon_bin]

        meta_dict[label] = OrderedDict()
        meta_dict[label]['Lat'] = lat
        meta_dict[label]['Lon'] = orig_lon

    return data_dict, meta_dict, lat_bounds, lon_bounds
def getStartEndDate(in_data):
    '''
    Return the first and last entries of a pandas object's index.

    @param in_data: Pandas Series or DataFrame with a (sorted) index

    @return Tuple of (start_date, end_date)
    '''
    # BUGFIX: the previous version pulled an arbitrary (label, column) pair
    # via next(in_data.items()) and never used it; it also failed on plain
    # dicts. Only the index is needed here.
    start_date = in_data.index[0]
    end_date = in_data.index[-1]
    return start_date, end_date
| MITHaystack/scikit-dataaccess | skdaccess/utilities/grace_util.py | grace_util.py | py | 9,215 | python | en | code | 44 | github-code | 90 |
24132355585 | import requests
import pandas as pd
from bs4 import BeautifulSoup
def rushing_dict():
    """Scrape 2022 NFL rushing stats and return {player_name: stats_dict}.

    Keys are player names with all spaces removed; values keep the scraped
    stat strings (team, attempts, yards, TDs, YPA, YPG, fumbles).
    """
    link = 'https://www.pro-football-reference.com/years/2022/rushing.htm'
    html_text = requests.get(link).text
    soup = BeautifulSoup(html_text, 'lxml')
    table = soup.find('table', class_="per_match_toggle")
    rows = table.find_all('tr')
    playerDic = {}
    for tr in rows:
        td = tr.find_all('td')
        row = [i.text for i in td]
        # Header/separator rows have no <td> cells and yield an empty list.
        if row:
            # Column positions follow the site's rushing table layout.
            name = row[0]
            team = row[1]
            attempts = row[6]
            yards = row[7]
            TDs = row[8]
            YPA = row[11]
            YPG = row[12]
            Fumbles = row[13]
            # Strip trailing '*'/'+' (award markers appended to names).
            # NOTE(review): removes one trailing char per marker seen while
            # scanning from the end — assumes markers only appear as a
            # suffix; confirm against the scraped data.
            for i in range(len(name)-1, 0, -1):
                if name[i] == '*' or name[i] == '+':
                    name = name[:-1]
            playerDic[name.replace(" ", "")] = \
                {
                    'team': team,
                    'attempts': attempts,
                    'yards': yards,
                    'TDs': TDs,
                    'YPA': YPA,
                    'YPG': YPG,
                    'fumbles': Fumbles,
                }

    return playerDic
def beautifulRB(player):
    """Format a rushing stats dict as a one-line summary string.

    Returns None when *player* is falsy (e.g. player not found).
    """
    if not player:
        return None
    return (
        f"attempts: {player['attempts']}, yards: {player['yards']}, "
        f"TDs: {player['TDs']}, fumbles: {player['fumbles']}"
    )
| abealam/FFBot | rushing.py | rushing.py | py | 1,391 | python | en | code | 0 | github-code | 90 |
25676235281 | # Задайте список из нескольких чисел. Напишите программу, которая найдёт сумму элементов списка, стоящих на нечётной идексах.
# Пример:
# [2, 3, 5, 9, 3] -> на нечётных идексах элементы 3 и 9, ответ: 12
import random


def sum_odd_index_elements(values):
    """Return the sum of the elements at odd indexes (1, 3, 5, ...).

    Example: [2, 3, 5, 9, 3] -> 3 + 9 = 12.
    """
    # Slicing from index 1 with step 2 selects exactly the odd positions.
    return sum(values[1::2])


def main():
    """Build a random list of the requested size and report the odd-index sum."""
    list_size = int(input("Введите размерность списка который хотите создать - "))
    numbers = [random.randint(0, 10) for _ in range(list_size)]
    print(numbers)
    total = sum_odd_index_elements(numbers)
    print(f"{numbers} Cумма элементов на нечётных позициях = {total}")


# Guarded so the module can be imported (and the helper tested) without
# triggering the interactive prompt.
if __name__ == "__main__":
    main()
| AntonSavchenko88/World-of-programming | PythonSeminar03/Task01.py | Task01.py | py | 768 | python | ru | code | 0 | github-code | 90 |
17872647581 | import requests
import csv
# Proxy needed to reach the API from inside the restricted (VPN) network.
proxies = {
    "http" : "http://pfrie-std.proxy.e2.rie.gouv.fr:8080",
    "https": "http://pfrie-std.proxy.e2.rie.gouv.fr:8080",
}

# We want a JSON response => declared below in the request headers.

code_postaux_a_verifier = open("a_verifier.csv","r",encoding='utf-8')
a_verifier = csv.reader(code_postaux_a_verifier, delimiter="|", quotechar='"')
next(a_verifier)
# The first line (column names) is skipped above.
code_postal_new = [['dossier_id'],['numero'],['td'],['code_postal'],['commune']]  # header row of a 5-column table
for row in a_verifier:  # for each line of the file
    code_postal_new[0].append(row[0])  # append column 1 (dossier_id)
    code_postal_new[1].append(row[1])  # append column 2 (dossier number)
    code_postal_new[2].append(row[2])  # append column 3 (td)
    code_postal_new[3].append(row[3])  # append column 4 (postal code)
    code_postal_new[4].append(row[4])  # append column 5 (commune)
# The CSV is fully consumed by the loop above, so the input file can be closed.
code_postaux_a_verifier.close()
#print (code_postal_new)
code_postaux_verifie = open("verifie.txt","w",encoding='utf-8')
headers = {"Accept": "application/json"}
url_api_code_postal = "https://apicarto.ign.fr/api/codes-postaux/communes/"
n=0
# For every postal code, query the IGN API and check that one of the
# communes returned for that code matches the commune from the database.
for code in code_postal_new[3]:
    url_appel = url_api_code_postal + code
    reponse = requests.get(url_appel, proxies=proxies , headers=headers)  # with proxy (VPN)
    #reponse = requests.get(url_appel, headers=headers)# without VPN
    code_postaux_verifie.write(f"{code_postal_new[0][n]};")  # write the dossier_id,
    code_postaux_verifie.write(f"{code_postal_new[1][n]};")  # -- the dossier number,
    code_postaux_verifie.write(f"{code_postal_new[2][n]};")  # -- the subject (terrain or applicant),
    code_postaux_verifie.write(f"{code_postal_new[3][n]};")  # -- the postal code,
    code_postaux_verifie.write(f"{code_postal_new[4][n]};")  # -- the commune,
    print (reponse.status_code)
    if reponse.status_code != 200:  # postal code not found
        code_postaux_verifie.write(f"erreur_suspectée\n")  # -- flag the error, then move to the next line
        n=n+1  # count loop iterations
        continue
    rep = reponse.json()
    #print(code_postal_new[3][n])
    # The response is a list of dict-like elements, one per commune.
    dictionnaire={}
    nbre_element = len(rep)
    o = 0  # counter of elements checked
    for element in rep:
        dictionnaire=(rep[o])
        print (dictionnaire["nomCommune"])
        if (dictionnaire["nomCommune"]) == code_postal_new[4][n]:
            # The commune derived from the postal code matches the database value.
            code_postaux_verifie.write(f"OK\n")
            break
        o+=1
    if o == nbre_element:
        # Every element was checked without finding a match.
        code_postaux_verifie.write(f"erreur_suspectée\n")
    n=n+1
# BUGFIX: 'close' was referenced without calling it, so the output file was
# never explicitly flushed/closed; call it properly.
code_postaux_verifie.close()
| MANCodeClub/pythonfromscratch | musculation/011_Code_Postal/code_postal_nico_m.py | code_postal_nico_m.py | py | 3,840 | python | fr | code | 1 | github-code | 90 |
35180712831 | #IMPORT RANDOM
import random
# Randomly pick today's promotional discount percentage (10, 20, or 30).
dis = random.choice ([10, 20, 30])
# Convert the percentage into a fraction for the price calculation below.
dis1 = dis/100
print ("Congratulations! As our 30000th customer, you get a discount of:", dis, "On today's purchase.")
def purchase_1(amount_1, discount):
    """Prompt for *amount_1* item prices and return the discounted total.

    Adds 5% GST to the subtotal, subtracts the *discount* fraction
    (e.g. 0.10 for 10%), and rounds to 2 decimal places.
    """
    subtotal = 0
    for _ in range(amount_1):
        subtotal += float(input("Tell me the price of your item."))
    print(subtotal)
    with_gst = subtotal * 1.05
    discounted = with_gst - with_gst * discount
    return round(discounted, 2)
# Output: ask for the item count, compute the discounted total, and report it.
amount = int(input("Please tell me the number of items you wish to purchase."))
price = purchase_1(amount,dis1)
print ("Your Total is", price, "Have a nice day!")
| Amina-Yassin/Amina_Yassin_Cashier_Register.py | GSTPart.py | GSTPart.py | py | 752 | python | en | code | 0 | github-code | 90 |
2872933126 | # SJIP = "192.168.0.110:5555"
# adb connection target for the Android emulator.
# NOTE(review): 127.0.0.1:62001 matches the Nox emulator port listed in the
# notes at the bottom of this file — confirm which emulator is in use.
SJIP = "127.0.0.1:62001"
# oaX: for each WeChat official account, traverse (scroll through) the
# latest oaX groups of article entries.
oaX = 200
# Alternative, shorter account list kept for quick tests:
# oaname = ["第一财经",
#           "人民日报"
#           ]
oaname = [
"吉林检察",
"通化检察",
"长春检察",
"长春林检",
"梨树检察",
"吉林市检察",
"延吉检声",
"敦化市检察院",
"延边林检",
"珲春林检",
"松原检察",
"吉林丰检",
"伊通检察",
"吉林省临江林区人民检察院",
"白山检察",
"辽源检察",
"南关检察",
"敦化林检",
"吉林省抚松林区人民检察院",
"九台检察",
"前郭检察",
"吉林昌邑检察",
"农安检察",
"梅河口市人民检察院",
"辽源西安检察",
"延边检察",
"宽城检察",
"洮南检察",
"扶余检察",
"长白检察",
"舒兰检察",
"和龙林检",
"白石山林检",
"辽源龙山检察",
"红石林检",
"白河林检",
"四平铁东检察",
"集安检察",
"珲春市人民检察院",
"长岭检察",
"船检",
"蛟河检察",
"双阳检察",
'四平检察',
"洮北检察",
"二道江检察院",
"江源林检",
"公主岭检察",
"龙井检察",
"永吉县人民检察院",
"桦甸市人民检察院",
"通化市东昌区人民检察院",
"和龙市人民检察院",
"东丰检察",
"通化县人民检察院",
"宁江检察",
"吉林高新检察",
"磐石检察",
"乾安检察",
"汪清林检",
"吉林市城西地区人民检察院",
"白城检察",
"图们检察",
"镇赉县人民检察院",
"双辽市人民检察院",
"汪清检察",
"净月检察",
"临江检察",
"浑江检察",
"德惠检察",
"城郊检察",
"柳河县人民检察院",
"通榆检察",
"西检在线",
"四平平东检察",
"大安检察",
"安图县人民检察院",
"绿园检察",
"抚检在线",
"江源检察",
"靖宇县人民检察院",
"正义辉检",
"长春市朝阳区人民检察院",
"榆树市人民检察院",
"长春经开检察院",
"长春新区检察院",
"吉林省铁检分院",
"长春铁路运输检察院公众号",
"吉林铁检",
"白城铁检",
"通化铁路运输检察院",
"延边铁路运输检察院",
"正义龙潭",
"正义赉宁",
"二道检察",
"长春汽开检察",
"东辽检察"]
'''
adb connect 192.168.0.110:5555 # 连接移动设备 ip 192.168.0.110 端口 5555
# ps:雷电模拟器端口:5555 夜神模拟器端口:62001 逍遥安卓模拟器端口:21503 海马玩模拟器端口:53001 网易MUMU模拟器端口:7555
#初始化安装
adb connect ip地址:5555
python -m uiautomator2 init
#启动
python -m weditor
# git clone https://github.com/alibaba/web-editor 部署包
adb devices # 查看连接设备
mitmdump -s weixinDatacaught.py # 启动 mitm监听
需要代理被监控对象 127.0.0.1:8080
adb connect 192.168.0.105:62001
python -m uiautomator2 init
python -m weditor
'''
| ppzoe/Python-learn | c20220215/WeChat/settings.py | settings.py | py | 3,750 | python | en | code | 0 | github-code | 90 |
6541996285 | import math
from .point import Point
from .router import DEFAULT_ACCESS_POINTS, R1, R2
class Device(object):
    """A tracked wireless device located by two-router trilateration."""
    def __init__(self, unique_id):
        # Opaque identifier for this device.
        self.id = unique_id
        # Estimated position; updated in place by compute_coordinate().
        self.coordinate = Point(0, 0)
        # Maps access-point SSID -> last observed signal strength.
        self.signal_strengths = {}
    # lifted from wikipedia
    # link - https://en.wikipedia.org/wiki/True_range_multilateration
    def compute_coordinate(self):
        """Estimate this device's 2D position from observed signal strengths.

        Converts each known access point's signal reading into a distance,
        then solves the two-circle true-range multilateration equations for
        routers R1 and R2. Raises KeyError if either router's SSID is
        missing from self.signal_strengths.
        """
        # considering 2D plane
        distances = {}
        for key, value in self.signal_strengths.items():
            if DEFAULT_ACCESS_POINTS.get(key, None) is not None:
                access_point = DEFAULT_ACCESS_POINTS[key]
                distances[key] = access_point.distance_from_signal(value)
        # Baseline between the two reference routers along the x axis.
        distance_apart = R2.coordinate.xcord - R1.coordinate.xcord
        xcord = (distances[R1.SSID]**2 - distances[R2.SSID]**2 + distance_apart**2)/(2*distance_apart)
        # NOTE(review): the cited derivation gives y = sqrt(r1**2 - x**2);
        # distance_apart**2 + xcord**2 looks like a transcription slip -- confirm.
        ycord=math.sqrt(distance_apart**2 + xcord**2)
        # NOTE(review): offsetting the solved x by r1 is not part of the
        # standard derivation -- verify the intended coordinate frame.
        self.coordinate.xcord = xcord + distances[R1.SSID]
self.coordinate.ycord = ycord + distances[R1.SSID] | beesaferoot/wifi-tracker | web-interface/map/device.py | device.py | py | 1,067 | python | en | code | 0 | github-code | 90 |
24264808581 | import ray
import numpy as np
from ray.rllib.algorithms.dqn.dqn import DQN
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF
from sklearn.metrics import mean_squared_error
from Environment.IGEnvironments import InformationGatheringEnv
from Evaluation.Utils.metrics_wrapper import MetricsDataCreator
from Training.TrainingConfigurations.ShekelConfiguration import create_configuration
""" Initialize Ray """
ray.init()
configuration = create_configuration()
# Create the environment and set up the evaluation mode #
env = InformationGatheringEnv(configuration.env_config)
env.eval()
N = env.number_of_agents
trainer = DQN(config=configuration)
trainer.restore(r'C:\Users\yane_sa\PycharmProjects\MultiAgentInformationGathering\Training\runs\DQN_2022-09-30_14-52-05\DQN_InformationGatheringEnv_afad5_00000_0_2022-09-30_14-52-05\checkpoint_000001\checkpoint-1')
trainer.get_policy('shared_policy').export_model(export_dir='.')
# Create the evaluator and pass the metrics #
evaluator = MetricsDataCreator(metrics_names=['Mean Reward',
'Average Uncertainty',
'Mean regret',
'Model Error'],
algorithm_name='DRL',
experiment_name='DeepReinforcementLearningResults')
paths = MetricsDataCreator(
metrics_names=['vehicle', 'x', 'y'],
algorithm_name='DRL',
experiment_name='DeepReinforcementLearningResults_paths',
directory='./')
gp = GaussianProcessRegressor(kernel=RBF(length_scale=15.0, length_scale_bounds=(2.0, 100.0)), alpha=0.001)
for run in range(20):
# Reset the environment #
t = 0
obs = env.reset()
done = {i: False for i in range(N)}
done['__all__'] = False
episode_reward = 0
while not done['__all__']:
# Compute action for every agent #
action = {}
for agent_id, agent_obs in obs.items():
if not done[agent_id]:
action[agent_id] = trainer.compute_action(agent_obs, policy_id='shared_policy', explore=False)
# Send the computed action `a` to the env.
obs, reward, done, info = env.step(action)
env.render()
# Save the reward #
episode_reward += np.sum(list(reward.values()))
# Let's test if there is much more improvement with a complimentary surrogate model
gp.fit(env.measured_locations, env.measured_values)
surr_mu, surr_unc = gp.predict(env.visitable_positions, return_std=True)
real_mu = env.ground_truth.ground_truth_field[env.visitable_positions[:, 0], env.visitable_positions[:, 1]]
mse = mean_squared_error(y_true=real_mu, y_pred=surr_mu, squared=False)
metrics = [info['metrics']['accumulated_reward'],
info['metrics']['uncertainty'],
info['metrics']['instant_regret'],
mse
]
evaluator.register_step(run_num=run, step=t, metrics=[*metrics])
for veh_id, veh in enumerate(env.fleet.vehicles):
paths.register_step(run_num=run, step=t, metrics=[veh_id, veh.position[0], veh.position[1]])
t += 1
print(f"Episode done: Total reward = {episode_reward}")
# Register the metrics #
evaluator.register_experiment()
paths.register_experiment()
| derpberk/MultiAgentInformationGathering | Evaluation/evaluate_policy.py | evaluate_policy.py | py | 3,263 | python | en | code | 2 | github-code | 90 |
13648699370 | # -*- coding:utf-8 -*-
# 功能描述:使用BiLSTMCRF进行语义编码、维特比算法进行解码
import copy
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
START_TAG = "START"
STOP_TAG = "STOP"
# view用法:https://zhuanlan.zhihu.com/p/87856193?from_voters_page=true
# 计算整个损失函数loss的辅助函数
# 好像是用来帮助做矩阵加法用的
# 应该是用来计算发射矩阵和转移矩阵的和用的
def log_sum_exp(vec):
    """Numerically stable log-sum-exp over dim 0 of a 2D score tensor.

    Equivalent to log(sum(exp(vec), dim=0)) computed with the max-shift
    trick. Returns a (1, C) tensor for a (R, C) input; the trailing
    squeeze only collapses the column dim when C == 1, matching the shape
    behaviour of the original hand-rolled version.
    """
    # torch.logsumexp performs the same max-shifted reduction in one fused,
    # stable kernel; unlike the manual expand() it also accepts non-square
    # inputs (the old code expanded the max to (C, C) and required R == C).
    return torch.logsumexp(vec, 0, keepdim=True).squeeze(1)
class BiLSTMCRF(nn.Module):
    """BiLSTM encoder topped with a CRF layer for sequence labelling.

    The BiLSTM produces per-token emission scores (one per tag); a learned
    tag-to-tag transition matrix plus Viterbi decoding turns those scores
    into the best tag sequence. Training minimises the CRF negative
    log-likelihood (see neg_log_likelihood).
    """
    # NOTE(review): tag_map is a mutable dict used as a default argument; it
    # is never mutated here, but a None default filled in the body is safer.
    def __init__(
            self,
            tag_map={'O': 0, 'START': 1, 'STOP': 2, 'B-Medicinal_Name': 3, 'E-Medicinal_Name': 4, 'B-Medicinal_Other_Name': 5, 'I-Medicinal_Other_Name': 6, 'E-Medicinal_Other_Name': 7, 'B-Medicinal_Function': 8, 'E-Medicinal_Function': 9, 'I-Medicinal_Function': 10, 'B-Medicinal_Use_Num': 11, 'I-Medicinal_Use_Num': 12, 'E-Medicinal_Use_Num': 13, 'B': 14, 'E': 15, 'I': 16, 'I-Medicinal_Name': 17, 'B-Medicinal_Taste': 18, 'E-Medicinal_Taste': 19, 'I-Medicinal_Taste': 20, 'S-Medicinal_Other_Name': 21, 'S-Medicinal_Taste': 22, 'S-Medicinal_Use_Num': 23, 'S-Medicinal_Function': 24},
            # number of sentences processed per batch
            batch_size=20,
            vocab_size=20,
            hidden_dim=128,
            dropout=0.5,
            embedding_dim=100
    ):
        super(BiLSTMCRF, self).__init__()
        self.batch_size = batch_size
        self.hidden_dim = hidden_dim
        self.embedding_dim = embedding_dim
        self.vocab_size = vocab_size
        self.dropout = dropout
        self.tag_size = len(tag_map)
        self.tag_map = tag_map
        # Learned transition scores: transitions[i, j] is the score of moving
        # from tag i to tag j; registered as a Parameter so it is optimised
        # together with the network weights.
        self.transitions = nn.Parameter(
            torch.randn(self.tag_size, self.tag_size)
        )
        # Forbid transitions *into* START and *out of* STOP by assigning a
        # very large negative score to those entries.
        self.transitions.data[:, self.tag_map[START_TAG]] = -1000.
        self.transitions.data[self.tag_map[STOP_TAG], :] = -1000.
        self.word_embeddings = nn.Embedding(vocab_size, self.embedding_dim)
        # Bidirectional LSTM: hidden_dim // 2 per direction, so the
        # concatenated output is hidden_dim wide; batch_first inputs are
        # (batch, seq_len, embedding_dim).
        # NOTE(review): dropout has no effect on a single-layer LSTM
        # (PyTorch emits a warning about this).
        self.lstm = nn.LSTM(self.embedding_dim, self.hidden_dim // 2,
                            num_layers=1, bidirectional=True, batch_first=True, dropout=self.dropout)
        # Projects LSTM features to one emission score per tag.
        self.hidden2tag = nn.Linear(self.hidden_dim, self.tag_size)
        self.hidden = self.init_hidden()

    # Fresh random (h0, c0) pair for the LSTM:
    # shape (2 directions, batch, hidden_dim // 2).
    def init_hidden(self):
        return (torch.randn(2, self.batch_size, self.hidden_dim // 2),
                torch.randn(2, self.batch_size, self.hidden_dim // 2))

    # Run a batch of token-id sentences through embedding -> BiLSTM ->
    # linear projection; returns emission logits (batch, seq_len, tag_size).
    def __get_lstm_features(self, sentence):
        self.hidden = self.init_hidden()
        length = sentence.shape[1]
        embeddings = self.word_embeddings(sentence).view(self.batch_size, length, self.embedding_dim)
        lstm_out, self.hidden = self.lstm(embeddings, self.hidden)
        lstm_out = lstm_out.view(self.batch_size, -1, self.hidden_dim)
        logits = self.hidden2tag(lstm_out)
        return logits

    def real_path_score_(self, feats, tags):
        # Gives the score of a provided tag sequence
        # NOTE(review): no callers in this module; apparently superseded by
        # real_path_score below and kept for reference.
        score = torch.zeros(1)
        tags = torch.cat([torch.tensor([self.tag_map[START_TAG]], dtype=torch.long), tags])
        for i, feat in enumerate(feats):
            score = score + self.transitions[tags[i], tags[i + 1]] + feat[tags[i + 1]]
        score = score + self.transitions[tags[-1], self.tag_map[STOP_TAG]]
        return score

    def real_path_score(self, logits, label):
        '''
        calculate real path score
        :params logits -> [len_sent * tag_size]
        :params label -> [1 * len_sent]
        Score = Emission_Score + Transition_Score
        Emission_Score = logits(0, label[START]) + logits(1, label[1]) + ... + logits(n, label[STOP])
        Transition_Score = Trans(label[START], label[1]) + Trans(label[1], label[2]) + ... + Trans(label[n-1], label[STOP])
        '''
        # Running score kept as a 1-element tensor for autograd.
        score = torch.zeros(1)
        # Prepend START so transitions line up as label[i] -> label[i+1].
        label = torch.cat([torch.tensor([self.tag_map[START_TAG]], dtype=torch.long), label])
        for index, logit in enumerate(logits):
            emission_score = logit[label[index + 1]]
            transition_score = self.transitions[label[index], label[index + 1]]
            score += emission_score + transition_score
        # Close the path with the transition into STOP.
        score += self.transitions[label[-1], self.tag_map[STOP_TAG]]
        return score

    # Log-partition (forward algorithm): log-sum-exp over all tag paths.
    def total_score(self, logits, label):
        """
        calculate total score
        :params logits -> [len_sent * tag_size]
        :params label -> [1 * tag_size]
        SCORE = log(e^S1 + e^S2 + ... + e^SN)
        """
        obs = []
        # Forward variable: one accumulated score per tag.
        # NOTE(review): torch.full with an int fill value produces an integer
        # tensor on recent PyTorch; torch.zeros would be the safer choice.
        previous = torch.full((1, self.tag_size), 0)
        for index in range(len(logits)):
            # Broadcast previous scores down the columns ...
            previous = previous.expand(self.tag_size, self.tag_size).t()
            # ... and the current emissions across the rows.
            obs = logits[index].view(1, -1).expand(self.tag_size, self.tag_size)
            scores = previous + obs + self.transitions
            previous = log_sum_exp(scores)
        previous = previous + self.transitions[:, self.tag_map[STOP_TAG]]
        # Reduce over tags for the final log-partition value.
        total_scores = log_sum_exp(previous.t())[0]
        return total_scores

    # CRF loss: sum over the batch of (log-partition - gold-path score).
    def neg_log_likelihood(self, sentences, tags, length):
        self.batch_size = sentences.size(0)
        logits = self.__get_lstm_features(sentences)
        real_path_score = torch.zeros(1)
        total_score = torch.zeros(1)
        for logit, tag, leng in zip(logits, tags, length):
            # Trim padding using the true sentence length.
            logit = logit[:leng]
            tag = tag[:leng]
            real_path_score += self.real_path_score(logit, tag)
            total_score += self.total_score(logit, tag)
        return total_score - real_path_score

    def forward(self, sentences, lengths=None):
        """
        Viterbi-decode a batch of sentences.
        :params sentences sentences to predict (token ids)
        :params lengths true length of each sentence; defaults to the full
                padded width sentences.size(-1)
        """
        sentences = torch.tensor(sentences, dtype=torch.long)
        # NOTE(review): sen_size is unused.
        sen_size=sentences.size()
        if not lengths:
            lengths = [i.size(-1) for i in sentences]
        self.batch_size = sentences.size(0)
        logits = self.__get_lstm_features(sentences)
        scores = []
        paths = []
        for logit, leng in zip(logits, lengths):
            # Trim padding, then decode this sentence alone.
            logit = logit[:leng]
            score, path = self.__viterbi_decode(logit)
            scores.append(score)
            paths.append(path)
        return scores, paths

    # Viterbi decoding: max-product dynamic programme over the trellis,
    # then backtrack through the argmax pointers.
    # Reference: https://www.zhihu.com/question/20136144
    def __viterbi_decode(self, logits):
        backpointers = []
        # Best accumulated score for each (position, tag).
        trellis = torch.zeros(logits.size())
        # Argmax predecessor for each (position, tag); note this overwrites
        # the placeholder list two lines above.
        backpointers = torch.zeros(logits.size(), dtype=torch.long)
        trellis[0] = logits[0]
        for t in range(1, len(logits)):
            v = trellis[t - 1].unsqueeze(1).expand_as(self.transitions) + self.transitions
            trellis[t] = logits[t] + torch.max(v, 0)[0]
            backpointers[t] = torch.max(v, 0)[1]
        # Start from the best final tag, then walk the pointers backwards.
        viterbi = [torch.max(trellis[-1], -1)[1].cpu().tolist()]
        backpointers = backpointers.numpy()
        for bp in reversed(backpointers[1:]):
            viterbi.append(bp[viterbi[-1]])
        viterbi.reverse()
        viterbi_score = torch.max(trellis[-1], 0)[0].cpu().tolist()
        return viterbi_score, viterbi

    # Expose the learned transition matrix.
    def get_tran(self):
        return self.transitions

    # Probability-space variant of the decoder, kept for reference.
    # NOTE(review): multiplying raw logits as if they were probabilities is
    # numerically fragile; __viterbi_decode above is the one used by forward().
    def __viterbi_decode_v1(self, logits):
        init_prob = 1.0
        trans_prob = self.transitions.t()
        prev_prob = init_prob
        path = []
        for index, logit in enumerate(logits):
            if index == 0:
                obs_prob = logit * prev_prob
                prev_prob = obs_prob
                prev_score, max_path = torch.max(prev_prob, -1)
                path.append(max_path.cpu().tolist())
                continue
            obs_prob = (prev_prob * trans_prob).t() * logit
            max_prob, _ = torch.max(obs_prob, 1)
            _, final_max_index = torch.max(max_prob, -1)
            prev_prob = obs_prob[final_max_index]
            prev_score, max_path = torch.max(prev_prob, -1)
            path.append(max_path.cpu().tolist())
        return prev_score.cpu().tolist(), path
| srx-2000/traditional_Chinese_medicine | medicine/BiLSTMCRF/model.py | model.py | py | 19,829 | python | zh | code | 69 | github-code | 90 |
69931192937 | import typing as ty
import PySimpleGUI as sg
import reapy_boost as rpr
from pprint import pprint
from .gui import (LayoutType, FADE_SHAPES, ValuesFilledType)
from .item_handler import (ItemsHandler, ItemHandler)
def NamedSlider(
        name: str,
        range: ty.Tuple[float, float],
        key: str,
        need_check: bool = False,
        resolution: float = .001,
        tooltip: str = '',
        enable_events: bool = False,
        default_value: ty.Optional[float] = None,
        orientation: str = 'h',
        size: ty.Tuple[int, int] = (30, 10)
) -> sg.Column:
    """Build a labelled slider widget.

    Returns an sg.Column holding a text label (plus a '<key>_used' checkbox
    when need_check is True) above an sg.Slider registered under `key`.

    NOTE(review): the `range` parameter shadows the builtin; renaming it
    would break keyword callers, so it is kept as-is.
    """
    text_layout = [sg.Text(name)]
    if need_check:
        text_layout.append(
            sg.Checkbox('use', key=key + '_used', default=False)
        )
    return sg.Column(
        [
            text_layout,
            [
                sg.Slider(
                    range,
                    key=key,
                    resolution=resolution,
                    tooltip=tooltip,
                    enable_events=enable_events,
                    default_value=default_value,
                    orientation=orientation,
                    size=size
                )
            ],
        ]
    )
class FadeRegions:
    """GUI block that applies a fade (in or out) to every media item that
    lies fully inside a given set of project regions.

    Builds the widgets (time slider, shape combo, apply button) and
    performs the batch fade via fade_all().
    """
    # Composed PySimpleGUI layout for this block.
    layout: LayoutType

    def __init__(
        self,
        namespace: str,
        name: str,
        direction: str = 'fade_out',
        range_: ty.Tuple[float, float] = (0, 1)
    ) -> None:
        self.name = name
        # Sample rate handed to the item handlers.
        self.sr = 8000
        assert direction in (
            'fade_out', 'fade_in'
        ), 'direction can be only "fade_out" or "fade_in"'
        # Human-readable direction used in labels and tooltips.
        self.direction_text = (
            'fade-out' if direction == 'fade_out' else 'fade-in'
        )
        # Key prefix so several FadeRegions instances can coexist.
        self.ns = namespace + f'{direction}_'
        self.fade_sl = NamedSlider(
            f'{self.direction_text} time',
            range_,
            key=self.ns + 'fade_time',
            resolution=.001,
            tooltip=f'{name} {self.direction_text} time',
            default_value=.2,
            orientation='h',
            size=(30, 10)
        )
        self.fade_sh = sg.Combo(
            values=list(FADE_SHAPES.keys()),
            default_value=list(FADE_SHAPES.keys())[1],
            key=self.ns + 'fade_shape',
            tooltip=f'{self.name} {self.direction_text} shape'
        )
        self.make_fades_btn = sg.Button(
            f'make all {self.direction_text}s', key=self.ns + 'make_fades'
        )
        self.layout = sg.Column(
            [[self.fade_sl], [self.fade_sh, self.make_fades_btn]]
        )

    @property
    def key(self) -> str:
        # Event key emitted by the "make all ..." button.
        return self.ns + 'make_fades'

    def time(self, values: ValuesFilledType) -> float:
        # Fade length (seconds) currently selected on the slider.
        return ty.cast(float, values[self.ns + 'fade_time'])

    def shape(self, values: ValuesFilledType) -> int:
        # Numeric fade-shape id for the currently selected combo entry.
        return FADE_SHAPES[ty.cast(str, values[self.ns + 'fade_shape'])]

    def fade_all(
        self, values: ValuesFilledType,
        regions_w_metadata: ty.Iterable[ty.Tuple[rpr.Region, object]]
    ) -> None:
        """Apply the selected fade to every item fully inside any region,
        wrapped in a single REAPER undo block."""
        with rpr.undo_block(
            'set all {name}s {d_t}s to {time}'.format(
                name=self.name,
                d_t=self.direction_text,
                time=values[self.ns + 'fade_time']
            ), -1
        ):
            print('fade_all')
            pr = rpr.Project()
            pr.select_all_items(False)
            all_items: ty.Iterator[rpr.Item] = pr.items  # type:ignore
            for_fade: ty.Iterator[rpr.Item] = []  # type:ignore

            # True when the item's [start, end] lies entirely inside the region.
            def is_in_bounds(
                item_bounds: ty.Tuple[float, float],
                region_bounds: ty.Tuple[float, float]
            ) -> bool:
                # print(item_bounds, region_bounds)
                if (
                    item_bounds[0] >= region_bounds[0] and
                    item_bounds[1] <= region_bounds[1]
                ):
                    return True
                return False

            # True when the item is inside at least one of the regions.
            def item_for_selection(
                item: rpr.Item, regions_bounds: ty.Iterable[ty.Tuple[float,
                                                                     float]]
            ) -> bool:
                start = item.position
                end = start + item.length
                if len(
                    list(
                        filter(
                            lambda r_b: is_in_bounds((start, end), r_b),
                            regions_bounds
                        )
                    )
                ):
                    return True
                return False

            # Materialize the iterable: it is consumed more than once below.
            regions_w_metadata = list(regions_w_metadata)
            pprint(list(reg[0].name for reg in regions_w_metadata))
            regs_bounds = list(
                [(reg[0].start, reg[0].end) for reg in regions_w_metadata]
            )
            for_fade = filter(
                lambda item: item_for_selection(item, regs_bounds), all_items
            )
            handlers = [
                ItemHandler(sr=self.sr, item=item) for item in for_fade
            ]
            print(list(handl.item.position for handl in handlers))
            ih = ItemsHandler(sr=self.sr, item_handlers=handlers)
            # NOTE(review): fade_out() is called even when this instance was
            # built with direction='fade_in' -- confirm ItemsHandler handles
            # the direction elsewhere.
            ih.fade_out(
                ty.cast(float, values[self.ns + 'fade_time']),
                FADE_SHAPES[ty.cast(str, values[self.ns + 'fade_shape'])],
            )
| Levitanus/sample_editor | sample_editor/widgets.py | widgets.py | py | 5,282 | python | en | code | 3 | github-code | 90 |
36332745607 | from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from .forms import QueryForm
from .models import USING_DATABASE, OperationTime, SaveData
# To show available files
import os
import json
from django.conf import settings
from django.http import JsonResponse
# Create your views here.
def index(request):
    """App root: delegates straight to the query view."""
    return query(request)
def send_path(path=[], mother_dir=os.path.join(settings.MEDIA_ROOT, 'operationtiming')):
    """List the files and sub-folders of <mother_dir>/<path>.

    :param path: list of path components below mother_dir.
    :param mother_dir: base directory to browse.
    :return: {'dirs': [...], 'files': [...]} (both sorted, 'readme.md'
             hidden) or {'empty': 'True'} when the directory is empty or
             does not exist.
    """
    # NOTE: the mutable default path=[] is safe here -- the list is never mutated.
    target = os.path.join(mother_dir, os.sep.join(path))
    # os.walk yields the top directory first; a single iteration is enough.
    for root, dirs, files in os.walk(target):
        if 'readme.md' in files:
            files.remove('readme.md')
        if dirs or files:
            dirs.sort()
            files.sort()
            return {'dirs': dirs, 'files': files}
        return {'empty': 'True'}
    # os.walk yielded nothing: the directory does not exist. Previously the
    # function fell through and returned None, which made the caller's
    # context.update(send_path()) raise TypeError; treat it like empty.
    return {'empty': 'True'}
def query(request):
    """Main report view: fetch OperationTime records between two dates,
    group them into reactor runs, and render the operation-timing page.

    Each entry of send_data describes one run:
    [start_date, end_date, [[power, minutes], ...], MWd_this_run,
     MWd_total, U235_from_MWd_total, run_minutes, cumulative_hours]
    """
    context = {}
    # all data in database
    records = None
    # A date range was submitted (GET parameters present).
    if request.GET:
        # Get request form
        query_form = QueryForm(request.GET)
        # Validate the submitted form.
        if query_form.is_valid():
            data = query_form.cleaned_data
            # Reject reversed ranges (fromDate after toDate).
            if data['fromDate'] > data['toDate']:
                query_form.add_error('toDate', 'Sai ngày')
            else:
                # Get data
                records = OperationTime.objects.using(USING_DATABASE).filter(
                    date__gte=data['fromDate'],
                    date__lte=data['toDate']
                )
    # Plain page load: show an empty form.
    else:
        query_form = QueryForm()
    # Shape the raw records into per-run rows for the template.
    send_data = []
    if records:
        temp = []  # accumulator for the reactor run currently being built
        hour = 0  # cumulative operating time (minutes) across runs
        before = 0  # date of the last zero-power record; detects restarts after shutdown
        convert_MWd_to_U235_factor = 1.23
        start_query = False  # only start collecting at a run boundary
        for record in records:
            # Start at the first record whose from_power == 0 (a fresh run).
            if not start_query and not record.from_power:
                start_query = True
            if start_query:
                if record.power:
                    # Power resumed on a different date => new run: flush the previous one.
                    if before and temp and before != record.date:
                        temp[1] = before
                        temp[3] = round(temp[3], 5)
                        temp[4] = round(temp[4], 5)
                        temp[5] = round(temp[4] * convert_MWd_to_U235_factor, 4)
                        hour += temp[6]
                        temp[7] = round(hour / 60, 1)
                        send_data.append(temp)
                        temp = []
                        before = 0
                    # Accumulate this record into the open run.
                    if temp:
                        temp[2].append([record.power, round(record.time_for_Mwd_up + record.time_for_Mwd_steady)])
                        temp[3] += record.MWd_up + record.MWd_steady
                        temp[4] = record.MWd_total
                        temp[6] += record.operation_time_up + record.operation_time_steady
                    # No open run yet: initialise one with this structure.
                    else:
                        temp = [
                            # 0. fromDate
                            record.date,
                            # 1. toDate
                            record.date,
                            # 2. for print power x time (minute)
                            [[record.power, round(record.time_for_Mwd_up + record.time_for_Mwd_steady)]],
                            # 3. MWd
                            record.MWd_up + record.MWd_steady,
                            # 4. MWd_total
                            record.MWd_total,
                            # 5. U235 in total from MWd_total
                            0,
                            # 6. operation time in minute each period
                            record.operation_time_up + record.operation_time_steady,
                            # 7. operation time in hour in total
                            0
                        ]
                else:
                    if temp:
                        # Remember the shutdown date to detect whether power resumes.
                        before = record.date
        # Flush the final run (the loop above only flushes when the next run starts).
        if temp:
            # Only if the last record shows the run has actually ended.
            if not record.power:
                temp[1] = record.date
                temp[3] = round(temp[3], 5)
                temp[4] = round(temp[4], 5)
                temp[5] = round(temp[4] * convert_MWd_to_U235_factor, 4)
                hour += temp[6]
                temp[7] = round(hour / 60, 1)
                send_data.append(temp)
    # Summary row for the whole query period.
    send_data_total = []
    if send_data:
        if query_form.is_valid():
            data = query_form.cleaned_data
            send_data_total = [
                # 0. fromDate
                data['fromDate'],
                # 1. toDate
                data['toDate'],
                # 2. Total operation time
                send_data[-1][7],
                # 3. MWd total of query period
                send_data[-1][4] - send_data[0][4],
                # 4. U235 total of query period
                (send_data[-1][4] - send_data[0][4]) * convert_MWd_to_U235_factor
            ]
    # No rows to show: list the pre-built report files (media/operationtiming/) instead.
    else:
        context.update(send_path())
    #print(context)
    context.update({
        'query_form': query_form,
        'send_data': [] if request.GET.get('only_total') else send_data,
        'send_data_total': send_data_total,
        # Date range available in the database.
        'first_day': SaveData.get_first_date(),
        'last_day': SaveData.get_last_date(),
        # ON/OFF append_data_from_file
        'data_from_file': False,
    })
    return render(request, 'html/operation_timing_page.html', context)
def get_files_and_folders(request):
    """AJAX endpoint: list the files/folders under the POSTed 'path'.

    Expects a POST with a 'path' field of '/'-separated components and
    responds with {'path': <JSON-encoded listing from send_path()>}.
    Non-POST requests now get an explicit 405 response instead of the
    implicit None return (which made Django raise a ValueError).
    """
    if request.method == 'POST':
        listing = send_path(request.POST.get('path', '').split('/'))
        return JsonResponse({'path': json.dumps(listing)})
    return JsonResponse({'error': 'POST required'}, status=405)
@login_required(login_url='/operation_timing')
def append_data_from_file(request):
    """Login-required variant of the main page (re-renders index)."""
    return index(request)
| hntvinh90/ReactorCenterWebsite | OperationTiming/views.py | views.py | py | 6,543 | python | en | code | 0 | github-code | 90 |
37199388441 | import numpy as np
from particleLocating import flatField, pyFiji
from scipy.interpolate import griddata
from joblib import Parallel,delayed
class arrayThreshold:
"""
A class for applying adpative local threshdoling to image arrays.
Most heavy lifting is done by import functions
"""
def __init__(self,imgPath):
try:
self.imgArray = self.recastImage(flatField.zStack2Mem(imgPath),dtypeOut='uint16')
self.thresholdArray = np.zeros(self.imgArray.shape)
except ValueError:
#print('Assuming input {} is a list of [stack,dict of log values]'.format(imgPath))
self.imgArray = imgPath[0]
self.thresholdArray = np.zeros(self.imgArray.shape)
self.log = imgPath[1]
def localThreshold(self,blockDim,edgeAugment = 'reflect',n_xyz = (3,15,15),parBool=True,n_jobs=4):
"""
Apply a threshold by moving through the imageArray, break into blockDim, and apply thresold algo to block
:param blockDim: int, size of the block in pixels,
:param n_xyz: 3-tuple, number of samples in x,y,z to use in local threshold.
:param edgeAugment: str, how should the local threshold work if blockDim is outside image bounds?
:param thresholdFunc: what, probably adaptive function should we use to compute local threshold?
:param parBool: boolean, should we use parallel processing using joblib package?
:param n_jobs: int, number of paralell jobs to use if parBool==True.
Note I may be memory constrained and not core constrained for parallel processing.
:return: (points, values), tuple containing image coordinates and computed threshold for block centered
at points. This is likely later interpolated to get a threshold array that is used as a mask
"""
# for every pixel, find the block dim and mirror if necessary for edge case
def makeBlock(array, blockDim, pxCoord):
delta = np.floor(blockDim/2).astype(int)
padShift = np.floor(blockDim/2).astype(int)
padded = np.pad(array,np.floor(blockDim/2).astype(int),mode=edgeAugment)
lowRange = pxCoord + padShift - delta
upRange = pxCoord + padShift + delta + 1 # add one for zero indexing
# There is likely a better way to do this that would naturally work for array of any dim
# but for now I will be content with that fact that I didnt use a for loop
if len(lowRange) == 3 :
return padded[lowRange[0] : upRange[0],\
lowRange[1] : upRange[1],\
lowRange[2] : upRange[2]]
elif len(lowRange) == 2:
return padded[lowRange[0]: upRange[0],\
lowRange[1]: upRange[1]]
elif len(lowRange) == 1:
return padded[lowRange[0]: upRange[0]]
else:
print("adaptive Threshold does not work for array of dim larger than 3")
raise IndexError
def partialApplication(i, j, k):
block = np.ndarray.flatten(makeBlock(image, blockDim, np.array([i, j, k])))
# it would be best to implement to take **any** function as threshold algorithm, not just maxEnt as
# current hardcoded.
return (self.maxEntropyThreshold(block), i, j, k)
image = self.imgArray
nx,ny, nz = n_xyz
if parBool == False:
points_z, points_y, points_x = [], [], []
values = []
for i in list(np.linspace(0,image.shape[0],nz).astype(int)):
for j in list(np.linspace(0, image.shape[1], ny).astype(int)):
for k in list(np.linspace(0, image.shape[2], nx).astype(int)):
block = np.ndarray.flatten(makeBlock(image,blockDim,np.array([i,j,k])))
points_z.append(i)
points_y.append(j)
points_x.append(k)
values.append(self.maxEntropyThreshold(block))
elif parBool == True:
parOut = Parallel(n_jobs=n_jobs)(delayed(partialApplication)(i,j,k)\
for i in list(np.linspace(0, image.shape[0], nz).astype(int))\
for j in list(np.linspace(0, image.shape[1], ny).astype(int))\
for k in list(np.linspace(0, image.shape[2], nx).astype(int))\
)
values = [elt[0] for elt in parOut]
points_z = [elt[1] for elt in parOut]
points_y = [elt[2] for elt in parOut]
points_x = [elt[3] for elt in parOut]
else: pass
points = (np.array(points_z).astype(int),\
np.array(points_y).astype(int),\
np.array(points_x).astype(int))
values = np.array(values).astype('uint16')
return points,values
def interpolateThreshold(self,points,values):
"""
Function carries out an interpolation of volumetric image data given linked array of points and values
:param points: 3-tuple of position arrays (position_z,position_y, position_x)
giving n-th (xyz) coordinate -> (points[2][n],points[1][n],points[0][n])
:param values: linked array of sampled image values. n-th coordinate as listed above corresponds values[n]
:return: image data that has been interpolated in 3D and output at uint16.
"""
image = self.imgArray
zz,yy,xx = np.mgrid[0:image.shape[0]:1, 0:image.shape[1]:1, 0:image.shape[2]:1]
return griddata(points,values,(zz,yy,xx), method='linear').astype('uint16')
def maxEntropyThreshold(self,stack):
"""
Computes the maximum entropy threshdold from image histogram as implemented in Fiji > Threshold > MaxEnt
This follows:
Reference:
Kapur, J. N., P. K. Sahoo, and A. K. C.Wong. ‘‘A New Method for Gray-Level
Picture Thresholding Using the Entropy of the Histogram,’’ Computer Vision,
Graphics, and Image Processing 29, no. 3 (1985): 273–285.
and kapur_threshold() function in pythreshold package.
:param stack:
:return:
"""
hist, _ = np.histogram(stack, bins=range(2**16),density=True)
c_hist = hist.cumsum()
c_hist_i = 1.0 - c_hist
# To avoid invalid operations regarding 0 and negative values.
c_hist[c_hist <= 0] = 1
# I think this is a logical index on the boolean expression: if c_hist<=0, set that value to 1
c_hist_i[c_hist_i <= 0] = 1
c_entropy = (hist * np.log(hist + (hist <= 0))).cumsum() # add logical array hist<=0 to make sure you dont take log(0)
b_entropy = -c_entropy / c_hist + np.log(c_hist)
c_entropy_i = c_entropy[-1] - c_entropy
f_entropy = -c_entropy_i / c_hist_i + np.log(c_hist_i)
#self.thresholdArray[:] = np.argmax(b_entropy + f_entropy)
print("Complete max ent threshold on one block!")
return np.argmax(b_entropy + f_entropy)
def linearShade(self, x, left, right, min, max):
""" as x goes from left to right, the output will go continuously from min to max
and saturate if x<left or x>right
"""
if x < left: return min
elif x > right: return max
else:
m = (max - min) / (right - left)
b = max - m * right
return m * x + b
@staticmethod
def recastImage(imgArray, dtypeOut, clip = False, clipPercent = (0.01,99.99)):
"""
output an array where each value has been recast to a new data type without any other change
The entire dynamic range of the image is remapped to the output bit depth. There is no clipping.
:param imgArray: np.array of image data
:param dtypeOut: str specifying output data type. Currently either 'uint16' or 'uint8'
:return:
Added default option of clipping top and bottom 0.1% of pixels.
"""
if dtypeOut == 'uint16':
if not clip: min,max = 0.99*np.nanmin(imgArray) ,1.01*np.nanmax(imgArray)
else:
min, max = np.percentile(imgArray, clipPercent[0]), np.percentile(imgArray, clipPercent[1])
imgArray[imgArray <= min] = min
imgArray[imgArray >= max] = max
m = 2**16/(max-min)
b = 2**16-m*max
mArray = np.full(imgArray.shape,m)
bArray = np.full(imgArray.shape,b)
return np.array(np.multiply(mArray,imgArray) + bArray).astype('uint16')
elif dtypeOut == 'uint8':
min, max = 0.99*np.nanmin(imgArray), 1.01*np.nanmax(imgArray)
m = 2 ** 8 / (max - min)
b = 2 ** 8 - m * max
mArray = np.full(imgArray.shape, m)
bArray = np.full(imgArray.shape, b)
return np.array(np.multiply(mArray, imgArray) + bArray).astype('uint8')
else: raise ValueError('recasting is only availabe to uint8 and uint16, not dtypeOut=',dtypeOut)
def applyThreshold(self,recastBool = True,scaleFactor=1.0):
    """
    Apply a previously computed threshold; this function does not compute one.
    It subtracts scaleFactor * self.thresholdArray from self.imgArray and
    returns the above-threshold and below-threshold parts separately.

    :param recastBool: if True, rescale each output to span the full 16 bit range
    :param scaleFactor: multiplier applied to the threshold before subtraction
    :return: (positive, negative) arrays; float32 unless recastBool is True
    """
    # change type to enable subtraction
    out = self.imgArray.astype('float32') - scaleFactor*self.thresholdArray.astype('float32')
    # clip everything below zero
    positive = out # NOTE: an alias of `out`, NOT a copy (the old comment was wrong); safe only because `negative` is derived on the next line *before* `positive` is clipped in place
    negative = out * -1 # fresh array holding the sign-flipped below-threshold part
    positive[positive<0] = 0 # now use logical indexing to reassign all negative values to zero
    negative[negative < 0] = 0
    if recastBool == True:
        positive = self.recastImage(positive,'uint16') # rescale the dynamic range after thresholding to span 16bits
        negative = self.recastImage(negative, 'uint16') # rescale the dynamic range after thresholding to span 16bits
    return positive,negative
if __name__ == "__main__":
    # Ad-hoc development driver: load a test stack, inspect its histogram,
    # threshold it blockwise, and push results to Fiji for viewing.
    # Paths are hard-coded to the author's machine.
    fPath = '/Users/zsolt/Colloid/DATA/DeconvolutionTesting_Huygens_DeconvolutionLab2/'\
            'OddysseyHashScripting/stitch/smartCrop/smartCrop/tfrGel09052019b_shearRun05062019i_smartCrop_hv00040.tif'
    testImgPath = '/Users/zsolt/Colloid/DATA/DeconvolutionTesting_Huygens_DeconvolutionLab2/' \
                  'OddysseyHashScripting/pyFiji/testImages'
    instance = arrayThreshold(fPath)
    #%%
    #pyFiji.send2Fiji(instance.imgArray,wdir=testImgPath)
    instance.imgArray = instance.recastImage(instance.imgArray,'uint16')
    # Plot the intensity histogram of the recast image.
    import seaborn as sns
    import matplotlib.pyplot as plt
    sns.distplot(instance.imgArray.ravel())
    plt.show()
    print(pyFiji.send2Fiji(instance.imgArray,wdir=testImgPath))
    #%%
    # Blockwise local threshold (3x3x3 blocks, parallel), interpolated back
    # to full resolution, then applied.
    tmp = instance.localThreshold(50,n_xyz=(3,3,3),parBool=True,n_jobs=8)
    instance.thresholdArray = instance.interpolateThreshold(tmp[0],tmp[1])
    positive, negative = instance.applyThreshold()
    print(pyFiji.send2Fiji(positive,wdir=testImgPath))
| tlozsolt/TractionRheoscopy | particleLocating/threshold.py | threshold.py | py | 11,245 | python | en | code | 0 | github-code | 90 |
72975124456 | from airflow import DAG
from airflow.models.variable import Variable
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python import BranchPythonOperator
from airflow.providers.amazon.aws.operators.lambda_function import AwsLambdaInvokeFunctionOperator
from airflow.utils.task_group import TaskGroup
from Airflow.framework.utils.data_inspect import DataInspect
from datetime import timedelta, datetime
import json
import logging
# Setting up logging
# Create and configure the root logger for the whole DAG file.
# NOTE(review): filemode="a" only takes effect when a filename= is also
# passed to basicConfig; without one it is silently ignored -- confirm
# whether file logging was intended.
logging.basicConfig(
    format="%(name)s -> %(levelname)s: %(message)s",
    filemode="a",
    level=logging.DEBUG
)
# Root logger (no name argument), so every module logger propagates here.
logger = logging.getLogger()
# Setting some global vars
# S3 bucket names for the medallion layers and the nominal run cadence
# (SCHEDULE_INTERVAL is forwarded to the lambdas inside task payloads).
SILVER_BUCKET = "silver-test-edgui"
BRONZE_BUCKET = "bronze-test-edgui"
SCHEDULE_INTERVAL = "@hourly"

# Default arguments applied to every task of the DAG.
default_args = {
    "owner": "Edson G. A. Correia",
    "start_date": datetime(2022, 2, 20),
    "depends_on_past": False,
    "email": ["ed.guilherme.correia@gmail.com"],
    "email_on_failure": False,
    "email_on_retry": False,
    "retries": 0,
    "retry_delay": timedelta(minutes=5),
    "catchup": False,
}
# Hourly Oracle -> S3 (bronze/silver) -> Redshift ETL, driven entirely by
# metadata stored in Airflow Variables: one TaskGroup per schema, one
# extract/transform/load lambda chain per table.
with DAG(
    dag_id="oracle_hourly",
    schedule_interval=None,  # manual runs only; flip to SCHEDULE_INTERVAL to enable the hourly cadence
    # schedule_interval=SCHEDULE_INTERVAL,
    dagrun_timeout=timedelta(minutes=55),
    tags=["pipeline", "oracle", "daily"],
    max_active_runs=1,
    max_active_tasks=2,
    default_args=default_args
) as dag:

    start_task = DummyOperator(task_id="start_task")

    # Getting the metadata from the tables
    oracle_metadata = Variable.get("oracle_metadata", default_var=False, deserialize_json=True)
    oracle_schemas = list(oracle_metadata.keys())

    # Getting the credentials
    oracle_credentials = Variable.get("oracle_credentials_secret", default_var=False, deserialize_json=True)
    redshift_credentials = Variable.get("redshift_credentials_secret", default_var=False, deserialize_json=True)

    for schema in oracle_schemas:
        with TaskGroup(group_id=schema):
            # Branch task: decides per schema whether there is fresh data to process.
            data_inspect = BranchPythonOperator(
                task_id=f"{schema}_data_inspect",
                python_callable=DataInspect.oracle_check_table_data,
                provide_context=True,
                depends_on_past=False,
                trigger_rule="none_failed",
                op_kwargs={
                    "ts": "{{ ts }}",
                    "schema": schema,
                    "oracle_credentials": oracle_credentials,
                    "oracle_schema_metadata": oracle_metadata[schema],
                    "interval": SCHEDULE_INTERVAL
                }
            )

            for table_metadata in oracle_metadata[schema]:
                # Each entry is {table_name: {...config...}}.
                table_name = list(table_metadata.keys())[0]
                functions = table_metadata[table_name]["functions"]
                upsert_col = table_metadata[table_name]["upsert_col"]
                constraint_conflict = table_metadata[table_name]["constraint_conflict"]

                # Oracle -> bronze bucket.
                extraction_task = AwsLambdaInvokeFunctionOperator(
                    task_id=f"{table_name}_extrac",
                    function_name=functions["extraction"],
                    trigger_rule="all_success",
                    payload=json.dumps({
                        "bucket": BRONZE_BUCKET,
                        "bucket_path": f"{schema}/{table_name}",
                        "schema": schema,
                        "table_name": table_name,
                        "dt_execution": "{{ ts }}",
                        "upsert_col": upsert_col,
                        "host": oracle_credentials["host"],
                        "port": oracle_credentials["port"],
                        "tns": oracle_credentials["tns"],
                        "user": oracle_credentials["user"],
                        "psswd": oracle_credentials["psswd"],
                        "interval": SCHEDULE_INTERVAL
                    })
                )

                # Bronze -> silver bucket.
                transformation_task = AwsLambdaInvokeFunctionOperator(
                    task_id=f"{table_name}_transform",
                    function_name=functions["transformation"],
                    trigger_rule="all_success",
                    payload=json.dumps({
                        "source_bucket": BRONZE_BUCKET,
                        "target_bucket": SILVER_BUCKET,
                        "schema": schema,
                        "table_name": table_name,
                        "dt_execution": "{{ ts }}",
                        "interval": SCHEDULE_INTERVAL
                    })
                )

                # Silver bucket -> Redshift.
                # Fixed: the original reused task_id f"{table_name}_transform"
                # (duplicate task ids are rejected by Airflow at parse time)
                # and invoked the transformation lambda again while passing it
                # this load payload.
                load_task = AwsLambdaInvokeFunctionOperator(
                    task_id=f"{table_name}_load",
                    # NOTE(review): assumes the metadata exposes the load
                    # lambda under the "load" key, mirroring "extraction" and
                    # "transformation" -- confirm against the Variable contents.
                    function_name=functions["load"],
                    trigger_rule="all_success",
                    payload=json.dumps({
                        "source_bucket": SILVER_BUCKET,
                        "schema": schema,
                        "table_name": table_name,
                        "dt_execution": "{{ ts }}",
                        "constraint_conflict": constraint_conflict,
                        "interval": SCHEDULE_INTERVAL,
                        "host": redshift_credentials["host"],
                        "port": redshift_credentials["port"],
                        "db": redshift_credentials["db"],
                        "user": redshift_credentials["user"],
                        "psswd": redshift_credentials["psswd"]
                    })
                )

                # Wire the chain per table (the original wired it once after
                # the loops, leaving earlier tables' tasks unconnected).
                start_task >> data_inspect >> extraction_task >> transformation_task >> load_task
| edgui-appolonicorreia/pyroduct | Airflow/dags/main_dag.py | main_dag.py | py | 5,613 | python | en | code | 1 | github-code | 90 |
44907777033 | #!/usr/local/bin/python3
import os
import argparse
import tempfile
import string
import shutil
from pathlib import Path
from typing import Generator, Any
from contextlib import contextmanager
from build_kernel_headers import copy_kernel_headers
from build_utils import run_and_check, run_and_capture_output_and_check, copied_file_is_outdated
from build_meson_projects import build_meson_projects
from build_rust_toolchain import build_rust_programs, build_kernel_rust_libs
from run_axle import run_iso
ARCH = "x86_64"
_REPO_ROOT = Path(__file__).parents[1]
def _is_macos() -> bool:
return os.uname().sysname == 'Darwin'
@contextmanager
def _get_mounted_iso(image_name: Path) -> Generator[Path, Any, Any]:
    """Format ``image_name`` as FAT32 and yield a handle to it: the raw
    device name on macOS, a loop-mounted directory on Linux."""
    disk_size_in_mb = 128
    sector_size = 512
    sector_count = (disk_size_in_mb * 1024 * 1024) / sector_size
    if _is_macos():
        # Attach the raw image without mounting, then format the device node.
        mounted_disk_name = run_and_capture_output_and_check(
            ["hdiutil", "attach", "-imagekey", "diskimage-class=CRawDiskImage", "-nomount", image_name.as_posix()]
        ).strip(f"{string.whitespace}\n")
        print(f"Mounted disk name: {mounted_disk_name}")
        run_and_check(["newfs_msdos", "-F", "32", "-S", str(sector_size), "-s", str(int(sector_count)), mounted_disk_name])
        # NOTE(review): no matching `hdiutil detach` after the yield -- the
        # disk stays attached; confirm whether that is intentional.
        yield Path(mounted_disk_name)
    else:
        run_and_check(['mkfs.vfat', image_name.as_posix()])
        with tempfile.TemporaryDirectory() as temp_dir:
            mount_point = Path(temp_dir) / "mnt"
            mount_point.mkdir()
            run_and_check(['sudo', 'mount', '-o', 'loop', image_name.as_posix(), mount_point.as_posix()])
            yield mount_point
            # NOTE(review): skipped if the caller's body raises; a
            # try/finally would guarantee the unmount.
            run_and_check(['sudo', 'umount', image_name.as_posix()])
def build_iso() -> Path:
    """Assemble the bootable FAT disk image (axle.iso) from the staged
    bootloader, kernel, fs_server, initrd and AP bootstrap; return its path.

    Raises ValueError if any required artifact has not been built yet.
    """
    image_name = Path(__file__).parents[1] / "axle.iso"
    # Validate that every artifact we are about to package exists.
    bootloader_binary_path = Path(__file__).parents[1] / "bootloader" / "BOOTX64.EFI"
    if not bootloader_binary_path.exists():
        raise ValueError(f"Bootloader binary missing: {bootloader_binary_path}")
    kernel_binary_path = _REPO_ROOT / "isodir" / "boot" / "axle.bin"
    if not kernel_binary_path.exists():
        raise ValueError(f"Kernel binary missing: {kernel_binary_path}")
    fs_server_path = _REPO_ROOT / "axle-sysroot" / "usr" / "applications" / "initrd_fs"
    if not fs_server_path.exists():
        raise ValueError(f"fs_server missing: {fs_server_path}")
    initrd_path = _REPO_ROOT / "isodir" / "boot" / "initrd.img"
    if not initrd_path.exists():
        raise ValueError(f"initrd missing: {initrd_path}")
    ap_bootstrap_path = _REPO_ROOT / ".compiled_ap_bootstrap"
    if not ap_bootstrap_path.exists():
        raise ValueError(f"AP bootstrap missing: {ap_bootstrap_path}")
    # 262144 x 512-byte sectors = 128 MB of zeroes backing the image.
    run_and_check(["dd", "if=/dev/zero", f"of={image_name.as_posix()}", "bs=512", "count=262144"])
    # Entering the context formats the image as FAT; the copies below go
    # through mtools directly against the image file, so mount_point itself
    # is unused here.
    with _get_mounted_iso(image_name) as mount_point:
        run_and_check(["mmd", "-i", image_name.as_posix(), "::/EFI"])
        run_and_check(["mmd", "-i", image_name.as_posix(), "::/EFI/BOOT"])
        run_and_check(["mcopy", "-i", image_name.as_posix(), bootloader_binary_path.as_posix(), "::/EFI/BOOT"])
        run_and_check(["mmd", "-i", image_name.as_posix(), "::/EFI/AXLE"])
        run_and_check(["mcopy", "-i", image_name.as_posix(), kernel_binary_path.as_posix(), "::/EFI/AXLE/KERNEL.ELF"])
        run_and_check(["mcopy", "-i", image_name.as_posix(), fs_server_path.as_posix(), "::/EFI/AXLE/FS_SERVER.ELF"])
        run_and_check(["mcopy", "-i", image_name.as_posix(), initrd_path.as_posix(), "::/EFI/AXLE/INITRD.IMG"])
        run_and_check(["mcopy", "-i", image_name.as_posix(), ap_bootstrap_path.as_posix(), "::/EFI/AXLE/AP_BOOTSTRAP.BIN"])
    return image_name
def build_initrd() -> None:
    """Run the Rust mkinitrd tool and stage its output.img into isodir/boot/initrd.img."""
    mkinitrd_path = Path(__file__).parent / "mkinitrd"
    if not mkinitrd_path.exists():
        raise RuntimeError(f"mkinitrd directory missing, expected at {mkinitrd_path}")
    # This will also build mkinitrd, if necessary
    run_and_check(['cargo', 'run', '--release'], cwd=mkinitrd_path)
    # TODO(PT): How to select whether to build or not?
    # run_and_check(["./target/release/mkinitrd"], cwd=mkinitrd_path)
    generated_initrd = mkinitrd_path / "output.img"
    if not generated_initrd.exists():
        raise RuntimeError(f"mkinitrd did not generate initrd at {generated_initrd}")
    # Stage the ramdisk where build_iso() expects to find it.
    staged_initrd = Path(__file__).parents[1] / "isodir" / "boot" / "initrd.img"
    shutil.copy(generated_initrd.as_posix(), staged_initrd.as_posix())
def build_dist_tree() -> None:
    """Mirror the os_dist/ tree into axle-sysroot/, copying only files that
    are missing or older than their source (and skipping .DS_Store)."""
    dist_folder = Path(__file__).parents[1] / "os_dist"
    sysroot = Path(__file__).parents[1] / "axle-sysroot"
    for path in dist_folder.rglob("*"):
        if path.name == '.DS_Store':
            continue
        # Re-root the path from os_dist/ into the sysroot.
        relative_to_root = path.relative_to(dist_folder)
        sysroot_path = sysroot / relative_to_root
        if path.is_dir():
            sysroot_path.mkdir(exist_ok=True)
            continue
        if not sysroot_path.exists() or copied_file_is_outdated(path, sysroot_path):
            print(f'Copying {path} to {sysroot_path}')
            shutil.copy(path.as_posix(), sysroot_path.as_posix())
def main():
    """Drive the full axle build: stage sources, build bootloader/kernel/
    userspace, assemble the ISO, optionally mirror it to a USB stick, and
    (unless --no_run) boot the image."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--force_rebuild_programs", nargs="*", action="store")
    parser.add_argument("--force_rebuild_everything", action="store_true")
    parser.add_argument("--no_run", action="store_true")
    parser.add_argument("--run_only", action="store_true")
    parser.add_argument("--debug", action="store_true")
    # NOTE(review): the force_rebuild_* flags are parsed but never read below.
    parser.set_defaults(force_rebuild_everything=False)
    parser.set_defaults(no_run=False)
    parser.set_defaults(debug=False)
    args = parser.parse_args()

    # --run_only: skip every build step and boot the existing image.
    if args.run_only:
        image_name = Path(__file__).parents[1] / "axle.iso"
        run_iso(image_name, debug_with_gdb=args.debug)
        return

    # Stage kernel headers
    copy_kernel_headers()

    # Stage architecture-specific source files: each listed file is refreshed
    # from its `<name>.<ARCH>.arch_specific` sibling when missing or outdated.
    kernel_root = Path(__file__).parents[1] / "kernel"
    arch_specific_assembly = [
        kernel_root / "boot" / "boot.s",
        kernel_root / "kernel" / "util" / "walk_stack.s",
        kernel_root / "kernel" / "multitasking" / "tasks" / "process_small.s",
        kernel_root / "kernel" / "segmentation" / "gdt_activate.s",
        kernel_root / "kernel" / "interrupts" / "idt_activate.s",
        kernel_root / "kernel" / "interrupts" / "int_handler_stubs.s",
        kernel_root / "kernel" / "pmm" / "pmm_int.h",
        kernel_root / "kernel" / "vmm" / "vmm.h",
        kernel_root / "kernel" / "vmm" / "vmm.c",
    ]
    for file in arch_specific_assembly:
        arch_specific_file = file.parent / f"{file.name}.{ARCH}.arch_specific"
        if not arch_specific_file.exists():
            raise ValueError(f"Missing arch-specific file {arch_specific_file}")
        if not file.exists() or copied_file_is_outdated(arch_specific_file, file):
            print(f"\tCopying arch-specific code {arch_specific_file} to {file}...")
            shutil.copy(arch_specific_file.as_posix(), file.as_posix())

    # Build bootloader
    env = {"USE_GCC": "1", "SHELL": "sh -xv"}
    run_and_check(["make"], cwd=Path(__file__).parents[1] / "bootloader" / "uefi", env_additions=env)
    run_and_check(["make"], cwd=Path(__file__).parents[1] / "bootloader", env_additions=env)

    # Build the Rust kernel libraries
    build_kernel_rust_libs()

    # Build the AP bootstrap, which needs to be outside the kernel proper
    # TODO(PT): Render this based on the target architecture
    run_and_check(["nasm", "-f", "bin", (_REPO_ROOT / "ap_bootstrap.s.x86_64.arch_specific").as_posix(), "-o", (_REPO_ROOT / ".compiled_ap_bootstrap").as_posix()])

    # Build the C and assembly portions of the kernel, and link with the Rust libraries
    run_and_check(["make"])

    # Build Rust programs before C programs as the C programs might
    # need headers installed by Rust build scripts
    build_rust_programs()

    # Build user programs
    build_meson_projects()
    build_dist_tree()

    # Build ramdisk
    build_initrd()

    # Build disk image
    image_name = build_iso()

    # Copy kernel to USB, but only if a stick with the expected layout is mounted.
    usb_root = Path("/Volumes/NO NAME")
    if usb_root.exists():
        kernel_path = usb_root / "EFI" / "AXLE" / "KERNEL.ELF"
        fs_server_path = usb_root / "EFI" / "AXLE" / "FS_SERVER.ELF"
        initrd_path = usb_root / "EFI" / "AXLE" / "INITRD.IMG"
        if kernel_path.exists() and fs_server_path.exists():
            kernel_src = Path(__file__).parents[1] / "isodir" / "boot" / "axle.bin"
            fs_server_src = Path(__file__).parents[1] / "axle-sysroot" / "usr" / "applications" / "initrd_fs"
            initrd_src = Path(__file__).parents[1] / "isodir" / "boot" / "initrd.img"
            print("\n\n\nCopying kernel to USB\n\n\n")
            shutil.copy(kernel_src.as_posix(), kernel_path.as_posix())
            shutil.copy(fs_server_src.as_posix(), fs_server_path.as_posix())
            shutil.copy(initrd_src.as_posix(), initrd_path.as_posix())
        else:
            print("\n\n\nUnexpected USB tree, will not update image\n\n\n")
    else:
        print("\n\n\nNo USB detected, will not update image\n\n\n")

    if not args.no_run:
        run_iso(image_name, args.debug)
# Script entry point: run the full build pipeline.
if __name__ == "__main__":
    main()
| codyd51/axle | scripts/build_os_image.py | build_os_image.py | py | 9,306 | python | en | code | 536 | github-code | 90 |
16085895776 | import torch
import torch.nn as nn
import os
class Encoder(nn.Module):
    """Delta-encoder: maps a (features, reference_features) pair to a z_dim code.

    Only first_linear -> Tanh -> final_linear is used in forward(); the other
    registered submodules appear unused there but are kept (removing them
    would change checkpoint contents and parameter initialization order).
    NOTE(review): the mutable default ``encoder_size=[8192]`` is shared across
    calls -- harmless as long as callers never mutate it.
    """
    def __init__(self, feature_dim=256, encoder_size=[8192], z_dim=16, dropout=0.5, dropout_input=0.0, leak=0.2):
        super(Encoder, self).__init__()
        self.first_linear = nn.Linear(feature_dim*2, encoder_size[0])  # fully connected layer (in, out): input is the two concatenated feature vectors
        linear = []
        #print(encoder_size[0]) 8192
        #print(encoder_size) {list:1} containing the single value 8192
        # Optional extra hidden layers when encoder_size has more than one entry.
        for i in range(len(encoder_size) - 1):
            linear.append(nn.Linear(encoder_size[i], encoder_size[i+1]))
            linear.append(nn.LeakyReLU(leak))
            linear.append(nn.Dropout(dropout))
        self.linear = nn.Sequential(*linear)
        self.final_linear = nn.Linear(encoder_size[-1], z_dim)  # fully connected layer; [-1] is the last hidden size
        self.lrelu = nn.LeakyReLU(leak)  # activation
        self.dropout_input = nn.Dropout(dropout_input)
        self.dropout = nn.Dropout(dropout)
        self.Tanh = nn.Tanh()
        # Alternative encoder head (currently unused in forward()).
        self.encoder = nn.Sequential(
            nn.Linear(feature_dim * 2, feature_dim * 4),
            nn.Tanh(),
            nn.Linear(feature_dim * 4, z_dim),
            nn.Tanh(),
        )

    def forward(self, features, reference_features):
        # Tensors; per the original author's notes, during training both
        # features and reference_features are (128, 2048) -- TODO confirm.
        features = self.dropout_input(features)
        x = torch.cat([features, reference_features], 1)  # concatenate along dim 1 -> (batch, 2 * feature_dim)
        # print("features shape is:", features.shape, reference_features.shape)
        # print(x.shape)
        x = self.first_linear(x)
        x = self.Tanh(x)
        x = self.final_linear(x)  # -> (batch, z_dim)
        # x = self.encoder(x)
        return x
class Decoder(nn.Module):
    """Delta-decoder: maps (reference_features, code) back to a feature vector.

    Only first_linear -> Tanh -> final_linear is used in forward(); the other
    registered submodules appear unused there but are kept (removing them
    would change checkpoint contents and parameter initialization order).
    NOTE(review): the mutable default ``decoder_size=[8192]`` is shared across
    calls -- harmless as long as callers never mutate it.
    """
    def __init__(self, feature_dim=256, decoder_size=[8192], z_dim=16, dropout=0.5, leak=0.2):
        super(Decoder, self).__init__()
        self.first_linear = nn.Linear(z_dim+feature_dim, decoder_size[0])
        linear = []
        # Optional extra hidden layers when decoder_size has more than one entry.
        for i in range(len(decoder_size) - 1):
            linear.append(nn.Linear(decoder_size[i], decoder_size[i+1]))
            linear.append(nn.LeakyReLU(leak))
            linear.append(nn.Dropout(dropout))
        self.linear = nn.Sequential(*linear)
        self.final_linear = nn.Linear(decoder_size[-1], feature_dim)
        self.lrelu = nn.LeakyReLU(leak)
        self.dropout = nn.Dropout(dropout)
        self.Tanh = nn.Tanh()
        # Alternative decoder head (currently unused in forward()).
        self.decoder = nn.Sequential(
            nn.Linear(z_dim+feature_dim, feature_dim*4),
            nn.Tanh(),
            nn.Linear(feature_dim*4, feature_dim),
            nn.Sigmoid()
        )

    def forward(self, reference_features, code):
        x = torch.cat([reference_features, code], 1)  # concatenate along dim 1 -> (batch, feature_dim + z_dim)
        x = self.first_linear(x)
        x = self.Tanh(x)
        x = self.final_linear(x)  # -> (batch, feature_dim)
        # x = self.decoder(x)
        return x
# class Encoder(nn.Module):
# def __int__(self, in_shape=640, out_shape=16, dropout=0.5):
# super(Encoder, self).__int__()
# self.in_shape = in_shape
# self.out_shape = out_shape
# self.Encoder = nn.Sequential(
# nn.Linear(self.in_shape, 640),
# nn.Tanh(),
# nn.Linear(640, 320),
# nn.Tanh(),
# nn.Linear(320, 160),
# nn.Tanh(),
# nn.Linear(160, self.out_shape),
# nn.Tanh()
# )
#
# def forward(self, reference_features, x):
# x = torch.cat([reference_features, x], 1)
# encoder = self.Encoder(x)
# return encoder
#
#
# class Decoder(nn.Module):
# def __int__(self, in_shape=16, out_shape=640, dropout=0.5):
# super(Decoder, self).__int__()
# self.in_shape = in_shape
# self.out_shape = out_shape
# self.Decoder = nn.Sequential(
# nn.Linear(self.in_shape, 160),
# nn.Tanh(),
# nn.Linear(160, 320),
# nn.Tanh(),
# nn.Linear(320, self.out_shape),
# nn.Sigmoid()
# )
#
# def forward(self, reference_features, x):
# x = torch.cat([reference_features, x], 1)
# decoder = self.Decoder(x)
# return decoder | cmh1779/DEP-net | deltaencoder.py | deltaencoder.py | py | 4,399 | python | en | code | 0 | github-code | 90 |
30946999838 | # -*- coding: utf-8 -*-
"""
Created on Mon Aug 6 09:29:30 2018
@author: user
"""
import pandas as pd
import matplotlib.pyplot as plt
# Hard-coded input path kept from the original script.
path = r"C:\Users\user\Desktop\Niranjana\1 aug\rent_dataset.xlsx"
file = pd.read_excel(path, header=[0])
print(file)

rents = file['Rent']

# Frequency of each distinct rent value. (The original compared row indices
# against each other, so every "count" came out as 1, and concatenating the
# numeric rent onto a string inside str() raised a TypeError.)
count = {}
for value in rents:
    count[value] = count.get(value, 0) + 1
for value, freq in count.items():
    print('Count of ' + str(value) + ' : ' + str(freq))

# Mean over all rows (the original divided by the last index, i.e. n-1),
# the true median rent (the original used the index midpoint), and the most
# frequent rent as the mode (the original compared counts against indices).
mean = rents.sum() / len(rents)
median = rents.median()
mode = max(count, key=count.get)

print(mean)
print(median)
print(mode) | ranjuinrush/excercise | 1 aug/rent.py | rent.py | py | 671 | python | en | code | 0 | github-code | 90 |
4239340148 | #
# @lc app=leetcode id=60 lang=python3
#
# [60] Permutation Sequence
#
# @lc code=start
class Solution:
    def getPermutation(self, n: int, k: int) -> str:
        """Return the k-th (1-indexed) permutation of the digits 1..n."""
        # Size of the block of permutations sharing a leading digit:
        # starts at (n-1)! and shrinks by the remaining digit count.
        block = 1
        for factor in range(2, n):
            block *= factor  # block == (n-1)! after the loop
        digits = [str(d) for d in range(1, n + 1)]
        remaining = k - 1  # switch to a 0-indexed rank
        pieces = []
        while digits:
            index, remaining = divmod(remaining, block)
            pieces.append(digits.pop(index))
            if len(digits) > 1:
                block //= len(digits)
            else:
                block = 1
        return "".join(pieces)
# @lc code=end
| wangyerdfz/python_lc | 60.permutation-sequence.py | 60.permutation-sequence.py | py | 743 | python | en | code | 0 | github-code | 90 |
18446150279 | city={1:0, 2:0, 3:0, 4:0}
# Read the 3 given roads; each endpoint is a city id in 1..4, and `city`
# accumulates every city's degree (number of incident roads).
for _ in range(3):
    a,b=map(int, input().split())
    city[a]+=1
    city[b]+=1
else:
print("NO") | Aasthaengg/IBMdataset | Python_codes/p03130/s242330009.py | s242330009.py | py | 188 | python | en | code | 0 | github-code | 90 |
3691943974 | # -*- coding: utf-8 -*-
import numpy as np
import os
import keras
from keras import optimizers,initializers
from keras.models import Sequential
from keras.layers import Dense,Activation
from keras.layers.normalization import BatchNormalization
from keras.callbacks import ModelCheckpoint
from keras.utils.np_utils import to_categorical
from keras import regularizers
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
# Hyper-parameters / dataset layout.
# NOTE(review): Num_classes (4) is duplicated as a literal in the
# to_categorical call below -- keep them in sync.
Num_classes =4
img_nums = 137
img_w = 64
img_h =64
img_c = 1
epoch_num =200

data_path = '../data_train/train.npy'
models_path = './models'
if os.path.exists(models_path) ==False:
    os.makedirs(models_path)

# Each row of train.npy is a flattened image followed by its label in the
# last column.
all_data = np.load(data_path)
x_train = all_data[:,:-1]
y_train = all_data[:,-1:]
y_train = to_categorical(y_train,num_classes = 4)
x_train = np.reshape(x_train,(img_nums,img_h,img_w,img_c))
# NOTE(review): the "test" set is just the training data, so any accuracy
# reported against it is training accuracy.
x_test ,y_test = x_train,y_train
# VGG-style CNN: three Conv-BN-ReLU double blocks (32 -> 64 -> 128 filters),
# each followed by max-pooling and dropout, then a dense classifier head.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(64, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

model.add(Conv2D(128, (3, 3), padding='same'))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Conv2D(128, (3, 3)))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))

# Classifier head: 512-unit dense layer, then softmax over the classes.
model.add(Flatten())
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(Num_classes))
model.add(Activation('softmax'))
# policy
# Adam optimizer (``lr`` is the legacy Keras argument name; newer Keras
# versions call it ``learning_rate``).
adam =optimizers.Adam(lr=0.001)
model.compile(loss = 'categorical_crossentropy', optimizer = adam,metrics=['accuracy'])
# checkpoint: save the model whenever training accuracy ('acc') improves
filepath=models_path+os.sep+"model-{epoch:02d}-{acc:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='acc', verbose=0, save_best_only=True,mode='max')
callbacks_list = [checkpoint]
# fit
hist = model.fit(x_train, y_train,batch_size = 6 ,epochs=epoch_num,
                shuffle=True,verbose=1,callbacks= callbacks_list)
# Persist the Keras training history for later inspection.
# Fixed: the original ended with ``file.close`` (attribute access, not a
# call), so the handle was never closed; the context manager guarantees
# flush + close even on error.
# NOTE(review): 'trian.log' looks like a typo for 'train.log', but the name
# is kept so anything already reading that path keeps working.
with open('./trian.log', 'w') as log_file:
    log_file.write(str(hist.history))
| zhaoxuli/Play_Keras | train/train.py | train.py | py | 2,544 | python | en | code | 0 | github-code | 90 |
41157825506 | import requests
from bs4 import BeautifulSoup
import json
import re
# Scrape the change.org search results page for "stop asian hate" petitions.
query_url = "https://www.change.org/search?q=stop+asian+hate"
resp = requests.get(query_url)
soup = BeautifulSoup(resp.content, features="lxml")

# Parallel result lists: entry i of each list belongs to petition i.
items = soup.find_all("div", {"class": "search-result"})
details = soup.find_all("div", {"class": "type-ellipsis type-weak mbxxs xs-mtxs xs-mbn"})
all_titles = soup.find_all("h3", {"class": "mtn mbn prxs xs-mbs"})
all_imgs = soup.find_all("div", {"class": "flex-embed-ratio flex-embed-16x9"})

all_petitions = {}
for i, item in enumerate(items):
    peti = {}
    peti["url"] = "https://www.change.org" + item.contents[0].attrs["href"]
    peti["petition_to"] = details[i].contents[0].contents[0]

    # The title can contain <mark> highlight tags; flatten the children and
    # strip the tags.
    full_title = "".join(str(words) for words in all_titles[i].contents)
    peti["title"] = re.sub(r"<?/*mark>", "", full_title)

    # Fixed: the original indexed all_imgs[0] here, so every petition got the
    # first result's thumbnail; use the i-th image instead.
    img_url = all_imgs[i].contents[0].attrs["style"]
    peti["img_url"] = "https:" + img_url[img_url.find("('")+2:-2]

    all_petitions["petitions_no"+str(i)] = peti

base_path = "../frontend/src/Components/fetchPetitions/"
with open(base_path + "all_petitions.json", 'w') as outfile:
json.dump(all_petitions, outfile, sort_keys=True, indent=4) | YuqiZ2020/LAHacks-StandWithAsians | scrap_resource/extract_funds_resource.py | extract_funds_resource.py | py | 1,246 | python | en | code | 0 | github-code | 90 |
38396231225 | """User state management."""
# Standard Library
import logging
from typing import Optional
# Pyramid
from transaction import TransactionManager
# Websauna
from websauna.system.model.retry import ensure_transactionless
from websauna.system.model.retry import retryable
from websauna.system.user.models import User
from .mailgun import Mailgun
logger = logging.getLogger(__name__)
def import_subscriber(mailgun: Mailgun, address: str, user: User, upsert=True) -> bool:
    """Add one subscriber to the mailing list.

    :param mailgun: Mailgun API client used to perform the subscription
    :param address: mailing list address being imported into
    :param user: user whose email and name are added to the list
    :param upsert: pass-through to Mailgun; update an existing member instead
        of failing when True
    :return: True if user was fresh and imported; False if already
        subscribed, if the email is invalid, or if the API call failed
    """
    # Track import status in user_data JSON, so we don't do double requests
    # if the user has already been subscribed once.
    mailing_list_subscribes = user.user_data.get("mailing_list_subscribes", [])

    if address in mailing_list_subscribes:
        return False

    # Some sanity logic to filter out emails that are legit in some services,
    # unlegit in Mailgun. Fixed: this must validate the *user's* email (the
    # value being imported) -- the original split ``address``, the constant
    # mailing list address, so the per-user filter could never trigger.
    # partition() also tolerates malformed emails where split("@") would raise.
    local_part, _, _domain = user.email.partition("@")
    if local_part.startswith(".") or local_part.endswith("."):
        logger.info("Bad email address: %s", user.email)
        return False

    logger.info("Subscribing %s to %s", user.email, address)

    # Don't set subscribed field, so that we don't accidentally update
    # unsubscribed users.
    data = {
        "address": user.email,
        "name": user.friendly_name,
        "upsert": "yes" if upsert else "no",
    }

    try:
        mailgun.update_subscription(address, data)
    except Exception as e:
        logger.error("Failed to subscribe email %s: %s", user.email, e)
        return False

    mailing_list_subscribes.append(address)
    user.user_data["mailing_list_subscribes"] = mailing_list_subscribes
    return True
def import_all_users(mailgun: Mailgun, dbsession, address: str, tm: Optional[TransactionManager] = None) -> int:
    """Update Mail subscribers database from Websauna internal database.

    User ids are fetched in one transaction, then each user is imported in
    its own retryable transaction so a conflict on one user does not roll
    back the whole batch.

    :return: Imported count
    """
    if tm is None:
        tm = dbsession.transaction_manager

    count = 0

    # (Removed: a leftover debug loop here iterated the whole session and
    # printed every object on each call.)

    # Make sure we don't have a transaction in progress as we do batching ourselves
    ensure_transactionless(transaction_manager=tm)

    @retryable(tm=tm)
    def tx1():
        """Get user ids on the first transaction."""
        return [u.id for u in dbsession.query(User.id).all()]

    @retryable(tm=tm)
    def tx_n(id):
        """For each user, import it in a subsequent transaction."""
        u = dbsession.query(User).get(id)
        return 1 if import_subscriber(mailgun, address, u) else 0

    user_ids = tx1()

    for id in user_ids:
        count += tx_n(id)

    logger.info("Imported %d users", count)

    return count
| websauna/websauna.newsletter | websauna/newsletter/importer.py | importer.py | py | 2,840 | python | en | code | 1 | github-code | 90 |
41364078640 | a = 'a'
# Demo of the ASCII trick the original relied on: 'a' -> 'A' is ord() - 32.
print(ord(a))
print(chr(ord(a) - 32))


# Jaden-case the input: capitalize the first letter of every word.
line = input()
capitalized = []
for word in line.split(' '):
    # word[:1] is safe for empty tokens (consecutive spaces, which crashed
    # the original with IndexError), and .upper() only changes letters,
    # unlike the ord()-32 arithmetic that mangled punctuation-led words.
    capitalized.append(word[:1].upper() + word[1:])
s = ' '.join(capitalized)
print(s) | SalvadorGuido/Python | CodeWars/JadenCastingStrings.py | JadenCastingStrings.py | py | 283 | python | en | code | 0 | github-code | 90 |
34869038594 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: registers the custom 'view_sheet' permission
    # ("Peut voir la fiche" = "Can view the sheet") on the Sheet model.

    dependencies = [
        ('app', '0029_auto_20150615_1640'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='sheet',
            options={'permissions': (('view_sheet', 'Peut voir la fiche'),)},
        ),
    ]
| grodino/refiche | app/migrations/0030_auto_20150618_1651.py | 0030_auto_20150618_1651.py | py | 416 | python | en | code | 0 | github-code | 90 |
39818496096 | from core.apis import decorators
from flask import Blueprint, request
from core import db
from core.apis.responses import APIResponse
from core.models.assignments import Assignment
from .schema import AssignmentSchema, AssignmentSubmitSchema, GradeEnumSchema
teacher_assignments_resources = Blueprint('teacher_assignments_resources', __name__)
#new
@teacher_assignments_resources.route('/assignments', methods=['GET'], strict_slashes=False)
@decorators.auth_principal
def list_assignment(p):
    """GET /assignments -- list all assignments belonging to the
    authenticated teacher principal ``p``, serialized with AssignmentSchema."""
    teachers_assignments = Assignment.get_assignments_by_teacher(p.teacher_id)
    teachers_assignments_dump = AssignmentSchema().dump(teachers_assignments, many=True)
    return APIResponse.respond(data=teachers_assignments_dump)
@teacher_assignments_resources.route('/assignments/grade/', methods=['POST'], strict_slashes=False)
@decorators.accept_payload
@decorators.auth_principal
def grade_assignment(p, incoming_payload=None):
    """POST /assignments/grade/ -- grade an assignment.

    Expects a payload with 'id' and 'grade'; the grade is validated via
    GradeEnumSchema, persisted with Assignment.set_grade, and the graded
    assignment is returned.
    """
    grade_assignment_payload = GradeEnumSchema().load(incoming_payload)
    assignment_id = incoming_payload.get('id')
    # NOTE(review): this call passes two arguments while the GET endpoint
    # above calls get_assignments_by_teacher with one (teacher_id only), and
    # its result is never used -- looks like a broken ownership check;
    # confirm against the Assignment model.
    assignment = Assignment.get_assignments_by_teacher(assignment_id, p.teacher_id)
    graded_assignment = Assignment.set_grade(
        assignment_id=assignment_id,
        grade= grade_assignment_payload['grade'],
        principal=p
    )
    db.session.commit()
    graded_assignment_dump = AssignmentSchema().dump(graded_assignment)
    return APIResponse.respond(data=graded_assignment_dump)
| zuggernautt/Amit_fyle_be_assignment | core/apis/assignments/teacher.py | teacher.py | py | 1,492 | python | en | code | 0 | github-code | 90 |
7584435999 | # coding=gbk
import numpy as np
import pandas as pd
from pandas import DataFrame
from pandas import Series
import time
import h5py
from private_ipynb.lgb import lgb_model
from private_ipynb.xgb import xgb_method
'''
Initial plan: weighted fusion of models along the following lines.

Preprocessing / feature-construction schemes (focus on data source):
    1. raw features
    2. raw features + missing-value filling + statistical features
    3. raw features + missing-value filling + statistical features + rank features
    4. raw features + selected statistical features + selected rank features (later)

Candidate base models for the fusion (focus on algorithmic diversity):
    lgb, xgb, cab

This is where loose coupling between processing steps matters: every step must
be easy to toggle on/off, which removes a great deal of duplicated code. A
fusion built this way should also be more stable, since each individual scheme
has situations it handles well, so the combined scheme covers a wider range of
cases.
'''
####################################### Processing scheme 1 ##################################
def do_process1():
    """Scheme 1: use the raw features as-is (no filling, no engineered features).

    Returns (y, X, test_data) as numpy arrays.
    """
    # Load the data files
    train_xy = pd.read_csv("data/train_xy.csv",header=0,sep=",")
    train_x = pd.read_csv("data/train_x.csv",header=0,sep=",")
    test_all = pd.read_csv("data/test_all.csv",header=0,sep=",")
    print(train_xy.shape)
    print(train_x.shape)
    print(test_all.shape)

    train = train_xy.copy()
    test = test_all.copy()
    test['y'] = -1
    # Concatenate train and test so they can be processed together;
    # y == -1 marks test rows for splitting them back out later.
    data = pd.concat([train,test],axis = 0) # stack train_xy and test_all by rows
    print(train.shape)
    print(test.shape)
    print(data.shape)
    ######### column partition ########
    # Split the features into numerical (x_1..x_95) and categorical
    # (x_96..x_157) groups.
    numerical_features = []
    categorical_features = []
    for i in range(157):
        feat = "x_" + str(i+1)
        if i <= 94: # 1-95
            numerical_features.append(feat)
        else:
            categorical_features.append(feat)
    print("有用的数值型特征:",len(numerical_features))
    print("有用的类别型特征:",len(categorical_features))
    ##########
    train = data.loc[data['y']!=-1,:] # train set
    test = data.loc[data['y']==-1,:] # test set
    print(train.shape)
    print(test.shape)
    # Drop id, group and target columns to obtain the feature columns.
    no_features = ['cust_id','cust_group','y']
    features = [feat for feat in train.columns.values if feat not in no_features]
    print("所有特征的维度:",len(features))
    # Build the model inputs X and target y.
    train_id = train['cust_id'].values
    y = train['y'].values
    X = train[features].values
    print("X shape:",X.shape)
    print("y shape:",y.shape)
    test_id = test['cust_id'].values
    test_data = test[features].values
    return y,X,test_data
####################################### Processing scheme 2 ##################################
def do_process2():
    """Scheme 2: raw features + one-hot missing-value counts + mean-filling of
    the most important features.

    Also dumps the processed train set to CSV for semi-supervised use.
    Returns (y, X, test_data) as numpy arrays.
    """
    # Load the data files
    train_xy = pd.read_csv("data/train_xy.csv",header=0,sep=",")
    test_all = pd.read_csv("data/test_all.csv",header=0,sep=",")
    print(train_xy.shape)
    print(test_all.shape)

    train = train_xy.copy()
    test = test_all.copy()
    test['y'] = -1
    # Concatenate train and test so they are transformed together;
    # y == -1 marks test rows for splitting them back out later.
    data = pd.concat([train,test],axis = 0) # stack train_xy and test_all by rows
    print(train.shape)
    print(test.shape)
    print(data.shape)
    ################################################ column partition #################################################################
    # Split the features into numerical (x_1..x_95) and categorical
    # (x_96..x_157) groups.
    numerical_features = []
    categorical_features = []
    for i in range(157):
        feat = "x_" + str(i+1)
        if i <= 94: # 1-95
            numerical_features.append(feat)
        else:
            categorical_features.append(feat)
    print("有用的数值型特征:",len(numerical_features))
    print("有用的类别型特征:",len(categorical_features))
    ################################################# missing-value statistics ################################################################
    # Count the number of missing values (-99 sentinels) per row/user.
    def get_nan_count(data):
        df = data.copy()
        df = df.replace(-99,np.nan)
        df['nan_count'] = df.shape[1] - df.count(axis = 1).values # number of columns - number of non-NaN entries
        dummy = pd.get_dummies(pd.cut(df['nan_count'],7),prefix = 'nan') # discretize the missing count into 7 bins, one-hot encoded
        print(dummy.shape)
        # NOTE(review): after the axis=0 concat above the row index contains
        # duplicate labels; aligning on it in this axis=1 concat may raise or
        # duplicate rows on some pandas versions -- confirm.
        res = pd.concat([data,dummy],axis = 1) # join the dummies back onto the original data
        print(res.shape)
        return res
    data = get_nan_count(data)
    ####################################################### missing-value filling ##########################################################
    # Top-24 features by importance.
    imp_feat = [ 'x_80', 'x_2', 'x_81', 'x_95', 'x_1',
                'x_52', 'x_63', 'x_54', 'x_43', 'x_40',
                'x_93', 'x_42', 'x_157', 'x_62', 'x_29',
                'x_61', 'x_55', 'x_79', 'x_59', 'x_69',
                'x_48', 'x_56', 'x_7', 'x_64']
    print("重要的特征个数:",len(imp_feat))
    # Fill some of the important features.
    for feat in imp_feat[:10]: # only the top 10, not all of them
        if feat in numerical_features: # numerical: fill with the mean
            data[feat] = data[feat].replace(-99,np.nan)
            data[feat] = data[feat].fillna(data[feat].mean()) # mean over non-NaN values
        if feat in categorical_features: # categorical: left untouched (median / mode would be alternatives)
            print("这是类别特征:",feat)
            pass
    #################################################################################################################
    train = data.loc[data['y']!=-1,:] # train set
    test = data.loc[data['y']==-1,:] # test set
    print(train.shape)
    print(test.shape)
    # Drop id, group and target columns to obtain the feature columns.
    no_features = ['cust_id','cust_group','y']
    features = [feat for feat in train.columns.values if feat not in no_features]
    print("所有特征的维度:",len(features))
    # Build the model inputs X and target y.
    train_id = train['cust_id'].values
    y = train['y'].values
    X = train[features].values
    print("X shape:",X.shape)
    print("y shape:",y.shape)
    test_id = test['cust_id'].values
    test_data = test[features].values
    print("test shape",test_data.shape)
    ################# dump kept for semi-supervised learning: preserves the group membership info ##################
    train.to_csv('../feature_data/suse_all_train.csv')
    return y,X,test_data
####################################### 处理方式三 ##################################
def do_process3():
    """Preprocessing variant 3: drop heavily-missing columns, add per-row
    missing-value dummies, rank-normalize numerical features, and impute
    the top important numerical features.

    Reads data/train_xy.csv and data/test_all.csv and returns
    (y, X, test_data) as numpy arrays.
    """
    # Read the raw files.
    train_xy = pd.read_csv("data/train_xy.csv", header=0, sep=",")
    test_all = pd.read_csv("data/test_all.csv", header=0, sep=",")
    train = train_xy.copy()
    test = test_all.copy()
    # Mark test rows with y = -1 so they can be split out again later.
    test['y'] = -1
    # Concatenate train and test so both are transformed identically.
    data = pd.concat([train, test], axis=0)  # row-wise concat of train_xy and test_all
    # Drop useless features (noise, heavily missing, single-valued, duplicated):
    # remove columns missing in more than 95% of BOTH train and test.
    # train and test are analysed separately first.
    # Collect feature columns whose missing rate is severe.
    def get_nan_feature(train, rate=0.95):
        total_num = train.shape[0]
        train_nan_feats = []
        for i in range(157):
            feat = 'x_' + str(i + 1)
            nan_num = train.loc[train[feat] == -99, :].shape[0]
            nan_rate = nan_num / float(total_num)
            if nan_rate == 1.0:  # column contains only NaN (-99)
                train_nan_feats.append(feat)
            if nan_rate > rate:  # column is mostly NaN
                if len(train[feat].unique()) == 2:  # only NaN plus one other value
                    train_nan_feats.append(feat)
        print("一共有 %d 个特征列的缺失值严重,超过%f " % (len(train_nan_feats), rate))
        return train_nan_feats
    train_nan_feats = get_nan_feature(train)
    test_nan_feats = get_nan_feature(test)
    print("缺失严重的特征:train =?= test------", np.all(train_nan_feats == test_nan_feats))
    # Take the union of the two lists (28 columns in practice).
    nan_feats = list(set(train_nan_feats) | set(test_nan_feats))  # union of train/test lists; other set ops could be tried
    print('严重缺失的特征有 %d 个。' % (len(nan_feats)))
    # Only the 28 heavily-missing columns are dropped (dropping the 5
    # duplicated columns was tried and hurt the score).
    drop_feats = nan_feats
    print('一共删除的特征有 %d 个。' % (len(drop_feats)))
    print(drop_feats)
    # Drop the heavily-missing feature columns everywhere.
    train = train.drop(drop_feats, axis=1)
    test = test.drop(drop_feats, axis=1)
    data = data.drop(drop_feats, axis=1)
    print(data.shape)
    print(train.shape)
    print(test.shape)
    # All dropped columns had zero feature importance, so dropping them only
    # removes noise (x_92 and x_94 were numerical, the other 24 categorical).
    # Partition the remaining columns into numerical and categorical
    # (columns x_1..x_95 are assumed numerical by convention).
    numerical_features = []
    categorical_features = []
    for i in range(157):
        feat = "x_" + str(i+1)
        if feat not in drop_feats:
            if i <= 94: # columns x_1 .. x_95
                numerical_features.append(feat)
            else:
                categorical_features.append(feat)
    # Count the number of missing values per sample/row.
    def get_nan_count(data, feats, bins=7):
        df = data[feats].copy()
        df = df.replace(-99, np.nan)
        print('总列数:', df.shape[1])  # total number of columns
        print('每行非空列数:', df.count(axis=1).values)  # non-NaN column count per row
        df['nan_count'] = df.shape[1] - df.count(axis=1).values  # columns minus non-NaN count
        print('每行空列数:', df['nan_count'])  # NaN column count per row
        dummy = pd.get_dummies(pd.cut(df['nan_count'], bins),
                               prefix='nan')  # discretize the per-row NaN count into `bins` intervals and one-hot encode
        print(dummy.shape)
        res = pd.concat([data, dummy], axis=1)  # append the dummies to the original data
        print(res.shape)
        return res
    # Count missing values over all features; adds 7 new dummy columns.
    data = get_nan_count(data, data.columns.values, 7)
    print('训练集的特征列:', data.columns)
    # Collect numerical features with a low missing rate (candidates for mean fill).
    def get_little_nan_feats(df, numerical_features, rate=0.1):
        total_num = df.shape[0]
        little_nan_feats = []
        for feat in numerical_features:
            nan_num = df.loc[df[feat] == -99, :].shape[0]
            nan_rate = nan_num / float(total_num)
            if nan_rate <= rate:
                little_nan_feats.append(feat)
            # print("feature:",feat,"nan_num = ",nan_num,"nan_rate = ",nan_rate)
        print("一共有 %d 个特征列的缺失值较少,低于%f " % (len(little_nan_feats), rate))
        return little_nan_feats
    little_nan_feats = get_little_nan_feats(data, numerical_features)
    # Turn numerical features into rank features (more robust): the values
    # only encode ordering, so a normalized rank keeps that information.
    for feat in numerical_features:
        #print('rank前:',data[feat])
        data[feat] = data[feat].rank() / float(data.shape[0])  # rank, then normalize by the row count
        #print('rank后:',data[feat])
    print('训练集的特征列:',data.columns)
    # Impute the important features (they may later also drive sampling).
    imp_feat = ['x_80', 'x_2', 'x_81', 'x_95', 'x_1', 'x_52', 'x_63', 'x_54', 'x_43', 'x_40', 'x_93', 'x_42', 'x_157',
                'x_62', 'x_29', 'x_61', 'x_55']
    print('假定的重要特征个数为:', len(imp_feat))
    for feat in imp_feat[:10]:
        if feat in numerical_features:
            print('进行填充吧')
            data[feat] = data[feat].replace(-99, np.nan)
            data[feat] = data[feat].fillna(data[feat].mean())
        if feat in categorical_features:
            print('这是类别特征:', feat)
    train = data.loc[data['y'] != -1, :]  # train set
    test = data.loc[data['y'] == -1, :]  # test set
    no_features = ['cust_id', 'cust_group', 'y']
    features = [feat for feat in train.columns.values if feat not in no_features]
    train_id = train.pop('cust_id')
    y = train['y'].values
    X = train[features].values
    print('X features :', features)
    print("X shape:", X.shape)
    print("y shape:", y.shape)
    test_id = test.pop('cust_id')
    test_data = test[features].values
    return y, X, test_data
if __name__=='__main__':
    # Build three differently-preprocessed datasets, train LightGBM and
    # XGBoost on each, then blend the six prediction vectors.
    print('-------- 储备五种方式的处理形式 ---------')
    print('第一种形式的文件生成')
    y, X, test_data=do_process1()
    print('第二种形式的文件生成')
    y2, X2, test_data2=do_process2()
    print('第三种形式的文件生成')
    y3, X3, test_data3 = do_process3()
    print('-------- 构建不同模型的数据利用 ---------')
    # lgb_model / xgb_method return (offline_score, prediction_vector);
    # they are defined earlier in this file.
    the_score_lg1,r_lg1=lgb_model(X, y, test_data)
    the_score_lg2,r_lg2=lgb_model(X2, y2, test_data2)
    the_score_lg3,r_lg3=lgb_model(X3, y3, test_data3)
    the_score_xg1,r_xg1=xgb_method(X, y, test_data)
    the_score_xg2,r_xg2=xgb_method(X2, y2, test_data2)
    the_score_xg3,r_xg3=xgb_method(X3, y3, test_data3)
    print('-------- 融合处理 ---------')
    print('线下的得分依次是:',the_score_lg1,' ',the_score_lg2,' ',the_score_lg3,' ',the_score_xg1,' ',the_score_xg2,' ',the_score_xg3)
    the_record_score = (the_score_lg1 + the_score_lg2 + the_score_lg3 + the_score_xg1 + the_score_xg2 + the_score_xg3) / 6
    # Blend weights: currently only the lgb-on-variant-3 prediction is used.
    the_avg_sub=0*r_lg1+0*r_lg2+1*r_lg3+0*r_xg1+0*r_xg2+0*r_xg3
    filepath = '../result/多数据集多特征多模型融合方案' + str(the_record_score) + '.csv' # offline average score in the file name
    # Write the submission: ids from a previous submission file, blended probs.
    print('result shape:', the_avg_sub.shape)
    sub_sample = pd.read_csv('../result/xgb_nan.csv')
    result = DataFrame()
    result['cust_id'] = sub_sample['cust_id']
    result['pred_prob'] = the_avg_sub
    result.to_csv(filepath, index=False, sep=",")
'''
对6种方式融合的效果:
第一种:
0.05*r_lg1+0.15*r_lg2+0.3*r_lg3+0.1*r_xg1+0.3*r_xg2+0.1*r_xg3
mean auc: 0.8190709923066477
总的结果: (5, 10000)
result shape: (10000,)
-------- 融合处理 ---------
线下的得分依次是: 0.8191922695131673 0.8206291690409669 0.8192571164144196 0.8194091860993773 0.819775776724755 0.8190709923066477
result shape: (10000,)
线上:0.75188
第二种:
0.1*r_lg1+0.2*r_lg2+0.2*r_lg3+0.1*r_xg1+0.2*r_xg2+0.2*r_xg3
''' | isthegoal/XWBank_Top2_solution | private_ipynb/big_model_emsemble.py | big_model_emsemble.py | py | 14,043 | python | zh | code | 3 | github-code | 90 |
73845215016 | """
This program will allow the user to enter the name of an MLB baseball team and display the number of times the team has won the World Series in the years from 1903 through 2009.
Name: Justin Rivas
Date: 7/27/2021
Version: 1.0.0
"""
def main():
    """Report how many World Series titles a user-chosen MLB team won.

    Reads one champion name per line from WorldSeriesWinners.txt
    (years 1903-2009), normalizes the names to title case, then counts
    how often the team entered by the user appears in the list.
    """
    try:
        # open the file and read the contents
        file = open('WorldSeriesWinners.txt', 'r')
    # if filename incorrect give this error.
    except IOError as error:
        print(f"Trouble opening file. Try again.{error}")
        exit()
    # Build the champions list: strip the trailing newline from each name
    # and convert it to title case so comparisons are case-insensitive.
    champions = [line.rstrip('\n').title() for line in file.readlines()]
    file.close()
    # ask the user to input a team they want to check
    user_input = input("Enter the name of a team.").title()
    # list.count gives the number of championships in a single pass;
    # this replaces the original membership-test-plus-manual-counter loop
    # and drops the bare `except:` that silently hid any real error.
    win_count = champions.count(user_input)
    if win_count:
        print(f"The {user_input.title()} have won the World Series {win_count} times between 1903 and 2009.")
    else:
        print(f"The {user_input.title()} have not won a World Series between 1903 and 2009....sorry")
main()
| jdr1813/Programming-ACC-1 | rivas_lab7.py | rivas_lab7.py | py | 1,661 | python | en | code | 0 | github-code | 90 |
73121251816 | import numpy as np
from flask import Flask, request, jsonify, render_template
import pickle
app = Flask(__name__)
model = pickle.load(open('model.pkl', 'rb'))
@app.route('/')
def home():
    # Render the landing page containing the prediction input form.
    return render_template('index.html')
@app.route('/predict',methods=['POST'])
def predict():
    """Read the submitted form, run the model, and render the verdict."""
    # Collect the posted form fields as integers, in submission order.
    features = [int(value) for value in request.form.values()]
    # The model expects a batch, so wrap the single feature row.
    batch = [np.array(features)]
    score = round(model.predict(batch)[0], 2)
    # A zero prediction means the customer is not expected to purchase.
    output = 'Not Purchase' if score == 0 else 'Purchased'
    return render_template('index.html', prediction_text='The Customer {}'.format(output))
@app.after_request
def add_header(r):
    """Disable client-side caching on every response.

    Sets Cache-Control plus the legacy Pragma/Expires headers so browsers
    always re-fetch the prediction page instead of serving a stale copy.
    """
    # no-cache/no-store/must-revalidate covers modern browsers;
    # Pragma and Expires cover HTTP/1.0 clients and proxies.
    r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    r.headers["Pragma"] = "no-cache"
    r.headers["Expires"] = "0"
    # NOTE: the original assigned Cache-Control a second time with
    # 'public, max-age=0', silently overwriting the no-store directive
    # above (and contradicting its own docstring about 10-minute caching);
    # that duplicate assignment has been removed.
    return r
if __name__ == "__main__":
app.run() | zanuarts/customer-behaviour | app.py | app.py | py | 1,091 | python | en | code | 0 | github-code | 90 |
20289424277 | #Project 5: Indefinite Iteration
#1: Number Guessing Game
import random
# Binary-search style guessing game: the program proposes a number and the
# user narrows the range by answering "up", "down", or "yes".
print ('Choose a number between 1 to 100 and the Number Genie will find it for you!'
       'All you have to do is type Up for a higher number or down for a lower number.'
       'If I get it correct just type in yes!')
high = 100
low = 0
answer = ''
number = random.randint(low, high)
while answer != 'yes':
    print ("Is the number", number,"?")
    answer = input()
    if answer.lower() == "up":
        # Exclude everything at or below the last guess.
        low = number + 1
        number = random.randint(low, high)
        # NOTE(review): if the user's answers are inconsistent, low can
        # exceed high and randint raises ValueError — unguarded here.
    elif answer.lower() == "down":
        # Exclude everything at or above the last guess.
        high = number - 1
        number = random.randint(low, high)
    elif answer.lower() == "yes":
        print ("Well you have two more wishes, so get thinking!")
    else:
        print ("That is an invalid answer. Try up, down or yes")
| sankumsek/assorted-python | While Loop.py | While Loop.py | py | 827 | python | en | code | 0 | github-code | 90 |
5682517150 | # -*- coding: utf-8 -*-
import tensorflow.contrib.slim as slim
from tensorpack import (ModelDesc, get_current_tower_context,
regularize_cost_from_collection, get_global_step_var)
import tensorflow as tf
from dataset import get_dataset
class OrdinaryCNNModel(ModelDesc):
    """Plain convolutional regression model built with tf.slim layers.

    Three conv/dropout stages (256 -> 128 -> 64 filters) followed by two
    fully-connected layers; trained with mean-squared-error against a
    50-dimensional target vector.
    """
    def __init__(self, config):
        # config supplies width, height, channels, target_length,
        # weight_decay and bias_decay (attributes read below).
        self.config = config
    def inputs(self):
        # Placeholders for a batch of images and their regression targets.
        return [tf.placeholder(tf.float32, [None, self.config.width, self.config.height, self.config.channels], 'images'),
                tf.placeholder(tf.float32, [None, self.config.target_length], 'labels')]
    def build_graph(self, images, labels):
        """Build the forward graph and define self.loss / self.cost."""
        ctx = get_current_tower_context()
        # Dropout behaves differently in training vs. inference.
        is_training = ctx.is_training
        # images =
        # Shared defaults for every conv/fc layer: L2 regularization,
        # Xavier init, zero biases, ReLU activation.
        with slim.arg_scope([slim.layers.conv2d, slim.layers.fully_connected],
                            weights_regularizer=tf.contrib.layers.l2_regularizer(self.config.weight_decay),
                            weights_initializer=tf.contrib.layers.xavier_initializer(),
                            biases_regularizer=tf.contrib.layers.l2_regularizer(self.config.bias_decay),
                            biases_initializer=tf.zeros_initializer(),
                            activation_fn=tf.nn.relu):
            net = images
            # build layers
            # conv2d(inputs, num_outputs, kernel_size, stride=1, padding='SAME')
            net = slim.layers.conv2d(net, 256, [3, 3], padding='VALID', scope='conv1_1')
            net = slim.layers.dropout(net, is_training=is_training)
            net = slim.layers.conv2d(net, 256, [3, 3], padding='VALID', scope='conv1_2')
            tf.summary.histogram('conv1_output', net)
            net = slim.layers.dropout(net, is_training=is_training)
            # max_pool2d(inputs, kernel_size, stride=2, padding='VALID')
            net = slim.layers.max_pool2d(net, [2, 2], scope='pool1')
            net = slim.layers.conv2d(net, 128, [3, 3], padding='VALID', scope='conv2_1')
            net = slim.layers.dropout(net, is_training=is_training)
            net = slim.layers.conv2d(net, 128, [3, 3], padding='VALID', scope='conv2_2')
            tf.summary.histogram('conv2_output', net)
            net = slim.layers.dropout(net, is_training=is_training)
            net = slim.layers.max_pool2d(net, [2, 2], scope='pool2')
            net = slim.layers.conv2d(net, 64, [3, 3], padding='VALID', scope='conv3_1')
            net = slim.layers.dropout(net, is_training=is_training)
            net = slim.layers.conv2d(net, 64, [3, 3], padding='VALID', scope='conv3_2')
            tf.summary.histogram('conv3_output', net)
            # NOTE(review): max_pool2d is applied AFTER flatten, i.e. to a
            # 2-D tensor — pool3 probably belongs before the flatten call.
            # Confirm against the intended architecture before changing.
            net = slim.flatten(net, scope='flatten')
            net = slim.layers.max_pool2d(net, [2, 2], scope='pool3')
            net = slim.layers.fully_connected(net, 128, scope='fc1')
            tf.summary.histogram('fc1_output', net)
            net = slim.layers.fully_connected(net, 50, scope='fc2')
            tf.summary.histogram('fc2_output', net)
        # define loss
        logits = tf.identity(net, name='logits_export')
        self.loss = tf.losses.mean_squared_error(labels, logits)
        tf.summary.scalar('loss', self.loss)
        # Total cost = MSE loss + accumulated L2 regularization terms.
        self.cost = self.loss + regularize_cost_from_collection()
    def optimizer(self):
        # Adam with a staircase-decayed learning rate (x0.3 every 4680 steps).
        lr = tf.train.exponential_decay(
            learning_rate=1e-3,
            global_step=get_global_step_var(),
            decay_steps=468 * 10,
            decay_rate=0.3, staircase=True, name='learning_rate')
        return tf.train.AdamOptimizer(lr)
def get_data():
    # Thin wrapper so callers fetch the dataset through this module.
    return get_dataset()
| xiaozhuka/face_landmark | tensorpack_implementaion/model.py | model.py | py | 3,743 | python | en | code | 0 | github-code | 90 |
25251215439 |
class Solution:
    def lengthOfLIS(self, nums):
        """Return the length of the longest strictly increasing subsequence.

        Classic O(n^2) dynamic programming: dp[i] is the LIS length of the
        subsequence ending exactly at nums[i].

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        if n == 0:
            return 0
        dp = [1] * n
        for i in range(n):
            for j in range(i):
                if nums[j] < nums[i] and dp[i] < dp[j] + 1:
                    dp[i] = dp[j] + 1
        # Take the maximum over all ending positions. The original updated
        # its running maximum only inside the inner loop, which never runs
        # for i == 0, so a single-element list wrongly returned 0.
        return max(dp)
import bisect
class Solution:
    def lengthOfLIS(self, nums):
        """Return the length of the longest strictly increasing subsequence.

        Patience-sorting variant in O(n log n): tails[k] holds the smallest
        possible tail value of an increasing subsequence of length k+1, so
        the answer is simply len(tails).

        :type nums: List[int]
        :rtype: int
        """
        tails = []
        for value in nums:
            if tails and value <= tails[-1]:
                # value cannot extend the longest run; tighten the first
                # tail >= value so future values can extend more easily.
                tails[bisect.bisect_left(tails, value)] = value
            else:
                tails.append(value)
        return len(tails)
# Smoke test: the second assignment wins; its LIS is (-1, 0, 1, 2), length 4.
nums = [10,9,2,5,3,7,101,18]
nums = [-1, 2, 2, 2, 3, 0, 0, 0, 1, 2]
solu = Solution()
print(solu.lengthOfLIS(nums))
| sevenseablue/leetcode | src/leet/300-longest-increasing-subsequence.py | 300-longest-increasing-subsequence.py | py | 967 | python | en | code | 0 | github-code | 90 |
14073931801 | import sys
import scrap_package as sp
user_args = sys.argv
def description():
    """Print usage examples for every mode of this script.

    :return: None (output goes to stdout).
    """
    usage_lines = (
        '> python llopis_scraper_books_online.py book http://book_url',
        '> python llopis_scraper_books_online.py category http://category_url/index.html',
        '> python llopis_scraper_books_online.py all',
        '> python llopis_scraper_books_online.py input',
    )
    print('\n' + '\n'.join(usage_lines))
def main(user_args):
    """Dispatch to the scraping mode selected on the command line.

    Depending on the chosen option, calls the matching main_* function
    from the scrap_package module (module ``main_p02_scrap.py``).

    :param user_args: the raw ``sys.argv`` list. ``user_args[1]`` selects
        the mode — one of:
            - 'book'
            - 'category'
            - 'all'
            - 'input'
        For the 'book' and 'category' modes, ``user_args[2]`` must be the
        URL of a book or of a category taken from
        'http://books.toscrape.com/'.
    :example:
        python llopis_scraper_books_online.py all

        python llopis_scraper_books_online.py book
        http://books.toscrape.com/catalogue/its-only-the-himalayas_981/index.html

        python llopis_scraper_books_online.py category
        http://books.toscrape.com/catalogue/category/books/travel_2/index.html
    :type user_args: list of str (URLs must come from
        'http://books.toscrape.com/')
    :return: the result of the selected main_* function, or the usage
        description when the option is unknown.
    :exception: if the arguments do not match the expected usage, prints
        the error and shows the usage description instead of raising.
    """
    try:
        if user_args[1] == 'book':
            return sp.main.main_with_book(user_args[2])
        elif user_args[1] == 'category':
            return sp.main.main_with_category(user_args[2])
        elif user_args[1] == 'all':
            return sp.main.main_with_all()
        elif user_args[1] == 'input':
            return sp.main.main_with_input()
        else:
            return description()
    except Exception as e:
        print(str(e))
        return description()
main(user_args)
| geoffrey-ll/OC_P02_BookScrape | llopis_scraper_books_online.py | llopis_scraper_books_online.py | py | 2,728 | python | fr | code | 0 | github-code | 90 |
25530725271 | import numpy as np
from numpy import linalg
###################
# util functions
###################
def init_G():
    # Random N x N observation mask: each entry observed with prob. 0.3.
    # N is a module-level global assigned in the __main__ block.
    return np.random.binomial(1, 0.3, size=[N,N])
    # test
    # return np.ones([N,N])
def init_A():
    # Random N x N target matrix with entries uniform in [0, 1).
    return np.random.uniform(0, 1, size=[N,N])
    # test
    # return np.ones([N,N])
def init_x():
    # Initial optimization variable: B and C stacked into one flat vector
    # of length N*R*2, drawn from a standard normal distribution.
    return np.random.normal(0, 1, size=[N*R*2])
    # test
    # return np.ones([N*R*2])
def BC_2_x(B, C):
    # Flatten B (N x R) and C (R x N) into one vector of length 2*N*R.
    return np.append(B.reshape([N*R]), C.reshape([N*R]))
def x_2_BC(x):
    # Inverse of BC_2_x: split the flat vector back into B (N x R) and C (R x N).
    return x[:N*R].reshape([N,R]), x[N*R:].reshape([R,N])
def func_diff_d_b(i, j, k, l):
    # Partial derivative dD[i,j]/dB[k,l] where D = B @ C:
    # it is C[l,j] when k == i, and 0 otherwise. Reads the global C.
    if k == i:
        return C[l, j]
    return 0
def func_diff_d_c(i, j, m, n):
    # Partial derivative dD[i,j]/dC[m,n] where D = B @ C:
    # it is B[i,m] when n == j, and 0 otherwise. Reads the global B.
    if n == j:
        return B[i, m]
    return 0
def func_diff_d_b_c(i, j, k, l, m, n):
    """Mixed second derivative d^2 D[i,j] / (dB[k,l] dC[m,n]) for D = B @ C.

    Equals 1 exactly when the indices line up (k == i, m == l, n == j),
    otherwise 0.
    """
    return 1 if (k == i and m == l and n == j) else 0
###################
# diff functions
###################
def func_diff_f_b(k, l):
    # Gradient of the objective f w.r.t. B[k,l]:
    # sum over observed entries of 2*(A-D)*(-dD/dB) plus the ridge term mu*B[k,l].
    # Reads globals B, C, G, A, N, mu. Uses xrange, so this file targets Python 2.
    res = 0
    D = np.matmul(B, C)
    for i in xrange(N):
        for j in xrange(N):
            res += G[i,j]*2*(A[i,j]-D[i,j]) * (-func_diff_d_b(i,j,k,l))
    res += mu*B[k,l]
    return res
def func_diff_f_c(m, n):
    # Gradient of the objective f w.r.t. C[m,n]; mirror of func_diff_f_b.
    # Reads globals B, C, G, A, N, mu.
    res = 0
    D = np.matmul(B, C)
    for i in xrange(N):
        for j in xrange(N):
            res += G[i,j]*2*(A[i,j]-D[i,j]) * (-func_diff_d_c(i,j,m,n))
    res += mu*C[m,n]
    return res
###################
# f, g, H
###################
def func_f(x):
    # Objective: masked squared reconstruction error of A by D = B @ C,
    # plus an L2 ridge penalty 0.5*mu*(||B||^2 + ||C||^2).
    # Reads globals G, A, N, mu; B and C come from unpacking x.
    res = 0
    B, C = x_2_BC(x)
    D = np.matmul(B, C)
    for i in xrange(N):
        for j in xrange(N):
            res += G[i,j]*(A[i,j]-D[i,j])**2
    res += 0.5*mu*(np.sum(np.square(B)) + np.sum(np.square(C)))
    return res
def func_g():
    # Full gradient vector in the same layout as x: all dB entries
    # (row-major over k, l), then all dC entries (row-major over m, n).
    # Relies on the globals B and C being set from the current x first.
    g = []
    for k in xrange(N):
        for l in xrange(R):
            g.append(func_diff_f_b(k, l))
    for m in xrange(R):
        for n in xrange(N):
            g.append(func_diff_f_c(m, n))
    g = np.array(g)
    return g
###################
# trust region subproblem
###################
if __name__ == '__main__':
    # Problem size and hyper-parameters (read as globals by the functions above).
    N = 10
    R = 2
    mu = 0.1
    np.random.seed(21)
    A = init_A()
    G = init_G()
    alpha = 0.01
    eta = 0.2
    x = init_x()
    cnt = 0
    # Plain gradient descent for a fixed number of iterations (~102).
    while True:
        # Refresh the globals B, C so func_g sees the current factors.
        B, C = x_2_BC(x)
        g = func_g()
        x = x - alpha*g
        f_x = func_f(x)
        print(cnt)
        print('fx', f_x)
        if cnt > 100:
            break
        cnt += 1
    print('- - - result - - - ')
    print(func_f(x))
    # Compare the observed entries of A against the recovered low-rank product.
    original_mat = G*A
    B, C = x_2_BC(x)
    recovered_mat = G*np.matmul(B,C)
    xs, ys = np.nonzero(G)
    print(np.stack([original_mat[xs,ys], recovered_mat[xs,ys]]).transpose())
    # Mean absolute error over the observed entries only.
    print(np.mean(np.abs(original_mat[xs,ys]-recovered_mat[xs,ys])))
| LihangLiu/CS395T-Numerical-Optimization | low-rank-matrix/gradient_descent.py | gradient_descent.py | py | 2,676 | python | en | code | 2 | github-code | 90 |
def read_files_v2(file_name):
    """Read *file_name* and return its contents as a list of unsigned
    64-bit integers (big-endian), zero-padding the tail to 8 bytes.

    :param file_name: path of the file to read
    :return: list of int, one per 8-byte block
    """
    with open(file_name, mode='rb') as f:
        data = f.read()
    # Pad with NUL bytes so the length is a multiple of 8. One expression
    # replaces the original per-byte join loop (which rebuilt the whole
    # buffer on every iteration).
    if len(data) % 8 != 0:
        data += b"\x00" * (8 - len(data) % 8)
    return [int.from_bytes(data[i:i + 8], byteorder='big', signed=False)
            for i in range(0, len(data), 8)]
def write_file(file_name, data):
    """Write each integer in *data* to *file_name* as 8 big-endian bytes.

    :param file_name: destination path
    :param data: list of 64-bit unsigned integers
    :return: None
    """
    # Assemble the full payload in one pass, then write it out.
    payload = b"".join(int(value).to_bytes(8, byteorder='big', signed=False)
                       for value in data)
    with open(file_name, mode='wb') as f:
        f.write(payload)
def read_in_bin(filename):
    """Return the file's content as a string of '0'/'1' characters,
    eight bits per byte, most significant bit first.

    :param filename: path of the file to read
    :return: str of '0' and '1' characters, length 8 * file size
    """
    with open(filename, mode='rb') as f:
        data = f.read()
    # '08b' keeps leading zeros. The original used format(a, 'b'), which
    # dropped them (0x01 -> '1'), making the concatenated bit string
    # ambiguous and not a faithful binary dump.
    return "".join(format(a, '08b') for a in data)
if __name__ == '__main__':
    # Manual smoke test; the path below is machine-specific.
    data = read_in_bin(r'D:\Users\Crowbar\PycharmProjects\GS15\test.txt')
print(data) | cuckool/GS15 | chiffrement/file_management.py | file_management.py | py | 1,359 | python | fr | code | 0 | github-code | 90 |
# AtCoder ABC084 C (Special Trains): for each start station h, compute the
# arrival time at station n. Input: n, then for each of the n-1 segments a
# line "c s f" = travel time, first departure time, departure interval.
n=int(input())
c=[]
s=[]
f=[]
for i in range(n-1):
    ci,si,fi=map(int,input().split())
    c.append(ci)
    s.append(si)
    f.append(fi)
for h in range(n):
    now=0
    for i in range(h,n-1):
        if(now <= s[i]):
            # Arrived before the first train: board it at s[i].
            now = s[i]+c[i]
        else:
            # Wait for the next departure after `now`: departures are at
            # s[i] + k*f[i]; -(-a//b) is integer ceiling division.
            now = s[i] + (-(-(now-s[i])//f[i]))*f[i] + c[i]
        # print("i:"+str(i)+" now:"+str(now))
print(now) | WAT36/procon_work | procon_python/src/atcoder/abc/past/C_084_SpecialTrains.py | C_084_SpecialTrains.py | py | 383 | python | en | code | 1 | github-code | 90 |
72639153577 | import os
import pandas as pd
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN, BorderlineSMOTE, SVMSMOTE
from sklearn.svm import SVC
import sys
import warnings
import logging
# Configure logging
logging.basicConfig(filename='Project_log.log', level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
# Suppress specific warnings
warnings.filterwarnings("ignore", category=FutureWarning, module="imblearn.over_sampling._smote.filter")
warnings.filterwarnings("ignore", category=FutureWarning, module="imblearn.over_sampling._adasyn")
warnings.filterwarnings("ignore", category=FutureWarning, module="imblearn.over_sampling")
# Dictionary of oversamplers
oversampler_dict = {
'oversampler_random': RandomOverSampler(sampling_strategy='auto', random_state=0),
'oversampler_smote': SMOTE(sampling_strategy='auto', random_state=0, k_neighbors=5, n_jobs=4),
'oversampler_adasyn': ADASYN(sampling_strategy='auto', random_state=0, n_neighbors=5, n_jobs=4),
'oversampler_border1': BorderlineSMOTE(sampling_strategy='auto', random_state=0, k_neighbors=5, m_neighbors=10,
kind='borderline-1', n_jobs=4),
'oversampler_border2': BorderlineSMOTE(sampling_strategy='auto', random_state=0, k_neighbors=5, m_neighbors=10,
kind='borderline-2', n_jobs=4),
# 'oversampler_svm': SVMSMOTE(sampling_strategy='auto', random_state=0, k_neighbors=5, m_neighbors=10, n_jobs=4,
# svm_estimator=SVC(kernel='linear')),
}
def oversampler_run(fold, target="income_above_limit", input_dir="data/processed", output_dir="data/processed/sample_data"):
    """Oversample the training split of one CV fold with every configured technique.

    Loads FE_output.csv from *input_dir*, splits it into train/validation
    by the `kfold` column, runs each sampler in `oversampler_dict` on the
    training split, and writes one `<technique>_<fold>_train.csv` /
    `<technique>_<fold>_test.csv` pair per technique into *output_dir*.
    Errors are logged rather than raised.

    :param fold: the kfold index used as the validation split
    :param target: name of the label column
    :param input_dir: directory containing FE_output.csv
    :param output_dir: directory the resampled CSVs are written to
    """
    try:
        logging.info(
            f"-----------------------------------------Data Oversampler Started-----------------------------------------")
        # Create the output directory if it doesn't exist
        os.makedirs(output_dir, exist_ok=True)
        # Load the processed data from the input file
        input_file = os.path.join(input_dir, "FE_output.csv")
        df = pd.read_csv(input_file)
        # training data is where kfold is not equal to provided fold
        df_train = df[df.kfold != fold].reset_index(drop=True)
        # validation data is where kfold is equal to provided fold
        df_valid = df[df.kfold == fold].reset_index(drop=True)
        # Separate the label from the features (kfold is dropped too).
        y = df_train[target]
        X = df_train.drop([target, "kfold"], axis=1)
        for oversampler_name, oversampler in oversampler_dict.items():
            # Generate synthetic minority samples, then rejoin features and label.
            X_resampled, y_resampled = oversampler.fit_resample(X, y)
            combined_df_train = pd.concat([X_resampled, y_resampled], axis=1)
            # Define file names for train and test data
            train_output_file = os.path.join(output_dir, f"{oversampler_name}_{fold}_train.csv")
            test_output_file = os.path.join(output_dir, f"{oversampler_name}_{fold}_test.csv")
            # Save the resampled train data
            combined_df_train.to_csv(train_output_file, index=False)
            # Save the validation data as the test data
            df_valid.to_csv(test_output_file, index=False)
            # Strip the prefix for human-readable log output.
            oversampler_name = oversampler_name.replace("oversampler_", "")
            # Print the oversampler type and fold after saving
            print(f"Generates synthetic samples using technique {oversampler_name} data for fold {fold}.")
            logging.info(f"Generates synthetic samples using technique {oversampler_name} data for fold {fold}.")
        logging.info(f"Fold {fold} oversampling completed successfully.")
    except Exception as e:
        # Top-level boundary: log and continue with the next fold.
        logging.error(f"Error in oversampler_run for fold {fold}: {str(e)}")
# Example usage:
if __name__ == "__main__":
    try:
        # NOTE(review): only argv[1] and argv[2] are used below, so the
        # `len(sys.argv) != 5` alternative looks vestigial — confirm
        # whether a 4-argument form was ever intended.
        if len(sys.argv) != 3 and len(sys.argv) != 5:
            raise ValueError("Invalid number of arguments. Usage: python script.py input_dir_path output_dir_path")
        input_d = sys.argv[1]
        output_d = sys.argv[2]
        # Run the oversampler for each of the 5 CV folds.
        for fold_ in range(5):
            oversampler_run(fold_, input_dir=input_d, output_dir=output_d)
        logging.info(
            f"-----------------------------------------Oversampling process completed.-----------------------------------------")
    except Exception as e:
        logging.error(f"Error: {str(e)}")
| zubairashfaque/ML_Predicting_Income_Inequality | src/oversampler.py | oversampler.py | py | 4,377 | python | en | code | 0 | github-code | 90 |
12827116794 | #!/usr/bin/python3
import struct, matplotlib.colors as colors
from jaettu import kuvat
from matplotlib.pyplot import *
from matplotlib.cm import get_cmap
import numpy as np
import xarray as xr
# First, maps for all years are produced with the programs pktied.c and
# pitkart_kartoista.c; from those, the 10/50/90 percentile maps are built
# with xkarttoja.c; then this script renders them as a 3x3 figure.
with open('kartmask.bin', 'rb') as f:
    kartmask = f.read()
kartmask = np.frombuffer(kartmask,dtype=np.int8)
# Land/boundary mask from the NEMO bathymetry file, flipped to image orientation.
dat = xr.open_dataset('bathy_meter.nc')
nemomask = np.array(dat.bdy_msk)
dat.close()
nemomask = np.flip(nemomask, 0)
# The colormap is made discrete (BoundaryNorm over 20-unit bins up to 220).
kartta = get_cmap('gnuplot2')
raja1=220
rajat = np.arange(0,raja1+1,20)
norm = colors.BoundaryNorm(rajat, kartta.N, clip=True)
# Panel titles per data-source letter; 'K' = observed ice charts.
otsikot = {'A':'Mean(MP,HC)', 'B':'EC-Earth', 'K':'Ice charts'}
kirjaimet = 'BAK'
fig = False
yrako = 0.035
Alue = (0, 0, 0.9, 1)
xgrid = 3; ygrid = 3
# Axes rectangle for grid cell (i, j) in figure coordinates.
alue = lambda i,j: (Alue[0]+Alue[2]/xgrid*i, Alue[1]+Alue[3]/ygrid*(ygrid-1-j),
                    Alue[2]/xgrid, Alue[3]/ygrid-yrako)
# The x spacing is smaller than the y spacing, and it differs between the
# simulation grid and the ice-chart grid; the ratios below use the value
# at the 62nd parallel.
simsuhde = 0.78
karsuhde = 0.94
# Crop empty margins from the left/right edges.
rvasen = 15
roikea = 15
for jkuva,arvo in enumerate(['10', '50', '90']):
    ikuva=0
    for kirjain in kirjaimet:
        # Ice charts ('K') keep all rows; simulations drop the first 10.
        y0 = 0 if kirjain == 'K' else 10
        with open("pituus%s_%s001.bin" %(arvo,kirjain), "rb") as f:
            sisalto = f.read()
        # Header: width, height and two unused int16 values.
        xpit,ypit,v0,v1 = struct.unpack('hhhh', sisalto[0:8])
        if not fig:
            # Create the figure lazily, sized from the first map's dimensions.
            fig = figure(figsize=((xpit-rvasen-roikea)/100*xgrid/Alue[2]*simsuhde, (ypit-y0)/100*ygrid/Alue[3]))
            fig.set_facecolor('#cccccc')
        fig.add_axes(alue(ikuva,jkuva))
        kuva = np.empty((ypit-y0,xpit), dtype=float)
        muoto = 'h'*xpit
        # Unpack one int16 row at a time, flipping vertically.
        for j in range(y0,ypit):
            kuva[ypit-1-j,:] = struct.unpack(muoto, sisalto[8+j*xpit*2:8+(j+1)*xpit*2])
        if kirjain == 'A':
            # Panel 'A' is the mean of the A-file and the D-file data.
            with open("pituus%s_D001.bin" %(arvo), "rb") as f:
                sisalto = f.read()
            for j in range(y0,ypit):
                kuva[ypit-1-j,:] += struct.unpack(muoto, sisalto[8+j*xpit*2:8+(j+1)*xpit*2])
            kuva /= 2
        if kirjain == 'K':
            if arvo == '10':
                # Reshape/flip the chart mask once (first 'K' panel only).
                kartmask = np.flip(np.reshape(kartmask,(ypit,xpit)), 0)
            kuva[kartmask==1] = np.nan
        else:
            kuva[nemomask[:-y0,:]==0] = np.nan
        kuva = kuva[:,rvasen:-roikea]
        imshow(kuva, cmap=kartta, interpolation='nearest')#, norm=norm) # NaN values break rendering unless interpolation is 'nearest'
        # Aspect ratio differs between chart and simulation grids.
        if kirjain == 'K':
            gca().set_aspect(1/karsuhde)
        else:
            gca().set_aspect(1/simsuhde)
        title("%s %s %%" %(otsikot[kirjain],arvo), fontsize=15)
        clim(0,raja1)
        axis(False)
        ikuva += 1
# Shared colorbar on the right edge of the figure.
alue = fig.add_axes((Alue[0]+Alue[2]-0.008, 0.1, 0.04, 0.8))
colorbar(cax=alue, ticks=rajat)
yticks(fontsize=15)
# Trailing '1' argument saves to file; otherwise show interactively.
if(sys.argv[-1] == '1'):
    savefig("%s/pituuskartta_33.png" %kuvat)
else:
    show()
| aerkkila/merianalyysi | pitkartta33.py | pitkartta33.py | py | 3,104 | python | fi | code | 0 | github-code | 90 |
41630560724 | """
Example file to download: https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2022-01.parquet
"""
import awswrangler as wr
from datetime import datetime
import pandas as pd
from prefect import task, get_run_logger
from typing import List
from urllib.error import HTTPError
from urllib.request import urlopen
from dataplatform.blocks.postgres_pandas import PostgresPandas
MAIN_URL = "https://d37ci6vzurychx.cloudfront.net/trip-data/"
@task
def get_files_to_process_aws(
    year: int = 2022, service_type: str = "yellow"
) -> List[str]:
    # List the matching objects in the public nyc-tlc bucket, then strip
    # the bucket prefix so only bare file names are returned.
    files = wr.s3.list_objects(f"s3://nyc-tlc/trip data/{service_type}_tripdata_{year}")
    return [f.replace("s3://nyc-tlc/trip data/", "") for f in files]
@task
def get_files_to_process(year: int = 2022, service_type: str = "yellow") -> List[str]:
    """Return the monthly files of *year* that actually exist on the CDN.

    Probes {service}_tripdata_{year}-01..12.parquet with an HTTP request
    and keeps the ones answering 200; months that are missing raise
    HTTPError and are skipped.
    """
    svc = f"{service_type}_tripdata_{year}"
    files = [f"{svc}-{str(i).zfill(2)}.parquet" for i in range(1, 13)]
    valid_files = []
    for file in files:
        try:
            # Use the response as a context manager so the connection is
            # closed; the original leaked one socket per probed URL.
            with urlopen(f"{MAIN_URL}{file}") as response:
                if response.getcode() == 200:
                    valid_files.append(file)
        except HTTPError:
            pass
    return valid_files
@task
def extract(file_name: str) -> pd.DataFrame:
    """Download one TLC parquet file into a DataFrame.

    Returns None when the file is not available on the CDN (HTTPError),
    logging a warning instead of raising.
    """
    logger = get_run_logger()
    try:
        raw_df = pd.read_parquet(f"{MAIN_URL}{file_name}")
        logger.info("Extracted %s with %d rows", file_name, len(raw_df))
        return raw_df
    except HTTPError:
        # Bug fix: the original passed no argument for the %s placeholder,
        # so the warning raised a formatting error / logged a bare template.
        logger.warning("File %s is not available in TLC Trip Record Data", file_name)
@task
def extract_from_s3(file_name: str) -> pd.DataFrame:
    # Read one TLC parquet file straight from the public S3 bucket.
    logger = get_run_logger()
    raw_df = wr.s3.read_parquet(f"s3://nyc-tlc/trip data/{file_name}")
    logger.info("Extracted %s with %d rows", file_name, len(raw_df))
    return raw_df
@task
def transform(
    df: pd.DataFrame, file_name: str, service_type: str = "yellow"
) -> pd.DataFrame:
    """Stamp lineage columns (source file, service type, ingest time) onto *df*."""
    df["file"] = file_name
    # Bug fix: the original did `df[service_type] = service_type`, which
    # created a column literally named e.g. "yellow" holding "yellow",
    # instead of a "service_type" column (compare the "file" column above).
    df["service_type"] = service_type
    df["ingested"] = datetime.utcnow().isoformat()
    return df
@task
def load_to_postgres(df: pd.DataFrame, tbl: str, if_exists: str = "append") -> None:
    # Load the frame into Postgres via the project's PostgresPandas block.
    logger = get_run_logger()
    block = PostgresPandas.load("default")
    block.load_data(df, tbl, if_exists)
    logger.info("%d rows loaded to table %s", len(df), tbl)
@task
def extract_jaffle_shop(dataset: str) -> pd.DataFrame:
    # Fetch one of the dbt jaffle_shop seed CSVs straight from GitHub.
    file = f"https://raw.githubusercontent.com/dbt-labs/jaffle_shop/main/seeds/{dataset}.csv"
    return pd.read_csv(file)
| anna-geller/prefect-dataplatform | dataplatform/tasks.py | tasks.py | py | 2,470 | python | en | code | 66 | github-code | 90 |
30154539215 | #!/usr/bin/env python
import os, shutil
def options(ctx):
    # waf option hook: let the user point at a non-default Palm PDK install.
    ctx.add_option("--pdk-path", action="store", default="/opt/PalmPDK", dest="pdk_path")
def configure(ctx):
    # waf configure hook: use the PDK's ARM cross-compiler toolchain.
    prefix = ctx.options.pdk_path + "/arm-gcc/bin/arm-none-linux-gnueabi-"
    ctx.env.CXX = prefix + "gcc"
    ctx.env.AR = prefix + "ar"
    ctx.check_tool("g++")
def build(ctx):
    # waf build hook: cross-compile the amity binary for webOS, stage the
    # app directory, and run palm-package to produce the .ipk.
    pdk = ctx.options.pdk_path
    src = "../src"
    ctx.program(
        source=ctx.path.find_node(src).ant_glob("**/*.cpp"),
        target="app/amity",
        lib=[
            "pdl",
            "SDL", "SDL_image", "GLES_CM",
            "js_static", "nspr4", "plc4", "plds4",
            "curl", "rt" ],
        use=[ "js_static", "curl" ],
        includes=[
            src,
            pdk + "/include",
            pdk + "/include/SDL",
            "../external/build/webos/include",
            "../external/build/webos/include/js",
            ],
        libpath=[
            pdk+"/device/lib",
            "../../external/build/webos/lib",
            ],
        # Cortex-A8/NEON flags plus the PDK sysroot for cross-compilation.
        cxxflags=
            "-O2 -D__WEBOS__ " +
            "-mcpu=cortex-a8 -mfpu=neon -mfloat-abi=softfp " +
            "--sysroot="+pdk+"/arm-gcc/sysroot "
        ,
        linkflags=[ "-Wl,--allow-shlib-undefined" ]
    )
    # Stage the app metadata and mark the binary executable in the package.
    ctx(rule="cp ${SRC} ${TGT}", source="appinfo.json", target="app/appinfo.json")
    ctx(rule="echo filemode.755=amity > app/package.properties", shell=True, always=True)
    ctx.add_group() # Finish the above before running palm-package
    ctx(rule="palm-package app", always=True)
# Install and run the package on the device
def push(ctx):
    """Install the built .ipk on the connected device and launch the app."""
    for command in ("palm-install build/com.threerings.amity_1.0.0_all.ipk",
                    "palm-launch com.threerings.amity"):
        os.system(command)
| aduros/amity | webos/wscript | wscript | 1,714 | python | en | code | 3 | github-code | 90 | |
35024870297 | #!/usr/bin/env python
# -*- coding: utf-8 -*- #
AUTHOR = 'Michael Jurasovic'
SITENAME = 'Michael Jurasovic'
SITEURL = 'https://jurasofish.github.io'
PATH = 'content'
TIMEZONE = 'Australia/Melbourne'
DEFAULT_LANG = 'English'
# Feed generation is usually not desired when developing
FEED_ALL_ATOM = 'feeds/all.atom.xml'
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
# Blogroll
LINKS = (
    ('Github', 'https://github.com/jurasofish'),
)
# Social widget
SOCIAL = (
    ('Github', 'https://github.com/jurasofish'),
)
MENUITEMS = (
    ('GitHub', 'https://github.com/jurasofish'),
)
DEFAULT_PAGINATION = False
# Generated site goes straight into docs/ (GitHub Pages convention).
OUTPUT_PATH = 'docs/'
# Uncomment following line if you want document-relative URLs when developing
#RELATIVE_URLS = True
PLUGIN_PATHS = ["plugins"]
PLUGINS = [
    'pelican.plugins.render_math',
    'i18n_subsites',
]
THEME = 'themes/pelican-bootstrap3'
JINJA_ENVIRONMENT = {'extensions': ['jinja2.ext.i18n']}
# BOOTSTRAP_THEME = 'flatly'
BOOTSTRAP_THEME = 'yeti'
PYGMENTS_STYLE = 'autumn'
CUSTOM_CSS = 'static/css/custom.css'
STATIC_PATHS = [
    'images',
    'extra',
]
EXTRA_PATH_METADATA = {'extra/custom.css': {'path': 'static/css/custom.css'}}
HIDE_SIDEBAR = True
PADDED_SINGLE_COLUMN_STYLE = True
DISPLAY_CATEGORIES_ON_MENU = False
# NOTE(review): enables relative URLs unconditionally, despite the
# development-only commented toggle above -- confirm intended.
RELATIVE_URLS = True
| jurasofish/jurasofish.github.io | pelicanconf.py | pelicanconf.py | py | 1,340 | python | en | code | 0 | github-code | 90 |
15082453226 | import cv2
import tensorflow as tf
import numpy as np
import time
# Load the saved model
model_path = 'C:/Users/ee20m/Documents/Advance_model'
loaded_model = tf.saved_model.load(model_path)
# NOTE(review): unused below -- candidate for removal.
input_tensor_info = loaded_model.signatures['serving_default'].inputs[0]
# Set up video capture
cap = cv2.VideoCapture(0)
# Define the desired input shape
input_shape = (256, 256, 3)
# Initialize variables for FPS calculation
frame_count = 0
start_time = time.time()
while True:
    ret, frame = cap.read()
    if not ret:
        break
    # Preprocess the frame
    frame = cv2.resize(frame, (input_shape[1], input_shape[0]))
    frame = frame / 255.0
    # Expand dimensions to create a batch of size 1
    frame_batch = np.expand_dims(frame, axis=0)
    # Convert the frame batch to a tensor
    frame_tensor = tf.constant(frame_batch, dtype=tf.float32)
    # Run the inference
    output = loaded_model(frame_tensor)
    # Extract the depth map from the output tensor
    depth_map = output.numpy()[0, :, :, 0]
    # Normalize the depth map for visualization
    # NOTE(review): divides by (max - min); a constant-valued prediction
    # would divide by zero -- confirm the model never emits one.
    depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
    # Rescale the depth map to 0-255
    depth_map = (depth_map * 255).astype(np.uint8)
    # Convert the depth map to grayscale
    depth_map_gray = cv2.cvtColor(depth_map, cv2.COLOR_GRAY2BGR)
    # Display the depth map
    cv2.imshow('Depth Estimation', depth_map_gray)
    frame_count += 1
    # Calculate FPS every 5 frames
    # (cumulative average since start, not an instantaneous rate)
    if frame_count % 5 == 0:
        end_time = time.time()
        fps = frame_count / (end_time - start_time)
        print(f'FPS: {fps:.2f}')
    if cv2.waitKey(1) == ord('q'):
        break
# Release resources
cap.release()
cv2.destroyAllWindows()
| SahinMjks/Monocular-Depth-Estimation-and-Object-Detection-for-Lower-End-Device | Depth_estimation_live_3.py | Depth_estimation_live_3.py | py | 1,806 | python | en | code | 0 | github-code | 90 |
20312647129 | # -*- coding:utf-8 -*-
import scrapy
from shiyanlou.items import SylgitItem
class SylgitSpider(scrapy.Spider):
    """Crawl shiyanlou's GitHub repository listing into SylgitItem records."""
    name = 'shiyanlou-github'
    start_urls = ['https://github.com/shiyanlou?tab=repositories']
    def parse(self, response):
        # One item per repository card on the listing page; the detail
        # counts are filled in later by parse_repo.
        for repo in response.css('.public'):
            item = SylgitItem()
            item['name'] = repo.xpath('div/h3/a/text()').extract_first().strip()
            item['update_time'] = repo.xpath('div/relative-time/text()').extract_first()
            repo_url = 'https://github.com/shiyanlou/' + item['name']
            request = scrapy.Request(repo_url, callback=self.parse_repo)
            request.meta['item'] = item
            yield request
        # Follow the "Next" pagination link.
        # NOTE(review): the trailing [0] raises IndexError on the last page,
        # so the None check below never triggers -- confirm.
        next_page = response.xpath("//a[@rel='nofollow']").re('href="(.*?)">Next')[0]
        if next_page is not None:
            yield scrapy.Request(next_page, callback=self.parse)
    def parse_repo(self, response):
        # Fill commit/branch/release counts from the repo's summary bar.
        item = response.meta['item']
        numbers_summary = response.xpath('//ul[@class="numbers-summary"]/li/a/span/text()').extract()
        item['commits'] = numbers_summary[0].split()[0]
        item['branches'] = numbers_summary[1].split()[0]
        item['releases'] = numbers_summary[2].split()[0]
        yield item
| gantrol/pySpider | shiyanlou/shiyanlou/spiders/sylgit.py | sylgit.py | py | 1,248 | python | en | code | 1 | github-code | 90 |
27925928171 | TYPE_NIL = 0
TYPE_NUMBER = 1
TYPE_STRING = 2
TYPE_TABLE = 3
TYPE_TORCH = 4
TYPE_BOOLEAN = 5
TYPE_FUNCTION = 6
TYPE_RECUR_FUNCTION = 8
LEGACY_TYPE_RECUR_FUNCTION = 7
import sys
import struct
from array import array
from collections import namedtuple
from functools import wraps
import torch
import torch.legacy.nn as nn
import torch.cuda
from torch._thnn import type2backend
from torch._utils import _import_dotted_name
HAS_CUDA = torch.cuda.is_available()
LuaFunction = namedtuple('LuaFunction', ['size', 'dumped', 'upvalues'])
class hashable_uniq_dict(dict):
"""
Subclass of dict with equality and hashing semantics changed:
equality and hashing is purely by reference/instance, to match
the behaviour of lua tables.
Supports lua-style dot indexing.
This way, dicts can be keys of other dicts.
"""
def __hash__(self):
return id(self)
def __getattr__(self, key):
return self.get(key)
def __eq__(self, other):
return id(self) == id(other)
# TODO: dict's __lt__ etc. still exist
class TorchObject(object):
    """
    Lightweight wrapper for a deserialized torch object (used by
    `add_trivial_class_reader`).  Attribute and item access both delegate
    to the wrapped table (missing keys yield None); `torch_typename`
    reports the original torch class name.  Instances compare by identity,
    as lua objects do.
    """

    def __init__(self, typename, obj):
        self._typename = typename
        self._obj = obj

    def torch_typename(self):
        return self._typename

    def __getattr__(self, k):
        return self._obj.get(k)

    def __getitem__(self, k):
        return self._obj.get(k)

    def __repr__(self):
        return "TorchObject(%s, %s)" % (self._typename, repr(self._obj))

    def __str__(self):
        return repr(self)

    def __dir__(self):
        return list(self._obj.keys()) + ['torch_typename']
reader_registry = {}
def get_python_class(typename):
    """Map a lua/torch type name (e.g. 'torch.CudaTensor') to the matching
    Python class.

    'Cuda'-prefixed classes live in the '.cuda' submodule, and the bare
    CUDA 'Storage'/'Tensor' aliases resolve to their Float variants.
    """
    module, _, cls_name = typename.rpartition('.')
    if cls_name.startswith('Cuda'):
        module = module + '.cuda'
        cls_name = cls_name[4:]
    if cls_name == 'Storage' or cls_name == 'Tensor':
        cls_name = 'Float' + cls_name
    return _import_dotted_name(module + '.' + cls_name)
def make_tensor_reader(typename):
    """Build a reader that deserializes a `typename` tensor from a t7 stream."""
    python_class = get_python_class(typename)

    def read_tensor(reader, version):
        # source:
        # https://github.com/torch/torch7/blob/master/generic/Tensor.c#L1243
        ndim = reader.read_int()
        # read size:
        size = torch.LongStorage(reader.read_long_array(ndim))
        # read stride:
        stride = torch.LongStorage(reader.read_long_array(ndim))
        # storage offset (lua is 1-based, python is 0-based):
        storage_offset = reader.read_long() - 1
        # read storage:
        storage = reader.read()
        if storage is None or ndim == 0 or len(size) == 0 or len(stride) == 0:
            # empty torch tensor
            return python_class()
        return python_class().set_(storage, storage_offset, torch.Size(size), tuple(stride))
    return read_tensor
def make_storage_reader(typename):
    """Build a reader that deserializes a `typename` storage from a t7 stream."""
    python_class = get_python_class(typename)
    # TODO: be smarter about this
    element_size = python_class().element_size()

    def read_storage(reader, version):
        # source:
        # https://github.com/torch/torch7/blob/master/generic/Storage.c#L244
        size = reader.read_long() * element_size
        return python_class.from_buffer(reader.f.read(size), 'native')
    return read_storage
def register_torch_class(obj_kind, reader_factory):
    """Register readers for every dtype x CPU/CUDA variant of `obj_kind`
    ('Storage' or 'Tensor') in the global `reader_registry`."""
    for t in ['Double', 'Float', 'Half', 'Long', 'Int', 'Short', 'Char', 'Byte']:
        for prefix in ['', 'Cuda']:
            if prefix == 'Cuda' and not HAS_CUDA:
                continue
            if t == 'Half' and prefix == '':
                # CPU Half variants are not registered here.
                continue
            if prefix == 'Cuda' and t == 'Float':
                # lua torch spells the CUDA float type without 'Float'.
                cls_name = 'torch.Cuda' + obj_kind
            else:
                cls_name = 'torch.' + prefix + t + obj_kind
            reader_registry[cls_name] = reader_factory(cls_name)
register_torch_class('Storage', make_storage_reader)
register_torch_class('Tensor', make_tensor_reader)
################################################################################
# Reader function for tds.Vector and tds.Hash
################################################################################
def tds_Vec_reader(reader, version):
    """Deserialize a tds.Vec: an element count followed by that many values."""
    count = reader.read_long()
    return [reader.read() for _ in range(count)]
def tds_Hash_reader(reader, version):
    """Deserialize a tds.Hash: a pair count followed by key/value pairs."""
    count = reader.read_long()
    result = {}
    for _ in range(count):
        key = reader.read()
        result[key] = reader.read()
    return result
reader_registry['tds.Vec'] = tds_Vec_reader
reader_registry['tds.Hash'] = tds_Hash_reader
################################################################################
# Reader function for nn modules
################################################################################
def _load_backend(obj):
    """Attach a THNN backend to a deserialized legacy nn module.

    Uses the explicit `_type` when present; otherwise guesses from any
    tensor attribute, and additionally patches `updateOutput` so the
    backend is pinned from the first real input seen.
    """
    if hasattr(obj, '_type'):
        obj._backend = type2backend[obj._type]
        return
    # Try to find tensor attributes and infer type from them
    for key in dir(obj):
        attr = getattr(obj, key)
        if isinstance(attr, torch.Tensor):
            try:
                obj._backend = type2backend[attr.type()]
            except KeyError:
                pass
    # Monkey patch the forward to capture the type of input
    updateOutput_orig = obj.updateOutput

    def updateOutput_patch(*args):
        input = args[0]
        # Inputs may be nested tables; descend to the first tensor.
        while not isinstance(input, torch.Tensor):
            input = input[0]
        obj._backend = type2backend[input.type()]
        # Restore the original method once the backend is known.
        obj.updateOutput = updateOutput_orig
        return obj.updateOutput(*args)
    obj.updateOutput = updateOutput_patch
def nn_reader(cls):
    """Return a reader that rebuilds a `cls` module from its serialized
    attribute table (bypassing __init__)."""
    def read_nn_class(reader, version):
        obj = cls.__new__(cls)
        attributes = reader.read()
        obj.__dict__.update(attributes)
        _load_backend(obj)
        return obj
    return read_nn_class
reader_registry.update({('nn.' + name): nn_reader(module)
for name, module in nn.__dict__.items()
if name[0] != '_' and name[0].upper() == name[0]})
def custom_reader(cls):
    """Decorator factory: register `fn` as an in-place fix-up run after the
    generic `nn_reader` has deserialized a `cls` instance."""
    def reader_factory(fn):
        base = nn_reader(cls)

        def wrapper(reader, version):
            obj = base(reader, version)
            fn(reader, version, obj)
            return obj
        reader_registry['nn.' + cls.__name__] = wrapper
        return wrapper
    return reader_factory
def BatchNorm_reader(reader, version, obj):
    """Upgrade legacy (version < 2) BatchNormalization modules in place.

    Old torch checkpoints stored ``running_std`` (the inverse standard
    deviation, 1/sqrt(var + eps)); newer code expects ``running_var``,
    recovered as std^-2 - eps.
    """
    if version < 2 and hasattr(obj, 'running_std'):
        # Bug fix: derive the variance from running_std -- the previous code
        # read the not-yet-existing running_var attribute.
        obj.running_var = obj.running_std.pow(-2).add(-obj.eps)
        del obj.running_std
for prefix in ['', 'Spatial', 'Volumetric']:
name = prefix + 'BatchNormalization'
custom_reader(getattr(nn, name))(BatchNorm_reader)
@custom_reader(nn.Transpose)
def Transpose_reader(reader, version, obj):
    # Convert each lua 1-based (dim, dim) swap pair to 0-based indexing.
    obj.permutations = [[swap[0] - 1, swap[1] - 1] for swap in obj.permutations]
@custom_reader(nn.SpatialDivisiveNormalization)
def SpatialDivisiveNormalization_reader(reader, version, obj):
    # Re-increment the dims of the nested estimator modules.
    # NOTE(review): presumably this undoes a generic lua->python dim
    # decrement that must not apply here -- confirm against legacy nn.
    obj.stdestimator.modules[-2].dim += 1
    obj.meanestimator.modules[-1].dim += 1
@custom_reader(nn.SpatialContrastiveNormalization)
def SpatialContrastiveNormalization_reader(reader, version, obj):
    # Deliberately unsupported: fail loudly instead of loading a broken module.
    raise RuntimeError("loading of SpatialContrastiveNormalization is disabled for now")
@custom_reader(nn.GradientReversal)
def GradientReversal_reader(reader, version, obj):
    # Older checkpoints predate the scaling factor; default it to 1.
    # setattr is required because 'lambda' is a Python reserved word.
    if version < 2:
        setattr(obj, 'lambda', 1)
@custom_reader(nn.VolumetricAveragePooling)
def VolumetricAveragePooling_reader(reader, version, obj):
    # Legacy modules lack padding/rounding options; fill in the old defaults.
    obj.padT, obj.padH, obj.padW = 0, 0, 0
    obj.ceil_mode = False
    obj.count_include_pad = True
################################################################################
# Functions for patching objects so that they work with legacy modules
################################################################################
def registry_addon(fn):
    """Turn `fn` into a registrar: calling the decorated name with a module
    name (and extra args) wraps that module's registered reader so that
    `fn(obj, *args, **kwargs)` mutates every deserialized instance.

    Note the registrar itself returns None -- it is used purely for its
    side effect on `reader_registry`.
    """
    def wrapper_factory(module_name, *args, **kwargs):
        module_name = 'nn.' + module_name
        build_fn = reader_registry[module_name]

        def wrapper(reader, version):
            obj = build_fn(reader, version)
            fn(obj, *args, **kwargs)
            return obj
        reader_registry[module_name] = wrapper
    return wrapper_factory
@registry_addon
def attr_map(obj, attribute_map):
    # Rename serialized lua attributes (src) to their python names (dst).
    for src, dst in attribute_map.items():
        setattr(obj, dst, getattr(obj, src))
        delattr(obj, src)
@registry_addon
def ensure_attr(obj, *attrs):
    # Guarantee the listed attributes exist (as None) even when absent
    # from the serialized stream.
    for attr in attrs:
        if not hasattr(obj, attr):
            setattr(obj, attr, None)
@registry_addon
def make_none_attr(obj, *attrs):
    # Unconditionally reset the listed attributes to None.
    for attr in attrs:
        setattr(obj, attr, None)
@registry_addon
def decrement(obj, *attrs):
    # Convert lua 1-based indices/dimensions to python 0-based.
    for attr in attrs:
        value = getattr(obj, attr)
        value -= 1
        setattr(obj, attr, value)
@registry_addon
def decrement_positive(obj, *attrs):
    # Like `decrement`, but leaves non-positive sentinel values untouched.
    for attr in attrs:
        value = getattr(obj, attr)
        if value > 0:
            value -= 1
        setattr(obj, attr, value)
@registry_addon
def storage_to_size(obj, *attrs):
    # Replace serialized LongStorage sizes with torch.Size tuples.
    for attr in attrs:
        value = getattr(obj, attr)
        setattr(obj, attr, torch.Size(value))
@registry_addon
def ensure_type(obj, type_map):
    # Cast each attribute via the named converter method (e.g. 'long').
    for attr, converter in type_map.items():
        value = getattr(obj, attr)
        setattr(obj, attr, getattr(value, converter)())
ensure_attr('Linear', 'bias', 'gradWeight', 'gradBias', 'addBuffer')
ensure_attr('CAddTable', 'inplace')
ensure_attr('SpatialFractionalMaxPooling', 'outW', 'outH', 'ratioW', 'ratioH')
ensure_attr('BatchNormalization', 'weight', 'bias', 'gradWeight', 'gradBias',
'save_mean', 'save_std')
ensure_attr('SpatialBatchNormalization', 'weight', 'bias', 'gradWeight', 'gradBias',
'save_mean', 'save_std')
ensure_attr('VolumetricBatchNormalization', 'weight', 'bias', 'gradWeight', 'gradBias')
ensure_attr('LookupTable', 'maxNorm', 'normType', '_gradOutput', '_sorted', '_indices')
ensure_attr('MixtureTable', 'table')
ensure_attr('WeightedEuclidean', 'fastBackward')
ensure_attr('VolumetricMaxPooling', 'ceil_mode')
ensure_attr('BCECriterion', 'buffer')
ensure_attr('SpatialClassNLLCriterion', 'weights')
ensure_attr('ClassNLLCriterion', 'weights')
ensure_attr('ParallelCriterion', 'repeatTarget')
ensure_attr('MultiMarginCriterion', 'weights')
ensure_attr('SpatialConvolution', 'bias', 'gradWeight', 'gradBias', '_gradOutput')
ensure_attr('SpatialCrossMapLRN', 'scale')
ensure_attr('Dropout', 'inplace')
make_none_attr('SpatialConvolution', 'finput', 'fgradInput', '_input')
attr_map('ReLU', {'val': 'value'})
attr_map('Threshold', {'val': 'value'})
attr_map('Unsqueeze', {'pos': 'dim'})
attr_map('HardShrink', {'lambda': 'lambd'})
attr_map('SoftShrink', {'lambda': 'lambd'})
attr_map('GradientReversal', {'lambda': 'lambd'})
attr_map('SpatialAdaptiveMaxPooling', {'H': 'h', 'W': 'w'})
decrement('Index', 'dimension')
decrement('SelectTable', 'index')
decrement('SplitTable', 'dimension')
decrement_positive('JoinTable', 'dimension')
decrement('Parallel', 'inputDimension', 'outputDimension')
decrement('Concat', 'dimension')
decrement('DepthConcat', 'dimension')
decrement('Squeeze', 'dim')
decrement('Unsqueeze', 'dim')
decrement('Replicate', 'dim')
decrement('MixtureTable', 'dim')
decrement('Narrow', 'dimension', 'index')
decrement('NarrowTable', 'offset')
decrement('LookupTable', 'paddingValue')
decrement('SpatialConvolutionMap', 'connTable')
decrement('SpatialFullConvolutionMap', 'connTable')
decrement('Select', 'dimension', 'index')
decrement('Padding', 'dim', 'index')
decrement('PartialLinear', 'partition')
decrement_positive('Sum', 'dimension')
decrement_positive('Max', 'dimension')
decrement_positive('Min', 'dimension')
decrement_positive('Mean', 'dimension')
storage_to_size('View', 'size')
storage_to_size('DepthConcat', 'outputSize')
storage_to_size('MixtureTable', 'size')
ensure_type('PartialLinear', {'partition': 'long'})
class T7ReaderException(Exception):
    """Raised when a t7 stream contains data this reader cannot interpret."""
    pass
class T7Reader:
    """Deserializer for Torch7 (.t7) binary streams."""

    def __init__(self,
                 fileobj,
                 list_heuristic=True,
                 int_heuristic=True,
                 unknown_classes=False,
                 long_size=None):
        """
        Params:
        * `fileobj` file object to read from, must be actual file object
          as it must support array, struct, and numpy
        * `list_heuristic`: automatically turn tables with only consecutive
          positive integral indices into lists (default True)
        * `int_heuristic`: cast all whole floats into ints (default True)
        * `unknown_classes`: deserialize unrecognized torch classes as
          generic TorchObject wrappers instead of raising (default False)
        * `long_size`: on-disk size of lua longs in bytes (4 or 8); None
          uses the host's native long size
        """
        self.f = fileobj
        # t7 streams store back-reference indices; memo maps index -> object.
        self.memo = {}
        self.list_heuristic = list_heuristic
        self.int_heuristic = int_heuristic
        self.unknown_classes = unknown_classes
        self.long_size = long_size

    def _read(self, fmt):
        """Unpack one struct format from the stream; scalars are unwrapped."""
        sz = struct.calcsize(fmt)
        result = struct.unpack(fmt, self.f.read(sz))
        if len(result) == 1:
            return result[0]
        return result

    def read_boolean(self):
        return self.read_int() == 1

    def read_int(self):
        return self._read('i')

    def read_long(self):
        if self.long_size is None:
            return self._read('l')
        # Bug fix: was `self.long_size is 8`, which relied on CPython's
        # small-int caching; compare by value instead.
        elif self.long_size == 8:
            return self._read('q')
        else:
            return self._read('i')

    def read_long_array(self, n):
        """Read `n` longs; uses array.fromfile for the native-size fast path."""
        if self.long_size is not None:
            lst = []
            for i in range(n):
                lst.append(self.read_long())
            return lst
        else:
            LONG_SIZE_ARR = 'q' if sys.version_info[0] == 3 else 'l'
            arr = array(LONG_SIZE_ARR)
            arr.fromfile(self.f, n)
            return arr.tolist()

    def read_float(self):
        return self._read('f')

    def read_double(self):
        return self._read('d')

    def read_string(self):
        size = self.read_int()
        byte_str = self.f.read(size)
        if not isinstance(byte_str, str):
            byte_str = str(byte_str, 'ascii')
        return byte_str

    def read_number(self):
        # Lua numbers are always doubles on disk.
        x = self.read_double()
        # Extra checking for integral numbers:
        if self.int_heuristic and x.is_integer():
            return int(x)
        return x

    def memoize_index(fn):
        # Decorator for stream types preceded by a back-reference index:
        # a repeated index returns the previously-deserialized object.
        @wraps(fn)
        def wrapper(self, *args, **kwargs):
            index = self.read_int()
            if index in self.memo:
                return self.memo[index]
            result = fn(self, *args, **kwargs)
            self.memo[index] = result
            return result
        return wrapper

    @memoize_index
    def read_function(self):
        size = self.read_int()
        dumped = self.f.read(size)
        upvalues = self.read()
        return LuaFunction(size, dumped, upvalues)

    @memoize_index
    def read_object(self):
        version_str = self.read_string()
        if version_str.startswith('V '):
            version = int(version_str.partition(' ')[2])
            cls_name = self.read_string()
        else:
            cls_name = version_str
            version = 0  # created before existence of versioning
        if cls_name in reader_registry:
            return reader_registry[cls_name](self, version)
        if self.unknown_classes:
            return TorchObject(cls_name, self.read())
        raise T7ReaderException(("don't know how to deserialize Lua class "
                                 "{}. If you want to ignore this error and load this object "
                                 "as a dict, specify unknown_classes=True in reader's "
                                 "constructor").format(cls_name))

    def _can_be_list(self, table):
        """True if `table`'s keys are exactly 1..n (lua array convention)."""
        def is_natural(key):
            # Bug fix: the predicate referenced an undefined name `k`
            # (NameError on integral float keys) and mis-parenthesized the
            # `and`, which let any int -- even non-positive -- pass.
            return ((isinstance(key, int) or
                     (isinstance(key, float) and key.is_integer())) and
                    key > 0)
        natural_keys = all(map(is_natural, table.keys()))
        if not natural_keys:
            return False
        key_sum = sum(table.keys())
        n = len(table)
        # Distinct natural keys summing to n(n+1)/2 must be exactly 1..n.
        return n * (n + 1) == 2 * key_sum

    @memoize_index
    def read_table(self):
        size = self.read_int()
        table = hashable_uniq_dict()  # custom hashable dict, can be a key
        for i in range(size):
            k = self.read()
            v = self.read()
            table[k] = v
        if self.list_heuristic and self._can_be_list(table):
            return [table[i] for i in range(1, len(table) + 1)]
        return table

    def read(self):
        """Read and return the next value from the stream."""
        typeidx = self.read_int()
        if typeidx == TYPE_NIL:
            return None
        elif typeidx == TYPE_NUMBER:
            return self.read_number()
        elif typeidx == TYPE_BOOLEAN:
            return self.read_boolean()
        elif typeidx == TYPE_STRING:
            return self.read_string()
        elif (typeidx == TYPE_FUNCTION or typeidx == TYPE_RECUR_FUNCTION or
                typeidx == LEGACY_TYPE_RECUR_FUNCTION):
            return self.read_function()
        elif typeidx == TYPE_TORCH:
            return self.read_object()
        elif typeidx == TYPE_TABLE:
            return self.read_table()
        else:
            raise T7ReaderException("unknown type id {}. The file may be "
                                    "corrupted.".format(typeidx))
def load_lua(filename, **kwargs):
    """
    Loads the given t7 file using default settings; kwargs are forwarded
    to `T7Reader`.
    """
    with open(filename, 'rb') as f:
        return T7Reader(f, **kwargs).read()
| sibozhang/Text2Video | venv_vid2vid/lib/python3.7/site-packages/torch/utils/serialization/read_lua_file.py | read_lua_file.py | py | 17,997 | python | en | code | 381 | github-code | 90 |
def toNum(self):
    """Parse a numeric string: int when possible, otherwise float.

    Raises ValueError when the string is not numeric at all.
    """
    try:
        return int(self)
    except ValueError:
        return float(self)
# Read "operand operator operand" (e.g. "3 + 4") from stdin and evaluate.
n1, a, n2 = input("계산할 식을 입력하시오\n").split()
n1 = toNum(n1)
n2 = toNum(n2)
if a == '+':
    op = n1 + n2
elif a == '-':
    op = n1 - n2
elif a == '*':
    op = n1 * n2
elif a == '/':
    op = n1 / n2
else:
    # Unsupported operator sentinel; printed verbatim below.
    op = 'unknown'
print(n1, a, n2, '=', op)
| kjdrv9/First_Python | Done/calculator.py | calculator.py | py | 417 | python | en | code | 0 | github-code | 90 |
def allocation():
    """Binary-search the minimum truck capacity that loads all weights
    (read from stdin, kept in order) onto at most `cars` trucks.

    Feasibility of a candidate capacity is decided by `check`.
    """
    n, cars = map(int, input().split())
    weight = [int(input()) for i in range(n)]
    left = max(weight)    # capacity must at least fit the heaviest item
    right = sum(weight)   # one truck carrying everything always works
    while left < right:
        mid = (left + right) // 2
        if check(weight, cars, mid):
            right = mid
        else:
            left = mid + 1
    print(left)
def check(weight, cars, capacity):
    """Return True when `weight`, loaded greedily in order, fits into at
    most `cars` trucks of the given `capacity`."""
    used = 1
    load = 0
    for w in weight:
        if load + w > capacity:
            # Current truck is full: start a new one with this item.
            used += 1
            load = w
        else:
            load += w
    return used <= cars
if __name__ == '__main__':
allocation() | Aasthaengg/IBMdataset | Python_codes/p02270/s474826128.py | s474826128.py | py | 599 | python | en | code | 0 | github-code | 90 |
17996678179 | N,M = map(int,input().split())
ABC = []
for i in range(M):
a,b,c=map(int,input().split())
c=-c
ABC.append([a,b,c])
def BellmanFord():
    """Bellman-Ford on the negated edge weights (reads globals N and ABC).

    Returns inf when a relaxation reaching vertex N still succeeds in the
    final pass -- presumably indicating a score-improving cycle on a path
    to the goal, i.e. an unbounded answer.  Otherwise returns the negated
    shortest distance, i.e. the maximum achievable score.
    """
    inf=float("Inf")
    dist=[inf for i in range(N)]
    dist[0]=0
    for i in range(N):
        for edge in ABC:
            if dist[edge[1]-1] > dist[edge[0]-1] + edge[2]:
                dist[edge[1]-1] = dist[edge[0]-1] + edge[2]
                # An update to the goal vertex during the N-th pass.
                if i==N-1 and edge[1]-1==N-1: return inf
    return -dist[N-1]
print(BellmanFord()) | Aasthaengg/IBMdataset | Python_codes/p03722/s474464090.py | s474464090.py | py | 451 | python | en | code | 0 | github-code | 90 |
74819717737 | import glob
from PIL import Image
import cv2
import numpy as np
# Draw the minimum-area bounding box of the largest foreground contour on
# every ./images/*.jpg and save the result under ./images-result/.
images = glob.glob('./images/*.jpg')
for index, image_path in enumerate(images):
    image = cv2.imread(image_path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (25, 25), 0)
    thresh = cv2.adaptiveThreshold(gray, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                   cv2.THRESH_BINARY_INV, 55, 10)
    # Clean up speckle noise before contour extraction.
    thresh = cv2.erode(thresh, None, iterations=2)
    thresh = cv2.dilate(thresh, None, iterations=2)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    # Bug fix: cv2.findContours returns (contours, hierarchy) on OpenCV
    # 2.x/4.x but (image, contours, hierarchy) on 3.x; the hard-coded
    # cnts[1] silently selected the hierarchy on non-3.x builds.
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    c = max(cnts, key=cv2.contourArea)
    rect = cv2.minAreaRect(c)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    cv2.drawContours(image, [box], 0, (0, 0, 255), 5)
    cv2.imwrite('./images-result/' + image_path.split('/')[-1], image)
    print("Done " + str(index) + ' !!')
print("Finished !!")
| anmolaithinker/Boundary-Detection | BulkImages.py | BulkImages.py | py | 919 | python | en | code | 1 | github-code | 90 |
class Worker:
    """An employee record with a class-wide raise multiplier.

    Class attributes:
        employee_counter: number of Worker instances created so far.
        raise_amt: multiplier used by apply_raise(); shared by all workers.
    """
    employee_counter = 0
    raise_amt = 1.04

    def __init__(self, first, last, salary):
        self.first = first
        self.last = last
        self.salary = salary
        # Derived, lowercased contact address.
        self.email = str.lower(first + "." + last + "@gmail.com")
        Worker.employee_counter += 1

    def fullname(self):
        """Return 'First Last ' (trailing space kept for compatibility)."""
        return '{} {} '.format(self.first, self.last)

    def full_details(self):
        """Multi-line summary of name, email and salary."""
        return 'Full name : {} {} \nEmail: {} \nSalary: {}'.format(
            self.first, self.last, self.email, self.salary)

    def apply_raise(self):
        """Multiply the salary by raise_amt, truncating to an int."""
        self.salary = int(self.raise_amt * self.salary)

    @classmethod  # class methods work with the class instead of the instance
    def set_raise_amount(cls, amount):
        """Change raise_amt on the class, affecting every instance."""
        cls.raise_amt = amount
if __name__ == "__main__":
    # Demo: instance creation and the shared class-level raise amount.
    sm = Worker("Sergei", "Miroshnikov", 130000)
    mk = Worker("Muki", "Buhbut", 4500)
    print(sm.full_details())
    print(mk.full_details())
    """
    print(Worker.fullname(bob)) # this line is actually WHAT HAPPENS inside python
    """
    print(sm.raise_amt)
    print(mk.raise_amt)
    Worker.set_raise_amount(1.14)
    # this method will change CLASS VARIABLE even if i will run this on instance of the class
    # e.g sm.set_raise_amount(1.14) will change raise amount for all class instances
    print(sm.raise_amt)
    print(mk.raise_amt)
    print(sm.salary)
    sm.apply_raise()
    print(sm.salary)
34855994853 | #What is a clean, Pythonic way to have multiple constructors in Python
class Sample:
    """Holds a computed answer; classmethods act as alternate constructors."""

    def __init__(self, ans):
        self.ans = ans

    @classmethod
    def twoargument(cls, args):
        # Answer is the sum of the two elements.
        return cls(args[0] + args[1])

    @classmethod
    def threearguments(cls, args):
        # Answer is the product of the three elements.
        return cls(args[0] * args[1] * args[2])
# Pick the alternate constructor matching each row's length.
lst = [[1,2], [1,2,3]]
for i in lst:
    if len(i) == 2:
        obj = Sample.twoargument(i)
        print(obj.ans)
    elif len(i) == 3:
        obj = Sample.threearguments(i)
        print(obj.ans)
18263854359 | n = int(input())
s = input()
q = int(input())
class SegmentTree:
    """Segment tree (padded to a power of two) supporting point updates and
    half-open range queries under an associative `func` with identity `one`."""
    def __init__(self, a, func=max, one=-10 ** 18):
        self.logn = (len(a) - 1).bit_length()
        self.n = 1 << self.logn          # leaf count (next power of two)
        self.func = func
        self.one = one                   # identity element of func
        self.b = [self.one] * (2 * self.n - 1)
        # Fill the leaves, then build internal nodes bottom-up.
        for i, j in enumerate(a):
            self.b[i + self.n - 1] = j
        for i in reversed(range(self.n - 1)):
            self.b[i] = self.func(self.b[i * 2 + 1], self.b[i * 2 + 2])
        # ll[i]/rr[i]: the [left, right) span of leaf indices covered by
        # tree node i, precomputed level by level from the root down.
        self.ll = []
        self.rr = []
        i = self.n
        for _ in range(self.logn+1):
            for j in range(0, self.n, i):
                self.ll.append(j)
                self.rr.append(j + i)
            i //= 2
    def get_item(self, i):
        # Current value stored at leaf i.
        return self.b[i + self.n - 1]
    def update(self, index, x):
        # Overwrite leaf `index` with x, then refresh its ancestors.
        i = index + self.n - 1
        self.b[i] = x
        while i != 0:
            i = (i - 1) // 2
            self.b[i] = self.func(self.b[i * 2 + 1], self.b[i * 2 + 2])
    def update_func(self, index, x):
        # Combine x into leaf `index` via func, then refresh its ancestors.
        i = index + self.n - 1
        self.b[i] = self.func(self.b[i], x)
        while i != 0:
            i = (i - 1) // 2
            self.b[i] = self.func(self.b[i * 2 + 1], self.b[i * 2 + 2])
    def get_segment(self, l, r, i=0):
        # func over leaves in [l, r), recursing from node i.
        ll = self.ll[i]
        rr = self.rr[i]
        if l <= ll and rr <= r:
            return self.b[i]            # node fully inside the query
        elif rr <= l or r <= ll:
            return self.one             # node fully outside the query
        else:
            return self.func(self.get_segment(l, r, i * 2 + 1),
                             self.get_segment(l, r, i * 2 + 2))
def get_int(s):
    """One-hot 26-bit mask for lowercase letter `s` (bit 0 = 'a')."""
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    return 1 << alphabet.index(s)
# Leaves hold one-hot letter bitmasks; range queries OR them together.
seg = SegmentTree([get_int(i) for i in s], int.__or__, 0)
for _ in range(q):
    # NOTE(review): `q` (the query count) is shadowed by the query type
    # here; safe only because the loop bound was already evaluated.
    q, i, j = input().split()
    if q == "1":
        seg.update(int(i) - 1, get_int(j))
    else:
        aa = seg.get_segment(int(i) - 1, int(j))
        ans = 0
        # Popcount of the OR'ed mask = number of distinct letters.
        for i in range(26):
            if (aa >> i) & 1 == 1:
                ans += 1
        print(ans)
| Aasthaengg/IBMdataset | Python_codes/p02763/s391060691.py | s391060691.py | py | 2,017 | python | en | code | 0 | github-code | 90 |
26704221549 | from django.urls import path
from .views import (
IndexView,
UserLoginView, UserHomeView, UserLogoutView,
ContactFormView, PrivacyPolicyView
)
app_name = 'account'
# Routes for the account app; reverse them as 'account:<name>'.
urlpatterns = [
    path('', IndexView.as_view(), name='index'),
    path('login', UserLoginView.as_view(), name='login'),
    path('home', UserHomeView.as_view(), name='home'),
    path('logout', UserLogoutView.as_view(), name='logout'),
    path('contact', ContactFormView.as_view(), name='contact'),
    path('privacy-policy', PrivacyPolicyView.as_view(), name='privacy_policy')
]
| takyam15/my-django | account/urls.py | urls.py | py | 561 | python | en | code | 0 | github-code | 90 |
18420802409 | import sys
import itertools
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
s_readline = sys.stdin.readline
N, K = map(int, readline().split())
S = list(map(int, s_readline().strip()))
def solve():
    """Reads the globals S (0/1 sequence) and K, prints the best length.

    Groups S into runs; each 0-run is merged with the 1-run that follows
    it (zl), while ol keeps the plain 1-run lengths.  A sliding window of
    up to K merged chunks plus the preceding 1-run then gives the maximum
    stretch obtainable -- presumably by flipping at most K zero-runs.
    """
    s_grp = itertools.groupby(S)
    zl = []       # lengths of (0-run + following 1-run) chunks
    ol = []       # lengths of 1-runs
    llen = 0      # length of the chunk currently being accumulated
    f = False     # True while a 0-run is waiting for its trailing 1-run
    for k, l in s_grp:
        l = list(l)
        if k == 0:
            llen += len(l)
            f = True
        elif k == 1:
            ol.append(len(l))
            if f:
                llen += len(l)
                zl.append(llen)
                llen = 0
    else:
        # A trailing 0-run has no 1-run to absorb; flush it as-is.
        if k == 0:
            zl.append(llen)
    if S[0] == 0:
        # Align ol with zl when the sequence starts with zeros.
        ol = [0] + ol
    # print(zl)
    # print(ol)
    zl_N = len(zl)
    # Prefix sums over the merged chunks for O(1) window sums.
    zl_tsum = [0 for _ in range(zl_N + 1)]
    for i in range(zl_N):
        zl_tsum[i + 1] = zl_tsum[i] + zl[i]
    Km = min(K, zl_N)
    ans = 0
    for i in range(zl_N - Km + 1):
        t = zl_tsum[Km + i] - zl_tsum[i] + ol[i]
        ans = max(ans, t)
    print(ans)
if __name__ == '__main__':
solve()
| Aasthaengg/IBMdataset | Python_codes/p03074/s872207846.py | s872207846.py | py | 1,081 | python | en | code | 0 | github-code | 90 |
71026306217 | from copy import copy
import logging
import subprocess
from build_migrator.helpers import (
get_module_target,
ModuleTypes,
get_source_file_reference,
)
from build_migrator.common.argument_parser_ex import ArgumentParserEx
import build_migrator.common.os_ext as os_ext
from .base.compiler_parser import CompilerParser
logger = logging.getLogger(__name__)
class ReplacePrefixArg(object):
    """Rebuild a parsed option as a single short-prefix token.

    Examples for prefix "-a":
      ["--arch=value"] -> "-avalue"
      ["-A", "value"]  -> "-avalue"
      ["-Avalue"]      -> "-avalue"
    """

    def __init__(self, replace_prefix_arg=None):
        # prefix_arg: -D, -I, etc (a dash plus exactly one character)
        assert len(replace_prefix_arg) == 2, replace_prefix_arg
        self._replace_prefix_arg = replace_prefix_arg

    def __call__(self, args):
        if not (args and self._replace_prefix_arg):
            return "".join(args)
        # A single token may carry the value after '='; split it apart.
        parts = args[0].split("=") if len(args) == 1 else list(args)
        if len(parts) == 2:
            parts[0] = self._replace_prefix_arg
        elif len(parts) == 1:
            parts[0] = self._replace_prefix_arg + parts[0][2:]
        else:
            assert False, parts
        return "".join(parts)
# 1. [-d, A=1] => -DA=1
# 1. [-dA=1] => -DA=1
def prefix_arg_toupper(args):
    """Uppercase the option letter of a -d/-u style argument and rejoin it.

    ["-d", "A=1"] -> "-DA=1";  ["-dA=1"] -> "-DA=1"
    """
    if not args:
        return "".join(args)
    parts = list(args)
    if len(parts) == 1:
        # Fused form: only the leading "-x" portion is uppercased.
        parts[0] = parts[0][0:2].upper() + parts[0][2:]
    else:
        parts[0] = parts[0].upper()
    return "".join(parts)
class Yasm(CompilerParser):
    """Parser for captured YASM assembler command lines.

    Translates a `yasm ...` invocation into a build_migrator object-library
    target describing the source file, flags, include dirs and discovered
    dependencies.
    """
    filename_re = os_ext.Windows.get_program_path_re("yasm")
    priority = 7

    def __init__(self, context, ignore_compile_flags=None):
        CompilerParser.__init__(
            self, context, ignore_compile_flags=ignore_compile_flags
        )

        # YASM arguments
        # http://www.tortall.net/projects/yasm/manual/html/yasm-options.html
        self.parser = ArgumentParserEx()
        self.parser.set_defaults(compile_flags=[], include_dirs=[])
        # Unless overridden below, raw argument text lands in compile_flags.
        self.parser.set(dest=None, raw_dest="compile_flags")
        # select architecture
        self.parser.add_argument("--arch", "-a", raw_format=ReplacePrefixArg("-a"))
        # select parser
        self.parser.add_argument("--parser", "-p", raw_format=ReplacePrefixArg("-p"))
        # select preprocessor
        self.parser.add_argument("--preproc", "-r", raw_format=ReplacePrefixArg("-r"))
        # object format
        self.parser.add_argument("--oformat", "-f", raw_format=ReplacePrefixArg("-f"))
        # debugging format
        # NOTE(review): the flag is -g but the raw form is rebuilt with a
        # "-d" prefix -- confirm this is intentional.
        self.parser.add_argument("--dformat", "-g", raw_format=ReplacePrefixArg("-d"))
        # name of object-file output
        self.parser.add_argument("--objfile", "-o", dest="output", raw_dest=None)
        # select machine
        self.parser.add_argument("--machine", "-m", raw_format=ReplacePrefixArg("-m"))
        # treat all sized operands as if `strict' was used
        self.parser.add_argument("--force-strict", action="store_true")
        # add include path
        self.parser.add_argument(
            "-I", action="append", dest="include_dirs", ignore_case=True, raw_dest=None
        )
        # pre-include file
        self.parser.add_argument(
            "-P", action="append", dest="preinclude_files", default=[], raw_dest=None
        )
        # pre-define a macro
        self.parser.add_argument("-D", ignore_case=True, raw_format=prefix_arg_toupper)
        # undefine a macro
        self.parser.add_argument("-U", ignore_case=True, raw_format=prefix_arg_toupper)
        # disable all warnings
        self.parser.add_argument("-w", action="store_true")
        # enables/disables warning
        self.parser.add_argument("-W", prefix=True)
        # redirect error messages to file
        self.parser.add_argument("-E")
        # redirect error messages to stdout
        self.parser.add_argument("-s", action="store_true")
        # select error/warning message style (`gnu' or `vc')
        self.parser.add_argument("-X")
        # prepend argument to name of all external symbols
        self.parser.add_argument("--prefix")
        # append argument to name of all external symbols
        self.parser.add_argument("--suffix", "--postfix")

        self.parser.add_argument("file", dest="file", raw_dest=None)

    # TODO: this function is mostly the same for GCC, NASM, YASM. Make a base class.
    def _add_implicit_dependencies(
        self,
        compiler,
        dependencies,
        compile_flags,
        include_dirs,
        preinclude_files,
        source,
        cwd,
    ):
        """Run `yasm --preproc-only -M` to discover files `source` includes
        and register each of them into `dependencies`."""
        include_dir_args = ["-I" + d for d in include_dirs]
        preinclude_args = ["-P" + p for p in preinclude_files]
        cmd = (
            [compiler, "--preproc-only", "-M"]
            + compile_flags
            + preinclude_args
            + include_dir_args
            + [source]
        )
        p = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
        )
        stdout, stderr = p.communicate()
        if type(stdout) is not str:
            stdout = stdout.decode("utf-8", "replace")
        if type(stderr) is not str:
            stderr = stderr.decode("utf-8", "replace")
        retcode = p.poll()
        if retcode:
            # Log and fall through: dependency discovery is best-effort.
            logger.error(
                "Command '{}' returned non-zero exit code {}\n{}".format(
                    " ".join(cmd), retcode, stderr
                )
            )
        implicit_dependencies = []
        for line in stdout.splitlines():
            # Make-style rule output: "<target>: dep dep \" continuations.
            files = line.rstrip("\\").lstrip().split(" ")
            for f in files:
                if not f or f.endswith(":") or f == source:
                    continue
                implicit_dependencies.append(f)
        return [
            self.context.get_file_arg(self.context.normalize_path(dep), dependencies)
            for dep in implicit_dependencies
        ]

    # TODO: this function is mostly the same for all compiler parsers. Make a base class.
    def parse(self, target):
        """Parse one captured command; return an object-library target, or
        the target unchanged when it is not a yasm invocation."""
        tokens = target.get("tokens") or []
        if not tokens:
            return target

        if not self.filename_re.match(tokens[0]):
            return target

        compiler = tokens.pop(0)
        compiler = self.context.apply_path_aliases(self.context.normalize_path(compiler, ignore_working_dir=True))

        namespace = self.parser.parse_args(tokens)

        dependencies = []
        output = self.context.get_output(self.context.normalize_path(namespace.output))
        self.process_namespace(namespace)
        self._add_implicit_dependencies(
            compiler,
            dependencies,
            namespace.compile_flags,
            namespace.include_dirs,
            namespace.preinclude_files,
            namespace.file,
            self.context.working_dir,
        )

        # Pre-include files become normalized -P flags.
        compile_flags = namespace.compile_flags
        for preinclude in namespace.preinclude_files:
            path = self.context.get_file_arg(
                self.context.normalize_path(preinclude), dependencies
            )
            compile_flags.append("-P" + path)

        source = get_source_file_reference(
            self.context.get_file_arg(
                self.context.normalize_path(namespace.file), dependencies
            ),
            "YASM",
            compile_flags=compile_flags,
        )
        self.process_namespace(source)

        include_dirs = []
        for dir in namespace.include_dirs:
            # include dir may not exist, it's normal
            dir = self.context.normalize_path(dir)
            arg = self.context.get_dir_arg(dir, dependencies)
            if arg:
                include_dirs.append(arg)

        return get_module_target(
            ModuleTypes.object_lib,
            None,
            output,
            dependencies=dependencies,
            compile_flags=[],
            sources=[source],
            include_dirs=include_dirs,
        )
__all__ = ["Yasm"]
| KasperskyLab/BuildMigrator | build_migrator/parsers/yasm.py | yasm.py | py | 8,021 | python | en | code | 30 | github-code | 90 |
15049748129 | from math import sin, cos
from pylx16a.lx16a import *
import time
# Open the serial bus to the LX-16A servo controller (100 ms timeout).
LX16A.initialize("/dev/ttyUSB0", 0.1)
try:
    # Servos at bus IDs 1 and 2, both allowed the full 0-240 degree range.
    servo1 = LX16A(1)
    servo2 = LX16A(2)
    servo1.set_angle_limits(0, 240)
    servo2.set_angle_limits(0, 240)
except ServoTimeoutError as e:
    print(f"Servo {e.id_} is not responding. Exiting...")
    quit()
# Sweep both servos forever along sine/cosine waves (90 degrees out of
# phase), centred at 60 degrees with 60 degree amplitude, every 50 ms.
t = 0
while True:
    servo1.move(sin(t) * 60 + 60)
    servo2.move(cos(t) * 60 + 60)
    time.sleep(0.05)
    t += 0.1
| ethanlipson/PyLX-16A | hello-world.py | hello-world.py | py | 449 | python | en | code | 30 | github-code | 90 |
2409158369 | from io import BytesIO
import docx
import requests
from docx.shared import Pt
year = 2023
month = 7
day = 28
bc = "白班"
dutier = "陈焕鑫 王胜保"
path = "./temple_raw.docx"
new_path = "./templer_real.docx"
data = {
"eng_room": [
{
"title": "1、各设备运行正常。",
"value": 1
},
{
"title": "2、动环、监控、BA系统运行正常。",
"value": 0
}
],
"files": [
{
"title": "1、日常巡检表、水电表齐全,填写完整。",
"value": 1
},
{
"title": "2、交接班表齐全,填写完整。",
"value": 1
}
],
"tools": [
{
"title": "1、工具仪表是否齐全。",
"value": 1
},
{
"title": "2、值班手机、对讲机是否完好。",
"value": 1
},
{
"title": "3、U盘、仓库应急卡、钥匙是否完好。",
"value": 1
}
]
}
docx_file = requests.get("https://opspre.imyunxia.com/file/2023_07_28/2023_07_28_14_23_10_81.docx")
doc = docx.Document(BytesIO(docx_file.content))
# for i, paragraph in enumerate(doc.paragraphs):
# if i == 1:
# print(paragraph.text)
# temp_text = paragraph.text
# temp_text = temp_text.replace("[1]", str(year))
# temp_text = temp_text.replace("[2]", str(month))
# temp_text = temp_text.replace("[3]", str(day))
# temp_text = temp_text.replace("[4]", bc)
# paragraph.text = temp_text
#
# for run in paragraph.runs:
# run.font.name = "宋体"
# run.font.size = Pt(10)
# run.font.bold = True
table = doc.tables[0]
# 值班人员
# dutier_cell = table.rows[0].cells[0]
# dutier_cell.text = dutier_cell.text.replace("[1]", dutier)
# for paragraph in dutier_cell.paragraphs:
# for run in paragraph.runs:
# run.font.name = "宋体"
# run.font.size = Pt(7.5)
# run.font.bold = True
# print(cell.text)
# cell.text = "2333333333333"
def confirm_items(table, row, col, data, title):
    """Fill the cell at (row, col) of *table* with *title* followed by each
    item's title plus a check/cross mark, then style every run as bold
    7.5 pt SimSun.

    *data* is an iterable of dicts with keys "title" (str) and "value"
    (truthy means confirmed).
    """
    cell = table.rows[row].cells[col]
    parts = [
        item["title"] + ("( √ )" if item["value"] else "( × )")
        for item in data
    ]
    cell.text = f"{title} " + '\t'.join(parts)
    for paragraph in cell.paragraphs:
        for run in paragraph.runs:
            font = run.font
            font.name = "宋体"
            font.size = Pt(7.5)
            font.bold = True
# 机房运行情况交接
# eng_room_cell = table.rows[1].cells[0]
# eng_room_info_list = []
# for info in data["eng_room"]:
# status_str = "( √ )" if info["value"] else "( × )"
# eng_room_info_list.append(info["title"] + status_str)
#
# eng_room_cell.text = "机房运行情况交接: " + '\t'.join(eng_room_info_list)
# for paragraph in eng_room_cell.paragraphs:
# for run in paragraph.runs:
# run.font.name = "宋体"
# run.font.size = Pt(7.5)
# run.font.bold = True
# 工具、器具情况交接
# tools_cell = table.rows[2].cells[0]
# tools_info_list = []
# for info in data["tools"]:
# status_str = "( √ )" if info["value"] else "( × )"
# tools_info_list.append(info["title"] + status_str)
#
# tools_cell.text = "工具、器具情况交接: " + '\t'.join(tools_info_list)
# for paragraph in tools_cell.paragraphs:
# for run in paragraph.runs:
# run.font.name = "宋体"
# run.font.size = Pt(7.5)
# run.font.bold = True
# 文件情况交接
# confirm_items(table, 3, 0, data["files"], "文件情况交接")
# 总结
summary = """交接情况总结:
1. 当班交接班表齐全,填写完整
2. 当班工具、仪表数量交接完整
3. 当班事件已交接清楚
交班人确认:常亮 甄雨澎 林景俊1
接班人确认:马渊耀 潘文亮 陈奋
"""
# Write the summary text into the last row of the handover table.
summery_cell = table.rows[-1].cells[0]
summery_cell.text = summary
# Serialize the edited document into memory and upload it to the OPS service.
upload_url = "https://opspre.imyunxia.com/upload/manage/upload"
byte_io = BytesIO()
doc.save(byte_io)
# NOTE(review): the whole buffer is posted under project_id 1 — confirm this
# is the intended target project.
resp = requests.post(upload_url, data={"project_id": 1}, files={"file": byte_io.getvalue()})
print(resp.json())
print()
# Also keep a local copy of the filled-in document.
doc.save(new_path)
# if __name__ == '__main__':
# with open("template.docx","r") as f:
# print(f.read()) | ZainLiu/YXtest | 操作word文档/op.py | op.py | py | 4,426 | python | en | code | 0 | github-code | 90 |
14079900911 | from django.urls import path
from . import views
# URL routes for the accounts app, grouped by feature area.
urlpatterns = [
    # Authentication and account settings.
    path('register/', views.registerPage, name="register"),
    path('login/', views.loginPage, name="login"),
    path('logout/', views.logoutUser, name="logout"),
    path('userSettings/', views.userPage, name="user-page"),
    # Dashboard home.
    path('', views.home, name="home"),
    # Product CRUD.
    path('products/', views.products, name="products"),
    path('create_product/', views.createProduct, name="create_product"),
    path('update_product/<str:pk>/', views.updateProduct, name="update_product"),
    path('delete_product/<str:pk>/', views.deleteProduct, name="delete_product"),
    # Customer CRUD.
    path('customerlist/', views.customerlist, name="customerlist"),
    path('customerDetail/<str:pk>/', views.customerDetail, name="customerdetail"),
    path('create_customer/', views.createCustomer, name="create_customer"),
    path('update_customer/<str:pk>/',
         views.updateCustomer, name="update_customer"),
    path('delete_customer/<str:pk>/',
         views.deleteCustomer, name="delete_customer"),
    # Order CRUD.
    path('orderslist/', views.orderlist, name="orderslist"),
    path('create_order/', views.createOrder, name="create_order"),
    path('update_order/<str:pk>/', views.updateOrder, name="update_order"),
    path('delete_order/<str:pk>/', views.deleteOrder, name="delete_order"),
    # Staff listing.
    path('stafflist/', views.stafflist, name="stafflist"),
]
| Soj2wal/ord-process-live | accounts/urls.py | urls.py | py | 1,420 | python | en | code | 0 | github-code | 90 |
6425707736 | # task 8
import os
import gensim.models
from w2vec import w2vec_vectorize
# Vectorize every document of the 20-newsgroups test split with the trained
# word2vec model and write one TSV row per document:
#   <category>/<filename>\t<dim0>\t<dim1>...
model = gensim.models.Word2Vec.load('../../assets/w2v_model')
test_path = "C:/Users/User/Desktop/dataset/20news-bydate-test/"
# Collect rows in a list and join once at the end — repeated ``result += line``
# string concatenation is quadratic over thousands of documents.
rows = []
count = 0
for category in os.listdir(test_path):
    for filename in os.listdir(test_path + category):
        print(filename, count)
        count += 1
        # NOTE(review): files are opened with the platform default encoding —
        # confirm the corpus is readable that way.
        with open(test_path + category + '/' + filename) as file:
            vector = w2vec_vectorize(file.read(), model)
        # One tab-separated column per embedding dimension, rounded to 6 places.
        embedding_str = "".join('\t' + str(round(embedding, 6)) for embedding in vector)
        rows.append(category + '/' + filename + embedding_str + '\n')
with open('../../assets/annotated-corpus/test-embeddings.tsv', 'w') as result_file:
    result_file.write("".join(rows))
| MANASLU8/nlp-22-autumn | projects/cake-crusher/source/vectorization/test_vectorizer.py | test_vectorizer.py | py | 810 | python | en | code | 0 | github-code | 90 |
# Names for the numbers 0..20; the index matches the value.
# Fix: the original tuple contained ' six' with a stray leading space, which
# printed "You enter a  six" (double space) for input 6.
numNames = ('zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven',
            'eight', 'nine', 'ten', 'eleven', 'twelve', 'thirteen', 'fourteen',
            'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen', 'twenty')
# Read numbers until the sentinel -1; re-prompt on out-of-range values.
while True:
    userNum = int(input('Enter a number between 0 and 20(if you want stop, enter -1): '))
    if userNum == -1:
        print('program finished')
        break
    while not 0 <= userNum <= 20:
        userNum = int(input('Enter a valid number. Enter a number between 0 and 20: '))
    print(f'You enter a {numNames[userNum]}')
| vytorrennan/Curso-de-python | ex/ex072.py | ex072.py | py | 565 | python | en | code | 0 | github-code | 90 |
37246229411 | # Unit 2 Lab
# Sum Calculator Program
# create function
def sum(numbers):
    """Return the total of all values in *numbers*.

    NOTE(review): this deliberately shadows the builtin ``sum`` because the
    script below calls it by this name; keep the name for compatibility.
    """
    result = 0
    for value in numbers:
        result += value
    return result
# accumulator for the user-entered numbers
number_list = []
# read numbers until the user types 'done'
# NOTE(review): int(user) raises ValueError on non-numeric input — there is
# no error handling, so a typo terminates the program.
while True:
    user = input("Enter a number or 'done' to quit: ")
    if user == 'done':
        break
    else:
        number_list.append(int(user))
# display the collected numbers and their total; ``sum`` here is the
# user-defined function above, which shadows the builtin of the same name
print(f'You entered {number_list}')
print(f'The sum of the numbers is {sum(number_list)}')
18376643369 | N = 1000000007
def fac(n, r, N):
    """Falling factorial n*(n-1)*...*(n-r+1), reduced modulo N after each step."""
    product = 1
    for offset in range(r):
        product = product * (n - offset) % N
    return product
def combi(n, r, N):
    """Binomial coefficient C(n, r) modulo the prime N.

    Out-of-range arguments (negative n or r, or r > n) yield 0.  The modular
    inverse of r! is obtained via Fermat's little theorem (pow(x, N-2, N)).
    """
    if r < 0 or n < 0 or n < r:
        return 0
    # use the smaller of r and n-r to shorten both products
    r = min(r, n - r)
    numerator = fac(n, r, N)
    inverse_denominator = pow(fac(r, r, N), N - 2, N)
    return numerator * inverse_denominator % N
# Read n and k, then for each i in [1, k] print C(k-1, i-1) * C(n-k+1, i) mod N.
# NOTE(review): this looks like a stars-and-bars count (splitting k items into
# i contiguous blocks placed among n-k others) — verify against the original
# problem statement.
n , k = map(int, input().split())
for i in range(1,k+1):
    print(combi(k-1,i-1,N)*combi(n-k+1,i,N)%N)
| Aasthaengg/IBMdataset | Python_codes/p02990/s121702208.py | s121702208.py | py | 370 | python | en | code | 0 | github-code | 90 |
7331734551 | """ Вычислить число c заданной точностью d
Пример:
- при $d = 0.001, π = 3.141.$ $10^{-1} ≤ d ≤10^{-10}$ """
# Input is expected to look like "0.001": its length minus two (the leading
# "0" and the ".") gives the number of requested decimal digits.
acc = list(input("Задайте точность для вычисления pi: "))
n = len(acc)-2
# BBP (Bailey–Borwein–Plouffe) series for pi, summed term by term and
# rounded to n decimal places.
pi = round(sum(1/16**x*(4/(8*x + 1) - 2/(8*x + 4) - 1/(8*x + 5) - 1/(8*x + 6)) for x in range(n)), n)
print(pi)
""" Задайте натуральное число N. Напишите программу, которая составит список простых множителей числа N. """
num = int(input('Введите число: '))
def fac(n):
    """Return the prime factors of *n* (with multiplicity) in ascending order."""
    factors = []
    candidate = 2
    # trial division up to sqrt(n)
    while candidate * candidate <= n:
        while n % candidate == 0:
            factors.append(candidate)
            n //= candidate
        candidate += 1
    # whatever remains above 1 is itself prime
    if n > 1:
        factors.append(n)
    return factors
print(fac(num))
""" Задайте последовательность чисел.
Напишите программу, которая выведет список неповторяющихся элементов исходной последовательности. """
from random import randint
# Bounds and length of the random sample.  Renamed from ``min``/``max`` so
# the builtins are not shadowed.
low = 1
high = 10
size = 20
my_list = [randint(low, high) for _ in range(size)]
# ``set`` already yields the distinct elements; the original per-element
# counting loop computed values it never used and is removed.
print(f'Рандом: {my_list}')
print(f'Ряд без повторений: {set(my_list)}')
""" Задана натуральная степень k.
Сформировать случайным образом список коэффициентов (значения от 0 до 100)
многочлена и записать в файл многочлен степени k.
"""
from random import randint
import itertools
k = int(input('Задайте натуральную степень k: '))
ratios_list = list([randint(0, 101) for i in range(k+1)])
if ratios_list[0] == 0:
ratios_list[0] = randint(1, 101)
def get_polynomial(k, ratio_list):
    """Render a degree-k polynomial equation string from its coefficients.

    ``ratio_list`` holds k+1 coefficients from the x^k term down to the
    constant; zero coefficients are skipped and a unit coefficient on a
    non-leading term is dropped (" 1*x" -> " x").
    """
    variables = ['*x^'] * (k - 1) + ['*x']
    pieces = []
    terms = itertools.zip_longest(ratio_list, variables, range(k, 1, -1), fillvalue='')
    for coeff, var, power in terms:
        if coeff != 0:
            pieces.extend([coeff, var, power, ' + '])
    # replace the trailing " + " separator with the equation terminator
    pieces[-1] = ' = 0'
    return "".join(map(str, pieces)).replace(' 1*x', ' x')
polynomial = get_polynomial(k, ratios_list)
print(polynomial)
with open('Polynomial.txt', 'w') as data:
data.write(polynomial) | pepZ1/pyHW | homework4.py | homework4.py | py | 2,483 | python | ru | code | 0 | github-code | 90 |
40957875184 | """
Question: Create a function that takes any string as input and returns the number of words for that string.
"""
def count_words(strng):
    """Return the number of whitespace-separated words in *strng*."""
    return len(strng.split())
take = input("User input:")
print(count_words(take)) | MMR1998-DEV/python-excersise-workbook | 35.py | 35.py | py | 264 | python | en | code | 0 | github-code | 90 |
13159581963 | import requests
from telegram import ParseMode, Update
from telegram.ext import CallbackContext, run_async
from SophiaBot import dispatcher
from SophiaBot.modules.disable import DisableAbleCommandHandler
@run_async
def covid(update: Update, context: CallbackContext):
    """Reply with COVID-19 statistics fetched from the corona.lmao.ninja API.

    With no argument: global totals.  With one argument (e.g. ``/covid india``):
    totals for that country.
    """
    message = update.effective_message
    # split off at most one argument after the command itself
    text = message.text.split(" ", 1)
    if len(text) == 1:
        r = requests.get("https://corona.lmao.ninja/v2/all").json()
        reply_text = f"**Global Totals** Ъда\nCases: {r['cases']:,}\nCases Today: {r['todayCases']:,}\nDeaths: {r['deaths']:,}\nDeaths Today: {r['todayDeaths']:,}\nRecovered: {r['recovered']:,}\nActive: {r['active']:,}\nCritical: {r['critical']:,}\nCases/Mil: {r['casesPerOneMillion']}\nDeaths/Mil: {r['deathsPerOneMillion']}"
    else:
        variabla = text[1]
        r = requests.get(f"https://corona.lmao.ninja/v2/countries/{variabla}").json()
        reply_text = f"**Cases for {r['country']} Ъда**\nCases: {r['cases']:,}\nCases Today: {r['todayCases']:,}\nDeaths: {r['deaths']:,}\nDeaths Today: {r['todayDeaths']:,}\nRecovered: {r['recovered']:,}\nActive: {r['active']:,}\nCritical: {r['critical']:,}\nCases/Mil: {r['casesPerOneMillion']}\nDeaths/Mil: {r['deathsPerOneMillion']}"
    message.reply_text(reply_text, parse_mode=ParseMode.MARKDOWN)
COVID_HANDLER = DisableAbleCommandHandler(["covid", "corona"], covid)
dispatcher.add_handler(COVID_HANDLER)
__mod_name__ = "Covid"
__help__ = """
*Commands*
Рђб /covid - To Get Global Stats of Covid.
Рђб /covid [COUNTRY] - To Get Stats of A Single Country.
Рђб /corona - Same as `/covid`
"""
| dihanofficial/SophiaBot | SophiaBot/modules/covid_info.py | covid_info.py | py | 1,597 | python | en | code | 44 | github-code | 90 |
25300840476 | #
# @lc app=leetcode.cn id=188 lang=python3
#
# [188] 买卖股票的最佳时机 IV
#
# @lc code=start
class Solution:
    # https://leetcode-cn.com/problems/best-time-to-have-and-no-stock-iv/solution/si-wei-dao-tu-zheng-li-dpshu-zu-gou-jian-e97c/
    # Approach 1: two-state DP, space not optimized.
    # have[i][j] = best profit after day i, holding one share, with j completed trades.
    # no[i][j]   = best profit after day i, holding nothing,  with j completed trades.
    # Boundary (day 0): only have[0][0] = -prices[0] ("buy on day 0") and
    # no[0][0] = 0 ("do nothing") are reachable; have[0][1..k] and no[0][1..k]
    # are set to -inf to mark illegal states.  A trade is counted on the sell,
    # so no[i][j] transitions from have[i-1][j-1]; at j == 0 there is nothing
    # to sell from, so no[i][0] stays 0 and is skipped in the inner loop.
    def maxProfit(self, k: int, prices: List[int]) -> int:
        if k == 0 or not prices:
            return 0
        n = len(prices)
        # at most n // 2 profitable round trips fit in n days
        k = min(k, n//2)
        have = [[0] * (k+1) for _ in range(n)]
        no = [[0] * (k+1) for _ in range(n)]
        for i in range(1, k+1):
            have[0][i] = no[0][i] = float('-inf')
        have[0][0] = -prices[0]
        no[0][0] = 0
        for i in range(1,n):
            have[i][0] = max(have[i-1][0], no[i-1][0] - prices[i])
            for j in range(1,k+1):
                have[i][j] = max(have[i-1][j], no[i-1][j] - prices[i])
                no[i][j] = max(no[i-1][j], have[i-1][j-1] + prices[i])
        return max(no[-1])
    # Approach 2: the same DP with the day dimension compressed to 1-D arrays.
    # NOTE: this second definition shadows the one above; only this version runs.
    def maxProfit(self, k: int, prices: List[int]) -> int:
        if k == 0 or not prices:
            return 0
        n = len(prices)
        k = min(k, n//2)
        have = [-prices[0]] + [float('-inf')] * k
        no = [0] + [float('-inf')] * k
        for i in range(1,n):
            have[0] = max(have[0], no[0] - prices[i])
            # have/no depend only on the previous day's values, so after
            # collapsing the 2-D table to 1-D the inner loop must run
            # backwards — the same trick as the space-optimized 0-1 knapsack.
            for j in range(k,0,-1):
                have[j] = max(have[j], no[j] - prices[i])
                no[j] = max(no[j], have[j-1] + prices[i])
        return max(no)
# @lc code=end
| HughTang/Leetcode-Python | Dynamic Programming/股票交易/188.买卖股票的最佳时机-iv.py | 188.买卖股票的最佳时机-iv.py | py | 3,016 | python | zh | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.