seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
71719419537 | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# Load the Gapminder-style dataset and keep only the rows for Mexico.
df = pd.read_csv('countries.csv')
df_mex = df[df.country == "Mexico"]
# Visualize life expectancy over time before fitting.
df_mex.plot.scatter(x='year', y='lifeExp')
# 2-D column arrays (n, 1) as expected by sklearn's fit().
x = np.asanyarray(df_mex[['year']])
y = np.asanyarray(df_mex[['lifeExp']])
# Simple linear regression: lifeExp ~ year.
model = linear_model.LinearRegression()
model.fit(x, y)
# Years to predict; note 3019 and 542 are far outside the training range,
# so those extrapolations are not meaningful.
Years = np.array([2005, 2019, 3019, 542])
Years = Years.reshape(-1, 1)
print(model.predict(Years)) | CEOE1996/AI-Practices | Regresion Lineal Simple.py | Regresion Lineal Simple.py | py | 464 | python | en | code | 0 | github-code | 13 |
3697943037 | #
# Filename: http_server.py
# Author: Harrison Hubbell
# Date: 09/01/2014
# Description: Is responsible for serving data over HTTP
#
from socketserver import ThreadingMixIn
from multiprocessing import Process, Lock
from . import exception, handler
import logging
import http.server
import io
import socket
import qrcode
import qrcode.image.svg
import urllib.parse
import gzip
class RequestHandler(http.server.BaseHTTPRequestHandler):
    """Serves static files, API requests (/api/...) and SSE (/sse/...).

    GET and POST are handled identically: resolve the resource, optionally
    gzip-encode it, and write it back with the appropriate status code.
    """
    _INDEX = 'index.html'

    # Extension -> MIME type map for static resources.
    _CONTENT_TYPES = {
        '.css': 'text/css',
        '.html': 'text/html',
        '.ico': 'image/x-icon',
        '.js': 'application/javascript',
        '.pdf': 'application/pdf',
        '.png': 'image/png',
        '.svg': 'image/svg+xml',
    }

    def get_content_type(self, req):
        """
        @author: Harrison Hubbell
        @created: 09/01/2014
        @description: Return the MIME type based on the requested file's
                      extension, or None for unknown extensions.
        """
        for ext, content_type in self._CONTENT_TYPES.items():
            if req.endswith(ext):
                return content_type
        return None

    def get_resource(self):
        """
        @author: Harrison Hubbell
        @created: 10/05/2014
        @description: Locates the requested resource: API handler, SSE
                      handler, or a static file under the server root.
        """
        page_buffer = None
        content_type = None
        if self.path[1:4] == 'api':
            page_buffer, content_type = self.server.api.handle(
                self.command,
                self.path[5:],
                self.headers,
                self.rfile
            )
        elif self.path[1:4] == 'sse':
            page_buffer, content_type = self.server.sse.handle()
        else:
            page = self.path[1:] or self._INDEX
            content_type = self.get_content_type(page)
            with open(self.server.root + page, 'rb') as f:
                page_buffer = f.read()
        return page_buffer, content_type

    def encode(self, stream):
        """
        @author: Harrison Hubbell
        @created: 04/06/2015
        @description: Compresses the response body with gzip when the
                      client accepts it; returns (body, encoding-or-None).
        """
        ENCODING = 'gzip'
        output = io.BytesIO()
        encoding = None
        fbuffer = stream
        if ENCODING in self.headers['Accept-Encoding'] and stream is not None:
            with gzip.GzipFile(fileobj=output, mode='w', compresslevel=5) as f:
                f.write(stream)
            encoding = ENCODING
            fbuffer = output.getvalue()
        return fbuffer, encoding

    def log_message(self, fmt, *args):
        """
        @author: Harrison Hubbell
        @created: 09/01/2014
        @description: Overrides standard logging functionality to
                      log server actions to a file.
        """
        logging.debug(args)

    def _serve(self):
        """Shared GET/POST pipeline: resolve, encode, respond.

        Maps errors to HTTP statuses: missing file -> 404, API not
        connected -> 503, malformed API request -> 400, forbidden -> 403,
        anything else -> 500.

        Fix: exception module was previously misspelled 'excepiton' in
        every except clause, which raised a NameError instead of matching
        the API error types (module is imported as 'exception' above).
        """
        try:
            data, content_type = self.get_resource()
            data, content_encoding = self.encode(data)
            self.send_response(200)
            self.send_header("Content-type", content_type)
            if content_encoding:
                self.send_header("Content-encoding", content_encoding)
            self.end_headers()
            self.wfile.write(data)
        except IOError as e:
            self.send_error(404)
            logging.info(e)
        except exception.APINotConnectedError as e:
            self.send_error(503)
            logging.error(e)
        except exception.APIMalformedError as e:
            self.send_error(400)
            logging.info(e)
        except exception.APIForbiddenError as e:
            self.send_error(403)
            logging.info(e)
        except Exception as e:
            self.send_error(500)
            logging.critical('%s %s caused an Internal Server Error',
                self.command,
                self.path
            )
            logging.critical(e)

    def do_GET(self):
        """
        @author: Harrison Hubbell
        @created: 09/01/2014
        @description: Handles GET requests.
        """
        self._serve()

    def do_POST(self):
        """
        @author: Harrison Hubbell
        @created: 11/21/2014
        @description: Handles POST requests.
        """
        self._serve()
class ThreadedHTTPServer(ThreadingMixIn, http.server.HTTPServer):
    """HTTP server that dispatches each request on its own thread."""

    def __init__(self, addr, handler, api, sse, root):
        # Let the base server bind the socket, then attach the shared
        # collaborators that RequestHandler reaches via self.server.
        super().__init__(addr, handler)
        self.api = api
        self.sse = sse
        self.root = root
class HTTPServerManager(object):
    """Owns the HTTP server process, the SSE response file, and the QR code
    that points devices at this machine's address."""

    def __init__(self, host, port, path, dbi=None):
        # host/port: bind address for the spawned server.
        # path: filesystem root holding static assets and the SSE file.
        # dbi: database interface handed to the API handler (may be None).
        self.host = host
        self.port = port
        self.path = path
        self.dbi = dbi
        self.update_id = 0
        self.lock = Lock()
        self.httpd = None
        self.create_qrcode()
    def create_qrcode(self):
        """
        @author: Harrison Hubbell
        @created: 11/18/2014
        @description: Gets the current interface address of the server,
                      and renders a QR Code that allows devices to go
                      to that address.
        """
        GOOGLE = ('8.8.8.8', 80)
        # Connecting a UDP socket sends no packets; it only selects the
        # outbound interface whose local address we then read back.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        sock.connect(GOOGLE)
        address = 'http://' + sock.getsockname()[0]
        sock.close()
        factory = qrcode.image.svg.SvgPathImage
        factory.QR_PATH_STYLE = 'fill:#C0392B;fill-opacity:1;fill-rule:nonzero;stroke:none'
        qr = qrcode.QRCode(version=1, box_size=10, border=0)
        qr.image_factory = factory
        qr.add_data(address)
        qr.make()
        image = qr.make_image()
        # NOTE(review): path is joined without a separator here but with a
        # leading '/' elsewhere ('/static/sse.txt') — confirm self.path ends
        # consistently with or without a trailing slash.
        image.save(self.path + 'static/img/qrcode.svg')
    def sse_response(self, data):
        """
        @author: Harrison Hubbell
        @created: 04/06/2015
        @description: Manages setting the HTTPServer sse reponse.
        """
        # Lock guards the shared update counter/file across processes.
        self.lock.acquire()
        with open(self.path + '/static/sse.txt', 'w') as f:
            self.update_id += 1
            f.write('id: {}\ndata: {}\n\n'.format(self.update_id, data))
        self.lock.release()
    def spawn_server(self, host=None, port=None):
        """
        @author: Harrison Hubbell
        @created: 04/13/2015
        @description: Spawn the http server instance. Blocks in
                      serve_forever(); intended to run in a child process.
        """
        host = host if host is not None else self.host
        port = port if port is not None else self.port
        self.httpd = ThreadedHTTPServer(
            (host, port),
            RequestHandler,
            handler.APIHandler(self.dbi),
            handler.SSEHandler(self.path + '/static/sse.txt', self.lock),
            self.path
        )
        self.httpd.serve_forever()
    def start(self):
        """
        @author: Harrison Hubbell
        @created: 04/13/2015
        @description: Spawn a thread and a shared memory pool for
                      setting new SSE responses.
        """
        Process(target=self.spawn_server).start()
| hhubbell/smartkeg | smartkeg/http/http.py | http.py | py | 8,112 | python | en | code | 0 | github-code | 13 |
7277561194 | import numpy as np
import mne
import matplotlib
import matplotlib.pyplot as plt
from scipy import stats
from mne.stats import fdr_correction, bonferroni_correction
def Stats_Sigs(G1, G2, numbins, name, mode):
    """Sliding paired t-test between pre- and post-stimulation signals.

    Splits the samples into ``numbins`` windows, runs a paired t-test per
    window, FDR-corrects the p-values (alpha=0.01), and saves a figure in
    which the windows that stay significant are shaded.

    :param G1: 1-D numpy array, pre-stimulation data (all subjects)
    :param G2: 1-D numpy array, post-stimulation data (all subjects)
    :param numbins: number of windows in which to perform a t-test
    :param name: suffix for the saved output image
    :param mode: 'time', 'freq' or 'gr' — selects the x-axis and binning
    :raises ValueError: for an unrecognized mode

    Fixes relative to the previous revision:
      * ``bins`` was created with ``np.empty``, leaving ``bins[0, 0]``
        (the first window's slice start) uninitialized garbage — now zeros.
      * In 'gr' mode the std band's lower bound was ``G2+stdev6`` instead
        of ``G2-stdev6``, collapsing the band to a line.
    """
    # Time axis for the TF plots
    min_time = -1500
    max_time = 1500
    num_time = 750
    timex = np.linspace(min_time, max_time, num_time)
    # Frequency axis
    min_freq = 2
    max_freq = 42
    num_freq = 40
    frex = np.linspace(min_freq, max_freq, num_freq)
    # Granger time windows (ms)
    timegr = np.linspace(-100, 700, num=32, endpoint=True)
    # zeros, not empty: bins[0, 0] must be a valid slice start (0).
    bins = np.zeros((numbins, 2))
    # Window edges in the mode's native units (ms / Hz), later mapped to
    # sample indices for the multiple-comparison binning.
    if mode == 'time':
        val_max = 3000
        aux_bins = np.ceil(np.linspace(0, val_max, numbins))
    elif mode == 'freq':
        val_max = 42
        aux_bins = np.ceil(np.linspace(2, val_max, numbins))
    elif mode == 'gr':
        val_max = 700
        aux_bins = np.ceil(np.linspace(-100, val_max, numbins))
    else:
        raise ValueError("mode must be 'time', 'freq' or 'gr', got %r" % (mode,))
    print(aux_bins)
    # Map the native-unit edges to sample indices.
    for a, b in enumerate(aux_bins):
        bins[a, 1] = np.ceil((b * G1.shape[0]) / val_max)
    bins = bins.astype(int)
    # Each window starts where the previous one ended.
    bins[1:, 0] = [bins[c, 1] for c, _ in enumerate(bins[1:, 1])]
    print(bins)
    # Sliding paired t-test; sig keeps [start, end] of significant windows.
    sig = np.empty([numbins, 2])
    pvalues = np.zeros([numbins])
    for idx, _ in enumerate(bins):
        stat, pval = stats.ttest_rel(G1[bins[idx, 0]:bins[idx, 1]], G2[bins[idx, 0]:bins[idx, 1]])
        if pval < 0.01:
            pvalues[idx] = pval
            sig[idx, :] = np.array([bins[idx, 0], bins[idx, 1]])
        else:
            pvalues[idx] = pval
            sig[idx, :] = 0
    # Empty windows yield NaN p-values; force them to be non-significant.
    pv = np.isnan(pvalues)
    pvalues[pv] = 100
    reject_H0, fdr_pvals = fdr_correction(pvalues, 0.01)  # False Discovery Rate
    fdr = np.where(reject_H0 == True)
    fig1 = plt.figure(figsize=(13.0, 7.5))
    if mode == 'time':
        stdev1 = np.std(G1)
        plt.plot(timex[350:550], G1[350:550], label='Pre')
        plt.tick_params(labelsize=20)
        plt.fill_between(timex[350:550], G1[350:550]+stdev1, G1[350:550]-stdev1, alpha=.1)
        stdev2 = np.std(G2)
        plt.plot(timex[350:550], G2[350:550], label='Pos')
        plt.tick_params(labelsize=20)
        plt.fill_between(timex[350:550], G2[350:550]+stdev2, G2[350:550]-stdev2, alpha=.1)
        for s in sig[fdr]:
            plt.fill_between(s, 0, 1, color='lightgray')
            plt.axvline(s[0], color='r')  # Show Stim Onset
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        plt.ylabel('Pair Phase Synchrony - PLV', fontsize=20)
        plt.xlabel('Time(s)', fontsize=20)
        plt.ylim([0, 0.6])
        fig1.savefig('Stats_PLV_%s' %name)
    if mode == 'freq':
        stdev3 = np.std(G1)
        plt.plot(frex, G1, label='Pre')
        plt.tick_params(labelsize=20)
        plt.fill_between(frex, G1+stdev3, G1-stdev3, alpha=.1)
        plt.plot(frex, G2, label='Pos')
        stdev4 = np.std(G2)
        plt.tick_params(labelsize=20)
        plt.fill_between(frex, G2+stdev4, G2-stdev4, alpha=.1)
        for s in sig[fdr]:
            plt.fill_between(s, 0, 1, color='lightgray')
        plt.tick_params(labelsize=20)
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        plt.ylabel('Pair Phase Synchrony - PLV', fontsize=20)
        plt.xlabel('Frequency(Hz)', fontsize=20)
        plt.ylim([0, 0.4])
        fig1.savefig('Stats_PLV_%s' %name)
    if mode == 'gr':
        stdev5 = np.std(G1)
        plt.plot(timegr, G1, label='v5 to v1')
        plt.tick_params(labelsize=20)
        plt.fill_between(timegr, G1+stdev5, G1-stdev5, alpha=.1)
        stdev6 = np.std(G2)
        plt.plot(timegr, G2, label='v1 to v5')
        plt.tick_params(labelsize=20)
        # Fixed: lower bound of the band was G2+stdev6.
        plt.fill_between(timegr, G2+stdev6, G2-stdev6, alpha=.1)
        for s in sig[fdr]:
            plt.fill_between(s, 0, 1, color='lightgray')
        plt.ylim([0.0, 0.01])
        plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=2, mode="expand", borderaxespad=0.)
        plt.xlabel('Time (s)')
        plt.ylabel('G-causality')
        fig1.savefig('Stats_%s' %name)
24154314534 | from fastapi import APIRouter
from loguru import logger
from models.Users import UserIn
from repositories.users import UserRepository
user_router = APIRouter()
@user_router.post("/create/")
async def create_user(user_in: UserIn):
    """
    Endpoint for creating a user.

    :param user_in: Pydantic model with the new user's data
    :return: JSON ``{"msg": ...}`` with the operation status
    """
    user = UserRepository()
    try:
        # is_valid() raises ValueError when the two submitted passwords differ.
        user_in.is_valid()
        await user.create(user_in)
        return {"msg": "Ok"}
    except ValueError:
        logger.warning("Не верно ввден пароль")
        return {"msg": "Не совпадают пароли"}
    except Exception as error:
        # NOTE(review): every other repository failure is reported as
        # "user already exists" — presumably a unique-constraint violation;
        # verify this assumption against UserRepository.create().
        logger.warning(error)
        return {"msg": "Юзер уже существует"}
@user_router.post("/update/")
async def update_user(user_in: UserIn):
    """
    Endpoint for updating a user.

    Looks the user up by e-mail, then applies the submitted fields.

    :param user_in: Pydantic model with the user's new data
    :return: JSON ``{"msg": ...}`` with the operation status
    """
    user = UserRepository()
    try:
        instance = await user.get_by_email(user_in.email)
        await user.update(instance.id, user_in)
        return {"msg": "Ok"}
    except Exception as error:
        # NOTE(review): the exception object itself is returned in the JSON
        # body; confirm the response model serializes it as intended.
        logger.warning(error)
        return {"msg": error}
| ilnrzakirov/parts_service | endpoint/user_endpoints.py | user_endpoints.py | py | 1,269 | python | ru | code | 1 | github-code | 13 |
72216420499 | import urllib2
from beautifulsoup import listTexts
from regexp import processNames, processDates
#from pygoogle import pygoogle
def getAnswers(query):
    """Collect candidate answers for a who/when question.

    Scrapes a fixed set of pages, then extracts either person names or
    dates depending on the question word found in the query. Returns a
    dict mapping candidate answers to scores.
    """
    # Search-engine integration is disabled; a hard-coded page list is
    # used instead.
    #g = pygoogle("What is your problem")
    #g.pages = 1
    #urllist = g.get_urls()
    urllist = [
        "http://www.politifact.com/texas/statements/2014/mar/19/kesha-rogers/four-us-citizens-killed-obama-drone-strikes-3-were/",
        "https://docs.python.org/2/howto/urllib2.html",
        "http://www.politifact.com/texas/statements/2014/mar/19/kesha-rogers/four-us-citizens-killed-obama-drone-strikes-3-were/",
    ]
    stringlist = listTexts(urllist)
    if "Who" in query or "who" in query:
        results = processNames(stringlist)
    elif "When" in query or "when" in query:
        results = processDates(stringlist)
    else:
        results = {"I'm sorry, I didn't understand that": 5}
    return results
| genjinoguchi/softdev_homework_1 | search.py | search.py | py | 928 | python | en | code | 0 | github-code | 13 |
21580336835 | import os
import re
from util import *
from glob import glob
# from utilCapacity import get_capacity
# Number of infoN slots emitted by category_rule_148's result dict.
LIMIT_NUM = 20
# Label vocabularies, loaded once at import time from the Labels/ directory:
# two tiers of brand names, flavour words, and product-type words.
Brand_list_1 = [i.strip() for i in set(open("Labels/148_brand_list_1", encoding="utf-8").readlines())]
Brand_list_2 = [i.strip() for i in set(open("Labels/148_brand_list_2", encoding="utf-8").readlines())]
Taste_list = [i.strip() for i in set(open("Labels/148_taste_list", encoding="utf-8").readlines())]
Type_list = [i.strip() for i in set(open("Labels/148_type_list", encoding="utf-8").readlines())]
# Brand entries containing "味" (flavour character), plus "味之" — presumably
# used to keep brand names from being misread as flavours; confirm in util.
absor_taste = [i for i in Brand_list_1 if "味" in i]
absor_taste.append("味之")
# Extract flavour ("口味") information.
def get_taste(texts_list, product_name):
    """
    Extract the product's flavour string.

    Strategy (per the 148 category spec and labelled sample data):
      1. Match the product name against the curated flavour list
         (Taste_list); join multiple hits with ",".
      2. Otherwise grab a trailing "...味" token from the product name,
         rejecting the generic tokens "口味"/"新口味" and anything on
         Taste_Abort_List (expected from the ``util`` star import).
      3. Fall back to get_taste_normal() over the OCR texts.

    :param texts_list: ordered OCR text groups
    :param product_name: full product name
    :return: comma-joined flavour string, or get_taste_normal()'s result
             (NOTE(review): the fallback's return type may differ — verify)
    """
    pattern = "(\w+味)"
    result = get_info_list_by_list([[product_name, ], ], Taste_list)
    if len(result) == 0:
        p_res = re.compile(pattern).findall(product_name)
        if len(p_res) > 0 and p_res[0] not in ["口味", "新口味"]:
            Flag = True
            for i in Taste_Abort_List:
                if i in p_res[0]:
                    Flag = False
                    break
            if Flag:
                result.append(p_res[0])
    if len(result) == 0:
        result= get_taste_normal(texts_list, Taste_list)
        return result
    else:
        # result = list(set(result))
        return ",".join(result)
        # return result[0]
def get_package(texts_list):
    """
    Decide single-pack vs multi-pack and, when possible, the unit count.

    Pass 1: a text like "6支装" directly yields ("多包装", "6支"); "1支"
    is excluded. Pass 2: a lone "支"/"支装" token — scan the two
    neighbouring texts on each side for a bare 1-2 digit count.
    Defaults to ("单包装", "不分").

    NOTE(review): pass 1 returns the count as a string ("6支") while
    pass 2 returns an int — callers must cope with both.
    """
    pattern = "[两\d]+支装?$"
    num = "不分"
    for texts in texts_list:
        for index, text in enumerate(texts):
            p_res_1 = get_info_by_pattern(text, pattern)
            if len(p_res_1) > 0 and "1支" not in text:
                num = re.compile("[两\d]+支").findall(text)[0]
                return "多包装", num
    pattern = "^支装?$"
    num = "不分"
    for texts in texts_list:
        for index,text in enumerate(texts):
            p_res_1 = get_info_by_pattern(text, pattern)
            total_len = len(texts)
            if len(p_res_1) > 0:
                # Look up to two positions before/after the "支" token for
                # a standalone 1-2 digit count.
                for i in [-2,-1,1,2]:
                    if index + i >=0 and index + i <total_len:
                        p_res_tmp = re.compile("^\d{1,2}$").findall(texts[index + i])
                        if len(p_res_tmp) > 0:
                            num = int(p_res_tmp[0])
                            break
                return "多包装",num
    return "单包装",num
def get_package_size(texts_list):
    """Return '家庭装' (family pack) if any OCR text ends with that
    marker, else '不分' (unclassified)."""
    family_pat = "\w*家庭装$"
    for group in texts_list:
        for entry in group:
            if len(get_info_by_pattern(entry, family_pat)) > 0:
                return "家庭装"
    return "不分"
def get_type_bak(texts_list):
    """Deprecated variant of get_type() kept for reference.

    Classifies the product by scanning the OCR texts for water-related
    vs dairy-related keywords: water only -> '纯冰', dairy only ->
    '纯牛奶', otherwise '混合'.
    """
    water_pat = "(饮用水|纯净水|\W*水\W+|冰棍|冰棒)"
    dairy_pat = "(牛奶|乳清?粉|奶粉|牛乳|奶$)"
    has_water = False
    has_dairy = False
    for group in texts_list:
        for entry in group:
            # Short-circuit: stop probing a category once it has been seen.
            has_water = has_water or len(get_info_by_pattern(entry, water_pat)) > 0
            has_dairy = has_dairy or len(get_info_by_pattern(entry, dairy_pat)) > 0
    if has_water and not has_dairy:
        return "纯冰"
    if has_dairy and not has_water:
        return "纯牛奶"
    return "混合"
# Extract the composition type.
def get_type(texts_list):
    """Classify the product composition from the OCR texts.

    Per the 148 spec: ingredients containing water but no dairy ->
    '纯冰' (pure ice); dairy but no water -> '纯牛奶' (pure milk);
    both, or neither, -> '混合' (mixed).

    :param texts_list: ordered OCR text groups
    :return: one of '纯冰', '纯牛奶', '混合'
    """
    water_pat = "(饮用水|纯净水|\W*水\W+|冰棍|冰棒|配料水$)"
    dairy_pat = "(牛奶|乳清?粉|奶粉|牛乳|奶$|乳固体|乳制品)"
    has_water = False
    has_dairy = False
    for group in texts_list:
        for entry in group:
            # Short-circuit: stop probing a category once it has been seen.
            has_water = has_water or len(get_info_by_pattern(entry, water_pat)) > 0
            has_dairy = has_dairy or len(get_info_by_pattern(entry, dairy_pat)) > 0
    if has_water and not has_dairy:
        return "纯冰"
    if has_dairy and not has_water:
        return "纯牛奶"
    return "混合"
# Extract the full product name by voting.
def get_productName_voting(texts_list,kvs_list):
    """
    Extract the full product name by majority vote.

    Builds a regex from Type_list suffixes ("\w+<type>$", then a looser
    "\w*" variant), collects matching OCR texts, sorts by length and takes
    the most common candidate. A "品名" (product name) key/value entry
    overrides the vote when its value is at least as long.

    :param texts_list: ordered OCR text groups
    :param kvs_list: key/value OCR groups
    :return: product name string, or "不分" when nothing qualifies
    """
    # Candidates containing these filler words are rejected.
    pattern_pres = "容量|包含|[的是]"
    product_name=''
    result_list = []
    pre_result_list = []
    abort_list =['容量','包含','的','是']
    # Build "(\w+type1|\w+type2|...)$" from the type vocabulary.
    pattern_1 = "("
    for i in Type_list:
        pattern_1 += "\w+" + i + "|"
    pattern_1 = pattern_1[:-1] + ")$"
    # Looser variant: "\w*" prefixes, and the trailing "$" dropped.
    pattern_2 = pattern_1.replace("+","*")[:-1]
    for texts in texts_list:
        for text in texts:
            p_res = get_info_by_pattern(text, pattern_1)
            if len(p_res) > 0 and '类型' not in text:
                if len(re.compile(pattern_pres).findall(p_res[0])) == 0:
                    result_list.append(p_res[0])
    if len(result_list) > 0:
        result_list.sort(key=len, reverse=True)
        count = Counter(result_list).most_common(2)
        product_name = count[0][0]
    if len(product_name)==0:
        # Retry with the looser pattern if the strict one found nothing.
        for texts in texts_list:
            for text in texts:
                p_res = get_info_by_pattern(text, pattern_2)
                if len(p_res) > 0 and '类型' not in text:
                    if len(re.compile(pattern_pres).findall(p_res[0])) == 0:
                        result_list.append(p_res[0])
        if len(result_list) > 0:
            result_list.sort(key=len, reverse=True)
            count = Counter(result_list).most_common(2)
            product_name = count[0][0]
    # A key containing "品名" provides a trusted candidate, unless its
    # value contains one of the filler words.
    for kvs in kvs_list:
        for kv in kvs:
            for k in kv.keys():
                if "品名" in k:
                    if len(kv[k]) > 1 :
                        flag = True
                        for it in abort_list:
                            if it in kv[k]:
                                flag = False
                                break
                        if flag:
                            pre_result_list.append(kv[k])
    if len(pre_result_list) > 0 :
        pre_result_list.sort(key=len, reverse=True)
        if len(pre_result_list[0])>=len(product_name):
            product_name = pre_result_list[0]
    if len(product_name) >0:
        return product_name
    return "不分"
# List all matched brands — used for curating the brand vocabulary.
def get_brand_list_test(texts_list):
    """
    Debug/curation helper: collect every Brand_list_1 entry appearing in
    the OCR texts and return the top candidates.

    :param texts_list: ordered OCR text groups
    :return: (brand_1, brand_2) where brand_1 joins up to the 6 most common
             matches with ","; NOTE(review): brand_2 is never populated and
             is therefore always "不分".
    """
    brand_1_list = []
    brand_2 = []
    for texts in texts_list:
        for text in texts:
            for b1 in Brand_list_1:
                # Case-insensitive match for latin names, exact for CJK.
                if b1.upper() in text.upper() or b1 in text:
                    brand_1_list.append(b1)
    if len(brand_2) > 0:
        brand_2 = ",".join(list(set(brand_2)))
    else:
        brand_2 = "不分"
    if len(brand_1_list) == 0:
        brand_1 = "不分"
    else:
        brand_1_list.sort(key=len,reverse=True)
        count = Counter(brand_1_list).most_common(6)
        brand_1 = ",".join([i[0] for i in count])
    return brand_1,brand_2
def _capacity_from_keys(kvs_list, key_pattern):
    """Scan key/value OCR pairs whose key matches *key_pattern* and pull a
    '<number><unit>' capacity out of the value.

    OCR often misreads '1' as 'I', so 'I' is normalized before matching.
    Values are rejected when the number starts with '0', when a kg-scale
    unit exceeds 10, or when a gram/ml-scale number is outside [1, 5000).
    Returns the capacity string, or None when nothing qualifies.
    """
    value_pat = re.compile(r'(\d+\.?\d*|I\.?\d*)\s?(G|g|克|千克|kg|KG|毫升|升|L|ml|ML|mL)')
    key_pat = re.compile(key_pattern)
    for kvs in kvs_list:
        for kv in kvs:
            for k in kv.keys():
                if len(key_pat.findall(k)) == 0:
                    continue
                candidates = value_pat.findall(kv[k].replace('I', '1'))
                if len(candidates) == 0:
                    continue
                num, unit = candidates[0]
                if num.startswith("0"):
                    continue
                if unit in ("千克", "kg", "KG"):
                    # Kilogram-scale: sanity cap at 10 kg.
                    if float(num) <= 10:
                        return num + unit
                elif 1 <= float(num) < 5000:
                    return num + unit
    return None


def get_Capacity(kvs_list, texts_list):
    """
    Extract the total net content ("500g", "2千克", ...) from key/value
    OCR pairs.

    Tries net-weight style keys (净含量/净重/含量/NetW/重量) first, then
    falls back to specification keys (规格). Refactor: the two previously
    duplicated scan loops are now one helper parameterized by key pattern.

    :param kvs_list: key/value OCR groups
    :param texts_list: unused; kept for interface compatibility
    :return: capacity string, or "不分" when nothing qualifies
    """
    for key_pattern in (r'(净含量?|净重|^[\u4e00-\u9fa5]?含量$|[Nn][Ee][Tt][Ww]|重量)',
                        r'(规格)'):
        found = _capacity_from_keys(kvs_list, key_pattern)
        if found is not None:
            return found
    return "不分"
def get_Capacity_bak(texts_list):
    """
    Fallback net-content extraction from raw OCR texts.

    Pass 1: if exactly one text in a group carries a number+unit (and
    survives the nutritional-table / per-serving filters), use it.
    Pass 2: collect every surviving candidate with unit-specific sanity
    limits (kg/L <= 30; g/ml < 5000 and integral) and return the most
    common one. Returns "不分" when nothing qualifies.

    NOTE(review): ``if not isNutritionalTable(...): continue`` keeps only
    nutritional-table texts — confirm that helper's meaning in util.
    """
    p = re.compile(r'(\d+\.?\d*)\s?(G|g|千克|克|kg|KG|Kg|ml|ML|mL|毫升)')
    for texts in texts_list:
        tmp_list = []
        for index, text in enumerate(texts):
            p_res = p.findall(text)
            if len(p_res) > 0 and float(p_res[0][0]) < 10000:
                if not isNutritionalTable(text, texts, index):
                    continue
                if "每份" in text:
                    continue
                tmp_list.append(p_res[0][0] + p_res[0][1])
        if len(tmp_list) == 1:
            return tmp_list[0]
    result_list = []
    p = re.compile(r'(\d+\.?\d*)\s?(G|g|千克|克|kg|KG|Kg|ml|ML|mL|毫升)')
    for texts in texts_list:
        for index, text in enumerate(texts):
            p_res = p.findall(text)
            if len(p_res) > 0:
                if not isNutritionalTable(text, texts, index):
                    continue
                if "每份" in text:
                    continue
                p_res = p_res[0]
                if p_res[1] in ["Kg","kg","KG","千克","升","L"]:
                    # Kilogram/litre scale: sanity cap at 30.
                    if float(p_res[0]) <= 30:
                        result_list.append(p_res[0] + p_res[1])
                else:
                    # Gram/ml scale: < 5000 and no decimal point.
                    if float(p_res[0]) < 5000 and "." not in p_res[0]:
                        result_list.append(p_res[0] + p_res[1])
    if len(result_list) == 0:
        return "不分"
    count = Counter(result_list).most_common(2)
    return count[0][0]
def get_Capacity_bak_2(texts_list):
    """Fallback net-weight extraction: find a text matching a net-weight
    keyword (净含量/净重/含量/NetW/重量), then look at the two neighbouring
    texts on each side for a bare 1-2 digit number and report it as grams
    ('克'). Stops at the first keyword hit; returns '不分' otherwise."""
    key_pat = r'(净含量?|净重|^[\u4e00-\u9fa5]?含量$|[Nn][Ee][Tt][Ww]|重量)'
    digit_pat = re.compile("^\d{1,2}$")
    for group in texts_list:
        for idx, entry in enumerate(group):
            if len(get_info_by_pattern(entry, key_pat)) == 0:
                continue
            for offset in (-2, -1, 1, 2):
                pos = idx + offset
                if 0 <= pos < len(group):
                    digits = digit_pat.findall(group[pos])
                    if digits:
                        return digits[0] + "克"
            # Keyword found but no nearby count: give up, as before.
            return "不分"
    return "不分"
def get_Capacity_2(texts_list):
    """
    Extract multi-pack capacity: total amount and per-unit breakdown.

    Tries four regex passes over the OCR texts, in order:
      1. "<amount><unit>...<count><pack-word>装" (loose separator);
      2. "<amount><unit>*<count>[pack-word]" (explicit multiplier);
      3. "<count>[pack-word]*<amount><unit>" (multiplier, reversed order);
      4. "<amount><unit>...<count>)" at end of text — recognized but the
         total is not computed ("不分").

    Texts with more than one multiplier, per-serving ("每份") texts, and
    OCR artefacts like "1.5克(g)" / "(9)" are skipped. When a multiplier
    sign is present (or, in pass 1, the per-unit amount is < 100), the
    total is amount*count formatted to one decimal.

    :param texts_list: ordered OCR text groups
    :return: (total_capacity, per_unit_spec) — either may be "不分"
    """
    # Pass 1: loose "<amount><unit>...<count><pack>装" form.
    pattern = r'\d+\.?\d*\D*[Gg克lL升]\D{0,3}\d+\D?[包袋盒支杯个]装?\)?'
    pattern_2 = r'(\d+\.?\d*)\W*(G|g|克|kg|KG|Kg|ml|ML|mL|毫升)\D{0,3}(\d+)\D?[包袋盒支杯个]装?\)?'
    p = re.compile(pattern)
    for text_list in texts_list:
        for text in text_list:
            if len(re.split("[*xX]\d", text)) > 2:
                continue
            if "每份" in text:
                continue
            p_res = p.findall(text)
            if len(p_res) > 0:
                p_res_2 = re.compile(pattern_2).findall(p_res[0])
                if len(p_res_2) > 0:
                    p_res_2 = p_res_2[0]
                    unit = p_res_2[1]
                    if len(p_res_2) == 3:
                        if p_res_2[2] != "0" and p_res_2[2] != "":
                            if float(p_res_2[0]) >= 1 and float(p_res_2[0]) <= 5000 and float(p_res_2[2]) < 201:
                                if "*" in p_res[0] or "x" in p_res[0] or "X" in p_res[0] or float(p_res_2[0]) < 100:
                                    return ("%.1f%s" % (float(p_res_2[0]) * float(p_res_2[2]), unit)), re.sub(u"\)", "",p_res[0])
                                else:
                                    return "不分", re.sub(u"\)", "", p_res[0])
                            else:
                                return "不分", re.sub(u"\)", "", p_res[0])
    # Pass 2: explicit multiplier "<amount><unit>*<count>".
    pattern = r'\d+\.?\d*\D*[Gg克lL升][*xX]\d+[包袋盒支杯个\)]?'
    pattern_2 = r'(\d+\.?\d*)\W*(G|g|克|kg|KG|Kg|ml|ML|mL|毫升)[*xX](\d+)[包袋盒支杯个\)]?'
    p = re.compile(pattern)
    for text_list in texts_list:
        for text in text_list:
            if len(re.split("[*xX]\d", text)) > 2:
                continue
            p_res = p.findall(text)
            if len(p_res) > 0:
                if len(re.compile("\d+\.\d+克\([\dg]\)").findall(text)) > 0:
                    continue
                if "(9)" in text:
                    continue
                p_res_2 = re.compile(pattern_2).findall(p_res[0])
                if len(p_res_2) > 0:
                    p_res_2 = p_res_2[0]
                    unit = p_res_2[1]
                    if len(p_res_2) == 3:
                        if p_res_2[2] != "0" and p_res_2[2] != "":
                            if float(p_res_2[0]) >= 1 and float(p_res_2[0]) <= 5000:
                                if "*" in p_res[0] or "x" in p_res[0] or "X" in p_res[0]:
                                    return ("%.1f%s" % (float(p_res_2[0]) * float(p_res_2[2]), unit)), re.sub(u"\)", "",
                                                                                                             p_res[0])
                                else:
                                    return "不分", re.sub(u"\)", "", p_res[0])
                            else:
                                return "不分", re.sub(u"\)", "", p_res[0])
    # Pass 3: reversed multiplier "<count>[pack]*<amount><unit>".
    pattern = r'\d+[包袋盒支杯个][*xX]\d+\.?\d*\D*[Gg克lL升]'
    pattern_2 = r'(\d+)[包袋盒支杯个][*xX](\d+\.?\d*)\W*(G|g|克|kg|KG|Kg|ml|ML|mL|毫升)'
    p = re.compile(pattern)
    for text_list in texts_list:
        for text in text_list:
            if len(re.split("[*xX]\d", text)) > 2:
                continue
            p_res = p.findall(text)
            if len(p_res) > 0:
                if len(re.compile("\d+\.\d+克\([\dg]\)").findall(text)) > 0:
                    continue
                if "(9)" in text:
                    continue
                p_res_2 = re.compile(pattern_2).findall(p_res[0])
                if len(p_res_2) > 0:
                    p_res_2 = p_res_2[0]
                    unit = p_res_2[2]
                    if len(p_res_2) == 3:
                        if p_res_2[0] != "0" and p_res_2[0] != "":
                            if float(p_res_2[0]) >= 1 and float(p_res_2[0]) <= 5000:
                                if "*" in p_res[0] or "x" in p_res[0] or "X" in p_res[0]:
                                    return ("%.1f%s" % (float(p_res_2[0]) * float(p_res_2[1]), unit)), re.sub(u"\)", "",
                                                                                                             p_res[0])
                                else:
                                    return "不分", re.sub(u"\)", "", p_res[0])
                            else:
                                return "不分", re.sub(u"\)", "", p_res[0])
    # Pass 4: recognize the "<amount><unit>...<count>)" tail form but only
    # report the raw spec — the total stays "不分".
    pattern = r'\d+\.?\d*\D*[Gg克lL升]\D{0,3}\d+\D*\)$'
    pattern_2 = r'(\d+\.?\d*)\W*(G|g|克|kg|KG|Kg|ml|ML|mL|毫升)\D{0,3}(\d+)\D*'
    p = re.compile(pattern)
    for text_list in texts_list:
        for text in text_list:
            p_res = p.findall(text)
            if len(p_res) > 0:
                if len(re.compile("\d+\.\d+克\([\dg]\)").findall(text)) > 0:
                    continue
                if "(9)" in text:
                    continue
                p_res_2 = re.compile(pattern_2).findall(p_res[0])
                if len(p_res_2) > 0:
                    return "不分", re.sub(u"\)", "", p_res[0])
    return "不分", "不分"
def get_Capacity_2_bak(texts_list):
    """Fallback pack-count extraction.

    Tries three patterns in order — '<n><pack>装', '<n><pack>...装' at end
    of text, and '内装/内含<n><pack>' — keeping only counts <= 200.
    Returns strings like '6包装' or '内装8小包', else '不分'.
    """
    def first_hit(pattern, build):
        # Return the first match with a plausible count, or None.
        rx = re.compile(pattern)
        for group in texts_list:
            for entry in group:
                hits = rx.findall(entry)
                if hits and int(hits[0][0]) <= 200:
                    return build(hits[0])
        return None

    attempts = (
        (r'(\d+)(\s?[包袋盒支杯个]装)', lambda m: m[0] + m[1]),
        (r'(\d+)([包袋盒支杯个])\w*(装)$', lambda m: m[0] + m[1] + m[2]),
        (r'内[装含](\d+)(小?[包袋盒支杯个])', lambda m: "内装" + m[0] + m[1]),
    )
    for pattern, build in attempts:
        found = first_hit(pattern, build)
        if found is not None:
            return found
    return "不分"
# Extract the ice-cream shape.
def get_icecream_shape(texts_list,capcity_1,product_name):
    '''
    Infer the ice-cream shape: one of 条/棍/棒 (stick), 杯 (cup),
    桶 (tub), 盒 (box), 筒 (cone), 砖 (brick), 袋 (bag), etc.

    Heuristics (per the 148 spec and labelled samples):
      * a bowl (碗) counts as a cup;
      * brick-shaped descriptions (豆腐/方糕/冰淇淋派/千层雪) -> 砖;
      * litre-scale capacity implies a tub;
      * the product name is checked first, then the raw OCR texts;
      * defaults to '棍' (stick).

    :param texts_list: ordered OCR text groups
    :param capcity_1: total net content string, e.g. '500g'
    :param product_name: full product name
    :return: shape label string
    '''
    pattern_gun = '棍'
    pattern1='\d+'
    pattern_bei = '杯'
    # A bowl (碗) is treated as a cup.
    pattern_wan = '碗'
    pattern_tong1 ='桶盖'
    pattern_he = '盒'
    pattern_tong2 = '筒'
    pattern_dai = '袋'
    pattern_bang = '棒'
    pattern_zuan = '砖'
    p_res1 = get_info_by_pattern(capcity_1, pattern1)
    # NOTE(review): weight is computed but never used below.
    weight = 0
    if len(p_res1) > 0:
        weight = int(p_res1[0])
    # if ('豆腐' in product_name or '方糕' in product_name or '冰淇淋派' in product_name or '千层雪' in product_name or '充电宝' in product_name):
    # if ('豆腐' in product_name or '方糕' in product_name or '冰淇淋派' in product_name or '千层雪' in product_name):
    #     shape = pattern_zuan
    #     return shape
    if (pattern_bei in product_name or pattern_wan in product_name):
        shape = pattern_bei
        return shape
    if pattern_tong1 in product_name :
        # '桶盖' (tub lid) in the name -> tub; strip the lid character.
        shape = pattern_tong1.replace('盖','')
        return shape
    if pattern_he in product_name:
        shape = pattern_he
        return shape
    if pattern_tong2 in product_name:
        shape = pattern_tong2
        return shape
    if pattern_dai in product_name:
        shape = pattern_dai
        return shape
    if pattern_bang in product_name:
        shape = pattern_bang
        return shape
    if '冰棍' in product_name:
        shape = pattern_gun
        return shape
    # Fall back to scanning the raw OCR texts.
    for texts in texts_list:
        for text in texts:
            # if ('豆腐' in text or '方糕' in text or '冰淇淋派' in text or '千层雪' in text or '充电宝' in text):
            if ('豆腐' in text or '方糕' in text or '冰淇淋派' in text or '千层雪' in text):
                shape = pattern_zuan
                return shape
            if (pattern_bei in text or pattern_wan in text):
                shape = pattern_bei
                return shape
            if pattern_tong1 in text :
                shape = pattern_tong1.replace('盖','')
                return shape
            if pattern_he in text and '元/盒' not in text:
                # '元/盒' is a per-box price, not a shape cue.
                shape = pattern_he
                return shape
    if '升' in capcity_1 or 'L' in capcity_1:
        # Litre-scale capacity implies a tub.
        shape = pattern_tong1.replace('盖','')
        return shape
    return pattern_gun
# Extract the product form (chocolate coating, etc.).
def get_product_shape(shape,type,product_name,texts_list):
    '''
    Decide the product form for stick-shaped products.

    Per the 148 spec: only 条/棍/棒 (stick) shapes distinguish
    '有巧克力包裹' (chocolate-coated) vs '无巧克力包裹'; every other
    shape returns '不分'.

    :param shape: ice-cream shape label
    :param type: composition type (混合/纯冰/纯牛奶)
    :param product_name: full product name
    :param texts_list: ordered OCR text groups
    :return: '有巧克力包裹', '无巧克力包裹' or '不分'
    '''
    if '条' in shape or '棍' in shape or '棒' in shape:
        if '巧克力脆皮' in product_name:
            return '有巧克力包裹'
        count1 = 0
        count2 = 0
        for texts in texts_list:
            for text in texts:
                if '巧克力脆皮' in text:
                    return '有巧克力包裹'
                # NOTE(review): '克力' '生巧' is implicit string concatenation,
                # i.e. tests '克力生巧' in text — possibly a typo for two
                # separate checks; verify intent.
                if '巧克力' in text or '生巧' in text or '克力' '生巧' in text:
                    count1+=1
                if '脆皮' in text or '脆' in text:
                    count2+=1
        # NOTE(review): count2 >= 0 is always true, so any chocolate mention
        # yields 'coated' — possibly intended to be count2 > 0.
        if count1 >= 1 and count2 >= 0:
            return '有巧克力包裹'
        return '无巧克力包裹'
        # if type=='纯冰':
        #     return '纯水/纯冰'
        # else:
        #     return '无巧克力包裹'
    else:
        return '不分'
        # # if '杯' in shape or '桶' in shape or '盒' in shape or '筒' in shape:
        # if type=='混合' or type=='纯牛奶':
        #     return '以奶成分为主'
        # elif type=='纯冰':
        #     return '纯水/纯冰'
        # else:
        #     return '不分'
# Extract the container type via the remote image classifier.
def get_package_148_unit(base64strs):
    """
    Classify the container from product images using a remote service.

    Sends the images to the classification endpoint on a worker thread,
    keeps only the plastic-cup/plastic-box/cone labels, renames them to
    杯/盒/筒, and returns the majority label. With more than 5 results a
    single vote is not trusted.

    :param base64strs: base64-encoded product images
    :return: '杯', '盒', '筒' or '不分'
    """
    url = url_classify + ':5040/yourget_opn_classify'
    task = MyThread(get_url_result, args=(base64strs, url,))
    task.start()
    # Wait for and collect the classifier output.
    result = task.get_result()
    result_list =[]
    for it in result:
        # Keep only plastic-cup / plastic-box / cone labels; drop the rest.
        if '塑料杯' in it or '塑料盒' in it or '冰淇淋筒' in it:
            it = re.sub("塑料杯", "杯", it)
            it = re.sub("塑料盒", "盒", it)
            it = re.sub("冰淇淋筒", "筒", it)
            result_list.append(it)
    if len(result_list) == 0:
        return "不分"
    # Majority vote over the kept labels.
    res = Counter(result_list).most_common(1)[0]
    # NOTE(review): len(result) == 5 satisfies neither branch's size test,
    # so 5 results need >1 vote — confirm that is intended.
    if len(result)>5 and int(res[1])>1 or len(result)<5:
        return res[0]
    else:
        return '不分'
# Top-level rule-based extraction for category 148.
def category_rule_148(datasorted,dataprocessed,dataoriginal,base64strs,uie_obj = None):
    """
    Rule pipeline for category 148 (ice cream): combines brand, flavour,
    type, capacity, packaging and shape extraction into one result dict.

    :param datasorted: OCR text groups; sorted in place by length (asc)
    :param dataprocessed: key/value OCR groups; sorted in place (desc)
    :param dataoriginal: unused here; kept for interface compatibility
    :param base64strs: product images for the remote shape classifier
    :param uie_obj: unused here; kept for interface compatibility
    :return: dict with info1..info20, brand1/brand2, capacitysum,
             capacityamount, commodityname

    NOTE(review): the local name ``type`` shadows the builtin throughout.
    """
    result_dict = {}
    brand_1 = "不分"
    brand_2 = "不分"
    brand_3 = "不分"
    type = "不分"
    taste = "不分"
    capcity_1 = "不分"
    capcity_2 = "不分"
    product_name = "不分"
    package_size = "不分"
    package = "不分"
    num_package = "不分"
    shape = "不分"
    shape_type = "不分"
    dataprocessed.sort(key=len, reverse=True)
    datasorted.sort(key=len)
    # brand_1 = get_keyValue(dataprocessed, ["商标"])
    brand_1_test=''
    # Brand: vocabulary match, normalizing the MAGNUM latin name.
    if brand_1 == "不分":
        brand_1, brand_2 = get_brand_list(datasorted, Brand_list_1, [], ["NOC","FSC"], [])
        brand_1 = re.sub("MAGNUM","梦龙",brand_1)
    # product_name = get_keyValue(dataprocessed, ["品名"])
    # Product name: vote, then strip the brand and common OCR noise.
    if product_name == "不分":
        product_name = get_productName_voting(datasorted,dataprocessed)
        product_name = re.sub('\W', "", product_name)
        if brand_1.title() in product_name.title():
            product_name = product_name.title().replace(brand_1.title(),'')
        # product_name = re.sub('\W', "", product_name)
        product_name = re.sub('榴连', "榴莲", product_name)
        product_name = re.sub('^系列', "", product_name)
        if len(product_name) < 2:
            product_name = "不分"
    # Capacity: key/value pass first, then multi-pack and text fallbacks.
    capcity_1 = get_Capacity(dataprocessed, datasorted)
    capcity_1_bak, capcity_2 = get_Capacity_2(datasorted)
    if capcity_1_bak != "不分":
        if capcity_1 == "不分":
            capcity_1 = capcity_1_bak
        elif re.compile("\d+\.?\d*").findall(capcity_1)[0] in capcity_2:
            # capcity_1 matched only the per-unit amount; prefer the total.
            capcity_1 = capcity_1_bak
    if capcity_1 == "不分":
        capcity_1 = get_Capacity_bak_2(datasorted)
    if capcity_1 == "不分":
        capcity_1 = get_Capacity_bak(datasorted)
    # Consistency check: the per-unit spec must relate to the total.
    if capcity_2 != "不分":
        try:
            num_0 = float(re.compile("\d+\.?\d*").findall(capcity_1)[0])
            num_1, num_2 = re.compile("\d+\.?\d*").findall(capcity_2)
            if float(num_1) * float(num_2) != num_0 and num_0 != float(num_1) and num_0 != float(num_2):
                capcity_2 = "不分"
        except:
            pass
    if capcity_2 == "不分":
        capcity_2 = get_Capacity_2_bak(datasorted)
    # # 包袋盒罐支杯粒瓶片
    # capcity_1, capcity_2 = get_capacity(dataprocessed, datasorted, "G|g|克|千克|kg|KG|斤|公斤", "包袋盒支杯个", 0)
    if type == "不分":
        type = get_type(datasorted)
    if taste == "不分":
        taste = get_taste(datasorted,product_name)
    # A two-character name is too short: prefix the flavour.
    if len(product_name) == 2 and product_name != "不分" and taste != "不分":
        product_name=taste+product_name
    if package == "不分":
        package, num_package = get_package(datasorted)
        # A per-unit capacity breakdown implies a multi-pack.
        package = package if capcity_2 == "不分" else "多包装"
    if capcity_2 == "不分" and num_package != "不分":
        capcity_2 = "%s装" % str(num_package)
    if package_size == "不分":
        package_size = get_package_size(datasorted)
    # Heuristic: > 200 g (or kg-scale) counts as a family pack.
    if package_size == "不分" and capcity_1 != "不分":
        num_res = re.compile("\d+").findall(capcity_1)
        if len(num_res) > 0:
            num = num_res[0]
            if int(num) > 200 or "千克" in capcity_1:
                package_size = "家庭装"
    if package_size == "不分":
        package_size = "即食"
    # Repair names whose leading flavour word got truncated to "味...".
    if len(re.compile("^味").findall(product_name)) > 0 or len(re.compile("^[口风]味").findall(product_name)) > 0:
        product_name = taste.split("味")[0] + product_name.split("味")[-1]
    # "雪糕" (ice-cream bar) contradicts a pure-ice classification.
    if type == "纯冰" and "雪糕" in product_name:
        type = "混合"
    # base64strs = ["/data/zhangxuan/images/43-product-images" + i.split("格式化数据-43")[-1].replace("\\", "/") for i in image_list]
    # For local testing the image paths must be remapped; not needed in production.
    # image_list = ["/data/zhangxuan/images/43-product-images" + i.split("ocr_test")[-1].replace("\\", "/") for i in base64strs]
    # Shape: name-based shortcuts first, then the image classifier.
    shape = '棍'
    if '可爱多' in product_name or '火炬' in product_name:
        shape='桶'
    elif ('豆腐' in product_name or '方糕' in product_name or '冰淇淋派' in product_name or '千层雪' in product_name or '充电宝' in product_name):
        shape = '砖'
    else:
        shape = get_package_148_unit(base64strs)
        if shape == '不分':
            shape = get_icecream_shape(datasorted, capcity_1, product_name)
        else:
            if shape == '杯':
                # Cup vs tub is decided by weight: <= 200 g cup, heavier tub.
                if '千克' in capcity_1 or 'L' in capcity_1 or '升' in capcity_1 or 'KG' in str(capcity_1).upper():
                    shape = '桶'
                else:
                    pattern1 = '\d+'
                    p_res1 = get_info_by_pattern(capcity_1, pattern1)
                    if len(p_res1) > 0 and int(p_res1[0]) >= 500:
                        shape = '桶'
    shape_type = get_product_shape(shape,type,product_name,datasorted)
    # Brand tier 3 (never filled here)
    result_dict['info1'] = brand_3
    # Flavour
    result_dict['info2'] = taste
    # Composition type
    result_dict['info3'] = type
    # Single vs multi pack
    result_dict['info4'] = package
    # Package size
    result_dict['info5'] = package_size
    # Shape
    result_dict['info6'] = shape
    # Product form
    result_dict['info7'] = shape_type
    result_dict['brand1'] = brand_1
    result_dict['brand2'] = brand_2
    result_dict['capacitysum'] = capcity_1
    result_dict['capacityamount'] = capcity_2
    result_dict['commodityname'] = product_name
    # Strip separators that would break the downstream CSV-like format.
    for k in result_dict.keys():
        result_dict[k] = re.sub("[,,::]", "", result_dict[k])
    # For testing only
    # result_dict['info8'] = shape2
    # Pad the remaining infoN slots up to LIMIT_NUM with empty lists.
    real_use_num = 7
    sub_num = LIMIT_NUM - real_use_num
    for i in range(sub_num):
        item_index = i + real_use_num + 1
        key_name = 'info' + str(item_index)
        result_dict[key_name] = []
    return result_dict
if __name__ == '__main__':
root_path = r'D:\Data\商品识别\stage_2\148-冰淇淋'
for product in os.listdir(root_path)[:100]:
image_list = []
product = "3072938"
for image_path in glob(os.path.join(root_path, product) + "\*g"):
image_list.append(image_path)
result_dict = category_rule_148(image_list)
with open(os.path.join(root_path, product) + r'\%s_ppocr.json' % (product), "w", encoding="utf-8") as f:
json.dump(result_dict, f, ensure_ascii=False, indent=4) | liuyubiao/test_2 | category/category_148.py | category_148.py | py | 32,413 | python | en | code | 0 | github-code | 13 |
1795467621 | from django.core.cache import cache as _cache
class CachedProperty(property):
    """
    Property decorator in the spirit of django's ``cached_property`` that
    additionally persists the value in a 'real' (shared) cache.

    Setting or deleting the attribute updates/evicts the cache entry first
    and then runs the optional fset/fdel hooks, so extra behaviour can be
    attached to those events via ``on_set``/``on_del``.
    """

    # Backing store: the default django cache (override on subclasses).
    cache = _cache
    # Cache-key template, formatted with the owning instance (override this).
    key_fmt = '{object}'

    def _key(self, obj):
        """Build the cache key for *obj* from ``key_fmt``."""
        return self.key_fmt.format(object=obj)

    def __get__(self, obj, objtype=None):
        # Class-level access returns the descriptor itself, like property.
        if obj is None:
            return self
        key = self._key(obj)
        cached = self.cache.get(key)
        if cached is not None:
            return cached
        # Cache miss. Note: a stored value of None is indistinguishable from
        # a miss, so None results are recomputed on every access.
        fresh = super(CachedProperty, self).__get__(obj, objtype)
        self.cache.set(key, fresh)
        return fresh

    def __set__(self, obj, value):
        self.cache.set(self._key(obj), value)
        if self.fset is not None:
            super(CachedProperty, self).__set__(obj, value)

    def __delete__(self, obj):
        self.cache.delete(self._key(obj))
        if self.fdel is not None:
            super(CachedProperty, self).__delete__(obj)

    # Aliases that read better when only set/delete hooks are wanted.
    on_set = property.setter
    on_del = property.deleter
class CachedClassProperty(CachedProperty):
    """
    Same as the CachedProperty decorator but acts as a class property rather
    than an instance property: the cache key and the fset/fdel hooks are
    driven by the owner class instead of the instance.
    """

    def __get__(self, obj, objtype=None):
        if objtype is None:
            objtype = type(obj)
        # Pass the class where CachedProperty expects the instance so the
        # cache key is derived from the class.
        return super(CachedClassProperty, self).__get__(objtype)

    def __set__(self, obj, value):
        # BUG FIX: the descriptor protocol invokes __set__(self, obj, value);
        # the previous signature omitted *value*, so any assignment raised
        # TypeError and the value was never forwarded to the parent.
        super(CachedClassProperty, self).__set__(type(obj), value)

    def __delete__(self, obj):
        super(CachedClassProperty, self).__delete__(type(obj))
def real_cached_property(key_fmt, cache=None):
    """
    Build a CachedProperty decorator class bound to the supplied *key_fmt*
    and, optionally, a non-default *cache* backend.
    """
    class RealCachedProperty(CachedProperty):
        def __init__(self, *args, **kwargs):
            super(RealCachedProperty, self).__init__(*args, **kwargs)
            # Bind the closed-over configuration onto this descriptor.
            self.key_fmt = key_fmt
            if cache is not None:
                self.cache = cache

    return RealCachedProperty
def real_cached_classproperty(key_fmt, cache=None):
    """
    Build a CachedClassProperty decorator class bound to the supplied
    *key_fmt* and, optionally, a non-default *cache* backend.
    """
    class RealCachedClassProperty(CachedClassProperty):
        def __init__(self, *args, **kwargs):
            super(RealCachedClassProperty, self).__init__(*args, **kwargs)
            # Bind the closed-over configuration onto this descriptor.
            self.key_fmt = key_fmt
            if cache is not None:
                self.cache = cache

    return RealCachedClassProperty
| greenbender/django-gravy | gravy/functional.py | functional.py | py | 2,769 | python | en | code | 2 | github-code | 13 |
23777951058 | import pandas as pd
import numpy as np
from scipy.spatial.distance import cdist
from pyproj import Transformer
# See other answer about the always_xy=True parameter
TRAN_3008_TO_4326 = Transformer.from_crs("EPSG:3008", "EPSG:4326")
def mytransform(lat, lon):
    """Convert a SWEREF99 13 30 (EPSG:3008) coordinate pair to WGS84 (EPSG:4326).

    NOTE(review): the Transformer above was built without always_xy=True, so
    argument order follows each CRS's axis definition — confirm lat/lon order.
    """
    return TRAN_3008_TO_4326.transform(lat, lon)
# Detected pool bounding boxes; each image has a sibling .wld world file.
box_df = pd.read_csv(r'P:\Workspace\pool_detection\pool_boxes_075.csv')
box_df['wld_path'] = box_df['path'].str.replace('jpg', 'wld')
# Address register: street name plus EPSG:3008 coordinates (decimal comma -> dot).
adress_df = pd.read_csv(r'P:\Workspace\pool_detection\Adresser_Malmö.csv', sep=';', encoding='latin', dtype=str)
adress_df = adress_df[['Beladress', 'Xkoord', 'Ykoord']]
adress_df['Xkoord'] = pd.to_numeric(adress_df['Xkoord'].astype(str).str.replace(',', '.'))
adress_df['Ykoord'] = pd.to_numeric(adress_df['Ykoord'].astype(str).str.replace(',', '.'))
adress_list = adress_df['Beladress'].tolist()
adress_coords = []
for k in range(len(adress_df)):
    adress_coords.append([adress_df['Xkoord'][k], adress_df['Ykoord'][k]])
adress_coords = np.array(adress_coords)
# Distance from each address to a detected pool (None = no pool assigned).
# NOTE(review): a later, more distant pool can overwrite a closer one for the
# same address — confirm whether the minimum should be kept instead.
shortest_dist_list = [None]*len(adress_list)
for i in range(len(box_df)):
    box = [box_df['startX'][i], box_df['startY'][i], box_df['endX'][i], box_df['endY'][i]]
    # Pixel centre of the detection box.
    x_pixel = (box_df['startX'][i] + box_df['endX'][i])/2
    y_pixel = (box_df['startY'][i] + box_df['endY'][i])/2
    path = box_df['path'][i]
    wld_path = box_df['wld_path'][i]
    # World file: six lines holding the pixel-to-world affine coefficients.
    with open(wld_path) as WorldFile:
        a = float(WorldFile.readline())
        d = float(WorldFile.readline())
        b = float(WorldFile.readline())
        e = float(WorldFile.readline())
        c = float(WorldFile.readline())
        f = float(WorldFile.readline())
    # NOTE(review): x/y look swapped relative to the usual world-file formula,
    # presumably to match the EPSG:3008 axis order — confirm.
    y_coord = a*x_pixel + b*y_pixel + c
    x_coord = d*x_pixel + e*y_pixel + f
    pool_coords = np.array([[x_coord, y_coord]])
    dist_list = cdist(pool_coords, adress_coords)
    shortest_dist = np.min(dist_list)
    dist_index = np.where(dist_list==shortest_dist)[1][0]
    shortest_dist_list[dist_index] = shortest_dist
# Keep addresses with a pool within 30 m and re-attach their coordinates.
pool_df = pd.DataFrame(list(zip(adress_list, shortest_dist_list)), columns =['adress', 'distance'])
pool_df.dropna(inplace=True)
pool_df = pool_df.loc[pool_df['distance'] <= 30]
adress_df.rename(columns = {'Beladress' : 'adress'}, inplace=True)
pool_df = pd.merge(pool_df, adress_df, on='adress', how='left')
pool_df['adress'] = pool_df['adress'].str.rstrip()
pool_df['adress'] = pool_df['adress'].str.lstrip()
pool_df.drop(columns=['distance'], inplace=True)
# Join the property-type register and keep only single-family houses (VILLA).
fastighetstyp_df = pd.read_csv(r'P:\Workspace\pool_detection\EDP_alla.csv')
fastighetstyp_df = fastighetstyp_df.loc[fastighetstyp_df['Faktgrupp'] == 'MALMÖ']
fastighetstyp_df = fastighetstyp_df[['Anladress', 'Anlkat']]
fastighetstyp_df.rename(columns={'Anladress':'adress', 'Anlkat':'Fastighetstyp'}, inplace=True)
fastighetstyp_df['adress'] = fastighetstyp_df['adress'].str.upper()
fastighetstyp_df['adress'] = fastighetstyp_df['adress'].str.rstrip()
fastighetstyp_df['adress'] = fastighetstyp_df['adress'].str.lstrip()
pool_df = pd.merge(pool_df, fastighetstyp_df, on='adress', how='left')
pool_df = pool_df.loc[pool_df['Fastighetstyp'] == 'VILLA']
pool_df.drop_duplicates(inplace=True)
# Convert the remaining coordinates to WGS84 (EPSG:4326) for export.
x_list = pool_df['Xkoord'].to_list()
y_list = pool_df['Ykoord'].to_list()
x_coord_4326 = []
y_coord_4326 = []
for i in range(len(x_list)):
    coords_4326 = mytransform(x_list[i], y_list[i])
    x_coord_4326.append(coords_4326[0])
    y_coord_4326.append(coords_4326[1])
pool_df['Xkoord'] = x_coord_4326
pool_df['Ykoord'] = y_coord_4326
pool_df.drop(columns=['Fastighetstyp'], inplace=True)
pool_df.to_csv(r'P:\Workspace\pool_detection\adresser_med_pool_malmö_075.csv', encoding='latin', index=False) | VASYD-SOU/pool_detection | pool_coordinates.py | pool_coordinates.py | py | 3,720 | python | en | code | 0 | github-code | 13 |
7006543605 | from .base_page import BasePage
from .locators import BasketPageLocators
class BasketPage(BasePage):
    """Page object for the basket (cart) page."""

    def get_products_in_basket(self):
        """Return the text of every product element currently in the basket."""
        elements = self.browser.find_elements(*BasketPageLocators.ITEMS_IN_BASKET)
        return [element.text for element in elements]
17048375564 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainTwcUserinfoMatchModel(object):
    """Data model carrying alipay_user_id, call_no_hash, unify_no and unify_no_hash."""

    # Field names serialised by to_alipay_dict / from_alipay_dict, in output order.
    _FIELDS = ('alipay_user_id', 'call_no_hash', 'unify_no', 'unify_no_hash')

    def __init__(self):
        self._alipay_user_id = None
        self._call_no_hash = None
        self._unify_no = None
        self._unify_no_hash = None

    @property
    def alipay_user_id(self):
        return self._alipay_user_id

    @alipay_user_id.setter
    def alipay_user_id(self, value):
        self._alipay_user_id = value

    @property
    def call_no_hash(self):
        return self._call_no_hash

    @call_no_hash.setter
    def call_no_hash(self, value):
        self._call_no_hash = value

    @property
    def unify_no(self):
        return self._unify_no

    @unify_no.setter
    def unify_no(self, value):
        self._unify_no = value

    @property
    def unify_no_hash(self):
        return self._unify_no_hash

    @unify_no_hash.setter
    def unify_no_hash(self, value):
        self._unify_no_hash = value

    def to_alipay_dict(self):
        """Serialise every truthy field, recursing into nested alipay models."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for a falsy input."""
        if not d:
            return None
        o = AnttechBlockchainTwcUserinfoMatchModel()
        for name in AnttechBlockchainTwcUserinfoMatchModel._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/AnttechBlockchainTwcUserinfoMatchModel.py | AnttechBlockchainTwcUserinfoMatchModel.py | py | 2,535 | python | en | code | 241 | github-code | 13 |
42478298444 | #! /usr/bin/env python3
from __future__ import print_function
import argparse
import glob
import os
from datetime import date
import shutil
import sys
import subprocess
import logging
import click
from .log import get_logger
from .filename import parse_filename, format_filename
from .tags import get_tags, set_tags
def quote(s):
    """Shell-quote *s*; falls back to ``pipes.quote`` on Pythons before 3.3."""
    if sys.version_info >= (3, 3):
        import shlex
        return shlex.quote(s)
    import pipes
    return pipes.quote(s)
# Module-wide logger via the project helper; DEBUG so dry-run output is visible.
level = logging.DEBUG
logger = get_logger("sort", level)
def move(f, basedir, dr=False):
    """Sort document *f* into ``basedir/<year>/<month>/`` and tag it.

    The destination year/month come from the date parsed out of the filename,
    falling back to the file's modification time. Finder tags applied to the
    destination are the union of the file's existing tags and the tags
    encoded in the filename.

    :param f: path of the file to move
    :param basedir: root of the sorted document tree
    :param dr: dry-run; log what would happen without touching the filesystem
    """
    if not os.path.exists(f):
        logger.error("%s not found", f)
        return
    logger.info(f)
    name_info = parse_filename(os.path.basename(f))
    if name_info.dt is not None:
        month = name_info.dt.month
        year = name_info.dt.year
    else:
        # No date in the filename: fall back to the modification time.
        mtime = date.fromtimestamp(os.path.getmtime(f))
        month = mtime.month
        year = mtime.year
        name_info.dt = mtime
    destdir = os.path.join(basedir, str(year), "{:02d}".format(month))
    if not dr:
        # Replaces os.system('mkdir -p ...'): no shell, no quoting pitfalls.
        os.makedirs(destdir, exist_ok=True)
    tags = get_tags(f) | name_info.tags
    dest = os.path.join(
        destdir, format_filename(name_info.name, name_info.dt, tags=tags)
    )
    logger.info("=> %s", dest)
    logger.debug("Setting finder tags to: %s", ", ".join(tags))
    if not dr:
        # Replaces os.system('mv ...'): safe for any filename and works
        # across filesystems.
        shutil.move(f, dest)
        set_tags(dest, tags)
@click.command("sort_docs")
@click.argument(
    "files",
    nargs=-1,
    type=click.Path(exists=True, readable=True, file_okay=True, dir_okay=False),
)
@click.option(
    "--outputdir",
    envvar="DOCUMENT_HELPERS_SORT_OUTPUT_DIR",
    required=True,
    type=click.Path(exists=True, writable=True, file_okay=False, dir_okay=True),
)
@click.option("--dry-run", "-s", is_flag=True)
def main(files, outputdir, dry_run):
    """Sort FILES into --outputdir; pass '-' as the only file to read paths from stdin."""
    try:
        if len(files) == 0:
            print("No files given, do nothing")
        if len(files) == 1 and files[0] == "-":
            # read the newline-separated file list from stdin
            files = sys.stdin.read().strip().split("\n")
        logger.debug("Destination: %s", outputdir)
        for f in files:
            move(f, outputdir, dr=dry_run)
    except Exception as e:
        # BUG FIX: removed the leftover debug print ("blub"); log lazily
        # instead of pre-formatting the message with %.
        logger.error("Caught exception: %s", e, exc_info=True)
if __name__ == "__main__":
main()
| paulgessinger/document_helpers | src/document_helpers/sort.py | sort.py | py | 2,576 | python | en | code | 0 | github-code | 13 |
9018666298 | import socket
# Server endpoint to talk to.
target_host = "127.0.0.1"
target_port = 8080
# Create the TCP socket object.
client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Pin the client to a fixed local address/port before connecting.
client.bind(('127.0.0.3', 8080))
# Connect to the server.
client.connect((target_host, target_port))
# Send data.
client.send(b"Data by TCP Client!!")
# Receive the response (up to 4096 bytes).
response = client.recv(4096)
print("success!")
print(response.decode())
client.close() | ryu1998/Security_Practice | base practice/tcp_client.py | tcp_client.py | py | 427 | python | ja | code | 0 | github-code | 13 |
42650474578 | # -*- coding: UTF-8 -*-
from collections import defaultdict
from datetime import datetime
from pprint import pprint

# db maps: last CSV column -> time of day -> list of the remaining columns
# (everything except the timestamp and the key column) for each row.
db = defaultdict(lambda: defaultdict(list))
with open('trd.csv', 'r') as f:
    for row in f:
        r = row.split(',')
        # NOTE(review): '%H' parses the same 01-12 values the original '%I'
        # did, and additionally accepts 00 and 13-23, so 24-hour timestamps
        # no longer raise ValueError.
        t = datetime.strptime(r[0].split('.')[0], '%H:%M:%S').time()
        key = r[3].replace('\n', '')
        payload = [value for idx, value in enumerate(r) if idx not in (0, 3)]
        db[key][t].append(payload)

# For every key, pick the time bucket with the most rows (first bucket wins
# on ties, matching insertion order like the original strict-> comparison).
ans = {}
for b, per_time in db.items():
    best_time, best_rows = max(per_time.items(), key=lambda item: len(item[1]))
    ans[b] = {'quantity': len(best_rows), 'time': best_time}
pprint(ans)

# Aggregate the winning quantities per time across all keys.
M = {}
for d in ans.values():
    M[d['time']] = M.get(d['time'], 0) + d['quantity']
pprint(M)
| EvgeniyUS/dataParsing | trd.py | trd.py | py | 930 | python | en | code | 0 | github-code | 13 |
32859296908 | #!/usr/bin/env python3
import re
from urllib.parse import unquote, urlparse, parse_qs
from html import unescape
from .. import Unit
from ...lib.decorators import unicoded
class urlguards(Unit):
"""
Restores the original URLs from their 'protected' versions as generated by
Outlook protection and ProofPoint.
"""
@unicoded
def process(self, data: str) -> str:
def proofpoint_replacer(match):
self.log_info('proofpoint match:', match.group(1))
argmatch = re.search(r'u=(.+?)&', match.group(2))
if not argmatch:
self.log_warn('not able to translate unexpected proofpoint format:', match)
return match.group(0)
encoded = argmatch.group(1)
if match.group(1) == '2':
encoded = encoded.translate(str.maketrans('-_', '%/'))
return unescape(unquote(encoded))
def outlook_replacer(match):
result = match.group(0)
self.log_info('outlook match:', result)
parsed = urlparse(result)
params = parse_qs(parsed.query)
try:
result = unquote(params['url'][0])
except Exception:
pass
return result
data = re.sub(
r'https?://urldefense.proofpoint.com/v([12])/url([/_=?#&.,\w\%\-]+)',
proofpoint_replacer,
data
)
data = re.sub(
r'https?://\w+.safelinks.protection.outlook.com/([/_=\?#&.,\w\%\-]+)',
outlook_replacer,
data
)
return data
| chubbymaggie/refinery | refinery/units/pattern/urlguards.py | urlguards.py | py | 1,609 | python | en | code | null | github-code | 13 |
1362431235 | # -*-coding:Utf-8 -*
#Tests de conditions
#mon_age = 5
#if mon_age > 20:
# print("Tu as bien grandi!")
#elif mon_age >= 16:
# print("et tu est meme presque majeur")
# if mon_age == 17:
# print("Well done!!")
#else:
# print("Okayyyy")
#Test de predicat
#age = 20
#majeur = False
#if age >= 18:
# majeur == True
#Test and or not
#age = 19
#if age < 18 or age > 21:
# print("age ok")
#else:
# print("not at all")
#Test de is not
#majeur = False
#if majeur is not True:
# print("pas bon")
#else:
# print("okkkk")
# Programme année Bissextile
#annee = input("Merci de saisir une année : ")
#result4 = int(annee) / 4
#result100 = int(annee) / 100
#result400 = int(annee) / 400
#if result4.is_integer() and result100.is_integer() and result400.is_integer():
# print(result4)
# print(result100)
# print(result400)
# print("L\' Annee est bien Bissextile")
#else:
# print(result4)
# print(result100)
# print(result400)
# print("L\' Annee n\'est pas Bissextile")
# Variant leap-year program: keep prompting until the input is an integer.
annee = input("Merci de saisir une année : ")
while True:
    try:
        valeur = int(annee)
        break
    except ValueError:
        # BUG FIX: the re-prompted value was previously discarded without
        # ever being checked; loop until the input actually parses.
        annee = input("Mauvaise saisie, merci de saisir un chiffre :")
# Leap year: divisible by 400, or by 4 but not by 100.
if valeur % 400 == 0 or (valeur % 4 == 0 and valeur % 100 != 0):
    print(annee)
    print("L\' Annee est bien Bissextile")
else:
    print(annee)
    print("L\' Annee n\'est pas Bissextile")
| Gilloufcr/MacGyvrer | Tests_Cours/cours.py | cours.py | py | 1,442 | python | fr | code | 0 | github-code | 13 |
17053042714 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InsMktObjectDTO(object):
    """Data model carrying an object id and its type."""

    # Field names serialised by to_alipay_dict / from_alipay_dict, in order.
    # (Note: 'type' mirrors the wire format and intentionally shadows nothing
    # at module level, only the builtin name inside callers that misuse it.)
    _FIELDS = ('obj_id', 'type')

    def __init__(self):
        self._obj_id = None
        self._type = None

    @property
    def obj_id(self):
        return self._obj_id

    @obj_id.setter
    def obj_id(self, value):
        self._obj_id = value

    @property
    def type(self):
        return self._type

    @type.setter
    def type(self, value):
        self._type = value

    def to_alipay_dict(self):
        """Serialise every truthy field, recursing into nested alipay models."""
        params = dict()
        for name in self._FIELDS:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a plain dict; returns None for a falsy input."""
        if not d:
            return None
        o = InsMktObjectDTO()
        for name in InsMktObjectDTO._FIELDS:
            if name in d:
                setattr(o, name, d[name])
        return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/InsMktObjectDTO.py | InsMktObjectDTO.py | py | 1,236 | python | en | code | 241 | github-code | 13 |
28599897220 |
from numpy import *
import astropy.io.fits as pyfits
# Set EXPTIME=1 in the primary header of eight numbered FITS products
# (image, PSF and rmsSQ variants), writing each change back in place.
# NOTE(review): the reason for forcing EXPTIME to 1 isn't visible here —
# presumably to normalise pixel values per unit exposure; confirm.
for i in range(8):
    hdu=pyfits.open('non_drizzled-image-{0}.fits'.format(i+1),mode = 'update')
    hdu[0].header["EXPTIME"]=1
    hdu.flush()
    psf=pyfits.open('non_drizzled_psf-{0}.fits'.format(i+1),mode = 'update')
    psf[0].header["EXPTIME"]=1
    psf.flush()
    hdu=pyfits.open('rmsSQ-{0}.fits'.format(i+1),mode = 'update')
    hdu[0].header["EXPTIME"]=1
    hdu.flush()
| dartoon/my_code | projects/Sim_HST_JWST/drizzle_F160W_temp/header.py | header.py | py | 411 | python | en | code | 0 | github-code | 13 |
38755701782 | # coding=utf-8
# author= YQZHU
from django.conf.urls import url, include
from . import crawler_views
urlpatterns = [
url(r'^keywords$', crawler_views.list_keywords.as_view(), name='keywords-list'),
url(r'^keywords/add$', crawler_views.keyword_add.as_view(), name='keyword-add'),
url(r'keyword/(?P<pk>[0-9]+)/delete/$', crawler_views.keyword_delete.as_view(), name='keyword-delete'),
] | lianhuness/django1 | crawler/crawler_urls.py | crawler_urls.py | py | 404 | python | en | code | 0 | github-code | 13 |
41419137844 | import warnings
import jax
import jax.numpy as jnp
import flax
import numpy as np
from jax.experimental import PartitionSpec as P
from jax.experimental.compilation_cache import compilation_cache as cc
from transformers import (
AutoTokenizer,
GenerationConfig
)
from . import FlaxCodeGenRLForCausalLM, CodeGenRLConfig
from leti.utils.jax.checkpoints import Checkpointer
from leti.utils.jax.train_state import InferenceState
from leti.utils.jax.partitioning import PjitPartitioner
# Persist compiled XLA executables between runs.
cc.initialize_cache("/tmp/jax_cache")
# Silence all warnings everywhere, then restore default filtering on the
# first process only, so multi-host runs emit each warning once.
warnings.filterwarnings("ignore")
warnings.filterwarnings("ignore", category=ResourceWarning)
if jax.process_index() == 0:
    warnings.filterwarnings("default")
def head_print(*args, **kwargs):
    """print() that is a no-op on every JAX process except the first host."""
    if jax.process_index() != 0:
        return
    print(*args, **kwargs)
# 2D parameter and activation partitioning: maps T5X-style logical axis
# names to physical mesh axes ('data'/'model'); None leaves the axis
# unsharded (replicated along the mesh).
logical_axis_rules_full = [
    ('batch', 'data'),
    ('mlp', 'model'),
    ('heads', 'model'),
    ('vocab', 'model'),
    # shard both activations and weight matrices on the remaining available axis
    ('embed', 'model'),
    ('embed', 'data'),
    ('kv', None),
    ('joined_kv', None),
    ('relpos_buckets', None),
    ('abspos_buckets', None),
    ('length', None),
    ('layers', None),
    ('stack', None),
    ('mlp_activations', None),
]
class Inferencer:
    """Pjit-sharded inference/generation wrapper around a FlaxCodeGenRL model.

    Can either load a model from a HF checkpoint + T5X param path, or wrap
    an already-constructed model/partitioner/state_axes (training-time use).
    """
    def __init__(
        self,
        hf_ckpt=None,
        t5x_path=None,
        num_partitions=4,
        generation_kwargs: dict = {},
        # When running training
        config: None = None,
        tokenizer: None = None,
        model: None = None,
        partitioner: None = None,
        state_axes: None = None,
    ):
        # NOTE(review): `generation_kwargs` is accepted but never used below,
        # and its mutable default ({}) would be shared across calls if it
        # ever were — confirm whether it can be dropped.
        # Only required for loading from checkpoint
        self.hf_ckpt = hf_ckpt
        self.path = t5x_path
        # Config
        if config is None:
            config = CodeGenRLConfig.from_pretrained(self.hf_ckpt)
        else:
            # NOTE(review): this branch is a no-op self-assignment.
            config = config
        # Tokenizer: left-padding with a guaranteed pad token, as required by
        # the fixed-shape generation path below.
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(self.hf_ckpt)
            self.tokenizer.padding_side = "left"
            if self.tokenizer.pad_token is None:
                self.tokenizer.pad_token = self.tokenizer.eos_token
        else:
            self.tokenizer = tokenizer
            assert self.tokenizer.pad_token is not None
            assert self.tokenizer.padding_side == "left"
        # Partitioner
        if partitioner is None:
            self.partitioner = PjitPartitioner(
                num_partitions=num_partitions,
                logical_axis_rules=logical_axis_rules_full
            )
        else:
            self.partitioner = partitioner
        # State axes
        if state_axes is not None:
            self.params_spec = state_axes.params
        # Model
        if model is None:
            self.model = FlaxCodeGenRLForCausalLM(config, _do_init=False, dtype=jnp.bfloat16)
            # Only consider init state for partitioning when model is not provided
            def init_state():
                rng = jax.random.PRNGKey(42)
                initial_vars = self.model.init_weights(rng, input_shape=(1, 1))
                return InferenceState.create(initial_vars)
            # eval_shape traces shapes/dtypes only — no weights are allocated.
            state_shapes = jax.eval_shape(init_state)
            self.params_spec = self.partitioner.get_mesh_axes(state_shapes).params
            # Instantiate checkpointer
            self.checkpointer = Checkpointer(
                state_shapes,
                self.partitioner,
                self.path,
                use_gda=True,
                restore_dtype=jnp.bfloat16,
                save_dtype=jnp.bfloat16
            )
        else:
            self.model = model
            assert partitioner is not None, "Partitioner must be provided when model is provided"
            assert state_axes is not None, "State axes must be provided when model is provided"
        # Generation config
        self.extra_generation_kwargs = {
            "pad_token_id": self.tokenizer.pad_token_id,
            "eos_token_id": self.tokenizer.eos_token_id,
        }
        self.init_fn()
    def init_fn(self):
        """Build and cache the pjit-partitioned forward (p_infer) and
        generation (p_generate) functions."""
        def infer(params, input_ids, attention_mask):
            # plain forward pass (logits), despite the generation-style inputs
            output = self.model(
                input_ids,
                attention_mask=attention_mask,
                params=params
            )
            return output
        self.p_infer = self.partitioner.partition(
            infer,
            in_axis_resources=(
                self.params_spec,
                self.partitioner.data_partition_spec,
                self.partitioner.data_partition_spec,
            ),
            out_axis_resources=self.partitioner.data_partition_spec
        )
        def generate(
            params,
            input_ids,
            attention_mask,
            prng_key,
            generation_config: dict
        ):
            generation_config = GenerationConfig(**generation_config)
            output_ids = self.model.generate(
                input_ids,
                generation_config=generation_config,
                attention_mask=attention_mask,
                params=params,
                prng_key=prng_key
            ).sequences
            return output_ids
        self.p_generate = self.partitioner.partition(
            generate,
            in_axis_resources=(
                self.params_spec,
                self.partitioner.data_partition_spec,
                self.partitioner.data_partition_spec,
                None,
                # ignore generation_config since it is a compile-time constant
            ),
            static_argnums=(4,),
            out_axis_resources=self.partitioner.data_partition_spec
        )
    def load_model_and_params(self):
        """Restore model parameters from the T5X checkpoint at ``self.path``."""
        # load state
        assert self.path is not None, "Path must be provided when loading from checkpoint"
        self.loaded_state = self.checkpointer.restore(path=self.path)
    def generate(
        self,
        inputs,
        params=None,
        generation_rng=None,
        generation_config={},
        only_decode_generation=False
    ):
        """Tokenize (if given raw strings), run sharded generation and decode.

        NOTE(review): this skips generation when the prompt is strictly
        longer than max_length, while generate_fast uses >= — confirm which
        comparison is intended.
        """
        generation_config = flax.core.freeze({
            **generation_config,
            **self.extra_generation_kwargs
        }) # make generation config hashable
        if isinstance(inputs, list):
            inputs = self.tokenizer(
                inputs,
                return_tensors="jax",
                padding=True,
                pad_to_multiple_of=8,
            )
        if params is None:
            params = self.loaded_state.params
        assert params is not None, "No params provided"
        if inputs["input_ids"].shape[1] > generation_config["max_length"]:
            gen_ids = inputs["input_ids"]
        else:
            # This will auto-magically run in mesh context
            gen_ids = self.p_generate(
                params,
                inputs["input_ids"],
                inputs["attention_mask"],
                generation_rng,
                generation_config
            )
        # convert jax.Array to numpy.ndarray
        # This will block jax's async dispatch! use with caution
        gen_ids = np.array(gen_ids)
        if only_decode_generation:
            input_seq_len = inputs["input_ids"].shape[1]
            gen_ids = gen_ids[:, input_seq_len:]
        generated_text = self.tokenizer.batch_decode(gen_ids, skip_special_tokens=True)
        return generated_text
    def generate_fast(
        self,
        inputs,
        params=None,
        generation_rng=None,
        generation_config={}
    ):
        """Like generate() but takes pre-tokenized inputs and returns raw ids
        without blocking on device-to-host transfer or decoding."""
        generation_config = flax.core.freeze({
            **generation_config,
            **self.extra_generation_kwargs
        }) # make generation config hashable
        if params is None:
            params = self.loaded_state.params
        assert params is not None, "No params provided"
        if inputs["input_ids"].shape[1] >= generation_config["max_length"]:
            gen_ids = inputs["input_ids"]
        else:
            # This will auto-magically run in mesh context
            gen_ids = self.p_generate(
                params,
                inputs["input_ids"],
                inputs["attention_mask"],
                generation_rng,
                generation_config
            )
        return gen_ids
    def infer(self, inputs, params=None):
        """Sharded forward pass; tokenizes *inputs* first if given raw strings."""
        if isinstance(inputs, list):
            inputs = self.tokenizer(
                inputs,
                return_tensors="jax",
                padding=True,
                pad_to_multiple_of=8,
            )
        if params is None:
            params = self.loaded_state.params
        assert params is not None, "No params provided"
        # This will auto-magically run in mesh context
        outputs = self.p_infer(
            params,
            inputs["input_ids"],
            inputs["attention_mask"]
        )
        return outputs
| xingyaoww/LeTI | leti/models/jax_inferencer.py | jax_inferencer.py | py | 8,947 | python | en | code | 58 | github-code | 13 |
7832621083 | # -.- coding:latin1 -.-
# @author : Nicolas
""" Ce code analyse les données de l'expérience maison sur le pendule et
fourni les graphiques et les résultats voulus
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
def f(x, a, b):
    """Straight-line model ``a*x + b`` in the signature curve_fit expects."""
    return b + a * x
# Pendulum parameters: length l (m), mass m (kg), bob diameter d (m),
# each with its measurement uncertainty (dL, dM, dD).
l = 1.05
dL = 0.05
m = 137e-3
dM = 0.5e-3
d = 5.5e-2
dD = 0.5e-2
# Part (1) of the lab: duration of one oscillation vs starting amplitude.
a1 = np.array([5, 10, 15, 20, 25, 30])
dA1 = 0.5
t1 = np.array([1.78, 2, 1.9, 1.95, 2.04, 1.78])
dT1 = 0.3
plt.figure(1)
plt.plot(a1, t1, '.', label="Points expérimentaux", color='b')
# Cosmetic tweaks: error bars, axis labels, legend.
plt.errorbar(a1, t1, dT1, dA1, ls='None', color='b')
plt.xlabel("Amplitude de départ de l'oscillation (degré)")
plt.ylabel("Durée d'une oscillation (s)")
plt.legend()
plt.savefig("LabPenduleFig1.png")
# Part (2) of the lab: duration of 10 oscillations vs starting amplitude.
a2 = a1
dA2 = dA1
t2 = np.array([20.63, 20.64, 20.79, 20.85, 20.9, 21.17])
dT2 = dT1
plt.figure(2)
plt.plot(a2, t2, '.', label="Points expérimentaux", color='b')
# Cosmetic tweaks: error bars, axis labels, legend.
plt.errorbar(a2, t2, dT2, dA2, ls='None', color='b')
plt.xlabel("Amplitude de départ de l'oscillation (degré)")
plt.ylabel("Durée de 10 oscillations (s)")
plt.legend()
plt.savefig("LabPenduleFig2.png")
# Amplitude (degrees) recorded every 10 oscillations, with timestamps t3 (s).
a3 = np.array([30, 22, 18, 14, 13, 11, 10, 8, 7, 6, 6, 5, 5, 4, 4, 4, 3, 3, 3,
               2, 2])
dA3 = 0.7
t3 = np.array([0., 20.57, 41.67, 62.56, 83.54, 104.52, 125.47, 146.34, 167.18,
               188.06, 209.02, 229.82, 250.61, 271.45, 292.31, 313.1, 333.92, 354.74,
               375.65, 396.29, 417.25])
dT3 = dT2
# Height of the bob above its rest position for each amplitude.
xExp = l * (1 - np.cos(a3 * np.pi / 180))
dXExp = np.sqrt(dL * (1 - np.cos(a3 * np.pi / 180)) ** 2 + (dA3 * np.pi / 180 *
                l * (1 + np.sin(a3 * np.pi / 180))) ** 2)
# Experimental damping coefficient beta for each measurement.
betaExp = -np.log(xExp[1:] / xExp[0]) / t3[1:]
dBetaExp = np.sqrt((-xExp[0] * np.log(xExp[1:]) * dXExp[1:] / t3[1:]) ** 2 +
                   (-np.log(1 / xExp[0]) * dXExp[1:] / (t3[1:] * xExp[1:])) ** 2 +
                   (-np.log(xExp[1:] / xExp[0]) * dT3 / t3[1:] ** 2) ** 2)
# Average the betas (std used as the uncertainty).
betaExpMoy = np.mean(betaExp)
dBetaExpMoy = np.std(betaExp)
xExp = xExp[0] * np.exp(-betaExpMoy * t3)
# aTh is chained here but reassigned below; only aExp keeps this value.
aExp = aTh = np.arccos((l - xExp) / l) * 180 / np.pi
print(betaExpMoy, dBetaExpMoy)
# Theoretical drag coefficient b = 3*pi*d*eta
# NOTE(review): 1.7e-5 is presumably the dynamic viscosity of air — confirm.
b = 3 * np.pi * d * 1.7e-5
dB = np.sqrt((3 * np.pi * 1.7e-5 * dD) ** 2 + (3 * np.pi * d * 0.2e-5) ** 2)
# Theoretical damping coefficient beta = b / (2m).
betaTh = b / (2 * m)
dBetaTh = np.sqrt((b * dM / (2 * m ** 2)) ** 2 + (dB / (2 * m)) ** 2)
print(betaTh, dBetaTh)
# Theoretical heights from the exponential decay with betaTh.
xTh = xExp[0] * np.exp(-betaTh * t3)
dXTh = np.sqrt((-xExp[0] * t3 * dBetaTh * np.exp(-betaTh * t3)) ** 2 +
               (-xExp[0] * dT3 * betaTh * np.exp(-betaTh * t3) ** 2) + (dXExp[0] *
               np.exp(-betaTh * t3)) ** 2)
# Theoretical amplitudes back-computed from the heights.
aTh = np.arccos((l - xTh) / l) * 180 / np.pi
dATh = np.sqrt((((1 - 1 / l) * dXTh * np.sqrt(1 - ((l - xTh) / l) ** 2) **
               -1) ** 2 + ((1 - xTh / l ** 2) * dL * np.sqrt(1 - ((l - xTh) / l) ** 2) **
               -1) ** 2) * 180 / np.pi)
plt.figure(3)
plt.plot(t3, a3, '.', label="Points expérimentaux", color='b')
plt.plot(t3, aTh, '-', label=r"Courbe avec $\beta_{th}$", color='r')
plt.plot(t3, aExp, '-', label=r"Courbe avec $\beta_{exp}$", color='y')
# Cosmetic tweaks: error bars, axis labels, legend.
plt.errorbar(t3, a3, dA3, dT3, ls='None', color='b')
plt.xlabel("Temps écoulé (s)")
plt.ylabel("Amplitude de l'oscillation (degrés)")
plt.legend()
plt.savefig("LabPenduleFig3.png")
# Angular frequency w1 of 10 oscillations per interval.
# NOTE(review): w1 is overwritten (not appended) on each iteration, so only
# the last interval is used and w1 ends up a scalar; likely intended
# np.append — confirm against the lab report before changing results.
w1 = np.array([])
for i in range(0, len(t3) - 1):
    w1 = 10 / (t3[i + 1] - t3[i])
# Undamped angular frequency w0 from w1 and the damping coefficients.
w0 = np.sqrt(w1 ** 2 + betaExp ** 2)
w0Moy = np.mean(w0)
dW0Moy = np.std(w0)
# Experimental gravitational acceleration g = l * w0^2.
g = l * w0Moy ** 2
dG = np.sqrt((w0Moy ** 2 * dL) ** 2 + (2 * l * w0Moy * dW0Moy) ** 2)
print(w0Moy, dW0Moy)
print(g, dG)
plt.show() | dslap0/Universite-Python | PHY1501/LabPendule.py | LabPendule.py | py | 4,083 | python | fr | code | 0 | github-code | 13 |
5823390151 | import azure.functions as func
from azure.identity import DefaultAzureCredential
from azure.mgmt.storage import StorageManagementClient
from azure.mgmt.storage.models import StorageAccountCreateParameters
def main(req: func.HttpRequest) -> func.HttpResponse:
    """HTTP trigger: create a Standard_RAGRS StorageV2 storage account.

    Expects 'nomeSito' (storage-account name) and 'gruppoRisorse' (resource
    group) either as query parameters or in the JSON body; parameters missing
    from the query string are filled in from the body.
    """
    nomeSito = req.params.get('nomeSito')
    gruppoRisorse = req.params.get('gruppoRisorse')
    # BUG FIX: fall back to the JSON body when *either* parameter is missing
    # (previously the body was only consulted when both were absent).
    if not nomeSito or not gruppoRisorse:
        try:
            req_body = req.get_json()
        except (ValueError, KeyError):
            return func.HttpResponse(body="ERRORE -> Mancano i parametri nomeSito e gruppoRisorse", status_code=400)
        else:
            # Keep any value already supplied via the query string.
            nomeSito = nomeSito or req_body.get('nomeSito')
            gruppoRisorse = gruppoRisorse or req_body.get('gruppoRisorse')
    if not (nomeSito and gruppoRisorse):
        return func.HttpResponse(body="ERRORE -> Mancano i parametri nomeSito e gruppoRisorse", status_code=400)
    credential = DefaultAzureCredential()
    # NOTE(review): hard-coded subscription id; consider app settings instead.
    subscription_id = "6a6034ce-5623-4822-8c31-de299765adbe"
    resource_group_name = gruppoRisorse
    storage_account_name = nomeSito
    storage_client = StorageManagementClient(credential, subscription_id)
    params = StorageAccountCreateParameters(
        sku={"name": "Standard_RAGRS"},
        kind="StorageV2",
        location="westeurope",
        minimum_tls_version="TLS1_2",
        allow_blob_public_access=True,
        allow_shared_key_access=True,
        enable_https_traffic_only=True,
        dns_endpoint_type="Standard",
        public_network_access="Enabled",
        access_tier="Hot",
        encryption={
            "services": {
                "blob": {"enabled": True},
                "file": {"enabled": True},
                "table": {"enabled": True},
                "queue": {"enabled": True},
            },
            "key_source": "Microsoft.Storage",
        },
        supports_https_traffic_only=True
    )
    # begin_create returns a poller; .result() blocks until provisioning ends.
    storage_client.storage_accounts.begin_create(
        resource_group_name,
        storage_account_name,
        params
    ).result()
    return func.HttpResponse(body="SUCCESSO", status_code=200)
42040326279 |
from gtts import gTTS
import os
def t2s(text):
    """Speak *text* aloud: synthesize English speech with gTTS and play it."""
    speech = gTTS(text=text, lang='en', slow=False)
    speech.save("welcome.mp3")
    os.system("nvlc welcome.mp3")
t2s(input()) | yazidmarzuk/SchoolAR | shibu2.py | shibu2.py | py | 243 | python | en | code | 0 | github-code | 13 |
24772158924 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dataloader.cifar10
import dataloader.dogs
import dcgan
def train_cifar10():
    """Train a DCGAN on CIFAR-10 images, always persisting logs and weights."""
    print('*** DCGAN trained with cifar10 ***')
    images = dataloader.cifar10.load('cifar10')['img']
    gan = dcgan.DCGAN(images)
    try:
        gan.train(steps=3000)
    except Exception as err:
        print(err)
    finally:
        # Keep whatever progress was made, even after a failure.
        gan.save_log('train_log.pickle')
        gan.G.save('generator.h5')
def train_dogs():
    """Train a DCGAN on the dogs dataset, always persisting logs and weights."""
    print('*** DCGAN trained with dogs ***')
    images = dataloader.dogs.load('dogs')
    gan = dcgan.DCGAN(images)
    try:
        gan.train(steps=10000, save_interval=500)
    except Exception as err:
        print(err)
    finally:
        # Keep whatever progress was made, even after a failure.
        gan.save_log('train_log.pickle')
        gan.G.save('generator.h5')
if __name__ == '__main__':
train_dogs()
# train_cifar10()
| Linyxus/dcgan | main.py | main.py | py | 840 | python | en | code | 4 | github-code | 13 |
# Demo of a default parameter value.
def area(radius, pi=3.14):
    """Return the circle area; pi defaults to the coarse value 3.14."""
    return pi * radius ** 2
def main():
    """Demonstrate positional, keyword and default-argument calls to area()."""
    rvalue, pivalue = 10.5, 3.14
    # both arguments positional
    print("Atre of circle : ", area(rvalue, pivalue))
    # both arguments by keyword
    print("Area of circle : ", area(radius=rvalue, pi=pivalue))
    # positional radius, pi left at its default
    print("Area of circle : ", area(10.5))
    # keyword radius, pi left at its default
    print("Area of circle : ", area(radius=10.5))
    # keyword arguments given in swapped order
    print("Area of circle : ", area(pi=7.10, radius=10.5))
if __name__=="__main__":
main() | Shantanu-gilbile/Python-Programs | default.py | default.py | py | 746 | python | en | code | 0 | github-code | 13 |
74815216336 |
import argparse
class TaskQueueServer:
    """Task-queue server skeleton; the custom protocol is not implemented yet."""
    def __init__(self, ip, port, path, timeout):
        # ip/port: address to listen on; path: checkpoint directory;
        # timeout: max seconds a task may stay checked out after a GET.
        pass
    def run(self):
        # Placeholder for the accept/serve loop.
        pass
def parse_args(argv=None):
    """Parse command-line options for the task-queue server.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Accepting an explicit list makes the function unit-testable
            (backward compatible: existing ``parse_args()`` calls still work).

    Returns:
        argparse.Namespace with ``port``, ``ip``, ``path`` and ``timeout``.
    """
    parser = argparse.ArgumentParser(
        description='This is a simple task queue server with custom protocol')
    parser.add_argument('-p', action="store", dest="port", type=int,
                        default=5555, help='Server port')
    parser.add_argument('-i', action="store", dest="ip", type=str,
                        default='0.0.0.0', help='Server ip adress')
    parser.add_argument('-c', action="store", dest="path", type=str,
                        default='./', help='Server checkpoints dir')
    parser.add_argument('-t', action="store", dest="timeout", type=int,
                        default=300, help='Task maximum GET timeout in seconds')
    return parser.parse_args(argv)
if __name__ == '__main__':
args = parse_args()
server = TaskQueueServer(**args.__dict__)
server.run()
| VadimPushtaev/applied-python | homeworks/task_queue/server.py | server.py | py | 1,063 | python | en | code | 86 | github-code | 13 |
31202975330 | from queue import PriorityQueue
class Edge(object):
    """A weighted, directed edge: destination vertex key plus edge weight."""
    def __init__(self, v, w):
        self.dst = v  # key of the destination vertex
        self.weight = w  # edge weight
# A graph vertex: its key plus the list of outgoing Edge objects.
class Vertex(object):
    def __init__(self, u):
        self.key = u  # vertex identifier
        self.adj_list = []  # outgoing edges (Edge instances)
# Priority-queue entry for Dijkstra: a vertex key with its tentative distance.
class Vex(object):
    def __init__(self, u, dist):
        self.key = u  # vertex identifier
        self.dist = dist  # tentative distance from the source
    # Order entries by distance so PriorityQueue pops the closest vertex first.
    def __lt__(self, other):
        return self.dist < other.dist
# Adjacency-list representation of a directed, weighted graph.
class Graph(object):
    def __init__(self, v_num):
        """Allocate vertices 0..v_num so keys can index vertex_list directly."""
        self.vertex_num = v_num
        self.vertex_list = [Vertex(k) for k in range(v_num + 1)]

    def add_edge(self, src, dst, weight):
        """Insert a directed edge src -> dst carrying the given weight."""
        self.vertex_list[src].adj_list.append(Edge(dst, weight))

    def output(self):
        """Print each vertex key followed by its (destination, weight) pairs."""
        for vertex in self.vertex_list:
            print(vertex.key)
            for edge in vertex.adj_list:
                print((edge.dst, edge.weight))
# Dijkstra's shortest-path algorithm with lazy deletion: O(E log V).
def Dijkstra(graph, src, dst):
    """Print the shortest distance from src to dst, or -1 if unreachable."""
    INFINITY = (1 << 31) - 1
    dist = [INFINITY] * (graph.vertex_num + 1)
    visited = [0] * (graph.vertex_num + 1)
    dist[src] = 0

    pq = PriorityQueue()
    pq.put(Vex(src, dist[src]))
    while not pq.empty():
        vex = pq.get()
        if visited[vex.key]:
            continue  # stale entry left over from an earlier relaxation
        visited[vex.key] = 1
        if vex.key == dst:
            break  # destination settled; its distance is final
        # Relax every outgoing edge of the settled vertex.
        for edge in graph.vertex_list[vex.key].adj_list:
            candidate = dist[vex.key] + edge.weight
            if candidate < dist[edge.dst]:
                dist[edge.dst] = candidate
                pq.put(Vex(edge.dst, candidate))

    # Distances this large mean the destination was never reached.
    if dist[dst] > 100000000:
        print(-1)
    else:
        print(dist[dst])
def main():
    """Read the graph description from stdin, then run the shortest-path query."""
    # first line: vertex count, edge count, source and destination
    v_num, e_num, src, dst = map(int, input().strip().split())
    graph = Graph(v_num)
    # following e_num lines: one weighted edge each
    for _ in range(e_num):
        start, end, weight = map(int, input().strip().split())
        graph.add_edge(start, end, weight)
    Dijkstra(graph, src, dst)
if __name__ == "__main__":
main()
| fanweneddie/algorithm_lab | lab5/source/Dijkstra.py | Dijkstra.py | py | 2,419 | python | en | code | 0 | github-code | 13 |
5400105284 | # %%
import numpy as np
import torch
# Input (temp, rainfall, humidity)
inputs = np.array([[73, 67, 43],
[91, 88, 64],
[87, 134, 58],
[102, 43, 37],
[69, 96, 70]], dtype='float32')
# Targets (apples, oranges)
targets = np.array([[56, 70],
[81, 101],
[119, 133],
[22, 37],
[103, 119]], dtype='float32')
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
print(inputs)
print(targets)
# %%
""" ADD Weight(w) & Bias(b) """
w = torch.randn(2, 3, requires_grad=True)
b = torch.randn(2, requires_grad=True)
# Linear regression model using the module-level weights `w` and bias `b`.
def model(x):
    # `@` is matrix multiplication; w is (2, 3) so predictions are (N, 2)
    return x @ w.t() + b
print(w)
print(b)
# %%
preds = model(inputs)
print('preds:')
print(preds)
print('target:')
print(targets)
# %%
# Mean squared error loss.
def mse(t1, t2):
    """Mean of the squared element-wise differences between two tensors."""
    err = t1 - t2
    # numel() is the element count, so this is the mean of squared errors.
    return (err * err).sum() / err.numel()
loss = mse(preds, targets)
print(loss)
# %%
# compute gradients
loss.backward()
# %%
print(w)
print(w.grad)
print(b)
print(b.grad)
# %%
# reset the gradients to zero
w.grad.zero_()
b.grad.zero_()
print(w.grad)
print(b.grad)
# %%
# adjist weights & auto reset gradients
preds = model(inputs)
loss = mse(preds, targets)
loss.backward()
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
print(w)
print(b)
print(loss)
# %%
# train for 100 epochs
for i in range(100):
preds = model(inputs)
loss = mse(preds, targets)
loss.backward()
with torch.no_grad():
w -= w.grad * 1e-5
b -= b.grad * 1e-5
w.grad.zero_()
b.grad.zero_()
# calculate the final loss
preds = model(inputs)
loss = mse(preds, targets)
print(loss)
print(preds)
print(targets)
| a23956491z/deep-learning-research | python/pytorch-practice/linear_regression/linear_regression.py | linear_regression.py | py | 1,904 | python | en | code | 0 | github-code | 13 |
70268966418 | from six import iteritems
import ducky.config
import ducky.devices.terminal
import ducky.errors
import ducky.log
import ducky.machine
from .. import TestCase, mock, common_run_machine
def common_case(**kwargs):
  """Build a machine config wiring a standalone PTY terminal to keyboard and
  tty backends, boot the machine, and return the terminal device.

  Extra keyword arguments are forwarded as options of the terminal section.
  """
  machine_config = ducky.config.MachineConfig()
  input_section = machine_config.add_device('input', 'ducky.devices.keyboard.Backend')
  output_section = machine_config.add_device('output', 'ducky.devices.tty.Backend')
  terminal_section = machine_config.add_device('terminal', 'ducky.devices.terminal.StandalonePTYTerminal', input = input_section, output = output_section)
  # Cross-link backends with their master terminal and its frontends.
  machine_config.set(input_section, 'master', terminal_section)
  machine_config.set(output_section, 'master', terminal_section)
  machine_config.set(terminal_section, 'input', input_section + ':ducky.devices.keyboard.Frontend')
  machine_config.set(terminal_section, 'output', output_section + ':ducky.devices.tty.Frontend')
  for name, value in iteritems(kwargs):
    machine_config.set(terminal_section, name, value)
  # post_setup returning False stops the machine right after setup.
  M = common_run_machine(machine_config = machine_config, post_setup = [lambda _M: False])
  return M.get_device_by_name(terminal_section, klass = 'terminal')
class TestsStandalonePTYTerminal(TestCase):
  def test_sanity(self):
    """Boot then halt the terminal, checking that both child devices are
    driven and the PTY state is created and torn down."""
    t = common_case()
    # Replace the real frontends with autospecced mocks so boot()/halt()
    # calls can be asserted without touching real devices.
    t._input = mock.create_autospec(t._input)
    t._output = mock.create_autospec(t._output)
    t.boot()
    assert t._input.boot.called
    assert t._output.boot.called
    assert t.pttys is not None
    assert t.terminal_device is not None
    t.halt()
    assert t._input.halt.called
    assert t._output.halt.called
    assert t.pttys is None
    assert t.terminal_device is None
| happz/ducky-legacy | tests/devices/terminal.py | terminal.py | py | 1,655 | python | en | code | 5 | github-code | 13 |
393434243 | import sqlite3
import telebot
bot_token = '5961557186:AAFOKKlACzYLZ0PWxKCeu5KOqtIqDLMLhuw'
# NOTE(review): this token is committed to source control — move it to an
# environment variable and rotate the token.
bot = telebot.TeleBot(bot_token)
# (removed a stray no-op `5` expression statement that followed this line)
@bot.message_handler(commands=['start'])
def send_welcome(message):
    # /start entry point: ask for the person's first name (Arabic prompt).
    bot.reply_to(message, "ادخل الاسم الاول")
@bot.message_handler(func=lambda message: True)
def search_person(message):
    # Step 1: capture the first name, then ask for the father's name.
    search_first = message.text
    bot.reply_to(message, "ادخل اسم الاب")
    bot.register_next_step_handler(message, search_father, search_first)
def search_father(message, search_first):
    # Step 2: capture the father's name, then ask for the grandfather's name.
    search_father = message.text
    bot.reply_to(message, "ادخل اسم الجد")
    bot.register_next_step_handler(message, search_grand, search_first, search_father)
def search_grand(message, search_first, search_father):
    """Final step: find people matching all three names and list their family."""
    search_grand = message.text
    conn = sqlite3.connect('meaan.sqlite')
    try:
        c = conn.cursor()
        # Parameterized LIKE patterns: the previous f-string query allowed
        # SQL injection through the user-supplied names.
        c.execute(
            "SELECT * FROM PERSON WHERE p_first LIKE ? AND p_father LIKE ? AND p_grand LIKE ?",
            (f"%{search_first}%", f"%{search_father}%", f"%{search_grand}%"))
        matching_rows = c.fetchall()
        if matching_rows:
            # Fetch every person sharing a family number with the matches.
            fam_nos = [row[1] for row in matching_rows]
            placeholders = ','.join(['?'] * len(fam_nos))
            c.execute(f"SELECT * FROM PERSON WHERE fam_no IN ({placeholders})", fam_nos)
            rows = c.fetchall()
            results = "Results found:\n\n"
            for row in rows:
                results += f"الاسم الاول: {row[3]}, الاب: {row[4]}, الجد: {row[5]}, مواليد {row[7]}\n"
            bot.reply_to(message, results)
        else:
            bot.reply_to(message, "No results found.")
    finally:
        # Close the connection even when a query or reply raises.
        conn.close()
bot.polling()
| jobaeyyuiij/jojo | source.py | source.py | py | 1,636 | python | en | code | 0 | github-code | 13 |
11434178693 | from application_services.imdb_artists_resource import IMDBArtistResource
from application_services.UsersResource.user_service import UserResource, AddressResource
from application_services.imdb_users_resource import IMDBUserResource
from database_services.RDBService import RDBService as RDBService
from middleware import security
from flask import Flask, redirect, url_for, request, render_template, Response
from flask_dance.contrib.google import make_google_blueprint, google
from flask_cors import CORS
import json, os
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
app = Flask(__name__)
CORS(app)
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
os.environ['OAUTHLIB_RELAX_TOKEN_SCOPE'] = '1'
client_id = "126427133643-l3o225t3jkjie0rfud0971i29p4peeqn.apps.googleusercontent.com"
client_secret = "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"
app.secret_key = "supersekrit"
blueprint = make_google_blueprint(
client_id=client_id,
client_secret=client_secret,
scope=["profile", "email"]
)
app.register_blueprint(blueprint, url_prefix="/login")
g_bp = app.blueprints.get("google")
# @app.before_request
# def before_request():
# print("running before_request")
# print(request)
# result = security.security_check(request, google, g_bp)
# if not result:
# return redirect(url_for("google.login"))
@app.route("/", methods = ['GET'])
def hi():
    # Liveness check.
    return "Hello, World!"
@app.route("/index", methods = ['GET'])
def index():
    """Log the caller in via Google OAuth and return their user record.

    If no record matches the Google account's email, return the Google
    profile fields the client needs to register a new user instead.
    """
    if not google.authorized:
        return redirect(url_for("google.login"))
    google_data = google.get('/oauth2/v2/userinfo')
    assert google_data.ok, google_data.text
    # print(json.dumps(google_data, indent=2))
    # return "You are {email} on Google".format(email=google_data.json()["email"])
    #res = UserResource.get_by_template({"email":google_data.json()["email"]}) # return list of dict
    res = UserResource.get_by_template({"email":google_data.json()["email"]}) # return list of dict
    if len(res) == 0:
        # Unknown user: hand back the Google profile so the client can register.
        rsp = Response(json.dumps({
            "firstName": google_data.json()["given_name"],
            "lastName": google_data.json()["family_name"],
            "email":google_data.json()["email"]
        }, default=str), status=200, content_type="application/json")
    else:
        rsp = Response(json.dumps(res, default=str), status=200, content_type="application/json")
    return rsp
    # return render_template("index.html", email=google_data.json()["email"])
@app.route('/api/users', methods=['GET'])
def get_users():
    """Return a page of users (?limit=/&offset=), each with HATEOAS links."""
    limit = request.args.get('limit') or "10"
    offset = request.args.get('offset') or "0"
    users = UserResource.get_by_template(None, limit, offset)
    for user in users:
        user["links"] = [
            {"rel": "self", "href": f"/api/users/{user['id']}"},
            {"rel": "address", "href": f"/api/address/{user['address_id']}"},
        ]
    return Response(json.dumps(users, default=str), status=200,
                    content_type="application/json")
@app.route('/api/users/<prefix>', methods = ['GET'])
def get_users_resource(prefix):
    """Return a single user by id with self/address links; 404 if absent.

    The previous version indexed res[0] unconditionally, so an unknown id
    raised IndexError and surfaced as a 500.
    """
    res = UserResource.get_by_template({"id": prefix})
    if not res:
        return Response(json.dumps({"error": "user not found"}), status=404,
                        content_type="application/json")
    user = res[0]
    user["links"] = [
        {"rel": "self", "href": f"/api/users/{user['id']}"},
        {"rel": "address", "href": f"/api/address/{user['address_id']}"},
    ]
    return Response(json.dumps(user, default=str), status=200,
                    content_type="application/json")
@app.route('/api/address/<prefix>', methods=['GET'])
def get_address_resource(prefix):
    """Return the address rows matching the given address id."""
    matches = AddressResource.get_by_template({"address_id": prefix})
    return Response(json.dumps(matches, default=str), status=200,
                    content_type="application/json")
@app.route('/api/create', methods = ['POST'])
def create_user():
    """Create a user plus their address row from form fields.

    Required form fields: firstName, lastName, email, address, zip.
    Returns 400 when any field is missing (previously NULL columns were
    silently inserted).
    """
    firstName = request.form.get('firstName')
    lastName = request.form.get('lastName')
    email = request.form.get('email')
    address = request.form.get('address')
    zip_code = request.form.get('zip')
    if not all([firstName, lastName, email, address, zip_code]):
        return Response(json.dumps({"error": "missing required form fields"}),
                        status=400, content_type="application/json")
    # NOTE(review): max(id)+1 is racy under concurrent requests; an
    # auto-increment column would be safer.
    next_id = int(UserResource.get_next_id("id")[0]["max_id"]) + 1
    next_address_id = int(AddressResource.get_next_id("address_id")[0]["max_id"]) + 1
    AddressResource.create_data_resource({
        "address_id": next_address_id,
        "address": address,
        "zip": zip_code
    })
    UserResource.create_data_resource({
        "firstName": firstName,
        "lastName": lastName,
        "email": email,
        "id": next_id,
        "address_id": next_address_id
    })
    return f"{firstName} are now a user! Checkout /api/users/{next_id}"
if __name__ == '__main__':
app.run(host="0.0.0.0", port=5000)
| YowKuan/E6156-team-project | UserService/app.py | app.py | py | 4,806 | python | en | code | 0 | github-code | 13 |
#https://www.acmicpc.net/problem/1874
# Stack + greedy:
# push consecutive numbers until the requested number is reached, then
# check whether popping the stack top keeps the sequence producible.
n = int(input())
count = 1
stack = []
result = []
for _ in range(n): # one requested number per element
    num = int(input())
    while count <= num: # push until the requested number is on top
        stack.append(count)
        count += 1
        result.append('+')
    if stack[-1] == num: # pop when the stack top matches the request
        stack.pop()
        result.append('-')
    else: # impossible to produce the sequence
        print('NO')
        exit(0) # terminate the program
print('\n'.join(result)) # feasible: print the push/pop operations
| wooryung/Coding-Test | BOJ/BOJ1874(other).py | BOJ1874(other).py | py | 815 | python | ko | code | 0 | github-code | 13 |
72727934099 | #!/usr/bin/python3
'''
Plot differences for samples from uncertainty analysis.
'''
import operator
import os.path
import sys
from matplotlib import colorbar
from matplotlib import colors
from matplotlib import gridspec
from matplotlib import pyplot
from matplotlib import ticker
from matplotlib.backends import backend_pdf
import numpy
import seaborn
sys.path.append(os.path.dirname(__file__)) # For Sphinx.
import common
sys.path.append('..')
import model
# Pairs (baseline, baseline + vaccine)
targets = [model.target.all_[i : i + 2]
for i in range(0, len(model.target.all_), 2)]
_cmap_base = 'cubehelix'
cmap = common.cmap_reflected(_cmap_base)
def _get_percentiles(x):
    '''Return (q, C): the 0..100 integer percentiles of `x` along axis 0 and a
    matching matrix of percentile levels.

    The percentiles are ordered from the tails inward so points near the
    median are plotted last and therefore show up clearest.'''
    p = numpy.linspace(0, 100, 101)
    half = len(p) // 2
    # Interleave low and high tails: [0, 100, 1, 99, 2, 98, ..., 49, 51].
    p_ = numpy.column_stack((p[: half], p[-1 : -(half + 1) : -1])).flatten()
    if len(p) % 2 == 1:
        # Odd count: the median goes last of all.
        p_ = numpy.hstack((p_, p[half]))
    q = numpy.percentile(x, p_, axis = 0)
    C = numpy.outer(p_, numpy.ones(numpy.shape(x)[1]))
    return (q, C)
def _plot_cell(ax, results, country, targets, stat,
               country_label = None, stat_label = None,
               space_to_newline = False):
    '''Plot one percentile-fan cell: the per-sample difference in `stat`
    between the baseline target and the vaccine target for one country.

    Returns the QuadMesh from pcolormesh, or None when either result set is
    missing (the original executed a bare `return col` in that case, which
    raised NameError).'''
    # Initialized up front so the trailing return is safe when nothing is drawn.
    col = None
    info = common.get_stat_info(stat)
    if ((results[targets[0]] is not None)
        and (results[targets[1]] is not None)):
        v_base = getattr(results[targets[0]], stat)
        v_intv = getattr(results[targets[1]], stat)
        # Absolute reduction attributable to the vaccine.
        data = v_base - v_intv
        # Keep only time points where every sample is finite.
        ix = numpy.all(numpy.isfinite(data), axis = 0)
        q, C = _get_percentiles(data[:, ix])
        if info.scale is None:
            info.autoscale(data)
        if info.units is None:
            info.autounits(data)
        col = ax.pcolormesh(common.t[ix], q / info.scale, C,
                            cmap = cmap)
        # TODO: Do a better job with making the lower ylim 0.
        if numpy.all(q > 0):
            ax.set_ylim(bottom = 0)
        tick_interval = 10
        a = int(numpy.floor(common.t[0]))
        b = int(numpy.ceil(common.t[-1]))
        ticks = range(a, b, tick_interval)
        if ((b - a) % tick_interval) == 0:
            ticks = list(ticks) + [b]
        ax.set_xticks(ticks)
        ax.set_xlim(a, b)
    common.format_axes(ax, country, info, country_label, stat_label,
                       space_to_newline = space_to_newline)
    return col
def plot_selected():
    '''One figure per baseline target: a grid of effectiveness measures
    (rows) by selected countries (columns), plus a shared percentile
    colorbar in the bottom row; saved as PDF and PNG.'''
    for targs in targets:
        baseline = targs[0]
        print(baseline)
        fig = pyplot.figure(figsize = (8.5, 11))
        # Bottom row is colorbar.
        nrows = len(common.effectiveness_measures) + 1
        ncols = len(common.countries_to_plot)
        legend_height_ratio = 1 / 3
        gs = gridspec.GridSpec(nrows, ncols,
                               height_ratios = ((1, ) * (nrows - 1)
                                                + (legend_height_ratio, )))
        for (col, country) in enumerate(common.countries_to_plot):
            print('\t', country)
            results = common.get_country_results(country, targs)
            for (row, stat) in enumerate(common.effectiveness_measures):
                ax = fig.add_subplot(gs[row, col])
                # Only label the outer edge of the grid.
                stat_label = 'ylabel' if ax.is_first_col() else None
                country_label = 'title' if ax.is_first_row() else None
                _plot_cell(ax, results, country, targs, stat,
                           country_label = country_label,
                           stat_label = stat_label)
        ax = fig.add_subplot(gs[-1, :])
        colorbar.ColorbarBase(ax,
                              cmap = cmap,
                              norm = colors.Normalize(vmin = 0, vmax = 100),
                              orientation = 'horizontal',
                              label = 'Percentile',
                              format = '%g%%')
        fig.tight_layout()
        fileroot = '{}_{}'.format(common.get_filebase(),
                                  str(baseline).replace(' ', '_'))
        common.savefig(fig, '{}.pdf'.format(fileroot))
        common.savefig(fig, '{}.png'.format(fileroot))
def plot_all():
    '''One multi-page PDF per baseline target with a page of effectiveness
    measures for each country.'''
    # NOTE(review): `countries` is assigned but never used; the loop below
    # iterates common.countries_to_plot instead — confirm which is intended.
    countries = common.all_regions_and_countries
    for targs in targets:
        baseline = targs[0]
        print(baseline)
        filename = '{}_{}_all.pdf'.format(common.get_filebase(),
                                          str(baseline).replace(' ', '_'))
        with backend_pdf.PdfPages(filename) as pdf:
            nrows = len(common.effectiveness_measures) + 1
            ncols = 1
            legend_height_ratio = 1 / 3
            gs = gridspec.GridSpec(nrows, ncols,
                                   height_ratios = ((1, ) * (nrows - 1)
                                                    + (legend_height_ratio, )))
            for country in common.countries_to_plot:
                print('\t', country)
                results = common.get_country_results(country, targs)
                fig = pyplot.figure(figsize = (8.5, 11))
                for (row, stat) in enumerate(common.effectiveness_measures):
                    ax = fig.add_subplot(gs[row, 0])
                    stat_label = 'ylabel' if ax.is_first_col() else None
                    country_label = 'title' if ax.is_first_row() else None
                    _plot_cell(ax, results, country, targs, stat,
                               country_label = country_label,
                               stat_label = stat_label,
                               space_to_newline = True)
                fig.tight_layout()
                pdf.savefig(fig)
                pyplot.close(fig)
                # NOTE(review): stops after the first country — debug leftover?
                break
if __name__ == '__main__':
plot_selected()
pyplot.show()
# plot_all()
| janmedlock/HIV-95-vaccine | plots/differences.py | differences.py | py | 5,922 | python | en | code | 1 | github-code | 13 |
26964761785 | # 1202 Program Alarm
from DataGetter import get_data
from Ship import IntcodeComputer
from Timer import timer
DAY = 2
data = get_data(DAY)
data = [i for i in map(int, data.strip('\n').split(','))]
def _comp(comp, noun, verb):
    """Run the intcode program with noun/verb patched in; return address 0.

    The computer is reset afterwards so it can be reused."""
    comp.overwrite_intr(noun, 1)
    comp.overwrite_intr(verb, 2)
    comp.compute()
    result = comp.retr_intr(0)
    comp.reset()
    return result
@timer
def compute(*args):
    """Timed single run of the intcode program; args are (comp, noun, verb)."""
    return _comp(*args)
@timer
def gravity_assist(comp, output):
    """Search every noun/verb pair in 0..99 for the one producing `output`.

    Returns 100 * noun + verb, or None (after a message) if no pair works."""
    for noun in range(100):
        for verb in range(100):
            if _comp(comp, noun, verb) == output:
                return noun * 100 + verb
    print('Unable to compute for {}'.format(output))
comp = IntcodeComputer(data)
comp.verbose = False
output = 19690720
# problem 1
print(compute(comp, 12, 2))
# problem 2
print(gravity_assist(comp, output)) | SvbZ3r0/Advent-of-Code | 2019/day02.py | day02.py | py | 793 | python | en | code | 0 | github-code | 13 |
72089356818 | import transformers
from transformers.models.pegasus.tokenization_pegasus_fast import PegasusTokenizerFast
from qag_pegasus.min_ref_loss_model import CustomPegasusForConditionalGeneration
import unicodedata as ud
import torch
class QAGPegasus:
    """Question-answer generation wrapper around a fine-tuned Pegasus model."""
    def __init__(self, model_name_or_path: str):
        # Load the tokenizer and the custom (min-reference-loss) Pegasus weights.
        self.tokenizer = PegasusTokenizerFast.from_pretrained(model_name_or_path)
        self.model = CustomPegasusForConditionalGeneration.from_pretrained(model_name_or_path)
    @staticmethod
    def normalize(text):
        # NFC-normalize and collapse whitespace runs to single spaces.
        text = ud.normalize("NFC", text)
        text = " ".join(text.split())
        return text
    # def push_to_hub_hgf(self, repo_name: str):
    #     self.model.push_to_hub()
    #     self.tokenizer.push_to_hub()
    def generate_qa(
            self,
            context: str,
            num_return_sequences=4,
            max_length=None,
            num_beams=None,
            do_sample=True,
            top_k=None,
            top_p=0.9,
            temperature=0.7,
            no_repeat_ngram_size=2,
            early_stopping=True
    ):
        """Sample `num_return_sequences` QA strings for `context`.

        Sampling is controlled by the usual HuggingFace ``generate`` knobs;
        decoded outputs have their '<pad>' tokens stripped.
        """
        context = self.normalize(context)
        inputs = self.tokenizer(context, return_tensors="pt")
        outputs = self.model.generate(
            inputs=inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            max_length=max_length,
            num_beams=num_beams,
            do_sample=do_sample,
            top_k=top_k,
            top_p=top_p,
            temperature=temperature,
            num_return_sequences=num_return_sequences,
            no_repeat_ngram_size=no_repeat_ngram_size,
            early_stopping=early_stopping,
            decoder_start_token_id=self.model.config.decoder_start_token_id,
            eos_token_id=self.tokenizer.eos_token_id,
        )
        outputs = self.tokenizer.batch_decode(outputs)
        outputs = [s.replace("<pad>", "").strip() for s in outputs]
        return outputs
| XuanLoc2578/QAG | qag_pegasus/__init__.py | __init__.py | py | 1,896 | python | en | code | 0 | github-code | 13 |
23890113304 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange
import pickle
import random
import tensorflow as tf
# Prepares a vocabulary and a set of training files filled with
# tf.SequenceExamples.
flags = tf.app.flags
FLAGS = flags.FLAGS
flags.DEFINE_string('vocab', '/dev/null',
'Location to store the vocabulary in.')
flags.DEFINE_integer('sequence_length', 200,
'How long each training sequence should be.')
flags.DEFINE_integer('num_sequences', 100,
'How many sequence examples to extract.')
flags.DEFINE_string('output', '',
'Location to store example sequences. A suffix will be appended.')
flags.DEFINE_integer('sequences_per_file', -1,
'Max sequences per file. If unspecified, unlimited.')
def learn_vocab(paths):
  """Return the sorted list of distinct characters found across all files."""
  vocab = set()
  for path in paths:
    with open(path) as f:
      for line in f:
        vocab.update(line)
  return sorted(vocab)
def get_example(data, integerization_map):
  """Build one tf.train.SequenceExample from a random window of `data`.

  Picks a random start, integer-encodes FLAGS.sequence_length + 1 characters,
  and uses the window shifted by one position as the prediction targets.
  """
  start = random.randint(0, len(data) - FLAGS.sequence_length - 2)
  one_past_padded_end = start + FLAGS.sequence_length + 1
  padded_seq = [integerization_map[c] for c in data[start:one_past_padded_end]]
  seq = padded_seq[:FLAGS.sequence_length]
  target = padded_seq[1:]
  example = tf.train.SequenceExample()
  example.context.feature['length'].int64_list.value.append(FLAGS.sequence_length)
  input_tokens = example.feature_lists.feature_list['inputs']
  target_tokens = example.feature_lists.feature_list['targets']
  for i, t in zip(seq, target):
    input_tokens.feature.add().int64_list.value.append(i)
    target_tokens.feature.add().int64_list.value.append(t)
  return example
def save_vocab(vocab):
  """Pickle the vocabulary list to FLAGS.vocab."""
  # pickle emits bytes, so the file must be opened in binary mode; the
  # original text-mode 'w' raises TypeError under Python 3.
  with open(FLAGS.vocab, 'wb') as f:
    pickle.dump(vocab, f)
def load_data(paths):
  """Concatenate the contents of every file in `paths` into one string."""
  chunks = []
  for path in paths:
    with open(path) as f:
      chunks.append(f.read())
  return ''.join(chunks)
def get_reverse_map(vocab):
  """Map each vocabulary entry to its integer index."""
  return {value: index for index, value in enumerate(vocab)}
def main(argv):
  """Learn the vocabulary from the input files, save it, then write
  FLAGS.num_sequences random SequenceExamples across the output shards."""
  input_list = argv[1:]
  if len(input_list) < 1:
    print('No input files provided.')
    exit(1)
  if FLAGS.output == '':
    print('No output pattern provided.')
    exit(1)
  vocab = learn_vocab(input_list)
  integerization_map = get_reverse_map(vocab)
  save_vocab(vocab)
  data = load_data(input_list)
  # Number of shard files needed (one file when sequences_per_file <= 0).
  if FLAGS.sequences_per_file > 0:
    num_files = FLAGS.num_sequences // FLAGS.sequences_per_file
    if FLAGS.num_sequences % FLAGS.sequences_per_file > 0:
      num_files += 1
  else:
    num_files = 1
  total_sequences = 0
  while total_sequences < FLAGS.num_sequences:
    if FLAGS.sequences_per_file > 0:
      file_id = total_sequences // FLAGS.sequences_per_file
    else:
      file_id = 0
    filename = '{}_{:06d}_of_{:06d}.pb'.format(FLAGS.output, file_id + 1, num_files)
    with open(filename, 'w') as f:
      writer = tf.python_io.TFRecordWriter(f.name)
      examples_in_this_file = 0
      # Fill this shard until its quota (if any) or the overall total is hit.
      while (FLAGS.sequences_per_file < 0 or examples_in_this_file < FLAGS.sequences_per_file) and total_sequences < FLAGS.num_sequences:
        example = get_example(data, integerization_map)
        writer.write(example.SerializeToString())
        examples_in_this_file += 1
        total_sequences += 1
      writer.close()
    print('Wrote {} tf.ExampleSequences to {}'.format(examples_in_this_file, filename))
if __name__ == '__main__':
tf.app.run()
| sanchom/tensorflow_learning | char_rnn/create_sequence_examples_from_text.py | create_sequence_examples_from_text.py | py | 3,480 | python | en | code | 1 | github-code | 13 |
35793799269 |
class groupby(object):
    """Pure-Python reimplementation of itertools.groupby.

    [k for k, g in groupby('AAAABBBCCDAABBB')] --> A B C D A B
    [list(g) for k, g in groupby('AAAABBBCCD')] --> AAAA BBB CC D
    """
    def __init__(self, iterable, key=None):
        if key is None:
            key = lambda x: x
        self.keyfunc = key
        self.it = iter(iterable)
        # Shared sentinel object: nothing has been read from the iterator yet.
        self.tgtkey = self.currkey = self.currvalue = object()

    def __iter__(self):
        return self

    def next(self):
        # Skip any unconsumed remainder of the previous group, then start a
        # new group at the current key.
        while self.currkey == self.tgtkey:
            self.currvalue = next(self.it)  # Exit on StopIteration
            self.currkey = self.keyfunc(self.currvalue)
        self.tgtkey = self.currkey
        return (self.currkey, self._grouper(self.tgtkey))

    # Python 3 iterator protocol: the original only defined the Python 2
    # `next` method, so `for k, g in groupby(...)` failed under Python 3.
    __next__ = next

    def _grouper(self, tgtkey):
        while self.currkey == tgtkey:
            yield self.currvalue
            try:
                self.currvalue = next(self.it)
            except StopIteration:
                # PEP 479: a StopIteration escaping a generator becomes a
                # RuntimeError on Python 3.7+, so return explicitly.
                return
            self.currkey = self.keyfunc(self.currvalue)
| greshem/develop_python | group_by_src.py | group_by_src.py | py | 933 | python | en | code | 1 | github-code | 13 |
25059133690 | import turtle
import pandas
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)
data = pandas.read_csv("50_states.csv")
score = 0
states_list = []
while score != 50:
user_answer = screen.textinput(title=f"{score}/50 States Correct", prompt="What's another states name?").title()
if user_answer == "Exit":
missing_states = [state for state in data["state"].tolist() if state not in states_list]
new_data = pandas.DataFrame(missing_states)
new_data.to_csv("states_to_lean.csv")
break
if user_answer in data["state"].tolist() and user_answer not in states_list:
new_state = turtle.Turtle()
new_state.hideturtle()
new_state.penup()
state_row = data[data["state"] == user_answer]
new_state.goto(x=int(state_row.x), y=int(state_row.y))
new_state.write(user_answer)
states_list.append(user_answer)
score += 1
| Dhyan-P-Shetty/us-states-game | main.py | main.py | py | 1,032 | python | en | code | 0 | github-code | 13 |
20337166874 | #!/usr/bin/env python
# coding: utf-8
# # The Multidimensional Knapsack Problem
# Mohammed Alagha, July 2021
#
# Glasgow, UK
# A mathematical model for the MKP problem.
# Modeled using IBM CPLEX
# In[1]:
# Importing relevant libraries
import cplex
from docplex.mp.model import Model
# In[2]:
# # Import the reading function
import MKP_populate_function as rdmkp
# In[11]:
# Call the function on a given instance
instance = 'mknap07_1.txt'
c, A, b = rdmkp.MKPpopulate(instance)
# Define the ranges for variables and constraints
nCols, nRows = range(len(c)), range(len(b))
# In[12]:
# Create an empty model
mkp = Model('Mkp')
# In[13]:
# Define decision variables
x = mkp.binary_var_list(nCols, lb = 0, ub = 1, name = 'x')
# In[14]:
# Define constraints
constraints = mkp.add_constraints(sum(A[i][j] * x[j] for j in nCols) <= b[i] for i in nRows)
# In[15]:
# Define objective function
profit = mkp.sum(c[j] * x[j] for j in nCols)
# In[16]:
# Add objective function as a kpi to the model
mkp.add_kpi(profit, 'profit')
# Set objective sense to 'maximization'
objective = mkp.maximize(profit)
# In[17]:
# Solving the model
mkp.solve()
# In[18]:
# Reporting results
mkp.report()
# In[ ]:
| AghaMS/Multidimensional_Knapsack_Problem_Modelling | MKP_Math_Model.py | MKP_Math_Model.py | py | 1,235 | python | en | code | 1 | github-code | 13 |
35361389754 | # Definition for a binary tree node
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class BSTIterator(object):
    """In-order iterator over a BST backed by an explicit stack.

    hasNext() is O(1); next() is amortized O(1); memory is O(tree height)."""

    def __init__(self, root):
        # Push the leftmost spine so the stack top is always the next value.
        self.stack = []
        self._push_left(root)

    def _push_left(self, node):
        # Descend along left children, stacking every node passed.
        while node:
            self.stack.append(node)
            node = node.left

    def hasNext(self):
        """
        :rtype: bool
        """
        return len(self.stack) > 0

    def next(self):
        """
        :rtype: int
        """
        node = self.stack.pop()
        self._push_left(node.right)
        return node.val
# Your BSTIterator will be called like this:
# i, v = BSTIterator(root), []
# while i.hasNext(): v.append(i.next())
| FeiZhan/Algo-Collection | answers/leetcode/Binary Search Tree Iterator/Binary Search Tree Iterator.py | Binary Search Tree Iterator.py | py | 856 | python | en | code | 3 | github-code | 13 |
class Solution:
    def countAndSay(self, n: int) -> str:
        """Return the n-th term (1-indexed) of the count-and-say sequence.

        Iterative run-length encoding instead of recursion: each pass reads
        the previous term and emits "<run length><run character>" pieces.
        """
        term = "1"
        for _ in range(n - 1):
            pieces = []
            run_char = term[0]
            run_len = 1
            for ch in term[1:]:
                if ch == run_char:
                    run_len += 1
                else:
                    pieces.append(str(run_len) + run_char)
                    run_char = ch
                    run_len = 1
            pieces.append(str(run_len) + run_char)
            term = "".join(pieces)
        return term
| pamtabak/LeetCode | 38_count_and_say.py | 38_count_and_say.py | py | 510 | python | en | code | 0 | github-code | 13 |
74164706256 | # %% Imports
import os
os.chdir('../ssl_neuron/')
import json
import pickle
import numpy as np
import pandas as pd
from tqdm import tqdm
import matplotlib.pyplot as plt
import networkx as nx
from allensdk.core.cell_types_cache import CellTypesCache
from ssl_neuron.datasets import AllenDataset
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.preprocessing import StandardScaler, LabelEncoder, OneHotEncoder
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from sklearn.svm import SVR
from sklearn.metrics import r2_score
from scipy.stats import pearsonr
import time
# %% Setup
config = json.load(open('./ssl_neuron/configs/config.json'))
config['data']['n_nodes'] = 1000
ctc = CellTypesCache(manifest_file='./ssl_neuron/data/cell_types/manifest.json')
cells = ctc.get_cells()
ephys_features = ctc.get_ephys_features()
ef_df = pd.DataFrame(ephys_features)
morphology_features = ctc.get_morphology_features()
morph_df = pd.DataFrame(morphology_features)
cell_df = pd.DataFrame(cells)
dset = AllenDataset(config, mode='all')
latents = np.load('../analysis/latents.npy')
ef_df = ef_df.set_index('specimen_id').loc[dset.cell_ids]
morph_df = morph_df[~morph_df.superseded].set_index('specimen_id').loc[dset.cell_ids]
cell_df = cell_df.set_index('id').loc[dset.cell_ids]
# %% Prep features
cell_type_input_columns = ['reporter_status',
'structure_layer_name', 'structure_area_id', 'structure_area_abbrev',
'transgenic_line',
'dendrite_type', 'apical', 'reconstruction_type',
'structure_hemisphere',
'normalized_depth']
cell_features = []
for column in cell_type_input_columns:
data = cell_df[column]
if column == 'normalized_depth':
data = data.to_numpy()
data = StandardScaler().fit_transform(data[:, None])
elif column == 'cell_soma_location':
data = np.array(data.tolist())
data = StandardScaler().fit_transform(data)
else:
if column == 'structure_area_id':
data = np.array([str(sa_id) for sa_id in data], dtype='object')
else:
data = data.to_numpy()
data = OneHotEncoder().fit_transform(data[:, None]).todense()
cell_features.append(data)
cell_features = np.concatenate(cell_features, axis=1)
morph_input_columns = ['average_bifurcation_angle_local',
'average_contraction', 'average_diameter', 'average_fragmentation',
'average_parent_daughter_ratio',
'max_branch_order', 'max_euclidean_distance', 'max_path_distance',
'number_bifurcations', 'number_branches',
'number_nodes', 'number_stems', 'number_tips', 'overall_depth',
'overall_height', 'overall_width', 'soma_surface', 'total_length',
'total_surface', 'total_volume']
morph_features = []
for column in morph_input_columns:
data = morph_df[column]
data = data.to_numpy()
data = StandardScaler().fit_transform(data[:, None])
morph_features.append(data)
morph_features = np.concatenate(morph_features, axis=1)
# %% Define function
def fit_eval_decoder(input_features, target_df, skip_cols=None, to_str_cols=None,
                     regression_model=Ridge, regression_params=None,
                     classification_model=LogisticRegression, classification_params=None,
                     seed=0, return_models=False):
    """Fit one cross-validated decoder per column of ``target_df``.

    Numeric columns are z-scored and fit with ``regression_model``; boolean
    and string columns are label-encoded and fit with ``classification_model``.
    Hyperparameters are selected with GridSearchCV over the given grids.

    args:
        input_features: array of shape (n_cells, n_features).
        target_df: dataframe whose rows align with ``input_features``.
        skip_cols: column names to ignore entirely (default: none).
        to_str_cols: columns cast to str before type dispatch (default: none).
        regression_model / regression_params: estimator class + param grid for
            numeric targets (defaults: Ridge, alpha = logspace(-8, 3, 12)).
        classification_model / classification_params: estimator class + param
            grid for categorical targets (defaults: LogisticRegression, {}).
        seed: numpy RNG seed for the row shuffle applied before fitting.
        return_models: if True, also return the fitted GridSearchCV objects.

    returns:
        (score_dict, score_std_dict, pred_truth_dict[, model_dict]) keyed by
        column name; scores are GridSearchCV best mean CV scores.
    """
    # BUG FIX: the original used mutable default arguments ([], {}) which are
    # shared across calls; rebuild equivalent defaults per call instead.
    if skip_cols is None:
        skip_cols = []
    if to_str_cols is None:
        to_str_cols = []
    if regression_params is None:
        regression_params = {'alpha': np.logspace(-8, 3, 12)}
    if classification_params is None:
        classification_params = {}
    np.random.seed(seed)
    score_dict = {}
    score_std_dict = {}
    pred_truth_dict = {}
    model_dict = {}
    for col in target_df.columns:
        if col in skip_cols:
            continue
        elif col in to_str_cols:
            targets = np.array([str(item) for item in target_df[col]], dtype='object')
        else:
            targets = target_df[col].to_numpy()
        if targets.dtype == float or targets.dtype == int:
            gscv = GridSearchCV(regression_model(), regression_params)
            # Drop rows with missing targets; inputs stay aligned via the mask.
            mask = np.isnan(targets)
            if np.sum(~mask) == 0:
                print(f'skipping col {col} because there is no valid data')
                continue
            inputs = input_features[~mask]
            targets = targets[~mask]
            targets = StandardScaler().fit_transform(targets[:, None]).flatten()
        elif targets.dtype == bool:
            if len(np.unique(targets)) < 2:
                print(f'skipping col {col} because there is only one value')
                continue
            inputs = input_features
            targets = targets.astype(int)
            gscv = GridSearchCV(classification_model(), classification_params)
        elif type(targets[0]) == str:
            if len(np.unique(targets)) < 2:
                print(f'skipping col {col} because there is only one value')
                continue
            inputs = input_features
            targets = LabelEncoder().fit_transform(targets)
            gscv = GridSearchCV(classification_model(), classification_params)
        else:
            print(f'skipping col {col} due to unsupported dtype {targets.dtype}')
            continue
        # Shuffle rows once so CV folds are not ordered by cell id.
        perm = np.random.permutation(inputs.shape[0])
        gscv.fit(inputs[perm], targets[perm])
        score_dict[col] = gscv.best_score_
        score_std_dict[col] = gscv.cv_results_['std_test_score'][gscv.best_index_]
        pred_truth_dict[col] = (gscv.predict(inputs), targets)
        if return_models:
            model_dict[col] = gscv
    if return_models:
        return score_dict, score_std_dict, pred_truth_dict, model_dict
    else:
        return score_dict, score_std_dict, pred_truth_dict
# %%
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn.base import BaseEstimator, RegressorMixin
class MLP(nn.Module):
    """Fully connected network with configurable activations and dropout.

    Hidden layers apply ``nonlinearity`` followed by dropout; the final layer
    applies ``output_activation`` only. Activation names are looked up on
    ``torch.nn.functional`` ('sigmoid' maps to torch.sigmoid, 'none' to
    identity).
    """
    def __init__(self, input_size, layer_sizes, output_size, nonlinearity='relu', output_activation='none', dropout=0.1):
        super(MLP, self).__init__()
        # Chain of linear maps: input -> hidden sizes -> output.
        dims = [input_size] + list(layer_sizes) + [output_size]
        self.layers = nn.ModuleList([
            nn.Linear(d_in, d_out, bias=True)
            for d_in, d_out in zip(dims[:-1], dims[1:])
        ])
        if nonlinearity == 'sigmoid':
            self.nonlinearity = torch.sigmoid
        elif nonlinearity == 'none':
            self.nonlinearity = self._identity
        else:
            self.nonlinearity = getattr(F, nonlinearity)
        if output_activation == 'none':
            self.output_activation = self._identity
        else:
            self.output_activation = getattr(F, output_activation)
        self.dropout_rate = dropout

    @staticmethod
    def _identity(x):
        # Used when an activation is configured as 'none'.
        return x

    def forward(self, X):
        last = len(self.layers) - 1
        for idx, layer in enumerate(self.layers):
            X = layer(X)
            if idx < last:
                X = self.nonlinearity(X)
                X = F.dropout(X, p=self.dropout_rate, training=self.training)
            else:
                X = self.output_activation(X)
        return X

    def set_dropout(self, dropout):
        # Adjust the dropout probability in place (used during tuning).
        self.dropout_rate = dropout
class MLPEstimator(BaseEstimator, RegressorMixin):
    """Sklearn-compatible regressor wrapping the MLP above.

    fit() trains with full-batch Adam + MSE loss, plateau LR decay, and early
    stopping on the *training* loss (no internal validation split). A fresh
    MLP is rebuilt on every fit() call so GridSearchCV clones train cleanly.
    """
    # NOTE(review): layer_sizes=[8] is a mutable default argument; it is never
    # mutated here, but a tuple/None default would be safer.
    def __init__(self, layer_sizes=[8], nonlinearity='relu',
                 output_activation='none', dropout=0.1, weight_decay=0., max_iters=2000, patience=200):
        super(MLPEstimator, self).__init__()
        # self.model = MLP(
        #     input_size=input_size, layer_sizes=layer_sizes, output_size=output_size, nonlinearity=nonlinearity,
        #     output_activation=output_activation, dropout=dropout
        # )
        self.model = None
        self.max_iters = max_iters
        self.patience = patience
        self.weight_decay = weight_decay
        self.dropout = dropout
        self.layer_sizes = layer_sizes
        self.nonlinearity = nonlinearity
        self.output_activation = output_activation
    def fit(self, X, y):
        """Train the underlying MLP on numpy arrays X (n, d) and y (n,) or (n, k)."""
        if y.ndim == 1:
            y = y[:, None]
        # Always rebuild so input/output sizes match the current data.
        if True: # self.model is None:
            self.model = MLP(X.shape[1], self.layer_sizes, y.shape[1], self.nonlinearity, self.output_activation, self.dropout)
        X = torch.from_numpy(X).to(torch.float)
        y = torch.from_numpy(y).to(torch.float)
        optimizer = optim.Adam(self.model.parameters(), lr=1e-2, weight_decay=self.weight_decay)
        # Halve the LR when the training loss plateaus for 20 steps.
        lr_scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=20)
        best_loss = np.inf
        last_improv = 0
        for i in range(self.max_iters):
            optimizer.zero_grad()
            pred = self.model(X)
            loss = F.mse_loss(pred, y)
            loss.backward()
            optimizer.step()
            lr_scheduler.step(loss)
            # Early stopping: quit after `patience` steps without improvement.
            if loss.item() < best_loss:
                best_loss = loss.item()
                last_improv = 0
            else:
                last_improv += 1
            if last_improv > self.patience:
                break
        self.best_loss = best_loss
        self.final_loss = loss.item()
        # print(self.best_loss, self.final_loss)
    def predict(self, X):
        """Forward X through the trained MLP; returns a numpy array of shape (n, k)."""
        if self.model is None:
            raise AssertionError("Model not fit yet")
        X = torch.from_numpy(X).to(torch.float)
        y = self.model(X)
        y = y.detach().cpu().numpy()
        return y
    def get_params(self, deep=True):
        # NOTE(review): max_iters and patience are deliberately(?) excluded,
        # so GridSearchCV will not tune or clone them -- confirm intent.
        return {
            "weight_decay": self.weight_decay,
            "dropout": self.dropout,
            "layer_sizes": self.layer_sizes,
            "nonlinearity": self.nonlinearity,
            "output_activation": self.output_activation,
        }
    def set_params(self, **parameters):
        # NOTE(review): only dropout and weight_decay are applied; any other
        # parameter passed here is silently ignored.
        for parameter, value in parameters.items():
            if parameter == 'dropout':
                self.dropout = value
                # self.model.set_dropout(value)
            elif parameter == 'weight_decay':
                self.weight_decay = value
        return self
# %% Latents only
# Decoder naming convention used throughout this section: the prefix encodes
# the input feature set (l = latents, c = cell metadata, m = morphology;
# combinations are concatenated, e.g. "lcm" = all three), and the letter just
# before "score" encodes the model family (l = linear Ridge, n = nonlinear SVR).
print('latents')
print(time.time())
latent_features = StandardScaler().fit_transform(latents)
# Ephys columns that are ids/flags rather than decodable quantities.
skip_cols = ['electrode_0_pa', 'has_burst', 'has_delay', 'has_pause', 'id', 'rheobase_sweep_id', 'rheobase_sweep_number', 'vm_for_sag']
llscore_dict, llscore_std_dict, llpred_truth_dict = fit_eval_decoder(
    latent_features, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
lnscore_dict, lnscore_std_dict, lnpred_truth_dict = fit_eval_decoder(
    latent_features, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
# Tag every result dict with its input feature set for later aggregation.
for d in [llscore_dict, llscore_std_dict, lnscore_dict, lnscore_std_dict]:
    d['input_features'] = 'latents'
# lcscore_dict, lcscore_std_dict, lcpred_truth_dict = fit_eval_decoder(
#     latents, ef_df, skip_cols=skip_cols,
#     regression_model=MLPEstimator,
#     regression_params={'weight_decay': np.logspace(-6, 4, 2), 'dropout': np.linspace(0, 0.2, 2)},
#     classification_model=LogisticRegression,
#     seed=0
# )
# print(lcscore_dict)
# import pdb; pdb.set_trace()
# %% Cell type only
print('cell type')
print(time.time())
clscore_dict, clscore_std_dict, clpred_truth_dict = fit_eval_decoder(
    cell_features, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
cnscore_dict, cnscore_std_dict, cnpred_truth_dict = fit_eval_decoder(
    cell_features, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
for d in [clscore_dict, clscore_std_dict, cnscore_dict, cnscore_std_dict]:
    d['input_features'] = 'cell'
# %% Morph only
print('morphology')
print(time.time())
mlscore_dict, mlscore_std_dict, mlpred_truth_dict = fit_eval_decoder(
    morph_features, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
mnscore_dict, mnscore_std_dict, mnpred_truth_dict = fit_eval_decoder(
    morph_features, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
for d in [mlscore_dict, mlscore_std_dict, mnscore_dict, mnscore_std_dict]:
    d['input_features'] = 'morph'
# %% Latents + Cell type
print('latents + cell type')
print(time.time())
inputs = np.concatenate([latent_features, cell_features], axis=1)
lclscore_dict, lclscore_std_dict, lclpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
lcnscore_dict, lcnscore_std_dict, lcnpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
for d in [lclscore_dict, lclscore_std_dict, lcnscore_dict, lcnscore_std_dict]:
    d['input_features'] = 'latents+cell'
# %% Latents + morph
print('latents + morph')
print(time.time())
inputs = np.concatenate([latent_features, morph_features], axis=1)
lmlscore_dict, lmlscore_std_dict, lmlpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
lmnscore_dict, lmnscore_std_dict, lmnpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
for d in [lmlscore_dict, lmlscore_std_dict, lmnscore_dict, lmnscore_std_dict]:
    d['input_features'] = 'latents+morph'
# %% Cell type + morph
print('cell type + morph')
print(time.time())
inputs = np.concatenate([cell_features, morph_features], axis=1)
cmlscore_dict, cmlscore_std_dict, cmlpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
cmnscore_dict, cmnscore_std_dict, cmnpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
for d in [cmlscore_dict, cmlscore_std_dict, cmnscore_dict, cmnscore_std_dict]:
    d['input_features'] = 'cell+morph'
# %% Latents + cell types + morph
print('all three')
print(time.time())
inputs = np.concatenate([latent_features, cell_features, morph_features], axis=1)
lcmlscore_dict, lcmlscore_std_dict, lcmlpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=Ridge, regression_params={'alpha': np.logspace(-6, 4, 22)},
    seed=0
)
lcmnscore_dict, lcmnscore_std_dict, lcmnpred_truth_dict = fit_eval_decoder(
    inputs, ef_df, skip_cols=skip_cols,
    regression_model=SVR, regression_params={'C': np.logspace(-7, 3, 22)},
    seed=0
)
for d in [lcmlscore_dict, lcmlscore_std_dict, lcmnscore_dict, lcmnscore_std_dict]:
    d['input_features'] = 'latents+cell+morph'
# %%
# print(time.time())
# import pdb; pdb.set_trace()
# linear_scores = [llscore_dict, clscore_dict, mlscore_dict, lclscore_dict, lmlscore_dict, cmlscore_dict, lcmlscore_dict]
# linear_scores = pd.DataFrame(linear_scores)
# linear_scores.to_csv('../analysis/linear_scores.csv')
# linear_score_stds = [llscore_std_dict, clscore_std_dict, mlscore_std_dict, lclscore_std_dict, lmlscore_std_dict, cmlscore_std_dict, lcmlscore_std_dict]
# linear_score_stds = pd.DataFrame(linear_score_stds)
# linear_score_stds.to_csv('../analysis/linear_score_stds.csv')
# nonlinear_scores = [lnscore_dict, cnscore_dict, mnscore_dict, lcnscore_dict, lmnscore_dict, cmnscore_dict, lcmnscore_dict]
# nonlinear_scores = pd.DataFrame(nonlinear_scores)
# nonlinear_scores.to_csv('../analysis/nonlinear_scores.csv')
# nonlinear_score_stds = [lnscore_std_dict, cnscore_std_dict, mnscore_std_dict, lcnscore_std_dict, lmnscore_std_dict, cmnscore_std_dict, lcmnscore_std_dict]
# nonlinear_score_stds = pd.DataFrame(nonlinear_score_stds)
# nonlinear_score_stds.to_csv('../analysis/nonlinear_score_stds.csv')
# %%
# linear_scores = pd.read_csv('../analysis/linear_scores.csv')
# linear_score_stds = pd.read_csv('../analysis/linear_score_stds.csv')
# nonlinear_scores = pd.read_csv('../analysis/nonlinear_scores.csv')
# nonlinear_score_stds = pd.read_csv('../analysis/nonlinear_score_stds.csv')
# %%
# for feature in linear_scores.columns:
# if feature == 'input_features':
# continue
# fig, axs = plt.subplots(1, 2, figsize=(12,6), sharey=True)
# axs[0].bar(np.arange(7), linear_scores[feature], yerr=(linear_score_stds[feature] / np.sqrt(5)))
# axs[0].set_xticks(np.arange(7))
# axs[0].set_xticklabels(linear_scores['input_features'], rotation=90)
# axs[0].set_title('Linear')
# axs[1].bar(np.arange(7), nonlinear_scores[feature], yerr=(nonlinear_score_stds[feature] / np.sqrt(5)))
# axs[1].set_xticks(np.arange(7))
# axs[1].set_xticklabels(nonlinear_scores['input_features'], rotation=90)
# axs[1].set_title('Non-linear')
# plt.tight_layout()
# plt.savefig(f'../analysis/score_plots/{feature}.png')
# plt.close()
| felixp8/bmed7610-final-project | analysis/ephys_regression.py | ephys_regression.py | py | 17,259 | python | en | code | 0 | github-code | 13 |
9205029493 | import sys
import os
from numpy import fmax
from utils import optimizer_utils, image_utils
import torch
from torchvision.transforms import transforms
import scipy.ndimage
from datasets.ffhq import process_image
def add_batch(image: torch.Tensor):
    """Prepend singleton dimensions until ``image`` is at least 4-D (NCHW-style)."""
    for _ in range(4 - image.dim()):
        image = image.unsqueeze(0)
    return image
class Parsing:
    """Derives region masks (face-only background, hair-free background) from
    face and hair segmentation masks at a fixed square resolution.

    NOTE: all mask tensors are expected on the GPU; intermediate dilations run
    on CPU via scipy.ndimage and are moved back with .cuda().
    """
    def __init__(self, size=1024):
        # Edge length of the (square) masks this parser operates on.
        self.size = size
        self.resize = transforms.Resize((size, size))

    def get_Face_Noface(self, face_mask: torch.Tensor, hair_mask: torch.Tensor):
        """Build a 4-channel map whose last channel marks the non-face background.

        args: face_mask / hair_mask of shape (3, H, W) or (1, 3, H, W).
        returns: tensor of shape (1, 4, H, W); channels 0-2 are zero, channel 3
        marks everything outside the dilated face region.
        """
        # BUG FIX: unsqueeze() is not in-place -- the original discarded its
        # result, so 3-D inputs failed the shape assertions below.
        if len(face_mask.shape) == 3:
            face_mask = face_mask.unsqueeze(0)
        if len(hair_mask.shape) == 3:
            hair_mask = hair_mask.unsqueeze(0)
        assert face_mask.shape == (1, 3, self.size, self.size)
        assert hair_mask.shape == (1, 3, self.size, self.size)
        # NOTE(review): both planes are read from face_mask (channels 2 and 0);
        # hair_mask is only shape-checked here -- confirm channel semantics.
        FM_face = face_mask[0][2]
        HM_face = face_mask[0][0]
        FM_delate = scipy.ndimage.binary_dilation(
            FM_face.cpu(), iterations=5
        )
        HM_delate = scipy.ndimage.binary_dilation(
            HM_face.cpu(), iterations=5
        )
        FM_delate = torch.from_numpy(FM_delate).float().cuda()
        HM_delate = torch.from_numpy(HM_delate).float().cuda()
        # bg = (FM_delate - FM_face) * (1 - HM_delate)
        bg = torch.ones_like(FM_face) - FM_delate
        return torch.cat([torch.zeros((1, 1, self.size, self.size)).cuda(),
                          torch.zeros((1, 1, self.size, self.size)).cuda(),
                          torch.zeros((1, 1, self.size, self.size)).cuda(),
                          add_batch(bg)],
                         dim=1
                         )

    def get_NoHair(self, face_mask, hair_mask):
        """Return a background mask excluding both dilated hair and face regions.

        args: face_mask / hair_mask of shape (1, 1, H, W) (3-D inputs are
        unsqueezed first).
        returns: float tensor of shape (1, 1, H, W) on the GPU.
        """
        # BUG FIX: assign the unsqueeze() result (it is not in-place).
        if len(face_mask.shape) == 3:
            face_mask = face_mask.unsqueeze(0)
        if len(hair_mask.shape) == 3:
            hair_mask = hair_mask.unsqueeze(0)
        assert face_mask.shape == (1, 1, self.size, self.size)
        assert hair_mask.shape == (1, 1, self.size, self.size)
        HM_hair = hair_mask[0][0]
        FM_hair = face_mask[0][0]
        HM_delate = scipy.ndimage.binary_dilation(
            HM_hair.cpu(), iterations=5
        )
        FM_delate = scipy.ndimage.binary_dilation(
            FM_hair.cpu(), iterations=5
        )
        HM_delate = torch.from_numpy(HM_delate).float().cuda()
        FM_delate = torch.from_numpy(FM_delate).float().cuda()
        # Background = everything that is neither dilated hair nor dilated face.
        bg = ((torch.ones_like(HM_hair) - HM_delate - FM_delate) > 0.5)
        # NOTE(review): despite the variable name, binary_dilation is applied
        # here as well -- an erosion may have been intended; confirm.
        bg_erode = scipy.ndimage.binary_dilation(
            bg.float().cpu(), iterations=3
        )
        bg_erode = torch.from_numpy(bg_erode).float().cuda().unsqueeze(0).unsqueeze(0)
        return bg_erode
if __name__ == '__main__':
    # Demo: build a "face / no-face" mask for one image pair and save it.
    raw = "data/images"
    mask = "data/masks"
    background = "data/backgrounds"
    softmask = "data/softmasks"
    image1 = "02602.jpg"
    image2 = "08244.jpg"
    image_files = image_utils.getImagePaths(raw, mask, background, image1, image2)
    # Load both images with their masks. Based on the names: I = image,
    # M = full segmentation mask, HM/H = hair mask/crop, FM/F = face mask/crop
    # -- assumed from process_image's return order; TODO confirm in datasets.ffhq.
    I_1, M_1, HM_1, H_1, FM_1, F_1 = process_image(
        image_files['I_1_path'], image_files['M_1_path'], size=1024, normalize=1)
    I_2, M_2, HM_2, H_2, FM_2, F_2 = process_image(
        image_files['I_2_path'], image_files['M_2_path'], size=1024, normalize=1)
    # Move every tensor to the GPU and add a leading batch dimension.
    I_1, M_1, HM_1, H_1, FM_1, F_1 = optimizer_utils.make_cuda(
        [I_1, M_1, HM_1, H_1, FM_1, F_1])
    I_2, M_2, HM_2, H_2, FM_2, F_2 = optimizer_utils.make_cuda(
        [I_2, M_2, HM_2, H_2, FM_2, F_2])
    I_1, M_1, HM_1, H_1, FM_1, F_1 = image_utils.addBatchDim(
        [I_1, M_1, HM_1, H_1, FM_1, F_1])
    I_2, M_2, HM_2, H_2, FM_2, F_2 = image_utils.addBatchDim(
        [I_2, M_2, HM_2, H_2, FM_2, F_2])
    parsing = Parsing(1024)
    mask = parsing.get_Face_Noface(M_1, M_2)
    # Copy the background channel (3) into channel 0 so it is visible in the
    # RGB slice written below.
    mask[0][0] = mask[0][3]
    print(mask[0])
    image_utils.writeImageToDisk(
        [mask[:,0:3,:,:].clone()], [f'temp.png'], './results'
    )
| VioletSabers/HairEditing | src/faceparsing.py | faceparsing.py | py | 3,889 | python | en | code | 8 | github-code | 13 |
# SWEA 4408: minimum time to move all desks through a shared corridor.
import sys
sys.stdin = open('input.txt')
T = int(input())
for tc in range(1, T+1):
    N = int(input())
    # count[i] = number of moves that pass through corridor segment i (1..200).
    count = [0] * 201
    # Increment the count on every corridor segment from start to end;
    # each segment then holds the number of overlapping moves,
    # and the maximum overlap is the total time required.
    result = 0
    for _ in range(N):
        start, end = map(int, input().split())
        # Rooms 1-2 face the same corridor segment, rooms 3-4 the next, etc.,
        # i.e. odd room numbers map to //2 + 1 and even ones to //2.
        start = start//2 + 1 if start%2 else start//2
        end = end//2 + 1 if end%2 else end//2
        if start > end:
            start, end = end, start
        for i in range(start, end+1):
            count[i] += 1
            if result < count[i]:
                result = count[i]
    print('#{} {}'.format(tc, result))
| jiyong1/problem-solving | swea/4408/solution.py | solution.py | py | 879 | python | ko | code | 2 | github-code | 13 |
17228653899 | import torch
import random
# Numerically stable binary cross-entropy on raw logits (log-sum-exp trick).
def bce_loss(input, target):
    """Numerically stable binary cross-entropy loss on raw logits.

    Implements max(x, 0) - x*z + log(1 + exp(-|x|)), which equals
    sigmoid-cross-entropy-with-logits but avoids overflow for large |x|.
    As per https://github.com/pytorch/pytorch/issues/751 and the TensorFlow
    docs for tf.nn.sigmoid_cross_entropy_with_logits.

    Input:
    - input: PyTorch Tensor of shape (N,) giving scores (logits).
    - target: PyTorch Tensor of shape (N,) containing 0/1 targets.
    Output:
    - Scalar tensor with the mean BCE loss over the minibatch.
    """
    clamped = input.clamp(min=0)
    stable_log_term = (1 + (-input.abs()).exp()).log()
    elementwise = clamped - input * target + stable_log_term
    return elementwise.mean()
# Generator loss: score fakes against smoothed "real" labels.
def gan_g_loss(scores_fake):
    """GAN generator loss with smoothed real-side labels.

    Fake-sample scores are compared against a soft "real" target drawn
    uniformly from [0.7, 1.2] (label smoothing), pushing the generator to
    fool the discriminator.

    Input:
    - scores_fake: Tensor of shape (N,) with discriminator scores for fakes.
    Output:
    - Scalar tensor with the generator BCE loss.
    """
    smooth_real_label = random.uniform(0.7, 1.2)
    y_fake = torch.ones_like(scores_fake) * smooth_real_label
    return bce_loss(scores_fake, y_fake)
# Discriminator loss: real samples vs. smoothed real labels, fakes vs. zeros.
def gan_d_loss(scores_real, scores_fake):
    """
    Input:
    - scores_real: Tensor of shape (N,) giving scores for real samples
    - scores_fake: Tensor of shape (N,) giving scores for fake samples
    Output:
    - loss: Tensor of shape (,) giving GAN discriminator loss
    """
    # Real labels are smoothed into [0.7, 1.2] (one-sided label smoothing).
    y_real = torch.ones_like(scores_real) * random.uniform(0.7, 1.2)
    # NOTE(review): zeros * uniform(0, 0.3) is always zero, so the random
    # factor is a no-op; if smoothed fake labels were intended this should
    # probably be `torch.zeros_like(...) + random.uniform(0, 0.3)` -- confirm.
    y_fake = torch.zeros_like(scores_fake) * random.uniform(0, 0.3)
    loss_real = bce_loss(scores_real, y_real)
    loss_fake = bce_loss(scores_fake, y_fake)
    return loss_real + loss_fake
def l2_loss(pred_traj, pred_traj_gt, loss_mask, random=0, mode='average'):
    """Masked squared-error loss between predicted and ground-truth trajectories.

    Input:
    - pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground-truth trajectory.
    - loss_mask: Tensor of shape (batch, seq_len); 1 where a timestep counts.
    - random: unused; kept for interface compatibility.
    - mode: one of 'sum', 'average', 'raw'.
    Output:
    - 'sum': total masked squared error (scalar tensor)
    - 'average': total divided by the number of mask entries (scalar tensor)
    - 'raw': per-pedestrian error of shape (batch,)
    """
    # Permute to (batch, seq_len, 2) so the mask broadcasts over the last axis.
    diff = pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)
    masked_sq = loss_mask.unsqueeze(dim=2) * diff.pow(2)
    if mode == 'sum':
        return torch.sum(masked_sq)
    elif mode == 'average':
        return torch.sum(masked_sq) / torch.numel(loss_mask.data)
    elif mode == 'raw':
        return masked_sq.sum(dim=2).sum(dim=1)
# Euclidean displacement error accumulated over the whole trajectory (ADE-style).
def displacement_error(pred_traj, pred_traj_gt, consider_ped=None, mode='sum'):
    """
    Input:
    - pred_traj: Tensor of shape (seq_len, batch, 2). Predicted trajectory.
    - pred_traj_gt: Tensor of shape (seq_len, batch, 2). Ground truth.
    - consider_ped: optional Tensor of shape (batch,) weighting each pedestrian.
    - mode: one of 'sum', 'raw'.
    Output:
    - 'sum': scalar total error; 'raw': per-pedestrian error of shape (batch,)
    """
    # (batch, seq, 2): per-step Euclidean distance, summed over the sequence.
    sq_diff = (pred_traj_gt.permute(1, 0, 2) - pred_traj.permute(1, 0, 2)) ** 2
    per_ped = torch.sqrt(sq_diff.sum(dim=2)).sum(dim=1)
    if consider_ped is not None:
        per_ped = per_ped * consider_ped
    if mode == 'sum':
        return torch.sum(per_ped)
    elif mode == 'raw':
        return per_ped
# Euclidean error at the final trajectory point only (FDE-style).
def final_displacement_error(pred_pos, pred_pos_gt, consider_ped=None, mode='sum'):
    """
    Input:
    - pred_pos: Tensor of shape (batch, 2). Predicted last position.
    - pred_pos_gt: Tensor of shape (batch, 2). Ground-truth last position.
    - consider_ped: optional Tensor of shape (batch,) weighting each pedestrian.
    - mode: 'sum' for a scalar total, 'raw' for per-pedestrian errors.
    Output:
    - Euclidean displacement error at the final timestep.
    """
    dist = torch.sqrt(((pred_pos_gt - pred_pos) ** 2).sum(dim=1))
    if consider_ped is not None:
        dist = dist * consider_ped
    if mode == 'raw':
        return dist
    return torch.sum(dist)
| ZhoubinXM/project_of_article | losses.py | losses.py | py | 5,279 | python | en | code | 0 | github-code | 13 |
19651257225 | import copy
import datetime
import json
import requests
from django.conf import settings
class WeatherDataProcessor:
    """Class to process weather data retrieval from an external API.
    Manages fetching weather data based on user input and classifies it as historical or forecast data.
    """
    def __init__(self, data: dict):
        # `data` must contain 'city' plus 'start_date'/'end_date' (datetime.date).
        self.data = data
        self.forecast_weather_data, self.historical_weather_data = {}, {}
    def get_weather_data_from_API(self) -> dict:
        """Fetch weather data from an external API based on user input.
        Determines whether to fetch historical or forecast data, or both, based on user-defined date ranges.
        """
        # Range entirely in the future -> forecast only.
        if self.data['start_date'] > datetime.date.today():
            self.forecast_weather_data = ForecastWeatherRetriever(self.data).get_data_from_API()
        # Range ending today or earlier -> history only.
        elif self.data['end_date'] <= datetime.date.today():
            self.historical_weather_data = HistoryWeatherRetriever(self.data).get_data_from_API()
        # Range straddles today -> fetch both; the caller merges them.
        else:
            self.forecast_weather_data = ForecastWeatherRetriever(self.data).get_data_from_API()
            self.historical_weather_data = HistoryWeatherRetriever(self.data).get_data_from_API()
        return {'historical_weather_data': self.historical_weather_data,
                'forecast_weather_data': self.forecast_weather_data, }
class AbstractWeatherAPIRetriever:
    """Abstract class for fetching data from an external API.
    Contains common methods and properties for all weather data retrieval classes.
    """
    # Shared connection settings pulled from Django settings. `api_method`
    # is the endpoint suffix and must be overridden by each concrete subclass.
    api_key = settings.WEATHER_API_KEY
    api_url = settings.WEATHER_API_URL
    api_method = None
    api_language_code = settings.WEATHER_API_LANGUAGE_CODE
    def __init__(self, data: dict):
        # `data` holds the user query (at least 'city'; subclasses read dates).
        self.data = data
    def get_response(self) -> dict:
        """Send a request to the external API and return the response."""
        query_params = self.get_query_params()
        response = request_to_api(self.api_url + self.api_method, query_params)
        return response
    def get_query_params(self) -> dict:
        """Define query parameters for the API request."""
        # Base parameters common to every endpoint; subclasses extend these.
        query_params = {'key': self.api_key,
                        'q': self.data['city'],
                        'lang': self.api_language_code}
        return query_params
class HistoryWeatherRetriever(AbstractWeatherAPIRetriever):
    """Class for fetching historical weather data from an external API.
    Retrieves historical weather data within a given date range.
    """
    api_method = settings.WEATHER_API_METHOD['history']
    # The API caps how many days one history request may span; longer ranges
    # are split into consecutive sub-requests and merged afterwards.
    max_days_range = settings.WEATHER_API_LIMITS['max_days_range_for_history_request']
    def get_data_from_API(self):
        """Fetch historical weather data from the API, considering API's evening behavior.
        This method retrieves historical weather data within the specified date range,
        accounting for the API's behavior. In the evening, the API provides weather data
        for the next day in historical data. Therefore, if the end date is greater than today,
        it's adjusted to today's date.
        """
        # Work on a copy so clamping end_date does not leak into self.data.
        data = copy.deepcopy(self.data)
        if data['end_date'] > datetime.date.today():
            data['end_date'] = datetime.date.today()
        subperiod_list = split_data_period(data['start_date'], data['end_date'], self.max_days_range)
        response_list = self.get_combined_response(subperiod_list)
        # Merge all sub-responses into the first one's 'forecastday' list.
        result_response = response_list[0]
        if len(response_list) > 1:
            for i in range(1, len(response_list)):
                result_response['forecast']['forecastday'].extend(response_list[i]['forecast']['forecastday'])
        return result_response
    def get_combined_response(self, subperiod_list: list[dict]):
        """Fetch historical weather data for multiple subperiods."""
        combined_response = []
        for subperiod in subperiod_list:
            # NOTE(review): this mutates self.data's date range in place so
            # get_query_params() picks up each subperiod -- confirm that no
            # caller relies on self.data being unchanged afterwards.
            self.data['start_date'] = subperiod['start_date']
            self.data['end_date'] = subperiod['end_date']
            response = self.get_response()
            combined_response.append(response)
        return combined_response
    def get_query_params(self) -> dict:
        """Define query parameters specific to historical weather data."""
        query_params = super().get_query_params()
        # 'dt'/'end_dt' bound the requested history window (inclusive).
        query_params.update({'dt': self.data['start_date'],
                             'end_dt': self.data['end_date']})
        return query_params
class ForecastWeatherRetriever(AbstractWeatherAPIRetriever):
    """Class for fetching forecast weather data from an external API.
    Retrieves forecast weather data within a given date range.
    """
    api_method = settings.WEATHER_API_METHOD['forecast']
    # Maximum number of forecast days the API returns, counted from today.
    api_limit = settings.WEATHER_API_LIMITS['forecast_days_limit']
    def get_data_from_API(self) -> dict:
        """Fetch forecast weather data from the API based on user input.
        The API allows querying data for a specific number of days starting from today.
        To accommodate this limitation, the method fetches data for the entire available period
        and then filters it to provide the data relevant to the user's specified date range.
        This ensures that the user gets the data they requested."""
        response = self.get_response()
        return self.date_filter(response)
    def date_filter(self, response: dict) -> dict:
        """Filter forecast weather data based on the user-defined date range.
        This method takes the raw forecast weather data from the API and filters it
        to retain only the data that falls within the user-specified date range.
        """
        forecastdays = response['forecast']['forecastday']
        filtered_forecast = []
        for day in forecastdays:
            date = datetime.datetime.strptime(day['date'], "%Y-%m-%d").date()
            # Keep days inside the requested range, excluding today and earlier
            # (those are served by HistoryWeatherRetriever instead).
            if self.data['start_date'] <= date <= self.data['end_date'] and datetime.date.today() < date:
                filtered_forecast.append(day)
        response['forecast']['forecastday'] = filtered_forecast
        return response
    def get_query_params(self) -> dict:
        """Define query parameters specific to forecast weather data."""
        query_params = super().get_query_params()
        # Always request the full available window; date_filter() trims it.
        query_params.update({'days': self.api_limit, })
        return query_params
class CitySearcher(AbstractWeatherAPIRetriever):
    """Class to search for city names and retrieve city-related data from an external API."""
    api_method = settings.WEATHER_API_METHOD['search']
    def get_data_from_API(self) -> dict:
        """Fetch city name suggestions based on user input.
        This method queries an external API to retrieve city name
        suggestions matching the user's input.
        """
        # The base query params (key, q, lang) are sufficient for the search endpoint.
        return self.get_response()
def split_data_period(start_date: datetime.date, end_date: datetime.date, interval_in_day: int) -> list[dict]:
    """Split an inclusive date range into consecutive subperiods.

    Each subperiod spans at most ``interval_in_day`` days between its start
    and end boundary; consecutive subperiods do not overlap (the next one
    starts the day after the previous one ends).
    """
    span = datetime.timedelta(interval_in_day)
    if end_date - start_date < span:
        # Short range: a single subperiod covers everything.
        return [{'start_date': start_date, 'end_date': end_date}, ]
    subperiods = []
    period_start = start_date
    while period_start <= end_date:
        period_end = min(period_start + span, end_date)
        subperiods.append({'start_date': period_start, 'end_date': period_end})
        period_start = period_end + datetime.timedelta(days=1)
    return subperiods
def request_to_api(url: str, params: dict):
    """Send a GET request to an external API and return the decoded JSON body.

    Raises requests.HTTPError for 4xx/5xx responses.
    """
    response = requests.get(url, params)
    # BUG FIX: the original did `raise response.raise_for_status()` for every
    # non-200 status. raise_for_status() returns None on successful 2xx/3xx
    # codes (201, 204, ...), so that line raised "TypeError: exceptions must
    # derive from BaseException" instead of returning the payload. Let
    # raise_for_status() raise on real errors and return JSON otherwise.
    response.raise_for_status()
    return json.loads(response.text)
| yurii-onyshchuk/WeatherApp | weather_app/services/weather_api_service.py | weather_api_service.py | py | 7,778 | python | en | code | 0 | github-code | 13 |
8866019641 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Feb 8 12:00:24 2019
@author: james
"""
import os
import numpy as np
import pandas as pd
import time
import datetime as dt
from copy import deepcopy
import re
def list_dirs(path):
    """
    list all directories in a given directory
    args:
        :param path: string with the path to the search directory
    out:
        :return: sorted list of all directory names within the search directory
    """
    entries = os.listdir(path)
    return sorted(entry for entry in entries if os.path.isdir(os.path.join(path, entry)))
def list_files(path):
    """
    lists all filenames in a given directory
    args:
        :param path: string with the path to the search directory
    out:
        :return: sorted list of all file names within the search directory
    """
    entries = os.listdir(path)
    return sorted(entry for entry in entries if os.path.isfile(os.path.join(path, entry)))
def extract_station_info(info,paths):
    """
    Parameters
    ----------
    info : dataframe
        dataframe with information about each station read in from Excel
    paths : dictionary
        dictionary from config file with paths for each type of data
    Returns
    -------
    paths : dictionary
        dictionary from config file with paths for each type of data, updated with stations
    pv_systems : dictionary
        dictionary with each PV system, that will in the end contain all information and data
    """
    pv_systems = {sys : {} for sys in info.index}
    #Import latitude and longitude and check for commas!!
    for key in pv_systems:
        pv_systems[key].update({'lat_lon':[info.loc[key].Breitengrad,info.loc[key].Laengengrad]})
        # German Excel exports may use ',' as decimal separator; normalize
        # string coordinates to '.' before converting to float.
        for i in range(2):
            if type(pv_systems[key]['lat_lon'][i]) == str:
                pv_systems[key]['lat_lon'][i] = float(pv_systems[key]
                ['lat_lon'][i].replace(',','.'))
        pv_systems[key]['lat_lon'] = [np.round(coord,6) for coord in pv_systems[key]['lat_lon']]
    # For each configured data source, collect the stations that provide the
    # corresponding measurement (flagged 'ja' in the Excel sheet).
    if 'auew' in paths:
        paths['auew'].update({'stations':[]})
        for station in info.index:
            if info.loc[station].Lastmessung_AUEW == 'ja':
                paths['auew']['stations'].append(station)
    if 'egrid' in paths:
        paths['egrid'].update({'stations':[]})
        for station in info.index:
            if info.loc[station].egrid_Messbox == 'ja':
                paths['egrid']['stations'].append(station)
    if 'solarwatt' in paths:
        paths['solarwatt'].update({'stations':[]})
        for station in info.index:
            if info.loc[station].PV_Messung_DC == 'ja':
                paths['solarwatt']['stations'].append(station)
    # NOTE(review): 'inverter' reuses the PV_Messung_DC flag, same as
    # 'solarwatt' -- confirm this is intentional.
    if 'inverter' in paths:
        paths['inverter'].update({'stations':[]})
        for station in info.index:
            if info.loc[station].PV_Messung_DC == 'ja':
                paths['inverter']['stations'].append(station)
    return paths, pv_systems
def convert_wrong_format(input_value):
    """Repair numeric strings that contain a spurious extra decimal point.

    Some CSV exports contain values with two dots (e.g. '1.234.5' meaning
    1234.5); strings with exactly two dots are rejoined so only the last dot
    survives. Ordinary numeric strings are parsed directly. Unparseable
    strings become NaN; empty strings and non-string values are returned
    unchanged.

    args:
        :param input_value: raw cell value from the CSV (str or numeric)
    out:
        :return: float, np.nan, or the input unchanged
    """
    # BUG FIX: the original left `value` unbound for non-string input and
    # raised UnboundLocalError; numbers (and '') now pass through unchanged.
    if not isinstance(input_value, str) or not input_value:
        return input_value
    parts = input_value.split(".")
    if len(parts) == 3:
        # '1.234.5' -> '1234.5': merge everything before the final dot.
        candidate = ".".join(("".join(parts[0:2]), parts[-1]))
    elif len(parts) <= 2:
        # Zero or one dot: parse directly (previously dot-less numbers like
        # '123' were silently turned into NaN).
        candidate = input_value
    else:
        # More than two dots: no sensible repair -> NaN.
        return np.nan
    try:
        return float(candidate)
    except ValueError:
        # Non-numeric content (previously this crashed float()) -> NaN.
        return np.nan
def filter_suntracker_time(value):
    """Reorder a dash-separated suntracker timestamp into HH:MM:SS form.

    Parameters
    ----------
    value : string
        raw field of the form '<SS...>-<MM>-<HH>...'; only the first two
        characters of the seconds token are kept.

    Returns
    -------
    new_time : string
        timestamp reordered as 'HH:MM:SS'
    """
    tokens = value.split('-')
    seconds_token, minutes, hours = tokens[0], tokens[1], tokens[2]
    new_time = ':'.join([hours, minutes, seconds_token[0:2]])
    return new_time
def filter_suntracker_irrad(value):
    """Extract the irradiance reading from a raw suntracker field.

    Parameters
    ----------
    value : string
        raw field whose first dash-separated token carries a two-character
        prefix followed by the irradiance value

    Returns
    -------
    new_irrad : float
        irradiance value with the prefix stripped
    """
    first_token = value.split('-')[0]
    new_irrad = float(first_token[2:])
    return new_irrad
def downsample(dataframe,old_period,new_period):
    """
    Downsample the data to required frequency
    args:
    :param dataframe: Dataframe to be resampled
    :param old_period: timedelta, old period given in seconds
    :param new_period: timedelta, new period given in seconds
    out:
    :return: resampled dataframe
    """
    #Check whether there are gaps in the data
    t_delta_max = dataframe.index.to_series().diff().max() #.round('1s')
    #If some parts of series are different, upsample them!
    if t_delta_max > old_period:
        # Gaps exist: rebuild a regular index at double frequency and linearly
        # interpolate so the averaging below sees uniformly spaced samples.
        start_ts = dataframe.first_valid_index().round(old_period)
        end_ts = dataframe.last_valid_index().round(old_period)
        #Resample at higher frequency
        newindex = pd.date_range(freq=old_period/2,start=start_ts,end=end_ts)
        df_upsample = dataframe.reindex(newindex.union(dataframe.index)).interpolate('linear')
        #Go back to desired frequency
        newindex = pd.date_range(freq=old_period,start=newindex[0],end=newindex[-1])
        #Make sure we don't have extra entries from tomorrow!
        newindex = newindex[newindex.date==newindex[0].date()]
        df_upsample = df_upsample.reindex(newindex)
    else:
        df_upsample = dataframe
    #Calculate number of periods to shift by
    # Half the new/old period ratio, so each averaging window is centered on
    # its output timestamp rather than trailing it.
    shift_periods = int(new_period/old_period/2)
    #Shift to the left, resample to new period (with the mean), shift back to the right
    df_rs = df_upsample.shift(-shift_periods).resample(new_period).mean().shift(1)
    return df_rs
def interpolate(dataframe,new_period):
    """
    Upsample a time series to a finer period by linear interpolation.

    args:
        :param dataframe: pandas dataframe with a DatetimeIndex
        :param new_period: timedelta, new (finer) sampling period
    out:
        :return: dataframe on the finer grid with gaps filled linearly
    """
    resampler = dataframe.resample(new_period)
    return resampler.interpolate('linear')
def shift_auew_data(dataframe,config):
    """
    Shift AÜW data to take into account the fact that the measured values
    are actually the moving-average power of the preceding interval (e.g.
    15 minutes): the energy is counted over the interval and the mean
    power is assigned to the interval's *end* timestamp.  Shifting by
    half an interval re-centres each value on the interval it represents.

    args:
        :param dataframe: dataframe with a DatetimeIndex to be shifted
        :param config: dictionary with keys "time_integral" (length of the
            averaging interval) and "units" ("seconds"/"minutes" variants)
    out:
        :return: dataframe with re-centred values on the original index.
            The input is returned unchanged when the units string is not
            recognised or when the index minutes never line up with the
            expected interval (previously these paths raised NameError or
            silently returned None, clobbering the caller's dataframe).
    """
    timeres = config["time_integral"]
    units = config["units"]
    #Convert the averaging interval to a timedelta object
    if units in ("seconds", "s", "secs", "sec"):
        t_res_new = pd.Timedelta(int(timeres/60),'m')
    elif units in ("minutes", "m", "mins", "min"):
        t_res_new = pd.Timedelta(int(timeres),'m')
    else:
        # Unknown units: leave the data untouched rather than failing
        return dataframe
    if t_res_new.components.minutes not in dataframe.index.minute:
        # Index does not line up with the expected interval; nothing to shift
        return dataframe
    t_half = str(t_res_new.components.minutes/2.)
    #Create new index to take averaging into account: move each
    #timestamp back by half an interval and back-fill so every shifted
    #label carries the measurement that follows it
    shifted_index = dataframe.index - pd.Timedelta(t_half + 'm')
    df_shifted = dataframe.reindex(shifted_index,method='bfill')
    #Resample data at double frequency and linearly interpolate
    df_rs = df_shifted.resample(t_half + 'T').interpolate('linear')
    #Map the interpolated values back onto the original timestamps
    df_new = df_rs.reindex(dataframe.index)
    return df_new
def shift_module_temperature_data(dataframe, config):
    """
    Correct the linear clock drift of the module temperature logger.

    The logger clock drifts linearly with elapsed time since the last
    synchronisation.  Each sample's timestamp is shifted by
    slope * (time since synchronisation), the shifted series is
    interpolated on the union of the two grids, and the result is mapped
    back onto the original time grid (no intermediate grid needed).

    args:
        :param dataframe: Dataframe with module temperature data
        :param config: dictionary with configuration for the timeshift
            slope: float, slope of the time correction in [s/s]
            start_time: string, instant at which the clock is assumed
                to have been synchronised
    out:
        :return: dataframe with corrected temperature data
    """
    # Sampling resolution in whole seconds (smallest index step)
    step_deltas = dataframe.index.to_series().diff()
    resolution = int(step_deltas.min().round('1s').total_seconds())
    drift_slope = config["slope"]
    synch_instant = pd.to_datetime(config["start_time"])
    # Drift accumulated between synchronisation and the first sample
    elapsed_s = (dataframe.index[0] - synch_instant).total_seconds()
    initial_offset = drift_slope*elapsed_s
    # Per-sample correction grows linearly with the sample number
    sample_no = np.arange(len(dataframe))
    drift = dt.timedelta(seconds = drift_slope)*resolution*sample_no + \
        dt.timedelta(seconds = initial_offset)
    correction = pd.TimedeltaIndex(data = drift).round('1s')
    corrected_index = dataframe.index + correction
    df_drifted = pd.DataFrame(data=dataframe.values, index=corrected_index,
                              columns=dataframe.columns)
    # Interpolate on the union of the drifted and original grids, then
    # keep only the original timestamps
    union_index = df_drifted.index.union(dataframe.index)
    df_on_grid = df_drifted.reindex(union_index).interpolate('index')
    return df_on_grid.reindex(dataframe.index)
def resample_interpolate_merge(raw_data,station,timeres,process_config,datatypes):
    """
    Resample all raw data of one station to the required time resolution,
    as set in the config file, then merge the resampled series into one
    dataframe with a (variable, substat) MultiIndex on the columns.

    args:
    :param raw_data: dictionary with raw data separated into types; per type
        a dict mapping substation name -> (days, list of daily dataframes)
    :param station: string, name of station
    :param timeres: required time resolution for further simulation; a
        pandas offset string, or "raw" to keep the native resolution
    :param process_config: dictionary with information for data processing
        (AÜW shift, module temperature shift, substation renaming)
    :param datatypes: list with different data types
    out:
    :return: (df_merge, raw_data_rs) - the merged dataframe, and the raw-data
        dictionary where each type now maps 'df_<substat>' -> full dataframe
    """
    # #Create a copy of the dictionary
    # Deep copy so the caller's raw_data is not mutated; original freed
    raw_data_rs = deepcopy(raw_data) #.copy()
    del raw_data
    #
    if timeres != "raw":
        t_res_new = pd.to_timedelta(timeres)
    #Go through all the data and resample, interpolate, merge
    print(("Processing data from %s" % station))
    #This will be the merged dataframe with all values averaged to the same time stamp
    df_merge = pd.DataFrame()
    #Loop through the datatypes
    for idata in datatypes:
        if idata in raw_data_rs and raw_data_rs[idata]:
            #This is a list of dataframes, one per sensor of one data type
            #resampled to the desired resolution
            dfs_total_time = []
            #Dictionary for full dataframes, to be added to main dictionary later (for completeness, 31.07.2019)
            dict_full = {}
            #Loop through the substations of one datatype
            for i, substat in enumerate(raw_data_rs[idata]):
                if raw_data_rs[idata][substat]:
                    dfs_total_time.append(pd.DataFrame())
                    #Full dataframe for one sensor, except duplicates or wrong time stamps
                    df_full = pd.concat(raw_data_rs[idata][substat][1],axis=0)
                    #Remove duplicates
                    if df_full.index.duplicated().any():
                        df_full = df_full[~df_full.index.duplicated()]
                    #Check for nonsensical timestamps that occur in the wrong place
                    #(negative time steps): drop whichever neighbour is out of order
                    t_delta = df_full.index.to_series().diff()
                    idx_negative = df_full[t_delta < pd.Timedelta(0)].index
                    for idx in idx_negative:
                        int_idx = df_full.index.get_loc(idx)
                        if df_full.index[int_idx] - df_full.index[int_idx - 2] > pd.Timedelta(0):
                            df_full.drop(df_full.index[int_idx - 1],inplace=True)
                        else:
                            df_full.drop(df_full.index[int_idx],inplace=True)
                    #Round timestamps to nearest second
                    df_full = df_full.reindex(df_full.index.round('S'),
                                              method='nearest')
                    #Full dataframe of all values
                    dict_full.update({'df_' + substat:df_full})
                    #Redefine lists of dataframes after duplicates have been removed
                    dfs = [group[1] for group in df_full.groupby(df_full.index.date)]
                    days = pd.to_datetime([group[0] for group in df_full.groupby(df_full.index.date)])
                    raw_data_rs[idata][substat] = (days,dfs)
                    if timeres != "raw":
                        #Iterate through days to interpolate / resample
                        for ix, iday in enumerate(raw_data_rs[idata][substat][0]):
                            dataframe = raw_data_rs[idata][substat][1][ix]
                            # Skip days with less than 1% of the expected samples
                            if len(dataframe) > int(pd.Timedelta('1D')/t_res_new/100):
                                #Check for duplicates and throw away if necessary
                                if dataframe.index.duplicated().any():
                                    dataframe = dataframe[~dataframe.index.duplicated()]
                                    #This would shift the data, but is not general enough!
                                    #t_shift = pd.Timedelta(t_delta_mean)*dataframe.index.duplicated()
                                    #new_index = dataframe.index + t_shift
                                    #dataframe = pd.DataFrame(index=new_index,data=dataframe.values,
                                    #        columns=dataframe.columns)
                                dataframe.sort_index(axis=0,inplace=True)
                                #Check for nonsensical timestamps that occur in the wrong place
                                t_delta = dataframe.index.to_series().diff()
                                idx_negative = dataframe[t_delta < pd.Timedelta(0)].index
                                for idx in idx_negative:
                                    int_idx = dataframe.index.get_loc(idx)
                                    if dataframe.index[int_idx] - dataframe.index[int_idx - 2] > pd.Timedelta(0):
                                        dataframe.drop(dataframe.index[int_idx - 1],inplace=True)
                                    else:
                                        dataframe.drop(dataframe.index[int_idx],inplace=True)
                                t_delta = dataframe.index.to_series().diff()
                                #Check if there are big gaps in the data
                                #t_delta_max = dataframe.index.to_series().diff().max().round('1s')
                                #time_max = dataframe.index.to_series().diff().idxmax()
                                #If more than one hour in the day time is missing, throw away the day of data
                                #if t_delta_max < pd.Timedelta(1,'h') or time_max.hour > 19 or time_max.hour < 3:
                                #Find the frequency of the dataframe
                                t_delta_min = t_delta.min() #.round('1s')
                                #SHIFT AUEW data by 15 minutes!!
                                if 'auew' in substat:
                                    dataframe = shift_auew_data(dataframe,process_config["auew"])
                                    #print('AUEW data for %s, %s shifted' % (station,substat))
                                #Resampling and interpolation
                                # (native resolution finer -> downsample; coarser -> interpolate)
                                if t_delta_min != t_res_new:
                                    try:
                                        if t_delta_min < t_res_new:
                                            try:
                                                dataframe = downsample(dataframe,t_delta_min,t_res_new)
                                            except:
                                                print(('error in data from %s, %s on %s' % (station,substat,iday)))
                                        elif t_delta_min > t_res_new:
                                            dataframe = interpolate(dataframe,t_res_new)
                                    except:
                                        print(('error %s, %s, %s' % (station,substat,iday)))
                                else:
                                    #Check if timestamps are correct
                                    new_index = pd.date_range(start=dataframe.index[0].round(timeres),
                                                end=dataframe.index[-1].round('T'),freq=timeres)
                                    dataframe = dataframe.reindex(new_index,method='nearest').loc[iday.strftime('%Y-%m-%d')]
                                dfs_total_time[i] = pd.concat([dfs_total_time[i],dataframe],axis=0)
                            else: print(('Data has less than 1/100 of a day, throwing away %s' % iday.date()))
                    #Create Multi-Index
                    if type(dfs_total_time[i].columns) != pd.MultiIndex:
                        col_index = pd.MultiIndex.from_product([dfs_total_time[i].columns.values.tolist(),[substat]],
                                               names=['variable','substat'])
                        dfs_total_time[i].columns = col_index
                    if process_config:
                        #Shift module temperature data by a specific time shift
                        if "PV-Modul_Temperatursensor" in substat and\
                        station in process_config["module_temp"] and\
                        process_config["module_temp"][station]["flag"]:
                            dfs_total_time[i] = shift_module_temperature_data(dfs_total_time[i],
                                               process_config["module_temp"][station])
                            print(('Module temperature data for %s, %s shifted' % (station,substat)))
            #Concatenate different substations
            if len(dfs_total_time) > 1:
                #Create multiindex, with substation and variable
                df_total = pd.concat(dfs_total_time,axis=1)
                #,keys=raw_data_rs[idata].keys(),
                #names=['substat','variable'])
                #df_total.columns = df_total.columns.swaplevel(0,1)
            else:
                df_total = dfs_total_time[0]
                # NOTE(review): `substat` here is whatever value was left
                # over from the loop above - TODO confirm this is intended
                if type(df_total.columns) != pd.MultiIndex:
                    df_total.columns = pd.MultiIndex.from_product([df_total.columns.values.tolist(),[substat]],
                                       names=['variable','substat'])
            #This is to rename a column since substation name changed between campaigns!
            if process_config:
                for substat in raw_data_rs[idata]:
                    if "substat_switch" in process_config and \
                    station in process_config["substat_switch"] and substat ==\
                    process_config["substat_switch"][station]["old_name"]:
                        oldname = process_config["substat_switch"][station]["old_name"]
                        newname = process_config["substat_switch"][station]["new_name"]
                        df_total.rename(columns={oldname:newname},
                                        level='substat',inplace=True)
                        print(('Substation name changed from %s to %s for %s' % (oldname,newname,station)))
            #Merge dataframes into one
            if df_merge.empty:
                df_merge = df_total
                #Added this for Spyder bug but not sure if it is a good idea...
                #If the station has only one datatype then this will drop the Nans
                df_merge.dropna(axis=0,how='all',inplace=True)
            else:
                #Here there should be no rows with only Nans, except those from interpolation
                df_merge = pd.merge(df_merge,df_total,how="outer",left_index=True,right_index=True)
            #This overwrites the dictionaries of tuples, now a dictionary of long dataframes for each sensor
            raw_data_rs[idata] = dict_full
        else:
            del raw_data_rs[idata]
    print(("Data from %s has been resampled to %s" % (station,timeres)))
    return df_merge, raw_data_rs
def load_pv_data(pv_systems,info,paths,description,process):
    """
    Load PV power data into dataframes, one loader branch per data source
    (AÜW 15-minute meter, egrid 1-second logger, Solarwatt MyReserve,
    inverter logs).  For each station the imported data is stored as
    pv_systems[station]['pv'][substat] = (days, list of daily dataframes).
    Power is normalised to a 'P_kW' column; timestamps are shifted to UTC
    where the source files use local time.

    args:
    :param pv_systems: dictionary of PV systems, filled in place
    :param info: dictionary with information about PV systems
        (index gives the station names)
    :param paths: dictionary of paths; per source a dict with 'path' and
        'stations' entries
    :param description: string, description of measurement campaign; the
        campaign year ("2018"/"2019"/"2021") selects the file format
    :param process: dictionary with process configuration
        (inverter names and phase counts)
    out:
    :return: dictionary of PV systems
    """
    for station in info.index:
        station_dirs = list_dirs(os.path.join(paths['mainpath'],station))
        pv_systems[station]['pv'] = {}
        pv_systems[station]['irrad'] = {}
        pv_systems[station]['temp'] = {}
        for substat in station_dirs:
            #read in 15 minute PV data
            if 'auew' in paths and substat == paths['auew']['path']:
                path = os.path.join(paths['mainpath'],station,paths['auew']['path'])
                files = list_files(path)
                if not files:
                    print(('Station %s has no AUEW power data' % station))
                    paths['auew']['stations'].remove(station)
                else:
                    if "2018" in description:
                        #Only use first column with MEZ
                        dfs_all = [pd.read_csv(os.path.join(path,ifile),header=None,sep=';',usecols=[0,2],index_col=0,
                               skiprows=6,names=['Timestamp','P_kW'],converters={'P_kW':convert_wrong_format})
                               for ifile in files]
                    elif "2019" in description:
                        dfs_all = [pd.read_csv(os.path.join(path,ifile),header=None,sep=';',usecols=[0,2],index_col=0,
                               skiprows=6,names=['Timestamp','P_kW'])
                               for ifile in files]
                    #If there are several files create new substation dictionaries
                    for i, dataframe in enumerate(dfs_all):
                        substat_name = 'auew_' + str(i + 1)
                        pv_systems[station]['pv'][substat_name] = ()
                        dataframe.index = pd.to_datetime(dataframe.index,format='%d.%m.%Y %H:%M:%S')
                        # NOTE(review): convert_wrong_format is called on the
                        # whole column array here - confirm it accepts arrays
                        for cols in dataframe.columns:
                            dataframe[cols] = convert_wrong_format(dataframe[cols].values)
                        #Shift to UTC, data files are given in CET (only first column)
                        dataframe.index = dataframe.index - pd.Timedelta(hours=1)
                        #get list of unique days
                        dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                        days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                        pv_systems[station]['pv'][substat_name] = (days,dfs)
                    print(('15 minute PV power data from station %s successfully imported' % station))
            #read in 1s PV data
            if 'egrid' in paths and substat == paths['egrid']['path']:
                dfs = []
                print(("Importing 1s PV data from %s, please wait....." % station))
                path = os.path.join(paths['mainpath'],station,paths['egrid']['path'])
                files = list_files(path)
                dirs = list_dirs(path)
                if not files:
                    if not dirs:
                        print(('Station %s has no egrid power data' % station))
                        paths['egrid']['stations'].remove(station)
                    else:
                        # One subdirectory per inverter ("WR"); last character
                        # of the directory name numbers the substation
                        for wr in dirs:
                            substat_name = 'egrid_' + wr[-1]
                            pv_systems[station]['pv'][substat_name] = ()
                            files = list_files(os.path.join(path,wr))
                            if not files:
                                print(('Station %s, %s has no egrid power data' % (station,wr)))
                            else:
                                dfs = [pd.read_csv(os.path.join(path,wr,ifile),header=0,sep=',',
                                                   index_col=0,comment='#',
                                                   names=['Timestamp','P_W']) for ifile in files]
                                dataframe = pd.concat(dfs,axis='index')
                                dataframe.index = pd.to_datetime(dataframe.index,format='%Y.%m.%d %H:%M:%S')
                                #Throw away nonsense data with wrong year
                                if "2018" in description:
                                    dataframe = dataframe[dataframe.index.year == 2018]
                                elif "2019" in description:
                                    dataframe = dataframe[dataframe.index.year == 2019]
                                # Normalise power from W to kW
                                dataframe['P_kW'] = dataframe.P_W/1000
                                dataframe.drop(['P_W'],axis=1,inplace=True)
                                #get list of unique days
                                dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                                days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                                pv_systems[station]['pv'][substat_name] = (days,dfs)
                                print(('1 second PV power data from station %s, %s successfully imported' % (station,wr)))
                else:
                    # Flat directory: single egrid substation
                    pv_systems[station]['pv']['egrid'] = ()
                    dfs = [pd.read_csv(os.path.join(path,ifile),header=0,sep=',',index_col=0,
                                       comment='#',names=['Timestamp','P_W']) for ifile in files]
                    dataframe = pd.concat(dfs,axis='index')
                    dataframe.index = pd.to_datetime(dataframe.index,format='%Y.%m.%d %H:%M:%S')
                    dataframe['P_kW'] = dataframe.P_W/1000
                    dataframe.drop(['P_W'],axis=1,inplace=True)
                    #get list of unique days
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['pv']['egrid'] = (days,dfs)
                    print(('1 second PV power data from station %s successfully imported' % station))
            #read in 1s PV data
            if 'solarwatt' in paths and substat == paths['solarwatt']['path']:
                print(("Importing Solarwatt PV data from %s, please wait....." % station))
                path = os.path.join(paths['mainpath'],station,paths['solarwatt']['path'])
                files = list_files(path)
                # File format and timezone handling differ per campaign year
                if "2018" in description:
                    dfs = [pd.read_csv(os.path.join(path,ifile),header=0,sep='|',index_col=0)
                    for ifile in files]
                    dataframe = pd.concat(dfs,axis=1)
                    try:
                        dataframe.index = pd.to_datetime(dataframe.index,format='%Y.%m.%d %H:%M:%S')
                    except TypeError:
                        print("Wrong datetime format")
                    dataframe.index.rename('Timestamp',inplace=True)
                    #Shift data to UTC
                    dataframe.index = dataframe.index - pd.Timedelta(hours=2)
                    #Change label to P_kW
                    dataframe['P_kW'] = dataframe.P_PV/1000
                    dataframe.drop(['P_PV'],axis=1,inplace=True)
                elif "2019" in description:
                    dfs = [pd.read_csv(os.path.join(path,ifile),header=0,sep=',',index_col=0,na_values=(''))
                    for ifile in files]
                    dataframe = pd.concat(dfs,axis=0)
                    dataframe.index = pd.to_datetime(dataframe.index,format='%Y-%m-%d %H:%M:%S',
                                                     errors='coerce')
                    #Set timezone, times early on last Sunday in October will be ambiguous - marked as nat
                    dataframe.index = dataframe.index.tz_localize(tz='Europe/Berlin',
                                  ambiguous='NaT')
                    #Convert data to UTC
                    dataframe.index = dataframe.index.tz_convert('UTC')
                    #Change label to P_kW (DC power from voltage x filtered current)
                    dataframe['P_kW'] = dataframe["V_PV"]*dataframe["I_PV_filtered"]/1000
                    # NOTE(review): drops 'P_PV', which is not created in this
                    # branch - presumably a raw column; confirm it exists
                    dataframe.drop(['P_PV'],axis=1,inplace=True)
                    dataframe['Idc_A'] = dataframe["I_PV_filtered"]
                    dataframe.drop(['I_PV_filtered'],axis=1,inplace=True)
                elif "2021" in description:
                    dfs = [pd.read_csv(os.path.join(path,ifile),header=0,sep=',',index_col=0,na_values=(''))
                    for ifile in files]
                    dataframe = pd.concat(dfs,axis=0)
                    dataframe.index = pd.to_datetime(dataframe.index,format='%Y-%m-%d %H:%M:%S',
                                                     errors='coerce')
                    #Set timezone, times early on last Sunday in October will be ambiguous - marked as nat
                    dataframe.index = dataframe.index.tz_localize(tz='UTC',
                                  ambiguous='NaT')
                    #Change label to P_kW
                    dataframe['P_kW'] = dataframe["VPV"]*dataframe["IPV"]/1000
                    #dataframe.drop(['P_PV'],axis=1,inplace=True)
                    dataframe['Idc_A'] = dataframe["IPV"]
                    dataframe.drop(['IPV'],axis=1,inplace=True)
                if "2021" not in description:
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    pv_systems[station]['pv']['myreserve'] = (days,dfs)
                else:
                    # 2021 files additionally carry irradiance and temperature;
                    # split them into the 'irrad' and 'temp' sub-dictionaries
                    dataframe.rename(columns={"GHI":"Etotdown_RT1_Wm2","GTI":"Etotpoa_RT1_Wm2"},inplace=True)
                    dataframe.rename(columns={"T_module":"T_module_C","T_ambient":"T_ambient_C"},inplace=True)
                    df_pv = dataframe[["P_kW","Idc_A","VPV","IBat","VBat","SoC"]]
                    df_rad = dataframe[["Etotdown_RT1_Wm2","Etotpoa_RT1_Wm2"]]
                    df_temp = dataframe[["T_module_C","T_ambient_C"]]
                    days = pd.to_datetime([group[0] for group in df_pv.groupby(df_pv.index.date)])
                    dfs = [group[1] for group in df_pv.groupby(df_pv.index.date)]
                    pv_systems[station]['pv']['myreserve'] = (days,dfs)
                    days = pd.to_datetime([group[0] for group in df_rad.groupby(df_rad.index.date)])
                    dfs = [group[1] for group in df_rad.groupby(df_rad.index.date)]
                    pv_systems[station]['irrad']['RT1'] = (days,dfs)
                    days = pd.to_datetime([group[0] for group in df_temp.groupby(df_temp.index.date)])
                    dfs = [group[1] for group in df_temp.groupby(df_temp.index.date)]
                    pv_systems[station]['temp']['RT1'] = (days,dfs)
            # Inverter logs: 5-minute data, one column group per inverter
            if 'inverter' in paths and substat == paths["inverter"]["path"]:
                print(('Importing inverter data from %s, please wait.....' % station))
                path = os.path.join(paths['mainpath'],station,paths['inverter']['path'])
                files = list_files(path)
                dfs = [pd.read_csv(os.path.join(path,filename),sep=';',header=0)
                       for filename in files if "min" in filename]
                dataframe = pd.concat(dfs,axis='index')
                # Date and time come in the first two columns
                dataframe.index = pd.to_datetime(dataframe.iloc[:,0] + ' ' +
                                                 dataframe.iloc[:,1],format='%d.%m.%y %H:%M:%S')
                dataframe.drop(columns=["#Datum","Uhrzeit"],inplace=True)
                #Set timezone, times early on last Sunday in October will be ambiguous - marked as nat
                #dataframe.index = dataframe.index.tz_localize(tz='Europe/Berlin',ambiguous='NaT')
                #Convert data to UTC
                dataframe.index = dataframe.index - pd.Timedelta(hours=2)
                #dataframe.index = dataframe.index.tz_convert('UTC')
                #Sort index since data is back to front
                dataframe.sort_index(inplace=True)
                # Keep only AC power, DC power and DC voltage columns
                dataframe = dataframe.filter(regex='^Pac|^Pdc|^Udc', axis=1)
                #Make multiindex and combine inverters in the correct way
                dfs = []
                inverters = process["inverters"][station]["names"]
                n_phase = process["inverters"][station]["phases"]
                n_wr = len(inverters)
                n_cols = len(dataframe.columns)/n_wr
                # NOTE(review): the inner loop below reuses the name `inv`,
                # shadowing the outer loop variable - confirm intended
                for ix, inv in enumerate(inverters):
                    # Placeholder entry, overwritten with the real frame below
                    dfs.append(pd.DataFrame)
                    old_columns = dataframe.columns[int(n_cols*ix):int(n_cols*(ix+1))].values.tolist()
                    new_columns = []
                    #This is the case of KACO inverters where there are actually only 3 inverters
                    if n_phase == 3:
                        for name in old_columns:
                            if name.split('.')[0] == "Pdc1":
                                new_columns.append(name.split('.')[0][0:-1] + '_' + str(ix + 1))
                            else:
                                new_columns.append(name.split('.')[0] + '_' + str(ix + 1))
                        dfs_inv = []
                        for k, inv in enumerate(inverters):
                            dfs_inv.append(pd.DataFrame)
                            col_index =pd.MultiIndex.from_product([new_columns[int(n_cols/n_wr*k)
                            :int(n_cols/n_wr*(k+1))],[inv]],names=['variable','substat'])
                            dfs_inv[k] = dataframe.iloc[:,int(n_cols*ix+n_cols/n_wr*k)
                            :int(n_cols*ix+n_cols/n_wr*(k+1))]
                            dfs_inv[k].columns = col_index
                        dfs[ix] = pd.concat(dfs_inv,axis='columns')
                    #This is the case where there are simply 9 inverters along the columns
                    elif n_phase == 1:
                        for name in old_columns:
                            new_columns.append(name.split('.')[0])
                        col_index =pd.MultiIndex.from_product([new_columns,[inv]],names=['variable','substat'])
                        dfs[ix] = dataframe.iloc[:,int(n_cols*ix):int(n_cols*(ix+1))]
                        dfs[ix].columns = col_index
                dataframe = pd.concat(dfs,axis='columns')
                #Sort multi-index (makes it faster)
                dataframe.sort_index(axis=1,level=1,inplace=True)
                #Add each inverter to a separate tuple in dictionary
                for inv in inverters:
                    if n_phase == 3:
                        #Calculate sum of three phase power
                        dataframe[('P_kW',inv)] = dataframe.loc[:,
                                 pd.IndexSlice[['Pac_1','Pac_2','Pac_3'],inv]].sum(axis='columns')/1000.
                        #Calculate DC current (P/U) per string
                        for nstring in range(int(n_cols/n_wr)):
                            dataframe[('Idc_' + str(nstring+1),inv)] =\
                            dataframe[('Pdc_' + str(nstring+1),inv)]/\
                            dataframe[('Udc_' + str(nstring+1),inv)]
                    elif n_phase == 1:
                        dataframe[('P_kW',inv)] = dataframe[('Pac',inv)]/1000.
                        for nstring in range(int((n_cols - 1)/2)):
                            dataframe[('Idc' + str(nstring+1),inv)] =\
                            dataframe[('Pdc' + str(nstring+1),inv)]/\
                            dataframe[('Udc' + str(nstring+1),inv)]
                    dataframe.sort_index(axis=1,level=1,inplace=True)
                    df_inv = dataframe.loc[:,pd.IndexSlice[:,inv]]
                    days = pd.to_datetime([group[0] for group in df_inv.groupby(df_inv.index.date)])
                    dfs = [group[1] for group in df_inv.groupby(df_inv.index.date)]
                    pv_systems[station]['pv'][inv] = (days,dfs)
                    print(('5 minute inverter data from station %s, inverter %s successfully imported' % (station,inv)))
    print('All PV power data imported\n')
    return pv_systems
def load_rad_data (pv_systems,info,paths,description):
    """
    Load irradiance data into dataframes, one loader branch per sensor type
    (pyranometer, suntracker, MORDOR, RT1, reference-cell station, cloud
    camera).  For each station the imported data is stored as
    pv_systems[station]['irrad'][substat] = (days, list of daily dataframes).

    args:
    :param pv_systems: dictionary of PV systems, filled in place
    :param info: dictionary with information about PV systems
        (index gives the station names)
    :param paths: dictionary of paths
    :param description: string, description of measurement campaign; the
        campaign year selects the suntracker file format
    out:
    :return: dictionary of PV systems
    """
    for station in info.index:
        mainpath = os.path.join(paths['mainpath'],station)
        station_dirs = list_dirs(mainpath)
        if "irrad" not in pv_systems[station]:
            pv_systems[station]['irrad'] = {}
        for substat in station_dirs:
            # Pyranometer: GHI, POA irradiance and ambient temperature
            if "Pyr" in substat and "old" not in substat:
                pv_systems[station]['irrad'][substat] = ()
                print(("Importing pyranometer data from %s, %s, please wait....." % (station,substat)))
                rad_files = list_files(os.path.join(mainpath,substat))
                if not rad_files:
                    print(('Substation %s at station %s has no radiation data' % (substat,station)))
                    del pv_systems[station]['irrad'][substat]
                else:
                    #Go through the data files (one for each day) and import to a list
                    dfs = [pd.read_csv(os.path.join(mainpath,substat,filename)
                    ,header=None,sep='\s+',comment='#',usecols=[0,1,2,3,4]) for filename in rad_files]
                    #Concatenate list
                    dataframe = pd.concat(dfs,axis=0)
                    #Set index to be in datetime object
                    dataframe.index = pd.to_datetime(dataframe[0] + ' ' + dataframe[1],format='%Y.%m.%d %H:%M:%S')
                    dataframe.drop(columns=[0,1],inplace=True)
                    #Name columns
                    dataframe.rename(columns={2:'Etotdown_pyr_Wm2',3:'Etotpoa_pyr_Wm2',4:'T_amb_pyr_K'},inplace=True)
                    # Convert ambient temperature from Kelvin to Celsius
                    dataframe['T_amb_pyr_K'] = dataframe['T_amb_pyr_K'] - 273.15
                    dataframe.rename(columns={'T_amb_pyr_K':'T_ambient_pyr_C'},inplace=True)
                    #get list of unique days
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['irrad'][substat] = (days,dfs)
                    print(('Pyranometer data from station %s, substation %s successfully imported' % (station,substat)))
            # Suntracker: global/diffuse/direct irradiance plus temperatures;
            # some lines are garbled and are repaired with the filter helpers
            if "Sun-Tracker" in substat:
                pv_systems[station]['irrad']['suntracker'] = ()
                print(("Importing data from %s, %s, please wait....." %(station,substat)))
                rad_files = list_files(os.path.join(mainpath,substat))
                if not rad_files:
                    print(('Substation %s at station %s has no radiation data' % (substat,station)))
                    del pv_systems[station]['irrad']['suntracker']
                else:
                    #Go through the data files (one for each day) and import to a list
                    if '2018' in description:
                        #Go through the data files (one for each day) and import to a list
                        dfs = [pd.read_csv(os.path.join(mainpath,substat,filename),
                               header=None,sep=';',comment='#',usecols=[0,1,2,3,4,5,6,7],
                               names=['Date','Time','Etotdown_CMP11_Wm2','Ediffdown_CMP11_Wm2',
                               'Etotdown_SP2Lite_Wm2','Edirnorm_CHP1_Wm2','T_pyrhel_C',
                               'T_ambient_suntrack_C']) for filename in rad_files]
                    elif '2019' in description:
                        #Adding index_col = False fixes problems with delimiters at the end of the line
                        dfs = [pd.read_csv(os.path.join(mainpath,substat,filename),
                               header=None,sep=';',comment='#',index_col=False,
                               names=['Date','Time','Etotdown_CMP11_Wm2','Ediffdown_CMP11_Wm2',
                               'T_module_upper_C','Etotdown_SP2Lite_Wm2','T_module_lower_C',
                               'Edirnorm_CHP1_Wm2','T_pyrhel_C','T_ambient_suntrack_C']) for filename in rad_files]
                    #Concatenate list
                    dataframe = pd.concat(dfs,axis=0)
                    #for dataframe in dfs:
                    #Set index to be in datetime object
                    # Rows with a garbled timestamp get NaT here and are
                    # repaired below before retrying the index
                    dataframe.index = pd.to_datetime(dataframe.Date + ' ' + dataframe.Time,errors='coerce',format='%Y.%m.%d %H:%M:%S')
                    #Shift values to the right
                    dataframe.iloc[pd.isnull(dataframe.index),2:8] = dataframe.iloc[pd.isnull(dataframe.index),2:8].shift(1,axis=1)
                    #Extract irradiance from string
                    dataframe.loc[pd.isnull(dataframe.index),'Etotdown_CMP11_Wm2'] = dataframe.loc[pd.isnull(dataframe.index),'Time'].apply(filter_suntracker_irrad)
                    #Extract time from string
                    dataframe.loc[pd.isnull(dataframe.index),'Time'] = dataframe.loc[pd.isnull(dataframe.index),'Time'].apply(filter_suntracker_time)
                    #Retry index
                    dataframe.index = pd.to_datetime(dataframe.Date + ' ' + dataframe.Time,errors='coerce',format='%Y.%m.%d %H:%M:%S')
                    #Drop columns
                    dataframe.drop(columns=['Date','Time'],inplace=True)
                    dataframe = dataframe.astype(float)
                    #get list of unique days
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['irrad']['suntracker'] = (days,dfs)
                    print(('Irradiance data from station %s, substation %s successfully imported' % (station,substat)))
            # MORDOR reference station: broadband short- and longwave data
            if "MORDOR" in substat:
                pv_systems[station]['irrad']['mordor'] = ()
                print(("Importing data from %s, %s, please wait....." %(station,substat)))
                rad_files = list_files(os.path.join(mainpath,substat))
                if not rad_files:
                    print(('Substation %s at station %s has no radiation data' % (substat,station)))
                    del pv_systems[station]['irrad']['mordor']
                else:
                    #Go through the data files (one for each day) and import to a list
                    dfs = [pd.read_csv(os.path.join(mainpath,substat,filename),
                           header=None,sep='\s+',comment='#',usecols=[0,1,2,3,4,5,6,7,8],
                           names=['Date','Time','Edirnorm_MS56_Wm2','Etotdown_CMP21_Wm2','Ediffdown_CMP21_Wm2',
                                  'Etotdownlw_CGR4_Wm2','Ediffdownlw_CGR4_Wm2','Etotdown_ML020VM_Wm2',
                                  'Ediffdown_ML020VM_Wm2'])
                            for filename in rad_files]
                    #Concatenate list
                    dataframe = pd.concat(dfs,axis=0)
                    #Set index to be in datetime object
                    dataframe.index = pd.to_datetime(dataframe.Date + ' ' + dataframe.Time,errors='coerce',format='%Y.%m.%d %H:%M:%S')
                    #Drop columns
                    dataframe.drop(columns=['Date','Time'],inplace=True)
                    dataframe = dataframe.astype(float)
                    #get list of unique days
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['irrad']['mordor'] = (days,dfs)
                    print(('Irradiance data from station %s, substation %s successfully imported' % (station,substat)))
            # RT1 sensor: plane-of-array irradiance, module temperature, pressure
            if "RT1" in substat:
                pv_systems[station]['irrad']['RT1'] = ()
                print(("Importing data from %s, %s, please wait....." %(station,substat)))
                rad_files = list_files(os.path.join(mainpath,substat))
                if not rad_files:
                    print(('Substation %s at station %s has no radiation data' % (substat,station)))
                    del pv_systems[station]['irrad']['RT1']
                else:
                    #Go through the data files (one for each day) and import to a list
                    dfs = [pd.read_csv(os.path.join(mainpath,substat,filename),
                           header=None,sep='\s+',comment='#',usecols=[0,1,2,3,4],skiprows=1,
                           names=['Date','Time','Etotpoa_RT1_Wm2','T_module_C','p_air_Pa'],
                           na_values = "---") for filename in rad_files]
                    #Concatenate list
                    dataframe = pd.concat(dfs,axis=0)
                    #Set index to be in datetime object
                    dataframe.index = pd.to_datetime(dataframe.Date + ' ' +
                                                     dataframe.Time,errors='coerce',
                                                     format='%d.%m.%Y %H:%M:%S')
                    #Drop columns
                    dataframe.drop(columns=['Date','Time'],inplace=True)
                    #Make sure data is of the right type
                    dataframe = dataframe.astype(float)
                    #get list of unique days
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['irrad']['RT1'] = (days,dfs)
                    print(('Irradiance data from station %s, substation %s successfully imported' % (station,substat)))
            # Annual irradiance station ("Jahresstrahlungsmessung"): one file
            # carries several pyranometer and Si reference-cell channels which
            # are split into separate substations below
            if "Jahresstrahlungsmessung" in substat:
                #pv_systems[station]['irrad']['Pyr_SiRef'] = ()
                print(("Importing data from %s, %s, please wait....." %(station,substat)))
                rad_files = list_files(os.path.join(mainpath,substat))
                if not rad_files:
                    print(('Substation %s at station %s has no radiation data' % (substat,station)))
                    #del pv_systems[station]['irrad']['Pyr_SiRef']
                else:
                    #Go through the data files (one for each day) and import to a list
                    cols = ['Date','Time','Etotdown_CMP11_Wm2','Etotpoa_32_S_CMP11_Wm2',
                            'Etotpoa_32_E_Si02_Wm2','Etotpoa_32_S_Si02_Wm2',
                            'Etotpoa_32_W_Si02_Wm2']
                    dfs = [pd.read_csv(os.path.join(mainpath,substat,filename),
                           header=None,sep=';',comment='#',index_col=False,
                           names=cols,converters=dict(list(zip(cols[2:],
                           [convert_wrong_format]*len(cols[2:])))))
                            for filename in rad_files]
                    #Concatenate list
                    dataframe = pd.concat(dfs,axis=0)
                    #Set index to be in datetime object
                    dataframe.index = pd.to_datetime(dataframe.Date + ' ' +
                                                     dataframe.Time,errors='coerce',
                                                     format='%Y-%m-%d %H:%M:%S')
                    #Drop columns
                    dataframe.drop(columns=['Date','Time'],inplace=True)
                    # for col in dataframe.columns:
                    #     dataframe[col] = convert_wrong_format(dataframe[col].values)
                    # for cols in dataframe.columns:
                    #     dataframe[cols] = convert_wrong_format(dataframe[cols].values)
                    #
                    #Make sure data is of the right type
                    dataframe = dataframe.astype(float)
                    # Drop the tilt/orientation tag from the column names
                    oldcols = cols[2:]
                    newcols = [re.sub('_32_.', '', col) for col in oldcols]
                    dataframe.rename(columns=dict(zip(oldcols,newcols)),inplace=True)
                    # One substation per channel, in the order of the columns
                    for i, substat_rad in enumerate(['CMP11_Horiz','CMP11_32S','SiRef_32E','SiRef_32S','SiRef_32W']):
                        pv_systems[station]['irrad'][substat_rad] = ()
                        df_rad = dataframe.iloc[:,[i]]
                        #get list of unique days
                        dfs = [group[1] for group in df_rad.groupby(df_rad.index.date)]
                        days = pd.to_datetime([group[0] for group in df_rad.groupby(df_rad.index.date)])
                        pv_systems[station]['irrad'][substat_rad] = (days,dfs)
                    print(('Irradiance data from station %s, substation %s successfully imported' % (station,substat)))
            # Cloud camera ("Bedeckungsgrad"): cloud fraction time series
            if "Bedeckungsgrad" in substat:
                pv_systems[station]['irrad']['cloudcam'] = ()
                print(("Importing data from %s, %s, please wait....." %(station,substat)))
                rad_files = list_files(os.path.join(mainpath,substat))
                if not rad_files:
                    print(('Substation %s at station %s has no cloudcam data' % (substat,station)))
                    del pv_systems[station]['irrad']['cloudcam']
                else:
                    #Go through the data files (one for each day) and import to a list
                    dfs = [pd.read_csv(os.path.join(mainpath,substat,filename),
                           header=None,sep='\s+',comment='%',usecols=[0,1,3],
                           names=['Date','Time','cf_cloudcam'],
                           dtype={"Date":str,"Time":str,"cf_cloudcam":np.float64})
                            for filename in rad_files]
                    #Concatenate list
                    dataframe = pd.concat(dfs,axis=0)
                    #Set index to be in datetime object
                    dataframe.index = pd.to_datetime(dataframe.Date + ' ' +
                                                     dataframe.Time,errors='coerce',
                                                     format='%Y%m%d %H%M%S')
                    #Drop columns
                    dataframe.drop(columns=['Date','Time'],inplace=True)
                    #Make sure data is of the right type
                    dataframe = dataframe.astype(float)
                    #get list of unique days
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['irrad']['cloudcam'] = (days,dfs)
                    print(('Cloud cam data from station %s, substation %s successfully imported' % (station,substat)))
    print('All pyranometer data imported\n')
    return pv_systems
def load_temp_data (pv_systems,info,paths):
    """
    Load module-temperature data for each station into per-day dataframes.
    Files are read from the substation directory named by paths['temp']['path']
    (semicolon-separated CSV, '#' comments). The result is stored under
    pv_systems[station]['temp'][substat] as a tuple (days, dfs): a
    DatetimeIndex of unique dates and the matching list of daily dataframes
    whose columns form a ('T_module_C', 'PVTemp_<n>') MultiIndex, one column
    per temperature sensor found in the file.
    args:
    :param pv_systems: dictionary of PV systems, updated in place
    :param info: dataframe with information about PV systems (index = station names)
    :param paths: dictionary of paths; uses paths['mainpath'] and paths['temp']['path']
    out:
    :return: dictionary of PV systems
    """
    for station in info.index:
        mainpath = os.path.join(paths['mainpath'],station)
        station_dirs = list_dirs(mainpath)
        # Create the per-station 'temp' container only once
        if "temp" not in pv_systems[station]:
            pv_systems[station]['temp'] = {}
        for substat in station_dirs:
            # Only the configured temperature substation directory is imported
            if substat == paths['temp']['path']:
                pv_systems[station]['temp'][substat] = ()
                print(("Importing temperature data from %s, %s, please wait....." % (station,substat)))
                temp_files = list_files(os.path.join(mainpath,substat))
                if not temp_files:
                    print(('Substation %s at station %s has no temperature data' % (substat,station)))
                    del pv_systems[station]['temp'][substat]
                else:
                    # One dataframe per data file
                    dfs = [pd.read_csv(os.path.join(mainpath,substat,filename)
                          ,header=None,sep=';',comment='#') for filename in temp_files]
                    for dataframe in dfs:
                        # Columns 0 and 1 hold date and time -> combined datetime index
                        dataframe.index = pd.to_datetime(dataframe[0] + ' ' + dataframe[1],format='%Y.%m.%d %H:%M:%S')
                        dataframe.drop(columns=[0,1],inplace=True)
                        #In this case we create Multi-Index now since the data has both sensors in one file
                        dataframe.columns = pd.MultiIndex.from_product([['T_module_C'],
                                 ['PVTemp_' + str(i + 1) for i in range(len(dataframe.columns))]],
                                 names=['variable','substat'])
                        # Drop sensor columns that contain no data at all
                        dataframe.dropna(axis=1,how='all',inplace=True)
                    #Put all data into one frame
                    dataframe = pd.concat(dfs,axis=0)
                    # Re-split the combined frame into one dataframe per calendar day
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['temp'][substat] = (days,dfs)
                    print(('Temperature data from station %s, substation %s successfully imported' % (station,substat)))
    print('All temperature data imported\n')
    return pv_systems
def load_wind_data (pv_systems,info,paths):
    """
    Load wind-mast data (ambient temperature, wind speed, and for Solarwatt
    stations also wind direction) into per-day dataframes.
    Files in the substation directory named by paths['wind']['path'] are read;
    the "Solarwatt" stations use a different file layout (whitespace-separated,
    decimal comma, 6 header rows) than the other stations (comma-separated).
    The result is stored under pv_systems[station]['wind'][substat] as a tuple
    (days, dfs) with a (variable, 'Windmast') MultiIndex column layout.
    args:
    :param pv_systems: dictionary of PV systems, updated in place
    :param info: dataframe with information about PV systems (index = station names)
    :param paths: dictionary of paths; uses paths['mainpath'] and paths['wind']['path']
    out:
    :return: dictionary of PV systems
    """
    for station in info.index:
        mainpath = os.path.join(paths['mainpath'],station)
        station_dirs = list_dirs(mainpath)
        # Create the per-station 'wind' container only once
        if "wind" not in pv_systems[station]:
            pv_systems[station]['wind'] = {}
        for substat in station_dirs:
            if substat == paths['wind']['path']:
                pv_systems[station]['wind'][substat] = ()
                print(("Importing wind data from %s, %s, please wait....." % (station,substat)))
                wind_files = list_files(os.path.join(mainpath,substat))
                if not wind_files:
                    print(('Substation %s at station %s has no wind data' % (substat,station)))
                    del pv_systems[station]['wind'][substat]
                else:
                    # Solarwatt stations deliver a different file format
                    if "Solarwatt" in mainpath:
                        dfs = [pd.read_csv(os.path.join(mainpath,substat,filename)
                              ,header=None,skiprows=6,sep='\s+',decimal=',',
                              usecols=[0,1,2,3,5])
                              for filename in wind_files]
                        for dataframe in dfs:
                            # Columns 0 and 1 hold date and time (day-first format)
                            dataframe.index = pd.to_datetime(dataframe[0] + ' ' + dataframe[1],format='%d.%m.%Y %H:%M')
                            dataframe.drop(columns=[0,1],inplace=True)
                            dataframe.columns = pd.MultiIndex.from_product([['T_ambient_C','dir_wind','v_wind_mast_ms'],
                                     ['Windmast']],names=['variable','substat'])
                    else:
                        dfs = [pd.read_csv(os.path.join(mainpath,substat,filename)
                              ,header=None,sep=',',comment='#',usecols=[0,1,3,4]) for filename in wind_files]
                        #In this case we create Multi-Index now since the data has both sensors in one file
                        for dataframe in dfs:
                            dataframe.index = pd.to_datetime(dataframe[0] + ' ' + dataframe[1],format='%Y-%m-%d %H:%M:%S')
                            dataframe.drop(columns=[0,1],inplace=True)
                            dataframe.columns = pd.MultiIndex.from_product([['T_ambient_C','v_wind_mast_ms'],
                                     ['Windmast']],names=['variable','substat'])
                        #dataframe.fillna(0,inplace=True)
                    # Combine all files and re-split into one dataframe per day
                    dataframe = pd.concat(dfs,axis=0)
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['wind'][substat] = (days,dfs)
                    print(('Wind data from station %s, substation %s successfully imported' % (station,substat)))
    print('All wind data imported\n')
    return pv_systems
def load_pmax_data (pv_systems,info,paths):
    """
    Load PMAX-DOAS retrieval data (AOD at 361 nm and its error) into per-day
    dataframes.
    The substation directory named by paths['pmaxdoas']['path'][0] is scanned;
    every day sub-directory contributes its "retrieval" files (excluding
    "aerosol" files). The result is stored under
    pv_systems[station]['pmax'][substat] as a tuple (days, dfs): a
    DatetimeIndex of dates and the matching list of daily dataframes with
    ('AOD_361'|'error_AOD_361', 'PMAX-DOAS') MultiIndex columns.
    args:
    :param pv_systems: dictionary of PV systems, updated in place
    :param info: dataframe with information about PV systems (index = station names)
    :param paths: dictionary of paths; uses paths['mainpath'] and paths['pmaxdoas']
    out:
    :return: dictionary of PV systems
    """
    for station in info.index:
        mainpath = os.path.join(paths['mainpath'],station)
        station_dirs = list_dirs(mainpath)
        pv_systems[station]['pmax'] = {}
        for substat in station_dirs:
            if substat == paths['pmaxdoas']['path'][0]:
                pv_systems[station]['pmax'][substat] = ()
                print(("Importing PMAX-DOAS data from %s, %s, please wait....." % (station,substat)))
                pmax_dirs = list_dirs(os.path.join(mainpath,substat))
                if not pmax_dirs:
                    print(('Substation %s at station %s has no PMAX-DOAS data' % (substat,station)))
                    del pv_systems[station]['pmax'][substat]
                else:
                    # BUGFIX: accumulate the dataframes of *all* day
                    # directories. The original concatenated `dfs` after the
                    # day loop, so only the last day directory's files
                    # survived.
                    all_dfs = []
                    for day_dir in pmax_dirs:
                        filepath = os.path.join(mainpath,substat,day_dir,
                                                paths['pmaxdoas']['path'][1])
                        pmax_files = list_files(filepath)
                        # Only retrieval files, excluding the aerosol retrievals
                        dfs = [pd.read_csv(os.path.join(filepath,filename)
                              ,header=0,usecols=(0,1,10,11),sep='\s+',decimal='.')
                              for filename in pmax_files if "retrieval" in filename
                              and "aerosol" not in filename]
                        for dataframe in dfs:
                            dataframe.index = pd.to_datetime(dataframe["Date"] + ' ' + dataframe["Time"],
                                              format='%d/%m/%Y %H:%M:%S')
                            dataframe.drop(columns=["Date","Time"],inplace=True)
                            dataframe.columns = pd.MultiIndex.from_product([['AOD_361','error_AOD_361'],
                                     ['PMAX-DOAS']],names=['variable','substat'])
                        all_dfs.extend(dfs)
                    # Combine all days and re-split into one dataframe per day
                    dataframe = pd.concat(all_dfs,axis=0)
                    dfs = [group[1] for group in dataframe.groupby(dataframe.index.date)]
                    days = pd.to_datetime([group[0] for group in dataframe.groupby(dataframe.index.date)])
                    pv_systems[station]['pmax'][substat] = (days,dfs)
                    print(('PMAX-DOAS data from station %s, substation %s successfully imported' % (station,substat)))
    # BUGFIX: the original printed 'All wind data imported' here
    # (copy-paste from load_wind_data).
    print('All PMAX-DOAS data imported\n')
    return pv_systems
def extract_config_load_data(config,stations,home,info):
    """
    Extract station configuration from the Excel table and load all data
    (PV power, irradiance, and optionally temperature, wind and PMAX-DOAS).
    args:
    :param config: dictionary loaded from data configuration file; uses
                   "paths", "data_processing" and "configtable"
    :param stations: station name, list of names, or "all"
    :param home: string, homepath
    :param info: string, description of simulation (forwarded to the PV and
                 radiation loaders)
    out:
    :return pv_systems: dictionary of stations with data
    :return select_system_info: dataframe of the stations that were loaded
    :return runtime: time it took to load the data, in seconds
    :return loadpath: dictionary with paths used for loading data
    """
    #Location of PV files
    loadpath = config["paths"]
    #Configuration for data processing, if necessary
    process_config = config["data_processing"]
    #get system info from Excel table
    system_info = pd.read_excel(os.path.join(home,loadpath["savedata"]["main"],config["configtable"]),index_col=0)
    print("System info loaded\n")
    print(system_info)
    #Choose which stations to load
    if stations != "all":
        # Accept a single station name as well as a list of names
        if type(stations) != list:
            stations = [stations]
        select_system_info = system_info.loc[stations]
    else:
        select_system_info = system_info
    #Extract data from table
    paths, pv_systems = extract_station_info(select_system_info,loadpath)
    start = time.time()
    #Load PV data
    pv_systems = load_pv_data(pv_systems,select_system_info,loadpath,info,process_config)
    #Load radiation data
    pv_systems = load_rad_data(pv_systems,select_system_info,loadpath,info)
    # The remaining data sources are optional and only loaded when configured
    if "temp" in loadpath:
        #Load temperature data
        pv_systems = load_temp_data(pv_systems,select_system_info,loadpath)
    if "wind" in loadpath :
        #Load wind data
        pv_systems = load_wind_data(pv_systems,select_system_info,loadpath)
    if "pmaxdoas" in loadpath:
        #Load PMAX data
        pv_systems = load_pmax_data(pv_systems,select_system_info,loadpath)
    end = time.time()
    runtime = end - start
    print(("Loading data took %g seconds" % runtime))
    return pv_systems, select_system_info, runtime, loadpath
def load_binary_data(config,home):
    """
    Load data that has been stored as a python binary stream (pickle files);
    stations without a binary file fall back to a CSV import via
    extract_config_load_data.
    args:
    :param config: data configuration dictionary; uses "paths", "stations",
                   "configtable" and "description"
    :param home: string, home path
    out:
    :return pv_systems: dictionary of PV stations with data
    :return sys_info: dataframe with station information from table
    """
    savedir = os.path.join(home,config["paths"]["savedata"]["main"])
    files = list_files(savedir)
    #Choose which stations to load
    if config["stations"] == "all":
        #get system info from Excel table
        sys_info = pd.read_excel(os.path.join(savedir,config["configtable"]),index_col=0)
        stations = sys_info.index
    else:
        sys_info = pd.DataFrame()
        stations = config["stations"]
        # Accept a single station name as well as a list of names
        if type(stations) != list:
            stations = [stations]
    pv_systems = {}
    binarypath = os.path.join(savedir,config["paths"]["savedata"]["binary"])
    for station in stations:
        filename = config["description"] + '_' + station + ".data"
        if filename in files:
            with open(os.path.join(binarypath,filename), 'rb') as filehandle:
                (pvstat, info) = pd.read_pickle(filehandle)
            pv_systems.update({station:pvstat})
            print(('Data for %s loaded from %s' % (station,filename)))
        #Extract config and load data
        else:
            print(('No binary data file for %s found, loading from CSV...' % station))
            # BUGFIX: extract_config_load_data takes four arguments
            # (config, stations, home, info) and returns four values
            # (pv_systems, system_info, runtime, loadpath); the original
            # called it with three arguments and unpacked three values, which
            # raised a TypeError before any CSV data could be loaded.
            # NOTE(review): the `info` argument is the simulation description
            # string, the same value used above to build filenames -- confirm.
            pvstat, info, loadtime, _ = extract_config_load_data(
                    config, station, home, config["description"])
            pv_systems.update({list(pvstat.keys())[0]:list(pvstat.values())[0]})
            sys_info = pd.concat([sys_info,info],axis=0)
    return pv_systems, sys_info
def load_station_data(savedir,filename,data_types,data_flag=False):
    """
    Load a single station's pickled data tuple from disk.
    The file is expected to contain a pickled (pvstat, info) pair as written
    by the save routines of this module.
    args:
    :param savedir: string, path where data is saved
    :param filename: string, name of file
    :param data_types: dictionary with datatypes (currently only used by the
                       commented-out raw-data restructuring below)
    :param data_flag: boolean, whether to keep original data (currently only
                      used by the commented-out code below)
    out:
    :return pvstat: dictionary of PV system dataframes and other information,
                    or None if the file does not exist
    :return info: table with information about the station, or None on failure
    """
    try:
        with open(os.path.join(savedir,filename), 'rb') as filehandle:
            (pvstat, info) = pd.read_pickle(filehandle)
#            pvstat.update({"raw_data":{}})
#            for idata in data_types:
#                if idata in pvstat:
#                    pvstat["raw_data"].update({idata:pvstat[idata]})
#                    del pvstat[idata]
            #reduce file size by removing original data
#            if not data_flag:
#                print('Removing original high frequency data')
#                del pvstat["raw_data"]
    except IOError:
        # Missing or unreadable file: signal failure with a (None, None) pair
        print(('File %s not found' % os.path.join(savedir,filename)))
        return None, None
    return pvstat, info
def load_resampled_data(timeres,config,home):
    """
    Load data that has already been resampled to a specified time resolution.
    Binary files named '<description>_<station>_<timeres>.data' are unpickled
    from the configured binary directory; stations without such a file are
    reported and skipped.
    args:
    :param timeres: string, time resolution suffix of the data files
    :param config: data configuration dictionary; uses "paths", "stations",
                   "configtable" and "description"
    :param home: string, homepath
    out:
    :return pv_systems: dictionary of PV systems with dataframes and other information
    :return sys_info: table with information about each station
    """
    savedir = os.path.join(home,config["paths"]["savedata"]["main"])
    #Choose which stations to load
    if config["stations"] == "all":
        #get system info from Excel table
        sys_info = pd.read_excel(os.path.join(savedir,config["configtable"]),index_col=0)
        stations = sys_info.index
    else:
        sys_info = pd.DataFrame()
        stations = config["stations"]
        # Accept a single station name as well as a list of names
        if type(stations) != list:
            stations = [stations]
    pv_systems = {}
    binarypath = os.path.join(savedir,config["paths"]["savedata"]["binary"])
    files = list_files(binarypath)
    for station in stations:
        filename = config["description"] + '_' + station + "_" + timeres + ".data"
        if filename in files:
            with open(os.path.join(binarypath,filename), 'rb') as filehandle:
                (pvstat, info) = pd.read_pickle(filehandle)
            pv_systems.update({station:pvstat})
            # NOTE(review): when config["stations"] == "all", sys_info already
            # holds the full Excel table, so this concat appends a second row
            # per station -- confirm whether duplicates are intended.
            sys_info = pd.concat([sys_info,info],axis=0)
            print(('Data for %s loaded from %s' % (station,filename)))
        else:
            print('Required file not found')
    return pv_systems, sys_info
| jamesmhbarry/PVRAD | pvcal_invert2rad/data_process_functions.py | data_process_functions.py | py | 73,297 | python | en | code | 1 | github-code | 13 |
9483909766 | #!/usr/bin/env python
# -*- encoding:utf-8 -*-
import time
import os
import argparse
from googletranslate.googletranslate import main as gtranslate
def translate_text(text, verbose=False):
    """Translate *text* to Chinese ('zh-CN') via the googletranslate package.

    Calls gtranslate in a loop: generic errors trigger a retry after 5
    seconds, a "string index out of range" error falls back to the raw
    input, and a reply starting with '^_^:' terminates the loop.  Lines of
    each normal reply are filtered and accumulated into `trans`.

    :param text: source text (alternative language is set to 'en')
    :param verbose: accepted but not used inside this function
    :return: the accumulated translation joined into one string
    """
    class Args:
        # Namespace mimicking googletranslate's command-line options.
        target: str = 'zh-CN'
        query: str = ''
        host: str = 'translate.google.com'
        proxy: str = ''
        alternative: str = 'en'
        type: str = 'plain'
        synonyms: bool = False
        definitions: bool = True
        examples: bool = False
        tkk: str = ''
    # NOTE(review): hard-coded local proxy address
    Args.proxy = '127.0.0.1:1080'
    Args.query = text
    trans = []
    while True:
        result = gtranslate(Args)
        if result.startswith('^_^:'):
            # Terminating marker: translation finished
            break
        elif result.startswith('Errrrrrrrrror: string index out of range'):
            # googletranslate cannot handle this input; fall back to raw text
            print('Fix:', text)
            result = text
            break
        elif result.startswith('Errrrrrrrrror:'):
            # Transient error: report and retry after a short delay
            print('Error:', text, result)
            time.sleep(5)
        else:
            print(result)
            # Parse the multi-line reply: '=========' ends the useful part,
            # '---------' restarts accumulation, marker/comment lines are
            # skipped, everything else is collected.
            for line in result.split('\n'):
                if not line:
                    continue
                elif line == '=========':
                    break
                elif line == '---------':
                    trans = []
                    continue
                elif line.startswith('^_^:'):
                    continue
                elif line.startswith('0_0:'):
                    continue
                elif line.startswith('#'):
                    continue
                else:
                    line = '%s' % line
                    trans.append(line)
    return ''.join(trans)
def translate_srt():
    """Walk the current directory and translate every .srt subtitle file.

    Files containing '.zh.' are skipped (already translated); files without
    a language tag are renamed to '<name>.en.srt'.  For each English file a
    '<name>.zh.srt' is written where every third line of each 4-line SRT
    record (index, timestamp, text, blank) is translated.
    NOTE(review): assumes single-line subtitle text; multi-line cues would
    shift the `count % 4 == 2` alignment -- confirm against the input files.
    """
    cur_dir = os.getcwd()
    for root, dirs, files in os.walk(cur_dir):
        for f in files:
            if not f.endswith('.srt'):
                continue
            f = os.path.join(root, f)
            if '.en.' in f:
                en_srt = f
                zh_srt = '%s.zh.srt' % f[:-7]
            elif '.zh.' in f:
                continue
            else:
                # No language tag: derive both names from the base name
                en_srt = '%s.en.srt' % f[:-4]
                zh_srt = '%s.zh.srt' % f[:-4]
            # Rename the untagged file to its English name if needed
            if not os.path.exists(en_srt):
                os.rename(f, en_srt)
            print(en_srt)
            with open(en_srt, 'rt') as f_en:
                en_text = f_en.read()
            with open(zh_srt, 'wt') as f_zh:
                count = 0
                for line in en_text.split('\n'):
                    # Line 2 of every 4-line SRT record holds the subtitle text
                    if count % 4 == 2:
                        line = translate_text(line)
                    f_zh.write('%s\n' % line)
                    count += 1
                    # Throttle requests to the translation service
                    time.sleep(1)
class App():
    """Command-line front end for the Google-Translate helpers."""
    name = 'gtranslate'
    description = 'Google Translate'
    version = '1.0'
    url = ''
    author_email = 'author@gmail.com'
    license = 'MIT'
    @classmethod
    def run(cls):
        """Parse the command line and dispatch to a translator.

        With --translate-text the translation is printed; with
        --translate-srt every subtitle file under the working directory is
        translated; otherwise the help text is shown.
        """
        version_string = f'{App.name} v{App.version} {App.description}'
        parser = argparse.ArgumentParser(description=App.description)
        parser.add_argument('--version', action='version', version=version_string,
                            help='show version')
        parser.add_argument('-v', '--verbose', action='count',
                            default=0, help='verbose output')
        parser.add_argument('--translate-text', metavar='<text>')
        parser.add_argument('--translate-srt', action='store_true')
        opts = parser.parse_args()
        if opts.translate_text:
            print(translate_text(opts.translate_text, verbose=True))
        elif opts.translate_srt:
            translate_srt()
        else:
            parser.print_help()
# Script entry point: delegate to the CLI dispatcher.
if __name__ == '__main__':
    App.run()
| liuyug/code_example | gtranslate.py | gtranslate.py | py | 3,520 | python | en | code | 0 | github-code | 13 |
class Solution:
    """Maximum path sum in a binary tree (LeetCode 124)."""
    def maxPathSum(self,root):
        """Return the largest sum over all paths in the tree.

        A path follows parent-child links and may bend at any node;
        negative subtree contributions are clipped to zero.
        """
        best = float("-inf")
        def downward_gain(node):
            # Best single-branch sum ending at `node`; also updates `best`
            # with the best "bent" path passing through `node`.
            nonlocal best
            if node is None:
                return 0
            left_gain = max(downward_gain(node.left), 0)
            right_gain = max(downward_gain(node.right), 0)
            best = max(best, node.val + left_gain + right_gain)
            return node.val + max(left_gain, right_gain)
        downward_gain(root)
        return best
if __name__=="__main__":
    # BUGFIX: the original passed the raw level-order list to maxPathSum,
    # which crashed with AttributeError (lists have no .left/.right).
    # Build the actual binary tree [-10,9,20,None,None,15,7] first.
    class _Node:
        """Minimal binary-tree node for the demo."""
        def __init__(self, val):
            self.val = val
            self.left = None
            self.right = None
    def _build_tree(values):
        """Build a binary tree from a LeetCode-style level-order list."""
        if not values or values[0] is None:
            return None
        root = _Node(values[0])
        queue = [root]
        i = 1
        while queue and i < len(values):
            node = queue.pop(0)
            if i < len(values) and values[i] is not None:
                node.left = _Node(values[i])
                queue.append(node.left)
            i += 1
            if i < len(values) and values[i] is not None:
                node.right = _Node(values[i])
                queue.append(node.right)
            i += 1
        return root
    obj = Solution()
    root = _build_tree([-10, 9, 20, None, None, 15, 7])
    result = obj.maxPathSum(root)
    print(result)  # expected: 42
| Roy263/SDE-Sheet | BTreeMaxPathSum/maxpathsum.py | maxpathsum.py | py | 500 | python | en | code | 0 | github-code | 13 |
9513085506 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import urllib
import urllib.parse
from pymongo.results import UpdateResult
import config.db
import crawler.base
class IndexCrawler(crawler.base.BaseCrawler):
    """
    Crawler for the jinyongwang.com book index page.
    Parses the edition tabs, their descriptions and book lists, upserts each
    book into MongoDB keyed by (title, ver), and returns a summary string
    per book.
    """
    def _save(self, item):
        # Upsert one book record; (title, ver) is the unique key.
        c = config.db.connect()
        item['update_time'] = datetime.datetime.now().astimezone(config.tz_local)
        ret = c.update_one({'title': item['title'], 'ver': item['ver']}, {"$set": item}, upsert=True)
        if ret and isinstance(ret, UpdateResult):
            self.logger.info('save item ok, ret=%s , item = %s', (ret.modified_count, ret.upserted_id), item)
    def run(self, **kwargs):
        """Fetch and parse the index page, persist all books, and return a
        list of '<ver> - <year> - <press> - <title>' summary strings."""
        url = 'http://www.jinyongwang.com/book/'
        resp = self.request(url)
        # parse versions (one navigation tab per edition)
        versions = resp.etree.xpath('//*[@id="qnav"]/ul/li/text()')
        index_data = [{'version': v, 'description': [], 'book_list': []} for v in versions]
        # parse descriptions; index i pairs each description with its version
        descriptions_nodes = resp.etree.xpath('//*[@id="main"]/div[2]/h2')
        for i, dn in enumerate(descriptions_nodes):
            for x in dn.itertext():
                if x:
                    index_data[i]['description'].append(x)
        # parse book list (one <ul class="list"> per version, same ordering)
        book_list_nodes = resp.etree.xpath('//*[@id="main"]/div[2]/ul[@class="list"]')
        for i, bln in enumerate(book_list_nodes):
            lst = index_data[i]['book_list']
            for book_node in bln.xpath('./li'):
                info = dict()
                title = book_node.xpath("./p[2]//text()")[0]
                # NOTE(review): rstrip strips any of these characters from the
                # right, not the exact suffix -- confirm titles never end in them.
                info['title'] = str.rstrip(title, '小说')
                info['url'] = urllib.parse.urljoin(url, book_node.xpath("./p[2]/a/@href")[0])
                info['cover'] = urllib.parse.urljoin(url, book_node.xpath("./p[1]/a/img/@src")[0])
                # Third paragraph holds 'press / year'
                extra = [str.strip(x) for x in str.split(book_node.xpath("./p[3]//text()")[0], '/')]
                info['press'] = extra[0]
                info['year'] = extra[1]
                lst.append(info)
        result = []
        # dump index data: persist every book and build the summary list
        for i, data in enumerate(index_data):
            for book in data['book_list']:
                item = dict()
                item['ver'] = data['version']
                item['desc'] = data['description']
                item.update(book)
                self._save(item)
                result.append("{ver} - {year} - {press} - {title}".format(**item))
        return result
| plusplus1/louischa | crawler/novels/index.py | index.py | py | 2,481 | python | en | code | 0 | github-code | 13 |
37197215224 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: anya
"""
import numpy as np
import gf
import time
import math
import matplotlib.pyplot as plt
np.set_printoptions(threshold=np.nan)
class BCH(object):
    """BCH error-correcting code over GF(2^q), with code length n = 2^q - 1
    and designed correction capability t (designed distance 2t + 1).

    Galois-field arithmetic comes from the project-local `gf` module; the
    constructor reads candidate primitive polynomials from 'primpoly.txt'.
    """
    def __init__(self, n, t):
        """Construct the code.

        Picks a primitive polynomial of degree q from primpoly.txt, builds
        the power table, the 2t zeros alpha, alpha^2, ..., alpha^(2t), the
        generator polynomial g, and sets k = n - deg(g).

        :param n: code length, must equal 2^q - 1
        :param t: number of correctable errors
        """
        primpoly = 7
        self.q = int(math.log(n + 1, 2))
        # Scan primpoly.txt for a polynomial of degree q, i.e. a value in
        # [2^q, 2^(q+1)).  BUGFIX: the file handle is now closed via 'with'
        # (the original opened it and never closed it).
        # NOTE(review): the 'break' below only exits the inner loop, so a
        # later line may overwrite primpoly -- preserved as-is; confirm
        # whether the first match is intended.
        with open('primpoly.txt', "r") as file:
            primpoly = 7
            for line in file:
                now = line.split(",")
                for num in now:
                    num = int(num)
                    if (int(num) >= (1 << int(self.q))):
                        if (int(num) < (1 << int(self.q + 1))):
                            primpoly = num
                            break
        self.pm = gf.gen_pow_matrix(primpoly)
        alpha = 2
        self.t = t
        self.n = n
        deg = alpha
        # Zeros of the code: alpha, alpha^2, ..., alpha^(2t)
        self.zeros = list()
        self.zeros.append(alpha)
        for i in range (1, 2 * t):
            deg = (gf.prod(np.array([alpha]), np.array([deg]), self.pm))[0]
            self.zeros.append(deg)
        # Generator polynomial: minimal polynomial of the zeros
        res = gf.minpoly(np.array(self.zeros), self.pm)
        self.g = res[0]
        self.R = self.zeros
        # Message length k = n - deg(g)
        self.k = self.n - self.g.shape[0] + 1
        return
    def encode(self, U):
        """Systematically encode message rows of length k into codewords of
        length n.

        :param U: 1-D message or 2-D array with one message per row
        :return: np.array of encoded rows; a row of the wrong length yields
                 np.array([np.nan])
        """
        enc = list()
        maxi = U.shape[0]
        if (U.ndim == 1):
            maxi = 1
        for i in range (0, maxi):
            if (U.ndim == 1):
                u = U
            else:
                u = U[i, :]
            if (u.shape[0] != self.k):
                enc.append(np.array([np.nan]))
                continue
            # v = u * x^deg(g); adding the remainder of v / g appends the
            # parity symbols (systematic encoding)
            deg = np.zeros((self.g.shape[0]))
            deg[0] = 1
            v = gf.polyprod(u, deg, self.pm)
            res = gf.polydiv(v, self.g, self.pm)
            v = gf.polyadd(v, res[1])
            # Left-pad the codeword with zeros up to length n
            vadd = np.zeros(self.n)
            if (v.shape[0] <= vadd.shape[0]):
                vadd[-v.shape[0]:] = v
            enc.append(vadd)
        return np.asarray(enc)
    def decode(self, W, method='euclid'):
        """Decode received words, correcting up to t errors.

        :param W: 1-D word or 2-D array with one received word (length n) per row
        :param method: 'euclid' to find the error locator via the extended
                       Euclidean algorithm on the syndrome polynomial;
                       anything else uses the PGZ linear-system method
        :return: np.array of decoded messages (length k); a row of
                 np.array([np.nan]) signals a decoding failure
        """
        dec = list()
        maxi = W.shape[0]
        if (W.ndim == 1):
            maxi = 1
        for i in range (0, maxi):
            if (W.ndim == 1):
                w = W
            else:
                w = W[i, :]
            if (w.shape[0] != self.n):
                dec.append(np.array([np.nan]))
                continue
            # Syndrome: the received word evaluated at the code zeros
            s = (gf.polyval(w, self.zeros, self.pm))
            s = list(s.tolist())
            # All-zero syndrome -> no errors; message is the first k symbols
            if (np.any(s) == 0):
                dec.append(w[0:self.k])
                continue
            if (method == 'euclid'):
                s.reverse() #syndrom polynom
                s.append(1)
                # Extended Euclid on x^(2t+2) and the syndrome polynomial
                # yields the error-locator polynomial
                deg = np.zeros((2 * self.t + 2))
                deg[0] = 1
                res = gf.euclid(deg, np.asarray(s), self.pm, self.t)
                locator = res[2]
                errors = locator.shape[0] - 1
            else:
                # PGZ: try decreasing error counts until the syndrome linear
                # system becomes solvable
                locator = np.array([np.nan])
                err = 0
                for num in range (self.t, -1, -1):
                    matr = np.zeros((num, num))
                    for i in range (0, num):
                        for j in range (0, num):
                            matr[i][j] = s[i + j]
                    b = list()
                    for i in range (0, num):
                        b.append(s[num + i])
                    if (num == 0):
                        # No solvable system found for any error count
                        err = 1
                        break
                    res = gf.linsolve(matr, np.asarray(b), self.pm)
                    if (type(res) != float):
                        # Append the constant term 1 to complete the locator
                        locator = res
                        loc = np.zeros((locator.shape[0] + 1))
                        loc[:-1] = locator
                        loc[loc.shape[0] - 1] = 1
                        locator = loc
                        errors = num
                        break
                if (err or (locator[0] == np.nan)):
                    #print("Decode error1")
                    dec.append(np.array([np.nan]))
                    continue
            # Roots of the locator give the error positions; flip those bits
            els = np.arange(0, (1 << (self.q)), 1)
            ans = gf.polyval(locator, els, self.pm)
            cnt = 0
            j = list()
            for i in range (0, len(ans)):
                if (ans[i] == 0):
                    cnt += 1
                    num = self.pm[i][0]
                    pos = self.n - 1 - (self.n - num) % self.n
                    w[int(pos)] = int(w[int(pos)]) ^ 1
            # The number of roots must match the locator degree
            if (cnt != errors):
                dec.append(np.array([np.nan]))
                continue
            # Verify: corrected word must have an all-zero syndrome
            s = gf.polyval(w, self.zeros, self.pm)
            if (np.any(s) == 0):
                dec.append(w[0:self.k])
            else:
                dec.append(np.array([np.nan]))
        return np.asarray(dec)
    def gen(self, pos, k, inp):
        """Recursively enumerate all non-zero binary words of length k into
        self.words (helper for dist())."""
        if (pos == k):
            if (inp.any() != 0):
                self.words[self.num] = inp
                self.num += 1
            return
        inp[pos] = 0
        self.gen(pos + 1, k, inp)
        inp[pos] = 1
        self.gen(pos + 1, k, inp)
        return
    def dist(self):
        """Compute the true minimum distance by encoding all 2^k - 1 non-zero
        messages and taking the minimum pairwise Hamming distance.
        Exponential in k -- only feasible for small codes."""
        k = self.k
        self.num = 0
        inp = np.zeros((k))
        self.words = np.zeros(((1 << k) - 1, k))
        self.gen(0, k, inp)
        #print(self.words)
        encoded = self.encode(self.words)
        mincnt = self.n
        for i in (encoded):
            for j in (encoded):
                cntnow = 0
                diff = np.logical_xor(i, j)
                cntnow = np.sum(diff)
                if (cntnow == 0):
                    continue
                if (cntnow < mincnt):
                    mincnt = cntnow
        #print(encoded)
        # Sanity check against the designed distance
        if (mincnt < 2 * self.t + 1):
            print("Error distance")
        return mincnt
def check():
    """Stress-test BCH encoding/decoding.

    For q = 6 (n = 63) and every t, encodes a random message, injects
    exactly t errors (even iterations) or t + 1 errors (odd iterations),
    decodes, and tallies correct / wrong / denied decodings separately for
    the <= t and > t cases.  Prints the resulting statistics.

    :return: 0
    """
    tot_good = 0
    tot_err = 0
    tot_wrong = 0
    moret_good = 0
    moret_err = 0
    moret_wrong = 0
    for q in range (6, 7):
        n = (1 << q) - 1
        #print("n = ", n)
        for t in range (1, (n - 1) // 2 + 1):
            code = BCH(n, t)
            # Random binary message of length k
            mes = np.random.rand(1, code.k)
            mes = mes * 10 // 1
            mes = mes % 2
            encoded = code.encode(mes)
            #print(n, t)
            #print(mes, encoded)
            for i in range (0, 2):
                '''new = np.random.rand(1, n)
                new = new * 10 // 1
                new = new % 2'''
                # Error pattern: t errors when i is odd, t + 1 when even
                new = np.zeros((n))
                if (i % 2):
                    new[0:t] = 1
                else:
                    new[0:t + 1] = 1
                errors = np.sum(np.asarray(new))
                new = (new + encoded) % 2
                #print(new)
                res = code.decode(np.asarray(new))
                #print(res)
                if (errors <= code.t):
                    # Within the guaranteed correction capability
                    if (res[0].shape[0] == code.k):
                        if (np.any((res[0, :] + mes) % 2) == 0):
                            tot_good += 1
                        else:
                            tot_wrong += 1
                    else:
                        tot_err += 1
                else:
                    # Beyond the guaranteed correction capability
                    if (res[0].shape[0] == code.k):
                        if (np.any((res[0, :] + mes) % 2) == 0):
                            moret_good += 1
                        else:
                            moret_wrong += 1
                    else:
                        moret_err += 1
                #print("\n", errors, tot_good, tot_wrong, tot_err)
                #print(moret_good, moret_wrong, moret_err)
    print("Check encoding, decoding")
    print("Mistakes <= t, correct = ", tot_good * 1.0 / (tot_good + tot_wrong + tot_err), "\nMistakes <= t, wrong = ", tot_wrong, "\nMistakes <= t, denied = ", tot_err)
    print("Mistakes > t, correct = ", moret_good * 1.0 / (moret_good + moret_wrong + moret_err), "\nMistakes > t, wrong = ", moret_wrong * 1.0 / (moret_good + moret_wrong + moret_err), "\nMistakes > t, denied = ", moret_err * 1.0 / (moret_good + moret_wrong + moret_err))
    return 0
#Test1
'''code = BCH(15, 3)
print("encode")
print(code.encode(np.array([0, 1, 1, 0, 1])), "\n")
print("decode")
print(code.decode(np.array([0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0])))
print(code.dist())'''
#Test2
'''code = BCH(7, 3)
print("encode")
print(code.encode(np.array([1])), "\n")
print("decode")
print(code.decode(np.array([1, 1, 1, 1, 1, 1, 1])))
print(code.dist())'''
#Test3
'''code = BCH(7, 1)
print("encode")
print(code.encode(np.array([1, 0, 0, 1])), "\n")
print("decode")
print(code.decode(np.array([1, 1, 0, 1, 1, 1, 0])))
print(code.dist())'''
#Test4
'''code = BCH(31, 6)
print((code.n, code.k, code.dist()))
mes = np.eye(6)
print(mes)
print("encode")
print(code.encode(mes), "\n")
print("decode")
print(code.decode(np.array([[1,1,0,1,0,0,1,1,0,0,1,0,1,1,0,1,1,1,1,0,1,0,1,0,0,0,1,0,0,1,1],
[0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,1,0,0,0,1,1,1,1,1,0,0,1,1,0,1,0],
[1,0,1,0,0,0,0,1,0,1,0,1,1,1,0,1,1,0,0,0,1,1,1,1,1,0,0,1,1,0,1],
[0,0,0,1,0,0,1,1,1,0,0,0,0,0,1,1,0,0,1,0,1,1,0,1,1,1,1,0,1,0,1],
[0,1,0,0,1,0,1,0,1,1,1,0,1,1,0,0,0,1,1,1,1,1,0,0,1,1,0,1,0,0,1],
[1,0,0,0,0,1,1,0,0,1,0,1,1,0,1,1,1,1,0,1,0,1,0,0,0,1,0,0,1,1,1]])))'''
#graphs
'''for q in range (2, 7):
n = (1 << q) - 1
print("n = ", n)
tt = list()
speed = list()
for t in range (1, (n - 1) // 2):
code = BCH(n, t)
tt.append(t)
speed.append(code.k * 1.0 / n)
#d = code.dist()
#if (d > 2 * t + 1):
# print(n, t, d)
#print(n, t, code.k * 1.0 / n)
x = np.asarray(tt)
y = np.asarray(speed)
plt.figure()
plt.plot(x, y, 'r')
plt.xlabel('t')
plt.ylabel('speed')
plt.show()'''
#Seaching for example when d > 2t + 1
'''for q in range (2, 6):
n = (1 << q) - 1
for t in range (1, (n - 1) // 2):
code = BCH(n, t)
d = code.dist()
print(n, t, d, 2 * t + 1)
#if (d > 2 * t + 1):
# print(n, t, d)
#print(n, t, code.k * 1.0 / n)'''
#Stress test
'''check() '''
#Time
'''time0 = time.time()
time1 = time.time()
print("Time ", time1 - time0)'''
| Anyabelle/Algebra | bch.py | bch.py | py | 10,197 | python | en | code | 0 | github-code | 13 |
18104948295 | import collections
import os
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from torchvision.transforms import transforms
from data.dataset import CustomDataset
from data.processor import process_video, process_audio
def prepare_gender():
    """Read the gender annotation file for the eNTERFACE dataset.

    Each line holds "<subject_id> <m|f>"; males map to 0, females to 1.

    :return: dict mapping subject ID (str) to gender code (0 or 1)
    """
    code_for = {'m': 0, 'f': 1}
    genders = {}
    with open("../datasets/enterface/gender.txt") as handle:
        for record in handle:
            subject, label = record.split()
            genders[subject] = code_for[label]
    return genders
def get_subject_id(path):
    """
    Extract the numeric subject ID from a wav/avi file path.

    :param path: path that contains the subject ID, e.g. '.../subject 12/anger/...'
    :return: subject ID as a string; '' if no 'subject <digits>' part is found
    """
    # locate the index of the word 'subject', shift by 8, which will be the
    # index of the first digit of the subject ID.
    start = path.find("subject ")
    if start == -1:
        # BUGFIX: the original fell through with find() == -1 and silently
        # scanned from index 7; be explicit about the missing marker instead.
        return ''
    i = start + 8
    idx = ''
    # keep reading digits for the subject id; the bounds check prevents the
    # IndexError the original raised when the path ended with the digits.
    while i < len(path) and path[i].isdigit():
        idx += path[i]
        i += 1
    return idx
def split(dataset):
    """Partition path tuples into train/val/test sets per emotion (60/20/20).

    :param dataset: dict mapping emotion id -> list of (video path, audio path) tuples
    :return: (train, val, test), each a defaultdict with the same structure
    """
    train = collections.defaultdict(list)
    val = collections.defaultdict(list)
    test = collections.defaultdict(list)
    for emotion_id, path_tuples in dataset.items():
        # First carve off 20% for testing, then 25% of the remaining 80%
        # for validation: 0.25 * 0.8 = 0.2 of the total.
        remaining, test_part = train_test_split(
            path_tuples, test_size=0.2, random_state=1, shuffle=True)
        train_part, val_part = train_test_split(
            remaining, test_size=0.25, random_state=1, shuffle=True)
        train[emotion_id].extend(train_part)
        val[emotion_id].extend(val_part)
        test[emotion_id].extend(test_part)
    return train, val, test
def prepare_paths(video_dir='../../datasets/enterface/original', audio_dir='../../datasets/enterface/wav'):
    """
    Collect (video, audio) path pairs for the eNTERFACE dataset.

    Walks the video directory, maps each leaf directory to one of the six
    emotion IDs, pairs every .avi path with its .wav counterpart (the audio
    tree mirrors the video tree), and splits the result 60/20/20.

    :param video_dir: path of the video directory
    :param audio_dir: path of the audio directory
    :return: (train, val, test), each a dict mapping emotion id to a list of
             (video path avi, audio path wav) tuples
    """
    paths = collections.defaultdict(list)
    # encode the emotion into integers from 0 to 5.
    possible_emotions = ['anger', 'disgust', 'fear', 'happiness', 'sadness', 'surprise']
    emotion_mapping = {emotion: i for i, emotion in enumerate(possible_emotions)}
    for curr_dir, sub_dir, files in os.walk(video_dir):
        if files:
            # path example: './../datasets/enterface/wav\subject 1\anger\garbage.wav'
            # emotion will be the second section from the right of the path string.
            emotion = os.path.split(os.path.split(curr_dir)[0])[-1]
            # catch the exception in folder structure from subject 6
            if emotion not in emotion_mapping:
                # emotion will be the first section from the right.
                emotion = os.path.split(curr_dir)[1]
            # get the absolute path of the avi files only, ignore the db files
            files = [os.path.join(curr_dir, file) for file in files if file[-2:] != 'db']
            emotion_id = emotion_mapping[emotion]
            # put in the list.
            paths[emotion_id].extend(files)
    path_tuples = collections.defaultdict(list)
    # convert all avi path to wav path, because they have the same directory structure.
    for emotion, avi_paths in paths.items():
        for avi_path in avi_paths:
            # Strip the video root prefix and swap the .avi extension for .wav
            wav_file = avi_path[len(video_dir) + 1:][:-3] + 'wav'
            wav_path = os.path.join(audio_dir, wav_file)
            path_tuples[emotion].append((avi_path, wav_path))
    return split(path_tuples)
def prepare_data(data):  # data type will be dictionary, emotion: paths.
    """
    Retrieve the frame data and audio spectrograms from the path tuples.

    :param data: dict mapping emotion ID -> list of (video path avi, audio path wav)
    :return: (frames, specs), (gender, labels) as numpy arrays; all four are
             aligned sample-wise
    """
    gender_mapping = prepare_gender()
    frame_chunks, spec_chunks, gender, labels = [], [], [], []
    for emotion_id, paths in data.items():
        for avi_path, wav_path in paths:
            # get the key frames of the avi_path and the spectrograms of the
            # wav path; both processors return None for rejected paths
            key_frames = process_video(avi_path)
            spectrograms = process_audio(wav_path)
            assert (key_frames is None) == (spectrograms is None), "Processors must accept/reject the same paths"
            if key_frames is None:
                continue
            assert len(key_frames) == len(spectrograms), "Processors must create the same number of samples"
            frame_chunks.append(key_frames)
            spec_chunks.append(spectrograms)
            subject_id = get_subject_id(wav_path)  # or avi path, it's the same
            gender_id = gender_mapping[subject_id]
            labels += [emotion_id] * len(key_frames)
            gender += [gender_id] * len(key_frames)
    # BUGFIX/PERF: stack once at the end.  The original compared an ndarray
    # against [] after the first iteration (fragile/undefined) and re-stacked
    # the whole accumulator with np.vstack on every file (O(n^2) copying).
    frames = np.vstack(frame_chunks) if frame_chunks else np.array([])
    specs = np.vstack(spec_chunks) if spec_chunks else np.array([])
    labels = np.array(labels)
    gender = np.array(gender)
    print("frame dims", frames.shape)
    print("specs dims", specs.shape)
    print("label dims", labels.shape)
    print("gender dims", gender.shape)
    return (frames, specs), (gender, labels)
def get_dataloaders(data_dir="../datasets/enterface/processed/", bs=32):
    """
    Load preprocessed train/val/test tensors and wrap them in DataLoaders.

    Each of the files 'train', 'val' and 'test' in data_dir is expected to
    hold a torch-saved (x, y) pair that CustomDataset accepts.

    :param data_dir: location of the preprocessed data files
    :param bs: batch size
    :return: (trainloader, valloader, testloader), all shuffled
    """
    xtrain, ytrain = torch.load(os.path.join(data_dir, 'train'))
    xval, yval = torch.load(os.path.join(data_dir, 'val'))
    xtest, ytest = torch.load(os.path.join(data_dir, 'test'))
    train = CustomDataset(xtrain, ytrain)
    val = CustomDataset(xval, yval)
    test = CustomDataset(xtest, ytest)
    trainloader = DataLoader(train, batch_size=bs, shuffle=True) # , num_workers=2)
    valloader = DataLoader(val, batch_size=bs, shuffle=True) # , num_workers=2)
    testloader = DataLoader(test, batch_size=bs, shuffle=True) # , num_workers=2)
    return trainloader, valloader, testloader
| usef-kh/EC-523-Deep-Learning-Project | AudioVisual/data/enterface.py | enterface.py | py | 6,849 | python | en | code | 2 | github-code | 13 |
10256119996 | import numpy as np
import tensorflow as tf
from collections import namedtuple
def decode_transfer_fn(transfer_fn):
    """Map an activation name to the corresponding tf.nn function.

    :param transfer_fn: one of "relu", "relu6", "tanh", "sig", "elu"
    :raises Exception: for any other name
    """
    activations = {
        "relu": tf.nn.relu,
        "relu6": tf.nn.relu6,
        "tanh": tf.nn.tanh,
        "sig": tf.nn.sigmoid,
        "elu": tf.nn.elu,
    }
    fn = activations.get(transfer_fn)
    if fn is None:
        raise Exception("Unsupported transfer function %s" % transfer_fn)
    return fn
def repeat_end(val, n, k):
    """Return a list of `val` repeated `n` times with `k` appended."""
    return [val] * n + [k]
def build_l2_loss():
    """Sum of tf.nn.l2_loss (0.5 * sum of squares) over every trainable
    variable in the current TF1 graph; used as a weight-decay term."""
    l2_loss = tf.zeros([])
    for var in tf.trainable_variables():
        l2_loss += tf.nn.l2_loss(var)
    return l2_loss
def build_learning_rate(cfg, global_step):
    """Build a (possibly scheduled) learning-rate tensor from the config.

    cfg['learning_rate'] is either a plain float (constant learning rate) or
    a dict with 'kind' in {'poly', 'exp'} plus the schedule parameters.

    :param cfg: config dict
    :param global_step: global-step variable driving the decay schedules
    :raises Exception: on an unrecognized schedule kind
    """
    lr = cfg['learning_rate']
    if type(lr) is float:
        return tf.constant(lr)
    elif lr['kind'] == "poly":
        return tf.train.polynomial_decay(learning_rate=lr['start'],
                                         global_step=global_step,
                                         end_learning_rate=lr['end'],
                                         decay_steps=lr['decay_steps'],
                                         power=lr['power'])
    elif lr['kind'] == "exp":
        return tf.train.exponential_decay(learning_rate=lr['start'],
                                          global_step=global_step,
                                          decay_steps=lr['decay_steps'],
                                          decay_rate=lr['decay_rate'],
                                          staircase=False)
    else:
        # NOTE(review): the message mentions 'none', but constants are given
        # as a plain float rather than kind 'none'.
        raise Exception("lr_decay_type must be 'none', 'poly' or 'exp'")
def build_apply_gradients(cfg, loss, learning_rate, global_step):
    """Create the Adam training op: compute, clip, and apply gradients.

    Gradients are first clipped elementwise to +/- cfg['clip_val_val'] and
    then jointly rescaled to global norm cfg['clip_norm_val']; both clips
    are applied, in that order, per the two config keys.
    """
    optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
    gs, vs = zip(*optimizer.compute_gradients(loss))
    # Elementwise clipping first...
    gs = [tf.clip_by_value(g, clip_value_min=-cfg['clip_val_val'], clip_value_max=cfg['clip_val_val']) for g in gs]
    # ...then global-norm clipping across the whole gradient list.
    gs, _ = tf.clip_by_global_norm(gs, cfg['clip_norm_val'])
    apply_grads = optimizer.apply_gradients(zip(gs, vs), name='apply_gradients', global_step=global_step)
    return apply_grads
def normalize(x, axis, eps):
    """Zero-mean, unit-variance normalization of ``x`` along ``axis``.

    No learned offset/scale is applied; ``eps`` guards division by a
    near-zero variance.
    """
    mean, variance = tf.nn.moments(x, axes=[axis], keep_dims=True)
    return tf.nn.batch_normalization(x, mean, variance, offset=None, scale=None, variance_epsilon=eps)
class MLP(object):
    """Fully connected network with configurable depth and activation.

    Variables live under the tf variable scope ``name``. When
    cfg['weight_reparam'] is set, each layer's weight is built as
    l2_normalize(w, axis=0) * g with a learned per-output gain ``g``
    (a weight-normalization style reparameterization).
    """

    def __init__(self, cfg, d_in, d_outs, name, nl_at_end):
        # nl_at_end: apply the activation after the final layer as well.
        self.cfg = cfg
        self.name = name
        self.transfer_fn = decode_transfer_fn(cfg['mlp_transfer_fn'])
        self.nl_at_end = nl_at_end
        self._init_weights(d_in, d_outs)

    def _init_weights(self, d_in, d_outs):
        # Create one (w, b) pair per layer size in d_outs, each in its own
        # numbered sub-scope so variable names stay unique.
        self.ws = []
        self.bs = []
        d = d_in
        with tf.variable_scope(self.name) as scope:
            for i, d_out in enumerate(d_outs):
                with tf.variable_scope('%d' % i) as scope:
                    if self.cfg['weight_reparam']:
                        # Reparameterized weight: normalized direction times gain.
                        w = tf.get_variable(name="w", shape=[d, d_out], initializer=tf.contrib.layers.xavier_initializer())
                        g = tf.get_variable(name="g", shape=[1, d_out], initializer=tf.ones_initializer())
                        self.ws.append(tf.nn.l2_normalize(w, axis=0) * tf.tile(g, [d, 1]))
                    else:
                        self.ws.append(tf.get_variable(name="w", shape=[d, d_out], initializer=tf.contrib.layers.xavier_initializer()))
                    self.bs.append(tf.get_variable(name="b", shape=[d_out], initializer=tf.zeros_initializer()))
                d = d_out

    def forward(self, z):
        # Apply every layer; skip the activation on the last layer unless
        # nl_at_end was requested.
        x = z
        for i in range(len(self.ws)):
            x = tf.matmul(x, self.ws[i]) + self.bs[i]
            if self.nl_at_end or i + 1 < len(self.ws):
                x = self.transfer_fn(x)
        return x
def kldiv(logits, labels):
    """Per-example KL divergence KL(labels || softmax(logits)).

    cross_entropy(labels, logits) = H(labels) + KL(labels || q), so adding
    sum(labels * log(labels)) (= -H(labels)) leaves exactly the KL term.
    The 1e-8 guards log(0) for zero-probability label entries.
    """
    return tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=labels) \
        + tf.reduce_sum(labels * tf.math.log(labels + 1e-8))
| dselsam/neurocore-public | python/tfutil.py | tfutil.py | py | 3,981 | python | en | code | 35 | github-code | 13 |
28367257839 | import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.linear_model import ElasticNet, Lasso
from sklearn.feature_selection import SelectFromModel
from sklearn.svm import SVR
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.decomposition import PCA
from sklearn.preprocessing import RobustScaler
import xgboost as xgb
from ensemble.regressor_averaged import RegressorAveraged
from ensemble.stacked_regressor_averaged import StackedRegressorAveraged
from ensemble.stacked_regressor_retrained import StackedRegressorRetrained
from model.nn import BasicNeuralNetwork
from preprocessing.preprocessor import Preprocessor
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
from sklearn.decomposition import PCA, FastICA
BASE_DIR = 'data/'
#########################
# Preprocess data
#########################
train = pd.read_csv(BASE_DIR + 'train.csv')
test = pd.read_csv(BASE_DIR + 'test.csv')
preprocessor = Preprocessor(magicFeature=True)
train_p, test_p = preprocessor.transform(train, test)
#########################
# Create models
#########################
# Level-1 base learners: gradient boosting, lasso, and a LightGBM parameter
# dict (presumably instantiated inside the ensemble wrappers — confirm
# against StackedRegressorAveraged).
gb = GradientBoostingRegressor(n_estimators=1000, max_features=0.95,
                               learning_rate=0.005, max_depth=4)
las = Lasso(alpha=5)
lgb = {
    'objective': 'regression',
    'metric': 'rmse',
    'boosting': 'gbdt',
    'learning_rate': 0.0045 , #small learn rate, large number of iterations
    'verbose': 0,
    'num_iterations': 500,
    'bagging_fraction': 0.95,
    'bagging_freq': 1,
    'bagging_seed': 42,
    'feature_fraction': 0.95,
    'feature_fraction_seed': 42,
    'max_bin': 100,
    'max_depth': 3,
    'num_rounds': 800
}
regressors = [gb, las, lgb]
# XGBoost parameter dict for the level-2 meta learner of the stack.
meta_regressor = {
    'eta': 0.005,
    'max_depth': 2,
    'objective': 'reg:linear',
    'eval_metric': 'rmse',
    'base_score': StackedRegressorAveraged.FILL_AVG, # base prediction = mean(target)
    'silent': 1
}
col = list(test_p.columns)
stacked_regressor = StackedRegressorAveraged(regressors, meta_regressor, col)
# NOTE(review): this dict shadows the "import xgboost as xgb" module alias.
xgb = {
    'n_trees': 520,
    'eta': 0.0045,
    'max_depth': 4,
    'subsample': 0.93,
    'objective': 'reg:linear',
    'eval_metric': 'rmse',
    'base_score': StackedRegressorAveraged.FILL_AVG, # base prediction = mean(target)
    'silent': True,
    'seed': 42,
}
# Final prediction: weighted blend, 25% stacked ensemble + 75% plain XGBoost.
avg_regressor = RegressorAveraged([stacked_regressor, xgb], col, pred_weights = [0.25, 0.75])
avg_regressor = avg_regressor.fit(train_p, train_p['y'])
# NOTE(review): the return value of predict() is discarded here.
avg_regressor.predict(test_p)
| ASzot/Kaggle_Mercedes_Benz | main.py | main.py | py | 2,707 | python | en | code | 0 | github-code | 13 |
35205533849 | from util import aoc
def parse(input):
    """Parse the puzzle input into ``(width, height, grid)`` of int energy levels.

    ``input`` shadows the builtin but is kept for interface compatibility.
    The former local name ``os`` (which shadowed the common module name) is
    replaced, and the append loop becomes a nested comprehension.
    """
    grid = [[int(ch) for ch in line] for line in input.splitlines()]
    return len(grid[0]), len(grid), grid
def unparse(model):
    """Render a model back to its textual grid form, one row per line."""
    _, _, grid = model
    rows = ("".join(str(cell) for cell in row) for row in grid)
    return "".join(row + "\n" for row in rows)
def tick(model):
    """I move the model forward one tick, returning a tuple containing the next
    model and the number of flashes that occurred during the tick.

    The input model is not mutated; each row is copied before energizing.
    A cell that flashes is reset to 0 and, because of the ``flashed`` guard,
    receives no further energy this tick, so it stays at 0.
    """

    def neighbors(x, y):
        # All eight surrounding coordinates; may fall outside the grid.
        return [
            (x - 1, y - 1),
            (x - 1, y),
            (x - 1, y + 1),
            (x, y - 1),
            # me!
            (x, y + 1),
            (x + 1, y - 1),
            (x + 1, y),
            (x + 1, y + 1),
        ]

    def in_bounds(x, y):
        return 0 <= x < w and 0 <= y < h

    def energize(x, y):
        # Recursively add energy; a flash cascades to all in-bounds neighbors.
        if (x, y) in flashed:
            return  # only flash once per tick!
        octopuses[y][x] += 1
        if octopuses[y][x] <= 9:
            return
        # fully energized; flash!
        flashed.add((x, y))
        octopuses[y][x] = 0
        for x, y in neighbors(x, y):
            if in_bounds(x, y):
                energize(x, y)

    w, h, curr = model
    # Copy each row so the caller's grid is left untouched.
    octopuses = [list(row) for row in curr]
    flashed = set()
    for y, row in enumerate(octopuses):
        for x, _ in enumerate(row):
            energize(x, y)
    return (w, h, octopuses), len(flashed)
def part_one(model, *, steps=100):
    """Return the total number of flashes observed over ``steps`` ticks."""
    state, total = model, 0
    for _ in range(steps):
        state, flashes = tick(state)
        total += flashes
    return total
if __name__ == "__main__":
    # Wire this day's parser/solver into the shared AoC harness.
    aoc.solve(
        __file__,
        parse,
        part_one,
    )
| barneyb/aoc-2023 | python/aoc2021/day11/dumbo_octopus_grid.py | dumbo_octopus_grid.py | py | 1,751 | python | en | code | 0 | github-code | 13 |
23649939340 | #!/usr/bin/python
# --------------------------------------------------------------------------
#
# MIT License
#
# --------------------------------------------------------------------------
# --------------------------------------------------------------------------
class CyBldConfigSettings:
    """Plain value object holding user-configurable build-feedback settings.

    Field meanings below are inferred from the parameter names — confirm
    against the config parser that constructs this object.
    """

    def __init__(self, notify_success, notify_fail,
                 bell_success, bell_fail,
                 tmux_success, tmux_fail,
                 allow_multiple, print_stats, talk,
                 notify_timeout, tmux_refresh_status):
        # Desktop notification on success / failure (presumed from names).
        self.notify_success = notify_success
        self.notify_fail = notify_fail
        # Terminal bell on success / failure.
        self.bell_success = bell_success
        self.bell_fail = bell_fail
        # tmux feedback on success / failure.
        self.tmux_success = tmux_success
        self.tmux_fail = tmux_fail
        # Misc behavior flags and timing values.
        self.allow_multiple = allow_multiple
        self.print_stats = print_stats
        self.talk = talk
        self.notify_timeout = notify_timeout
        self.tmux_refresh_status = tmux_refresh_status
| drcdev-gh/cybld | cybld/cybld_config_settings.py | cybld_config_settings.py | py | 1,066 | python | en | code | 1 | github-code | 13 |
5764705007 | import re
import urllib
from urllib.parse import urlparse
from bs4 import BeautifulSoup
class HtmlParser(object):
    """Extracts novel ranking rows and pagination links from zhuizhuishu.com."""

    def pase(self, page_url, html_content):
        """Parse a ranking page; return (pagination urls, book data).

        NOTE: "pase" (sic: parse) is the established public name; kept.
        """
        if html_content is None:
            return
        if page_url == '':
            page_url = 'http://www.zhuizhuishu.com/top.html'
        soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf8')
        return self._get_new_urls(page_url, soup), self._get_new_data(page_url, soup)

    def pase_data(self,page_url,html_content):
        """Parse only the book data out of a ranking page."""
        if html_content is None:
            return
        if page_url == '':
            page_url = 'http://www.zhuizhuishu.com/top.html'
        soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf8')
        return self._get_new_data(page_url,soup)

    def pase_urls(self,page_url,html_content):
        """Parse only the pagination urls out of a ranking page."""
        if html_content is None:
            return
        if page_url == '':
            page_url = 'http://www.zhuizhuishu.com/top.html'
        soup = BeautifulSoup(html_content, 'html.parser', from_encoding='utf8')
        return self._get_new_urls(page_url,soup)

    def _get_new_data(self, page_url, soup):
        """Scrape every book row from the ranking list.

        Each book occupies four consecutive <li> cells: name/link, latest
        chapter/link, update time, and a fourth cell (presumably a rating)
        that is skipped. Returns {book name: [name, url, chapter title,
        chapter url, update time]}.
        """
        # Sample markup of one book row:
        # <li class="stname"><a href="/mulu_3510.html">五行天</a></li>
        # <li><a href="/mulu_3510.html">第三百六十七章 石像</a></li>
        # <li class="gxdate">08-22 09:59</li>
        # <li class="gxdate">10.0</li>
        book_info = {}
        books = {}
        # Latest chapter title of each novel
        books_new_title = {}
        # Latest chapter url of each novel
        books_new_url = {}
        # Novel name
        books_name = {}
        # Novel url
        books_url = {}
        # Last update time of each novel
        books_update_time = {}
        # NOTE(review): the five dicts above are never used in this method.
        # Site base url
        web_url = "http://www.zhuizhuishu.com/"
        item = soup.find('ul', class_="xiaoshuolist").find('li', class_="stname")
        # Skip the three header cells before the first book row.
        item = item.find_next_sibling('li')
        item = item.find_next_sibling('li')
        item = item.find_next_sibling('li')
        count = 0
        while (item.find_next_sibling('li') != None):
            item = item.find_next_sibling('li')
            if item is not None:
                count = count + 1
                if count == 1:
                    book_info['name_and_url'] = item
                elif count == 2:
                    book_info['title_and_url'] = item
                elif count == 3:
                    book_info['update_time'] = item
                # Every four <li> cells make one complete book record;
                # the 4th cell is intentionally not stored.
                if count % 4 == 0:
                    count = 0
                    book = []
                    name = book_info['name_and_url'].find('a').get_text()
                    book.append(name)
                    url = urllib.parse.urljoin(web_url, book_info['name_and_url'].find('a')['href'])
                    book.append(url)
                    title = book_info['title_and_url'].find('a').get_text()
                    book.append(title)
                    new_url = urllib.parse.urljoin(web_url,
                                                   book_info['title_and_url'].find('a')['href'])
                    book.append(new_url)
                    update_time = book_info['update_time'].get_text()
                    book.append(update_time)
                    books[name] = book
            else:
                # NOTE(review): unreachable — the while condition already
                # guarantees a next sibling exists.
                print("处理完一个页面啦。。。")
                break
        return books

    def _get_new_urls(self, page_url, soup):
        """Build the full pagination set /top_1.html .. /top_N.html.

        Only meaningful on the first ranking page; returns None otherwise.
        """
        if page_url != "http://www.zhuizhuishu.com/top.html":
            return
        new_urls = set()
        # This is the novel ranking page; pagination links look like:
        # <a href="/top_2.html" style="margin-right:5px;">2</a>
        # links = soup.find('div', id="divPageNav").find_all('a',
        #                                                    href=re.compile(r"/top_\d+\.html"))
        # for link in links:
        #     new_url = link['href']
        #     new_full_url = urllib.parse.urljoin(page_url, new_url)
        #     new_urls.add(new_full_url)
        # Read the page count from the "last page" (尾页) link instead.
        link = soup.find('div', id="divPageNav").find('a', text="尾页")['href']
        patt = re.compile(r"(\d+)")
        page_num = int(patt.search(link).group())
        for i in range(1, page_num + 1):
            new_url = urllib.parse.urljoin(page_url, ''.join(['/top_', str(i), '.html']))
            new_urls.add(new_url)
        return new_urls
| jiefly/NovelUpdateCraw | test/html_parser.py | html_parser.py | py | 4,382 | python | en | code | 0 | github-code | 13 |
4713974864 | import numpy as np
from bs4 import BeautifulSoup
# Read data from a saved results page and append samples to the lists
def scrapePage(retX, retY, inFile, yr, numPce, origPrc):
    """Parse one saved results page and collect sold-set samples.

    Appends [year, piece count, new flag, original price] rows to ``retX``
    and the observed selling price to ``retY``. Only sold listings priced
    above half the original price are kept.
    """
    # Open and read the HTML file
    with open(inFile, encoding='utf-8') as f:
        html = f.read()
    soup = BeautifulSoup(html, 'html.parser')
    i = 1
    # Walk the page structure table by table
    # (find_all returns the matching nodes as a list)
    currentRow = soup.find_all('table', r="%d" % i)
    while (len(currentRow) != 0):
        currentRow = soup.find_all('table', r="%d" % i)
        title = currentRow[0].find_all('a')[1].text
        lwrTitle = title.lower()
        # Check whether the listing is labeled brand new
        if (lwrTitle.find('new') > -1) or (lwrTitle.find('nisb') > -1):
            newFlag = 1.0
        else:
            newFlag = 0.0
        # Check whether the item was sold; only sold listings are collected
        soldUnicde = currentRow[0].find_all('td')[3].find_all('span')
        if len(soldUnicde) == 0:
            print("商品 #%d 没有出售" % i)
        else:
            # Parse the selling price out of the page
            soldPrice = currentRow[0].find_all('td')[4]
            priceStr = soldPrice.text
            priceStr = priceStr.replace('$', '')
            priceStr = priceStr.replace(',', '')
            if len(soldPrice) > 1:
                priceStr = priceStr.replace('Free shipping', '')
            sellingPrice = float(priceStr)
            # Drop incomplete sets (sold below half the original price)
            if sellingPrice > origPrc * 0.5:
                print("%d\t%d\t%d\t%f\t%f" % (yr, numPce, newFlag, origPrc, sellingPrice))
                retX.append([yr, numPce, newFlag, origPrc])
                retY.append(sellingPrice)
        i += 1
        currentRow = soup.find_all('table', r="%d" % i)
# Read the six saved LEGO-set pages in turn and build the data matrix
def setDataCollect(retX, retY):
    """Load all six saved LEGO-set pages, appending samples in-place."""
    # 2006 LEGO 8288: 800 pieces, original price 49.99
    scrapePage(retX, retY, './lego/lego8288.html', 2006, 800, 49.99)
    # 2002 LEGO 10030: 3096 pieces, original price 269.99
    scrapePage(retX, retY, './lego/lego10030.html', 2002, 3096, 269.99)
    # 2007 LEGO 10179: 5195 pieces, original price 499.99
    scrapePage(retX, retY, './lego/lego10179.html', 2007, 5195, 499.99)
    # 2007 LEGO 10181: 3428 pieces, original price 199.99
    scrapePage(retX, retY, './lego/lego10181.html', 2007, 3428, 199.99)
    # 2008 LEGO 10189: 5922 pieces, original price 299.99
    scrapePage(retX, retY, './lego/lego10189.html', 2008, 5922, 299.99)
    # 2009 LEGO 10196: 3263 pieces, original price 249.99
    scrapePage(retX, retY, './lego/lego10196.html', 2009, 3263, 249.99)
# Data standardization
def regularize(xMat, yMat):
    """Standardize features and center targets for regression.

    ``yMat`` is centered by its column mean; ``xMat`` is transformed
    column-wise to (x - mean) / variance. Note: the original book code
    divides by the *variance*, not the standard deviation — that behavior
    is preserved. Neither input is modified; new matrices are returned.

    Fixes over the original: removes two dead ``.copy()`` calls and the
    leftover debug ``print`` statements.
    """
    # Center the targets on their column mean.
    yMean = np.mean(yMat, 0)
    inyMat = yMat - yMean
    # Column-wise mean and variance of the features.
    inMeans = np.mean(xMat, 0)
    inVar = np.var(xMat, 0)
    # Standardize: subtract the mean, divide by the variance.
    inxMat = (xMat - inMeans) / inVar
    return inxMat, inyMat
# Residual sum of squares
def rssError(yArr, yHatArr):
    """Return the sum of squared residuals between targets and predictions."""
    residuals = yArr - yHatArr
    return np.square(residuals).sum()
# Compute the regression coefficients w
def standRegres(xArr, yArr):
    """Ordinary least squares: w = (X^T X)^{-1} X^T y.

    Prints a warning and returns None when X^T X is singular.
    """
    xMat = np.mat(xArr)
    yMat = np.mat(yArr).T
    xTx = xMat.T * xMat
    if np.linalg.det(xTx) == 0.0:
        print("矩阵为奇异矩阵,不能转置")
        return
    return xTx.I * (xMat.T * yMat)
# Fit with plain linear regression
def useStandRegres():
    """Fit OLS on the scraped LEGO data and print the fitted equation."""
    lgX = []
    lgY = []
    setDataCollect(lgX, lgY)
    data_num, features_num = np.shape(lgX)
    # Prepend a constant-1 column so ws[0] acts as the intercept.
    lgX1 = np.mat(np.ones((data_num, features_num + 1)))
    lgX1[:, 1:5] = np.mat(lgX)
    ws = standRegres(lgX1, lgY)
    print('%f%+f*年份%+f*部件数量%+f*是否为全新%+f*原价' % (ws[0], ws[1], ws[2], ws[3], ws[4]))
if __name__ == '__main__':
    # Run the end-to-end scrape + OLS fit when executed as a script.
    useStandRegres()
| JiweiMma/Linear-Regression | Linear-8.py | Linear-8.py | py | 3,966 | python | zh | code | 0 | github-code | 13 |
10081184100 | from selenium import webdriver
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import pickle
from datetime import datetime
import time
import re
import decimal
MAIN_LINK = "https://www.ceskereality.cz/prodej/bez-drazeb/byty/byty-2-kk/kraj-hlavni-mesto-praha/?d_subtyp=205%2C206%2C207"
COOKIES_BUTTON_CLASS = "button button--filled button__acceptAll"
LISTING_CLASS_EVEN = "div_nemovitost suda"
LISTING_CLASS_ODD = "div_nemovitost licha"
LISTING_CLASS = "div_nemovitost_foto_i"
PRICE_ELEM_CLASS = "h4 fw-bold"
TIMEOUT = 60  # seconds
# Accumulates one dict of scraped fields per listing.
results = []
# Open the listings page; the fixed sleeps give the consent dialog time to render.
driver = webdriver.Firefox()
driver.get(MAIN_LINK)
time.sleep(15)
iframes = driver.find_elements(By.XPATH, "//div[@id='appconsent']//iframe")
# if there is a cookies iframe that needs to be confirmed
if len(iframes) != 0:
    print('Number of iframes: ', len(iframes))
    driver.switch_to.frame(iframes[0])
    # Click button to accept cookies
    driver.find_element(By.XPATH, '//button[contains(@class, "button--filled button__acceptAll")]').click()
    # for button in buttons:
    #     print(button.get_attribute('class'))
    # print(len(buttons))
    time.sleep(15)
    driver.switch_to.parent_frame()
# Total number of offers reported by the site (used only for progress output).
total_listing_count = int(
    WebDriverWait(driver, TIMEOUT).until(
        EC.visibility_of_element_located((By.XPATH, '//span[@class="number"]'))
    ).text
)
current_page = 1
processed_listings = 0
print(total_listing_count)
# Sentinel so the first while-test passes; replaced on the first iteration.
elements = [None, ]
while (len(elements) != 0):
    if current_page != 1:
        driver.get(MAIN_LINK + '&strana=' + str(current_page))
    # Get all property listings on a page
    try:
        elements = WebDriverWait(driver, TIMEOUT).until(
            EC.visibility_of_all_elements_located((By.XPATH, f'//div[contains(@class, "{LISTING_CLASS}")]//a'))
        )
    except TimeoutException:
        # Case when there is an empty page as the last page
        elements = []
    driver.implicitly_wait(TIMEOUT)
    for element in elements:
        href = element.get_attribute('href')
        # open a new window for this listing's detail page
        driver.execute_script("window.open('" + href +"');")
        # switch to new window
        driver.switch_to.window(driver.window_handles[1])
        try:
            price = WebDriverWait(driver, TIMEOUT).until(
                EC.visibility_of_element_located((By.XPATH, "//div[@class='price'] "))
            )
            temp_dict = {'Cena': price.text}
            # Get title element which contains disposition and location as subelements
            title_element = WebDriverWait(driver, TIMEOUT).until(
                EC.visibility_of_element_located((By.XPATH, "//div[@class='title'] "))
            )
            disposition_string = title_element.find_element(By.XPATH, ".//h1").text
            location_string = title_element.find_element(By.XPATH, ".//h2").text
            temp_dict.update({'Disposition': disposition_string})
            temp_dict.update({'Location': location_string})
            # The detail table holds field-name <th> / value <td> pairs.
            table = WebDriverWait(driver, TIMEOUT).until(
                EC.visibility_of_element_located((By.XPATH, "//tbody"))
            )
            names = table.find_elements(By.XPATH, ".//th")
            values = table.find_elements(By.XPATH, ".//td")
            for name, value in zip(names, values):
                temp_dict.update({name.text: value.text})
            results.append(temp_dict)
        except TimeoutException:
            # The driver was not successful in getting the listing details
            # (e.g. the page did not load properly) — skip this listing.
            pass
        driver.implicitly_wait(10)
        driver.close()
        # back to main window
        driver.switch_to.window(driver.window_handles[0])
    processed_listings += len(elements)
    print(processed_listings)
    current_page += 1
driver.quit()
# Persist the raw scrape before any cleaning, stamped with today's date.
with open(f"/Users/Marek/housing_market/raw_data/ceskereality_raw_{datetime.today().strftime('%Y-%m-%d')}.pkl", mode='wb') as raw_file:
    pickle.dump(results, raw_file)

# Deduplicate listings by comparing their full (key, value) content.
no_dups_results = [dict(t) for t in {tuple(sorted(d.items())) for d in results}]

errors = 0
dispositions = set()
# Fix: the loop variable was previously named "dict", shadowing the builtin.
for listing in no_dups_results:
    # Convert the price to Decimal
    try:
        listing['Cena'] = decimal.Decimal(re.sub(r'[^\d]', '', listing['Cena']))
    except decimal.InvalidOperation:
        listing['Cena'] = None
        errors += 1
    # Parse the title for the disposition,
    # area of the apartment and Prague district
    # TODO: Area substring was None
    area_substring = re.search(r'\d+\sm²', listing['Disposition'])
    if area_substring:
        listing['Area'] = int(re.sub(r'\sm²', '', area_substring.group(),))
        listing['District'] = listing['Disposition'][area_substring.end() + 1:]
    disposition = re.search(r'\d\+[\w\d]+', listing['Disposition'])
    if disposition:
        listing['Disposition'] = disposition.group()
        dispositions.add(disposition.group())

print('========Data quality report Ceskereality=========')
print('Number of scraped offerings:', len(results))
print('Number of duplicates:', len(results) - len(no_dups_results))
print('Number of observations with nonnumeric price:', errors)
print('The dispositions encountered:', dispositions)

with open(f"/Users/Marek/housing_market/preprocessed_data/ceskereality_preprocessed_{datetime.today().strftime('%Y-%m-%d')}.pkl", mode='wb') as preprocessed_file:
    pickle.dump(no_dups_results, preprocessed_file)
25700179522 | import os
# assume check was installed into /usr/local/
env_with_err = Environment(
    ENV = os.environ,
    CPPPATH = ['#/src', '/usr/local/include'])
# Let the caller pick the compiler via $CC.
if "CC" in os.environ:
    env_with_err["CC"] = os.environ["CC"]
# NOTE(review): this tests the OS environment but always sets the default
# flags on the SCons env; a CCFLAGS value exported by the caller is never
# copied in. Confirm whether that is intentional.
if "CCFLAGS" not in os.environ:
    env_with_err["CCFLAGS"] = '-g -std=c99 -D_GNU_SOURCE -Wall -Werror -O3'
#print "CCCOM is:", env_with_err.subst('$CCCOM')
# Compile the library object and link the Check-based test runner.
objs = env_with_err.Object('src/art', 'src/art.c')
test_runner = env_with_err.Program('test_runner',
                                   objs + ["tests/runner.c"],
                                   LIBS=["check"],
                                   LIBPATH = ['/usr/lib', '/usr/local/lib'])
Default(test_runner)
| mindis/NECSST-data-structure | libart/SConstruct | SConstruct | 608 | python | en | code | 4 | github-code | 13 | |
19902457307 | import control
import numpy as np
class Paraemters:
    """Physical constants of the two-mass, two-spring system.

    NOTE(review): the class name is misspelled ("Paraemters"); renaming
    would break the call site below, so it is kept as-is.
    """

    def __init__(self):
        # Masses and spring constants (units presumed SI — confirm).
        self.m1, self.m2 = 1, 1
        self.k1, self.k2 = 2, 3
def dynamics(m1, m2, k1, k2):
    """State-space model (A, B, C) of a two-mass, two-spring chain.

    State ordering is [x1, x2, x1_dot, x2_dot]; the two inputs act on the
    masses, and C reads out the two velocities.
    """
    # Start from zero matrices and fill only the nonzero entries.
    A = np.zeros((4, 4))
    A[0, 2] = 1.0
    A[1, 3] = 1.0
    A[2, 0] = -(k1 / m1 + k2 / m1)
    A[2, 1] = k2 / m1
    A[3, 0] = k2 / m2
    A[3, 1] = -k2 / m2

    B = np.zeros((4, 2))
    B[2, 0] = -1 / m1
    B[3, 0] = 1 / m2
    B[3, 1] = 1 / m2

    # observe velocity
    C = np.array([[0, 0, 1, 0], [0, 0, 0, 1]])
    return A, B, C
if __name__ == '__main__':
    params = Paraemters()
    m1, m2, k1, k2 = params.m1, params.m2, params.k1, params.k2
    A, B, C = dynamics(m1, m2, k1, k2)
    # 1. compute eigenvalues of unestimated system
    eigVal, eigVec = np.linalg.eig(A)
    print('eig-vals (unestimated)')
    print(eigVal, '\n')
    # 2. compute observability of the system (2 ways)
    # 2.1. compute observability matrix
    Ob = control.obsv(A, C)
    print('control.obsv(A,C)')
    print(Ob)
    # print(f'rank={np.linalg.matrix_rank(Ob)}')
    # 2.2. compute observability matrix using transpose of controllability matrix
    # (duality: obsv(A, C) equals ctrb(A.T, C.T).T)
    Ob_trans = control.ctrb(A.T, C.T)
    print('control.ctrb(A.T, C.T)')
    print(Ob_trans.T)
    # 3. observability stability
    rank = np.linalg.matrix_rank(Ob)
    print('Rank of Ob')
    print(rank)
    # 4. pole replacement for stable observability
    # Observer gain by duality: place the poles of (A.T, C.T), then transpose.
    p = np.array([-0.5, -0.6, -0.65, -6])
    L_trans = control.place(A.T, C.T, p)
    L = L_trans.T
    print('L')
    print(L)
    # 5. check new poles again
    # Eigenvalues of the observer error dynamics A - L C should match p.
    new_A = A - L @ C
    eigVal, eigVec = np.linalg.eig(new_A)
    print('eig-vals (controlled)')
    print(eigVal)
| kimsooyoung/robotics_python | lec15_observability/spring_mass_obsv.py | spring_mass_obsv.py | py | 1,666 | python | en | code | 18 | github-code | 13 |
1382786466 | #Triangle Area Calculator - Challenge 3
class Triangle:
    """A triangle defined by its three side lengths."""

    def __init__(self, l1, l2, l3):
        self.line1 = l1
        self.line2 = l2
        self.line3 = l3
        print(f"The length of the lines are {l1}, {l2}, and {l3}.")

    def area(self):
        """Return the area via Heron's formula.

        Bug fix: the original returned the *product* of the three sides,
        which is not a triangle's area.
        """
        s = (self.line1 + self.line2 + self.line3) / 2  # semi-perimeter
        return (s * (s - self.line1) * (s - self.line2) * (s - self.line3)) ** 0.5
# Smoke test: construct a triangle and print its computed area.
tri = Triangle(42, 42, 42)
print(tri.area())
| tomgonzo/Learning-PY | 12-Paradigms/triangle.py | triangle.py | py | 318 | python | en | code | 1 | github-code | 13 |
12145880151 | import warnings
import cupy
from cupyx.scipy.ndimage import _util
from cupyx.scipy.ndimage import filters
def choose_conv_method(in1, in2, mode='full'):
"""Find the fastest convolution/correlation method.
Args:
in1 (cupy.ndarray): first input.
in2 (cupy.ndarray): second input.
mode (str, optional): ``valid``, ``same``, ``full``.
Returns:
str: A string indicating which convolution method is fastest,
either ``direct`` or ``fft1``.
.. warning::
This function currently doesn't support measure option,
nor multidimensional inputs. It does not guarantee
the compatibility of the return value to SciPy's one.
.. seealso:: :func:`scipy.signal.choose_conv_method`
"""
return cupy.math.misc._choose_conv_method(in1, in2, mode)
def wiener(im, mysize=None, noise=None):
"""Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Args:
im (cupy.ndarray): An N-dimensional array.
mysize (int or cupy.ndarray, optional): A scalar or an N-length list
giving the size of the Wiener filter window in each dimension.
Elements of mysize should be odd. If mysize is a scalar, then this
scalar is used as the size in each dimension.
noise (float, optional): The noise-power to use. If None, then noise is
estimated as the average of the local variance of the input.
Returns:
cupy.ndarray: Wiener filtered result with the same shape as `im`.
.. seealso:: :func:`scipy.signal.wiener`
"""
if im.dtype.kind == 'c':
# TODO: adding support for complex types requires ndimage filters
# to support complex types (which they could easily if not for the
# scipy compatibility requirement of forbidding complex and using
# float64 intermediates)
raise TypeError("complex types not currently supported")
if mysize is None:
mysize = 3
mysize = _util._fix_sequence_arg(mysize, im.ndim, 'mysize', int)
im = im.astype(float, copy=False)
# Estimate the local mean
local_mean = filters.uniform_filter(im, mysize, mode='constant')
# Estimate the local variance
local_var = filters.uniform_filter(im*im, mysize, mode='constant')
local_var -= local_mean*local_mean
# Estimate the noise power if needed.
if noise is None:
noise = local_var.mean()
# Perform the filtering
res = im - local_mean
res *= (1 - noise / local_var)
res += local_mean
return cupy.where(local_var < noise, local_mean, res)
def order_filter(a, domain, rank):
"""Perform an order filter on an N-D array.
Perform an order filter on the array in. The domain argument acts as a mask
centered over each pixel. The non-zero elements of domain are used to
select elements surrounding each input pixel which are placed in a list.
The list is sorted, and the output for that pixel is the element
corresponding to rank in the sorted list.
Args:
a (cupy.ndarray): The N-dimensional input array.
domain (cupy.ndarray): A mask array with the same number of dimensions
as `a`. Each dimension should have an odd number of elements.
rank (int): A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element).
Returns:
cupy.ndarray: The results of the order filter in an array with the same
shape as `a`.
.. seealso:: :func:`cupyx.scipy.ndimage.rank_filter`
.. seealso:: :func:`scipy.signal.order_filter`
"""
if a.dtype.kind in 'bc' or a.dtype == cupy.float16:
# scipy doesn't support these types
raise ValueError("data type not supported")
if any(x % 2 != 1 for x in domain.shape):
raise ValueError("Each dimension of domain argument "
" should have an odd number of elements.")
return filters.rank_filter(a, rank, footprint=domain, mode='constant')
def medfilt(volume, kernel_size=None):
"""Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`. The array will automatically be zero-padded.
Args:
volume (cupy.ndarray): An N-dimensional input array.
kernel_size (int or list of ints): Gives the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size
in each dimension. Default size is 3 for each dimension.
Returns:
cupy.ndarray: An array the same size as input containing the median
filtered result.
.. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
.. seealso:: :func:`scipy.signal.medfilt`
"""
if volume.dtype.kind == 'c':
# scipy doesn't support complex
# (and filters.rank_filter raise TypeError)
raise ValueError("complex types not supported")
# output is forced to float64 to match scipy
kernel_size = _get_kernel_size(kernel_size, volume.ndim)
if any(k > s for k, s in zip(kernel_size, volume.shape)):
warnings.warn('kernel_size exceeds volume extent: '
'volume will be zero-padded')
size = cupy.core.internal.prod(kernel_size)
return filters.rank_filter(volume, size // 2, size=kernel_size,
output=float, mode='constant')
def medfilt2d(input, kernel_size=3):
"""Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size given
by `kernel_size` (must be odd). The array is zero-padded automatically.
Args:
input (cupy.ndarray): A 2-dimensional input array.
kernel_size (int of list of ints of length 2): Gives the size of the
median filter window in each dimension. Elements of `kernel_size`
should be odd. If `kernel_size` is a scalar, then this scalar is
used as the size in each dimension. Default is a kernel of size
(3, 3).
Returns:
cupy.ndarray: An array the same size as input containing the median
filtered result.
See also
--------
.. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
.. seealso:: :func:`cupyx.scipy.signal.medfilt`
.. seealso:: :func:`scipy.signal.medfilt2d`
"""
if input.dtype not in (cupy.uint8, cupy.float32, cupy.float64):
# Scipy's version only supports uint8, float32, and float64
raise ValueError("only supports uint8, float32, and float64")
if input.ndim != 2:
raise ValueError('input must be 2d')
kernel_size = _get_kernel_size(kernel_size, input.ndim)
order = kernel_size[0] * kernel_size[1] // 2
return filters.rank_filter(input, order, size=kernel_size, mode='constant')
def _get_kernel_size(kernel_size, ndim):
    """Normalize ``kernel_size`` into an ``ndim``-length sequence of odd ints."""
    if kernel_size is None:
        kernel_size = (3,) * ndim
    kernel_size = _util._fix_sequence_arg(kernel_size, ndim,
                                          'kernel_size', int)
    if not all(k % 2 == 1 for k in kernel_size):
        raise ValueError("Each element of kernel_size should be odd")
    return kernel_size
| YuehChuan/cupy | cupyx/scipy/signal/signaltools.py | signaltools.py | py | 7,354 | python | en | code | null | github-code | 13 |
37259916523 | """Module in charge of the auto-completion feature."""
from typing import (
cast,
List,
Optional,
)
from lsprotocol.types import (
CompletionContext,
CompletionItem,
CompletionItemKind,
CompletionList,
CompletionTriggerKind,
InsertTextFormat,
Position,
Range,
)
from galaxyls.services.definitions import DocumentDefinitionsProvider
from galaxyls.services.xml.nodes import (
XmlCDATASection,
XmlElement,
)
from ..config import CompletionMode
from ..types import AutoCloseTagResult
from .context import XmlContext
from .xsd.parser import (
XsdAttribute,
XsdNode,
XsdTree,
)
class XmlCompletionService:
"""Service in charge of generating completion lists based
on the current XML context.
"""
    def __init__(self, xsd_tree: XsdTree, definitions_provider: DocumentDefinitionsProvider):
        """Store the tool XSD schema tree and the provider used to resolve
        macro definitions in the workspace."""
        self.xsd_tree: XsdTree = xsd_tree
        self.definitions_provider = definitions_provider
    def get_completion_at_context(
        self, context: XmlContext, completion_context: CompletionContext, mode: CompletionMode = CompletionMode.AUTO
    ) -> Optional[CompletionList]:
        """Dispatch to node/attribute/attribute-value completion depending on
        where in the document completion was triggered.

        Returns None inside CDATA sections and for unsupported contexts.
        """
        if isinstance(context.node, XmlCDATASection):
            return None
        triggerKind = completion_context.trigger_kind
        # AUTO mode: completion fired by typing "<" (new tag) or " " (attribute).
        if mode == CompletionMode.AUTO and triggerKind == CompletionTriggerKind.TriggerCharacter and not context.is_attribute:
            if completion_context.trigger_character == "<":
                return self.get_node_completion(context)
            if completion_context.trigger_character == " ":
                return self.get_attribute_completion(context)
        elif triggerKind == CompletionTriggerKind.Invoked:
            # Explicit invocation (e.g. Ctrl+Space): infer intent from context.
            if context.is_inside_attribute_value:
                return self.get_attribute_value_completion(context)
            if context.is_attribute_key:
                return self.get_attribute_completion(context)
            if context.is_tag and not context.is_closing_tag and not context.is_at_end:
                if context.is_valid_tag() and not context.is_tag_name:
                    return self.get_attribute_completion(context)
                return self.get_node_completion(context)
        return None
def get_node_completion(self, context: XmlContext) -> CompletionList:
"""Gets a list of completion items with all the available child tags
that can be placed in the current context node.
Args:
context (XmlContext): The XML context information at a specific
document position. It should contain, at least, the current node.
Returns:
CompletionList: A list of completion items with the child nodes
that can be placed under the current node.
"""
result = []
if context.is_empty or context.is_root:
result.append(self._build_node_completion_item(self.xsd_tree.root))
elif context.xsd_element:
for child in context.xsd_element.children:
if not context.has_reached_max_occurs(child):
result.append(self._build_node_completion_item(child, len(result)))
result.append(self._build_node_completion_item(self.xsd_tree.expand_element, len(result)))
return CompletionList(items=result, is_incomplete=False)
    def get_attribute_completion(self, context: XmlContext) -> CompletionList:
        """Gets a list of completion items with all the attributes that can be
        used in the current context node.

        Args:
            context (XmlContext): The XML context information at a specific
            document position. It should contain, at least, the current node.

        Returns:
            CompletionList: The completion item with the basic information
            about the attributes.
        """
        result: List[CompletionItem] = []
        # Attributes only make sense on a named, still-open tag.
        if (
            context.is_empty
            or context.is_content
            or context.is_attribute_value
            or context.is_closing_tag
            or not (context.node is not None and context.node.name)
        ):
            return CompletionList(items=result, is_incomplete=False)
        if context.xsd_element:
            # Offer every schema attribute not already present on the tag.
            existing_attr_names = context.node.get_attribute_names()
            for attr_name in context.xsd_element.attributes:
                if attr_name in existing_attr_names:
                    continue
                attr = context.xsd_element.attributes[attr_name]
                result.append(self._build_attribute_completion_item(attr, len(result)))
            if context.node.name == "expand":
                # For <expand macro="...">, also offer the macro's token
                # parameters as snippet completions with their default values.
                element = cast(XmlElement, context.node)
                macro_name = element.get_attribute_value("macro")
                if macro_name:
                    token_params = self.definitions_provider.macro_definitions_provider.get_macro_token_params(
                        context.xml_document, macro_name
                    )
                    for token in token_params:
                        if token.param_name in existing_attr_names:
                            continue
                        result.append(
                            CompletionItem(
                                label=token.param_name,
                                kind=CompletionItemKind.Variable,
                                insert_text=f'{token.param_name}="${{1:{token.default_value}}}"',
                                insert_text_format=InsertTextFormat.Snippet,
                                # Zero-padded index preserves declaration order.
                                sort_text=str(len(result)).zfill(2),
                            )
                        )
        return CompletionList(items=result, is_incomplete=False)
def get_attribute_value_completion(self, context: XmlContext) -> CompletionList:
"""Gets a list of possible values for an enumeration restricted attribute if exists.
Args:
context (XmlContext): The XML context at an attribute value position.
Returns:
CompletionList: The list of possible values of the attribute if it has an enumeration
restriction.
"""
if context.xsd_element and context.attribute_name:
attribute = context.xsd_element.attributes.get(context.attribute_name)
if attribute and attribute.enumeration:
result = [CompletionItem(label=item, kind=CompletionItemKind.Value) for item in attribute.enumeration]
return CompletionList(items=result, is_incomplete=False)
if attribute and attribute.name == "macro":
macro_names = self.definitions_provider.macro_definitions_provider.get_macro_names(context.xml_document)
result = [CompletionItem(label=item, kind=CompletionItemKind.Value) for item in macro_names]
return CompletionList(items=result, is_incomplete=False)
return CompletionList(items=[], is_incomplete=False)
    def get_auto_close_tag(self, context: XmlContext, trigger_character: str) -> Optional[AutoCloseTagResult]:
        """Gets the closing result for the currently opened tag in context.
        The `context` parameter should be placed right before the trigger_character, otherwise the context
        information will be located at the trigger_character itself which doesn't provide the real context."""
        # No auto-close inside CDATA, closing tags, already-closed nodes,
        # unfinished attributes, right before an existing '>', or when the
        # node has no schema definition.
        if (
            isinstance(context.node, XmlCDATASection)
            or context.is_closing_tag
            or (context.node is not None and context.node.is_closed)
            or (context.is_attribute and not context.is_attribute_end)
            or context.characted_at_position == ">"
            or context.xsd_element is None
        ):
            return None
        tag = context.xsd_element.name
        # Default: full closing tag with the cursor ($0) placed before it.
        snippet = f"$0</{tag}>"
        replace_range = None
        is_self_closing = trigger_character == "/"
        if is_self_closing and context.position is not None:
            # Build the position Range to be replaced by the snippet
            # Get the document position of the trigger_character => +1 character from current context.position
            start = Position(line=context.position.line, character=context.position.character + 1)
            # Check if there is a `>` already after the `/` trigger and include it in the Range to avoid duplication
            end_character = context.position.character + 2
            if len(context.line_text) > end_character and context.line_text[end_character] == ">":
                end_character = end_character + 1
            end = Position(line=context.position.line, character=end_character)
            replace_range = Range(start=start, end=end)
            if not context.is_content:
                # Self-close the tag ("/>") when triggered inside the tag itself.
                snippet = "/>$0"
        elif context.is_content:
            # A "/" typed in text content is ordinary text, not a tag trigger.
            return None
        return AutoCloseTagResult(snippet, replace_range)
def _build_node_completion_item(self, node: XsdNode, order: int = 0) -> CompletionItem:
"""Generates a completion item with the information about the
given node definition.
Args:
node (XsdNode): The node definition used to build the
completion item.
order (int): The position for ordering this item.
Returns:
CompletionItem: The completion item with the basic information
about the node.
"""
return CompletionItem(
label=node.name,
kind=CompletionItemKind.Class,
documentation=node.get_doc(),
sort_text=str(order).zfill(2),
)
def _build_attribute_completion_item(self, attr: XsdAttribute, order: int = 0) -> CompletionItem:
"""Generates a completion item with the information about the
given attribute definition.
Args:
attr (XsdAttribute): The attribute definition used to build the
completion item.
order (int): The position for ordering this item.
Returns:
CompletionItem: The completion item with the basic information
about the attribute.
"""
value_placeholder = "$1"
if attr.enumeration:
value_placeholder = f"${{1|{','.join(attr.enumeration)}|}}"
return CompletionItem(
label=attr.name,
kind=CompletionItemKind.Variable,
documentation=attr.get_doc(),
insert_text=f'{attr.name}="{value_placeholder}"',
insert_text_format=InsertTextFormat.Snippet,
sort_text=str(order).zfill(2),
)
| galaxyproject/galaxy-language-server | server/galaxyls/services/completion.py | completion.py | py | 10,531 | python | en | code | 22 | github-code | 13 |
def remove_dups(arr):
    """Remove consecutive duplicates from a sorted list.

    Returns the deduplicated prefix of `arr`. Fixes two defects of the
    original: it compared against the module-level global `array` instead of
    the `arr` parameter, and it returned the whole buffer (including the
    stale trailing elements) instead of the deduplicated prefix.
    """
    if not arr:
        return arr
    anchor = 1  # index one past the last unique element kept so far
    for i in range(len(arr) - 1):
        if arr[i] != arr[i + 1]:
            arr[anchor] = arr[i + 1]
            anchor += 1
    return arr[:anchor]
# Demo: dedupe a sorted list and show it before and after.
array = [11,11,12,20,20,25,27,66,66,87,99,99]
print("Original Array = {}".format(array))
print("New Array = {} ".format(remove_dups(array)))
| BradleyGenao/Python-DS-Algorithms | arrays/remove_dups/remove_dups.py | remove_dups.py | py | 321 | python | en | code | 0 | github-code | 13 |
73255073938 | import numpy as np
import sys
import pprint as pp
import math
##############################################################
################CONVOLUTIONAL FUNCTIONS#######################
##############################################################
def conv(img, conv_filter, bias, stride=2):
    """Convolve a channels-first image with a bank of filters.

    Args:
        img: array of shape (n_chan, img_dim, img_dim).
        conv_filter: array of shape (n_filt, n_chan, filt, filt).
        bias: per-filter bias, shape (n_filt,) or (n_filt, 1).
        stride: step between consecutive windows.

    Returns:
        Array of shape (n_filt, out_dim, out_dim) where
        out_dim = (img_dim - filt) // stride + 1.

    Fixes: the row loop used `curr_y + filt < img_dim`, which skipped the
    last valid window row whenever (img_dim - filt) was a multiple of the
    stride, leaving zero-filled output rows. A stray debug print of the
    output shape was removed.
    """
    (n_filt, n_filt_chan, filt, _) = conv_filter.shape
    n_chan, img_dim, _ = img.shape
    out_dim = int((img_dim - filt)/stride) + 1  # number of window positions per axis
    assert n_chan == n_filt_chan, "filter and image must have same number of channels"
    out = np.zeros((n_filt, out_dim, out_dim))
    # Slide each filter over the image; both bounds must be inclusive (<=)
    # so the final window position on each axis is visited.
    for curr_filt in range(n_filt):
        curr_y = out_y = 0
        while curr_y + filt <= img_dim:
            curr_x = out_x = 0
            while curr_x + filt <= img_dim:
                window = img[:, curr_y:curr_y + filt, curr_x:curr_x + filt]
                out[curr_filt, out_y, out_x] = np.sum(conv_filter[curr_filt] * window) + bias[curr_filt]
                curr_x += stride
                out_x += 1
            curr_y += stride
            out_y += 1
    return out
def conv_back(dconv_prev, conv_in, conv_filter, stride):
    """Backward pass of `conv`.

    Args:
        dconv_prev: gradient w.r.t. the conv output, shape (n_filt, out, out).
        conv_in: forward-pass input, shape (n_chan, orig_dim, orig_dim).
        conv_filter: filter bank, shape (n_filt, n_chan, filt, filt).
        stride: stride used in the forward pass.

    Returns:
        (dout, dfilt, dbias): gradients w.r.t. the input, filters and biases.

    Fixes: the inner loop condition was written `curr_x +filt <- orig_dim`,
    which parses as `curr_x + filt < -orig_dim` and is always False, so the
    loop body never executed; it also referenced the undefined names
    `curr_f`, `conv_filt` and `dconv_prev[curr_f, ...]`.
    """
    (n_filt, n_filt_chan, filt, _) = conv_filter.shape
    (_, orig_dim, _) = conv_in.shape
    dout = np.zeros(conv_in.shape)
    dfilt = np.zeros(conv_filter.shape)
    dbias = np.zeros((n_filt, 1))
    for curr_filt in range(n_filt):
        curr_y = out_y = 0
        while curr_y + filt <= orig_dim:
            curr_x = out_x = 0
            while curr_x + filt <= orig_dim:
                # Filter gradient: input window scaled by the upstream gradient.
                dfilt[curr_filt] += dconv_prev[curr_filt, out_y, out_x] * conv_in[:, curr_y:curr_y + filt, curr_x:curr_x + filt]
                # Input gradient: filter scaled by the upstream gradient.
                dout[:, curr_y:curr_y + filt, curr_x:curr_x + filt] += dconv_prev[curr_filt, out_y, out_x] * conv_filter[curr_filt]
                curr_x += stride
                out_x += 1
            curr_y += stride
            out_y += 1
        # Bias gradient is the sum of upstream gradients for this filter.
        dbias[curr_filt] = np.sum(dconv_prev[curr_filt])
    return dout, dfilt, dbias
##############################################################
###################POOLING FUNCTIONS##########################
##############################################################
def pooling(feature_map, size=2, stride=2):
    """Max-pool a channels-last feature map.

    Args:
        feature_map: array of shape (h, w, n_channels).
        size: pooling window edge length.
        stride: step between consecutive windows.

    Returns:
        Array of shape (out_h, out_w, n_channels) with
        out = (dim - size) // stride + 1 — exactly the number of windows the
        loops visit. The previous formula, (dim - size + 1) / stride + 1,
        over-allocated and left zero-filled rows/columns whenever
        (dim - size) was not a multiple of the stride.
    """
    out_h = (feature_map.shape[0] - size) // stride + 1
    out_w = (feature_map.shape[1] - size) // stride + 1
    pool_out = np.zeros((out_h, out_w, feature_map.shape[-1]))
    for map_num in range(feature_map.shape[-1]):
        r2 = 0
        for r in np.arange(0, feature_map.shape[0] - size + 1, stride):
            c2 = 0
            for c in np.arange(0, feature_map.shape[1] - size + 1, stride):
                pool_out[r2, c2, map_num] = np.max(feature_map[r:r + size, c:c + size, map_num])
                c2 = c2 + 1
            r2 = r2 + 1
    return pool_out
#util for pool_back
def nanargmax(arr):
    """Coordinates (tuple) of the NaN-ignoring maximum of `arr`."""
    flat_index = np.nanargmax(arr)
    return np.unravel_index(flat_index, arr.shape)
def pool_back(dpool, orig, filt, stride):
    """Backward pass of max pooling.

    Routes each upstream gradient in `dpool` to the argmax position of the
    corresponding forward-pass window of `orig` (channels-first).
    """
    (n_chan, orig_dim, _) = orig.shape
    dout = np.zeros(orig.shape)
    for chan in range(n_chan):
        curr_y = out_y = 0
        while curr_y + filt <= orig_dim:
            curr_x = out_x = 0
            while curr_x + filt <= orig_dim:
                window = orig[chan, curr_y:curr_y + filt, curr_x:curr_x + filt]
                (a, b) = nanargmax(window)
                dout[chan, curr_y + a, curr_x + b] = dpool[chan, out_y, out_x]
                curr_x += stride
                out_x += 1
            curr_y += stride
            out_y += 1
    return dout
def relu(feature_map):
    """Element-wise ReLU: negative activations are zeroed."""
    return np.maximum(feature_map, 0)
#expects weights shape as (activation depth) x (volume of feature map)
def fc(feature_map, weights):
    """Fully-connected layer: flatten the feature map and project it.

    Args:
        feature_map: array whose total size equals weights.shape[-1].
        weights: array of shape (activation_depth, feature_map_volume).

    Returns:
        1-D activation array of length activation_depth.

    Raises:
        ValueError: if the weight width does not match the feature-map
        volume. (The original printed a message and called sys.exit(),
        which kills the whole process; raising lets callers handle it.)
    """
    if np.prod(feature_map.shape) != weights.shape[-1]:
        raise ValueError("Number of weights in FC doesn't match volume of feature map.")
    # Unpack feature map and return activation layer
    return np.dot(feature_map.reshape(-1), weights.T)
def cross_entropy(predictions, targets):
    """Cross-entropy between predicted probabilities and targets,
    averaged over the first (batch) axis."""
    batch_size = predictions.shape[0]
    total = np.sum(targets * np.log(predictions))
    return -total / batch_size
| IYake/EECS-738-Final-Project | cnn.py | cnn.py | py | 4,171 | python | en | code | 1 | github-code | 13 |
35499392219 | """
https://github.com/lucidrains/make-a-video-pytorch
"""
import math
import functools
from operator import mul
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from .modules_conv import avg_pool_nd, zero_module, normalization, conv_nd
# helper functions
def exists(val):
    """True when `val` is not None."""
    return val is not None
def default(val, d):
    """Return `val` unless it is None, in which case return the fallback `d`."""
    if val is None:
        return d
    return val
def mul_reduce(tup):
    """Product of all elements (left fold with operator.mul)."""
    return functools.reduce(mul, tup)
def divisible_by(numer, denom):
    """True when `numer` is an exact multiple of `denom`."""
    return numer % denom == 0
mlist = nn.ModuleList  # short alias used when building lists of submodules
# for time conditioning
class SinusoidalPosEmb(nn.Module):
    """Transformer-style sinusoidal embedding for scalar (time) positions."""

    def __init__(self, dim, theta = 10000):
        super().__init__()
        self.theta = theta
        self.dim = dim

    def forward(self, x):
        dtype, device = x.dtype, x.device
        assert dtype == torch.float, 'input to sinusoidal pos emb must be a float type'
        half_dim = self.dim // 2
        # Geometric progression of frequencies over half the embedding dim.
        scale = math.log(self.theta) / (half_dim - 1)
        freqs = torch.exp(torch.arange(half_dim, device = device, dtype = dtype) * -scale)
        # Outer product: one row of angles per input position.
        angles = rearrange(x, 'i -> i 1') * rearrange(freqs, 'j -> 1 j')
        return torch.cat((angles.sin(), angles.cos()), dim = -1).type(dtype)
class ChanLayerNorm(nn.Module):
    """LayerNorm over the channel axis (dim=1) with a learned per-channel gain,
    for (b, c, t, h, w) feature maps."""

    def __init__(self, dim):
        super().__init__()
        # One gain per channel, broadcast over (t, h, w).
        self.g = nn.Parameter(torch.ones(dim, 1, 1, 1))

    def forward(self, x):
        # Looser epsilon outside float32 (e.g. half precision).
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = torch.mean(x, dim = 1, keepdim = True)
        var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
        normed = (x - mean) * var.clamp(min = eps).rsqrt()
        return normed.to(self.g.dtype) * self.g
def shift_token(t):
    """Temporal token shift for (b, c, t, h, w) tensors: the second half of
    the channels is shifted one step forward in time (zero-padded at the
    first frame); the first half is left unchanged."""
    untouched, shifted = t.chunk(2, dim = 1)
    # Pad spec covers the last three dims (w, h, t): add one zero frame at
    # the front of time and drop the last frame.
    shifted = F.pad(shifted, (0, 0, 0, 0, 1, -1), value = 0.)
    return torch.cat((untouched, shifted), dim = 1)
class LayerNorm(nn.Module):
    """LayerNorm over dim=1 with a learned gain of shape (dim,)."""

    def __init__(self, dim):
        super().__init__()
        self.g = nn.Parameter(torch.ones(dim))

    def forward(self, x):
        # Looser epsilon outside float32 (e.g. half precision).
        eps = 1e-5 if x.dtype == torch.float32 else 1e-3
        mean = torch.mean(x, dim = 1, keepdim = True)
        var = torch.var(x, dim = 1, unbiased = False, keepdim = True)
        return (x - mean) * var.clamp(min = eps).rsqrt() * self.g
# feedforward
class GEGLU(nn.Module):
    """Gated GELU: split channels in half; one half gates the other via GELU."""

    def forward(self, x):
        x = x.float()
        value, gate = x.chunk(2, dim = 1)
        return value * F.gelu(gate)
class FeedForward(nn.Module):
    """Conv-based GEGLU feedforward with an optional temporal token shift on
    the hidden features (applied only when enable_time is True)."""

    def __init__(self, dim, mult = 4):
        super().__init__()
        # 2/3 scaling keeps the parameter count comparable to a plain MLP,
        # since GEGLU consumes twice the hidden channels.
        hidden = int(dim * mult * 2 / 3)
        self.proj_in = nn.Sequential(
            nn.Conv3d(dim, hidden * 2, 1, bias = False),
            GEGLU()
        )
        self.proj_out = nn.Sequential(
            ChanLayerNorm(hidden),
            nn.Conv3d(hidden, dim, 1, bias = False)
        )

    def forward(self, x, enable_time=True):
        hidden = self.proj_in(x)
        if enable_time:
            hidden = shift_token(hidden)
        return self.proj_out(hidden)
# feedforward
# best relative positional encoding
class ContinuousPositionBias(nn.Module):
    """ from https://arxiv.org/abs/2111.09883

    Small MLP that maps relative positions to a per-head attention bias of
    shape (heads, i, j).
    """
    def __init__(
        self,
        *,
        dim,
        heads,
        num_dims = 1,
        layers = 2,
        log_dist = True,
        cache_rel_pos = False
    ):
        super().__init__()
        self.num_dims = num_dims
        self.log_dist = log_dist
        # MLP: relative-position vector -> hidden (SiLU) layers -> one bias per head.
        self.net = nn.ModuleList([])
        self.net.append(nn.Sequential(nn.Linear(self.num_dims, dim), nn.SiLU()))
        for _ in range(layers - 1):
            self.net.append(nn.Sequential(nn.Linear(dim, dim), nn.SiLU()))
        self.net.append(nn.Linear(dim, heads))
        # The relative-position grid depends only on the input dimensions, so
        # it can optionally be cached between calls (non-persistent buffer).
        self.cache_rel_pos = cache_rel_pos
        self.register_buffer('rel_pos', None, persistent = False)
    @property
    def device(self):
        # Device of the MLP parameters.
        return next(self.parameters()).device
    @property
    def dtype(self):
        # Dtype of the MLP parameters.
        return next(self.parameters()).dtype
    def forward(self, *dimensions):
        device = self.device
        if not exists(self.rel_pos) or not self.cache_rel_pos:
            # Build all pairwise relative positions over the given grid.
            positions = [torch.arange(d, device = device) for d in dimensions]
            grid = torch.stack(torch.meshgrid(*positions, indexing = 'ij'))
            grid = rearrange(grid, 'c ... -> (...) c')
            rel_pos = rearrange(grid, 'i c -> i 1 c') - rearrange(grid, 'j c -> 1 j c')
            if self.log_dist:
                # Signed log transform compresses large offsets.
                rel_pos = torch.sign(rel_pos) * torch.log(rel_pos.abs() + 1)
            self.register_buffer('rel_pos', rel_pos, persistent = False)
        rel_pos = self.rel_pos.to(self.dtype)
        for layer in self.net:
            rel_pos = layer(rel_pos)
        return rearrange(rel_pos, 'i j h -> h i j')
# helper classes
class Attention(nn.Module):
    """Multi-head attention with optional relative-position bias and learned
    position / frame-rate embeddings (applied only when `framerate` is given)."""
    def __init__(
        self,
        dim,
        dim_head = 64,
        heads = 8
    ):
        super().__init__()
        self.heads = heads
        self.scale = dim_head ** -0.5
        inner_dim = dim_head * heads
        self.norm = LayerNorm(dim)
        self.to_q = nn.Linear(dim, inner_dim, bias = False)
        self.to_kv = nn.Linear(dim, inner_dim * 2, bias = False)
        self.to_out = nn.Linear(inner_dim, dim, bias = False)
        nn.init.zeros_(self.to_out.weight.data) # identity with skip connection
        # NOTE(review): both embedding tables are sized for at most 30
        # positions / frame rates — indexing past that will fail; confirm
        # callers never exceed 30 frames.
        self.pos_embeds = nn.Parameter(torch.randn([1, 30, dim]))
        self.frame_rate_embeds = nn.Parameter(torch.randn([1, 30, dim]))
    def forward(
        self,
        x,
        context = None,
        rel_pos_bias = None,
        framerate = None,
    ):
        if framerate is not None:
            # Add absolute position embeddings plus a single frame-rate
            # embedding (framerate is 1-based) broadcast over the sequence.
            x = x + self.pos_embeds[:, :x.shape[1]].repeat(x.shape[0], 1, 1)
            x = x + self.frame_rate_embeds[:, framerate-1:framerate].repeat(x.shape[0], x.shape[1], 1)
        if context is None:
            context = x  # self-attention
        x = self.norm(x)
        context = self.norm(context)
        q, k, v = self.to_q(x), *self.to_kv(context).chunk(2, dim = -1)
        q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = self.heads), (q, k, v))
        q = q * self.scale
        sim = einsum('b h i d, b h j d -> b h i j', q, k)
        if exists(rel_pos_bias):
            sim = sim + rel_pos_bias
        attn = sim.softmax(dim = -1)
        out = einsum('b h i j, b h j d -> b h i d', attn, v)
        out = rearrange(out, 'b h n d -> b n (h d)')
        return self.to_out(out)
# main contribution - pseudo 3d conv
class PseudoConv3d(nn.Module):
    """Factorized (2+1)D convolution: a spatial 2D conv over each frame,
    followed by a temporal 1D conv over each pixel. The temporal conv is
    initialized to the identity so a pretrained 2D model is unchanged at
    the start of training."""
    def __init__(
        self,
        dim,
        dim_out = None,
        kernel_size = 3,
        *,
        temporal_kernel_size = None,
        **kwargs
    ):
        super().__init__()
        dim_out = default(dim_out, dim)
        temporal_kernel_size = default(temporal_kernel_size, kernel_size)
        self.spatial_conv = nn.Conv2d(dim, dim_out, kernel_size = kernel_size, padding = kernel_size // 2)
        # No temporal conv for 1x1 spatial kernels (pure channel projection).
        self.temporal_conv = nn.Conv1d(dim_out, dim_out, kernel_size = temporal_kernel_size, padding = temporal_kernel_size // 2) if kernel_size > 1 else None
        if exists(self.temporal_conv):
            nn.init.dirac_(self.temporal_conv.weight.data) # initialized to be identity
            nn.init.zeros_(self.temporal_conv.bias.data)
    def forward(
        self,
        x,
        enable_time = True
    ):
        # x is (b, c, h, w) for images or (b, c, t, h, w) for video.
        b, c, *_, h, w = x.shape
        is_video = x.ndim == 5
        enable_time &= is_video
        if is_video:
            # Fold time into the batch for the per-frame spatial conv.
            x = rearrange(x, 'b c t h w -> (b t) c h w')
        x = self.spatial_conv(x)
        if is_video:
            x = rearrange(x, '(b t) c h w -> b c t h w', b = b)
        if not enable_time or not exists(self.temporal_conv):
            return x
        # Fold space into the batch for the per-pixel temporal conv.
        x = rearrange(x, 'b c t h w -> (b h w) c t')
        x = self.temporal_conv(x)
        x = rearrange(x, '(b h w) c t -> b c t h w', h = h, w = w)
        return x
def frame_shift(x, shift_num=8):
    """Split channels of a (b, c, t, h, w) tensor into `shift_num` groups and
    shift group i by i steps forward in time (dim 2), zero-padding at the
    front. Groups shifted past the clip length become all zeros."""
    chunks = list(x.chunk(shift_num, 1))
    for i, chunk in enumerate(chunks):
        if i == 0:
            continue  # first group stays unshifted
        front_pad = torch.zeros_like(chunk[:, :, :i])
        chunks[i] = torch.cat([front_pad, chunk[:, :, :-i]], 2)
    return torch.cat(chunks, 1)
class ResBlockFrameShift(nn.Module):
    """
    A residual block that applies norm + SiLU + conv per frame, then a
    channel-group frame shift (see `frame_shift`), before adding the skip
    connection. Operates on (b, c, t, h, w) tensors.
    :param channels: the number of input channels.
    :param dropout: stored but not used by this block.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: stored but not used by this block.
    :param up: accepted but unused.
    :param down: accepted but unused.
    """
    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.out_layers = nn.Sequential(
            normalization(self.channels),
            nn.SiLU(),
            # zero_module keeps the residual branch silent at init.
            zero_module(
                conv_nd(dims, self.channels, self.out_channels, 3, padding=1)
            ),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
    def forward(self, x):
        """
        Apply the block to a (b, c, t, h, w) Tensor.
        :param x: an [N x C x T x H x W] Tensor of features.
        :return: an [N x C x T x H x W] Tensor of outputs.
        """
        num_frames = x.shape[2]
        # Fold time into the batch for the 2D layers.
        x = rearrange(x, 'b c t h w -> (b t) c h w')
        h = self.out_layers(x)
        h = rearrange(h, '(b t) c h w -> b c t h w', t=num_frames)
        # Mix information across time via the channel-group frame shift.
        h = frame_shift(h)
        h = rearrange(h, 'b c t h w -> (b t) c h w')
        out = self.skip_connection(x) + h
        out = rearrange(out, '(b t) c h w -> b c t h w', t=num_frames)
        return out
class ResBlockVideo(nn.Module):
    """
    A per-frame residual block for (b, c, t, h, w) tensors: time is folded
    into the batch, two norm + SiLU + conv stages are applied, and a skip
    connection is added. Unlike the diffusion ResBlock it is modeled after,
    this variant takes no timestep embedding.
    :param channels: the number of input channels.
    :param dropout: the rate of dropout.
    :param out_channels: if specified, the number of out channels.
    :param use_conv: if True and out_channels is specified, use a spatial
        convolution instead of a smaller 1x1 convolution to change the
        channels in the skip connection.
    :param use_scale_shift_norm: stored but not used by this block.
    :param dims: determines if the signal is 1D, 2D, or 3D.
    :param use_checkpoint: stored but not used by this block.
    :param up: accepted but unused.
    :param down: accepted but unused.
    """
    def __init__(
        self,
        channels,
        dropout,
        out_channels=None,
        use_conv=False,
        use_scale_shift_norm=False,
        dims=2,
        use_checkpoint=False,
        up=False,
        down=False,
    ):
        super().__init__()
        self.channels = channels
        self.dropout = dropout
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.use_checkpoint = use_checkpoint
        self.use_scale_shift_norm = use_scale_shift_norm
        self.in_layers = nn.Sequential(
            normalization(channels),
            nn.SiLU(),
            conv_nd(dims, channels, self.out_channels, 3, padding=1),
        )
        self.out_layers = nn.Sequential(
            normalization(self.out_channels),
            nn.SiLU(),
            nn.Dropout(p=dropout),
            # zero_module keeps the residual branch silent at init.
            zero_module(
                conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)
            ),
        )
        if self.out_channels == channels:
            self.skip_connection = nn.Identity()
        elif use_conv:
            self.skip_connection = conv_nd(
                dims, channels, self.out_channels, 3, padding=1
            )
        else:
            self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)
    def forward(self, x):
        """
        Apply the block to a (b, c, t, h, w) Tensor.
        :param x: an [N x C x T x H x W] Tensor of features.
        :return: an [N x C x T x H x W] Tensor of outputs.
        """
        num_frames = x.shape[2]
        # Fold time into the batch for the 2D layers.
        x = rearrange(x, 'b c t h w -> (b t) c h w ')
        h = x
        h = self.in_layers(h)
        h = self.out_layers(h)
        out = self.skip_connection(x) + h
        out = rearrange(out, '(b t) c h w -> b c t h w', t=num_frames)
        return out
class Downsample3D(nn.Module):
    """
    A downsampling layer with an optional convolution.
    :param channels: channels in the inputs and outputs.
    :param use_conv: a bool determining if a convolution is applied.
    :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then
                 downsampling occurs in the inner-two dimensions.
    NOTE(review): `stride` is unconditionally overwritten to 1 below, so
    despite its name this layer performs no spatial downsampling — it acts
    as a (possibly channel-changing) convolution. Confirm this is intended.
    """
    def __init__(self, channels, use_conv, dims=2, stride=None, out_channels=None, padding=1):
        super().__init__()
        self.channels = channels
        self.out_channels = out_channels or channels
        self.use_conv = use_conv
        self.dims = dims
        stride = 1
        if use_conv:
            self.op = conv_nd(
                dims, self.channels, self.out_channels, 3, stride=stride, padding=padding
            )
        else:
            # Average pooling requires matching channel counts.
            assert self.channels == self.out_channels
            self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)
    def forward(self, x):
        assert x.shape[1] == self.channels
        return self.op(x)
class SpatioTemporalAttention(nn.Module):
    """Temporal attention block for (b, c, t, h, w) features: attends over
    the time axis (space folded into the batch), optionally conditioned on a
    first-frame feature map, followed by a feedforward and optional
    frame-shift / resnet refinement stages."""
    def __init__(
        self,
        dim,
        *,
        dim_head = 64,
        heads = 8,
        use_resnet = False,
        use_frame_shift = True,
        use_context_att = False,
        use_temp_att = True,
        use_context = False,  # NOTE(review): accepted but unused — confirm
    ):
        super().__init__()
        self.use_resnet = use_resnet
        self.use_frame_shift = use_frame_shift
        self.use_context_att = use_context_att
        self.use_temp_att = use_temp_att
        if use_resnet:
            self.resblock = ResBlockVideo(dim, dropout=0, dims=2)
        if use_frame_shift:
            self.frameshiftblock = ResBlockFrameShift(dim, dropout=0, dims=2)
        if use_context_att:
            # Projects the conditioning frame (4 input channels) to `dim`.
            self.downsample_x0 = Downsample3D(4, True, 2, out_channels=dim)
            self.temporal_attn_x0 = Attention(dim = dim, dim_head = dim_head, heads = heads)
        if use_temp_att:
            self.temporal_attn = Attention(dim = dim, dim_head = dim_head, heads = heads)
        self.temporal_rel_pos_bias = ContinuousPositionBias(dim = dim // 2, heads = heads, num_dims = 1)
        self.ff = FeedForward(dim = dim, mult = 4)
    def forward(
        self,
        x,
        x_0 = None,
        enable_time = True,
        framerate = 4,
        is_video = False,
    ):
        x_ndim = x.ndim
        is_video = x_ndim == 5 or is_video
        enable_time &= is_video
        if enable_time:
            img_size = x.shape[-1]
            if self.use_temp_att:
                if x_ndim == 5:
                    b, c, *_, h, w = x.shape
                    # Fold space into the batch so attention runs over time only.
                    x = rearrange(x, 'b c t h w -> (b h w) t c')
                time_rel_pos_bias = self.temporal_rel_pos_bias(x.shape[1])
            if self.use_context_att and x_0 is not None:
                # Pool the conditioning frame down to the current resolution,
                # project it, and cross-attend to it.
                x_0_img_size = x_0.shape[-1]
                kernel_size = x_0_img_size // img_size
                x_0 = F.avg_pool2d(x_0, [kernel_size, kernel_size], stride=None, padding=0, ceil_mode=False, count_include_pad=True, divisor_override=None)
                x_0 = self.downsample_x0(x_0).unsqueeze(2)
                if x_ndim == 5:
                    x_0 = rearrange(x_0, 'b c t h w -> (b h w) t c')
                x = self.temporal_attn_x0(x, context=x_0, rel_pos_bias = time_rel_pos_bias, framerate = framerate) + x
            if self.use_temp_att:
                x = self.temporal_attn(x, rel_pos_bias = time_rel_pos_bias, framerate = framerate) + x
            if x_ndim == 5:
                # Restore the (b, c, t, h, w) layout.
                x = rearrange(x, '(b h w) t c -> b c t h w', w = w, h = h)
        x = self.ff(x, enable_time=enable_time) + x
        if self.use_frame_shift:
            x = self.frameshiftblock(x)
        if self.use_resnet:
            x = self.resblock(x)
        return x
microsoft/i-Code | i-Code-V3/core/models/latent_diffusion/modules_video.py | modules_video.py | py | 17,387 | python | en | code | 1,451 | github-code | 13
27573519644 | """
Intro to python exercises shell code
"""
def is_odd(x):
    """Return True if the integer x is odd."""
    return x % 2 == 1
def is_palindrome(word):
    """
    returns whether `word` is spelled the same forwards and backwards
    """
    # The original's loop worked, but its docstring was stranded *after* the
    # return statement (dead code); the slice comparison is the idiomatic form.
    return word == word[::-1]
def only_odds(numlist):
    """
    returns a list of numbers that are odd from numlist
    ex: only_odds([1, 2, 3, 4, 5, 6]) -> [1, 3, 5]
    """
    return [value for value in numlist if value % 2 == 1]
def count_words(text):
    """
    return a dictionary of {word: count} in the text
    words should be split by spaces (and nothing else)
    words should be converted to all lowercase
    ex: count_words("How much wood would a woodchuck chuck"
                    " if a woodchuck could chuck wood?")
        ->
        {'how': 1, 'much': 1, 'wood': 1, 'would': 1, 'a': 2, 'woodchuck': 2,
         'chuck': 2, 'if': 1, 'could': 1, 'wood?': 1}
    """
    if not text:
        return {}
    counts = {}
    # The original neither lowercased nor split on spaces only; the spec
    # (and docstring example) requires both.
    for word in text.lower().split(" "):
        counts[word] = counts.get(word, 0) + 1
    return counts
# Quick smoke test for only_odds.
print(only_odds([1, 2, 3, 4, 5, 6]))
| genericpan/mdst_Tutorials | Tutorial1/python_exercises.py | python_exercises.py | py | 1,311 | python | en | code | 0 | github-code | 13 |
30768047868 | """
Escribir un programa que a partir de un número entero cant ingresado por el usuario permita cargar por teclado cant números enteros. La computadora debe mostrar cuál fue el mayor número y en qué posición apareció.
"""
cantidad_num = int(input("Ingrese la cantidad de numeros que va a ingresar: "))
numeros = []
for n in range(cantidad_num):
num = int(input("Ingrese un numero entero: "))
numeros.append(num)
posicion = 0
mayor_numero = numeros[0]
for n in range(1, cantidad_num):
if numeros[n] > mayor_numero:
mayor_numero = numeros[n]
posicion = n+1
print("El mayor numero es: ", mayor_numero, " y esta en la posicion: ", posicion) | aadriaan98/practicas-python | 3 Flujo_de_repeticion/ejercicio46.py | ejercicio46.py | py | 683 | python | es | code | 2 | github-code | 13 |
74322280976 | import random
# global variable for the random operator used to make calls to random shorter
r = random
# get a random value of gold based on turn segment
def get_gold(x):
    """Random gold earned on turn x, scaled by which 100-turn segment x is in.

    Idiom fixes: calls the `random` module directly instead of the opaque
    module-level alias `r`, and replaces bitwise `&` on booleans with
    chained comparisons.
    """
    if x <= 100:
        return random.randint(0, 6)
    if 100 < x <= 200:
        return random.randint(5, 16)
    if 200 < x <= 300:
        return random.randint(15, 31)
    return 0
# get a random gov type based on turn number
def get_gov_type(x):
    """Government type for turn x: 0 early game, 1 mid game, then random 2-4.

    Idiom fixes: early returns with chained comparisons instead of bitwise
    `&` on booleans; calls the `random` module directly.
    """
    if x <= 100:
        return 0
    if x <= 150:
        return 1
    return random.randint(2, 4)
# get a random number of cities built per turn
def get_num_cities(x):
cities = 0
if(x%6 == 0):
cities = r.randint(0,1)
return cities
# get a random increase in the tech level
def get_tech_level(x):
level = 0
if(x%5 == 0):
level = 1
return level
# get a random number of allies
# or lose some if value is lower than last turn's
def get_num_allies(x):
allies = r.randint(0, 3)
return allies
# get a random of enemies per turn based on number of allies
def get_num_enemies(x):
enemies = 7 - x
return enemies
# determine if enemies are near
def get_enemies_near():
chance = r.randint(0, 100)
near = 0
if(chance >= 70):
near = 1
return near
def generate_data():
    """Simulate 300 game turns and write one CSV row per turn to TurnData.txt.

    Row format: gold,government,num_cities,tech_level,num_allies,num_enemies,enemies_near
    Gold, cities and tech level accumulate across turns; gold resets every
    15 turns. Uses `with open` so the file is closed even if a turn raises
    (the original left the handle open on error).
    """
    gold = 0
    num_cities = 0
    tech_level = 0
    with open("TurnData.txt", "w+") as file:
        for x in range(300):
            gold += get_gold(x)
            government = get_gov_type(x)
            num_cities += get_num_cities(x)
            tech_level += get_tech_level(x)
            num_allies = get_num_allies(x)
            num_enemies = get_num_enemies(num_allies)
            enemies_near = get_enemies_near()
            if x % 15 == 0:
                gold = 0  # periodic gold reset
            # Convert the values into a CSV row and write it out.
            turnValues = "{g},{gov},{nc},{tl},{na},{ne},{en}\n".format(
                g=str(gold), gov=str(government), nc=str(num_cities),
                tl=str(tech_level), na=str(num_allies), ne=str(num_enemies),
                en=str(enemies_near))
            file.write(turnValues)
# Build the data set when the module is executed.
generate_data()
| BjornMelin/Freeciv_Research | TestDataGenerator.py | TestDataGenerator.py | py | 2,209 | python | en | code | 1 | github-code | 13 |
42434919606 | import sqlite3
import sys
import re
def main():
    """CLI entry point: parse an R package listing and load it into sqlite.

    Usage: script <input_file> <db_file>
    """
    input_file = sys.argv[1]
    db_file = sys.argv[2]
    insert_data(read_input_file(input_file), db_file)
def read_input_file(input_file):
    """Parse an R `installed.packages()`-style text listing.

    Returns a list of [package, version, image, filepath] rows, where the
    image name is derived from the input filename and the filepath from the
    R_package_data/<dir>/ component of its path.
    """
    p1 = re.compile(r'\s+')                               # line starting with whitespace
    p2 = re.compile(r'\s+.*Version')                      # header line containing "Version"
    p3 = re.compile(r'"(.*)"')                            # quoted field value
    p4 = re.compile(r'^R_package_data/(.*)/[^/]+.txt$')   # extract <dir> from the path
    dir_name = ''
    if p4.match(input_file):
        m = p4.match(input_file)
        dir_name = m.group(1)
    #print(dir_name)
    # State machine over the file: 0 = preamble, 1 = skipping until the
    # "Version" header, 2 = inside the package table.
    flag = 0
    data = []
    package_data = {}
    with open(input_file) as f:
        for line in f:
            if flag == 0 and p1.match(line):
                flag = 1
            if p2.match(line):
                flag = 2
                continue
            if flag == 1 and p2.match(line):
                flag = 2
                continue
            if flag == 1 and p1.match(line):
                continue
            if flag == 2 and p1.match(line):
                # An indented line after the table ends it.
                break
            if flag:
                line_data = re.split(r'\s+', line)
                package = line_data[0]
                if package not in package_data:
                    package_data[package] = []
                # Collect all quoted fields for this package, in order.
                for each_data in line_data:
                    if p3.match(each_data):
                        m = p3.match(each_data)
                        package_data[package].append(m.group(1))
    for package in package_data:
        # Image name = input filename with directory and .txt removed.
        image = re.sub('.*/', '', input_file)
        image = re.sub('\.txt', '', image)
        filepath = '/usr/local/biotools/' + dir_name + '/' + image
        # Fields 0 and 2 of the quoted columns are package name and version.
        data.append([package_data[package][0], package_data[package][2], image, filepath])
    return(data)
def insert_data(data, db_file):
    """Create the package tables/indexes (if needed) and bulk-insert rows.

    Each entry of `data` is [package, version, image, filepath].
    """
    version_rows = [[row[0], row[1]] for row in data]
    con = sqlite3.connect(db_file)
    cur = con.cursor()
    cur.execute('CREATE TABLE IF NOT EXISTS PACKAGE_VERSION(package text, version text, PRIMARY KEY(package, version));')
    cur.execute('CREATE TABLE IF NOT EXISTS PACKAGE_VERSION_IMAGE_FILEPATH(package text, version text, image text, filepath text, PRIMARY KEY(package, image));')
    cur.execute('CREATE INDEX IF NOT EXISTS PACKAGE_INDEX ON PACKAGE_VERSION_IMAGE_FILEPATH(package);')
    cur.execute('CREATE INDEX IF NOT EXISTS IMAGE_INDEX ON PACKAGE_VERSION_IMAGE_FILEPATH(image);')
    cur.execute('CREATE INDEX IF NOT EXISTS FILEPATH_INDEX ON PACKAGE_VERSION_IMAGE_FILEPATH(filepath);')
    cur.executemany('INSERT OR IGNORE INTO PACKAGE_VERSION(package, version) values (?,?)', version_rows)
    cur.executemany('INSERT OR IGNORE INTO PACKAGE_VERSION_IMAGE_FILEPATH(package, version, image, filepath) values (?,?,?,?)', data)
    con.commit()
    con.close()
if __name__ == '__main__':
main()
| yookuda/biocontainers_image | import_R_package_data.py | import_R_package_data.py | py | 2,841 | python | en | code | 0 | github-code | 13 |
10326302097 | '''
run-time: 60 ms, faster than 34.14%
mem-usage: 14.2 mb, less than 73.10%
'''
class Solution:
    def findNumbers(self, nums: List[int]) -> int:
        """Count how many numbers in `nums` have an even number of digits."""
        even_count = 0
        for value in nums:
            digits = 0
            remaining = value
            # Strip one decimal digit per iteration.
            while remaining != 0:
                remaining //= 10
                digits += 1
            if digits % 2 == 0:
                even_count += 1
        return even_count
| NikhilNarvekar123/Competitive-Programming | find_numbers_with_even_number_of_digits.py | find_numbers_with_even_number_of_digits.py | py | 428 | python | en | code | 0 | github-code | 13 |
29849205935 | # https://www.hackerrank.com/challenges/py-set-discard-remove-pop/problem
# HackerRank "Set .discard(), .remove() & .pop()":
# read a set of ints, apply N mutating commands, print the sum.
n = int(input())  # declared size of the set (unused beyond the input format)
s = set(map(int, input().split()))
N = int(input())
for _ in range(N):
    tokens = input().split()
    command = tokens[0]
    if command == "remove":
        s.remove(int(tokens[1]))
    elif command == "discard":
        s.discard(int(tokens[1]))
    elif command == "pop":
        s.pop()
print(sum(s))
| ritchereluao/HackerRankPy | Sets/5_discard_remove_pop.py | 5_discard_remove_pop.py | py | 380 | python | en | code | 0 | github-code | 13 |
36790054136 | import turtle
bob = turtle.Turtle()  # the pen we draw with
size = 50  # side length of one chessboard square, in pixels
bob.speed("fastest")
bob.penup()
bob.goto(-200,-200)  # bottom-left corner of the 8x8 board
def draw_square():
    """Draw one filled `size` x `size` square at bob's current position,
    leaving the pen up afterwards."""
    bob.pendown()
    bob.begin_fill()
    for _ in range(4):
        bob.forward(size)
        bob.left(90)
    bob.end_fill()
    bob.penup()
bob.penup()
# Visit every board cell; fill only cells whose row+column parity is even,
# producing the checkerboard pattern.
for c in range(8):
    for r in range(8):
        if (r+c)%2==0:
            bob.goto(-200+r*size, -200+c*size)
            draw_square()
| kmurphy/coderdojo | 04-Some_More_Turtle_Graphics/code/chess_2.py | chess_2.py | py | 421 | python | en | code | 1 | github-code | 13 |
6661427455 | # Data Structure ...
# User-Defined ...
# Linked List ...
# Singly Linked List (Adding data @ Ending) ...
class creatingnode():
def __init__(self,data):
self.data = data
self.linkto = None
class S_linkedlist():
    """Singly linked list driven by console output.

    Every operation prints a status line prefixed with the list's name
    (``object_name``); ``head`` references the first node or is None.
    Fixes vs. the original: stray trailing apostrophes were removed from
    four status messages, and ``== None`` comparisons now use ``is``.
    """
    def __init__(self,object_name):
        self.name = object_name
        self.head = None
    def traversal(self):
        """Print the list as 'Head -->  [d] -->  ... None'."""
        print(self.name, end=" ")
        if self.head is None:
            print('Linked List is Empty!\n--------------------')
        else:
            print('Head --> ', end=" ")
            n = self.head
            while n is not None:
                print(f'[{n.data}] --> ',end=" ")
                n = n.linkto
            print('None')
            print('--------------------')
    def add_at_starting(self,data):
        """Insert a new node holding `data` before the current head."""
        newnode = creatingnode(data)
        if self.head is None:
            self.head = newnode
        else:
            newnode.linkto = self.head
            self.head = newnode
    def add_at_ending(self,data):
        """Append a new node holding `data` after the last node."""
        newnode = creatingnode(data)
        if self.head is None:
            self.head = newnode
        else:
            n = self.head
            while n.linkto is not None:
                n = n.linkto
            n.linkto = newnode
    def add_after_node(self,data,x):
        """Insert `data` immediately after the first node whose value is `x`."""
        newnode = creatingnode(data)
        n = self.head
        if self.head is None:
            print(self.name, end=" ")
            print('No Node in the Linked List!\n--------------------')
        else:
            # Walk until x is found or the end is reached; the while/else
            # branch always runs because the loop never breaks.
            while (n.data != x) and (n.linkto is not None):
                n = n.linkto
            else:
                if n.data == x:
                    newnode.linkto = n.linkto
                    n.linkto = newnode
                else:
                    print(self.name, end=" ")
                    print(f'{x} is not in the Linked List!\n--------------------')
    def add_before_node(self,data,x):
        """Insert `data` immediately before the first node whose value is `x`."""
        n = self.head
        if n is None:
            print(self.name, end=" ")
            print('No Node in the Linked List!\n--------------------')
        elif n.data == x:
            self.add_at_starting(data)
        else:
            # NOTE(review): delegates to add_after_node with the predecessor's
            # value, which targets the first occurrence of that value — with
            # duplicate values the insert may land after a different node.
            while n.linkto is not None:
                if n.linkto.data == x:
                    self.add_after_node(data,n.data)
                    break
                else:
                    n = n.linkto
            if n.linkto is None:
                print(self.name, end=" ")
                print(f'{x} is not in the Linked List!\n--------------------')
    def add_when_LL_Empty(self,data):
        """Insert `data` only if the list is empty; otherwise complain."""
        if self.head is None:
            self.add_at_starting(data)
        else:
            print(self.name, end=" ")
            # fixed: stray apostrophe removed from the end of the message
            print("Linked List is not empty!\n--------------------")
    def del_at_starting(self):
        """Remove the first node."""
        if self.head is None:
            print(self.name, end=" ")
            print("Linked List is already empty!\n--------------------")
        else:
            self.head = self.head.linkto
    def del_at_ending(self):
        """Remove the last node."""
        if self.head is None:
            print(self.name, end=" ")
            print("Linked List is already empty!\n--------------------")
        else:
            n = self.head
            if n.linkto is None:
                self.head = None
            else:
                while n.linkto.linkto is not None:
                    n = n.linkto
                n.linkto = None
    def del_Node(self,x):
        """Remove the first node whose value equals `x`."""
        if self.head is None:
            print(self.name, end=" ")
            print("Linked List is already empty!\n--------------------")
        else:
            n = self.head
            if n.data == x:
                self.del_at_starting()
            else:
                while n.linkto is not None:
                    if n.linkto.data != x:
                        n = n.linkto
                    else:
                        n.linkto = n.linkto.linkto
                        break
                else:
                    if n.linkto is None:
                        print(self.name, end=" ")
                        print(f'{x} is not in the Linked List!\n--------------------')
| RithickDharmaRaj-darkCoder/Linked_List | singlyLL.py | singlyLL.py | py | 4,090 | python | en | code | 0 | github-code | 13 |
17825551340 | import cv2
from matplotlib import pyplot
import numpy
# Load the sample image as a single-channel (grayscale) array.
img=cv2.imread('smarties.png',cv2.IMREAD_GRAYSCALE)
# Inverse binary threshold: pixels darker than 220 become white (255).
_, mask=cv2.threshold(img,220,255,cv2.THRESH_BINARY_INV)
# 5x5 structuring element shared by every morphological operation below.
kernel=numpy.ones((5,5),numpy.uint8)
dilation=cv2.dilate(mask,kernel,iterations=3)
erosion=cv2.erode(mask,kernel,iterations=3)
opening=cv2.morphologyEx(mask,cv2.MORPH_OPEN,kernel)   # erosion then dilation
closing=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel)  # dilation then erosion
mg=cv2.morphologyEx(mask,cv2.MORPH_GRADIENT,kernel)    # dilation minus erosion
th=cv2.morphologyEx(mask,cv2.MORPH_TOPHAT,kernel)      # image minus opening
titles=['Image','Mask','Dilation','Erosion','opening','closing','mg','th']
images=[img,mask,dilation,erosion,opening,closing,mg,th]
# Show all eight results in a 2x4 grid without axis ticks.
for i in range(8):
    pyplot.subplot(2,4,i+1), pyplot.imshow(images[i],'gray')
    pyplot.title(titles[i])
    pyplot.xticks([]), pyplot.yticks([])
pyplot.show() | kanavbhasin22/Image_Processing | Morphological.py | Morphological.py | py | 819 | python | en | code | 0 | github-code | 13 |
8343772196 | arr = [[] for _ in range(5)]
# Direction offsets for up/left/down/right moves on the 5x5 grid.
dy = [-1, 0, 1, 0]
dx = [0, -1, 0, 1]
# visited[x][y] == 1 while cell (x, y) is on the current search frontier.
visited = [[0 for _ in range(5)] for _ in range(5)]
total = 0  # number of valid placements found
for i in range(5):
    st = input()
    arr[i] = list(st)
# BFS over up to 7 cells starting from (a, b); count the start as valid when
# at least 4 of the visited cells hold 'S'.  (Translated from the Korean note.)
def BFS(a=0, b=0):
    """Breadth-first search of up to 7 cells from (a, b), counting 'S' cells.

    Fixes vs. the original: ``a``/``b`` are now parameters with defaults
    (they were undefined free names), ``total`` is declared ``global`` (it
    was assigned without the declaration, raising UnboundLocalError), and
    the queue is guarded against running empty before 7 pops.
    """
    global total
    cnt = 0
    q = [(a, b)]
    visited[a][b] = 1
    for _ in range(7):
        if not q:  # fewer than 7 reachable cells left to expand
            break
        x, y = q.pop(0)
        if arr[x][y] == 'S':
            cnt += 1
            if cnt == 4:
                total += 1
                return
        for i in range(4):
            nx = x + dx[i]
            ny = y + dy[i]
            if (nx >= 0 and nx < 5 and ny >= 0 and ny < 5 and visited[nx][ny] == 0):
                visited[nx][ny] = 1
                q.append((nx, ny))
                visited[nx][ny] = 0
    return
BFS(0, 0)  # fixed: the original called the undefined name DFS(0, 0)
print(total) | rohujin97/Algorithm_Study | baekjoon/1941.py | 1941.py | py | 781 | python | en | code | 0 | github-code | 13 |
40054587023 | # 4. Verifique se há dois nomes repetidos.
# Exercise 4: check whether any first name appears more than once.
dic1 = {'user1':{'nome': 'Mioshi', 'sobrenome': 'Kanashiro', 'apelido': 'Japa'},
        'user2':{'nome': 'Sergei', 'sobrenome': 'Ivanov', 'apelido': 'Russo'},
        'user3':{'nome': 'Alfredo', 'sobrenome': 'Constâncio', 'apelido': 'Portuga'}}
# Collect every first name, then print how often each one occurs
# (a count greater than 1 means the name is repeated).
nomes = [dados.get('nome', 0) for dados in dic1.values()]
for dados in dic1.values():
    nome = dados.get('nome', 0)
    print(nomes.count(nome),nome)
| robinson-1985/python-zero-dnc | 33.operacoes_com_dicionarios/11.exercicio4.py | 11.exercicio4.py | py | 470 | python | pt | code | 0 | github-code | 13 |
37563778835 | import os
import csv
from bs4 import BeautifulSoup
from Article import Article
from SearchResultParser import SearchResultParser
import Project
class SearchResultConverter:
    """Convert offline-saved scholar result pages (HTML) into CSV files.

    Raw pages are read from data/raw_results/<topic>.<page>.html and parsed
    articles are written, deduplicated, to data/conv_results/<topic>.csv.
    """
    def __init__(self):
        self.topic = None
        self.page = None
        self.response = None
        self.searchresults = [] #contains list of articles
        self.sr_parser = SearchResultParser()
        self.RAW_RESULTS_DIR = Project.resource_path("data/raw_results/")
        self.CONV_RESULTS_DIR = Project.resource_path("data/conv_results/")
    ####################################################################
    # LOAD server response (which has been offline-saved)
    ####################################################################
    def RAWexists(self, topic, page):
        """Return True if a raw HTML file exists for (topic, page)."""
        file_name = self.RAW_RESULTS_DIR+topic+'.'+str(page)+'.html'
        exists = os.path.isfile(file_name)
        return exists
    def load_file(self, topic, page):
        """Load the saved HTML response for (topic, page) into self.response.

        Returns the response text, or None if the file does not exist.
        """
        print("..................................................................")
        print("Load from file: ",topic," , Page ",page)
        file_name = self.RAW_RESULTS_DIR+topic+'.'+str(page)+'.html'
        if not os.path.isfile(file_name):
            print("File doesn't exist!")
            return
        self.topic = topic
        self.page = page
        response = None
        with open(file_name, 'r') as f:
            response = f.read()
        self.response = response
        print("Loaded response from ",file_name)
        print()
        return response
    ####################################################################
    # PARSE server response INTO local searchresult format
    ####################################################################
    def extract_list(self, response):
        """Return the list of raw HTML result <div> elements in `response`."""
        soup = BeautifulSoup(response, 'html.parser')
        raw_list = soup.find_all('div', attrs={'class': 'gs_r gs_or gs_scl'})
        return raw_list
    def parse_searchresult(self, raw_searchresult):
        """Parse one raw HTML result into an Article (searchkey = self.topic)."""
        self.sr_parser.init_raw(raw_searchresult)
        title = self.sr_parser.parse_title()
        authors = self.sr_parser.parse_authors()
        hyperlink = self.sr_parser.parse_hyperlink()
        text = self.sr_parser.parse_text()
        cited = self.sr_parser.parse_cited()
        year = self.sr_parser.parse_year()
        typ = self.sr_parser.parse_typ()
        pdflink = self.sr_parser.parse_pdflink()
        searchkey = self.topic
        article = Article(title, authors, hyperlink, text, cited, year, typ, pdflink, searchkey)
        return article
    def parse_list_of_searchresults(self):
        """Parse every result in self.response into self.searchresults."""
        #Extract HTML-searchresults from HTML-response
        raw_list = self.extract_list(self.response)
        parsed_list = []
        #Parse each HTML-searchresult and store in new LOCAL format
        for raw_searchresult in raw_list:
            article = self.parse_searchresult(raw_searchresult)
            parsed_list.append(article)
        self.searchresults = parsed_list
        return self.searchresults
    ####################################################################
    # CSV Export of searchresults
    ####################################################################
    def CSVexists(self, topic):
        """Return True if a converted CSV already exists for `topic`."""
        file_name = self.CONV_RESULTS_DIR+topic+'.csv'
        exists = os.path.isfile(file_name)
        return exists
    def resetCSV(self, topic):
        """Truncate (or create) the CSV file for `topic`."""
        file_name = self.CONV_RESULTS_DIR+topic+'.csv'
        with open(file_name, 'w+') as f:
            f.close()
    def extendCSV(self, topic):
        """Append only rows not already present to the topic's existing CSV.

        Writes the current results to a temporary CSV, diffs it against the
        existing file, and appends the genuinely new rows.
        """
        #TMP CSV
        file_name_tmp = self.CONV_RESULTS_DIR+'_tmp_.csv'
        self.resetCSV('_tmp_')
        self.writeCSV(topic, True)
        #OLD CSV
        file_name = self.CONV_RESULTS_DIR+topic+'.csv'
        with open(file_name,'r') as file1:
            existingLines = [line for line in csv.reader(file1, delimiter=',')]
        #DIFF: Compare TMP CSV with OLD CSV to find new rows
        new = []
        with open(file_name_tmp,'r') as file2:
            reader2 = csv.reader(file2,delimiter=',')
            for row in reader2:
                if row not in new and row not in existingLines:
                    new.append(row)
        #EXTEND: Add new rows to OLD CSV (via append mode = 'a')
        with open(file_name, 'a') as f:
            csv_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for row in new:
                csv_writer.writerow(row)
        self.resetCSV('_tmp_')
    def writeCSV(self, topic, is_tmp = False):
        """Write self.searchresults to <topic>.csv (or _tmp_.csv if is_tmp)."""
        file_name = self.CONV_RESULTS_DIR+topic+'.csv'
        if is_tmp:
            file_name = self.CONV_RESULTS_DIR+'_tmp_.csv'
        with open(file_name, 'w') as f:
            csv_writer = csv.writer(f, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            for sr in self.searchresults:
                csv_writer.writerow([sr.title, sr.authors, sr.year, sr.text, sr.hyperlink, sr.pdflink, sr.cited, sr.typ, topic])
    def store(self):
        """Persist current results: extend an existing CSV or create a new one."""
        file_name = self.CONV_RESULTS_DIR+self.topic+'.csv'
        if self.CSVexists(self.topic):
            self.extendCSV(self.topic)
            print("Stored searchresults in ",file_name," (Extended)")
        else:
            self.writeCSV(self.topic)
            print("Stored searchresults in ",file_name," (New File)")
        #'.'+str(self.page)+
    ####################################################################
    # AUTO CONVERT
    ####################################################################
    def convert(self, topic, page):
        """Load, parse and store one saved results page."""
        self.load_file(topic, page)
        self.parse_list_of_searchresults()
        self.store()
    def convertAll(self, topic):
        """Convert every consecutively numbered saved page for `topic`."""
        print("__________________________________________________________________")
        print("Convert Searchresults for <<< ",topic," >>> to CSV:")
        page = 1
        while self.RAWexists(topic, page):
            self.convert(topic, page)
            page = page+1
page = page+1
| PrusakSebastian/PaprScrapr | src/python/SearchResultConverter.py | SearchResultConverter.py | py | 5,268 | python | en | code | 0 | github-code | 13 |
12777787967 | #!/usr/bin/env python
import os
import sys
import datetime
from dateutil import parser
from pprint import pprint as pp
import click
from tvoverlord.config import Config
from tvoverlord.db import DB
from tvoverlord.consoletable import ConsoleTable
from tvoverlord.downloadmanager import DownloadManager
from tvoverlord.search import Search
from tvoverlord.util import U
import tvoverlord.tvutil as tvu
class History:
    """Query and display the download history stored in the local database."""
    def __init__(self, criteria=1):
        # criteria selects the query: int -> last N days,
        # datetime -> a single date, str -> title substring.
        # NOTE(review): any other type leaves `sqldata` unbound and raises
        # NameError on the next line — confirm callers only pass these types.
        self.db = DB
        if criteria is None:
            criteria = 1
        if isinstance(criteria, int):
            sqldata = self.db.get_downloaded_days(criteria)
        elif isinstance(criteria, datetime.datetime):
            sqldata = self.db.get_downloaded_date(criteria)
        elif isinstance(criteria, str):
            sqldata = self.db.get_downloaded_title(criteria)
        self.sqldata = sqldata
    def episode(self, name, season, episode):
        """Return 'name SxxEyy', or just the name when season/episode missing."""
        seep = ''
        if season and episode:
            seep = ' S{0:0>2}E{1:0>2}'.format(season, episode)
        full = name + seep
        return full
    def exists(self, filename):
        """Return the filename, struck through when the file no longer exists."""
        if filename is None:
            return ''
        elif os.path.exists(filename):
            filename = filename
        else:
            filename = tvu.style(filename, fg='black', strike=True)
        return filename
    def format_date(self, date):
        """Format an ISO-ish date string as e.g. 'Mon Jan/02'."""
        parsed = parser.parse(date)
        new = parsed.strftime('%a %b/%d')
        return new
    def show(self, what):
        """Print one line per history row, using the field names in `what`.

        `what` is a comma-separated list of field names; when falsy a
        default format of date/title/complete/destination is used.
        """
        # date, title, season, episode, magnet, oneoff, complete, filename
        if what:
            what = what.replace(' ', '').split(',')
            line = []
            for i in what:
                line.append('{%s}' % i)
            line = ' '.join(line)
        else:
            line = '{date} {title} {complete} {destination}'
        # NOTE(review): this list is only used as an empty-data guard; it is
        # immediately replaced by the dict below.
        try:
            lengths = [1] * len(self.sqldata[0])
        except IndexError:
            return  # no sql data
        data = []
        lengths = {'date': 1, 'title': 1, 'filename': 1, 'hash': 1,
                   'destination': 1, 'season': 1, 'episode': 1,
                   'magnet': 1, 'oneoff': 1, 'complete': 1, }
        # build list and get the max lengths
        for row in self.sqldata:
            fields = {
                'date': self.format_date(row[0]),
                'title': row[1],
                'filename': self.exists(row[2]),
                'destination': self.exists(row[10]),
                'season': row[4],
                'episode': row[5],
                'magnet': row[6],
                'oneoff': 'one off' if row[7] else 'tracked',
                'complete': 'complete' if row[8] else 'incomplete',
                'hash': row[3],
            }
            data.append(fields)
            for key, value in fields.items():
                new = len(str(value))
                old = lengths[key]
                lengths[key] = max(new, old)
        # pad each field to the data in lengths
        for row in data:
            for name in row:
                try:
                    row[name] = row[name].ljust(lengths[name])
                except AttributeError:
                    # fields has None as value
                    row[name] = ''.ljust(lengths[name])
        for row in data:
            try:
                click.echo(line.format(**row).strip())
            except KeyError:
                sys.exit('Invalid key')
    def copy(self):
        """Interactively copy one or all downloaded files to the TV directory."""
        title = 'Copy files to %s' % Config.tv_dir
        choice, data = self.display_list(title, table_type='copy')
        click.echo()
        if choice == 'copy_all':
            copied_all = True
            for episode in data[1]:
                torrent_hash = episode[3]
                torrent_dir, torrent_name = os.path.split(episode[2])
                click.echo('Copying: %s... ' % episode[1], nl=False)
                try:
                    DownloadManager(torrent_hash, torrent_dir, torrent_name)
                except OSError as e:
                    copied_all = False
                    click.echo(tvu.style(str(e), fg='red'))
                else:
                    click.echo(tvu.style('Done', fg='green'))
            if not copied_all:
                click.echo()
                click.echo('Error: Some files could not be copied.')
        else:
            selected = [i for i in data[1] if choice in i][0]
            torrent_hash = selected[3]
            torrent_dir, torrent_name = os.path.split(selected[2])
            click.echo('Copying: %s... ' % selected[1], nl=False)
            try:
                DownloadManager(torrent_hash, torrent_dir, torrent_name)
            except OSError as e:
                click.echo(tvu.style(str(e), fg='red'))
                sys.exit(1)
            click.echo('Done')
    def download(self):
        """Interactively re-download a previously downloaded episode."""
        title = 'Re-download'
        choice, data = self.display_list(title, table_type='redownload')
        selected = [i for i in data[1] if choice in i][0]
        url = selected[-1]
        search = Search()
        search.download(chosen_show=url, destination=Config.staging)
    def display_list(self, title, table_type):
        """Render the history as a console table and return (choice, data).

        `table_type` is 'redownload' (magnet column) or 'copy' (file column).
        """
        sqldata = self.sqldata
        records = []
        if table_type == 'redownload':
            data = [
                [
                    title,
                    ['Date downloaded', 'Show name, episode', 'Magnet link'],
                    [16, 25, 0],
                    ['<', '<', '<']
                ]
            ]
            for i in sqldata:
                records.append([
                    self.format_date(i[0]),
                    self.episode(i[1], i[4], i[5]),
                    i[9],
                    i[9]]
                )
        elif table_type == 'copy':
            data = [
                [
                    title,
                    ['Date downloaded', 'Show name, episode', 'Source file'],
                    [16, 25, 0],
                    ['<', '<', '<']
                ]
            ]
            for i in sqldata:
                records.append([
                    self.format_date(i[0]),
                    self.episode(i[1], i[4], i[5]),
                    self.exists(i[2]),
                    i[3]]
                )
        data.append(records)
        tbl = ConsoleTable(data, table_type)
        tbl.set_count(None)
        result = tbl.generate()
        return (result, data)
if __name__ == '__main__':
    pass  # library module; nothing to run when executed directly
| shrx/tv-overlord | tvoverlord/history.py | history.py | py | 6,469 | python | en | code | null | github-code | 13 |
31943604960 | from random import randrange
from typing import List
# @lc code=start
class Solution:
    """Uniformly pick from [0, n) excluding `blacklist` (LeetCode 710).

    Maps every blacklisted value below ``n - len(blacklist)`` onto a
    distinct whitelisted value at or above that bound, so a single
    randrange over the bound suffices.
    """

    def __init__(self, n: int, blacklist: List[int]):
        self.bound = n - len(blacklist)
        # Blacklisted values sitting in the upper region can never be drawn
        # directly, but they must be skipped when assigning remap targets.
        upper_black = {value for value in blacklist if value >= self.bound}
        self.b2w = {}
        candidate = self.bound
        for value in blacklist:
            if value < self.bound:
                # Advance to the next whitelisted value in the upper region.
                while candidate in upper_black:
                    candidate += 1
                self.b2w[value] = candidate
                candidate += 1

    def pick(self) -> int:
        drawn = randrange(self.bound)
        # Remap a blacklisted draw; anything else is already whitelisted.
        return self.b2w.get(drawn, drawn)
# Your Solution object will be instantiated and called as such:
# obj = Solution(n, blacklist)
# param_1 = obj.pick()
# @lc code=end
| wylu/leetcodecn | src/python/p700to799/710.黑名单中的随机数.py | 710.黑名单中的随机数.py | py | 678 | python | en | code | 3 | github-code | 13 |
26790057040 | from typing import Any, Dict, Sequence, Tuple, Union
import hydra
import omegaconf
import pytorch_lightning as pl
import torch
import torchmetrics
from torch.optim import Optimizer
from transformers import AutoModelForSequenceClassification
import wandb
from src.common.constants import GenericConstants as gc
from src.common.utils import PROJECT_ROOT
class MyModel(pl.LightningModule):
    """Transformer sequence classifier wrapped as a LightningModule.

    Wraps a Hugging Face AutoModelForSequenceClassification and logs
    accuracy/precision/recall/F1 during training and validation.
    Fix vs. the original: ``test_step`` unpacked nothing and logged the
    whole ``(preds, logits, loss)`` tuple as ``"test_loss"``.
    """

    def __init__(self, model_name, num_labels, *args, **kwargs) -> None:
        super().__init__()
        # populate self.hparams with args and kwargs automagically!
        self.save_hyperparameters()
        self.bert = AutoModelForSequenceClassification.from_pretrained(
            model_name, num_labels=num_labels
        )
        self.num_classes = num_labels
        # Initialize metrics
        self.train_accuracy_metric = torchmetrics.Accuracy()
        self.val_accuracy_metric = torchmetrics.Accuracy()
        self.f1_metric = torchmetrics.F1(num_classes=self.num_classes)
        self.precision_macro_metric = torchmetrics.Precision(
            average="macro", num_classes=self.num_classes
        )
        self.recall_macro_metric = torchmetrics.Recall(
            average="macro", num_classes=self.num_classes
        )
        self.precision_micro_metric = torchmetrics.Precision(average="micro")
        self.recall_micro_metric = torchmetrics.Recall(average="micro")

    def forward(
        self, input_ids, attention_mask, labels=None
    ) -> Dict[str, torch.Tensor]:
        """Run the wrapped transformer; returns the HF model output object."""
        outputs = self.bert(
            input_ids=input_ids, attention_mask=attention_mask, labels=labels
        )
        return outputs

    def step(self, batch: Any, batch_idx: int):
        """Shared forward pass; returns (predictions, logits, loss)."""
        outputs = self.forward(
            batch["input_ids"], batch["attention_mask"], labels=batch[gc.LABEL]
        )
        preds = torch.argmax(outputs.logits, 1)
        return preds, outputs.logits, outputs.loss

    def training_step(self, batch: Any, batch_idx: int) -> torch.Tensor:
        """One optimization step: log train loss/accuracy and return the loss."""
        # Conduct forward step and retrieve
        # loss and logits output
        labels = batch[gc.LABEL]
        preds, logits, loss = self.step(batch, batch_idx)
        # Calculate metrics
        train_acc = self.train_accuracy_metric(preds, labels)
        # Log metrics
        self.log("train/loss", loss, prog_bar=True, on_epoch=True)
        self.log("train/acc", train_acc, prog_bar=True, on_epoch=True)
        return loss

    def validation_step(self, batch: Any, batch_idx: int) -> torch.Tensor:
        """Log validation metrics; returns labels/logits for the epoch hook."""
        labels = batch[gc.LABEL]
        preds, logits, loss = self.step(batch, batch_idx)
        # Metrics
        valid_acc = self.val_accuracy_metric(preds, labels)
        precision_macro = self.precision_macro_metric(preds, labels)
        recall_macro = self.recall_macro_metric(preds, labels)
        precision_micro = self.precision_micro_metric(preds, labels)
        recall_micro = self.recall_micro_metric(preds, labels)
        f1 = self.f1_metric(preds, labels)
        # Logging metrics
        self.log("valid/loss", loss, prog_bar=True, on_step=True)
        self.log("valid/acc", valid_acc, prog_bar=True, on_epoch=True)
        self.log("valid/precision_macro", precision_macro,
                 prog_bar=True, on_epoch=True)
        self.log("valid/recall_macro", recall_macro,
                 prog_bar=True, on_epoch=True)
        self.log("valid/precision_micro", precision_micro,
                 prog_bar=True, on_epoch=True)
        self.log("valid/recall_micro", recall_micro,
                 prog_bar=True, on_epoch=True)
        self.log("valid/f1", f1, prog_bar=True, on_epoch=True)
        return {"labels": labels, "logits": logits}

    def test_step(self, batch: Any, batch_idx: int) -> torch.Tensor:
        # Fixed: step() returns a (preds, logits, loss) tuple; the original
        # bound the whole tuple to `loss` and logged the tuple as "test_loss".
        preds, logits, loss = self.step(batch, batch_idx)
        self.log_dict(
            {"test_loss": loss},
        )
        return loss

    def validation_epoch_end(self, outputs):
        """Aggregate validation outputs and log confusion matrix / ROC to W&B."""
        labels = torch.cat([x["labels"] for x in outputs])
        logits = torch.cat([x["logits"] for x in outputs])
        # preds = torch.argmax(logits, 1)

        # There are multiple ways to track the metrics
        # 1. Confusion matrix plotting using inbuilt W&B method
        self.logger.experiment.log(
            {
                "conf": wandb.plot.confusion_matrix(
                    probs=logits.cpu().numpy(), y_true=labels.cpu().numpy()
                )
            }
        )

        # 2. Confusion Matrix plotting using scikit-learn method
        # wandb.log({"cm": wandb.sklearn.plot_confusion_matrix(labels.numpy(),
        # preds)})

        # 3. Confusion Matric plotting using Seaborn
        # data = confusion_matrix(labels.numpy(), preds.numpy())
        # df_cm = pd.DataFrame(data, columns=np.unique(labels),
        # index=np.unique(labels))
        # df_cm.index.name = "Actual"
        # df_cm.columns.name = "Predicted"
        # plt.figure(figsize=(7, 4))
        # plot = sns.heatmap(
        #     df_cm, cmap="Blues", annot=True, annot_kws={"size": 16}
        # )  # font size
        # self.logger.experiment.log({"Confusion Matrix": wandb.Image(plot)})

        self.logger.experiment.log(
            {"roc": wandb.plot.roc_curve(labels.cpu().numpy(),
                                         logits.cpu().numpy())}
        )

    def configure_optimizers(
        self,
    ) -> Union[Optimizer, Tuple[Sequence[Optimizer], Sequence[Any]]]:
        """
        Choose what optimizers and learning-rate schedulers to
        use in your optimization.
        Normally you'd need one. But in the case of GANs or similar
        you might have multiple.
        Return:
            Any of these 6 options.
            - Single optimizer.
            - List or Tuple - List of optimizers.
            - Two lists - The first list has multiple optimizers, the second
              a list of LR schedulers (or lr_dict).
            - Dictionary, with an 'optimizer' key, and (optionally) a
              'lr_scheduler'
              key whose value is a single LR scheduler or lr_dict.
            - Tuple of dictionaries as described, with an optional
              'frequency' key.
            - None - Fit will run without any optimizer.
        """
        opt = hydra.utils.instantiate(
            self.hparams.optim.optimizer,
            params=self.parameters(),
            _convert_="partial"
        )
        if not self.hparams.optim.use_lr_scheduler:
            return [opt]
        scheduler = hydra.utils.instantiate(
            self.hparams.optim.lr_scheduler, optimizer=opt
        )
        return [opt], [scheduler]
@hydra.main(config_path=str(PROJECT_ROOT / "conf"), config_name="default")
def main(cfg: omegaconf.DictConfig):
    """Smoke test: instantiate the model from the Hydra config and report."""
    model: pl.LightningModule = hydra.utils.instantiate(
        cfg.model.modelmodule,
        optim=cfg.optim,
        data=cfg.data,
        logging=cfg.logging,
        _recursive_=False,
    )
    print("Success!") if model else print("Fail!")
if __name__ == "__main__":
    main()
| ktl014/eval-student-writing | src/pl_modules/model.py | model.py | py | 6,995 | python | en | code | 0 | github-code | 13 |
72915384978 | import os
import os.path
import sys
import glob
import shutil
import fnmatch
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir,
os.pardir))
from scripts import utils
recursive_lint = ('__pycache__', '*.pyc')
lint = ('build', 'dist', 'pkg/pkg', 'pkg/qutebrowser-*.pkg.tar.xz', 'pkg/src',
'pkg/qutebrowser', 'qutebrowser.egg-info', 'setuptools-*.egg',
'setuptools-*.zip', 'doc/qutebrowser.asciidoc', 'doc/*.html',
'doc/qutebrowser.1', 'README.html', 'qutebrowser/html/doc')
def remove(path):
"""Remove either a file or directory unless --dry-run is given."""
if os.path.isdir(path):
print("rm -r '{}'".format(path))
if '--dry-run' not in sys.argv:
shutil.rmtree(path)
else:
print("rm '{}'".format(path))
if '--dry-run' not in sys.argv:
os.remove(path)
def main():
"""Clean up lint in the current dir."""
utils.change_cwd()
for elem in lint:
for f in glob.glob(elem):
remove(f)
for root, _dirs, _files in os.walk(os.getcwd()):
path = os.path.basename(root)
if any(fnmatch.fnmatch(path, e) for e in recursive_lint):
remove(root)
if __name__ == '__main__':
main()
| qutebrowser/qutebrowser | scripts/dev/cleanup.py | cleanup.py | py | 1,281 | python | en | code | 9,084 | github-code | 13 |
71536209939 | import logging
from fastapi import APIRouter, Depends, HTTPException
from starlette import status
from tortoise.transactions import in_transaction
from core.auth import auth_current_user
from core.helpers import get_amount, get_refund_amount
from core.roles import get_roles_client
from core.stripe import get_stripe
from db.repositories.order import OrderRepository
from db.repositories.payment_method import PaymentMethodRepository
from db.repositories.subscription import SubscriptionRepository
from db.repositories.user_subscription import UserSubscriptionRepository
from models.api_models import PaymentDataIn
from models.common_models import OrderStatus, SubscriptionState
router = APIRouter()
logger = logging.getLogger(__name__)
@router.post("/subscription/payment/create")
async def create_subscription_payment(
payment_data: PaymentDataIn,
auth_user=Depends(auth_current_user),
stripe_client=Depends(get_stripe),
order_repository=Depends(OrderRepository),
user_subscription_repository=Depends(UserSubscriptionRepository),
) -> None:
"""Метод оформления (оплаты) подписки"""
user_subscription = await user_subscription_repository.get_user_subscription(
user_id=auth_user.user_id,
status=[
SubscriptionState.ACTIVE,
SubscriptionState.CANCELED,
],
)
if user_subscription:
logger.error(
"Error when paying for a subscription, %s has active/not expired or paid subscription",
auth_user.user_id,
)
raise HTTPException(status.HTTP_409_CONFLICT, detail="User has subscriptions")
user_order = await order_repository.get_order(
user_id=auth_user.user_id, status=OrderStatus.PROGRESS
)
if user_order:
logger.error(
"Error when paying for a subscription, user %s has order in progress",
auth_user.user_id,
)
raise HTTPException(
status.HTTP_409_CONFLICT, detail="User has order in progress"
)
subscription = await SubscriptionRepository.get_subscription(
subscription_id=payment_data.subscription_id
)
if not subscription:
logger.error(
"Error when paying for a subscription, subscription with id-%s does not exist",
payment_data.subscription_id,
)
raise HTTPException(
status.HTTP_404_NOT_FOUND, detail="Subscription does not exist"
)
stripe_payment_method = await stripe_client.create_payment_method(
payment_method_data=payment_data.payment_method
)
async with in_transaction():
payment_method = await PaymentMethodRepository.create_payment_method(
payment_method_data=stripe_payment_method, user_id=auth_user.user_id
)
order = await order_repository.create_order(
user_id=auth_user.user_id,
user_email=auth_user.user_email,
subscription=subscription,
payment_data=payment_data,
payment_method=payment_method,
)
logger.info("Order %s created for user %s", order.id, auth_user.user_id)
customer = await stripe_client.create_customer(
user_id=order.user_id,
user_email=order.user_email,
)
await stripe_client.attach_payment_method(
payment_method_id=payment_method.id, customer_id=customer.id
)
payment = await stripe_client.create_payment(
customer_id=customer.id,
user_email=customer.email,
amount=get_amount(order.total_cost),
currency=order.currency.value,
payment_method_id=order.payment_method.id,
)
logger.info("Payment %s created for user %s", payment.id, auth_user.user_id)
await order_repository.update_order_external_id(
order_id=order.id,
external_id=payment.id,
status=OrderStatus.PROGRESS,
customer_id=customer.id,
)
logger.info(
"Order %s update status to progress and has external_id %s",
order.id,
payment.id,
)
@router.post("/subscription/payment/confirm")
async def confirm_subscription_payment(
payment_id: str,
auth_user=Depends(auth_current_user),
stripe_client=Depends(get_stripe),
order_repository=Depends(OrderRepository),
) -> None:
"""Метод подтверждения платёжа пользователем"""
user_order = await order_repository.get_order(
user_id=auth_user.user_id,
status=OrderStatus.PROGRESS,
)
if not user_order:
logger.error(
"Error when confirm payment a subscription, user % has no processing orders",
auth_user.user_id,
)
raise HTTPException(
status.HTTP_404_NOT_FOUND, detail="User has no processing orders"
)
await stripe_client.confirm_payment(
payment_id=payment_id, payment_method=user_order.payment_method.id
)
@router.post("/subscription/refund")
async def refund_subscription(
auth_user=Depends(auth_current_user),
roles_client=Depends(get_roles_client),
stripe_client=Depends(get_stripe),
order_repository=Depends(OrderRepository),
user_subscription_repository=Depends(UserSubscriptionRepository),
) -> None:
"""Метод возврата денег за подписку"""
user_subscription = await user_subscription_repository.get_user_subscription(
user_id=auth_user.user_id,
status=[SubscriptionState.ACTIVE, SubscriptionState.CANCELED],
)
if not user_subscription:
logger.error(
"Error when refunding a subscription, user %s has no active subscription",
auth_user.user_id,
)
raise HTTPException(
status.HTTP_404_NOT_FOUND, detail="User has no active subscription"
)
user_order = await order_repository.get_order(
user_id=auth_user.user_id,
status=OrderStatus.PAID,
subscription=user_subscription.subscription,
)
if not user_order:
logger.error(
"Error when returning a subscription, user %s has no actual paid orders",
auth_user.user_id,
)
raise HTTPException(
status.HTTP_404_NOT_FOUND, detail="User has no actual paid orders"
)
refund_amount = get_refund_amount(
end_date=user_subscription.end_date,
amount=user_order.total_cost,
period=user_order.subscription.period.value,
)
async with in_transaction():
refund_order = await order_repository.create_refund_order(
order=user_order, total_cost=refund_amount
)
logger.info(
"Refund order %s created for user %s", refund_order.id, auth_user.user_id
)
refund = await stripe_client.create_refund(
payment_intent_id=user_order.external_id, amount=get_amount(refund_amount)
)
await order_repository.update_order_external_id(
order_id=refund_order.id, external_id=refund.id, status=OrderStatus.PROGRESS
)
logger.info(
"Refund order %s update status to progress and has external_id %s",
refund_order.id,
refund.id,
)
await user_subscription_repository.update_user_subscription_status_by_id(
subscription_id=user_subscription.id, status=SubscriptionState.INACTIVE
)
logger.info("Subscription %s update status to inactive", user_subscription.id)
await roles_client.revoke_role(
user_id=refund_order.user_id,
role_title=f"subscriber_{refund_order.subscription.type.value}",
)
logger.info(
"Roles subscriber_%s has revoke from user %s",
refund_order.subscription.type.value,
refund_order.user_id,
)
@router.post("/subscription/cancel")
async def cancel_subscription(
auth_user=Depends(auth_current_user),
user_subscription_repository=Depends(UserSubscriptionRepository),
) -> None:
"""Метод отказа от подписки (отказа от автоматической пролонгации)"""
user_subscription = await user_subscription_repository.get_user_subscription(
user_id=auth_user.user_id,
subscription__automatic=True,
status=[SubscriptionState.ACTIVE],
)
if not user_subscription:
logger.info(
"Error when canceling a subscription, user %s has no active automatic subscription",
auth_user.user_id,
)
raise HTTPException(
status.HTTP_404_NOT_FOUND,
detail="User has no active automatic subscription",
)
await user_subscription_repository.update_user_subscription_status_by_id(
subscription_id=user_subscription.id, status=SubscriptionState.CANCELED
)
logger.info("Subscription %s update status to canceled", user_subscription.id)
| Ivan-Terex91/graduate_work | billing_api/api/v1/billing.py | billing.py | py | 9,066 | python | en | code | 0 | github-code | 13 |
32053534315 | """Provides the 4heat DataUpdateCoordinator."""
from __future__ import annotations
from collections.abc import Coroutine
from dataclasses import dataclass
from datetime import timedelta
from typing import Any, cast
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import device_registry, entity_registry
from homeassistant.helpers.debounce import Debouncer
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
DATA_CONFIG_ENTRY,
DOMAIN,
ENTRY_RELOAD_COOLDOWN,
LOGGER,
SENSORS,
UPDATE_INTERVAL,
)
from .exceptions import FourHeatError
from .fourheat import FourHeatDevice
@dataclass
class FourHeatEntryData:
    """Class for sharing data within a given config entry."""
    # Both fields are populated during config-entry setup; None before that.
    coordinator: FourHeatCoordinator | None = None
    device: FourHeatDevice | None = None
def get_entry_data(hass: HomeAssistant) -> dict[str, FourHeatEntryData]:
    """Return 4heat entry data for a given config entry."""
    # hass.data stores untyped values; cast for the type checker.
    return cast(dict[str, FourHeatEntryData], hass.data[DOMAIN][DATA_CONFIG_ENTRY])
class FourHeatCoordinator(DataUpdateCoordinator):
"""Class to manage fetching 4heat data."""
def __init__(
self, hass: HomeAssistant, entry: ConfigEntry, device: FourHeatDevice
) -> None:
"""Init the coorditator."""
self.device_id: str | None = None
self.hass = hass
self.entry = entry
self.device = device
self.sensors: dict[str, dict] = {}
self.platforms: dict[str, list[dict[str, dict]]] = {}
self._update_is_running: bool = False
self.unload_platforms: dict | None = None
super().__init__(
hass,
LOGGER,
name=device.name,
update_interval=timedelta(seconds=UPDATE_INTERVAL),
)
self._debounced_reload: Debouncer[Coroutine[Any, Any, None]] = Debouncer(
hass,
LOGGER,
cooldown=ENTRY_RELOAD_COOLDOWN,
immediate=False,
function=self._async_reload_entry,
)
if not self.device.initialized:
sensors = {}
ent_reg = entity_registry.async_get(hass)
entries = entity_registry.async_entries_for_config_entry(
ent_reg, self.entry.entry_id
)
for sensor in entries:
sensors[sensor.unique_id.split("-")[-1]] = {
"sensor_type": None,
"value": None,
}
self.sensors = sensors
else:
self.sensors = device.sensors
self.platforms = self.build_platforms()
entry.async_on_unload(self._debounced_reload.async_cancel)
entry.async_on_unload(
self.async_add_listener(self._async_device_updates_handler)
)
@callback
def build_platforms(
self,
) -> dict[str, list[dict[str, dict]]]:
"""Find available platforms."""
platforms: dict[str, list] = {}
if not self.sensors:
return platforms
for attr in self.sensors:
try:
sensor_conf = SENSORS[attr]
except KeyError:
LOGGER.warning(
"Sensor %s is not known. Please inform the mainteainer", attr
)
sensor_conf = [
{
"name": f"UN {attr}",
"platform": "sensor",
}
]
for sensor in sensor_conf:
sensor_description = {}
keys = {}
try:
platform = str(sensor["platform"])
except KeyError:
LOGGER.warning(
"Mandatory config entry 'platforms' for sensor %s is missing. Please contact maintainer",
attr,
)
platform = "sensor"
for key, value in sensor.items():
if key != "platform":
if value:
keys[key] = value
else:
LOGGER.debug(
"Empty value for %s in sensor %s configuration",
key,
attr,
)
if keys:
sensor_description[attr] = keys
if platform not in platforms:
platforms[platform] = []
platforms[platform].append(sensor_description)
return platforms
async def _async_reload_entry(self) -> None:
"""Reload entry."""
LOGGER.debug("Reloading entry %s", self.name)
await self.hass.config_entries.async_reload(self.entry.entry_id)
@callback
def _async_device_updates_handler(self) -> None:
"""Finish async init."""
if self.sensors.keys() != self.device.sensors.keys():
self.unload_platforms = self.platforms
self.sensors = self.device.sensors
self.platforms = self.build_platforms()
self.async_setup()
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setups(
self.entry, self.platforms
)
)
self.hass.async_create_task(self._debounced_reload.async_call())
    async def _async_update_data(self, init: bool = False) -> None:
        """Update data via device library.

        Skips the poll when a previous update is still in flight.  On device
        failure, records the exception and raises ``UpdateFailed`` (chained)
        so the coordinator marks the refresh as unsuccessful.

        NOTE(review): the *init* parameter is currently unused — confirm
        whether any caller relies on it.
        """
        LOGGER.debug("Trying update of data")
        LOGGER.debug("Last update success: %s", self.last_update_success)
        if self._update_is_running:
            LOGGER.debug("Last update try is still running. Canceling new one")
            return
        self._update_is_running = True
        try:
            await self.device.async_update_data()
        except FourHeatError as error:
            self.last_exception = error
            LOGGER.debug(
                "Update of data failed: %s",
                repr(error),
            )
            raise UpdateFailed from error
        finally:
            # Always clear the in-flight flag, even when the update failed.
            self._update_is_running = False
    def async_setup(self) -> None:
        """Register the device in the HA device registry and store its id."""
        dev_reg = device_registry.async_get(self.hass)
        entry = dev_reg.async_get_or_create(
            config_entry_id=self.entry.entry_id,
            name=self.name,
            manufacturer=self.manufacturer,
            model=self.model,
            identifiers={("serial", str(self.serial))},
        )
        self.device_id = entry.id
    @property
    def model(self) -> str:
        """Model name reported by the device library."""
        return cast(str, self.device.model)
@property
def serial(self) -> str:
"""Get serial of the device."""
if not self.device.initialized or not self.device.serial:
if self.entry.unique_id:
return self.entry.unique_id
return self.entry.entry_id
return self.device.serial
    @property
    def manufacturer(self) -> str:
        """Manufacturer name reported by the device library."""
        return cast(str, self.device.manufacturer)
    def info(self, attr: str) -> dict[str, Any] | None:
        """Return the stored info dict for sensor *attr* (KeyError if absent)."""
        return self.sensors[attr]
| anastas78/homeassistant-fourheat | custom_components/fourheat/coordinator.py | coordinator.py | py | 7,438 | python | en | code | 0 | github-code | 13 |
72013214419 | from flask import Flask, request, jsonify
from load_model_and_recommend import recommend_for_book
app = Flask(__name__)
app.config["DEBUG"] = True
@app.route('/')
def hello():
    """Root route: plain-text greeting (sanity check that the app is up)."""
    return 'Hello World!'
@app.route('/recommend_book',methods=['GET'])
def sen_recommend():
    """Recommend books similar to the ``book_name`` query parameter.

    Query params:
        book_name: title of the seed book (required).
        n: number of neighbours to return (optional, defaults to 10).

    Returns a JSON list of recommendations, or an error string on bad input.
    """
    if 'book_name' not in request.args:
        return 'no input'
    book_name = request.args['book_name']
    n = 10
    if 'n' in request.args:
        try:
            n = int(request.args['n'])
        except ValueError:  # was a bare except: only int() parsing can fail here
            return "invalid n value ,accepts int only"
    recommendations = recommend_for_book(book_name, return_list=True, n_neighbors=n)
    return jsonify(recommendations)
# books = [
# {'id': 0,
# 'title': 'A Fire Upon the Deep',
# 'author': 'Vernor Vinge',
# 'first_sentence': 'The coldsleep itself was dreamless.',
# 'year_published': '1992'},
# {'id': 1,
# 'title': 'The Ones Who Walk Away From Omelas',
# 'author': 'Ursula K. Le Guin',
# 'first_sentence': 'With a clamor of bells that set the swallows soaring, the Festival of Summer came to the city Omelas, bright-towered by the sea.',
# 'published': '1973'},
# {'id': 2,
# 'title': 'Dhalgren',
# 'author': 'Samuel R. Delany',
# 'first_sentence': 'to wound the autumnal city.',
# 'published': '1975'}
# ]
# @app.route('/recommend_api', methods=['GET', 'POST'])
# def method_name():
# return jsonify(books)
# @app.route('/api/v1/resources/books', methods=['GET'])
# def api_id():
# # Check if an ID was provided as part of the URL.
# # If ID is provided, assign it to a variable.
# # If no ID is provided, display an error in the browser.
# if 'id' in request.args:
# id = int(request.args['id'])
# else:
# return "Error: No id field provided. Please specify an id."
# # Create an empty list for our results
# results = []
# # Loop through the data and match results that fit the requested ID.
# # IDs are unique, but other fields might return many results
# for book in books:
# if book['id'] == id:
# results.append(book)
# # Use the jsonify function from Flask to convert our list of
# # Python dictionaries to the JSON format.
# return jsonify(results)
if __name__ == '__main__':
    # Run the Flask development server (debug enabled via app.config above).
    app.run()
71253508819 | import matplotlib.pyplot as plt
# Sample points for a simple line plot.
x = [2, 6, 9, 1]
y = [8, 3, 7, 1]

plot = plt.plot(x, y)
plt.title('line')
plt.xlabel('x')
plt.ylabel('y')
plt.grid(axis='both')  # show both horizontal and vertical grid lines
plt.show()
28920375748 | """
Exercício - Salvando a classe em json
Salve os dados da sua classe em JSON
e depois crie novamente as instâncias
da classe com os dados salvos
Faça em arquivos separados.
"""
import json
import os
BASE_DIR = os.path.dirname(__file__)
SAVE_TO = os.path.join(BASE_DIR, 'ex24.json')
class Estadio:
    """A football stadium and its basic characteristics."""

    def __init__(self, nome, capacidade, gramado,
                 estrutura, localizacao, arquitetura):
        self.nome = nome
        self.capacidade = capacidade
        self.gramado = gramado
        self.estrutura = estrutura
        self.localizacao = localizacao
        self.arquitetura = arquitetura

    def converter(self):
        """Return the stadium's attributes as a plain, JSON-serialisable dict."""
        # vars(self) already holds exactly the six attributes set in __init__,
        # in assignment order; copy it so callers cannot mutate the instance.
        return dict(vars(self))
def salvar_classe(info):
    """Serialise *info* (plain list/dict data) to the ex24.json file."""
    with open(SAVE_TO, 'w') as file:
        # ensure_ascii=False keeps accented Portuguese characters readable.
        json.dump(info, file, indent=2, ensure_ascii=False)
# Two example stadiums used as the demo payload.
maracana = Estadio('Maracanã', 78.838, 'natural', 'moderno',
                   'Rio de Janeiro', 'marcante')
pacaiambu = Estadio('Pacaiambu', 40.199, 'conservado, natural', 'tradicional',
                    'São Paulo', 'clássica')
# vars() turns each instance into its attribute dict for JSON serialisation.
estadios = [vars(maracana), vars(pacaiambu)]
if __name__ == '__main__':
    salvar_classe(estadios)
27636075683 | import pygame
from main.networking import Networking
from main.display import Display
from main.entity_manager import EntityManager
from main.character import Character
from main.controls import Controls
class Client():
    """Top-level game client.

    NOTE: the constructor blocks — it starts the networking thread, opens the
    display, and then enters the main loop via start_client() until quit.
    """
    def __init__(self):
        self.tps=60  # target ticks (frames) per second
        self.clock = pygame.time.Clock()
        self.is_running=True
        self.data='N'
        self.controls=Controls(self)
        self.time = 1635690553843  # epoch-ms constant; purpose unclear — TODO confirm
        self.start_networking()
        self.start_display()
        # Logical server playfield size; used to scale server coordinates
        # onto the local window.
        self.server_x_size = 1600.
        self.server_y_size = 900.
        self.x_scale = self.display_width/self.server_x_size
        self.y_scale = self.display_height/self.server_y_size
        self.start_client()  # blocks until the game loop exits
    def start_networking(self):
        """Create the Networking helper and start its background client thread."""
        self.networking=Networking(self)
        self.networking.start_client_thread()
    def start_display(self):
        """Create and start the Display at a fixed 1366x768 resolution."""
        self.display_width = 1366
        self.display_height = 768
        self.display=Display(self, self.display_width, self.display_height)
        self.display.start_display()
    def start_client(self):
        """Run the main loop: poll events, update entities and redraw each tick."""
        self.is_running=True
        self.entity_manager = EntityManager(self)
        #self.entity_manager.create_entity(Character, 45, x=15, y=10)
        while self.is_running:
            self.clock.tick(self.tps)  # cap the loop at self.tps iterations/second
            #print('client thread')
            for event in pygame.event.get():
                # Quit on window close or on the Escape key.
                if (
                    event.type == pygame.QUIT or
                    (
                        event.type==pygame.KEYDOWN and
                        event.key==pygame.K_ESCAPE
                    )
                ):
                    self.is_running = False
            self.controls.get_keys()
            self.entity_manager.update()
            self.display.update()
            #print(self.networking.data)
            #keys=pygame.key.get_pressed()
            #self.data=keys[pygame.K_w] and 'W' or 'N'
            #print(self.data)
| ouriquegustavo/PyNamite | client/main/client.py | client.py | py | 2,090 | python | en | code | 1 | github-code | 13 |
16138189851 | import tweepy
from textblob import TextBlob
# Twitter API credentials — replace with your own application's keys.
Consumer_Key = "your consumer key"
Consumer_Secret = "your consumer secret"
Access_Token = "your access token"
AccessToken_Secret = "your access token secret"

# Authenticate against the Twitter API via OAuth.
authen = tweepy.OAuthHandler(Consumer_Key, Consumer_Secret)
authen.set_access_token(Access_Token, AccessToken_Secret)
api = tweepy.API(authen)

# Fetch tweets matching the query and print each text with its sentiment.
tweets = api.search("trump")
for tweet in tweets:
    print(tweet.text)
    analysis = TextBlob(tweet.text)
    print(analysis.sentiment)
| chajaykrishna/TwitterSentimentAnalysis | tweets.py | tweets.py | py | 464 | python | en | code | 0 | github-code | 13 |
14230982357 | from django.db import migrations, models
class Migration(migrations.Migration):
    """Tighten uniqueness/indexing on role tables and add RoleCommonAction."""

    dependencies = [
        ('role', '0004_auto_20200916_2012'),
    ]
    operations = [
        # Enforce one related-object row per (role, object_type, object_id).
        migrations.AlterUniqueTogether(
            name='rolerelatedobject',
            unique_together={('role_id', 'object_type', 'object_id')},
        ),
        # Index the per-role lookup on permission/scope/user tables.
        migrations.AlterIndexTogether(
            name='roleperm',
            index_together={('role_id',)},
        ),
        migrations.AlterIndexTogether(
            name='rolescope',
            index_together={('role_id',)},
        ),
        migrations.AlterIndexTogether(
            name='roleuser',
            index_together={('role_id',)},
        ),
        migrations.CreateModel(
            name='RoleCommonAction',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('creator', models.CharField(max_length=64, verbose_name='创建者')),
                ('updater', models.CharField(max_length=64, verbose_name='更新者')),
                ('created_time', models.DateTimeField(auto_now_add=True)),
                ('updated_time', models.DateTimeField(auto_now=True)),
                ('role_id', models.IntegerField(verbose_name='角色ID')),
                ('system_id', models.CharField(max_length=32, verbose_name='系统ID')),
                ('name', models.CharField(max_length=128, verbose_name='名称')),
                ('name_en', models.CharField(default='', max_length=128, verbose_name='名称EN')),
                # Text-serialised list of action ids, stored in column 'action_ids'.
                ('_action_ids', models.TextField(db_column='action_ids', verbose_name='操作列表')),
            ],
            options={
                'verbose_name': '角色常用操作',
                'verbose_name_plural': '角色常用操作',
                'ordering': ['id'],
                'index_together': {('role_id', 'system_id')},
            },
        ),
    ]
| TencentBlueKing/bk-iam-saas | saas/backend/apps/role/migrations/0005_auto_20201029_2028.py | 0005_auto_20201029_2028.py | py | 1,962 | python | en | code | 24 | github-code | 13 |
class MinStack:
    """Stack supporting O(1) push, pop, top and getMin.

    Empty-stack queries return False (preserving the original interface).
    The original getMin() scanned the whole stack (O(n)); this version keeps
    a parallel stack of running minimums so getMin() is O(1).
    """

    def __init__(self):
        """Initialize an empty stack plus its auxiliary minimum stack."""
        self.stack = []
        # _mins[i] == min(stack[:i+1]); kept in lockstep with self.stack.
        self._mins = []

    def push(self, x):
        """Push x onto the stack.

        :type x: int
        :rtype: None
        """
        self.stack.append(x)
        self._mins.append(x if not self._mins else min(x, self._mins[-1]))

    def pop(self):
        """Remove and return the top element, or False if the stack is empty.

        :rtype: int | bool
        """
        if not self.stack:
            return False
        self._mins.pop()
        return self.stack.pop()

    def top(self):
        """Return the top element without removing it, or False if empty.

        :rtype: int | bool
        """
        if not self.stack:
            return False
        return self.stack[-1]

    def getMin(self):
        """Return the minimum element in O(1), or False if the stack is empty.

        :rtype: int | bool
        """
        if not self.stack:
            return False
        return self._mins[-1]
# Your MinStack object will be instantiated and called as such:
if __name__ == '__main__':
    # Smoke test — expected output: -3, -3, 0, -2.
    minStack = MinStack()
    minStack.push(-2)
    minStack.push(0)
    minStack.push(-3)
    print(minStack.getMin())
    print(minStack.pop())
    print(minStack.top())
    print(minStack.getMin())
| Weikoi/OJ_Python | leetcode/easy/easy 1-200/155_最小值栈.py | 155_最小值栈.py | py | 994 | python | en | code | 0 | github-code | 13 |
10840022252 | import logging
import numpy as np
import tensorflow as tf
from tensorflow.python.estimator.estimator import Estimator
from tensorflow.python.estimator.run_config import RunConfig
from tensorflow.python.estimator.model_fn import EstimatorSpec
from tensorflow.keras.utils import Progbar
from .text_preprocessing import FullTokenizer, convert_lst_to_features, stub_preprocessor
# Configure the 'tensorflow' logger with a single timestamped stream handler,
# replacing any handlers TF installed so each line is emitted exactly once.
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s : %(message)s')
sh = logging.StreamHandler()
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
logger.handlers = [sh]
class BERTFeatureExtractor(object):
    """Compute fixed-size BERT sentence embeddings from a frozen TF1 graph.

    A single long-lived ``Estimator.predict`` generator is created at init
    time; ``transform`` hands batches to it through ``DataContainer`` and
    pulls the encoded matrices back, so the TF graph is loaded exactly once.
    """
    def __init__(self, graph_path, vocab_path,
                 preprocessor=stub_preprocessor, use_gpu=True,
                 batch_size=256, seq_len=64, space_escape='_'):
        self.batch_size = batch_size
        self.seq_len = seq_len
        self._tokenizer = FullTokenizer(vocab_path)
        self._preprocessor = preprocessor
        self._graphdef = graph_path
        self._use_gpu = use_gpu
        self._config = self._build_config()
        self._graph = tf.Graph()
        self._sess = tf.Session(graph=self._graph, config=self._config)
        self._input_names = ['input_ids', 'input_mask', 'input_type_ids']
        # Stand-in token substituted for empty inputs; their output rows are
        # zeroed in transform().
        self._space_escape = space_escape
        self._data_container = DataContainer()
        with self._graph.as_default():
            self._estimator = self._build_estimator()
            self._input_fn = self._build_input_fn()
            # predict() is lazy: nothing runs until next() is called on it.
            self._predict_fn = self._estimator.predict(
                input_fn=self._input_fn, yield_single_examples=False)
        # Warm-up call so the first real transform() is not slowed by graph setup.
        self.transform(self._space_escape)
        logger.info('Initialized.')
    def _build_config(self):
        """Session config: optionally expose one GPU, with growing memory."""
        config = tf.ConfigProto(device_count={'GPU': 1 if self._use_gpu else 0})
        config.gpu_options.allow_growth = True
        return config
    def _build_estimator(self):
        """Wrap the frozen GraphDef in an Estimator whose model_fn imports the
        graph and maps the three BERT inputs to the 'final_encodes' output."""
        def model_fn(features, mode):
            with tf.gfile.GFile(self._graphdef, 'rb') as f:
                graph_def = tf.GraphDef()
                graph_def.ParseFromString(f.read())
                output = tf.import_graph_def(graph_def,
                                             input_map={k + ':0': features[k]
                                                        for k in self._input_names},
                                             return_elements=['final_encodes:0'])
                return EstimatorSpec(mode=mode, predictions={'output': output[0]})
        return Estimator(model_fn=model_fn, config=RunConfig(session_config=self._config))
    def _build_input_fn(self):
        """Dataset input_fn backed by an endless generator; each next() on the
        predict generator consumes the batch last stored in DataContainer."""
        def generator():
            while True:
                yield self._build_feed_dict(self._data_container.get())
        def input_fn():
            return tf.data.Dataset.from_generator(
                generator,
                output_types={iname: tf.int32 for iname in self._input_names},
                output_shapes={iname: (None, None) for iname in self._input_names})
        return input_fn
    def _build_feed_dict(self, texts):
        """Tokenize *texts* and stack each BERT input feature into an int array
        of shape (len(texts), seq_len)."""
        text_features = list(convert_lst_to_features(
            texts, self.seq_len, self.seq_len,
            self._tokenizer, is_tokenized=False, mask_cls_sep=False))
        target_shape = (len(texts), -1)
        feed_dict = {}
        for iname in self._input_names:
            features_i = np.array([getattr(f, iname) for f in text_features])
            features_i = features_i.reshape(target_shape)
            feed_dict[iname] = features_i
        return feed_dict
    def transform(self, texts, verbose=False):
        """Return an (n_texts, dim) matrix of embeddings for *texts*.

        Empty strings are replaced by the escape token for the model pass and
        their rows are zeroed in the returned matrix.
        """
        if type(texts) is str:
            texts = [texts]
        texts = list(map(self._preprocessor, texts))
        n_samples = len(texts)
        blank_idx = []
        for i, text in enumerate(texts):
            if len(text) == 0:
                texts[i] = self._space_escape
                blank_idx.append(i)
        bar = Progbar(n_samples)
        mats = []
        for bi, text_batch in enumerate(batch(texts, self.batch_size)):
            # Hand the batch to the generator feeding the predict loop, then
            # pull its encoded output synchronously.
            self._data_container.set(text_batch)
            features = next(self._predict_fn)['output']
            mats.append(features)
            if verbose:
                bar.add(len(text_batch))
        mat = np.vstack(mats)
        if len(blank_idx):
            blank_idx = np.array(blank_idx)
            mat[blank_idx] = 0.0
        return mat
    def __call__(self, texts, verbose=False):
        """Alias for transform()."""
        return self.transform(texts, verbose)
class DataContainer:
    """Single-slot mailbox passing text batches into the input generator."""
    def __init__(self):
        self._samples = None
    def set(self, samples):
        """Store the next batch to be consumed by the generator."""
        self._samples = samples
    def get(self):
        """Return the most recently stored batch."""
        return self._samples
def batch(iterable, n=1):
    """Yield successive slices of *iterable* of length at most *n*.

    The final slice may be shorter when len(iterable) is not a multiple of n.
    """
    total = len(iterable)
    start = 0
    while start < total:
        # Slicing clamps to the sequence end, so no explicit min() is needed.
        yield iterable[start:start + n]
        start += n
| gaphex/bert_experimental | bert_experimental/feature_extraction/bert_feature_extractor.py | bert_feature_extractor.py | py | 4,951 | python | en | code | 77 | github-code | 13 |
26312019932 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 28 14:15:49 2022
@author: nathan
"""
import pandas as pd
import geopandas as gpd
import os
from shapely.geometry import Polygon
import folium
# Degree spacing between grid cells
latDeg = 0.5
lonDeg = 0.625

crs_list = ["EPSG:4326", "EPSG:6933", "EPSG:3857"]  # Careful which CRS is used, will affect results
path = os.getcwd()

# import list of grid cells
gridcells = pd.read_excel(path + '/coordinate.xlsx')
gridcells.dropna(inplace=True)

# Convert each grid-cell point into a rectangular polygon, treating the point
# as the lower-left corner of a latDeg x lonDeg tile.
polys = [
    Polygon(zip([row.lon, row.lon, row.lon + lonDeg, row.lon + lonDeg],
                [row.lat, row.lat + latDeg, row.lat + latDeg, row.lat]))
    for _, row in gridcells.iterrows()
]

# Store the grid-cell polygons in a GeoDataFrame alongside their ids.
data = gpd.GeoDataFrame(gridcells['grid cell'], geometry=polys)
data['lat'] = gridcells.lat

# Compute each tile's area under every candidate CRS for comparison.
for crs in crs_list:
    data_temp = data.set_crs(crs, allow_override=True).copy()
    colname = 'area_' + crs
    data[colname] = data_temp.area

data = data.drop('geometry', axis=1)
30121127086 | import turtle
t= turtle.Turtle()
t.shape("turtle")
def house():
    """Draw one house: a 100x100 square body topped with a triangular roof."""
    # Square body — the turn sequence retraces two sides so the pen ends at
    # the corner where the roof starts.
    for _ in range(5):
        t.forward(100)
        t.left(90)
    t.forward(100)
    t.right(90)
    t.forward(20)
    # Roof triangle.
    t.left(135)
    t.forward(100)
    t.left(90)
    t.forward(100)
    t.left(135)
    t.forward(22)
def gres():
    """Draw the whole scene: coloured background bands and three houses."""
    t.speed(10)
    # Wide blue strokes across the top half — presumably the sky.
    t.color("blue")
    t.width(300)
    t.goto(350,0)
    t.goto(350,300)
    t.goto(-350,300)
    t.goto(-350,0)
    t.goto(0,0)
    t.goto(0,-240)
    # Brown band across the bottom — the ground.
    t.color("sienna")
    t.width(400)
    t.goto(350,-240)
    t.goto(-350,-240)
    t.up()
    t.width(10)
    t.color("black")
    t.goto(0,0)
    t.down()
    t.speed(10)
    # Three houses: centre, left and right.
    house()
    t.up()
    t.goto(-200,0)
    t.down()
    house()
    t.up()
    t.goto(200,0)
    t.down()
    house()
    # Green strip in front of the houses.
    t.speed(10)
    t.width(40)
    t.color("green")
    t.up()
    t.goto(-550,-25)
    t.down()
    t.goto(550,-25)
    t.goto(0,-25)
    t.color("black")
#gres()#
def kvadrat():
    """Draw a 100x100 square."""
    for i in range(4):
        t.forward(100)
        t.left(90)
#kvadrat()#
def figyra(n):
    """Draw a regular n-sided polygon with 30 px sides."""
    for i in range(n):
        t.forward(30)
        t.left(360/n)
def yzor(d):
    """Draw an ornament: d heptagons rotated evenly around the current point."""
    t.speed (8)
    for i in range(d):
        figyra(7)
        t.left(360/d)
#yzor(45)#
def up1():
    """Key handler: step the turtle 10 px forward (Up arrow)."""
    t.forward(10)
def down2():
    """Key handler: step the turtle 10 px backward (Down arrow)."""
    t.backward(10)
def left3():
    """Key handler: rotate the turtle 10 degrees left (Left arrow)."""
    t.left(10)
def right4():
    """Key handler: rotate the turtle 10 degrees right (Right arrow)."""
    t.right(10)
def red5():
    """Key handler: switch the pen colour to red ('m' key)."""
    t.color("red")
# Bind the keyboard controls, then hand control to the turtle event loop.
t.screen.onkeypress(left3,"Left")
t.screen.onkeypress(right4,"Right")
t.screen.onkeypress(down2,"Down")
t.screen.onkeypress(up1,"Up")
t.screen.onkeypress(red5,"m")
t.screen.listen()
t.screen.mainloop()
| Sasha2011a/- | рисовалка.py | рисовалка.py | py | 2,023 | python | en | code | 0 | github-code | 13 |
73845810257 | import os
import shutil
from pathlib import Path
from typing import List, Optional, Union
class DisplayablePath:
    """One node of a directory tree, renderable with ``tree``-style prefixes."""

    display_filename_prefix_middle = "├──"
    display_filename_prefix_last = "└──"
    display_parent_prefix_middle = "    "
    display_parent_prefix_last = "│   "

    def __init__(self, path, parent_path, is_last):
        self.path = Path(str(path))
        self.parent = parent_path
        self.is_last = is_last
        # Depth is one more than the parent's; the root sits at depth 0.
        if self.parent:
            self.depth = self.parent.depth + 1
        else:
            self.depth = 0

    @classmethod
    def make_tree(cls, root, parent=None, is_last=False, criteria=None, ignore=None):
        """Yield DisplayablePath nodes for *root* and its descendants, depth-first.

        *criteria* is a predicate ``(path, ignore) -> bool``; entries for which
        it returns False are pruned.  *ignore* is a list of substrings matched
        against the path text by the default criteria.
        """
        if ignore is None:
            ignore = []
        root = Path(str(root))
        criteria = criteria or cls._default_criteria

        displayable_root = cls(root, parent, is_last)
        yield displayable_root

        # BUGFIX: filter on each child *path*, not the (constant) root, so
        # ignored files/directories are actually pruned from the listing.
        children = sorted(
            [path for path in root.iterdir() if criteria(path, ignore)],
            key=lambda s: str(s).lower(),
        )
        count = 1
        for path in children:
            is_last = count == len(children)
            if path.is_dir():
                yield from cls.make_tree(
                    path,
                    parent=displayable_root,
                    is_last=is_last,
                    criteria=criteria,
                    ignore=ignore,
                )
            else:
                yield cls(path, displayable_root, is_last)
            count += 1

    @classmethod
    def _default_criteria(cls, path, ignore):
        """Return False when any *ignore* substring occurs in *path*'s text."""
        for fragment in ignore:
            if fragment in str(path):
                return False
        return True

    @property
    def display_name(self):
        """File name, with a trailing slash for directories."""
        if self.path.is_dir():
            return self.path.name + "/"
        return self.path.name

    def displayable(self):
        """Return this node's fully prefixed line of the tree listing."""
        if self.parent is None:
            return self.display_name

        _filename_prefix = (
            self.display_filename_prefix_last
            if self.is_last
            else self.display_filename_prefix_middle
        )
        parts = ["{!s} {!s}".format(_filename_prefix, self.display_name)]

        parent = self.parent
        while parent and parent.parent is not None:
            parts.append(
                self.display_parent_prefix_middle
                if parent.is_last
                else self.display_parent_prefix_last
            )
            parent = parent.parent

        return "".join(reversed(parts))
def build_settings_path(default_path: Union[str, Path]) -> str:
    """Normalise a settings path to ``str``.

    The original indexed ``default_path[0]``, which raises TypeError for a
    ``Path`` despite the declared Union type — and both branches returned the
    same value anyway.  Coercing with ``str`` preserves the behaviour for
    strings and fixes the ``Path`` case.
    """
    return str(default_path)
def loader(length, index, text) -> None:
    """Print a one-line text progress bar: ``text | ==== ... | NN%``.

    NOTE(review): when the bar is one notch short of full, the function first
    recurses with ``index + 1`` and then still falls through to its own
    print, so that call emits two lines — confirm this is intended.
    """
    max_sharps = 10
    percent = index * 100 // length
    sharps = index * max_sharps // length
    if sharps == max_sharps - 1:
        loader(length, index + 1, text)
    print(
        text
        + " | "
        + "".join(["==" for _ in range(sharps)])
        + " | "
        + str(percent)
        + "%"
    )
def copytree(
    src: str,
    dst: str,
    symlinks: bool = False,
    ignore: Optional[List[str]] = None,
) -> None:
    """Recursively copy *src* into *dst*, creating directories as needed.

    A file is copied only when it is missing at the destination or the source
    is more than one second newer (mtime comparison).

    NOTE(review): *symlinks* and *ignore* are accepted and forwarded to the
    recursive calls but never consulted — confirm whether that is intended.
    """
    os.makedirs(dst, exist_ok=True)
    for entry in os.listdir(src):
        src_path = os.path.join(src, entry)
        dst_path = os.path.join(dst, entry)
        if os.path.isdir(src_path):
            copytree(src_path, dst_path, symlinks, ignore)
        elif (not os.path.exists(dst_path)
              or os.stat(src_path).st_mtime - os.stat(dst_path).st_mtime > 1):
            # copy2 preserves metadata (mtime), keeping the freshness check stable.
            shutil.copy2(src_path, dst_path)
def remove_tree(path):
    """Delete *path* recursively; missing paths are silently ignored."""
    return shutil.rmtree(path, ignore_errors=True)
| Kel0/django-parrallel-sessions | dps/utils.py | utils.py | py | 3,824 | python | en | code | 0 | github-code | 13 |
N = int(input())
triangle = [list(map(int, input().split())) for _ in range(N)]

# dp[x][t] = best path sum from the apex down to cell (x, t).
dp = [[0] * (i + 1) for i in range(N)]

for x in range(N):
    if x == 0:
        dp[0][0] = triangle[0][0]
        continue
    # Row edges have only one adjacent parent each.
    dp[x][0] = dp[x - 1][0] + triangle[x][0]
    dp[x][-1] = dp[x - 1][-1] + triangle[x][-1]
    # Interior cells take the better of their two parents.
    for t in range(1, x):
        dp[x][t] = max(dp[x - 1][t - 1], dp[x - 1][t]) + triangle[x][t]

print(max(dp[-1]))
5294441894 | import pickle
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import csv
import datetime
# test_modelos_pickle = {'l_modelo' : l_modelo,
# 'l_total_reward': l_rewards,
# 'l_info': l_info}
# Salvo resultados
raiz = './results/'
#files = sorted(files, key=lambda x: os.path.getmtime(os.path.join(raiz, x)))
modelos = ['DQN_V2', 'DQN_LSTM_V2', 'A2C_V1', 'PPO_LR_0.0005']
periodos = ['2008-2009', '2020-2022', '2012-2014', '2018-2020']
ETF_names = ['ETF-CONSUMER', 'ETF-CONSUMER-BASIS', 'ETF-ENERGY', 'ETF-FINANCIAL', 'ETF-HEALTH-CARE', 'ETF-INDUSTRIAL',
'ETF-MATERIALS', 'ETF-REAL-STATE', 'ETF-TECHNOLOGY', 'ETF-UTILITIES']
results = []
for periodo in periodos:
fname = raiz + 'test_con_portfolio_' + periodo + '.pickle'
# pd.DataFrame(columns=['Fichero', 'l_total_reward', 'mean_reward', 'std_reward', 'last portfolio'])
portfolios = []
if os.path.isfile(fname):
data = pickle.load(open(fname,'rb'))
for i in range(len(data['l_modelo'])):
if (i+1) % 10 == 0:
if ('DQN' in data['l_modelo'][i]) or ('LSTM' in data['l_modelo'][i]):
portfolios.append(data['l_info'][i][-1]['portfolio'])
else:
portfolios.append(data['l_info'][i]['portfolio'])
fig, ax = plt.subplots()
n = 10
x = np.arange(n)
width = 1/5
for i, modelo in enumerate(modelos):
plt.bar(x + (i-1.5) * width, portfolios[i], width=width, label = modelo)
plt.xticks(x, ETF_names, rotation='vertical')
plt.ylabel("Posición")
plt.title("Portfolio final " + periodo)
plt.legend(loc='best')
plt.tight_layout()
plt.savefig('./figures/comparativa_portfolio_final_full_invested_'+periodo+'.png', format='png')
plt.show()
| falamo1969/AgenteInversionTFM | resumen_last_portfolio_full_invested.py | resumen_last_portfolio_full_invested.py | py | 1,875 | python | en | code | 0 | github-code | 13 |
1363875863 | from flask import Flask, render_template, request
from recipe_scrapers import scrape_me
import sqlite3
app = Flask(__name__) # create app instance
@app.route("/")
def index(): # Home page of the KitchenCompanion app
    """Render the static home page."""
    return render_template('index.html', title = 'Home')
@app.route("/view")  # Connects to database, fetches all records, and returns view.html to display list
def view():
    """List every stored recipe.

    Fetches all rows from ``recipes`` and closes the connection afterwards
    (the original implementation leaked the connection).
    """
    con = sqlite3.connect("test.db")
    try:
        con.row_factory = sqlite3.Row  # rows accessible by column name in the template
        rows = con.execute("select * from recipes").fetchall()
    finally:
        con.close()
    return render_template('view.html', rows = rows, title = 'View Recipes')
@app.route("/add",methods = ["POST","GET"])  # Form page to input recipe URL to be added to DB
def add():
    """Render the add-recipe form, listing the supported source sites.

    Closes the SQLite connection after fetching (the original leaked it).
    """
    con = sqlite3.connect("test.db")
    try:
        con.row_factory = sqlite3.Row
        webrows = con.execute("select * from sources").fetchall()
    finally:
        con.close()
    return render_template('add.html', webrows = webrows, title = 'Add Recipes')
@app.route("/save",methods = ["POST","GET"]) # Accepts add.html form URL, uses recipe_scrapers package, returns recipe strings. Adds each to DB
def save():
    """Scrape the submitted recipe URL and persist it to the recipes table.

    Fixes two defects in the original: ``con.rollback()`` raised NameError
    when scraping failed before the connection was opened, and a plain GET
    returned None (an HTTP 500); both paths now render save.html.
    """
    msg = "msg"  # status message shown in the template
    pagetitle = 'Error'
    if request.method == "POST":
        con = None
        try:
            recipe = request.form["recipe"]
            scraper = scrape_me(recipe)  # url as a string, it can be url from any site listed in the README
            title = scraper.title()
            totalTime = scraper.total_time()
            yields = scraper.yields()
            # Ingredients come back as a list; store them as one CSV string.
            ingredientsString = ', '.join(scraper.ingredients())
            instructions = scraper.instructions()
            con = sqlite3.connect("test.db")
            con.execute(
                "INSERT into recipes (title, totaltime,yields,ingredients,instructions) values (?,?,?,?,?)",
                (title, totalTime, yields, ingredientsString, instructions),
            )
            con.commit()
            msg = "Recipe successfully added!"
            pagetitle = 'Success!'
        except Exception:
            # Roll back only if a connection was actually opened.
            if con is not None:
                con.rollback()
            msg = "Unable to add recipe :("
            pagetitle = 'Error'
        finally:
            if con is not None:
                con.close()
    return render_template("save.html",title = pagetitle, msg = msg)
@app.route("/delete",methods = ["POST","GET"]) # Presents delete.html form, user inputs recipe ID to delete from DB. Not really needed....
def delete(): # call method & return html template
    """Render the delete-recipe form; the deletion itself happens in /deletestatus."""
    return render_template('delete.html', title = 'Delete Recipe')
@app.route("/deletestatus",methods = ["POST","GET"]) # Delete recipe from DB with input from /delete method input
def deletestatus():
    """Delete the recipe whose id was submitted from the /delete form.

    Fixes the original SQL: it used an invalid ``??`` placeholder and passed
    the raw id string (which sqlite3 iterates character by character) as the
    parameter sequence.
    """
    id = request.form["id"]  # Unique recipe ID from VIEW to be used for deletion
    # The connection context manager commits on success / rolls back on error.
    with sqlite3.connect("test.db") as con:
        try:
            con.execute("delete from recipes where id = ?", (id,))
            msg = "Recipe successfully deleted"
            pagetitle = 'Success!'
        except sqlite3.Error:
            msg = "Unable to delete recipe :("
            pagetitle = 'Error'
    return render_template("deletestatus.html",title = pagetitle, msg = msg)
@app.route("/recipe",methods = ["POST","GET"]) # Page to view single recipe chosen from view.html page
def recipe():
    """Show a single recipe selected on the view page.

    Fixes the original: ``fetchall()`` returned a list of rows and
    ``singlerow[1]`` picked the second *row* instead of the title column;
    on failure ``singlerow`` was undefined, raising NameError in render.
    """
    singlerow = None
    title = 'Recipe'
    if request.method == "POST":
        try:
            id = request.form["recipeid"]
            with sqlite3.connect("test.db") as con:
                cur = con.cursor()
                sqlite_select_query = """SELECT * from recipes where id = ?"""
                cur.execute(sqlite_select_query, (id, ))
                # Exactly one recipe is expected for a unique id.
                singlerow = cur.fetchone()
            if singlerow is not None:
                title = singlerow[1]  # column 1 is the recipe title
        except sqlite3.Error:
            title = 'Recipe'
    # NOTE(review): recipe.html previously received a list of rows — confirm
    # the template handles a single row (or None).
    return render_template('recipe.html', singlerow = singlerow, title = title)
if __name__ == "__main__": # on running python app.py
    # Debug mode enables the reloader and in-browser tracebacks; disable in production.
    app.run(debug=True) # run the flask app
#TODO Actual CSS styling, bug fixes in app.py, refactoring, code indentation, recipe presentation, grid & flexbox layouts, search, toasts for add/deletions, unit conversions, much much more | WinSpartan/KitchenCompanion | kitchen_app/app.py | app.py | py | 4,931 | python | en | code | 0 | github-code | 13 |
class LinearValueFunction:
    """Action-value function using a single-tiling tile coding (state aggregation).

    States are (x, y) positions inside the BOUNDARY_* rectangle; each square
    tile holds one weight per action, so the estimate is piecewise constant.
    """

    def __init__(self, step_size):
        self.step_size = step_size
        # Use a tile coding with only a single tiling (i.e. state aggregation):
        # a grid of square tiles, one weight per (tile row, tile col, action).
        self.tile_size = 16
        self.w = np.zeros(((BOUNDARY_SOUTH - BOUNDARY_NORTH + self.tile_size) // self.tile_size,
                           (BOUNDARY_EAST - BOUNDARY_WEST + self.tile_size) // self.tile_size,
                           4))

    def _tile_index(self, state):
        """Map an (x, y) state to its (row, col) tile coordinates.

        Previously this computation was duplicated in value/values/learn.
        """
        return ((state[1] - BOUNDARY_NORTH) // self.tile_size,
                (state[0] - BOUNDARY_WEST) // self.tile_size)

    # Return estimated action value of given state and action
    def value(self, state, action):
        if is_goal_reached(state):
            return 0.0
        row, col = self._tile_index(state)
        return self.w[row, col, action]

    # Return vector of estimated action values of given state, for each action
    def values(self, state):
        if is_goal_reached(state):
            return np.zeros(4)
        row, col = self._tile_index(state)
        return self.w[row, col, :]

    # learn with given state, action and target
    def learn(self, state, action, target):
        row, col = self._tile_index(state)
        self.w[row, col, action] += self.step_size * (target - self.w[row, col, action])

    # Return estimated state value, based on the estimated action values
    def state_value(self, state):
        return np.max(self.values(state))
| ottomattas/INFOMAML | Assignments/linearvf.py | linearvf.py | py | 1,661 | python | en | code | 0 | github-code | 13 |
34537534108 | import socket
import json
import numpy as np
import matplotlib.pyplot as plot
class RingBuffer:
    """Fixed-capacity buffer that keeps the last *size_max* appended items.

    ``get()`` returns the stored items oldest-first.  The original used the
    dynamic class-reassignment trick (swapping in a private ``__Full`` class
    once the buffer filled); this version keeps a single explicit code path,
    and ``get()`` always returns a fresh list rather than exposing internals.
    """

    def __init__(self, size_max):
        self.max = size_max
        self.data = []
        self.cur = 0  # index of the oldest element once the buffer is full

    def append(self, x):
        """Add an element, evicting the oldest once capacity is reached."""
        if len(self.data) < self.max:
            self.data.append(x)
        else:
            self.data[self.cur] = x
            self.cur = (self.cur + 1) % self.max

    def get(self):
        """Return the buffered elements in insertion order (oldest first)."""
        return self.data[self.cur:] + self.data[:self.cur]
HOST = 'X.X.X.X' # IP address this server binds to
PORT = 6531 # Port to listen on (use ports > 1023)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    s.bind((HOST, PORT))
    s.listen()
    print("Starting server at: ", (HOST, PORT))
    conn, addr = s.accept()  # blocks until the sensor client connects
    # One ring buffer (last 10 samples) per plotted channel.
    RBX=RingBuffer(10)
    RBY=RingBuffer(10)
    RBZ=RingBuffer(10)
    RBGX=RingBuffer(10)
    RBGY=RingBuffer(10)
    RBGZ=RingBuffer(10)
    RBT=RingBuffer(10)
    with conn:
        print("Connected at", addr)
        f=plot.figure()
        while True:
            data = conn.recv(1024).decode('utf-8')
            print("Received from socket server:", data)
            if (data.count('{') != 1):
                # Incomplete data are received: the TCP read contained more
                # (or less) than one JSON object.  Keep the first span that
                # starts with '{' and re-append '}'.
                # NOTE(review): this drops any other objects in the read and
                # assumes at least one complete object is present — confirm.
                choose = 0
                buffer_data = data.split('}')
                while buffer_data[choose][0] != '{':
                    choose += 1
                data = buffer_data[choose] + '}'
            obj = json.loads(data)
            print(obj)
            t = obj['s']  # sample index (plotted on the x axis below)
            x=obj['x']
            y=obj['y']
            z=obj['z']
            gx=obj['gx']
            gy=obj['gy']
            gz=obj['gz']
            RBX.append(x)
            RBY.append(y)
            RBZ.append(z)
            RBGX.append(gx)
            RBGY.append(gy)
            RBGZ.append(gz)
            RBT.append(t)
            # Redraw the 2x3 grid: x/y/z channels on top, gx/gy/gz below.
            f.clear()
            ax = f.subplots(2,3,sharex='col',sharey='row')
            ax[0][0].scatter(RBT.get(), RBX.get(), c='blue')
            print(RBT.get())
            ax[0][1].scatter(RBT.get(), RBY.get(), c='c')
            ax[0][2].scatter(RBT.get(), RBZ.get(), c='g')
            ax[1][0].scatter(RBT.get(), RBGX.get(), c='k')
            ax[1][1].scatter(RBT.get(), RBGY.get(), c='m')
            ax[1][2].scatter(RBT.get(), RBGZ.get(), c='r')
            name_list=['ax','ay','az','gx','gy','gz']
            for i in range(2):
                for j in range(3):
                    ax[i][j].set_xlabel("sample num")
                    ax[i][j].legend([name_list[3*i+j]])
            f.canvas.draw()
            f.canvas.flush_events()
            plot.pause(0.5)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.