id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
11339599 | <gh_stars>0
import asyncio
from collections import deque
def split_into_even_size(lst, size):
    """Split *lst* into consecutive chunks of at most *size* items.

    The final chunk is shorter when len(lst) is not a multiple of *size*.
    Works on any sliceable sequence (lists, strings, tuples, ...).
    """
    chunks = []
    for start in range(0, len(lst), size):
        chunks.append(lst[start:start + size])
    return chunks
import datetime
class ExpiredSet:
    """A collection whose members carry a per-member expiry time.

    Members are stored as {value: expiry_datetime}.  Expired members are
    only purged when maintain() is called, so length()/to_list() may still
    report members whose TTL has elapsed.
    """

    def __init__(self, seconds, max_seconds=None):
        # `seconds` is unused but kept for interface compatibility; the TTL
        # is supplied per item in add().  `max_seconds`, when set, caps the
        # total remaining TTL any member can accumulate.
        self.container = {}
        self.max_seconds = max_seconds

    def add(self, val, seconds):
        """Add *val* with a TTL of *seconds*, extending any unexpired TTL."""
        duration = datetime.timedelta(seconds=seconds)
        now = datetime.datetime.now()
        expire_at = self.container.get(val)
        if expire_at and expire_at > now:
            # Re-adding an unexpired member extends its remaining lifetime.
            duration += expire_at - now
        if self.max_seconds:
            # BUG FIX: the original read the bare name `max_seconds`
            # (a NameError whenever this branch ran); the cap lives on the
            # instance attribute.
            max_duration = datetime.timedelta(seconds=self.max_seconds)
            if duration > max_duration:
                duration = max_duration
        self.container[val] = now + duration

    def remove(self, val):
        """Remove *val*; raises KeyError if it is not present."""
        self.container.pop(val)

    def length(self):
        """Number of stored members (expired-but-unpurged ones included)."""
        return len(self.container)

    def maintain(self):
        """Purge every member whose expiry time has passed."""
        now = datetime.datetime.now()
        for key, expire_at in list(self.container.items()):
            if now >= expire_at:
                self.remove(key)

    def intersection(self, other):
        """Members present in both sets (expiry values are always truthy datetimes)."""
        return [key for key in list(self.container.keys()) if other.container.get(key)]

    def cosine_similarity(self, other):
        # Disabled in the original (the whole body is a string literal) and
        # kept disabled: container values are datetimes, so a numeric cosine
        # over them is not meaningful.  Returns None.
        '''
        molecular = sum(self.container[k]*other.container[k] for k in self.container.keys() if k in other.container)
        if not molecular:
            return 0
        denominator = math.sqrt(sum(i*i for i in self.container.values())) * math.sqrt(sum(i*i for i in other.container.values()))
        return molecular / denominator
        '''

    def jaccard_similarity(self, other):
        """|A ∩ B| / |A ∪ B|; returns 0 when the intersection is empty."""
        n = len(self.intersection(other))
        if not n:
            return 0
        return n / (len(self.container) + len(other.container) - n)

    def to_list(self):
        """Current member values (expired-but-unpurged ones included)."""
        return list(self.container.keys())
class MergedStream:
    '''
    Merge several async iterators into one async iterator that yields items
    in completion order.

    If an iterator yields a None value,
    it assume that the iterator is terminated
    and ignore the result of None
    '''
    def __init__(self, *iterables):
        self._iterables = list(iterables)   # async iterables not yet scheduled
        self._wakeup = asyncio.Event()      # set whenever a result becomes available
        self.done = deque()                 # (future, source-iterator) pairs ready to consume
        self.next_futs = {}                 # in-flight __anext__ future -> its iterator

    def _add_iters(self, next_futs, on_done):
        # Schedule an __anext__() call for every pending iterable and track
        # the resulting future in next_futs.
        while self._iterables:
            it = self._iterables.pop()
            it = it.__aiter__()
            nfut = asyncio.ensure_future(it.__anext__())
            nfut.add_done_callback(on_done)
            next_futs[nfut] = it
        return next_futs

    async def __aiter__(self):
        done = self.done
        next_futs = self.next_futs
        def on_done(nfut):
            # Move the completed future (with its source iterator) to the
            # done queue and wake the consumer loop.
            self.done.append((nfut, next_futs.pop(nfut)))
            #done[nfut] = next_futs.pop(nfut)
            self._wakeup.set()
        self._add_iters(next_futs, on_done)
        try:
            # Keep running while anything is in flight or already finished.
            while next_futs or self.done:
                await self._wakeup.wait()
                self._wakeup.clear()
                while self.done:
                    nfut, it = self.done.popleft()
                    try:
                        ret = nfut.result()
                    except StopAsyncIteration:
                        continue        # this iterator is exhausted; drop it
                    if ret is None:
                        continue        # None marks termination per the class docstring
                    if it:
                        # Futures added via append_future map to None, so only
                        # real iterators are re-queued for their next item.
                        self._iterables.append(it)
                    yield ret
                    if self._iterables:
                        self._add_iters(next_futs, on_done)
        finally:
            # if the generator exits with an exception, or if the caller stops
            # iterating, make sure our callbacks are removed
            for nfut in next_futs:
                nfut.remove_done_callback(on_done)

    def append(self, new_iter):
        # Add another async iterable to the merge and wake the consumer so
        # it gets scheduled on the next loop pass.
        self._iterables.append(new_iter)
        self._wakeup.set()

    def append_future(self, new_future):
        # Add a single awaitable whose one result is merged into the stream.
        def on_done(nfut):
            self.done.append((nfut, self.next_futs.pop(nfut)))
            self._wakeup.set()
        nfut = asyncio.ensure_future(new_future)
        nfut.add_done_callback(on_done)
        self.next_futs[nfut] = None     # None: no iterator to resume afterwards
async def wrapper(coru, semaphore, sec):
    """Await *coru* while holding *semaphore*, then sleep *sec* seconds
    before releasing it.  Returns the coroutine's result."""
    async with semaphore:
        result = await coru
        await asyncio.sleep(sec)
    return result
def limited_api(n, sec, corus):
    """Gather *corus* concurrently with at most *n* running at once, each
    holding its slot for an extra *sec* seconds (rate limiting)."""
    gate = asyncio.Semaphore(n)
    return asyncio.gather(*(wrapper(coru, gate, sec) for coru in corus))
async def test():
    """Demo: merge three delayed futures through a MergedStream."""
    async def wait(n):
        await asyncio.sleep(n)
        if n == 2:
            print(2)
            return None   # None results are swallowed by MergedStream
        return n
    # BUG FIX: the original called MergedStream(2); the positional 2 was
    # stored as an async iterable and crashes __aiter__ with an
    # AttributeError when it is popped.  No initial iterables are wanted.
    m = MergedStream()
    m.append_future(wait(1))
    m.append_future(wait(2))
    m.append_future(wait(3))
    async for i in m:
        print(i)
    # NOTE(review): the original dump's indentation is ambiguous here;
    # appending inside the loop would grow the stream forever, so these
    # post-iteration appends are kept outside it (they are scheduled but
    # never consumed).  Confirm against the author's intent.
    m.append_future(wait(3))
    m.append_future(wait(2))
    m.append_future(wait(1))
if __name__ == "__main__":
    import time
    # Demo 1: merge delayed futures through MergedStream.
    asyncio.run(test())
    # Demo 2: ExpiredSet expiry behavior (timing-sensitive asserts below).
    a = ExpiredSet(3)
    a.add(0, 3)            # key 0 expires ~3s from now
    a.add(1, 5)            # key 1 expires ~5s from now
    b = ExpiredSet(3)
    b.add(0, 2)
    b.add(3, 3)
    # a={0,1}, b={0,3}: one shared key of three distinct -> 1/3
    print(a.jaccard_similarity(b))
    a.maintain()
    assert(a.to_list() == [0,1])
    assert(a.length() == 2)
    time.sleep(2)
    a.maintain()           # ~2s elapsed: nothing expired yet
    assert(a.to_list() == [0,1])
    assert(a.length() == 2)
    time.sleep(2)
    a.maintain()           # ~4s elapsed: key 0 (3s TTL) has expired
    assert(a.to_list() == [1])
    assert(a.length() == 1)
    a.add(1, 2)            # extend key 1: ~1s remaining + 2s more
    time.sleep(2)
    a.maintain()           # key 1 still alive thanks to the extension
    assert(a.to_list() == [1])
    assert(a.length() == 1)
    time.sleep(2)
    a.maintain()           # now key 1 has expired too
    assert(a.to_list() == [])
    assert(a.length() == 0)
| StarcoderdataPython |
6684893 | #stylus.py
#(c) <NAME>, Smith-Kettlewell Eye Research Institute
#code to support stylus, including markers printed on it
import numpy as np
import cv2
def detectMarkers_clean(gray, dictionary, parameters):
    """Run cv2.aruco.detectMarkers and strip the singleton dimensions.

    Returns (corners, ids): when anything is detected, corners becomes an
    (n, 4, 2) array and ids an (n,) array; otherwise the raw (empty) values
    are passed through unchanged.
    """
    corners, ids, _ = cv2.aruco.detectMarkers(gray, dictionary=dictionary, parameters=parameters)
    if len(corners) > 0:
        corners = np.array(corners)[:, 0, :, :]   # drop the useless dimension
        ids = ids[:, 0]                           # drop the useless dimension
    return corners, ids
def arrange_corresponding_points(objbases, corners, ids):
    """Build matching arrays of detected 2D image points and 3D model points.

    objbases: dict of marker id -> (4, 3) array of model-space corners.
    corners:  (n, 4, 2) array of detected marker corners.
    ids:      (n,) array of detected marker ids.

    Returns (True, objpoints3, imgpoints3) where objpoints3 has shape
    (4m, 3, 1) and imgpoints3 has shape (4m, 2, 1) for the m markers whose
    id appears in objbases, or (False, None, None) when none do.
    """
    imgpoints, objpoints = [], []
    for this_id in ids:
        if this_id in objbases:  # ids not part of the model are ignored
            # index of the first detection carrying this id
            that_ind = np.where(ids == this_id)[0][0]
            imgpoints.append(corners[that_ind, :, :])
            objpoints.append(objbases[this_id])
    # (the original also computed an unused per-marker `center`; removed)
    if not imgpoints:
        return False, None, None
    m = len(imgpoints)
    # contiguous (4m, 2, 1) / (4m, 3, 1) layouts, as expected by cv2.solvePnP
    imgpoints3 = np.ascontiguousarray(np.array(imgpoints)).reshape((m * 4, 2, 1))
    objpoints3 = np.ascontiguousarray(np.array(objpoints)).reshape((m * 4, 3, 1))
    return True, objpoints3, imgpoints3
def projection_error(imgpoints, objpoints, rvec, tvec, mtx, dist):
    """Per-corner reprojection error for a solvePnP pose.

    imgpoints: (n, 2, 1) detected points; objpoints: (n, 3, 1) model points.
    Returns a list of n per-point distances between each detected point and
    its reprojection under (rvec, tvec, mtx, dist).
    """
    n = imgpoints.shape[0]
    reprojected, _ = cv2.projectPoints(objpoints, np.squeeze(rvec), np.squeeze(tvec), mtx, dist)
    return [cv2.norm(reprojected[i, 0, :] - imgpoints[i, :, 0]) for i in range(n)]
def stylus_half_inch_dowel(offset, objbases, corners, ids, mtx, dist):
    """Locate the tip of the 1/2" square-dowel stylus (variable length).

    Returns (found, tip_xyz, rvec, tvec); on failure, found is False and the
    remaining values are zero vectors shaped like *offset*.
    """
    ok, objpoints3, imgpoints3 = arrange_corresponding_points(objbases, corners, ids)
    if not ok:
        return False, 0 * offset, 0 * offset, 0 * offset
    _, rvec, tvec = cv2.solvePnP(objpoints3, imgpoints3, mtx, dist)
    errors = projection_error(imgpoints3, objpoints3, rvec, tvec, mtx, dist)
    # accept only if the worst reprojection error is under 10 px and at
    # least two markers (8 corners) were matched
    if max(errors) < 10 and len(errors) >= 8:
        R, _ = cv2.Rodrigues(rvec)
        tip = (R @ offset) + np.squeeze(tvec)  # XYZ tip location in camera coordinates
        return True, tip, rvec, tvec
    return False, 0 * offset, 0 * offset, 0 * offset
def stylus_1_inch_dowel(offset, objbases, corners, ids, mtx, dist):
    """Locate the tip of the 1" square-dowel stylus (variable length).

    Returns (found, tip_xyz, rvec, tvec); on failure, found is False and the
    remaining values are zero vectors shaped like *offset*.
    """
    ok, objpoints3, imgpoints3 = arrange_corresponding_points(objbases, corners, ids)
    if not ok:
        return False, 0 * offset, 0 * offset, 0 * offset
    _, rvec, tvec = cv2.solvePnP(objpoints3, imgpoints3, mtx, dist)
    errors = projection_error(imgpoints3, objpoints3, rvec, tvec, mtx, dist)
    # accept only if the worst reprojection error is under 10 px and at
    # least two markers (8 corners) were matched
    if max(errors) < 10 and len(errors) >= 8:
        R, _ = cv2.Rodrigues(rvec)
        tip = (R @ offset) + np.squeeze(tvec)  # XYZ tip location in camera coordinates
        return True, tip, rvec, tvec
    return False, 0 * offset, 0 * offset, 0 * offset
def stylus_2_inch_dowel(offset, objbases, corners, ids, mtx, dist):
    """Locate the tip of the 2" square-dowel stylus (variable length).

    Returns (found, tip_xyz, rvec, tvec); on failure, found is False and the
    remaining values are zero vectors shaped like *offset*.
    """
    ok, objpoints3, imgpoints3 = arrange_corresponding_points(objbases, corners, ids)
    if not ok:
        return False, 0 * offset, 0 * offset, 0 * offset
    _, rvec, tvec = cv2.solvePnP(objpoints3, imgpoints3, mtx, dist)
    errors = projection_error(imgpoints3, objpoints3, rvec, tvec, mtx, dist)
    # accept only if the worst reprojection error is under 10 px and at
    # least two markers (8 corners) were matched
    if max(errors) < 10 and len(errors) >= 8:
        R, _ = cv2.Rodrigues(rvec)
        tip = (R @ offset) + np.squeeze(tvec)  # XYZ tip location in camera coordinates
        return True, tip, rvec, tvec
    return False, 0 * offset, 0 * offset, 0 * offset
def stylus_2_inch_box(offset, objbases, corners, ids, mtx, dist):
    """Locate the tip of the 2" box stylus (variable length).

    Returns (found, tip_xyz, rvec, tvec); on failure, found is False and the
    remaining values are zero vectors shaped like *offset*.
    """
    ok, objpoints3, imgpoints3 = arrange_corresponding_points(objbases, corners, ids)
    if not ok:
        return False, 0 * offset, 0 * offset, 0 * offset
    _, rvec, tvec = cv2.solvePnP(objpoints3, imgpoints3, mtx, dist)
    errors = projection_error(imgpoints3, objpoints3, rvec, tvec, mtx, dist)
    #print('errors:',errors)
    # accept only if the worst reprojection error is under 10 px and at
    # least one marker (4 corners) was matched
    if max(errors) < 10 and len(errors) >= 4:
        R, _ = cv2.Rodrigues(rvec)
        tip = (R @ offset) + np.squeeze(tvec)  # XYZ tip location in camera coordinates
        return True, tip, rvec, tvec
    return False, 0 * offset, 0 * offset, 0 * offset
#########
class Markers:
    """Aruco dictionary plus tuned detector parameters for the stylus markers."""

    def __init__(self):
        # probe markers for stylus:
        self.arucodict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_250)
        params = cv2.aruco.DetectorParameters_create()
        # CORNER_REFINE_CONTOUR helps with aruco markers but not the charuco board
        params.cornerRefinementMethod = cv2.aruco.CORNER_REFINE_CONTOUR
        params.cornerRefinementWinSize = 3
        # empirically, these adaptive-threshold window sizes improve marker
        # localization (library defaults are 3 and 23; step stays at 10)
        params.adaptiveThreshWinSizeMin = 33
        params.adaptiveThreshWinSizeMax = 53
        self.arucodetectparam = params
class Stylus:
    """Geometry model and pose/tip solver for a marker-covered stylus.

    `which` selects the physical stylus type; `length` is the distance (cm)
    from the origin marker (id 0, on the top/"eraser" end) to the tip;
    `mtx` / `dist` are the camera intrinsics and distortion coefficients.

    The original code repeated ~40 lines of model construction for each of
    the three dowel sizes (differing only in the square width `b`); that is
    consolidated into _build() with behavior preserved.
    """

    def __init__(self, which, length, mtx, dist):
        self.which = which  # which specifies the type of stylus
        self.mtx = mtx
        self.dist = dist
        self.PROBE_LENGTH = length
        # NOTE(review): as in the original, all three dowel sizes use the
        # stylus_half_inch_dowel solver; the three dowel solver functions are
        # identical, so this makes no behavioral difference.
        if which == 'stylus_half_inch_dowel':
            self.func = stylus_half_inch_dowel
            self._build(1.27, dowel=True)
        elif which == 'stylus_1_inch_dowel':
            self.func = stylus_half_inch_dowel
            self._build(2.54, dowel=True)
        elif which == 'stylus_2_inch_dowel':
            self.func = stylus_half_inch_dowel
            self._build(2 * 2.54, dowel=True)
        elif which == 'stylus_2_inch_box':
            self.func = stylus_2_inch_box
            self._build(5.08, dowel=False)
        # As in the original, an unrecognized `which` leaves the model
        # attributes unset.

    def _build(self, b, dowel):
        """Construct the marker model for square width *b* (cm).

        The origin is the center of marker id 0 on the top face; side
        markers sit at negative z down the shaft.  Dowel styluses carry a
        column of 11 markers per side (ids 1-11, 21-31, 41-51, 61-71); the
        box stylus carries one marker per side (ids 1-4).
        """
        a = 500 / 800 * b      # width in cm of the Aruco marker itself
        margin = (b - a) / 2   # margin in cm between marker and edge of paper
        offset = np.array([0., 0., -self.PROBE_LENGTH])  # tip in model coords
        objbases = {}
        # top ("eraser") marker, id 0
        objbases[0] = np.array([[-a/2, a/2, 0.], [a/2, a/2, 0.],
                                [a/2, -a/2, 0.], [-a/2, -a/2, 0.]])
        # base marker for each of the four sides; corner order matches the
        # printed layout of the original tables
        sides = [
            np.array([[-a/2, -b/2, -margin], [a/2, -b/2, -margin],
                      [a/2, -b/2, -b+margin], [-a/2, -b/2, -b+margin]]),
            np.array([[b/2, -a/2, -margin], [b/2, a/2, -margin],
                      [b/2, a/2, -b+margin], [b/2, -a/2, -b+margin]]),
            np.array([[a/2, b/2, -margin], [-a/2, b/2, -margin],
                      [-a/2, b/2, -b+margin], [a/2, b/2, -b+margin]]),
            np.array([[-b/2, a/2, -margin], [-b/2, -a/2, -margin],
                      [-b/2, -a/2, -b+margin], [-b/2, a/2, -b+margin]]),
        ]
        step = np.array([0., 0., -b])  # translation to the next marker down the shaft
        if dowel:
            for side_index, base in enumerate(sides):
                first_id = 1 + 20 * side_index      # base ids 1, 21, 41, 61
                for j in range(11):                 # 11 markers per side
                    objbases[first_id + j] = base + j * step
        else:
            for side_index, base in enumerate(sides):
                objbases[1 + side_index] = base     # ids 1-4
        # flat (4n, 1, 3) array of every model corner, in dict-insertion order
        full_model3D = []
        for marker_id in objbases:
            for corner in range(4):
                full_model3D.append(objbases[marker_id][corner, :])
        full_model3D = np.array(full_model3D)[:, None, :]
        self.offset = offset
        self.full_model3D = full_model3D
        self.objbases = objbases
        self.a = a
        self.b = b
        self.margin = margin

    def apply_image(self, corners, ids):
        """Solve for the stylus pose and tip from one frame's detections.

        Sets self.visible, self.rvec, self.tvec, self.tip_XYZ (all reset to
        not-found defaults first) and stores the raw corners/ids.
        """
        self.visible = False
        self.rvec = None
        self.tvec = None
        self.tip_XYZ = None
        self.corners = corners
        self.ids = ids
        if len(corners) > 0:
            visible, tip_XYZ, rvec, tvec = self.func(self.offset, self.objbases, corners, ids, self.mtx, self.dist)
            self.rvec = np.squeeze(rvec)
            self.tvec = np.squeeze(tvec)
            self.visible = visible
            self.tip_XYZ = tip_XYZ
| StarcoderdataPython |
8144341 | #!/usr/bin/pyhon3
#coding:utf-8
#引入开发包
import requests
import os
from bs4 import BeautifulSoup
url = 'http://www.qiushu.cc/t/76675/23617439.html'
# Request headers that imitate a real browser.
# NOTE(review): Host/Referer point at www.80txt.com while url is on
# qiushu.cc — looks copied from an earlier scraper; confirm they are wanted.
req_header = {
    'Accept': '*/*',
    'Accept-Encoding': 'gzip, deflate',
    'Accept-Language': 'zh-CN,zh;q=0.8,zh-TW;q=0.7,zh-HK;q=0.5,en-US;q=0.3,en;q=0.2',
    'Connection': 'keep-alive',
    'Cookie': 'Hm_lvt_ac168fe809d6bd1c9e16493…1c9e16493d086e741e=1523111879',
    'DNT': '1',
    'Host': 'www.80txt.com',
    'Referer': 'http://www.80txt.com/txtml_76675.html',
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:59.0) Gecko/20100101 Firefox/59.0',
    'X-Requested-With': 'XMLHttpRequest'
}
# Fetch the current chapter page.
# BUG FIX: the headers were passed as `params` (i.e. appended to the query
# string); they must be sent with the `headers=` keyword to act as HTTP
# request headers.
r = requests.get(url, headers=req_header)
r.encoding = 'utf-8'
# Parse the page.
soup = BeautifulSoup(r.text, "html.parser")
for ss in soup.select("#stsm"):  # remove the #stsm elements before extracting text
    ss.decompose()
# Extract the chapter body text.
section_text = soup.select('.book_content')[0].text
# Write the text out; `with` guarantees the file is closed even on error
# (the original opened the file without closing it on exceptions).
with open('2.txt', 'wb') as fo:
    fo.write(section_text.encode('utf-8'))
print(section_text)
print('下载成功')
from dataclasses import dataclass


@dataclass
class SimulationParameters:
    """Configuration bundle for the traffic/queueing simulation.

    Converted from a hand-written __init__ to a dataclass: the field order
    matches the original constructor, so positional and keyword construction
    are unchanged, and __repr__/__eq__ come for free.  Field semantics are
    inferred from their names — confirm against the simulation driver.
    """
    simulation_time: float
    packet_length: int
    generation_constant: float
    queue_constant: float
    lambda_on: float
    lambda_off: float
    streams_number: int
    dropped_streams: int
| StarcoderdataPython |
150830 | <filename>AVM + DELTA.py
# Monte Carlo Valuation of a European Option in a Black-Scholes World
# With implementation of Antithetic and Delta-based control variate method
# by <NAME>
# 10/31/2016
from math import *
import numpy as np
import random
from scipy.stats import norm
# Computation of Black-Scholes
def CBS(S, K, T, r, sigma, t, option):
    """Black-Scholes price of a European option.

    S: spot, K: strike, T: expiry, r: risk-free rate, sigma: volatility,
    t: valuation time, option: 'Call' or 'Put'.  Any other option string
    returns None (matching the original behavior).
    """
    tau = T - t  # time to maturity
    vol = sigma * sqrt(tau)
    d1 = (log(S / K) + (r + sigma ** 2 / 2) * tau) / vol
    d2 = (log(S / K) + (r - sigma ** 2 / 2) * tau) / vol
    if option == 'Call':
        return S * norm.cdf(d1) - K * exp(-r * tau) * norm.cdf(d2)
    if option == 'Put':
        return K * exp(-r * tau) * norm.cdf(-d2) - S * norm.cdf(-d1)
# Initialize parameters
S = 100        # initial stock price
r = 0.06       # risk-free rate
sig = 0.2      # volatility
T = 1          # maturity in years
K = 100        # strike
N = 10         # intended time steps per path
M = 100        # intended number of Monte Carlo paths
div = 0.03     # dividend yield, used as a decimal despite "In percentage"
option = 'Put'
# Precompute constants
dt = T/N
nu = r - div - 0.5*(sig**2)   # risk-neutral drift of the log-price
nudt = nu*dt
sigsdt = sig*sqrt(dt)
erddt = exp((r-div)*dt)        # expected one-step growth factor
beta1 = -1                     # control-variate coefficient
sum_CT = 0
sum_CT2 = 0
# NOTE(review): range(1, M) runs only M-1 simulations and range(1, N) only
# N-1 time steps; range(M) / range(1, N+1) look intended — confirm against
# the reference algorithm before changing.
for j in range(1,M): # For each simulation
    St1 = S
    St2 = S
    cv1 = 0
    cv2 = 0
    for i in range(1,N): # For each time step
        t = (i - 1) * dt
        # NOTE(review): CBS returns the Black-Scholes *price*, not the
        # delta; a delta-based control variate would use the option delta
        # here — confirm against the reference.
        delta1 = CBS(St1, K, T, r, sig, t, option)
        delta2 = CBS(St2, K, T, r, sig, t, option)
        eps = np.random.normal(0, 1)   # antithetic pair uses eps and -eps
        Stn1 = St1 * exp(nudt + sigsdt * eps)
        Stn2 = St2 * exp(nudt + sigsdt * (-eps))
        # NOTE(review): cv1/cv2 are never updated (they stay 0), so cv1a/cv2a
        # end up holding only the LAST step's contribution; an accumulating
        # control variate would assign back to cv1/cv2 — confirm.
        cv1a = cv1 + delta1 * (Stn1 - St1 * erddt)
        cv2a = cv2 + delta2 * (Stn2 - St2 * erddt)
        St1 = Stn1
        St2 = Stn2
    if option == 'Call':
        # average of the antithetic pair, each adjusted by the control variate
        CT = 0.5*(max(0, St1 - K) + beta1*cv1a + max(0, St2 - K) + beta1*cv2a)
        sum_CT = sum_CT + CT
        sum_CT2 = sum_CT2 + CT*CT
    elif option == 'Put':
        CT = 0.5*(max(0, K - St1) + beta1*cv1a + max(0, K - St2) + beta1*cv2a)
        sum_CT = sum_CT + CT
        sum_CT2 = sum_CT2 + CT * CT
    else:
        break
Value = sum_CT/M*exp(-r*T)   # discounted mean payoff
SD = sqrt((sum_CT2 - sum_CT*sum_CT/M)*exp(-2*r*T)/(M-1))
SE = SD/sqrt(M)
print('The Value of European',option,'Option is',Value)
print('The Standard Deviation of this Option is',SD)
print('The Standard Error in this case is',SE)
| StarcoderdataPython |
3337556 | #!/usr/bin/env python3
# Copyright 2019 <NAME>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import serial
import serial.tools.list_ports
from serial import SerialException
EEPROG_IDENTIFIER = 'eeprog'
class ConnectionFailed(Exception):
    """Raised when a serial port cannot be opened or the device on it does
    not identify itself as an eeprog programmer."""
    pass
class DeviceNotFound(Exception):
    """Raised when no serial port on the system identifies as eeprog."""
    pass
def connect_to(device):
    """Open *device* and verify it identifies itself as an eeprog programmer.

    Returns the open serial.Serial on success.  Raises ConnectionFailed when
    the port cannot be opened, the read fails, or the identification banner
    does not match; the port is closed before raising so it is not leaked.
    """
    try:
        ser = serial.Serial(port=device, baudrate=9600)
    except SerialException:
        raise ConnectionFailed("No such device: " + device)
    try:
        identifier = ser.read_until().decode('ascii').strip()
    except SerialException:
        # BUG FIX: the original left the port open when the read failed.
        ser.close()
        raise ConnectionFailed("No such device: " + device)
    if identifier == EEPROG_IDENTIFIER:
        return ser
    # BUG FIX: the original leaked the open port on an identifier mismatch.
    ser.close()
    raise ConnectionFailed("Failed to connect to device: " + device)
def find_device():
    """Scan all serial ports for an eeprog programmer and return its name.

    Each port is probed with a 2-second read timeout; probe failures on one
    port do not abort the scan.  Raises DeviceNotFound when no port
    identifies itself as eeprog.
    """
    for comport in serial.tools.list_ports.comports():
        try:
            with serial.Serial(port=comport.device, baudrate=9600, timeout=2.0) as ser:
                identifier = ser.read_until().decode('ascii').strip()
                if identifier == EEPROG_IDENTIFIER:
                    return comport.device
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; a failed probe of one port
            # should merely move on to the next.
            pass
    raise DeviceNotFound("Failed to find device")
| StarcoderdataPython |
6652885 | import collections
import operator
import bytewax
from bytewax import inp
# You can define your own functions which add groupings of steps to a
# dataflow. This allows you to repeat a pattern of steps easily.
def calc_counts(flow):
    """Append frequency-counting steps to *flow*.

    Downstream items become (item, count) tuples: each input item is mapped
    to (item, 1) and the counts are summed per epoch.
    """
    flow.map(lambda item: (item, 1))
    flow.reduce_epoch(operator.add)
def get_count(word_count):
    """Return the count from a (word, count) pair."""
    _, count = word_count
    return count
def inspector(count_count):
    """Print how many distinct words shared a given frequency.

    *count_count* is a (count, number-of-words-with-that-count) pair.
    """
    that_same_count = count_count[0]
    num_words_with_the_same_count = count_count[1]
    print(
        f"There were {num_words_with_the_same_count} different words with a count of {that_same_count}"
    )
ec = bytewax.Executor()
# Single-batch input: the whole sample file arrives in one epoch.
flow = ec.Dataflow(inp.single_batch(open("examples/sample_data/wordcount.txt")))
# "at this point we have full sentences as items in the dataflow"
flow.flat_map(str.split)
# "words"
calc_counts(flow)
# ("word", count)
flow.map(get_count)
# count
calc_counts(flow)
# (that_same_count, num_words_with_the_same_count)
flow.inspect(inspector)
if __name__ == "__main__":
    ec.build_and_run()
| StarcoderdataPython |
5112492 | <gh_stars>1-10
import os, sys
from datetime import datetime
import time
import random
import inspect
import platform
import getpass
import socket
import inspect
from distutils.dir_util import copy_tree
class Singleton(type):
    """Metaclass that caches exactly one instance per class.

    The first call to a class constructs the instance; every later call
    returns that same cached object.
    """
    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            instance = super(Singleton, cls).__call__(*args, **kwargs)
            cls._instances[cls] = instance
            return instance
class HTMlLogger(metaclass=Singleton):
f = open(os.getcwd() + "report.html", "a")
filepath = ""
parentid = ""
rowid = ""
childid = ""
def __init__(self, reportpath):
self.filepath = reportpath
# for arg in args:
# if arg != "":
# self.filepath = arg
# break
# if args.__len__()==0:
# self.filepath = str(os.path.dirname(os.path.abspath(__file__)))+"\..\.."
if not(self.filepath.find("Reports")!=-1):
if not os.path.exists(self.filepath + "/Reports"):
os.mkdir(self.filepath + "/Reports")
self.filepath = self.filepath + "/Reports"
else:
if not os.path.exists(self.filepath):
os.mkdir(self.filepath)
if not os.path.exists(self.filepath+"/css"):
copy_tree(str(os.path.dirname(os.path.abspath(__file__)))+'/css',self.filepath+"/css")
repfld = self.filepath
print(repfld)
self.filepath = self.filepath + "/report_" + str(time.strftime("%Y%m%d-%H%M%S")) + ".html"
self.f = open(self.filepath, "w+")
support_logger().create_support(repfld)
# print('i am htmllogger called') # Removed unwanted print statement
strStartFile = """<html>
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.7.1/jquery.min.js"></script>
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.10.1/jquery-ui.min.js"></script>
<script type="text/javascript" src="./css/jquery.tbltree.js"></script>
<link type="text/css" href="css/jquery.tbltree.css" rel="stylesheet">
<script type="text/javascript" src="./css/jquery.cookie.js"></script>
<script type="text/javascript" src="https://www.gstatic.com/charts/loader.js"></script>
<script type="text/javascript">
$(function() {
// initialize with default options
$( "#table1" ).tbltree();
});
google.charts.load("current", {packages:["corechart"]});
google.charts.setOnLoadCallback(drawChart);
function openTab(evt, cityName) {
var i, tabcontent, tablinks;
tabcontent = document.getElementsByClassName("maincontent");
for (i = 0; i < tabcontent.length; i++) {
tabcontent[i].style.display = "none";
}
tablinks = document.getElementsByClassName("tablinks");
for (i = 0; i < tablinks.length; i++) {
tablinks[i].className = tablinks[i].className.replace(" active", "");
}
document.getElementById(cityName).style.display = "";
evt.currentTarget.className += " active";
}
function load(){
document.getElementById("defaultOpen").click();
}
function calcFail()
{
var elements = document.getElementsByClassName("fail");
var names = '';
for(var i=0; i<elements.length; i++) {
names = elements[i].getAttribute('parentid');
document.getElementById(names).innerHTML="<img src='css/images/fail_4.png'/>  FAIL";
document.getElementById(names).setAttribute("class", "testFAIL");
}
}
function drawChart() {
var pass = document.getElementsByClassName("testPASS").length
var fail = document.getElementsByClassName("testFAIL").length
//var norun = 1
var data = google.visualization.arrayToDataTable([
['Task', 'Hours per Day'],
['PASS', pass],
['FAIL', fail],
// ['No Run', norun],
]);
var options = {
pieHole: 0.5,
colors: ['green', '#FF0000', '#3498db']
};
var chart = new google.visualization.PieChart(document.getElementById('donutchart'));
chart.draw(data, options);
}
</script>
<style>
#sidebar {
width: 15%;
height: 100%;
position: fixed;
background: #454545; /*#2f323a*/
left: 0;
}
.nav-collapse.collapse {
display: inline;
}
ul.sidebar-menu , ul.sidebar-menu li ul.sub{
margin: -2px 0 0;
padding: 0;
}
ul.sidebar-menu {
margin-top: 75px;
}
#sidebar > ul > li > ul.sub {
display: none;
}
#sidebar > ul > li.active > ul.sub, #sidebar > ul > li > ul.sub > li > a {
display: block;
}
ul.sidebar-menu li ul.sub li{
background: white;
margin-bottom: 0;
margin-left: 0;
margin-right: 0;
}
ul.sidebar-menu li ul.sub li:last-child{
border-radius: 0 0 4px 4px;
-webkit-border-radius: 0 0 4px 4px;
}
ul.sidebar-menu li ul.sub li a {
font-size: 12px;
padding: 6px 0;
line-height: 35px;
height: 35px;
-webkit-transition: all 0.3s ease;
-moz-transition: all 0.3s ease;
-o-transition: all 0.3s ease;
-ms-transition: all 0.3s ease;
transition: all 0.3s ease;
color: #fcfffa;
}
ul.sidebar-menu li ul.sub li a:hover {
color: white;
background: transparent;
}
ul.sidebar-menu li ul.sub li.active a {
color: white;
-webkit-transition: all 0.3s ease;
-moz-transition: all 0.3s ease;
-o-transition: all 0.3s ease;
-ms-transition: all 0.3s ease;
transition: all 0.3s ease;
display: block;
}
ul.sidebar-menu li{
line-height: 20px !important;
margin-bottom: 5px;
margin-left:0px;
margin-right:10px;
}
ul.sidebar-menu li.sub-menu{
line-height: 15px;
}
ul.sidebar-menu li a span{
display: inline-block;
color: white;
}
ul.sidebar-menu li a{
color: #fcfffa;
text-decoration: none;
display: block;
padding: 15px 10px 15px 10px;
font-size: 12px;
font-color:white
outline: none;
-webkit-transition: all 0.3s ease;
-moz-transition: all 0.3s ease;
-o-transition: all 0.3s ease;
-ms-transition: all 0.3s ease;
transition: all 0.3s ease;
}
ul.sidebar-menu li a.active, ul.sidebar-menu li a:hover, ul.sidebar-menu li a:focus {
background: #999999;
color: #fff;
display: block;
-webkit-transition: all 0.3s ease;
-moz-transition: all 0.3s ease;
-o-transition: all 0.3s ease;
-ms-transition: all 0.3s ease;
transition: all 0.3s ease;
}
ul.sidebar-menu li a i {
font-size: 15px;
padding-right: 6px;
}
ul.sidebar-menu li a:hover i, ul.sidebar-menu li a:focus i {
color: #fff;
}
ul.sidebar-menu li a.active i {
color: #fff;
}
#rcorners2 {
border-radius: 25px;
border: 4px solid #22242a; /* #73AD21*/
box-shadow: 5px
padding: 0px;
width: 80%;
}
#table1 {
box-shadow: 5px
padding: 0px;
width:70%;
isplay: inline-block;
}
#fail{
align:center
}
.img-circle {
border-radius: 50%;
border-top-left-radius: 50%;
border-top-right-radius: 50%;
border-bottom-right-radius: 50%;
border-bottom-left-radius: 50%;
}
.header{
background: #454545;
border-bottom: 1px solid #454545;
height: 3%;
position: fixed;
left: 0;
top: 0;
right: 0;
z-index: 1002;
}
.logo{
background: #4ECDC4;
size:12px;
}
.logo1{
background: White;
size:13px;
}
.container{
/* background: white;*/
background : #f1f1f1;
}
.maincontent{
display: inline-block;
margin-top: 1%;
/* padding-left: 1px;*/
padding-right: 15px;
padding-bottom: 15px;
padding-top: 0px;
width: 100%;
}
#detailedreport{
padding-left: 2px;
}
#dashboard{
padding-top: 0px;
}
.data{
width : 100px;
}
#EnvDet{
width: 320px;
padding: 10px;
border: 5px solid gray;
margin: 0;
}
#ExeDet{
width: 320px;
padding-left: 10%;
border: 5px solid gray;
margin: 0;
}
#th{
font-size: 23px;
}
</style>
</head>
<body onload="load();calcFail()" >
<!-- **********************************************************************************************************************************************************
TOP BAR CONTENT & NOTIFICATIONS
*********************************************************************************************************************************************************** -->
<!--header start-->
<header class="header">
<div class="sidebar-toggle-box">
<div class="fa fa-bars tooltips" data-placement="right" data-original-title="Toggle Navigation"></div>
</div>
<!--logo start-->
</header>
<div id="sidebar" class="nav-collapse ">
<!-- sidebar menu start-->
<ul class="sidebar-menu" id="nav-accordion">
<p class="centered" align="Center"><img src="css/images/logo.png" class="img-circle" width="80px"></p>
<p align="Center"><b><font size=6 color="White" >HTMLLogger</font></b></p>
<li class="mt">
<a class="tablinks" onclick="openTab(event, 'dashboard')" id="defaultOpen">
<i class="fa fa-dashboard"></i>
<span >Dashboard</span>
</a>
</li>
<li class="sub-menu">
<a onclick="openTab(event, 'detailedreport')" class="tablinks">
<i class="fa fa-desktop"></i>
<span>Detailed Report</span>
</a>
</li>
<li class="sub-menu">
<a onclick="openTab(event, 'envdetails')" class="tablinks">
<i class="fa fa-cogs"></i>
<span>Environment Details</span>
</a>
</li>
</ul>
</div>
<div id="envdetails" class="maincontent" style="width: 900px; height: 500px; padding-left: 15%;" >
<p><h1 id = 'EnvDet'><b>Environment Details</b></h1></p>
<p><b> OS Name : </b>""" + platform.system() + """</p>
<p><b> OS Release : </b>""" + platform.release() + """</p>
<p><b> Machine Name :</b>""" + socket.gethostname() + """</p>
<p><b> User Name : </b>""" + getpass.getuser() + """ </p>
</div>
<div id="dashboard" class="maincontent" style="padding-left: 15%;">
<p><h1 id = 'EnvDet'><b>Summary</b></h1></p>
<div id="donutchart" style="width: 700px; height:500px;"></div>
</div>
<div class="maincontent" id="detailedreport">
<table id="table1" border=1 align=Center class="container">
<tr>
<th id='th'>Test Case Name</th><th id='th' >Status</th><th id='th'>Time </th>
</tr>"""
self.f.writelines(strStartFile)
self.f.close()
# print(inspect.stack()[1][0].f_code.co_name)
    def assert_testcase_log(self, log):
        """Append a top-level PASS row for a test case to the HTML report.

        The row becomes the parent of subsequent step rows in the
        jquery.tbltree table: ``self.parentid`` is (re)generated here and is
        read later by assert_step_log()/assert_step_fail_log() to link their
        child rows to this test case.

        :param log: test-case name; inserted verbatim (unescaped) into the
            generated HTML.
        """
        # Re-open the report in append mode for this single row.
        self.f = open(self.filepath, "a")
        # Random id in [0, 6000) for the test-case row; step rows draw from
        # disjoint ranges so row ids should not collide with child rows.
        self.parentid = random.randrange(0, 6000, 1)
        tlog = """
        <tr row-id='""" + str(self.parentid) + """'>
            <td><b>""" + log + """</b></td><td class="testPASS" id = '""" + str(
            self.parentid) + """'><img src="css/images/pass_4.png"/>  PASS</td><td class="data">""" + str(
            time.strftime("%H:%M:%S")) + """</td>
        </tr>
        """
        self.f.writelines(tlog)
        self.f.close()
        # print(self.__doc__)
    def assert_step_log(self, log):
        """Append a child PASS row for a single test step to the HTML report.

        The row is linked to the current test case through ``self.parentid``,
        so assert_testcase_log() must have been called beforehand.

        :param log: step description; inserted verbatim (unescaped) into the
            generated HTML.
        """
        self.f = open(self.filepath, "a")
        # Step rows use ids in [6000, 200000) -- disjoint from the [0, 6000)
        # test-case ids, so a step row can never shadow a test-case row.
        self.rowid = random.randrange(6000, 200000, 1)
        tlog = """
        <tr row-id='""" + str(self.rowid) + """' parent-id='""" + str(self.parentid) + """'>
            <td>""" + log + """</td><td class="data"><img src="css/images/pass_4.png"/>  PASS</td><td class="data">""" + str(
            time.strftime("%H:%M:%S")) + """</td>
        </tr>
        """
        self.f.writelines(tlog)
        self.f.close()
    def assert_step_fail_log(self, driver, log):
        """Append a FAIL row plus a screenshot row for a failed test step.

        Captures a screenshot via ``driver.get_screenshot_as_file`` into a
        ``FailureImages`` directory next to the report, then writes two
        linked rows: the FAIL row (child of ``self.parentid``, so
        assert_testcase_log() must have run first) and an image row nested
        under the FAIL row.

        :param driver: object exposing ``get_screenshot_as_file(path)``
            (presumably a Selenium WebDriver -- TODO confirm).
        :param log: failure description; inserted verbatim into the HTML.
        """
        self.f = open(self.filepath, "a")
        # Failure rows use ids in [20000, 60000); the screenshot row below
        # reuses childid + 1 as its id.
        self.childid = random.randrange(20000, 60000, 1)
        # Derive the screenshot name from the report filename.
        # NOTE(review): this path handling assumes "/" separators throughout;
        # a Windows-style self.filepath would not be split -- confirm callers.
        img = self.filepath.split("/")[len(self.filepath.split("/")) - 1]
        snappath = self.filepath.replace(self.filepath.split("/")[len(self.filepath.split("/")) - 1],"")
        if not os.path.exists(snappath+"/FailureImages"):
            os.mkdir(snappath+"/FailureImages")
        snappath = snappath+"/FailureImages"
        # NOTE(review): str.replace substitutes EVERY occurrence of the
        # substring, so a report name containing "html" or "report_" anywhere
        # (not just the extension/prefix) is rewritten in full -- confirm
        # report names always match "report_*.html".
        img = img.replace("html", "png")
        #snappath = snappath.replace("LatestTree", "FailureImg")
        img = img.replace("report_", "snap_")
        strP = img #snappath.split("/")[len(snappath.split("/")) - 1]
        # snappath.replace("LatestTree/report_", "FailureImg/report_"+str(random.randint(1,101)))
        snappath = snappath+"/"+strP
        driver.get_screenshot_as_file(snappath)
        # Two rows: the FAIL entry, then the screenshot (relative path so the
        # report stays portable alongside its FailureImages directory).
        tlog = """
        <tr row-id='""" + str(self.childid) + """' parent-id='""" + str(self.parentid) + """'>
            <td><font color='red'>""" + log + """</font></td><td class="fail" id="fail" parentid='""" + str(
            self.parentid) + """'><img src="css/images/fail.png"/>  FAIL</td><td class="data">""" + str(
            time.strftime("%H:%M:%S")) + """</td>
        </tr>
        <tr row-id='""" + str(self.childid + 1) + """' parent-id='""" + str(self.childid) + """'>
            <td colspan="3"><img src='FailureImages/""" + strP + """' width="25%" height="25%"/></td>
        </tr>
        """
        self.f.writelines(tlog)
        self.f.close()
def close_report(self):
self.f = open(self.filepath, "r+")
data = self.f.read()
data = data.replace("</table></div></body></html>", '')
self.f.seek(0)
self.f.truncate()
self.f.writelines(data + "</table></div></body></html>")
self.f.close()
class support_logger():
    """Writer for the static support assets the HTML report depends on.

    Materialises a ``css/`` directory next to the report containing the
    bundled jQuery tbltree plugin, the jQuery cookie plugin, a helper
    script and the tbltree stylesheet, all embedded below as string
    literals.
    """
    def create_support(self, support_js):
        """Create ``<support_js>/css`` and write the four support files.

        Writes jquery.tbltree.js, jquery.cookie.js, script.js and
        jquery.tbltree.css.

        NOTE(review): the files are opened in append mode ("a"), so calling
        this twice against the same directory duplicates the file contents
        and corrupts the scripts -- confirm whether "w" was intended.

        :param support_js: directory that should contain the ``css`` folder
            (forward-slash paths assumed).
        """
        if not os.path.exists(support_js + "/css"):
            os.mkdir(support_js + "/css")
        support_js = support_js + "/css"
        # --- jquery.tbltree.js: collapsible-tree table widget used by the
        # report's row-id / parent-id markup. Emitted verbatim.
        f = open(support_js + "/jquery.tbltree.js", "a")
        tlog = """/*
 * jQuery tbletree Plugin 1.0.0
 *
 * Copyright 2014, <NAME>
 * Licensed under the MIT licenses.
 */
(function($) {
    $.widget( "ui.tbltree", {
    // default options
    options: {
        rowAttr: 'row-id',
        parentAttr: 'parent-id',
        treeColumn: 0,
        saveState: false,
        saveStateName: 'tbltree-state',
        saveStateMethod: 'cookie',
        initState: 'collapsed',
        levelPicker: '',
        expanderTemplate: '<span class="tbltree-expander"></span>',
        levelPickerTemplate: '<div class="tbltree-level-pickers">\
            <span id="0" class="tbltree-level-item">[1]</span> \
            <span id="1" class="tbltree-level-item">[2]</span> \
            <span id="2" class="tbltree-level-item">[3]</span> \
            <span id="3" class="tbltree-level-item">[4]</span> \
            </div>',
        indentTemplate: '<span class="tbltree-indent"></span>',
        expanderExpandedClass: 'tbltree-expander-expanded',
        expanderCollapsedClass: 'tbltree-expander-collapsed',
        count: {
            enabled: false,
            format: '<div class="tbltree-count-wrap">(<span class="tbltree-count"></span>)</div>',
            method: function(row) {
                // count every row
                return 1;
            },
            click: null
        },
        // callbacks
        change: null,
        expand: null,
        collapse: null,
        showlevel: null
    },
    // the constructor
    _create: function() {
        var $this = this;
        this.element
            .addClass( "jquery-tbltree" )
        if (this.options.levelPicker !== "" && $(this.options.levelPicker).length > 0) {
            this.pickers = $(this.options.levelPickerTemplate);
            this.pickers.find('.tbltree-level-item').click(function(){
                $this.showLevel($(this).attr('id'))
            })
            $(this.options.levelPicker).append(this.pickers);
        }
    },
    _init: function() {
        var $this = this;
        this.getRootNodes().each(function(){
            $this._initTree($(this));
        })
    },
    getID: function(row) {
        return row.attr(this.options.rowAttr);
    },
    getParentID: function(row) {
        return row.attr(this.options.parentAttr);
    },
    isExpanded: function(cell) {
        return cell.hasClass('tbltree-expanded');
    },
    isCollapsed: function(cell) {
        return cell.hasClass('tbltree-collapsed');
    },
    getRootNodes: function () {
        var nodes = this.element.find('tr['+this.options.rowAttr+']').not('tr['+this.options.parentAttr+']')
        return nodes
    },
    getRow: function(id) {
        return this.element.find('tr['+this.options.rowAttr+'="'+id+'"]');
    },
    saveState: function(row) {
        var $this = this;
        if ($this.options.saveState && $this.options.saveStateMethod === 'cookie') {
            var stateArrayString = $.cookie(this.options.saveStateName) || '';
            var stateArray = (stateArrayString === '' ? [] : stateArrayString.split(','));
            var nodeId = $this.getID(row);
            if ($this.isExpanded(row)) {
                if ($.inArray(nodeId, stateArray) === -1) {
                    stateArray.push(nodeId);
                }
            } else if ($this.isCollapsed(row)) {
                if ($.inArray(nodeId, stateArray) !== -1) {
                    stateArray.splice($.inArray(nodeId, stateArray), 1);
                }
            }
            $.cookie(this.options.saveStateName, stateArray.join(','));
        }
        return $this;
    },
    getState: function(row) {
        var $this = this;
        if ($this.options.saveState && $this.options.saveStateMethod === 'cookie') {
            var stateArrayString = $.cookie(this.options.saveStateName) || '';
            var stateArray = (stateArrayString === '' ? [] : stateArrayString.split(','));
            if ($.inArray($this.getID(row), stateArray) !== -1) {
                return "expanded";
            } else {
                return "collapsed";
            }
        }
        return $this.options.initState;
    },
    toggle: function (row) {
        if (typeof(row) != "object") {
            row = this.getRow(row);
        }
        if (this.isCollapsed(row)) {
            this.expand(row, 1);
        } else {
            this.collapse(row, 1);
        }
    },
    collapse: function(id, user) {
        var $this = this;
        if (typeof(id) === "object") {
            row_id = this.getID(id);
            row = id;
        } else {
            row_id = id;
            row = this.getRow(row_id);
        }
        var row_id = this.getID(row);
        if (user) {
            this.render(row, 'collapsed');
            this.saveState(row);
            this._trigger("collapse", null, row);
            this._trigger("change", null, {type: 'collapsed', 'row': row});
        }
        this._getChildren(row_id).each(function(){
            $(this).hide();
            $this.collapse($(this), 0);
        })
    },
    expand: function(id, user) {
        var $this = this;
        if (typeof(id) === "object") {
            row_id = this.getID(id);
            row = id;
        } else {
            row_id = id;
            row = this.getRow(row_id);
        }
        var row_id = this.getID(row);
        if (user) {
            this.render(row, 'expanded')
            this.saveState(row);
            this._trigger("expand", null, row);
            this._trigger("change", null, {type: 'expanded', 'row': row});
        }
        this._getChildren(row_id).each(function(){
            if ( ! $this.isCollapsed($(this))) {
                $this.expand($(this), 0);
            }
            $(this).show();
        })
    },
    expandLevel: function(level) {
        var $this = this;
        $this.element.find('tr[level]').filter(function() {
            return $(this).attr("level") <= level;
        })
        .each(function(){
            $this.expand($(this), 1);
        })
    },
    collapseLevel: function(level) {
        var $this = this;
        $this.element.find('tr[level='+level+']').each(function(){
            $this.collapse($(this), 1);
        })
    },
    showLevel: function(level) {
        var $this = this;
        if (level > 0) {
            $this.expandLevel(level - 1);
        }
        $this.collapseLevel(level);
        this._trigger("showlevel", null, level);
    },
    render: function(row, state) {
        var $this = this;
        if (state == 'collapsed') {
            row.attr('tree-state', 'hidden')
            row.removeClass('tbltree-expanded');
            row.addClass('tbltree-collapsed');
        } else {
            row.attr('tree-state', 'shown')
            row.removeClass('tbltree-collapsed');
            row.addClass('tbltree-expanded');
        }
        this._renderExpander(row);
    },
    _getChildren: function (id) {
        if (typeof(id) === "object") {
            id = this.getID(id);
        }
        return this.element.find('tr['+this.options.parentAttr+'="'+id+'"]');
    },
    getTreeCell: function (row) {
        return $(row.find('td').get(this.options.treeColumn));
    },
    isLeaf: function(row) {
        if (row.attr('is-leaf') == "true") {
            return true;
        }
        return false;
    },
    _initExpander: function(root) {
        var $this = this;
        var cell = this.getTreeCell(root);
        var tpl = $(this.options.expanderTemplate);
        var expander = root.find('.tbltree-expander');
        if (expander) {
            expander.remove();
        }
        tpl.prependTo(cell)
        tpl.click(function() {
            $this.toggle(root)
        });
    },
    _renderExpander: function(cell) {
        if (cell.attr('is-leaf') == "true") {
            return;
        }
        var expander = cell.find('.tbltree-expander');
        if (expander.length) {
            if (!this.isCollapsed(cell)) {
                expander.removeClass(this.options.expanderCollapsedClass);
                expander.addClass(this.options.expanderExpandedClass);
            } else {
                expander.removeClass(this.options.expanderExpandedClass);
                expander.addClass(this.options.expanderCollapsedClass);
            }
        } else {
            this._initExpander(cell);
            this._renderExpander(cell);
        }
    },
    _initIndent: function(cell, level) {
        cell.find('.tbltree-indent').remove();
        var $indent = $(this.options.indentTemplate);
        $indent.css('width', (level * 16));
        $indent.insertBefore(cell.find('.tbltree-expander'));
    },
    _initTree: function(row, parent, level) {
        var $this = this;
        level = (level == undefined) ? 0: level;
        var children = this._getChildren(row);
        $this._initExpander(row);
        $this._initIndent(row, level)
        row.attr('level', level);
        row.attr('is-leaf', (children.length == 0));
        $this.render(row, this.getState(row));
        if (parent !== undefined && parent.attr('tree-state') == 'hidden') {
            row.hide();
            row.attr('tree-state', 'hidden');
        } else {
            row.show();
        }
        if (children.length != 0) {
            var count = $this._getCount(row);
            $.each(children, function(i, tree){
                $this._initTree($(tree), row, level+1);
                count += $this._getCount($(tree));
            })
            $this._setCount(row, count);
        }
    },
    _getCount: function(row) {
        if (!this.options.count.enabled) {
            return 0;
        }
        var count = row.attr('count');
        if (count != undefined) {
            return parseInt(count);
        }
        count = 0;
        if (typeof(this.options.count.method) === "function") {
            count += parseInt(this.options.count.method(row));
        }
        return count;
    },
    _setCount: function(row, count) {
        if (!this.options.count.enabled) {
            return 0;
        }
        var $this = this;
        if (typeof(this.options.count.format) === "function") {
            this.options.count.format(row, count);
        } else {
            var elem = $(this.options.count.format);
            elem.find('.tbltree-count').text(count)
            elem.appendTo(this.getTreeCell(row))
            if (typeof(this.options.count.click) === "function") {
                elem.css('cursor', 'pointer').click(function(e) {
                    $this.options.count.click(e, row, count)
                } )
            }
        }
        row.attr('count', count);
    },
    // events bound via _on are removed automatically
    // revert other modifications here
    _destroy: function() {
        this.element
            .removeClass( "jquery-tbltree" )
            .enableSelection()
        this.pickers.remove();
    },
    // _setOptions is called with a hash of all options that are changing
    // always refresh when changing options
    _setOptions: function() {
        // _super and _superApply handle keeping the right this-context
        this._superApply( arguments );
    },
    // _setOption is called for each individual option that is changing
    _setOption: function( key, value ) {
        // prevent invalid color values
        this._super( key, value );
    }
    });
})(jQuery);"""
        f.writelines(tlog)
        f.close()
        # --- jquery.cookie.js: cookie helper used by tbltree's saveState.
        f = open(support_js + "/jquery.cookie.js", "a")
        tlog = """/*!
 * jQuery Cookie Plugin v1.3.1
 * https://github.com/carhartl/jquery-cookie
 *
 * Copyright 2013 <NAME>
 * Released under the MIT license
 */
(function (factory) {
    if (typeof define === 'function' && define.amd) {
        // AMD. Register as anonymous module.
        define(['jquery'], factory);
    } else {
        // Browser globals.
        factory(jQuery);
    }
}(function ($) {
    var pluses = /\+/g;
    function decode(s) {
        if (config.raw) {
            return s;
        }
        return decodeURIComponent(s.replace(pluses, ' '));
    }
    function decodeAndParse(s) {
        if (s.indexOf('"') === 0) {
            // This is a quoted cookie as according to RFC2068, unescape...
            s = s.slice(1, -1).replace(/\\"/g, '"').replace(/\\\\/g, '\\');
        }
        s = decode(s);
        try {
            return config.json ? JSON.parse(s) : s;
        } catch(e) {}
    }
    var config = $.cookie = function (key, value, options) {
        // Write
        if (value !== undefined) {
            options = $.extend({}, config.defaults, options);
            if (typeof options.expires === 'number') {
                var days = options.expires, t = options.expires = new Date();
                t.setDate(t.getDate() + days);
            }
            value = config.json ? JSON.stringify(value) : String(value);
            return (document.cookie = [
                config.raw ? key : encodeURIComponent(key),
                '=',
                config.raw ? value : encodeURIComponent(value),
                options.expires ? '; expires=' + options.expires.toUTCString() : '', // use expires attribute, max-age is not supported by IE
                options.path ? '; path=' + options.path : '',
                options.domain ? '; domain=' + options.domain : '',
                options.secure ? '; secure' : ''
            ].join(''));
        }
        // Read
        var cookies = document.cookie.split('; ');
        var result = key ? undefined : {};
        for (var i = 0, l = cookies.length; i < l; i++) {
            var parts = cookies[i].split('=');
            var name = decode(parts.shift());
            var cookie = parts.join('=');
            if (key && key === name) {
                result = decodeAndParse(cookie);
                break;
            }
            if (!key) {
                result[name] = decodeAndParse(cookie);
            }
        }
        return result;
    };
    config.defaults = {};
    $.removeCookie = function (key, options) {
        if ($.cookie(key) !== undefined) {
            // Must not alter options, thus extending a fresh object...
            $.cookie(key, '', $.extend({}, options, { expires: -1 }));
            return true;
        }
        return false;
    };
}));"""
        f.writelines(tlog)
        f.close()
        # --- script.js: page helper (line numbers for <pre>, section header
        # tracking on scroll).
        # NOTE(review): this literal is NOT a raw string, so every "\n" below
        # is interpreted by Python and the emitted script.js contains literal
        # newlines inside the JS string quotes -- likely breaking the script;
        # confirm whether a raw (r\"\"\"...\"\"\") literal was intended.
        f = open(support_js + "/script.js", "a")
        tlog = """(function($) {
    $(document).ready(function(){
        // putting lines by the pre blocks
        $("pre").each(function(){
            var pre = $(this).text().split("\n");
            var lines = new Array(pre.length+1);
            for(var i = 0; i < pre.length; i++) {
                var wrap = Math.floor(pre[i].split("").length / 70)
                if (pre[i]==""&&i==pre.length-1) {
                    lines.splice(i, 1);
                } else {
                    lines[i] = i+1;
                    for(var j = 0; j < wrap; j++) {
                        lines[i] += "\n";
                    }
                }
            }
            $(this).before("<pre class='lines'>" + lines.join("\n") + "</pre>");
        });
        var headings = [];
        var collectHeaders = function(){
            headings.push({"top":$(this).offset().top - 15,"text":$(this).text()});
        }
        if($(".markdown-body h1").length > 1) $(".markdown-body h1").each(collectHeaders)
        else if($(".markdown-body h2").length > 1) $(".markdown-body h2").each(collectHeaders)
        else if($(".markdown-body h3").length > 1) $(".markdown-body h3").each(collectHeaders)
        $(window).scroll(function(){
            if(headings.length==0) return true;
            var scrolltop = $(window).scrollTop() || 0;
            if(headings[0] && scrolltop < headings[0].top) {
                $(".current-section").css({"opacity":0,"visibility":"hidden"});
                return false;
            }
            $(".current-section").css({"opacity":1,"visibility":"visible"});
            for(var i in headings) {
                if(scrolltop >= headings[i].top) {
                    $(".current-section .name").text(headings[i].text);
                }
            }
        });
        $(".current-section a").click(function(){
            $(window).scrollTop(0);
            return false;
        })
    });
})(jQuery)"""
        f.writelines(tlog)
        f.close()
        # --- jquery.tbltree.css: styles for the tree expander/indent markup.
        f = open(support_js + "/jquery.tbltree.css", "a")
        tlog = """.tbltree-indent {width:16px; height: 16px; display: inline-block; position: relative; border: 2;}
.tbltree-expander {width:16px; height: 16px; display: inline-block; position: relative; cursor: pointer;}
.tbltree-expander-expanded{background-image: url(images/collapse.png);}
.tbltree-expander-collapsed{background-image: url(images/expand.png);}
.tbltree-level-pickers {float: left;}
.tbltree-level-pickers .tbltree-level-item {cursor: pointer;}
.tbltree-count-wrap {
    font-style: italic;
    font-size: 10px;
    float: right;
}
    """
        f.writelines(tlog)
        f.close()
| StarcoderdataPython |
9716992 | <gh_stars>10-100
# -*- coding: utf8 -*-
"""
Constants for modules
"""
__author__ = 'sergey'

# Compression method names understood by the compression layer.
COMPRESSION_SUPPORTED=('lzo', 'zlib', 'bz2', 'xz', 'snappy',
                       'lz4', 'lz4h', 'lz4r07',
                       'quicklz', 'quicklzf', 'quicklzm', 'quicklzb',
                       'brotli',
                       'zstd',
                       'zstd001', 'zstd036', 'zstd047', 'zstd061',
                       )
# Legacy formats: data can still be decompressed, but no new data is written.
COMPRESSION_READONLY=("quicklz", "zstd001", "zstd036", "zstd047", 'zstd061', 'lz4r07')

# Selection strategies for picking a compressor.
COMPRESSION_TYPE_BEST="all_best"
COMPRESSION_TYPE_DEFAULT="all"
COMPRESSION_TYPE_FAST="all_fast"
COMPRESSION_TYPE_CUSTOM="custom"
COMPRESSION_TYPE_NONE="none"

# Compression effort presets.
COMPRESSION_LEVEL_DEFAULT="default"
COMPRESSION_LEVEL_FAST="fast"
COMPRESSION_LEVEL_NORM="normal"
COMPRESSION_LEVEL_BEST="best"

# Subset of hashlib simple funcs
# (The misspelled name "FUCTIONS" is kept for backward compatibility with
# existing importers.)
# Fixes vs. the original definition:
#   * 'blase2s' was a typo for 'blake2s' (cf. 'blake2b' next to it);
#   * a missing comma after 'shake_256' made Python concatenate it with
#     'whirlpool' into the bogus member 'shake_256whirlpool', silently
#     dropping both intended names from the set.
# NOTE(review): 'md4', 'whirlpool' and 'ripemd160' are only provided by
# hashlib when the underlying OpenSSL build supports them -- confirm before
# relying on their availability.
WANTED_HASH_FUCTIONS = {'md4', 'md5', 'sha1',
                        'sha224', 'sha256', 'sha384', 'sha512',
                        'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512',
                        'blake2s', 'blake2b',
                        'shake_128', 'shake_256',
                        'whirlpool', 'ripemd160'}
HAS_FUNCTION_DEFAULT = 'md5'

# For .sqlite3 files: external (de)compression helpers. For each tool:
# its output extension, compress/decompress flags, priority (higher wins
# among installed tools) and whether it can compress / decompress.
COMPRESSION_PROGS = {
    "pigz": {"ext": ".gz", "comp": ["-4q"], "decomp": ["-dq"], "priority": 10, "can-comp": True, "can-decomp": True},
    "gzip": {"ext": ".gz", "comp": ["-4q"], "decomp": ["-dq"], "priority": 1, "can-comp": True, "can-decomp": True},
    "pbzip2": {"ext": ".bz2", "comp": ["-1"], "decomp": ["-d"], "priority": 10, "can-comp": True, "can-decomp": True},
    "bzip2": {"ext": ".bz2", "comp": ["-1q"], "decomp": ["-dq"], "priority": 1, "can-comp": True, "can-decomp": True},
    "pxz": {"ext": ".xz", "comp": ["-2"], "decomp": [], "priority": 10, "can-comp": True, "can-decomp": False},
    "xz": {"ext": ".xz", "comp": ["-2q"], "decomp": ["-dq"], "priority": 1, "can-comp": True, "can-decomp": True},
    "lzop": {"ext": ".lzo", "comp": ["-3q"], "decomp": ["-dq"], "priority": 1, "can-comp": True, "can-decomp": True},
    "lz4": {"ext": ".lz4", "comp": ["-1q"], "decomp": ["-dq"], "priority": 1, "can-comp": True, "can-decomp": True},
    # As of 0.8 -- need to be forced to remove compressed file
    "pzstd": {"ext": ".zst", "comp": ["-6q", "--rm"], "decomp": ["-dq", "--rm"], "priority": 10, "can-comp": True, "can-decomp": True},
    "zstd": {"ext": ".zst", "comp": ["-6q", "--rm"], "decomp": ["-dq", "--rm"], "priority": 1, "can-comp": True, "can-decomp": True},
}
# Extension -> candidate tools, listed in order of preference.
COMPRESSION_PROGS_EXT = {
    ".gz": ("pigz", "gzip",),
    ".bz2": ("pbzip2", "bzip2",),
    ".xz": ("pxz", "xz",),
    ".lzo": ("lzop",),
    ".lz4": ("lz4",),
    ".zst": ("pzstd", "zstd",)
}
COMPRESSION_PROGS_NONE = "none"
COMPRESSION_PROGS_DEFAULT = COMPRESSION_PROGS_NONE

# Name of the root subvolume (bytes: used as a raw key/path component).
ROOT_SUBVOLUME_NAME=b"@root"

# Allowed data block sizes.
BLOCK_SIZE_MIN=512
BLOCK_SIZE_DEFAULT=128*1024 # 128kb
BLOCK_SIZE_MAX=16*1024*1024 # 16Mb
| StarcoderdataPython |
5095571 | #!/usr/bin/env python
"""Application controller for RAxML (v7.0.3).
WARNING: Because of the use of the -x option, this version is no longer
compatible with RAxML version VI.
"""
from cogent.app.parameters import FlagParameter, ValuedParameter, FilePath
from cogent.app.util import CommandLineApplication, ResultPath, get_tmp_filename
from cogent.core.tree import PhyloNode
from cogent.core.alignment import Alignment
from cogent.core.moltype import DNA, RNA, PROTEIN
from cogent.util.warning import deprecated
from random import choice, randint
from os import walk
from os.path import isabs
from cogent.parse.tree import DndParser
__author__ = "<NAME>"
__copyright__ = "Copyright 2007-2012, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", \
"<NAME>", "<NAME>"]
__license__ = "GPL"
__version__ = "1.5.3"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
class Raxml(CommandLineApplication):
    """RAxML application controller"""
    # Emits a deprecation warning at class-definition time pointing users at
    # the v7.3.0 controller.
    deprecated('class',
               'cogent.app.raxml.Raxml',
               'cogent.app.raxml_v730.Raxml',
               '1.6')
    # Map of raxmlHPC command-line flags to cogent Parameter objects; each
    # entry's comment below documents the corresponding RAxML option.
    _options ={
        # Specify a column weight file name to assign individual wieghts to
        # each column of the alignment. Those weights must be integers
        # separated by any number and type of whitespaces whithin a separate
        # file, see file "example_weights" for an example.
        '-a':ValuedParameter('-',Name='a',Delimiter=' '),
        # Specify an integer number (random seed) for bootstrapping
        '-b':ValuedParameter('-',Name='b',Delimiter=' '),
        # Specify number of distinct rate catgories for raxml when
        # ModelOfEvolution is set to GTRCAT or HKY85CAT.
        # Individual per-site rates are categorized into numberOfCategories
        # rate categories to accelerate computations. (Default = 50)
        '-c':ValuedParameter('-',Name='c',Delimiter=' ', Value=50),
        # This option allows you to start the RAxML search with a complete
        # random starting tree instead of the default Maximum Parsimony
        # Starting tree. On smaller datasets (around 100-200 taxa) it has
        # been observed that this might sometimes yield topologies of distinct
        # local likelihood maxima which better correspond to empirical
        # expectations.
        '-d':FlagParameter('-',Name='d'),
        # This allows you to specify up to which likelihood difference.
        # Default is 0.1 log likelihood units, author recommends 1 or 2 to
        # rapidly evaluate different trees.
        '-e':ValuedParameter('-',Name='e',Delimiter=' ', Value=0.1),
        # select search algorithm:
        #   d for normal hill-climbing search (Default)
        #     when -f option is omitted this algorithm will be used
        #   o old (slower) algorithm from v. 2.1.3
        #   c (check) just tests whether it can read the alignment
        #   e (evaluate) to optimize model+branch lengths for given input tree
        #   b (bipartition) draws bipartitions
        #   s (split) splits into individual genes, provided with model file
        '-f':ValuedParameter('-',Name='f',Delimiter=' ', Value="d"),
        # select grouping file name: allows incomplete multifurcating constraint
        # tree in newick format -- resolves multifurcations randomly, adds
        # other taxa using parsimony insertion
        '-g':ValuedParameter('-', Name='g',Delimiter=' '),
        # prints help and exits
        '-h':FlagParameter('-', Name='h'),
        # allows initial rearrangement to be constrained, e.g. 10 means
        # insertion will not be more than 10 nodes away from original.
        # default is to pick a "good" setting.
        '-i':ValuedParameter('-', Name='i', Delimiter=' '),
        # writes checkpoints (off by default)
        '-j':FlagParameter('-', Name='j'),
        #specifies that RAxML will optimize model parameters (for GTRMIX and
        # GTRGAMMA) as well as calculating likelihoods for bootstrapped trees.
        '-k':FlagParameter('-', Name='k'),
        # Model of Nucleotide Substitution:
        # -m GTRGAMMA: GTR + Optimization of substitution rates + Gamma
        # -m GTRCAT: GTR + Optimization of substitution rates +  Optimization
        #    of site-specific evolutionary rates which are categorized into
        #    numberOfCategories distinct rate categories for greater
        #    computational efficiency
        # -m GTRMIX: Searches for GTRCAT, then switches to GTRGAMMA
        # Amino Acid Models
        # matrixName (see below): DAYHOFF, DCMUT, JTT, MTREV, WAG, RTREV,
        #    CPREV, VT, BLOSUM62, MTMAM, GTR.
        # F means use empirical nucleotide frequencies (append to string)
        # -m PROTCATmatrixName[F]: uses site-specific rate categories
        # -m PROTGAMMAmatrixName[F]: uses Gamma
        # -m PROTMIXmatrixName[F]: switches between gamma and cat models
        # e.g. -m PROTCATBLOSUM62F would use protcat with BLOSUM62 and
        # empirical frequencies
        '-m':ValuedParameter('-',Name='m',Delimiter=' '),
        # Specifies the name of the output file.
        '-n':ValuedParameter('-',Name='n',Delimiter=' '),
        # Specifies the name of the outgroup (or outgroups: comma-delimited,
        # no spaces, should be monophyletic).
        '-o':ValuedParameter('-',Name='o',Delimiter=' '),
        # Specified MultipleModel file name, in format:
        #    gene1 = 1-500
        #    gene2 = 501-1000
        #    (note: ranges can also be discontiguous, e.g. 1-100, 200-300,
        #     or can specify codon ranges as e.g. 1-100/3, 2-100/3, 3-100/3))
        '-q':ValuedParameter('-', Name='q', Delimiter=' '),
        # Name of the working directory where RAxML-V will write its output
        # files.
        '-w':ValuedParameter('-',Name='w',Delimiter=' '),
        # Constraint file name: allows a bifurcating Newick tree to be passed
        # in as a constraint file, other taxa will be added by parsimony.
        '-r':ValuedParameter('-',Name='r',Delimiter=' '),
        # Specify a random number seed for the parsimony inferences. This
        # allows you to reproduce your results and will help me debug the
        # program. This option HAS NO EFFECT in the parallel MPI version
        '-p':ValuedParameter('-',Name='p',Delimiter=' '),
        # specify the name of the alignment data file, in relaxed PHYLIP
        # format.
        '-s':ValuedParameter('-',Name='s',Delimiter=' '),
        # Specify a user starting tree file name in Newick format
        '-t':ValuedParameter('-',Name='t',Delimiter=' '),
        # Print the version
        '-v':FlagParameter('-',Name='v'),
        # Compute only randomized starting parsimony tree with RAxML, do not
        # optimize an ML analysis of the tree
        '-y':FlagParameter('-', Name='y'),
        # Multiple tree file, for use with -f b (to draw bipartitions onto the
        # common tree specified with -t)
        '-z':ValuedParameter('-', Name='z', Delimiter=' '),
        # Specifies number of runs on distinct starting trees.
        '-#':ValuedParameter('-', Name='#', Delimiter=' '),
        #Specify an integer number (random seed) to turn on rapid bootstrapping
        '-x':ValuedParameter('-', Name='x', Delimiter=' ')
    }
    _parameters = {}
    _parameters.update(_options)
    # Executable name and the "RAxML_<type>.<run name>" output-file pattern.
    _command = "raxmlHPC"
    _out_format = "RAxML_%s.%s"
    def _format_output(self, outfile_name, out_type):
        """ Prepend proper output prefix to output filename """
        outfile_name = self._absolute(outfile_name)
        outparts = outfile_name.split("/")
        outparts[-1] = self._out_format % (out_type, outparts[-1] )
        return '/'.join(outparts)
    def _input_as_seqs(self,data):
        """Format a list of raw sequences as FASTA lines numbered 1,2,3,..."""
        lines = []
        for i,s in enumerate(data):
            #will number the sequences 1,2,3,etc.
            lines.append(''.join(['>',str(i+1)]))
            lines.append(s)
        return self._input_as_lines(lines)
    def _input_as_lines(self,data):
        """Write lines to a temp file and point -s (alignment file) at it."""
        if data:
            self.Parameters['-s']\
                .on(super(Raxml,self)._input_as_lines(data))
        return ''
    def _input_as_string(self,data):
        """Makes data the value of a specific parameter
        This method returns the empty string. The parameter will be printed
        automatically once set.
        """
        # NOTE(review): '-in' is not defined in _options/_parameters above,
        # so this lookup raises KeyError if this handler is ever used --
        # confirm the intended parameter (likely '-s').
        if data:
            self.Parameters['-in'].on(str(data))
        return ''
    def _input_as_multiline_string(self, data):
        """Write a multiline string to a temp file and point -s at it."""
        if data:
            self.Parameters['-s']\
                .on(super(Raxml,self)._input_as_multiline_string(data))
        return ''
    def _absolute(self,path):
        """Return path made absolute against -w (if set) or WorkingDir."""
        path = FilePath(path)
        if isabs(path):
            return path
        elif self.Parameters['-w'].isOn():
            return self.Parameters['-w'].Value + path
        else:
            return self.WorkingDir + path
    def _log_out_filename(self):
        """Path of RAxML_log.<run name>; requires -n to be set."""
        if self.Parameters['-n'].isOn():
            return self._format_output(str(self.Parameters['-n'].Value), "log")
        else:
            raise ValueError, "No output file specified."
    def _info_out_filename(self):
        """Path of RAxML_info.<run name>; requires -n to be set."""
        if self.Parameters['-n'].isOn():
            return self._format_output(str(self.Parameters['-n'].Value), "info")
        else:
            raise ValueError, "No output file specified."
    def _parsimony_tree_out_filename(self):
        """Path of RAxML_parsimonyTree.<run name>; requires -n to be set."""
        if self.Parameters['-n'].isOn():
            return self._format_output(str(self.Parameters['-n'].Value), "parsimonyTree")
        else:
            raise ValueError, "No output file specified."
    def _result_tree_out_filename(self):
        """Path of RAxML_result.<run name>; requires -n to be set."""
        if self.Parameters['-n'].isOn():
            return self._format_output(str(self.Parameters['-n'].Value), "result")
        else:
            raise ValueError, "No output file specified."
    def _result_bootstrap_out_filename(self):
        """Path of RAxML_bootstrap.<run name>; requires -n to be set."""
        if self.Parameters['-n'].isOn():
            return self._format_output(str(self.Parameters['-n'].Value), \
                "bootstrap")
        else:
            raise ValueError, "No output file specified"
    def _checkpoint_out_filenames(self):
        """
        RAxML generates a crapload of checkpoint files so need to
        walk directory to collect names of all of them.
        """
        out_filenames = []
        if self.Parameters['-n'].isOn():
            out_name = str(self.Parameters['-n'].Value)
            walk_root = self.WorkingDir
            # -w overrides WorkingDir as the directory RAxML writes into.
            if self.Parameters['-w'].isOn():
                walk_root = str(self.Parameters['-w'].Value)
            for tup in walk(walk_root):
                dpath, dnames, dfiles = tup
                if dpath == walk_root:
                    # Only scan the top level; checkpoint files carry both
                    # the run name and the word "checkpoint".
                    for gen_file in dfiles:
                        if out_name in gen_file and "checkpoint" in gen_file:
                            out_filenames.append(walk_root + gen_file)
                    break
        else:
            raise ValueError, "No output file specified."
        return out_filenames
    def _get_result_paths(self,data):
        """Map RAxML output files to ResultPath objects.

        Always exposes 'Info'; with -k on, exposes 'Bootstrap', otherwise
        'Log', 'ParsimonyTree' and 'Result'; plus one 'Checkpoint<i>' entry
        per checkpoint file found.
        """
        result = {}
        result['Info'] = ResultPath(Path=self._info_out_filename(),
                                    IsWritten=True)
        if self.Parameters['-k'].isOn():
            result['Bootstrap'] = ResultPath(
                            Path=self._result_bootstrap_out_filename(),
                            IsWritten=True)
        else:
            result['Log'] = ResultPath(Path=self._log_out_filename(),
                                    IsWritten=True)
            result['ParsimonyTree'] = ResultPath(
                                    Path=self._parsimony_tree_out_filename(),
                                    IsWritten=True)
            result['Result'] = ResultPath(
                            Path=self._result_tree_out_filename(),
                            IsWritten=True)
        for checkpoint_file in self._checkpoint_out_filenames():
            checkpoint_num = checkpoint_file.split(".")[-1]
            try:
                checkpoint_num = int(checkpoint_num)
            except Exception, e:
                # NOTE(review): the message below is never %-formatted with
                # the filename (the literal "%s" is raised), and the original
                # exception e is discarded -- consider
                # "... " % checkpoint_file and exception chaining.
                raise ValueError, "%s does not appear to be a valid checkpoint file"
            result['Checkpoint%d' % checkpoint_num] = ResultPath(
                        Path=checkpoint_file,
                        IsWritten=True)
        return result
#SOME FUNCTIONS TO EXECUTE THE MOST COMMON TASKS
def raxml_alignment(align_obj,
                    raxml_model="GTRCAT",
                    params={},
                    SuppressStderr=True,
                    SuppressStdout=True):
    """Run raxml on alignment object

    align_obj: Alignment object
    raxml_model: model string passed to RAxML as -m (default "GTRCAT")
    params: you can set any params except -w, -n and -m (those are set
        here). The caller's dict is left unmodified.
    SuppressStderr/SuppressStdout: forwarded to the Raxml app controller.

    returns: tuple (phylonode,
                    parsimonyphylonode,
                    log likelihood,
                    total exec time)
    """
    # Work on a private copy: the previous version mutated the shared
    # mutable default argument (and any caller-supplied dict), so the
    # -w/-n/-m settings leaked between calls.
    params = dict(params)
    # generate temp filename for output
    params["-w"] = "/tmp/"
    params["-n"] = get_tmp_filename().split("/")[-1]
    params["-m"] = raxml_model
    ih = '_input_as_multiline_string'
    seqs, align_map = align_obj.toPhylip()
    # set up command
    raxml_app = Raxml(
                    params=params,
                    InputHandler=ih,
                    WorkingDir=None,
                    SuppressStderr=SuppressStderr,
                    SuppressStdout=SuppressStdout)
    # run raxml
    ra = raxml_app(seqs)
    # generate tree
    tree_node = DndParser(ra["Result"])
    # generate parsimony tree
    parsimony_tree_node = DndParser(ra["ParsimonyTree"])
    # extract log likelihood from log file: each line is
    # "<exec time> <log likelihood>"; keep the last likelihood and the
    # summed execution time.
    log_file = ra["Log"]
    total_exec_time = exec_time = log_likelihood = 0.0
    for line in log_file:
        exec_time, log_likelihood = map(float, line.split())
        total_exec_time += exec_time
    # remove output files
    ra.cleanUp()
    return tree_node, parsimony_tree_node, log_likelihood, total_exec_time
def build_tree_from_alignment(aln, moltype, best_tree=False, params=None):
    """Returns a tree from Alignment object aln.

    aln: an xxx.Alignment object, or data that can be used to build one.
    moltype: cogent.core.moltype.MolType object
    best_tree: best_tree suppport is currently not implemented
    params: dict of parameters to pass in to the RAxML app controller.

    The result will be an xxx.PhyloNode tree, or None if tree fails.
    """
    if best_tree:
        raise NotImplementedError
    # Copy instead of mutating the caller's dict / a shared mutable default.
    params = dict(params) if params else {}
    if '-m' not in params:
        if moltype == DNA or moltype == RNA:
            params["-m"] = 'GTRMIX'
        elif moltype == PROTEIN:
            params["-m"] = 'PROTMIXGTR'
        else:
            # Call-form raise is valid on both Python 2 and 3; the original
            # `raise ValueError, "..."` is Python-2-only syntax.
            raise ValueError("Moltype must be either DNA, RNA, or PROTEIN")
    if not hasattr(aln, 'toPhylip'):
        aln = Alignment(aln)
    seqs, align_map = aln.toPhylip()
    # generate temp filename for output
    params["-w"] = "/tmp/"
    params["-n"] = get_tmp_filename().split("/")[-1]
    params["-k"] = True
    params["-x"] = randint(1, 100000)
    ih = '_input_as_multiline_string'
    raxml_app = Raxml(params=params,
                      InputHandler=ih,
                      WorkingDir=None,
                      SuppressStderr=True,
                      SuppressStdout=True)
    raxml_result = raxml_app(seqs)
    tree = DndParser(raxml_result['Bootstrap'], constructor=PhyloNode)
    # Map the short phylip names back to the original sequence names.
    for node in tree.tips():
        node.Name = align_map[node.Name]
    raxml_result.cleanUp()
    return tree
| StarcoderdataPython |
5123437 | <reponame>MichaelTROEHLER/datadog-api-client-python<filename>tests/v1/test_synthetics_trigger_ci_tests_response_results.py<gh_stars>0
# coding: utf-8
# Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License.
# This product includes software developed at Datadog (https://www.datadoghq.com/).
# Copyright 2019-Present Datadog, Inc.
from __future__ import absolute_import
import sys
import unittest
import datadog_api_client.v1
try:
from datadog_api_client.v1.model import synthetics_device_id
except ImportError:
synthetics_device_id = sys.modules[
'datadog_api_client.v1.model.synthetics_device_id']
from datadog_api_client.v1.model.synthetics_trigger_ci_tests_response_results import SyntheticsTriggerCITestsResponseResults
class TestSyntheticsTriggerCITestsResponseResults(unittest.TestCase):
    """Unit test stubs for SyntheticsTriggerCITestsResponseResults."""

    def setUp(self):
        """No fixtures required for these stubs."""
        pass

    def tearDown(self):
        """Nothing to clean up."""
        pass

    def testSyntheticsTriggerCITestsResponseResults(self):
        """Stub test for SyntheticsTriggerCITestsResponseResults."""
        # FIXME: construct object with mandatory attributes with example values
        # model = SyntheticsTriggerCITestsResponseResults()  # noqa: E501
        pass
if __name__ == '__main__':
    # Allow running this test module directly with the unittest CLI.
    unittest.main()
| StarcoderdataPython |
1926422 | <reponame>ormsbya/ProteomicsUtils
"""
Generates plots of the TPE reactivity (according to Log2 Cys/NonCys) for all peptides corresponding to a given protein across samples e.g. denaturation curve or time intervals.
"""
import os, sys
from ProteomicsUtils.LoggerConfig import logger_config
from ProteomicsUtils import StatUtils, CalcUtils, FileHandling, DataWrangling, PlotUtils
import matplotlib.pyplot as plt
logger = logger_config(__name__)
logger.info("Import Successful")
def main(input_path, output_path, sample_name):
    """
    Master function to apply a list of functions to the input file, generating
    a urea denaturation curve for each protein.

    Parameters:
        input_path: string
            input path for the file to be processed
        output_path: string
            output path for which any output generated by functions will be saved
        sample_name: string
            sample name associated with the file to be processed.

    Returns:
        summary_data: DataFrame
            per-peptide table (ProteinID, Sequence, *_Cys/NonCys ratio columns)
            produced by the pipeline; intermediate tables are also written to
            an excel workbook at output_path.
    """
    #av_summary = do_funcs(input_path, output_path, sample_name)
    logger.info('Input Path: {}'.format(input_path))
    logger.info(f'Preparing to process {sample_name}....')
    # Load the raw export, keep only quantified peptides, and split them by
    # cysteine content.
    total_data = FileHandling.file_reader(input_path)
    quant_data, col_list = DataWrangling.quantified_data(total_data)
    two_unique_cys, cys_pep, non_cys_pep = DataWrangling.Unique_Cys_sorter(quant_data)
    #set index of summary dataframes to the protein accession
    cys_pep = cys_pep.set_index(["Master Protein Accessions"], drop=False)
    non_cys_pep = non_cys_pep.set_index(["Master Protein Accessions"], drop=False)
    # Average the non-cys peptides per protein, then normalise each cys
    # peptide against that average to get the Cys/NonCys ratios.
    non_cys_Av = CalcUtils.non_cys_AR(cys_pep, non_cys_pep)
    summary_table = CalcUtils.cys_div_noncys(cys_pep, non_cys_Av, col_list)
    #Saving all dataframes so far to excel results document
    data_frames = [total_data, quant_data, two_unique_cys, cys_pep, non_cys_pep, summary_table]
    sheetnames = ['Total Data', 'Quant Data', 'TwoUniqueCYS', 'CysPep', 'NonCysPep', 'Summary Table']
    FileHandling.df_to_excel(output_path, sheetnames, data_frames)
    #collect only columns of interest
    summary_table.reset_index(drop=True, inplace=True)
    ratio_col = [col for col in summary_table.columns if '_Cys/NonCys' in col]
    select_col = ['Master Protein Accessions', 'Annotated Sequence'] + ratio_col
    summary_data = summary_table[select_col]
    logger.debug(summary_data)
    #rename columns to simple names
    summary_data = summary_data.rename(columns = {'Master Protein Accessions':'ProteinID', 'Annotated Sequence':'Sequence'})
    #for peptides seen more than once in a sample, take average ratio to give only unique ratios for each peptide
    av_summary = CalcUtils.single_element_av(summary_data, 'Sequence')
    ##Filtering for proteins which have too many missing values, and generating plots.
    logger.info('Filtering for missing values...')
    #removing rows with >thresh Nans
    filtered_consensus = DataWrangling.filter_NaNs(av_summary, filter_type='total', threshold=0)
    #preparing variables and plotting scatter for each protein
    logger.info('Creating scatter plots...')
    # NOTE(review): assumes the ratio columns correspond, in order, to these
    # 13 urea concentrations -- confirm against the input workbook.
    urea_conc = [0, 0.5, 1, 1.5, 2, 2.5, 3, 3.5, 4, 4.5, 5, 5.5, 6]
    fig_dict = PlotUtils.multirow_scatter(filtered_consensus,
                                          key='ProteinID',
                                          col_head='Sequence',
                                          x_vals=urea_conc,
                                          x_label='Urea Conc',
                                          y_label='Cys_NonCys')
    #to save all figures to pdf
    FileHandling.fig_to_pdf(fig_dict, output_path+'Thresholded_')
    logger.info('Save figs to pdf complete')
    #to show all figures as output,
    for protein, figure in fig_dict.items():
        plt.show(figure)
    Threshold_0 = filtered_consensus
    dfs = [Threshold_0]
    sheetnames = ['Total_0']
    FileHandling.df_to_excel(output_path=output_path+'Thresholded_', sheetnames = sheetnames, data_frames=dfs)
    return summary_data
if __name__ == "__main__":
    # setting defaults -- NOTE(review): analyst-specific absolute Windows
    # paths; adjust before running on another machine.
    input_path = 'C:/Users/dezer_000/Desktop/Current Analysis/180501_Urea_Exp8_Analysis/180523_Test_new_module/170529_Dezerae_MultiConsensus_Peptides_2UP.xlsx'
    output_path = 'C:/Users/dezer_000/Desktop/Current Analysis/180501_Urea_Exp8_Analysis/180523_Test_new_module/'
    sample_name = 'Urea Denaturation (Exp 8)'
    main(input_path, output_path, sample_name)
| StarcoderdataPython |
8049689 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import time
from loguru import logger
class Moment:
    """Thin wrapper around a unix timestamp with parsing/formatting helpers."""

    def __init__(self, unix_timestamp: int = None, format_time: str = None):
        # Explicit None check: the original `unix_timestamp or int(time.time())`
        # treated a legitimate timestamp of 0 (the epoch) as "missing".
        self.unix_timestamp = int(time.time()) if unix_timestamp is None else unix_timestamp
        self.format_time = format_time or '%Y-%m-%d %H:%M:%S'

    @classmethod
    def from_datetime(cls, datetime_value: str, format_time: str):
        """Parse `datetime_value` (interpreted as local time) with `format_time`.

        On parse failure the error is logged and the timestamp falls back to 0.
        """
        try:
            unix_timestamp = int(time.mktime(datetime.datetime.strptime(datetime_value, format_time).timetuple()))
        except Exception as e:
            logger.exception(e)
            unix_timestamp = 0
        return Moment(unix_timestamp, format_time)

    def to_unix_timestamp(self):
        """Return the raw unix timestamp in seconds."""
        return self.unix_timestamp

    def to_datetime(self) -> datetime.datetime:
        # NOTE: naive *local*-time datetime, while to_string() formats in UTC;
        # this pre-existing asymmetry is kept for backward compatibility.
        return datetime.datetime.fromtimestamp(self.unix_timestamp)

    def to_string(self) -> str:
        """Format the timestamp (UTC) with this instance's format string."""
        return datetime.datetime.utcfromtimestamp(self.unix_timestamp).strftime(self.format_time)

    def add_days(self, days: int):
        """Advance by `days` days, snap to 23:59:59 local time of that day,
        and return self (fluent)."""
        datetime_value = datetime.datetime.fromtimestamp(self.unix_timestamp)
        time_delta = datetime_value + datetime.timedelta(days=days)
        time_delta = time_delta.replace(minute=59, hour=23, second=59)
        # datetime.timestamp() is portable; the original strftime('%s') is a
        # glibc extension that fails on Windows.
        self.unix_timestamp = int(time_delta.timestamp())
        return self
| StarcoderdataPython |
6569453 | <gh_stars>100-1000
# coding=utf-8
# Copyright 2019 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple script to convert CSV output from rf_benchmark to Markdown format.
The input CSV should have the following fields:
- CNN
- input resolution
- end_point
- FLOPS (Billion)
- RF size hor
- RF size ver
- effective stride hor
- effective stride ver
- effective padding hor
- effective padding ver
Since usually in all cases the parameters in the horizontal and vertical
directions are the same, this is assumed by this script, which only prints one
of them to the Markdown file.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import csv
import sys
from absl import app
cmd_args = None
def main(unused_argv):
    """Stream the rf_benchmark CSV into a Markdown table at cmd_args.markdown_path."""
    header = ('CNN | resolution | end-point | FLOPs (Billion) | RF | '
              'effective stride | effective padding\n')
    divider = (':--------------------: | :----------: | :---------------: | '
               ':---------------: | :-----: | :----: | :----:\n')
    with open(cmd_args.markdown_path, 'w') as md_file:
        # Write table header and field size.
        md_file.write(header)
        md_file.write(divider)
        with open(cmd_args.csv_path) as csv_file:
            for record in csv.DictReader(csv_file):
                # Horizontal and vertical parameters must agree; only the
                # horizontal value is emitted.
                for hor, ver in (('RF size hor', 'RF size ver'),
                                 ('effective stride hor', 'effective stride ver'),
                                 ('effective padding hor', 'effective padding ver')):
                    assert record[hor] == record[ver]
                md_file.write('%s|%s|%s|%s|%s|%s|%s\n' %
                              (record['CNN'], record['input resolution'],
                               record['end_point'], record['FLOPs (Billion)'],
                               record['RF size hor'],
                               record['effective stride hor'],
                               record['effective padding hor']))
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Register a converter so boolean flags can be written as 'true'/'false'.
    parser.register('type', 'bool', lambda v: v.lower() == 'true')
    parser.add_argument(
        '--csv_path',
        type=str,
        default='/tmp/rf.csv',
        help='Path where CSV output of rf_benchmark was saved.')
    parser.add_argument(
        '--markdown_path',
        type=str,
        default='/tmp/rf.md',
        help='Path where Markdown output will be saved.')
    # Unknown flags are passed through to absl's app.run untouched.
    cmd_args, unparsed = parser.parse_known_args()
    app.run(main=main, argv=[sys.argv[0]] + unparsed)
| StarcoderdataPython |
9662794 | <reponame>roeselfa/FeatureLearningBasedDistanceMetrics<gh_stars>1-10
import AdvancedBA as ba
import matplotlib.pylab as plt
import seaborn as sns
from pm4py.objects.log.adapters.pandas import csv_import_adapter
from pm4py.objects.conversion.log import factory as conversion_factory
from pm4py.util import constants
import sys  # needed for sys.argv below; missing from the original imports

plt.rcParams.update({'font.size': 26})

caseIDKey = "Case ID"
activityKey = "Activity"

basePath = sys.argv[1]  # Path to the directory where all the event logs are located
savePath = sys.argv[2]  # Where to save the graphic
logNames = ['', '', '']  # the logs that should be compared in regards to Advanced BA (e.g. ["ApplicationProcess", "SepsisCases", "TravelClaims"])
modes = ['', '']  # the different PRETSA instances (e.g. {"Levenshtein", "FDM"])
kRange = [2, 4, 8, 16, 32, 64, 128, 256]  # range for privacy parameter k
tRange = [0.1, 0.25, 0.5, 0.75, 1]  # range for privacy parameter t
separator = ";"  # some .csv are separated by ";", some by ","
def main():
    """Compute Behavioral Appropriateness scores for every (log, mode, t, k)
    combination and render them as a grid of heatmaps saved to savePath."""
    matrices = list()
    for logName in logNames:
        originalLogPath = basePath + logName + '/' + logName + '.csv'
        originalFrame = csv_import_adapter.import_dataframe_from_path(originalLogPath, sep=separator)
        originalLog = conversion_factory.apply(originalFrame,
                                               parameters={constants.PARAMETER_CONSTANT_CASEID_KEY: caseIDKey,
                                                           constants.PARAMETER_CONSTANT_ACTIVITY_KEY: activityKey})
        originalTraces = getTracesFromLog(originalLog)
        uniqueEventList = getEventList(originalTraces)
        # Relations of the unsanitized log are the baseline for every score.
        originalFollowsRelations = ba.getFollowsRelations(allEvents=uniqueEventList, traces=originalTraces)
        originalPrecedesRelations = ba.getPrecedesRelations(allEvents=uniqueEventList, traces=originalTraces)
        for m in modes:
            matrix = list()
            for t in tRange:
                column = list()
                for k in kRange:
                    sanitizedLogPath = basePath + logName + '/' + logName + '_t' + str(t) + '_k' + str(k) + '_' + m + '.csv'
                    sanitizedFrame = csv_import_adapter.import_dataframe_from_path(sanitizedLogPath, sep=";")  # PRETSA sanitized logs always use ';'
                    sanitizedLog = conversion_factory.apply(sanitizedFrame,
                                                            parameters={
                                                                constants.PARAMETER_CONSTANT_CASEID_KEY: caseIDKey,
                                                                constants.PARAMETER_CONSTANT_ACTIVITY_KEY: activityKey})
                    sanitizedTraces = getTracesFromLog(sanitizedLog)
                    sanitizedFollowsRelations = ba.getFollowsRelations(allEvents=uniqueEventList,
                                                                       traces=sanitizedTraces)
                    sanitizedPrecedesRelations = ba.getPrecedesRelations(allEvents=uniqueEventList,
                                                                         traces=sanitizedTraces)
                    baScore = ba.getBehavioralAppropriatenessScore(originalFollows=originalFollowsRelations,
                                                                  originalPrecedes=originalPrecedesRelations,
                                                                  sanitizedFollows=sanitizedFollowsRelations,
                                                                  sanitizedPrecedes=sanitizedPrecedesRelations,
                                                                  events=uniqueEventList)
                    print("Done with", logName, m, k, t, ':', baScore)
                    column.append(baScore)
                matrix.append(column)
            title = logName + ' (' + m + ')'
            matrices.append((title, matrix))
    fig, ax = plt.subplots(len(logNames), len(modes), figsize=(6 * len(modes), 4 * len(logNames)), sharey=True,
                           sharex=True)
    for i in range(len(matrices)):
        # NOTE(review): the divisor/modulus of 2 assumes exactly two modes --
        # confirm this matches len(modes) before adding more.
        k = int(i / 2)
        j = i % 2
        name = matrices[i][0]
        matrix = matrices[i][1]
        xLabels = kRange
        yLabels = tRange
        im_normal = sns.heatmap(matrix, ax=ax[k][j], vmin=0, vmax=1, cmap="Greens", cbar=False)
        ax[k][j].set_yticklabels(yLabels, rotation=0)
        ax[k][j].set_title(name)
        ax[k][0].set_ylabel('t')
    # Axis labels/ticks only on the bottom row; one shared colorbar for all.
    ax[-1][-1].set_xlabel('k')
    ax[-1][-2].set_xlabel('k')
    ax[-1][-1].set_xticklabels(xLabels, rotation=30)
    ax[-1][-2].set_xticklabels(xLabels, rotation=30)
    mappable_normal = im_normal.get_children()[0]
    fig.subplots_adjust(bottom=-0.01)
    plt.colorbar(mappable_normal, ax=ax[:], orientation='horizontal', cmap="Greens", pad=0.102)
    plt.savefig(savePath + '.png', dpi=92, bbox_inches='tight')
def getTracesFromLog(log, activity_key=None):
    """Flatten a pm4py event log into a list of traces (lists of activity names).

    activity_key defaults to the module-level `activityKey`, so existing
    callers are unaffected; pass a key explicitly to override the global.
    """
    if activity_key is None:
        activity_key = activityKey
    resultLog = list()
    for trace in log:
        resultLog.append([event[activity_key] for event in trace])
    return resultLog
def getEventList(traceSet):
    """Return the unique events across all traces, in first-seen order."""
    seen = set()
    events = list()
    for trace in traceSet:
        for event in trace:
            # Set membership is O(1); the original list scan was O(n) per event.
            if event not in seen:
                seen.add(event)
                events.append(event)
    return events
def getDiffMatrix(m1, m2):
    """Element-wise difference m1 - m2 of two equally-shaped matrices.

    Raises ValueError if the matrices differ in row count or any row pair
    differs in length (the original only checked row lengths, so a shorter
    m2 raised IndexError instead).
    """
    if len(m1) != len(m2):
        raise ValueError("Matrices should have same number of rows!")
    diffMatrix = list()
    for row1, row2 in zip(m1, m2):
        if len(row1) != len(row2):
            raise ValueError("Rows should have same length!")
        diffMatrix.append([a - b for a, b in zip(row1, row2)])
    return diffMatrix
# Script entry: run the full analysis, then signal completion on stdout.
main()
print("Done")
| StarcoderdataPython |
6475568 | <reponame>EVEprosper/ProsperDatareader
"""prosper.datareader.news: utilities for looking at news data"""
import pandas as pd
import prosper.datareader.robinhood as robinhood # TODO: simplify import
import prosper.datareader.yahoo as yahoo
import prosper.datareader.intrinio as intrinio
import prosper.datareader.exceptions as exceptions
import prosper.datareader.config as config
def company_news_rh(
        ticker,
        page_limit=robinhood.news.PAGE_HARDBREAK,
        logger=config.LOGGER
):
    """Fetch Robinhood news items for a company as a DataFrame.

    Args:
        ticker (str): stock ticker for desired company
        page_limit (int, optional): how many pages to allow in call
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): tabularized news data, with
        `published_at` parsed into datetimes
    """
    logger.info('Fetching company raw data feed for `%s` -- ROBINHOOD', ticker)
    raw_feed = robinhood.news.fetch_company_news_rh(
        ticker.upper(),
        page_limit=page_limit,
        logger=logger
    )
    logger.info('--Pushing data into Pandas')
    frame = pd.DataFrame(raw_feed)
    frame['published_at'] = pd.to_datetime(frame['published_at'])
    logger.debug(frame)
    return frame
def company_news_intrinio(
        ticker,
        username='',
        password='',
        public_key='',
        endpoint_name='news',
        logger=config.LOGGER,
):
    """get news items from Intrinio

    Notes:
        credentials required from: https://intrinio.com/account
        username/password OR public_key, not both

    Args:
        ticker (str): stock ticker for desired company
        username (str): intrinio username
        password (str): intrinio password
        public_key (str): intrinio public_key
        endpoint_name (str): intrinio API endpoint to query (default 'news')
        logger (:obj:`logging.logger`): logging handle

    Returns:
        pandas.DataFrame: tabularized data for news

    Raises:
        exceptions.InvalidAuth: invalid auth pattern
        requests.exceptions: HTTP/connection errors

    """
    logger.info('Fetching company raw data feed for `%s` -- INTRININO', ticker)
    connection = intrinio.auth.IntrinioHelper(
        username=username,
        password=password,
        public_key=public_key,
    )
    raw_data = connection.request(
        endpoint_name,
        params={'ticker': ticker.upper()}
    )
    logger.info('--pushing data into Pandas')
    news_df = pd.DataFrame(raw_data['data'])
    # Parse publication timestamps into proper datetimes.
    news_df['publication_date'] = pd.to_datetime(news_df['publication_date'])
    logger.debug(news_df)
    return news_df
def company_headlines_yahoo(
        ticker,
        logger=config.LOGGER,
):
    """Fetch Yahoo Finance headlines for a single company.

    Notes:
        Wraps https://developer.yahoo.com/finance/company.html

    Args:
        ticker (str): stock ticker for desired company
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): tabularized headline data, with
        `published` parsed into datetimes

    Raises:
        requests.exceptions: HTTP/connection errors

    """
    logger.info('Fetching company raw data feed for `%s` -- yahoo', ticker)
    headlines = yahoo.news.fetch_finance_headlines_yahoo(ticker)
    logger.info('--pushing data into Pandas')
    frame = pd.DataFrame(headlines)
    frame['published'] = pd.to_datetime(frame['published'])
    return frame
def industry_headlines_yahoo(
        ticker,
        logger=config.LOGGER,
):
    """Fetch Yahoo Finance headlines for the industry segment of a ticker.

    Notes:
        Wraps https://developer.yahoo.com/finance/industry.html

    Args:
        ticker (str): stock ticker for desired company
        logger (:obj:`logging.logger`, optional): logging handle

    Returns:
        (:obj:`pandas.DataFrame`): tabularized headline data, with
        `published` parsed into datetimes

    Raises:
        requests.exceptions: HTTP/connection errors

    """
    logger.info('Fetching industry raw data feed for `%s` -- yahoo', ticker)
    headlines = yahoo.news.fetch_finance_headlines_yahoo(
        ticker,
        uri=yahoo.news.INDUSTRY_NEWS_URL,
    )
    logger.info('--pushing data into Pandas')
    frame = pd.DataFrame(headlines)
    frame['published'] = pd.to_datetime(frame['published'])
    return frame
| StarcoderdataPython |
9789463 | <reponame>qgoestch/sinecity_testcases
# -*- coding: utf-8 -*-
##
# \file errors_calc2_modes.py
# \title Calculation of the errors and norms for the case2:
# modes in 2D square box.
# \author <NAME>
# \version 0.1
# \license BSD 3-Clause License
# \inst UMRAE (Ifsttar Nantes), LAUM (Le Mans Université)
# \date 2017, 12 Feb.
##
import numpy as np
from scipy import special as sp
import os
import site
# functools.reduce makes this work on Python 3 as well (on Python 2 `reduce`
# was a builtin; the original relied on that and breaks on Python 3).
from functools import reduce

# Rebuild this file's directory path and register the sibling data_plotting
# package so its modules can be imported below.
base_path = reduce(lambda l, r: l + os.path.sep + r,
                   os.path.dirname(os.path.realpath(__file__)).split(
                       os.path.sep))
data_plotting_path = os.path.join(base_path.rsplit(os.sep, 2)[0],
                                  'data_plotting')
site.addsitedir(data_plotting_path)
from plot_time_signals import plot_ts, plot_ts_basic
from plot_errors_norms import plot_error_basic
def error_calc2(h_set, case):
    """
    Compute error norms and observed orders of accuracy for case 2
    (modes in a 2D square box) for both the FDTD and TLM methods, and
    save them as .npy files in the results directory.

    :param h_set: spatial step sequence (m).
    :type h_set: list of floats
    :param case: integer that sorts of the saved folders in the results dir.
    :type case: int

    :return: nothing; writes one_norm/two_norm/max_norm and the
        corresponding orders of accuracy per method to disk.
    """
    for num_meth in ['fdtd', 'tlm']:
        # `os` is already imported at module level; the original re-imported
        # it inside the loop (twice) and built an unused fd path.
        res_path = os.path.join(base_path.rsplit(os.sep, 1)[0], 'results',
                                'case%i' % case, num_meth)
        one_norm = np.zeros((len(h_set)))
        two_norm = np.zeros((len(h_set)))
        max_norm = np.zeros((len(h_set)))
        ord_acc_one = np.zeros((len(h_set) - 1))
        ord_acc_two = np.zeros((len(h_set) - 1))
        ord_acc_max = np.zeros((len(h_set) - 1))
        for l in range(len(h_set)):
            p_num = np.load(os.path.join(res_path, 'p_%s_%i.npy'
                                         % (num_meth, l)))
            p_an = np.load(os.path.join(res_path, 'p_an_%i.npy' % l))
            # Absolute pointwise error on the interior nodes, scaled by the
            # cell area h**2 before taking the norms.
            error = np.abs(p_num[1:-1, 1:-1] - p_an[1:-1, 1:-1])
            one_norm[l] = np.linalg.norm(error * h_set[l] ** 2, ord=1)
            two_norm[l] = np.linalg.norm(error * h_set[l] ** 2, ord=2)
            max_norm[l] = np.linalg.norm(error * h_set[l] ** 2, ord=np.inf)
        # Observed order of accuracy between consecutive grid refinements.
        for l in range(len(h_set) - 1):
            ord_acc_one[l] = np.log(
                one_norm[l + 1] / one_norm[l]) / np.log(
                h_set[l + 1] / h_set[l])
            ord_acc_two[l] = np.log(
                two_norm[l + 1] / two_norm[l]) / np.log(
                h_set[l + 1] / h_set[l])
            ord_acc_max[l] = np.log(
                max_norm[l + 1] / max_norm[l]) / np.log(
                h_set[l + 1] / h_set[l])
        if not os.path.exists(res_path):
            os.makedirs(res_path)
        np.save(os.path.join(res_path, 'one_norm_%s.npy' % num_meth),
                one_norm)
        np.save(os.path.join(res_path, 'two_norm_%s.npy' % num_meth),
                two_norm)
        np.save(os.path.join(res_path, 'max_norm_%s.npy' % num_meth),
                max_norm)
        np.save(os.path.join(res_path, 'ord_acc_one_%s.npy' % num_meth),
                ord_acc_one)
        np.save(os.path.join(res_path, 'ord_acc_two_%s.npy' % num_meth),
                ord_acc_two)
        np.save(os.path.join(res_path, 'ord_acc_max_%s.npy' % num_meth),
                ord_acc_max)
        # plot_error_basic(h_set, one_norm, two_norm, max_norm,
        #                  ord_acc_one, ord_acc_two, ord_acc_max,
        #                  case, True)
    # print() call form works on both Python 2 and 3 (original used the
    # Python-2-only print statement).
    print('end')
| StarcoderdataPython |
1784483 | <gh_stars>0
# -*- coding: utf-8 -*-
"""Application configuration."""
import os, datetime
import simplekv.memory
class Config(object):
    """Base configuration shared by every environment."""
    #SECRET_KEY = os.environ.get('MYFLASKAPP_SECRET', 'secret-key') # TODO: Change me
    SECRET_KEY = '<KEY>' #csrf
    EMAIL_KEY = '<KEY>' # email verification url server sign
    EMAIL_SALT = '5yWHy0BJhZHsmztTgEWBN85e1pfcST0NeRqHgZnYbpReX65C8zHEuf7Ll5JDcRMK'# email verification url server salt
    API_SALT = 'rZaz0baIqkRcGgHJkF3wlrIr3MSXlfuxZn0FFyGJcDIwehSaaMxk7IgoiHeG3XLy' # api server signature for signing tokens.
    APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
    PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
    BCRYPT_LOG_ROUNDS = 13
    ASSETS_DEBUG = False
    DEBUG_TB_ENABLED = False # Disable Debug toolbar
    DEBUG_TB_INTERCEPT_REDIRECTS = False
    CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
    SQLALCHEMY_TRACK_MODIFICATIONS = False
    MAPBOX_ACCESS_TOKEN = '<KEY>'
    # jwt -- NOTE(review): the blacklist lives in an in-memory DictStore, so
    # token revocations do not survive a process restart.
    JWT_BLACKLIST_ENABLED = True
    JWT_BLACKLIST_STORE = simplekv.memory.DictStore()
    JWT_BLACKLIST_TOKEN_CHECKS = 'refresh'
    JWT_ACCESS_TOKEN_EXPIRES = datetime.timedelta(minutes=30)
    # mail settings
    MAIL_SERVER = 'smtp.gmail.com'
    MAIL_PORT = 587
    MAIL_USE_TLS = True
    MAIL_USE_SSL = False
    # gmail authentication -- credentials intentionally blank; supply via a
    # secret store rather than committing them.
    MAIL_USERNAME = ''
    MAIL_PASSWORD = ''
    # mail accounts
    MAIL_DEFAULT_SENDER = ''
    # async celery -enable fernet encryption
    CELERY_BROKER_URL = 'amqp://guest:@127.0.0.1:5672//'
    CELERY_RESULT_BACKEND = 'amqp://guest:@127.0.0.1:5672//'
    # mobile (Twilio SMS)
    TWILIO_ACCOUNT_SID = 'AC<KEY>'
    TWILIO_AUTH_TOKEN = '<PASSWORD>'
    TWILIO_NUMBER = '+12048099756'
    # compression (flask-compress)
    COMPRESS_MIMETYPES = 'application/json'
    COMPRESS_LEVEL = 6
    COMPRESS_MIN_SIZE = 500
class ProdConfig(Config):
    """Production configuration."""
    ENV = 'prod'
    DEBUG = False
    # NOTE(review): URI contains anonymization artifacts (<EMAIL>) and an
    # inline credential -- must come from the environment in a real deploy.
    SQLALCHEMY_DATABASE_URI = 'postgres://rsabrgeoknimft:XEkvI6vNbQp7D20xrK-Cklr8xo@ec2-204-236-228-77.compute-1.<EMAIL>.com:5432/dck08qjrmev1uv' # TODO: Change me
    DEBUG_TB_ENABLED = False # Disable Debug toolbar
    # Harden cookies and enable CSRF protection in production.
    SESSION_COOKIE_SECURE = True
    SESSION_COOKIE_HTTPONLY = True
    WTF_CSRF_ENABLED = True
    BCRYPT_LOG_ROUNDS = 13
class DevConfig(Config):
    """Development configuration."""
    ENV = 'dev'
    DEBUG = True
    DB_NAME = 'dev.db'
    # Put the db file in project root
    DB_PATH = os.path.join(Config.PROJECT_ROOT, DB_NAME)
    SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(DB_PATH) # memorydb 'sqlite://'
    DEBUG_TB_ENABLED = True
    CACHE_TYPE = 'simple' # Can be "simple", "memcached", "redis", etc.
    WTF_CSRF_ENABLED = False  # eases manual form testing during development
    TRAP_HTTP_EXCEPTIONS = True
class TestConfig(Config):
    """Test configuration."""
    TESTING = True
    DEBUG = True
    # In-memory SQLite keeps the suite fast and isolated.
    SQLALCHEMY_DATABASE_URI = 'sqlite://'
    BCRYPT_LOG_ROUNDS = 4 # For faster tests; needs at least 4 to avoid "ValueError: Invalid rounds"
    WTF_CSRF_ENABLED = False # Allows form testing
| StarcoderdataPython |
# Twitch chat-bot account configuration.

botnick = "botsnamehere"
# twitch username the bot will use
trustedppl = ["trusted user", "another trusted user"] # NO spaces in the username
# the people you trust to use certain commands
channeltojoin = "yourchannelnamehere"
# the twitch channel to join
oa = "oauth:abc123abc123"
# your oauth key for the user you chose, use something like http://www.twitchapps.com/tmi/ to get an oauth is easiest
3266896 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import sys
import platform

# When running from a macOS .app bundle, the bundled site-packages must be
# added to the path manually.
if platform.system() == 'Darwin':
    sys.path.append("../Resources/lib/python2.7/site-packages/")

import sip
# Tell qt to return python string instead of QString
# These are only needed for Python v2 but are harmless for Python v3.
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)

from PyQt4 import QtGui, QtCore

QtCore.QTextCodec.setCodecForCStrings(QtCore.QTextCodec.codecForName("utf-8"))

from ggpo.common.controller import Controller
from ggpo.common.settings import Settings
from ggpo.gui.colortheme import ColorTheme
from ggpo.gui.ggpowindow import GGPOWindow
from ggpo.gui.logindialog import LoginDialog
# noinspection PyUnresolvedReferences
import ggpo.resources.ggpo_rc
def main(argv=None):
    """Create the Qt application, show the login dialog and, after a
    successful login, the main GGPO window.  Returns the Qt event-loop
    exit code."""
    app = None
    started = False
    # create the application if necessary
    if not QtGui.QApplication.instance():
        app = QtGui.QApplication(argv)
        app.setQuitOnLastWindowClosed(True)
        app.setOrganizationName("FightCade")
        QtCore.QCoreApplication.setApplicationName("FightCade")
    ColorTheme.saveDefaultStyle()
    if not Settings.value(Settings.COLORTHEME) or Settings.value(Settings.COLORTHEME)=='fightcade' or Settings.value(Settings.COLORTHEME)=='ggpong':
        ColorTheme.setGNGTheme(True)
    # The controller's select loop runs on its own QThread.
    controller = Controller()
    thread = QtCore.QThread()
    controller.moveToThread(thread)
    thread.started.connect(controller.selectLoop)
    thread.start()
    def loggedIn():
        # NOTE(review): `started` is still False the first (and only) time
        # the dialog is accepted, so this guard always passes -- presumably
        # it protects against repeated `accepted` signals; confirm.
        if started==False:
            window = GGPOWindow()
            window.setWindowIcon(QtGui.QIcon(':/assets/icon-128.png'))
            window.setController(controller)
            window.restorePreference()
            controller.sendListChannels()
            window.show()
            window.raise_()
            window.activateWindow()
            # Try UDP ports 6009 down to 6007 until one connects.
            UDP=False
            port=6009
            while True:
                UDP = controller.connectUdp(port)
                port=port-1
                if (UDP==True or port < 6006):
                    break
    logindialog = LoginDialog()
    logindialog.setController(controller)
    logindialog.accepted.connect(loggedIn)
    logindialog.rejected.connect(sys.exit)
    logindialog.exec_()
    logindialog.raise_()
    logindialog.activateWindow()
    started=True
    return app.exec_()
if __name__ == '__main__':
    # Propagate the Qt event loop's exit status to the shell.
    sys.exit(main(sys.argv))
| StarcoderdataPython |
6636223 | from twisted.internet import reactor
from twisted.web.server import Site
from webservices.async import provider_for_twisted
from webservices.models import Provider
# Maps public API keys to their private signing keys.
API_KEYS = {
    'pubkey': 'privkey', # your keys here
}
class HelloProvider(Provider):
    """Web-service provider that greets the caller by name."""

    def get_private_key(self, public_key):
        # Unknown public keys yield None, which the framework rejects.
        return API_KEYS.get(public_key)

    def provide(self, data):
        # Fall back to a generic greeting when no name was supplied.
        return {'greeting': u'hello %s' % data.get('name', 'world')}
# Wrap the provider in a Twisted web Site and serve it on port 8000;
# reactor.run() blocks until the process is stopped.
resource = provider_for_twisted(HelloProvider())
site = Site(resource)
reactor.listenTCP(8000, site)
reactor.run()
| StarcoderdataPython |
3215526 | <gh_stars>0
import json
import os
import torch
class TimeDistributed(torch.nn.Module):
    """Time distributed wrapper compatible with linear/dense pytorch layer modules"""

    def __init__(self, module, batch_first=True):
        super(TimeDistributed, self).__init__()
        self.module = module
        self.batch_first = batch_first

    def forward(self, x):
        # 2-D (or lower) input: the wrapped layer can consume it directly.
        if x.dim() <= 2:
            return self.module(x)
        # Fold batch and time into one axis: (samples * timesteps, input_size).
        flat = x.contiguous().view(-1, x.size(-1))
        out = self.module(flat)
        # Restore the time axis on the way out.
        if self.batch_first:
            # (samples, timesteps, output_size)
            return out.contiguous().view(x.size(0), -1, out.size(-1))
        # (timesteps, samples, output_size)
        return out.view(-1, x.size(1), out.size(-1))
def save(model, hyperparameters, PATH=None):
    """Save the trained model's state_dict along with its hyperparameters as
    a json (hypers.json) in the same directory.

    Parameters:
    -----------
        model (torch.nn.Module): Trained model whose state_dict is saved
        hyperparameters (dict): Hyperparameters of the model (or None to skip)
        PATH (str): File path for the weights; defaults to 'model.pt' in the
            current working directory

    Returns:
    ---------
        None
    """
    if hyperparameters is not None and not isinstance(hyperparameters, dict):
        raise Exception("Invalid argument, hyperparameters must be dict")
    # Save
    if PATH is None:
        # os.path.join fixes the original `os.getcwd() + "model.pt"`, which
        # concatenated without a path separator and wrote next to the cwd.
        PATH = os.path.join(os.getcwd(), "model.pt")
    torch.save(model.state_dict(), PATH)
    hyperdir, _ = os.path.split(PATH)
    if hyperparameters is not None:
        with open(os.path.join(hyperdir, "hypers.json"), "w") as fp:
            json.dump(hyperparameters, fp, sort_keys=False)
    if hyperdir == "":
        hyperdir = "."
    print(f"Model and hyperparameters saved at {hyperdir}")
def load(model, PATH=None):
    """Load trained weights from PATH into `model`.

    Parameters:
    -----------
        model (torch.nn.Module): Instance whose state_dict is populated
        PATH (str): Path of the saved state dict; defaults to 'model.pt' in
            the current working directory

    Returns:
    ---------
        model (torch.nn.Module): The same model with weights loaded

    Raises:
    -------
        Exception: If no state dict file exists at PATH
    """
    if PATH is None:
        PATH = os.path.join(os.getcwd(), "model.pt")
    # The original raised whenever an explicit PATH was supplied; per the
    # error message, the intent is to raise only when the file is missing.
    if not os.path.exists(PATH):
        raise Exception(f"Model state dict not found at {PATH}")
    # Load state of the model
    model.load_state_dict(torch.load(PATH))
    print(f"Model loaded from {PATH}")
    return model
def read_hyperparameters(hyperparameter_json):
    """Load a hyperparameter dictionary from a json file.

    Args:
        hyperparameter_json (json): Path to the json file saved alongside a
            trained model

    Returns:
        [dict]: Python dictionary of the hyperparameters
    """
    with open(hyperparameter_json) as json_file:
        contents = json_file.read()
    return json.loads(contents)
| StarcoderdataPython |
5050900 | <reponame>lordjabez/temperature-collector<filename>lambdas/temperature-collector/weather.py<gh_stars>1-10
import logging
import requests
# OpenWeather "current weather" endpoint.
_weather_url = 'https://api.openweathermap.org/data/2.5/weather'

# Module-level logger for this handler.
_log = logging.getLogger(__name__)
def get_temperature(config, lat, lon):
    """Return the current temperature (imperial units) at (lat, lon) via
    OpenWeather, or None when the API call fails (a warning is logged)."""
    params = {
        'appid': config['openWeatherApiKey'],
        'lat': lat,
        'lon': lon,
        'units': 'imperial',
    }
    response = requests.get(_weather_url, params=params)
    if response.status_code != requests.codes.ok:
        _log.warning('OpenWeather call failed')
        return None
    _log.info(f'OpenWeather call succeeded for {lat} {lon}: {response}')
    return response.json()['main']['temp']
| StarcoderdataPython |
11342183 | <filename>CIM15/IEC61970/Informative/InfCustomers/PowerQualityPricing.py
# Copyright (C) 2010-2011 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61968.Common.Document import Document
class PowerQualityPricing(Document):
    """A pricing structure whose charges depend on power quality: voltage
    operating limits, violation costs, and the value placed on uninterrupted
    service."""

    def __init__(self, normalLowVoltLimit=0.0, valueUninterruptedServiceEnergy=0.0, voltLimitViolCost=0.0, normalHighVoltLimit=0.0, emergencyHighVoltLimit=0.0, emergencyLowVoltLimit=0.0, powerFactorMin=0.0, voltImbalanceViolCost=0.0, valueUninterruptedServiceP=0.0, *args, **kw_args):
        """Initialises a new 'PowerQualityPricing' instance.

        Every limit/cost defaults to 0.0; remaining positional and keyword
        arguments are forwarded to the Document base class.
        """
        # Voltage operating limits.
        self.normalLowVoltLimit = normalLowVoltLimit          # normal low voltage limit
        self.normalHighVoltLimit = normalHighVoltLimit        # normal high voltage limit
        self.emergencyLowVoltLimit = emergencyLowVoltLimit    # emergency low voltage limit
        self.emergencyHighVoltLimit = emergencyHighVoltLimit  # emergency high voltage limit

        # Violation costs.
        self.voltLimitViolCost = voltLimitViolCost            # cost per unit voltage
        self.voltImbalanceViolCost = voltImbalanceViolCost    # cost per unit voltage imbalance

        # Value of uninterrupted service.
        self.valueUninterruptedServiceEnergy = valueUninterruptedServiceEnergy  # cost per energy
        self.valueUninterruptedServiceP = valueUninterruptedServiceP            # cost per active power

        # Minimum power factor before a special charge is levied.
        self.powerFactorMin = powerFactorMin

        super(PowerQualityPricing, self).__init__(*args, **kw_args)

    _attrs = ["normalLowVoltLimit", "valueUninterruptedServiceEnergy", "voltLimitViolCost", "normalHighVoltLimit", "emergencyHighVoltLimit", "emergencyLowVoltLimit", "powerFactorMin", "voltImbalanceViolCost", "valueUninterruptedServiceP"]
    _attr_types = {"normalLowVoltLimit": float, "valueUninterruptedServiceEnergy": float, "voltLimitViolCost": float, "normalHighVoltLimit": float, "emergencyHighVoltLimit": float, "emergencyLowVoltLimit": float, "powerFactorMin": float, "voltImbalanceViolCost": float, "valueUninterruptedServiceP": float}
    _defaults = {"normalLowVoltLimit": 0.0, "valueUninterruptedServiceEnergy": 0.0, "voltLimitViolCost": 0.0, "normalHighVoltLimit": 0.0, "emergencyHighVoltLimit": 0.0, "emergencyLowVoltLimit": 0.0, "powerFactorMin": 0.0, "voltImbalanceViolCost": 0.0, "valueUninterruptedServiceP": 0.0}
    _enums = {}
    _refs = []
    _many_refs = []
| StarcoderdataPython |
6510432 | import json
import random
import pytest
from src import es_handler
@pytest.fixture()
def es_cleared():
    """Provide a clean Elasticsearch 'test_index', removing it again afterwards."""
    client = es_handler.connect_elasticsearch()
    client.indices.delete(index='test_index', ignore=[400, 404])
    yield
    # cleanup after the test has run
    client.indices.delete(index='test_index', ignore=[400, 404])
@pytest.fixture
def es_with_events():
    """Provide a 'test_index' pre-populated with the events of one random
    sample file; the index is deleted again on teardown."""
    # setup: start from a clean index
    es = es_handler.connect_elasticsearch()
    es.indices.delete(index='test_index', ignore=[400, 404])
    files = [
        'happy_path.json',
    ]
    # Fix: random.choice replaces the manual randint-index pattern.
    file = random.choice(files)
    print('----> using file ' + file)
    # Fix: close the file handle instead of leaking it via json.load(open(...)).
    with open(file) as f_in:
        kafka_events = json.load(f_in)
    # populate the index
    for kafka_event in kafka_events:
        assert es_handler.store_record(es, 'test_index', kafka_event), 'unable to store event'
    yield
    # teardown
    es.indices.delete(index='test_index', ignore=[400, 404])
| StarcoderdataPython |
9662720 | import logging
import paramiko
import string
from time import sleep
from ocs_ci.ocs.exceptions import CommandFailed, TimeoutException
log = logging
class WinNode(object):
    """A Windows host driven over SSH: runs PowerShell commands to manage the
    iSCSI initiator, disks, and fio workloads."""

    def __init__(self, **kw):
        # Fixed administrator credentials; the password placeholder is
        # presumably substituted at deployment time — TODO confirm.
        self.login = "Administrator"
        self.password = "<PASSWORD>"
        self.ip_address = kw['ip_address']  # address used for the SSH connection
        self.private_ip = kw['private_ip']  # cluster-side address (not used by this class)

    def win_exec(self, ps_command, timeout=180):
        """Run a PowerShell command on the node over SSH.

        Polls once a second, up to `timeout` seconds, for completion.
        Returns a dict with 'exit_code', 'stdout' and 'stderr' when the
        command exits 0 with empty stderr; raises TimeoutException on
        timeout and CommandFailed otherwise.
        """
        log.info("Running powershell`s command `{}`".format(ps_command))
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(self.ip_address, username=self.login, password=self.password)
        command = 'powershell -Command "& {{{}}}"'.format(ps_command)
        chan_ssh = client.get_transport().open_session()
        chan_ssh.exec_command(command)
        for i in range(0, timeout):
            sleep(1)
            if chan_ssh.exit_status_ready():
                break
        else:
            # for/else: the loop never hit `break`, i.e. the command never finished.
            raise TimeoutException("Timeout")
        output = dict()
        output["exit_code"] = chan_ssh.recv_exit_status()
        # NOTE(review): recv(-1)/recv_stderr(-1) — confirm against paramiko's
        # Channel API that a negative nbytes drains the pending buffer.
        output["stdout"] = chan_ssh.recv(-1)
        output["stderr"] = chan_ssh.recv_stderr(-1)
        if not bool(output["stderr"]) and output["exit_code"] == 0:
            return output
        else:
            raise CommandFailed(output["stderr"])

    def start_iscsi_initiator(self):
        """Start the MS iSCSI service and make it start automatically on boot."""
        self.win_exec("Start-Service msiscsi")
        self.win_exec("Set-Service msiscsi -startuptype 'automatic'")

    def get_iscsi_initiator_name(self):
        """Return the initiator's IQN (node address), stripped of whitespace."""
        output = self.win_exec("(Get-InitiatorPort).NodeAddress")
        stdout = output["stdout"].strip()
        return stdout

    def create_new_target(self, ip, port=3260):
        """Register an iSCSI target portal at ip:port."""
        command = "New-IscsiTargetPortal -TargetPortalAddress {} -TargetPortalPortNumber {}".format(ip, port)
        self.win_exec(command)

    def delete_target(self):
        # Not implemented yet.
        pass

    def connect_to_target(self, ip, username, password):
        """Connect to the ceph-igw iSCSI target with one-way CHAP auth and
        multipath enabled."""
        command = "Connect-IscsiTarget -NodeAddress iqn.2003-01.com.redhat.iscsi-gw:ceph-igw"\
                  " -IsMultipathEnabled \$True -TargetPortalAddress {} -AuthenticationType ONEWAYCHAP"\
                  " -ChapUsername {} -ChapSecret {}".format(ip, username, password)
        self.win_exec(command)

    def disconnect_from_target(self,):
        """Disconnect from the ceph-igw iSCSI target without confirmation prompts."""
        command = "Disconnect-IscsiTarget -NodeAddress "\
                  "iqn.2003-01.com.redhat.iscsi-gw:ceph-igw -Confirm:$false"
        self.win_exec(command)

    def create_disk(self, number):
        """Initialise, partition and NTFS-format `number` disks, assigning
        drive letters starting at 'D' (index 3 of the alphabet)."""
        letters = list(string.ascii_uppercase)[3:3 + number]
        for disk, part in zip(letters, list(range(1, 1 + number))):
            self.win_exec("Initialize-Disk -Number {} -PartitionStyle MBR".format(part))
            self.win_exec("New-Partition -DiskNumber {0} -UseMaximumSize -DriveLetter {1}".format(part, disk))
            self.win_exec("Get-Volume -DriveLetter {}".format(disk))
            self.win_exec("Format-Volume -DriveLetter {} -FileSystem NTFS".format(disk))

    def check_disk(self, number):
        """Query disk `number`; win_exec raises if the disk is absent."""
        command = "Get-Disk -Number {}".format(number)
        self.win_exec(command)

    def create_fio_job_options(self, job_options):
        """Write a fio job file (test.fio) into the fio install directory."""
        command = 'Set-Content -Value "{}" -Path \'C:\\Program Files\\fio\\test.fio\''.format(job_options)
        self.win_exec(command)

    def run_fio_test(self):
        """Run the previously written fio job; return 0 on success, 1 on failure."""
        log.info("starting fio test")
        try:
            output = self.win_exec(
                "cd 'C:\\Program Files\\fio\\'; .\\fio.exe .\\test.fio",
                timeout=4800)
        except CommandFailed:
            log.exception("fio test filed")
            return 1
        else:
            log.info(output["stdout"])
            return 0
| StarcoderdataPython |
6664511 | from django.contrib import admin
from django import forms
from .models import Pathogen, Original_host, Origin_pathogen, NCBI_nodes, Changelog, Table_descriptions
import json
class PathogenForm(forms.ModelForm):
    """Model form for Pathogen that narrows the taxon choices to NCBI nodes
    flagged as having data."""
    class Meta:
        # No extra fields beyond what the admin declares.
        fields = []
        model = Pathogen

    def __init__(self, *args, **kwargs):
        super(PathogenForm, self).__init__(*args, **kwargs)
        # Only taxa with data are selectable, ordered alphabetically.
        self.fields['taxon'].queryset = NCBI_nodes.objects.exclude(has_data=False).order_by('scientific_name')
class PathogenAdmin(admin.ModelAdmin):
    """Admin for Pathogen entries: logs every save to the changelog and keeps
    the cached scientific name in sync with the selected taxon."""
    fields = ['given_name', 'taxon', 'is_choice']
    form = PathogenForm
    list_display = ('scientific_name', 'given_name', 'is_choice')
    search_fields = ['scientific_name', 'given_name']

    def save_model(self, request, obj, form, change):
        # Record which fields changed before the object is written.
        changed_fields = json.dumps({obj._meta.db_table: form.changed_data})
        row_id = obj.pk if obj.pk else 0
        Changelog.objects.create(dtuser=request.user.username, dtaction='admin_save',
                                 dtvalues=changed_fields, dtrow=row_id)
        super(PathogenAdmin, self).save_model(request, obj, form, change)
        # Cache the taxon's scientific name on the pathogen row.
        obj.scientific_name = obj.taxon.get_name
        obj.save()
class HostForm(forms.ModelForm):
    """Model form for Original_host that narrows the taxon choices to NCBI
    nodes flagged as having data."""
    class Meta:
        # No extra fields beyond what the admin declares.
        fields = []
        model = Original_host

    def __init__(self, *args, **kwargs):
        super(HostForm, self).__init__(*args, **kwargs)
        # Only taxa with data are selectable, ordered alphabetically.
        self.fields['taxon'].queryset = NCBI_nodes.objects.exclude(has_data=False).order_by('scientific_name')
class HostAdmin(admin.ModelAdmin):
    """Admin for Original_host entries; same changelog-and-sync behaviour as
    PathogenAdmin."""
    fields = ['given_name', 'taxon', 'is_choice', 'comment']
    form = HostForm
    list_display = ('scientific_name', 'given_name', 'is_choice', 'comment')
    search_fields = ['scientific_name', 'given_name']

    def save_model(self, request, obj, form, change):
        Changelog.objects.create(
            dtuser=request.user.username,
            dtaction='admin_save',
            dtvalues=json.dumps({obj._meta.db_table: form.changed_data}),
            dtrow=obj.pk if obj.pk else 0,
        )
        super(HostAdmin, self).save_model(request, obj, form, change)
        # Keep the denormalised scientific name in step with the chosen taxon.
        obj.scientific_name = obj.taxon.get_name
        obj.save()
class OriginAdmin(admin.ModelAdmin):
    """Admin configuration for Origin_pathogen (country-of-origin) entries."""
    fields = ['given_name', 'country', 'is_choice', 'comment']
    list_display = ('country', 'given_name', 'is_choice', 'comment')
class TaxonAdminForm(forms.ModelForm):
    """Form for taxon rows that saves via commit=False so extra processing
    can be inserted before the final write.

    Bug fix: save() previously called super(CallResultTypeForm, self) — a
    name that does not exist in this module — so every save raised NameError.
    """
    def save(self, commit=True):
        m = super(TaxonAdminForm, self).save(commit=False)
        # do custom stuff
        if commit:
            m.save()
        return m
class TaxonAdmin(admin.ModelAdmin):
    """Admin for NCBI taxonomy nodes: read-only identity fields, name search,
    plus exact tax-id lookup for numeric search terms."""
    fields = ['get_name', 'tax_id', 'has_data']
    list_display = ('get_name', 'tax_id', 'has_data')
    list_filter = ('has_data', 'rank')
    readonly_fields = ('get_name', 'tax_id')
    ordering = ['scientific_name']
    search_fields = ['scientific_name']

    def get_search_results(self, request, queryset, search_term):
        # Numeric terms are treated as an exact tax-id lookup; anything else
        # falls back to the default scientific-name search.
        try:
            tax_id = int(search_term)
        except ValueError:
            return super(TaxonAdmin, self).get_search_results(request, queryset, search_term)
        return self.model.objects.filter(tax_id=tax_id), False

    def has_add_permission(self, request):
        # Taxa come from NCBI imports; manual creation is disabled.
        return False
class DiscriptionAdmin(admin.ModelAdmin):
    """Admin for Table_descriptions rows.

    NOTE(review): the class name is misspelled ("Discription"); it is kept
    because the admin.site.register call below refers to it.
    """
    fields = ['description','placeholder']
    list_filter = ('table', 'auto_created')
    list_display = ('__str__','description','placeholder')
    # Table/column identify the row and must not be edited by hand.
    readonly_fields = ['table', 'column']

    def has_add_permission(self, request):
        # Presumably rows are generated programmatically (see the
        # 'auto_created' filter) — manual creation is disabled.
        return False
admin.site.register(NCBI_nodes, TaxonAdmin)
admin.site.register(Pathogen, PathogenAdmin)
admin.site.register(Original_host, HostAdmin)
admin.site.register(Origin_pathogen, OriginAdmin)
admin.site.register(Table_descriptions, DiscriptionAdmin) | StarcoderdataPython |
11210822 | <reponame>ekmixon/gamechanger-ml
"""
usage: python predict_table.py [-h] -m MODEL_PATH -d DATA_PATH [-b BATCH_SIZE]
[-l MAX_SEQ_LEN] -g GLOB [-o OUTPUT_CSV] -a
AGENCIES_PATH [-r]
Binary classification of each sentence in the files matching the 'glob' in
data_path
optional arguments:
-h, --help show this help message and exit
-m MODEL_PATH, --model-path MODEL_PATH
directory of the torch model
-d DATA_PATH, --data-path DATA_PATH
path holding the .json corpus files
-b BATCH_SIZE, --batch-size BATCH_SIZE
batch size for the data samples; default=8
-l MAX_SEQ_LEN, --max-seq-len MAX_SEQ_LEN
maximum sequence length, 128 to 512; default=128
-g GLOB, --glob GLOB file glob pattern
-o OUTPUT_CSV, --output-csv OUTPUT_CSV
the .csv for output
-a AGENCIES_PATH, --agencies-path AGENCIES_PATH
the .csv for agency abbreviations
-r, --raw-output write the results of the classifier / entity
attachment
"""
import logging
import os
import time
import pandas as pd
import gamechangerml.src.text_classif.utils.classifier_utils as cu
from gamechangerml.src.featurization.abbreviations_utils import (
get_references,
get_agencies_dict,
get_agencies,
)
from gamechangerml.src.text_classif.utils.entity_coref import EntityCoref
from gamechangerml.src.text_classif.utils.log_init import initialize_logger
from gamechangerml.src.text_classif.utils.resp_stats import count_output
logger = logging.getLogger(__name__)
def _agg_stats(df):
    """Write per-document aggregate statistics of the classifier output.

    Produces up to two CSVs: responsibility counts per document, and counts
    of responsibilities that could not be attached to an entity. Either file
    is skipped when its counts are empty.
    """
    resp_per_doc, resp_no_entity, n_uniq_entities, n_docs = count_output(df)
    if resp_per_doc:
        df_resp_doc = pd.DataFrame(
            list(resp_per_doc.items()), columns=["doc", "count"]
        )
        df_resp_doc.to_csv("resp-in-doc-stats.csv", index=False)
    if resp_no_entity:
        # Bug fix: this frame was previously built from resp_per_doc, so the
        # "no entity" CSV silently duplicated the per-document counts.
        df_resp_no_e = pd.DataFrame(
            list(resp_no_entity.items()), columns=["doc", "count"]
        )
        df_resp_no_e.to_csv("resp-no-entity-stats.csv", index=False)
def predict_table(
    model_path, data_path, glob, max_seq_len, batch_size, output_csv, stats,
    agencies_path=None,
):
    """
    See the preamble (help) for a description of these arguments.

    For each file matching `glob`, the `raw_text` is parsed into sentences
    and run through the classifier. Recognized entities are then associated
    with sentences classified as `1` or `responsibility`. The final output
    is assembled by using sentences classified as `1` with organization
    information, references, document title, etc.

    Args:
        agencies_path: path of the agency-abbreviations .csv. Defaults to
            the command-line flag for backward compatibility (see below).

    Returns:
        pandas.DataFrame
    """
    if not os.path.isdir(data_path):
        raise ValueError("no data path {}".format(data_path))
    if not os.path.isdir(model_path):
        raise ValueError("no model path {}".format(model_path))
    if agencies_path is None:
        # Fix: this function used to read the module-global `args` directly,
        # which only exists when the script is run from the command line and
        # made the function unusable as a library import. Passing
        # `agencies_path` explicitly avoids the global; the fallback keeps
        # the old CLI behaviour working unchanged.
        agencies_path = args.agencies_path
    rename_dict = {
        "entity": "Organization / Personnel",
        "sentence": "Responsibility Text",
        "agencies": "Other Organization(s) / Personnel Mentioned",
        "refs": "Documents Referenced",
        "title": "Document Title",
        "source": "Source Document",
    }
    start = time.time()
    entity_coref = EntityCoref()
    entity_coref.make_table(
        model_path,
        data_path,
        glob,
        max_seq_len,
        batch_size,
    )
    df = entity_coref.to_df()
    # Keep only the sentences classified `1` (responsibility).
    df = df[df.top_class == 1].reset_index()
    logger.info("retrieving agencies csv")
    duplicates, aliases = get_agencies_dict(agencies_path)
    df["agencies"] = get_agencies(
        file_dataframe=df,
        doc_dups=None,
        duplicates=duplicates,
        agencies_dict=aliases,
    )
    df["refs"] = get_references(df, doc_title_col="src")
    renamed_df = df.rename(columns=rename_dict)
    final_df = renamed_df[
        [
            "Source Document",
            "Document Title",
            "Organization / Personnel",
            "Responsibility Text",
            "Other Organization(s) / Personnel Mentioned",
            "Documents Referenced",
        ]
    ]
    if output_csv is not None:
        final_df.to_csv(output_csv, index=False)
        logger.info("final csv written")
    if stats:
        _agg_stats(final_df)
    elapsed = time.time() - start
    logger.info("total time : {:}".format(cu.format_time(elapsed)))
    return final_df
if __name__ == "__main__":
    from argparse import ArgumentParser

    desc = "Binary classification of each sentence in the files "
    desc += "matching the 'glob' in data_path"
    # Show "python <script name>" as the program name in --help output.
    fp = os.path.split(__file__)
    fp = "python " + fp[-1]
    parser = ArgumentParser(prog=fp, description=desc)
    parser.add_argument(
        "-m",
        "--model-path",
        dest="model_path",
        type=str,
        required=True,
        help="directory of the pytorch model",
    )
    parser.add_argument(
        "-d",
        "--data-path",
        dest="data_path",
        type=str,
        required=True,
        help="path holding the .json corpus files",
    )
    parser.add_argument(
        "-b",
        "--batch-size",
        dest="batch_size",
        type=int,
        default=8,
        help="batch size for the data samples; default=8",
    )
    parser.add_argument(
        "-l",
        "--max-seq-len",
        dest="max_seq_len",
        type=int,
        default=128,
        help="maximum sequence length, 128 to 512; default=128",
    )
    parser.add_argument(
        "-g",
        "--glob",
        dest="glob",
        type=str,
        required=True,
        help="file glob pattern",
    )
    parser.add_argument(
        "-o",
        "--output-csv",
        dest="output_csv",
        type=str,
        default=None,
        help="the .csv for output",
    )
    parser.add_argument(
        "-a",
        "--agencies-path",
        dest="agencies_path",
        type=str,
        required=True,
        help="the .csv for agency abbreviations",
    )
    parser.add_argument(
        "-s",
        "--stats",
        action="store_true",
        dest="stats",
        help="write aggregate statistics",
    )
    # Console logging only.
    initialize_logger(to_file=False, log_name="none")
    args = parser.parse_args()
    # NOTE(review): --agencies-path is not passed here; predict_table reads
    # it through the module-global `args`.
    _ = predict_table(
        args.model_path,
        args.data_path,
        args.glob,
        args.max_seq_len,
        args.batch_size,
        args.output_csv,
        args.stats,
    )
| StarcoderdataPython |
103590 | <reponame>rodmidde/pandoc-astah-include
#!/usr/bin/env python
"""
Astah filter to process code blocks with class "ashah" into images.
Needs `astah-community.jar and dependencies`.
"""
import os
import shutil
from subprocess import call
from pandocfilters import toJSONFilter, Para, Image, get_caption
def get_filepaths_by_index(topdir, exten, idx):
    """Return the idx-th path (in sorted order) under `topdir` whose
    lower-cased name ends with `exten`. Recurses into subdirectories."""
    matches = []
    for dirpath, _dirnames, names in os.walk(topdir):
        matches.extend(
            os.path.join(dirpath, n) for n in names if n.lower().endswith(exten)
        )
    return sorted(matches)[idx]
def clear_dir(dirPath):
    """Delete every subdirectory below `dirPath` (files directly inside it
    are kept). A missing `dirPath` is silently a no-op, as os.walk yields
    nothing for nonexistent paths."""
    for parent, childdirs, _files in os.walk(dirPath):
        for child in childdirs:
            shutil.rmtree(os.path.join(parent, child))
def astah(key, value, format, _):
    """Pandoc filter action: render "astah" code blocks to images.

    For a CodeBlock carrying the "astah" class together with `file`,
    `format` and `index` attributes, export all diagrams of the given Astah
    file via the Astah command line, then replace the block with the
    `index`-th exported image. Any other element is left unchanged (None).

    Fixes: the four-level nesting is flattened into guard clauses, and the
    dict comprehension no longer shadows the `key`/`value` parameters.
    """
    if key != 'CodeBlock':
        return None
    [[ident, classes, keyvals], code] = value
    kv = {k: v for k, v in keyvals}
    if "astah" not in classes:
        return None
    caption, typef, keyvals = get_caption(keyvals)
    if "file" not in kv or "format" not in kv or "index" not in kv:
        # Required attributes missing: leave the block untouched.
        return None
    output_dir = "astah-generated-files"
    clear_dir(output_dir)
    # Export every diagram of the .asta file, resized, in the target format.
    call(["java", "-Djava.awt.headless=true", "-Dcheck_jvm_version=false", "-cp",
          "astah/astah-community.jar", "com.change_vision.jude.cmdline.JudeCommandRunner", "-image",
          "all", "-resized", "-f", kv["file"], "-t", kv["format"], "-o", output_dir])
    dest = get_filepaths_by_index(output_dir, kv["format"], int(kv["index"]))
    return Para([Image([ident, [], keyvals], caption, [dest, typef])])
# Run as a pandoc filter: toJSONFilter reads the document AST from stdin,
# applies `astah`, and writes the transformed AST to stdout.
if __name__ == "__main__":
    toJSONFilter(astah)
| StarcoderdataPython |
1684323 |
import random
def eightball():
    """Return one random Magic 8-Ball answer.

    The twenty possible answers follow
    https://en.wikipedia.org/wiki/Magic_8-Ball#Possible_answers
    """
    answers = (
        "It is certain",
        "It is decidedly so",
        "Without a doubt",
        "Yes, definitely",
        "You may rely on it",
        "As I see it, yes",
        "Most likely",
        "Outlook good",
        "Yes",
        "Signs point to yes",
        "Reply hazy; try again",
        "Ask again later",
        "Better not tell you now",
        "Cannot predict now",
        "Concentrate and ask again",
        "Don't count on it",
        "My reply is no",
        "My sources say no",
        "Outlook not so good",
        "Very doubtful",
    )
    return random.choice(answers)
| StarcoderdataPython |
12864804 | # -*- coding: utf-8 -*-
"""Constants for building the biological network explorer's transformations toolbox."""
from typing import List, Tuple
from pybel.struct.pipeline.decorators import mapped
# Default NetworkX explorer toolbox functions (name, button text, description)
_explorer_toolbox = (
('collapse_to_genes', 'Collapse to Genes', 'Collapse proteins and RNAs to genes'),
('collapse_all_variants', 'Collapse Variants', 'Collapse Variants to their Parent Nodes'),
('collapse_to_protein_interactions', 'Protein Interaction Network',
'Reduce the Network to Interactions between Proteins'),
('enrich_protein_and_rna_origins', 'Expand Protein Origins',
'Adds RNAs corresponding to Proteins, then adds Genes corresponding to RNAs and miRNAs'),
('prune_protein_rna_origins', 'Prune Genes/RNAs',
'Delete genes/RNAs that only have transcription/translation edges'),
('expand_periphery', 'Expand Periphery', 'Expand the periphery of the network'),
('expand_internal', 'Expand Internal', 'Adds missing edges between nodes in the network'),
('remove_isolated_nodes', 'Remove Isolated Nodes', 'Remove from the network all isolated nodes'),
('get_largest_component', 'Get Largest Component', 'Retain only the largest component and removes all others'),
('enrich_unqualified', 'Enrich unqualified edges', 'Adds unqualified edges from the universe'),
('remove_associations', 'Remove Associations', 'Remove associative relations'),
('remove_pathologies', 'Remove Pathologies', 'Removes all pathology nodes'),
('remove_biological_processes', 'Remove Biological Processes', 'Removes all biological process nodes'),
)
_bio2bel_functions = (
(
'enrich_rnas',
'Enrich RNA controllers from miRTarBase',
'Adds the miRNA controllers of RNA nodes from miRTarBase'
), (
'enrich_mirnas',
'Enrich miRNA targets',
'Adds the RNA targets of miRNA nodes from miRTarBase'
), (
'enrich_genes_with_families',
'Enrich Genes with Gene Family Membership',
'Adds the parents of HGNC Gene Families'
), (
'enrich_families_with_genes',
'Enrich Gene Family Membership',
'Adds the children to HGNC gene familes'
), (
'enrich_bioprocesses',
'Enrich Biological Process Hierarchy',
'Adds parent biological processes'
), (
'enrich_chemical_hierarchy',
'Enrich Chemical Hierarchy',
'Adds parent chemical entries'
), (
'enrich_proteins_with_enzyme_families',
'Add Enzyme Class Members',
'Adds enzyme classes for each protein'
), (
'enrich_enzymes',
'Enrich Enzyme Classes',
'Adds proteins corresponding to present ExPASy Enzyme codes'
)
)
def _function_is_registered(name: str) -> bool:
    """Return True when *name* is a pipeline function known to `mapped`."""
    return name in mapped
def get_explorer_toolbox() -> List[Tuple[str, str, str]]:
    """Get the explorer toolbox list.

    Starts from the default NetworkX toolbox entries and appends each
    Bio2BEL function that is actually registered with the pipeline.
    """
    toolbox = list(_explorer_toolbox)
    for func_name, title, description in _bio2bel_functions:
        if _function_is_registered(func_name):
            toolbox.append((func_name, title, description))
    return toolbox
| StarcoderdataPython |
1807802 | <reponame>borislavstoychev/nails_project
from django.urls import path
from nails_project.schedule import views
urlpatterns = [
path('', views.ScheduleListView.as_view(), name='schedule view'),
path('create-schedule/', views.ScheduleCreateView.as_view(), name='schedule create'),
path('schedule-delete/<int:pk>', views.ScheduleDeleteView.as_view(), name='delete schedule'),
] | StarcoderdataPython |
6554458 | <filename>weibo/test_weibo.py
# -*- coding:utf-8 -*-
# 微博热搜主页 : https://m.weibo.cn/p/102803_ctg1_8999_-_ctg1_8999_home
# 微博热搜主页的数据 : https://m.weibo.cn/api/container/getIndex?containerid=102803_ctg1_8999_-_ctg1_8999_home&page={}
# 微博热搜博主发表微博的数据 : https://m.weibo.cn/api/container/getIndex?uid={}&luicode=10000011&lfid=102803_ctg1_8999_-_ctg1_8999_home%26page%3D1&featurecode=20000320&type=uid&value={}&containerid=107603{}
# 微博热搜博主发表的微博的评论的数据 : https://m.weibo.cn/api/comments/show?id={}&page=1
import requests,json,re
import sys,time,pymysql
class Handler(object):
    """Scraper for Weibo's mobile API (Python 2): walks the hot-search feed,
    then each author's recent posts, then the commenters on those posts."""

    def on_start(self):
        '''
        Collect the ids of users appearing on the Weibo hot-search feed,
        store new ones in the `weibo_user` MySQL table, and crawl each
        author's posts.
        :return:
        '''
        page=1
        while True:
            url = "https://m.weibo.cn/api/container/getIndex?containerid=102803_ctg1_8999_-_ctg1_8999_home&page={}".format(page)
            page+=1
            reponse = requests.get(url)
            ob_json = json.loads(reponse.text)
            list_cards = ob_json.get('cards')
            if list_cards is None:
                # No more pages: the API stops returning 'cards'.
                break
            for card in list_cards:
                # NOTE(review): `is 9` relies on CPython's small-int caching;
                # `== 9` would be the correct comparison.
                if card.get('card_type') is 9:
                    mblog = card.get('mblog')
                    user = mblog.get('user')
                    user_id = user.get('id')
                    user_name = user.get('screen_name')
                    crawl_time = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
                    conn = pymysql.connect(host='127.0.0.1', port=3306, user='repository', passwd='<PASSWORD>', db='repository',
                                           charset='utf8')
                    cur = conn.cursor()
                    # Check whether the user already exists before inserting.
                    cur.execute("select * from weibo_user where user_id = %s", user_id)
                    rows = cur.fetchall()
                    if len(rows) == 0:
                        cur.execute("insert into weibo_user(user_id,user_name,crawl_time) values(%s,%s,%s)",(user_id, user_name,crawl_time))
                        conn.commit()
                    cur.close()
                    conn.close()
                    self.get_weibo(user_id)

    def get_weibo(self,id):
        '''
        Fetch the posts recently published by a hot-search author.
        :param id: the id of the author
        :return:
        '''
        for page in range(1,2):  # only the author's most recent page of posts
            url = "https://m.weibo.cn/api/container/getIndex?uid={}&luicode=10000011&lfid=102803_ctg1_8999_-_ctg1_8999_home&featurecode=20000320&type=uid&value={}&containerid=107603{}&page={}".format(id,id,id,page)
            reponse = requests.get(url)
            ob_json = json.loads(reponse.text)
            list_cards = ob_json.get('cards')
            if list_cards is None:
                return
            for card in list_cards:
                if card.get('card_type') is 9:
                    mblog = card.get('mblog')
                    user_id = mblog.get('user').get('id')
                    weibo_id = mblog.get('id')  # id of this post
                    text = mblog.get('text')
                    reposts_count = mblog.get('reposts_count')  # number of reposts
                    comments_count = mblog.get('comments_count')  # number of comments
                    attitudes_count = mblog.get('attitudes_count')  # number of likes
                    created_at = mblog.get('created_at')
                    crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
                    print user_id,weibo_id,text,reposts_count,comments_count,attitudes_count,created_at,crawl_time
                    # Persistence is currently disabled (kept for reference):
                    # conn = pymysql.connect(host='127.0.0.1', port=3306, user='repository', passwd='<PASSWORD>', db='repository',
                    #                        charset='utf8')
                    # cur = conn.cursor()
                    # # check whether the post already exists
                    # cur.execute("select * from weibo_weibo where weibo_id = %s", weibo_id)
                    # rows = cur.fetchall()
                    # if len(rows) == 0:
                    #     cur.execute("insert into weibo_weibo(user_id,weibo_id,text,reposts_count,comments_count,attitudes_count,created_at,crawl_time) values(%s,%s,%s)",(user_id,weibo_id,text,reposts_count,comments_count,attitudes_count,created_at,crawl_time))
                    #     conn.commit()
                    # cur.close()
                    # conn.close()
                    # self.get_comment_userid(weibo_id)

    def get_comment_userid(self,id):
        '''
        Collect the ids of users who commented on a post.
        :param id: the id of the post
        :return:
        '''
        url = "https://m.weibo.cn/api/comments/show?id={}&page=1".format(id)
        reponse = requests.get(url)
        ob_json = json.loads(reponse.text)
        list_comments = ob_json.get('hot_data')
        if list_comments is None:
            return
        for comment in list_comments:
            user = comment.get('user')
            user_id = user.get('id')
            user_name = user.get('screen_name')
            crawl_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print user_id,user_name
handle = Handler()
handle.on_start()
| StarcoderdataPython |
1652710 | <filename>quantities/volume.py
from .quantity_type import QuantityType
from .quantity import Quantity
from .units import Unit
from .area import AreaType
from .length import LengthType
class cubic_meter(Unit):
    # SI coherent unit of volume (m^3); serves as VolumeType's primary unit,
    # so no to/from conversion methods are needed.
    profile = {
        "name":"cubic meter",
        "symbol":"",
        "express_by_SI_base":"m+3",
        "express_by_SI":""
    }
class litre(Unit):
    # Litre: 1 l = 1e-3 cubic metre (the primary unit).
    profile = {
        "name":"litre",
        "symbol":"l",
        "express_by_SI_base":"",
        "express_by_SI":""
    }
    @classmethod
    def to_pri_unit(cls, value):
        # litres -> cubic metres
        return value / 1e+3
    @classmethod
    def from_pri_unit(cls, value):
        # cubic metres -> litres
        return value * 1e+3
class gallon(Unit):
    # US gallon expressed in cubic metres.
    # NOTE(review): the exact US gallon is 3.785411784e-3 m^3; the trailing
    # ...0007 here looks like float noise — confirm before relying on it.
    profile = {
        "name":"gallon",
        "symbol":"gal",
        "express_by_SI_base":"",
        "express_by_SI":""
    }
    @classmethod
    def to_pri_unit(cls, value):
        # gallons -> cubic metres
        return 0.0037854117840007 * value
    @classmethod
    def from_pri_unit(cls, value):
        # cubic metres -> gallons
        return value / 0.0037854117840007
class VolumeType(QuantityType):
    """Quantity type for volume; the cubic metre is both the primary and the
    SI-coherent unit, with litre and gallon as alternates."""
    pri_unit = cubic_meter
    SI_conherent_unit = pri_unit
    litre = litre
    gallon = gallon

    @classmethod
    def register_type(cls):
        # length * area (in either order) yields a volume; dividing a volume
        # by one of those factors yields the other.
        conversions = (
            ((LengthType, '*', AreaType), cls),
            ((AreaType, '*', LengthType), cls),
            ((cls, '/', LengthType), AreaType),
            ((cls, '/', AreaType), LengthType),
        )
        for key, result in conversions:
            cls.source[key] = result
class Volume(Quantity):
def __init__(self, value, unit=None):
super().__init__(value, VolumeType, unit) | StarcoderdataPython |
6484066 | import tensorflow as tf
from os.path import join, exists
from .preprocess import ZaloDatasetProcessor
from .modeling import BertClassifierModel
from bert import tokenization
import kashgari
from kashgari.tasks.classification import BiGRU_Model
from kashgari.embeddings import BERTEmbedding, TransformerEmbedding
from kashgari.tokenizer import BertTokenizer
from kashgari.tasks.classification import CNNLSTMModel
import logging
logging.basicConfig(level='DEBUG')
# Command-line flags; defaults of None mark flags that the __main__ sanity
# check below treats as required.
flags = tf.flags
FLAGS = flags.FLAGS
# --- run mode and paths ---
flags.DEFINE_string("mode", None,
                    "Training or Predicting?")
flags.DEFINE_string("dataset_path", None,
                    "The path to the dataset")
flags.DEFINE_string("bert_model_path", None,
                    "Link to BERT cased model")
flags.DEFINE_string("model_path", None,
                    "Default path to store the trained model")
# --- dataset file names (relative to dataset_path) ---
flags.DEFINE_string("train_filename", "train.json",
                    "The name of the training file (stored in the dataset folder)")
flags.DEFINE_string("train_augmented_filename", None,
                    "The name of the additional training file with augmented data (stored in the dataset folder)")
flags.DEFINE_string("dev_filename", None,
                    "The name of the developemt file (stored in the dataset folder)")
flags.DEFINE_string("test_filename", "test.json",
                    "The name of the testing file (stored in the dataset folder)")
flags.DEFINE_string("test_predict_outputmode", "zalo",
                    "The mode in which the predict file should be (Zalo-defined 'zalo' or full information 'full')")
# --- tokenization / model hyper-parameters ---
flags.DEFINE_integer("max_sequence_len", 256,
                     "The maximum input sequence length for embeddings")
flags.DEFINE_bool("do_lowercase", False,
                  "Whether to lower case the input text. Should be True for uncased "
                  "models and False for cased models.")
flags.DEFINE_float("model_learning_rate", 1e-5,
                   "The default model learning rate")
flags.DEFINE_integer("model_batch_size", 16,
                     "Training input batch size")
flags.DEFINE_integer("train_epochs", 3,
                     "Number of loops to train the whole dataset")
flags.DEFINE_float("train_dropout_rate", 0.1,
                   "Default dropout rate")
flags.DEFINE_float("bert_warmup_proportion", 0.1,
                   "Proportion of training to perform linear learning rate warmup")
flags.DEFINE_bool("use_pooled_output", True,
                  "Use pooled output from pretrained BERT. False for using meaned output")
# --- loss configuration ---
flags.DEFINE_string("loss_type", "cross_entropy",
                    "The default loss function to use during training (Can be *cross_entropy* or *focal_loss*")
flags.DEFINE_float("loss_label_smooth", 0,
                   "Float in [0, 1] to perform label smoothing when calculate loss. "
                   "When 0, no smoothing occurs. When positive, the binary"
                   "ground truth labels `y_true` are squeezed toward 0.5, with larger values"
                   "of `label_smoothing` leading to label values closer to 0.5.")
# --- checkpointing / summaries ---
flags.DEFINE_integer("save_checkpoint_steps", 500,
                     "The number of steps between each checkpoint save")
flags.DEFINE_integer("save_summary_steps", 100,
                     "The number of steps between each summary write")
flags.DEFINE_integer("keep_checkpoint_max", 1,
                     "The maximum number of checkpoints to keep")
# --- output ---
flags.DEFINE_string("encoding", "utf-8",
                    "Encoding used in the dataset")
flags.DEFINE_string("zalo_predict_csv_file", "./zalo.csv",
                    "Destination for the Zalo submission predict file")
flags.DEFINE_string("eval_predict_csv_file", None,
                    "Destination for the development set predict file (None if no output is required)")
flags.DEFINE_float("dev_size", 0.2,
                   "The size of the development set taken from the training set"
                   "If dev_filename exists, this is ignored")
def main(_):
    """Entry point invoked by tf.app.run().

    NOTE(review): this function looks like work-in-progress — `vocab_path`,
    `config_path` and `checkpoint_path` are not defined in this file (NameError
    unless provided elsewhere), `tokenizer` is never used,
    `processor.load_from_path("")` gets an empty path, and `model.evaluate()`
    is called without any training. The original train/eval/predict dispatch
    is kept below, commented out.
    """
    print("[Main] Starting....")
    # Tokenizer initialzation
    # (sic: load_from_vacob_file is presumably the library's own spelling —
    # confirm against the kashgari API.)
    tokenizer = BertTokenizer.load_from_vacob_file(vocab_path)
    embed = TransformerEmbedding(vocab_path, config_path, checkpoint_path,
                                 bert_type='bert',
                                 task=kashgari.CLASSIFICATION,
                                 sequence_length=FLAGS.max_sequence_len)
    processor = ZaloDatasetProcessor()
    processor.load_from_path("")
    model = CNNLSTMModel(embed)
    model.evaluate()
    # Training/Testing
    # if FLAGS.mode.lower() == 'train':
    #     print('[Main] Begin training')
    #     eval_result = model.train_and_eval()
    #     print('[Main] Training complete.')
    #     print('[Main] Evaluation complete')
    #     print("Accuracy: {}%".format(eval_result['accuracy'] * 100))
    #     print("Loss: {}".format(eval_result['loss']))
    #     print("F1 Score: {}".format(eval_result['f1_score'] * 100))
    #     print("Recall: {}%".format(eval_result['recall'] * 100))
    #     print("Precision: {}%".format(eval_result['precision'] * 100))
    #     if FLAGS.eval_predict_csv_file is not None:
    #         print('[Main] Development set predict and output to file')
    #         _ = model.predict_from_eval_file(test_file=dev_file, output_file=FLAGS.eval_predict_csv_file,
    #                                          file_output_mode='full')
    # elif FLAGS.mode.lower() == 'eval':
    #     eval_result = model.eval()
    #     print('[Main] Evaluation complete')
    #     print("Accuracy: {}%".format(eval_result['accuracy'] * 100))
    #     print("Loss: {}".format(eval_result['loss']))
    #     print("F1 Score: {}".format(eval_result['f1_score'] * 100))
    #     print("Recall: {}%".format(eval_result['recall'] * 100))
    #     print("Precision: {}%".format(eval_result['precision'] * 100))
    #     if FLAGS.eval_predict_csv_file is not None:
    #         print('[Main] Development set predict and output to file')
    #         _ = model.predict_from_eval_file(test_file=dev_file, output_file=FLAGS.eval_predict_csv_file,
    #                                          file_output_mode='full')
    # elif FLAGS.mode.lower() == 'predict_test':
    #     print("[Main] Begin Predict based on Test file")
    #     results = model.predict_from_eval_file(test_file=test_file, output_file=FLAGS.zalo_predict_csv_file,
    #                                            file_output_mode=FLAGS.test_predict_outputmode)
    #     print(results)
    # elif FLAGS.mode.lower() == 'predict_manual':
    #     while True:
    #         question = input("Please enter question here (or empty to exit): ")
    #         if question == "":
    #             break
    #         paragragh = input("Please enter potential answer here here (or empty to exit): ")
    #         if paragragh == "":
    #             break
    #         result = model.predict([(question, paragragh)])[0]
    #         print('Prediction: {} with confidence of {}%'
    #               .format(result['prediction'], result['probabilities'] * 100))
    #
    # print('[Main] Finished')
if __name__ == "__main__":
    # Sanity-check the command-line flags before handing control to tf.app.
    # Fix: guard the None-default flags first — calling .lower() or
    # os.path.exists on None previously raised an unhelpful
    # AttributeError/TypeError instead of the intended assertion message.
    assert FLAGS.mode is not None, "[FlagsCheck] Mode must be specified"
    assert FLAGS.mode.lower() in ['train', 'eval', 'predict_test', 'predict_manual'], \
        "[FlagsCheck] Mode can only be 'train', 'eval', 'predict_test' or 'predict_manual'"
    assert FLAGS.dataset_path is not None and exists(FLAGS.dataset_path), \
        "[FlagsCheck] Dataset path doesn't exist"
    assert FLAGS.bert_model_path is not None and exists(FLAGS.bert_model_path), \
        "[FlagsCheck] BERT pretrained model path doesn't exist"
    assert FLAGS.test_predict_outputmode.lower() in ['full', 'zalo'], "[FlagsCheck] Test file output mode " \
                                                                      "can only be 'full' or 'zalo'"
    assert FLAGS.model_path is not None, "[FlagsCheck] BERT finetuned model location must be set"
    assert FLAGS.loss_type.lower() in ['cross_entropy', 'focal_loss', 'kld', 'squared_hinge', 'hinge'],\
        "[FlagsCheck] Incorrect loss function used"
    tf.compat.v1.app.run()
| StarcoderdataPython |
11309616 | import time
import numpy as np
from cechmate import phat_diagrams, Alpha, Rips
def test_phat_diagrams():
    """Smoke test: build a Rips filtration on a noisy circle and run phat."""
    angles = np.linspace(0, 2 * np.pi, 40)
    points = np.zeros((len(angles), 2))
    points[:, 0] = np.cos(angles)
    points[:, 1] = np.sin(angles)
    # Fixed seed keeps the noisy point cloud reproducible.
    np.random.seed(10)
    points += 0.2 * np.random.randn(len(angles), 2)
    filtration = Rips(1).build(points)
    _ = phat_diagrams(filtration)
def test_rips():
    """
    A test with a noisy circle, comparing H1 to GUDHI
    """
    angles = np.linspace(0, 2 * np.pi, 40)
    points = np.zeros((len(angles), 2))
    points[:, 0] = np.cos(angles)
    points[:, 1] = np.sin(angles)
    # Fixed seed keeps the noisy point cloud reproducible.
    np.random.seed(10)
    points += 0.2 * np.random.randn(len(angles), 2)
    _ = Rips(1).build(points)
def test_alpha():
    """Build the alpha filtration of random points on the 3-sphere and time it."""
    points = np.random.randn(15, 4)
    # Normalize each row to unit length -> points on S^3 in R^4.
    points = points / np.sqrt(np.sum(points ** 2, 1)[:, None])
    started_at = time.time()
    _ = Alpha().build(points)
    elapsed = time.time() - started_at
| StarcoderdataPython |
276766 | # Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import argparse
from sccl.language import *
from sccl.topologies import *
from sccl.language.collectives import Collective
class ReduceGather(Collective):
    """Custom SCCL collective: per-group reduce followed by a global gather.

    Ranks are partitioned into `groups` groups of `gpus_per_group` ranks each;
    each group reduces its input chunks and the reduced results are gathered
    into every rank's output buffer.
    """

    def __init__(self, num_ranks, chunk_factor, inplace, groups):
        Collective.__init__(self, num_ranks, chunk_factor, inplace)
        self.groups = groups
        self.gpus_per_group = num_ranks // groups
        # The schedule below is written for exactly one chunk per rank.
        assert chunk_factor == 1, "Only supports chunks == number of ranks"

    def init_buffers(self):
        """Build initial per-rank buffers: one input chunk slot per group and
        one output slot per rank (filled after the all-gather)."""
        assert self.chunk_factor == 1
        rank_buffers = []
        chunks_per_node = self.num_ranks
        for r in range(self.num_ranks):
            input_buffer = [None] * self.gpus_per_group
            output_buffer = [None] * chunks_per_node
            # NOTE(review): fills `groups` slots of a buffer sized
            # `gpus_per_group`; assumes groups <= gpus_per_group -- confirm.
            for c in range(self.groups):
                input_buffer[c] = Chunk(r, c, -1, c)
            buffers = {Buffer.input : input_buffer,
                       Buffer.output : output_buffer}
            rank_buffers.append(buffers)
        return rank_buffers

    def check(self, prog):
        """Verify each rank's output buffer holds the expected reduced chunks.

        Returns True when every output slot matches the reduction over its
        `groups` contributing ranks; prints a diagnostic for each mismatch.
        """
        expected_chunks = []
        # Expected chunk c is the reduction over one contributor per group.
        for r in range(self.num_ranks):
            chunk = ReduceChunk([])
            for x in range(self.groups):
                y = r // self.groups
                next = y * self.groups + x
                chunk = chunk.reduce(Chunk(next, r % self.gpus_per_group))
            expected_chunks.append(chunk)

        correct = True
        for r in range(self.num_ranks):
            output = prog.buffers[r][Buffer.output]
            for c in range(self.num_ranks):
                chunk = output[c]
                if chunk is None or chunk != expected_chunks[c]:
                    print(f'Rank {r} chunk {c} is incorrect should be {expected_chunks[c]} given {chunk}')
                    correct = False
        return correct
def program(num_ranks, groups, instances, protocol):
    """Emit the SCCL program for the reduce-gather collective.

    The schedule is a per-group ring reduce (results copied into the output
    buffer) followed by a global ring all-gather across all ranks; the
    generated schedule is checked and printed as XML.
    """
    gpus_per_group = num_ranks // groups
    topology = fully_connected(num_ranks)
    chunk_factor = 1
    inplace = False
    collective = ReduceGather(num_ranks, chunk_factor, inplace, groups)

    with SCCLProgram("reduce-gather", topology, collective, instances, protocol, threadblock_policy=ThreadblockPolicy.manual):

        # Per group reduce scatter
        for y in range(groups):
            for x in range(gpus_per_group):
                output_index = y * groups + x
                input_index = x
                # Start the ring reduction on the neighbor of rank (y, x).
                gpu = y * groups + (x+1) % gpus_per_group
                c = chunk(gpu, Buffer.input, input_index)
                # Use the input buffer to perform reduction across groups
                for x_ in range(1, gpus_per_group):
                    c = c.reduce(y * groups + (x + 1 + x_) % gpus_per_group, Buffer.input, input_index, sendtb=0, recvtb=0, ch=0)
                # Copy reduced chunk into the output buffer
                c = c.send(c.rank, Buffer.output, output_index, sendtb=0, recvtb=0, ch=0)

        # Ring Allgather
        for r in range(num_ranks):
            c = chunk(r, Buffer.output, r)
            next = (r + 1) % num_ranks
            while next != r:
                c = c.send(next, Buffer.output, r, sendtb=1, recvtb=1, ch=1)
                next = (next + 1) % num_ranks

        # Validate the schedule against the collective's expected output,
        # then print the generated XML program.
        Check()
        XML()
if __name__ == '__main__':
    # CLI entry point: positional rank/group sizing, optional instances/protocol.
    parser = argparse.ArgumentParser()
    parser.add_argument('num_ranks', type=int, help ='number of ranks')
    parser.add_argument('groups', type=int, help='number of reduction groups')
    parser.add_argument('--instances', type=int, default=1, help='number of instances')
    parser.add_argument('--protocol', type=str, default='Simple',
                        choices=['Simple', 'LL', 'LL128'], help ='NCCL protocol')
    args = parser.parse_args()
    # Ranks must divide evenly into the reduction groups.
    assert args.num_ranks % args.groups == 0
    program(args.num_ranks, args.groups, args.instances, args.protocol)
| StarcoderdataPython |
6421402 | """
Constants Module
"""
class Constants():
    """Lookup tables mapping status-page API keys to human-readable labels."""

    # Lifecycle states of an incident update.
    INCIDENT_UPDATE_STATUSES = dict(
        investigating="Investigating",
        identified="Identified",
        monitoring="Monitoring",
        update="Update",
        resolved="Resolved",
    )

    # Health states of a monitored component.
    COMPONENT_STATUSES = dict(
        operational="Operational",
        degraded_performance="Degraded Performance",
        partial_outage="Partial Outage",
        major_outage="Major Outage",
        maintenance="Maintenance",
    )
| StarcoderdataPython |
6695929 | import os
import random
import itertools
import numpy as np
import collections
import matplotlib.pyplot as plt
from collections import Counter
from itertools import chain
from bisect import bisect_right, bisect_left
from reclist.current import current
def statistics(x_train, y_train, x_test, y_test, y_pred):
    """Return basic dataset and prediction counts for the report.

    Note: y_train and y_test are accepted for interface symmetry but unused.
    """
    # Count predictions that are non-empty / truthy.
    non_null_predictions = sum(1 for prediction in y_pred if prediction)
    return {
        'training_set__size': len(x_train),
        'test_set_size': len(x_test),
        'num_non_null_predictions': non_null_predictions
    }
def sample_hits_at_k(y_preds, y_test, x_test=None, k=3, size=3):
    """Collect cases where the first target item appears in the top-k
    predictions; return a random sample of `size` of them (all if size == -1
    or fewer than `size` hits exist)."""
    hits = []
    for idx, (preds, targets) in enumerate(zip(y_preds, y_test)):
        if targets[0] not in preds[:k]:
            continue
        info = {
            'Y_TEST': [targets[0]],
            'Y_PRED': preds[:k],
        }
        if x_test:
            info['X_TEST'] = [x_test[idx][0]]
        hits.append(info)

    if len(hits) < size or size == -1:
        return hits
    return random.sample(hits, k=size)
def sample_misses_at_k(y_preds, y_test, x_test=None, k=3, size=3):
    """Collect cases where the first target item is absent from the top-k
    predictions; return a random sample of `size` of them (all if size == -1
    or fewer than `size` misses exist)."""
    misses = []
    for idx, (preds, targets) in enumerate(zip(y_preds, y_test)):
        if targets[0] in preds[:k]:
            continue
        info = {
            'Y_TEST': [targets[0]],
            'Y_PRED': preds[:k],
        }
        if x_test:
            info['X_TEST'] = [x_test[idx][0]]
        misses.append(info)

    if len(misses) < size or size == -1:
        return misses
    return random.sample(misses, k=size)
def sample_all_misses_at_k(y_preds, y_test, x_test=None, k=3, size=3):
    """Collect cases where *any* target item is missing from the top-k
    predictions, recording all missing items; return a random sample of
    `size` of them (all if size == -1 or fewer than `size` exist)."""
    misses = []
    for idx, (preds, targets) in enumerate(zip(y_preds, y_test)):
        missing = [item for item in targets if item not in preds[:k]]
        if not missing:
            continue
        info = {
            'Y_TEST': missing,
            'Y_PRED': preds[:k],
        }
        if x_test:
            info['X_TEST'] = [x_test[idx][0]]
        misses.append(info)

    if len(misses) < size or size == -1:
        return misses
    return random.sample(misses, k=size)
def hit_rate_at_k_nep(y_preds, y_test, k=3):
    """HR@k for next-event-prediction targets (scalar labels are wrapped)."""
    wrapped_targets = [[label] for label in y_test]
    return hit_rate_at_k(y_preds, wrapped_targets, k=k)


def hit_rate_at_k(y_preds, y_test, k=3):
    """Fraction of cases whose top-k predictions contain any target item."""
    hits = sum(
        1 for preds, targets in zip(y_preds, y_test)
        if set(preds[:k]) & set(targets)
    )
    return hits / len(y_test)
def mrr_at_k_nep(y_preds, y_test, k=3):
    """
    Computes MRR for next-event-prediction targets (scalar labels).

    :param y_preds: predictions, as lists of lists
    :param y_test: target data, as a list of scalar labels
    :param k: top-k
    """
    wrapped_targets = [[label] for label in y_test]
    return mrr_at_k(y_preds, wrapped_targets, k=k)


def mrr_at_k(y_preds, y_test, k=3):
    """
    Computes MRR: the mean reciprocal rank of the first relevant item
    found within the top-k predictions (0 when none is found).

    :param y_preds: predictions, as lists of lists
    :param y_test: target data, as lists of lists (eventually [[sku1], [sku2],...]
    :param k: top-k
    """
    reciprocal_ranks = []
    for preds, targets in zip(y_preds, y_test):
        first_hit_rank = next(
            (rank for rank, p in enumerate(preds[:k], start=1) if p in targets),
            None)
        reciprocal_ranks.append(1 / first_hit_rank if first_hit_rank else 0)
    assert len(reciprocal_ranks) == len(y_preds)
    return np.mean(reciprocal_ranks)
def coverage_at_k(y_preds, product_data, k=3):
    """Catalog coverage@k: fraction of known products that appear in at least
    one top-k recommendation list.

    :param y_preds: predictions, as lists of lists (one list per test case)
    :param product_data: mapping whose keys are all known product ids
    :param k: number of top predictions considered per test case
    """
    # Bug fix: the original sliced the *list of test cases* (y_preds[:k])
    # instead of taking the top-k items of each individual prediction list.
    pred_skus = set(itertools.chain.from_iterable(preds[:k] for preds in y_preds))
    all_skus = set(product_data.keys())
    nb_overlap_skus = len(pred_skus.intersection(all_skus))
    return nb_overlap_skus / len(all_skus)
def popularity_bias_at_k(y_preds, x_train, k=3):
    """Average (normalized) training-set popularity of the recommended items.

    Popularity of an item is its share of all training interactions; each
    prediction list contributes the mean popularity of its top-k items.
    """
    # Count item occurrences across all training sessions.
    interaction_counts = collections.defaultdict(lambda: 0)
    num_interactions = 0
    for session in x_train:
        for event in session:
            interaction_counts[event] += 1
            num_interactions += 1
    # Normalize raw counts into interaction frequencies.
    pop_map = {item: count / num_interactions
               for item, count in interaction_counts.items()}

    all_popularity = []
    for preds in y_preds:
        # NOTE: divides by the full prediction length, as in the original.
        avg_pop = sum(pop_map.get(item, 0.0) for item in preds[:k]) / len(preds) if len(preds) > 0 else 0
        all_popularity.append(avg_pop)
    return sum(all_popularity) / len(y_preds)
def precision_at_k(y_preds, y_test, k=3):
    """Average per-case precision of the top-k predictions.

    NOTE(review): preserved from the original -- divides by the *full*
    prediction length (not min(k, len)), and scores empty predictions as 1.
    """
    scores = []
    for preds, targets in zip(y_preds, y_test):
        if preds:
            scores.append(len(set(targets) & set(preds[:k])) / len(preds))
        else:
            scores.append(1)
    return np.average(scores)
def recall_at_k(y_preds, y_test, k=3):
    """Average per-case recall of the top-k predictions.

    Cases with an empty target list score 1 (nothing to recall).
    """
    scores = []
    for preds, targets in zip(y_preds, y_test):
        if targets:
            scores.append(len(set(targets) & set(preds[:k])) / len(targets))
        else:
            scores.append(1)
    return np.average(scores)
def ndcg_at_k(y_preds, y_test, k=3):
    """Average NDCG@k with binary relevance derived from the target lists."""
    import sklearn.metrics
    # Ideal gain vector: k, k-1, ..., 1.
    ideal_scores = list(reversed(list(range(1, k + 1))))
    per_user_ndcg = []
    for preds, targets in zip(y_preds, y_test):
        relevance = [1 if item in targets else 0 for item in preds[:k]]
        # 0 pad relevance to k if there are fewer than k predictions
        relevance.extend([0] * (k - len(relevance)))
        per_user_ndcg.append(sklearn.metrics.ndcg_score([relevance], [ideal_scores]))
    return np.average(np.asarray(per_user_ndcg))
def ndcg_at_k_user_differential(y_preds, y_test, y_test_full, k=3,
                                user_feature='gender'):
    """NDCG@k broken down by a user feature (e.g. gender) read from the
    full test metadata."""
    pred_breakdown, test_breakdown = _breakdown_preds_by_user_feature(
        y_test_full, y_preds, y_test, user_feature=user_feature)
    return _apply_func_to_breakdown(ndcg_at_k, pred_breakdown, test_breakdown, k=k)
def _breakdown_preds_by_user_feature(y_test, y_preds, y_test_ids, user_feature='gender'):
from collections import defaultdict
pred_breakdown = defaultdict(list)
test_breakdown = defaultdict(list)
for _t, _p, _t_ids in zip(y_test, y_preds, y_test_ids):
target_user_feature = _t[0][user_feature]
if not target_user_feature:
target_user_feature = 'unknown'
pred_breakdown[target_user_feature].append(_p)
test_breakdown[target_user_feature].append(_t_ids)
return pred_breakdown, test_breakdown
def _apply_func_to_breakdown(func, pred_breakdown, test_breakdown, *args, **kwargs):
retval = {}
for key in sorted(pred_breakdown.keys()):
retval[key] = func(pred_breakdown[key], test_breakdown[key], *args, **kwargs)
return retval
def rec_items_distribution_at_k(y_preds, k=3, bin_width=100, debug=True):
    """Histogram how often items appear across all top-k recommendation lists.

    Returns a dict with both a fixed-width and a log-spaced histogram of item
    frequencies; when `debug` is True a bar-plot of both is saved under the
    current report path.
    """
    def _calculate_hist(frequencies, bins):
        """ Works out the counts of items in each bucket """
        counts_per_bin = Counter([bisect_right(bins, item) - 1 for item in frequencies])
        counts_per_bin_list = list(counts_per_bin.items())
        # Buckets that received no items get an explicit zero count so the
        # result covers every bin.
        empty_bins_indices = [ele for ele in np.arange(len(bins) - 1) if ele not in [
            index for index, _ in counts_per_bin_list
        ]]
        counts_per_bin_list.extend([(index, 0) for index in empty_bins_indices])
        counts_per_bin_sorted = sorted(counts_per_bin_list, key=lambda x: x[0], reverse=False)
        return [y for _, y in counts_per_bin_sorted]

    def _format_results(bins, counts_per_bin):
        """ Formatting results """
        results = {}
        for index, (_, count) in enumerate(zip(bins, counts_per_bin)):
            # Skip degenerate (zero-width) bins produced by integer rounding.
            if bins[index] != bins[index + 1]:
                results[str(bins[index]) + '-' + str(bins[index + 1] - 1)] = count
        return results

    # estimate frequency of recommended items in predictions
    reduce_at_k_preds = [preds[:k] for preds in y_preds]
    counts = Counter(chain.from_iterable(reduce_at_k_preds))
    frequencies = list(counts.values())
    # fixed bin size
    bins = np.arange(np.min(frequencies), np.max(frequencies) + bin_width, bin_width)
    counts_per_bin_sorted = _calculate_hist(frequencies, bins)
    # log bin size
    log_bins = np.logspace(np.log10(bins[0]), np.log10(bins[-1]), len(bins))
    log_bins = np.array([int(round(log_bin, 2)) for log_bin in log_bins])
    counts_per_logbin_sorted = _calculate_hist(frequencies, log_bins)
    test_results = {
        'histogram_fixed': _format_results(bins, counts_per_bin_sorted),
        'histogram_log': _format_results(log_bins, counts_per_logbin_sorted)
    }
    if debug:
        f, (ax1, ax2) = plt.subplots(2, 1)
        # debug / visualization
        ax1.bar(bins[:-1], counts_per_bin_sorted,
                width=bin_width, align='edge')
        ax1.set_ylabel('Number of items')
        ax2.bar(log_bins[:-1], counts_per_logbin_sorted,
                width=(log_bins[1:] - log_bins[:-1]), align='edge')
        ax2.set_xscale('log', base=10)
        ax2.set_xlabel('No. of times an item appear in a recs list')
        ax2.set_ylabel('Number of items')
        plt.savefig(os.path.join(current.report_path, 'plots', 'rec_items_distribution_at_k.png'))
    return test_results
| StarcoderdataPython |
6609496 | <filename>code/test_poly_time.py<gh_stars>0
from os import urandom
from pyspark import SparkConf, SparkContext, StorageLevel
from time import time
from base.algebra import *
from base.ntt import fast_coset_divide, fast_coset_evaluate, fast_multiply
from base.univariate import Polynomial
from rdd.rdd_poly import (
ntt1,
poly_add,
poly_combine,
poly_mul_constant,
poly_scale,
poly_sub,
rdd_fast_coset_divide,
rdd_fast_coset_evaluate,
rdd_fast_multiply,
rdd_ntt,
)
import sys
from test_spark import get_sc
field = Field.main()
g = field.generator()
def test_poly_scale():  # NOTE: this benchmark is fairly time-consuming
    """Time local Polynomial.scale on the module-level sample coefficients."""
    poly = Polynomial([value for _, value in arr])
    print("test_poly_scale")
    started_at = time()
    _ = poly.scale(g)
    print("finished. ", time() - started_at)
def test_rdd_poly_scale():
    """Time the RDD-based poly_scale on the module-level sample data."""
    sc = get_sc()
    coeff_rdd = sc.parallelize(arr)
    print("test_rdd_poly_scale")
    started_at = time()
    _ = poly_scale(coeff_rdd, g).collect()
    print("finished. ", time() - started_at)
    sc.stop()
def test_poly_add():
    """Time local Polynomial addition on the two sample coefficient sets."""
    left = Polynomial([value for _, value in arr])
    right = Polynomial([value for _, value in arr1])
    print("test_poly_add")
    started_at = time()
    _ = left + right
    print("finished. ", time() - started_at)
def test_rdd_poly_add():
    """Time the RDD-based poly_add on the two sample coefficient sets."""
    sc = get_sc()
    rdd_arr = sc.parallelize(arr)
    # Bug fix: previously parallelized `arr` again; use the second operand
    # `arr1`, matching test_poly_add and test_rdd_poly_sub.
    rdd_arr1 = sc.parallelize(arr1)
    print("test_rdd_poly_add")
    start = time()
    values1 = poly_add(rdd_arr, rdd_arr1).collect()
    print("finished. ", time() - start)
    sc.stop()
def test_poly_sub():
    """Time local Polynomial subtraction on the two sample coefficient sets."""
    left = Polynomial([value for _, value in arr])
    right = Polynomial([value for _, value in arr1])
    print("test_poly_sub")
    started_at = time()
    _ = left - right
    print("finished. ", time() - started_at)
def test_rdd_poly_sub():
    """Time the RDD-based poly_sub on the two sample coefficient sets."""
    sc = get_sc()
    left_rdd = sc.parallelize(arr)
    right_rdd = sc.parallelize(arr1)
    print("test_rdd_poly_sub")
    started_at = time()
    _ = poly_sub(left_rdd, right_rdd).collect()
    print("finished. ", time() - started_at)
    sc.stop()
def test_poly_mul_constant():
    """Time scaling a local Polynomial by its own leading sample coefficient."""
    coefficients = [value for _, value in arr]
    poly = Polynomial(coefficients)
    print("test_poly_mul_constant")
    started_at = time()
    _ = Polynomial([value * coefficients[0] for value in poly.coefficients])
    print("finished. ", time() - started_at)
def test_rdd_poly_mul_constant():
    """Time the RDD-based constant multiplication on the sample data."""
    sc = get_sc()
    coeff_rdd = sc.parallelize(arr)
    print("test_rdd_poly_mul_constant")
    started_at = time()
    _ = poly_mul_constant(coeff_rdd, arr[0][1]).collect()
    print("finished. ", time() - started_at)
    sc.stop()
if __name__ == "__main__":
    # mode: 0 = local benchmarks only, 1 = RDD benchmarks only, else run both.
    mode = int(sys.argv[1])
    # logn: log2 of the number of coefficients to generate.
    logn = int(sys.argv[2])  # 15
    n = 1 << logn
    print(n)
    primitive_root = field.primitive_nth_root(n)
    # Two random indexed coefficient arrays of field elements.
    arr = [(i, field.sample(urandom(17))) for i in range(n)]
    arr1 = [(i, field.sample(urandom(17))) for i in range(n)]
    if mode == 0:
        test_poly_scale()
        # test_poly_add()
        # test_poly_sub()
        # test_poly_mul_constant()
    elif mode == 1:
        test_rdd_poly_scale()
        # test_rdd_poly_add()
        # test_rdd_poly_sub()
        # test_rdd_poly_mul_constant()
    else:
        test_poly_scale()
        test_rdd_poly_scale()
        # test_poly_add()
        # test_rdd_poly_add()
        # test_poly_sub()
        # test_rdd_poly_sub()
        # test_poly_mul_constant()
        # test_rdd_poly_mul_constant()
| StarcoderdataPython |
1790843 | import asyncio
import logging
import os
import threading
from time import sleep
from unittest.mock import MagicMock
from dotenv import load_dotenv
from Collectors.EdgeOS.Collector import EdgeOSCollector
from Collectors.Environment.Collector import EnvironmentCollector
from DB.InfluxDBCloud import InfluxDBCloud
# Shared background event loop (created lazily) and its creation guard.
_loop = None
_loop_lock = threading.Lock()


def schedule_background(coro):
    """Schedule *coro* on a shared background event-loop thread.

    The loop and its daemon thread are created lazily on first use; creation
    is guarded by a lock so two concurrent first calls cannot spawn two loops
    (the original unguarded check-then-set was racy).
    """
    global _loop
    with _loop_lock:
        if _loop is None:
            _loop = asyncio.new_event_loop()
            threading.Thread(target=_loop.run_forever, daemon=True).start()
    # run_coroutine_threadsafe is the supported API for submitting a
    # coroutine to a loop running in another thread.
    asyncio.run_coroutine_threadsafe(coro, _loop)
if __name__ == '__main__':
    # Pull InfluxDB connection settings from the env file.
    load_dotenv("../settings.env")
    influx_url = os.environ.get('INFLUX_URL')
    influx_token = os.environ.get('INFLUX_TOKEN')
    influx_org = os.environ.get('INFLUX_ORG')
    influx_bucket = os.environ.get('INFLUX_BUCKET')

    logging.basicConfig(format='%(asctime)s:%(levelname)s:%(name)s:%(message)s')
    logger = logging.getLogger()
    # DEBUG_MODE toggles verbose logging.
    if os.environ.get("DEBUG_MODE"):
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    with InfluxDBCloud(influx_url, influx_bucket, influx_token, influx_org) as db_writer:
        # NO_DB_WRITE stubs out writes for dry runs.
        if os.environ.get("NO_DB_WRITE"):
            db_writer.write = MagicMock(side_effect=lambda x: logging.info("Suppressed DB Write"))
        eosc = EdgeOSCollector(db_writer)
        eosc.register_collector()
        envc = EnvironmentCollector(db_writer)
        envc.register_collector()
        # Run both collectors on the shared background loop; the main thread
        # just keeps the process alive.
        schedule_background(eosc.start())
        schedule_background(envc.start())
        while True:
            sleep(1)
| StarcoderdataPython |
8183011 | <gh_stars>1-10
from typing import List
def find_line(lines: List[str],
              keyword: str,
              fallback: str = '') -> str:
    """Return the first line starting with *keyword* (newline stripped),
    or *fallback* when no line matches."""
    matches = (line.replace('\n', '') for line in lines if line.startswith(keyword))
    return next(matches, fallback)
def find_value(line: str,
               delimiter: str) -> str:
    """Return the text after the last occurrence of *delimiter* in *line*
    (the whole line when the delimiter is absent)."""
    _, _, tail = line.rpartition(delimiter)
    return tail if delimiter in line else line
9365 | #!/usr/bin/env python3
"""
Description: Python script to append the common columns in one sheet from another sheet using fuzzy matching.
"""
import pip
def import_or_install(package):
    """Import *package* by name, attempting a pip install if it is missing.

    NOTE(review): calling ``pip.main`` programmatically is unsupported by
    modern pip releases; ``subprocess`` with ``python -m pip install`` would
    be the robust alternative -- confirm before changing.
    """
    try:
        __import__(package)
    except ImportError:
        pip.main(['install', package])
import os
import sys
import argparse
import_or_install('numpy')
import_or_install('pandas')
import_or_install('fuzzywuzzy')
import numpy as np
import pandas as pd
from fuzzywuzzy import process, fuzz
class FuzzyMatcher:
    """
    FuzzyMatcher class to perform the fuzzy matching.
    """
    def __init__(self, df_1, df_2, columns_1, columns_2, append_in='second'):
        """
        The constructor takes five arguments. The last argument 'append_in' is optional.

        Parameters:
            df_1: the first table in pandas.DataFrame format or the name of the CSV file for the first table
            df_2: the second table in pandas.DataFrame format or the name of the CSV file for the second table
            columns_1: list of common columns in the first table
            columns_2: list of common columns in the second table
            append_in (optional):
                'first' if the common columns are to be appended in the first table
                'second' if the common columns are to be appended in the second table
        """
        # Accept either in-memory DataFrames or CSV filenames.
        if type(df_1) == str:
            df_1 = pd.read_csv(df_1)
        if type(df_2) == str:
            df_2 = pd.read_csv(df_2)
        # Normalize all column names to lowercase/stripped for matching.
        df_1.columns = df_1.columns.str.lower().str.strip()
        df_2.columns = df_2.columns.str.lower().str.strip()
        columns_1 = [i.lower().strip() for i in columns_1]
        columns_2 = [i.lower().strip() for i in columns_2]
        # When appending into the first table, swap operands so the rest of
        # the pipeline can always treat df_2 as the target table.
        if append_in == 'first':
            temp = df_1
            df_1 = df_2
            df_2 = temp
            temp = columns_1
            columns_1 = columns_2
            columns_2 = temp
        # Rename the first table's common columns to the second's names.
        self.df_1 = df_1.rename(columns=dict(zip(columns_1, columns_2)))
        self.columns = columns_2
        # Seed the matching with the first common column.
        self.df_2 = self._fuzzy_match(self.df_1, df_2, self.columns[0])

    @staticmethod
    def _string_matching(name, collection, mapping_):
        """
        Returns similar name using fuzzy matching.

        Exact hits and previously-resolved names are returned without
        invoking the (expensive) fuzzy scorer; `mapping_` acts as a cache.
        """
        if name in collection:
            return name
        if name in mapping_:
            return mapping_[name]

        similar = process.extractOne(name, collection, scorer=fuzz.ratio)[0]
        mapping_[name] = similar
        return similar

    def _fuzzy_match(self, df_1_t, df_2_t, common_column_t):
        """
        Returns dataframe with the common column appended.
        Notice that the appended columns end with '_t'.
        """
        collection = set(df_1_t[common_column_t])
        mapping_ = {}
        df_2_t[common_column_t + '_t'] = df_2_t[common_column_t].apply(self._string_matching, args=(collection, mapping_))
        return df_2_t

    @property
    def fuzzy_match(self):
        """
        Returns the dataframe consisting of all the appended columns.

        Matches remaining common columns one at a time; each step groups both
        tables by the already-matched columns so candidates are restricted to
        rows whose earlier keys agree.
        """
        for i_t, common_column in enumerate(self.columns[1:], start=1):
            self.df_2[common_column + '_t'] = np.nan
            group_1 = self.df_1.groupby(self.columns[:i_t])
            group_2 = self.df_2.groupby([i + '_t' for i in self.columns[:i_t]])
            for key, df_slice_2 in group_2:
                df_slice_1 = group_1.get_group(key)
                df_slice_2 = self._fuzzy_match(df_slice_1, df_slice_2, common_column)
                self.df_2.loc[df_slice_2.index, common_column + '_t'] = df_slice_2.loc[:, common_column + '_t']
        return self.df_2

    def save(self, filename):
        """
        Saves the result dataframe to a CSV file, filename.
        """
        self.df_2.to_csv(filename)
def parse_args(parser):
    """
    Parsing and configuration of the command line arguments.

    Parameters:
        parser: an argparse.ArgumentParser instance to attach the arguments to.

    Returns:
        The validated argparse.Namespace of parsed arguments.
    """
    # Bug fix: the passed-in parser was previously shadowed by a brand-new
    # ArgumentParser, making the parameter dead; use the caller's parser.
    parser.add_argument('--firstcsv', type=str, required=True, help='CSV file for first table.')
    parser.add_argument('--secondcsv', type=str, required=True, help='CSV file for second table.')
    parser.add_argument('--destination', type=str, default='output.csv', help='Destination filename.')
    parser.add_argument('--commoncolumns1', type=str, required=True, help='Common columns for first table.')
    parser.add_argument('--commoncolumns2', type=str, required=True, help='Common columns for second table in the same order.')
    parser.add_argument("--in", dest="_in", default='second', choices=['second', 'first'], help='Table to append the columns. ')
    # Validate filenames/columns before returning (may exit or raise).
    return check_args(parser.parse_args())
def check_args(args):
    """
    Checking the arguments if they are entered properly.

    Validations performed:
        1. Compulsory arguments are entered.
        2. The entered filenames are present in the current folder.
        3. The entered column names are present in the corresponding files.
        4. If the destination filename is already present in the directory, ask the user if it can be overwritten.
    """
    # for --firstcsv and --secondcsv
    for filename in [args.firstcsv, args.secondcsv]:
        if not os.path.isfile(filename):
            raise Exception("File {} is not present in the currrent folder.".format(filename))

    # --commoncolumns1
    commoncolumns1 = [i.strip().lower() for i in args.commoncolumns1.split(',')]
    # Read only the header row -- that is all we need to validate columns.
    temp = set(commoncolumns1) - set(pd.read_csv(args.firstcsv, nrows=1).columns.str.lower().str.strip())
    if temp:
        raise Exception("The following columns are not present in the file, {}:\n{}".format(args.firstcsv, temp))

    # --commoncolumns2
    commoncolumns2 = [i.strip().lower() for i in args.commoncolumns2.split(',')]
    temp = set(commoncolumns2) - set(pd.read_csv(args.secondcsv, nrows=1).columns.str.lower().str.strip())
    if temp:
        raise Exception("The following columns are not present in the file, {}:\n{}".format(args.secondcsv, temp))

    # --destination
    # Interactive confirmation before overwriting an existing output file.
    if os.path.isfile(args.destination):
        print("The file {} already exists. Do you want to overwrite it? y/n".format(args.destination))
        ans = input().strip().lower()
        if ans == 'n':
            print("Please enter different destination filename and run the script again.")
            sys.exit()

    return args
if __name__ == "__main__":
    # instantiate the ArgumentParser class and parse the arguments
    parser = argparse.ArgumentParser()
    arguments = parse_args(parser)

    # save the arguments as some variables which later would be passed to FuzzyMatcher class
    filename_1 = arguments.firstcsv
    filename_2 = arguments.secondcsv
    result_filename = arguments.destination

    # clean and lowercase-ize the columns names
    common_columns_1 = [i.strip().lower() for i in arguments.commoncolumns1.split(',')]
    common_columns_2 = [i.strip().lower() for i in arguments.commoncolumns2.split(',')]

    # instantiate the FuzzyMatcher object, perform the fuzzy match, and save the result to the destination CSV file
    fuzzy_matcher = FuzzyMatcher(filename_1, filename_2, common_columns_1, common_columns_2, append_in=arguments._in)
    # NOTE: fuzzy_match is a property; accessing it runs the matching in place.
    fuzzy_matcher.fuzzy_match
    fuzzy_matcher.save(result_filename)
| StarcoderdataPython |
6599046 | from mysql.connector.pooling import MySQLConnectionPool as MariaDBConnectionPool
from undine.database import Database
from undine.utils.exception import UndineException
import mysql.connector as mariadb
class MariaDbConnector(Database):
    """MariaDB-backed implementation of the Undine Database interface.

    All operations draw a connection from a fixed-size pool and always
    return it to the pool, even when the underlying query raises.
    """

    _DEFAULT_HOST = 'localhost'
    _DEFAULT_DATABASE = 'undine'
    _DEFAULT_USER = 'undine'
    _DEFAULT_PASSWD = 'password'

    def __init__(self, config):
        """Build the connection pool from *config*; missing keys get defaults
        (and are written back into *config* via setdefault)."""
        db_config = {
            'host': config.setdefault('host', self._DEFAULT_HOST),
            'database': config.setdefault('database', self._DEFAULT_DATABASE),
            'user': config.setdefault('user', self._DEFAULT_USER),
            'passwd': config.setdefault('password', self._DEFAULT_PASSWD)
        }

        try:
            self._pool = MariaDBConnectionPool(pool_name=db_config['database'],
                                               pool_size=32,
                                               **db_config)
        except mariadb.Error as error:
            raise UndineException('MariaDB connection failed: {}'.format(error))

    def _execute_multiple_dml(self, queries):
        """Execute several DML statements in one transaction and commit."""
        conn = self._pool.get_connection()
        try:
            cursor = conn.cursor()
            try:
                for item in queries:
                    cursor.execute(item.query, item.params)
            finally:
                cursor.close()
            conn.commit()
        finally:
            # Bug fix: without try/finally a failing execute()/commit()
            # leaked the pooled connection; close() returns it to the pool.
            conn.close()

    def _execute_single_dml(self, query, params):
        """Execute one DML statement and commit."""
        conn = self._pool.get_connection()
        try:
            cursor = conn.cursor()
            try:
                cursor.execute(query, params)
            finally:
                cursor.close()
            conn.commit()
        finally:
            conn.close()

    def _fetch_a_tuple(self, query, params):
        """Run *query* and return the first result row (or None)."""
        conn = self._pool.get_connection()
        try:
            cursor = conn.cursor()
            try:
                cursor.execute(query, params)
                row = cursor.fetchone()
            finally:
                cursor.close()
        finally:
            conn.close()
        return row

    def _fetch_all_tuples(self, query, params):
        """Run *query* and return all result rows as a list."""
        conn = self._pool.get_connection()
        try:
            cursor = conn.cursor()
            try:
                cursor.execute(query, params)
                rows = cursor.fetchall()
            finally:
                cursor.close()
        finally:
            conn.close()
        return rows
| StarcoderdataPython |
350251 | __author__ = '<NAME>, <EMAIL>'
from innermemetic import InnerMemeticSearch
from inversememetic import InverseMemeticSearch
class InnerInverseMemeticSearch(InnerMemeticSearch, InverseMemeticSearch):
    """ inverse of inner memetic search"""

    def _learnStep(self):
        # Swap the mutation operators, run one inner-memetic learning step,
        # then swap them back so the outer state is unchanged.
        self.switchMutations()
        InnerMemeticSearch._learnStep(self)
        self.switchMutations()
5186490 | """Provide an easy interface for loading data into L{DataFrame}s for Spark.
"""
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from sparklingpandas.utils import add_pyspark_path
add_pyspark_path()
import pandas
from StringIO import StringIO as sio
from pyspark.context import SparkContext
from sparklingpandas.dataframe import DataFrame, _normalize_index_names
import logging
class PSparkContext():
"""This is a thin wrapper around SparkContext from PySpark which makes it
easy to load data into L{DataFrame}s."""
    def __init__(self, spark_context, sql_ctx=None):
        """Initialize a PSparkContext with the associated spark context,
        and Spark SQL context if provided. This context is used to load
        data into L{DataFrame}s.

        Parameters
        ----------
        spark_context: SparkContext
            Initialized and configured spark context. If you are running in the
            PySpark shell, this is already created as "sc".
        sql_ctx: SQLContext, optional
            Initialized and configured SQL context, if not provided Sparkling
            Panda's will create one.
        Returns
        -------
        Correctly initialized SparklingPandasContext.
        """
        self.spark_ctx = spark_context
        if sql_ctx:
            self.sql_ctx = sql_ctx
        else:
            # Build a SQLContext on top of the provided SparkContext;
            # import is deferred so pyspark.sql is only required here.
            logging.info("No sql context provided, creating")
            from pyspark.sql import SQLContext
            self.sql_ctx = SQLContext(self.spark_ctx)
@classmethod
def simple(cls, *args, **kwargs):
"""Takes the same arguments as SparkContext and constructs a
PSparkContext"""
return PSparkContext(SparkContext(*args, **kwargs))
    def read_csv(self, file_path, use_whole_file=False, names=None, skiprows=0,
                 *args, **kwargs):
        """Read a CSV file in and parse it into Pandas DataFrames. By default,
        the first row from the first partition of that data is parsed and used
        as the column names for the data from. If no 'names' param is
        provided we parse the first row of the first partition of data and
        use it for column names.

        Parameters
        ----------
        file_path: string
            Path to input. Any valid file path in Spark works here, eg:
            'file:///my/path/in/local/file/system' or 'hdfs:/user/juliet/'
        use_whole_file: boolean
            Whether of not to use the whole file.
        names: list of strings, optional
        skiprows: integer, optional
            indicates how many rows of input to skip. This will
            only be applied to the first partition of the data (so if
            #skiprows > #row in first partition this will not work). Generally
            this shouldn't be an issue for small values of skiprows.
            No other value of header is supported.
        All additional parameters available in pandas.read_csv() are usable
        here.

        Returns
        -------
        A SparklingPandas DataFrame that contains the data from the
        specified file.
        """
        # Partition-mapper for whole-file mode: each element is a
        # (filename, contents) pair from wholeTextFiles.
        def csv_file(partition_number, files):
            # pylint: disable=unexpected-keyword-arg
            file_count = 0
            for _, contents in files:
                # Only skip lines on the first file
                if partition_number == 0 and file_count == 0 and _skiprows > 0:
                    yield pandas.read_csv(
                        sio(contents), *args,
                        header=None,
                        names=mynames,
                        skiprows=_skiprows,
                        **kwargs)
                else:
                    file_count += 1
                    yield pandas.read_csv(
                        sio(contents), *args,
                        header=None,
                        names=mynames,
                        **kwargs)

        # Partition-mapper for line mode: rows are raw text lines that get
        # re-joined and parsed as one CSV chunk per partition.
        def csv_rows(partition_number, rows):
            # pylint: disable=unexpected-keyword-arg
            in_str = "\n".join(rows)
            if partition_number == 0:
                return iter([
                    pandas.read_csv(
                        sio(in_str), *args, header=None,
                        names=mynames,
                        skiprows=_skiprows,
                        **kwargs)])
            else:
                # could use .iterows instead?
                return iter([pandas.read_csv(sio(in_str), *args, header=None,
                                             names=mynames, **kwargs)])

        # If we need to peak at the first partition and determine the column
        # names
        mynames = None
        _skiprows = skiprows
        if names:
            mynames = names
        else:
            # In the future we could avoid this expensive call.
            first_line = self.spark_ctx.textFile(file_path).first()
            frame = pandas.read_csv(sio(first_line), **kwargs)
            # pylint sees frame as a tuple despite it being a DataFrame
            mynames = list(frame.columns)
            _skiprows += 1

        # Do the actual load
        # NOTE(review): relies on a from_pandas_rdd helper not visible in
        # this chunk -- presumably defined later on this class; confirm.
        if use_whole_file:
            return self.from_pandas_rdd(
                self.spark_ctx.wholeTextFiles(file_path)
                .mapPartitionsWithIndex(csv_file))
        else:
            return self.from_pandas_rdd(
                self.spark_ctx.textFile(file_path)
                .mapPartitionsWithIndex(csv_rows))
def parquetFile(self, *paths):
"""Loads a Parquet file, returning the result as a L{DataFrame}.
Parameters
----------
paths: string, variable length
The path(s) of the parquet files to load. Should be Hadoop style
paths (e.g. hdfs://..., file://... etc.).
Returns
-------
A L{DataFrame} of the contents of the parquet files.
"""
return self.from_spark_rdd(self.sql_ctx.parquetFile(paths))
def jsonFile(self, path, schema=None, sampling_ratio=1.0):
"""Loads a text file storing one JSON object per line as a
L{DataFrame}.
Parameters
----------
path: string
The path of the json files to load. Should be Hadoop style
paths (e.g. hdfs://..., file://... etc.).
schema: StructType, optional
If you know the schema of your input data you can specify it. The
schema is specified using Spark SQL's schema format. If not
specified will sample the json records to determine the schema.
Spark SQL's schema format is documented (somewhat) in the
"Programmatically Specifying the Schema" of the Spark SQL
programming guide at: http://bit.ly/sparkSQLprogrammingGuide
sampling_ratio: int, default=1.0
Percentage of the records to sample when infering schema.
Defaults to all records for safety, but you may be able to set to
a lower ratio if the same fields are present accross records or
your input is of sufficient size.
Returns
-------
A L{DataFrame} of the contents of the json files.
"""
schema_rdd = self.sql_ctx.jsonFile(path, schema, sampling_ratio)
return self.from_spark_rdd(schema_rdd)
    def from_pd_data_frame(self, local_df):
        """Make a Sparkling Pandas dataframe from a local Pandas DataFrame.
        The intend use is for testing or joining distributed data with local
        data.
        The types are re-infered, so they may not match.
        Parameters
        ----------
        local_df: Pandas DataFrame
            The data to turn into a distributed Sparkling Pandas DataFrame.
            See http://bit.ly/pandasDataFrame for docs.
        Returns
        -------
        A Sparkling Pandas DataFrame.
        """
        def frame_to_rows(frame):
            """Convert a Pandas DataFrame into a list of Spark SQL Rows"""
            # TODO: Convert to row objects directly?
            return [r.tolist() for r in frame.to_records()]
        # Schema is the index columns (normalized names) followed by the
        # data columns, matching the record layout from to_records().
        schema = list(local_df.columns)
        index_names = list(local_df.index.names)
        index_names = _normalize_index_names(index_names)
        schema = index_names + schema
        rows = self.spark_ctx.parallelize(frame_to_rows(local_df))
        sp_df = DataFrame.from_schema_rdd(
            self.sql_ctx.createDataFrame(
                rows,
                schema=schema,
                # Look at all the rows, should be ok since coming from
                # a local dataset
                samplingRatio=1))
        sp_df._index_names = index_names
        return sp_df
def sql(self, query):
    """Execute *query* through Spark SQL and wrap the result.

    This targets registered Spark SQL tables, not arbitrary external
    databases.

    Parameters
    ----------
    query: string
        The SQL text handed to Spark SQL for execution.

    Returns
    -------
    Sparkling Pandas DataFrame.
    """
    spark_result = self.sql_ctx.sql(query)
    return DataFrame.from_spark_rdd(spark_result, self.sql_ctx)
def table(self, table):
    """Expose the named Spark SQL table as a L{DataFrame}.

    Parameters
    ----------
    table: string
        Name of the registered Spark SQL table to wrap.

    Returns
    -------
    Sparkling Pandas DataFrame.
    """
    spark_table = self.sql_ctx.table(table)
    return DataFrame.from_spark_rdd(spark_table, self.sql_ctx)
def from_spark_rdd(self, spark_rdd):
    """Wrap a Spark DataFrame in a Sparkling Pandas DataFrame.

    No checking or validation is performed on the input.

    Parameters
    ----------
    spark_rdd: Spark DataFrame
        The Spark-side frame to wrap.

    Returns
    -------
    Sparkling Pandas DataFrame.
    """
    return DataFrame.from_spark_rdd(spark_rdd, self.sql_ctx)
def DataFrame(self, elements, *args, **kwargs):
    """Build a Sparkling Pandas DataFrame from local data, mirroring the
    pandas.DataFrame constructor API.

    Since *elements* is local, this only makes sense for data small
    enough to fit on a single machine anyway.

    Parameters
    ----------
    elements: numpy ndarray (structured or homogeneous), dict, or
        Pandas DataFrame.
        Input elements for the frame. Extra positional/keyword arguments
        are forwarded to L{pandas.DataFrame}.

    Returns
    -------
    Sparkling Pandas DataFrame.
    """
    local_frame = pandas.DataFrame(elements, *args, **kwargs)
    return self.from_pd_data_frame(local_frame)
def from_pandas_rdd(self, pandas_rdd):
    """Wrap an RDD of Pandas DataFrames as a Sparkling Pandas DataFrame.

    Note: index information carried by the partition frames is dropped in
    the current implementation.

    Parameters
    ----------
    pandas_rdd: RDD[pandas.DataFrame]
        The distributed collection of local frames.

    Returns
    -------
    Sparkling Pandas DataFrame.
    """
    return DataFrame.fromDataFrameRDD(pandas_rdd, self.sql_ctx)
def read_json(self, file_path,
              *args, **kwargs):
    """Read JSON file(s) into a distributed Sparkling Pandas DataFrame.

    Each whole file is parsed with pandas.read_json; extra positional and
    keyword arguments are forwarded to it.

    Parameters
    ----------
    file_path: string
        Path to input. Any valid file path in Spark works here, e.g.
        'my/path/in/local/file/system' or 'hdfs:/user/juliet/'.

    Returns
    -------
    A SparklingPandas DataFrame containing the data from the specified
    file(s).
    """
    def _files_to_frames(name_content_pairs):
        """Parse each (filename, contents) pair into a pandas frame."""
        # wholeTextFiles yields (path, contents); the path is not needed.
        for _, raw_json in name_content_pairs:
            yield pandas.read_json(sio(raw_json), *args, **kwargs)
    whole_files = self.spark_ctx.wholeTextFiles(file_path)
    return self.from_pandas_rdd(whole_files.mapPartitions(_files_to_frames))
def stop(self):
    """Shut down the underlying SparkContext."""
    self.spark_ctx.stop()
| StarcoderdataPython |
4963739 | <gh_stars>0
'''
Author: <NAME>
Date: Dec 29 2016
Description: Script that runs in the background. Shuts down the pi when a GPIO
pin goes low
'''
import time
import RPi.GPIO as GPIO
import os

# global vars
TIME_DELAY = 500  # seconds to sleep per iteration of the idle loop
BUT_PIN = 21  # BCM pin number the shutdown button is wired to

# setup helper function for event callback
def shutdown(c):
    # ``c`` is the channel number RPi.GPIO passes to callbacks; unused here.
    os.system("sudo shutdown -P now")

# setup gpio and event listener
GPIO.setmode(GPIO.BCM)
# Input with internal pull-up: the pin reads high until the button pulls
# it to ground.
GPIO.setup(BUT_PIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# Falling edge = button press; bouncetime (ms) debounces the switch.
GPIO.add_event_detect(BUT_PIN, GPIO.FALLING, callback = shutdown, bouncetime = 2000)

# Sleep forever; the edge-detect callback does the actual work.
while True:
    time.sleep(TIME_DELAY)
| StarcoderdataPython |
6414277 | import datetime
import random
import math
import pandas as pd
import DataGenUtil
from faker import Faker
# Course catalogue used when generating class sections.
SUBJECTS = ['Math - Algebra', 'Math - Geometry', 'English Language', 'History - World History',
            'Science Biology', 'Health', 'Technology - Programming', 'Physical Education', 'Art', 'Music']
# Levels a generated school can belong to.
SCHOOL_TYPES = ['Elementary', 'Middle', 'High']
# (grade number, grade name) pairs.
GRADES = [(0, 'Kindergarten'), (1, 'First'), (2, 'Second'), (3, 'Third'),
          (4, 'Fourth'), (5, 'Fifth'), (6, 'Sixth'), (7, 'Seventh'), (8, 'Eigth')]
# (code, description) pairs for disciplinary actions.
ACTIONS = [('ID', 'In-school Suspension'), ('ES', 'Restorative Dialogue'), ('RJ', 'Restorative Justice'), ('EY', 'Expelled Remainder Of School/yr'),
           ('As', 'Tcher/parent/student Conference'), ('IS', 'In-school Detention'), ('LD', 'Lunch Detention'), ('PC', 'Parent Contact'), ('EL', 'Expelled Less Than School Year'),
           ('AC', 'Behavior/Attendance Contract'), ('VB', 'Verbal Warning'), ('SF', 'Suspension 5 Days Or Less'), ('RS', 'Referral To Social Worker'), ('SM', 'Suspension More Than Five Days'),
           ('SS', 'Saturday School'), ('AP', 'Admin/Prnt/Gurdn/Stu Conference'), ('RF', 'Referral To Counseling'), ('DB', 'Detention Before/after School'), ('LP', 'Loss of Privileges'),
           ('IA', 'In-school Alternative'), ('Cn', 'Ref Police - No charges filed'), ('EN', 'Expelled Into Next School Year')]
# (code, description) pairs for attendance types.
ATTENDANCE_TYPES = [('V', 'Early Completion'), ('5', 'Parent Request Opt Out Testing'), ('A', 'Absent (unexcused)'), ('F', 'Field Trip'), ('C', 'Counselor'), ('X', 'Excused Tardy'), ('I', 'In School Detention'), ('Q', 'Went Home Ill'), ('O', 'Office'), ('W', 'Weather'),
                    ('6', 'State or District Testing'), ('N', 'Nurse/Infirmary'), ('G', 'Early Release/Parent'), ('H', 'Timeout to Home'), ('Y', 'In a Facility'), ('R', 'Runaway'), ('P', 'Prearranged'), ('S', 'Suspended'), ('L', 'Tutored-District'), ('D', 'Enrolled in Special Program'),
                    ('M', 'SPED ONLY in school no IEP Svcs'), ('J', 'Teacher Excused'), ('E', 'Excused Absence'), ('T', 'Tardy (Unexcused)'), ('Pr', 'Present'), ('K', 'Social Worker'), ('Z', 'In Detention Center (SCYSC)')]
# (code, description) pairs for a student's role in an incident.
INVOLVEMENTS = [('A', 'Accomplice'), ('W', 'Witness'), ('V', 'Victim'), ('P', 'Perpetrator'), ('N', 'Not Applicable')]
# (code, description) pairs for incident types.
INCIDENTS = [('AA1', 'L1 Unexcused Absences'), ('ALA', 'L2 Abusive Lang w/Staff'), ('ALP', 'L1 Abusive Lang/Intim w/Student'), ('APL', 'L3 Any Act Prohibit by F/S/L Law'),
             ('ASR', 'L2 Altering Sch/Classrm Rcrds'), ('AT3', 'L3 3rd Degree Assault (by adult'), ('CLM', 'L1 Classroom Misconduct'), ('CLO', 'L2 Continual LEVEL I Infraction'),
             ('CLT', 'L3 Continual LEVEL II Infraction'), ('CP2', 'L2 Campus Misconduct'), ('CPM', 'L1 Campus Misconduct'), ('DEP', 'L3 Destruction/Sch/Emp Prop'), ('DIS', 'L1 Dishonesty'),
             ('DSP', 'L2 Defacing School Prop'), ('FCD', 'L2 Fail Complete Disc Asignmt'), ('FIG', 'L2 Fighting'), ( 'HA3', 'L3 Harassment'), ('HAR', 'L2 Harassment'), ('IDH', 'L1 Inappropriate Dress/Hygiene'),
             ('INS', 'L1 Insubordination'), ('IS2', 'L2 Insubor/open/persist defiance'), ('L1E', 'L1 Inappropriate/Prsnl Elect Dev'), ('L2B', 'L2 Bullying'), ('L2E', 'L2 Inappropriate/Prsnl Elect Dev'),
             ('L2P', 'L2 Phys Mistreatment of Studnt'), ('L2V', 'L2 Violation of AUA'), ('L3A', 'L3 P/U of Alcohol'), ('L3D', 'L3 P/U of Drug Paraphernalia'), ('PSV', 'L2 P/D/S Sched 4 or 5 substances'),
             ('PU4', 'L4 P/U Dangerous Weapon'), ('PUT', 'L2 P/U of Tobacco/Simulated'), ('PUW', 'L2 Inadvertent Pos(Stand )Weap'), ('SV2', 'L2 Serious Violations at School'), ('SV3', 'L3 Serious Violations at School'),
             ('THE', 'L2 Theft'), ('ULC', 'L2 Unauthorized Leaving Campus'), ('ULM', 'L3 Unlawful U/P/D/S of Marijuana'), ('UNA', 'L2 Unexcused Absences/Truancy'), ('UNT', 'L1 Unexcused Tardiness'), ('WF3', 'L3 Weapon/Facsimile (Standard)')]
class ContosoDataGenerator:
    """Generate a synthetic school SIS dataset and write it out as CSVs.

    Produces per-school tables: Students, Courses, Terms, daily Attendance,
    ClassAttendance and DailyIncidents, plus one School table overall.
    Faker is seeded so output is reproducible between runs.
    """

    def __init__(self, students_per_school=100, classes_in_student_schedule=6, students_per_section=25, student_teacher_ratio=9, include_optional_fields=True,
                 fall_semester_start_date='2021-08-15', fall_semester_end_date='2021-12-15', spring_semester_start_date='2022-01-10', spring_semester_end_date='2022-05-10'):
        # Set a seed value in Faker so it generates the same values every time it's run
        self.faker = Faker('en_US')
        Faker.seed(1)
        self.students_per_school = students_per_school
        self.classes_in_student_schedule = classes_in_student_schedule
        self.students_per_section = students_per_section
        self.student_teacher_ratio = student_teacher_ratio
        self.include_optional = include_optional_fields
        self.fall_semester_start_date = fall_semester_start_date
        self.fall_semester_end_date = fall_semester_end_date
        self.spring_semester_start_date = spring_semester_start_date
        self.spring_semester_end_date = spring_semester_end_date
        self.teachers_per_school = math.ceil(self.students_per_school / self.student_teacher_ratio)
        # Running counters so generated ids stay unique across schools.
        self.section_id = 1
        self.student_id = 1
        self.teacher_id = 1
        self.course_id = 1
        self.school_id = 1
        self.term_id = 1
        self.domain = '@Classrmtest86.org'

    def generate_data(self, num_of_schools, writer):
        """Generate *num_of_schools* schools and write every table via *writer*.

        NOTE(review): the per-school tables target the same path (e.g.
        contoso_sis/Students.csv) once per school; whether the writer
        appends or overwrites depends on its implementation -- confirm.
        """
        schools = []
        for n in range(num_of_schools):
            school_data = self.create_school(n)
            # The School row is collected separately and written once at the end.
            schools.append(school_data.pop('School'))
            for key in school_data.keys():
                data_str = DataGenUtil.list_of_dict_to_csv(school_data[key])
                writer.write(f"contoso_sis/{key}.csv", data_str)
        data_str = DataGenUtil.list_of_dict_to_csv(schools)
        writer.write('contoso_sis/School.csv', data_str)

    def create_school(self, school_id):
        """Build all tables for one school, keyed by table name."""
        school_data = {}
        school_data['School'] = {
            'SchoolID': school_id,
            'SchoolName': f"{self.faker.last_name()} {random.choice(SCHOOL_TYPES)}"
        }
        school_data['Students'] = self.create_students(school_id)
        school_data['Courses'] = self.create_courses()
        school_data['Terms'] = self.create_terms()
        school_data['Attendance'], school_data['ClassAttendance'], school_data['DailyIncidents'] = self.create_daily_records(school_id, school_data)
        return school_data

    def create_students(self, school_id):
        """Create ``students_per_school`` student records with unique ids."""
        students = []
        for n in range(self.students_per_school):
            students.append(self.create_student(school_id, self.student_id, 'student'))
            self.student_id += 1
        return students

    def create_student(self, school_id, user_id, user_type):
        """Create one randomized student record.

        ``school_id`` and ``user_type`` are currently unused; they are kept
        so existing callers keep working.
        """
        grade_num, grade = random.choice(GRADES)
        gender = random.choice(['Male', 'Female'])
        # Pick a gender-appropriate first name.
        if gender == 'Male':
            fname = self.faker.first_name_male()
        else:
            fname = self.faker.first_name_female()
        user = {
            'ID': user_id,
            'Firstname': fname,
            'Lastname': self.faker.last_name(),
            'Gender': gender,
            'FederalRaceCategory': random.choice(['Asian', 'Black', 'White', 'Hispanic', 'American Indian']),
            'PrimaryLanguage': random.choices(['English', 'Spanish', 'German', 'French', 'Japanese'], weights=(85, 10, 2, 2, 1))[0],
            'ELLStatus': random.choices(['', 'English Learner', 'Initially Fluent English Proficient', 'Redesignated Fluent English Proficient'], weights=(80, 10, 5, 5))[0],
            'SpecialEducation': random.choices(['', 'Designated Instruction Service', 'Resource Specialty Program', 'Special Day Class'], weights=(80, 10, 5, 5))[0],
            'LowIncome': random.choices([0, 1], weights=(60, 40))[0],
            'GradeNumber': grade_num,
            'Grade': grade,
            'CumulativeGPA': random.choice([0.523, 0.423, 1.13, 2.63, 2.33, 3.33, 4.0]),
            'StartSchoolYear': self.fall_semester_start_date,
            'EndSchoolYear': self.spring_semester_end_date
        }
        return user

    def create_terms(self):
        """Create the fall and spring semester records, advancing term_id."""
        terms = []
        terms.append({
            'TermID': self.term_id,
            'TermName': 'Fall Semester',
            'TermStartDate': self.fall_semester_start_date,
            'TermEndDate': self.fall_semester_end_date,
        })
        self.term_id += 1
        terms.append({
            'TermID': self.term_id,
            'TermName': 'Spring Semester',
            'TermStartDate': self.spring_semester_start_date,
            'TermEndDate': self.spring_semester_end_date,
        })
        self.term_id += 1
        return terms

    def create_courses(self):
        """Create one course per subject in SUBJECTS, advancing course_id."""
        courses = []
        for subject in SUBJECTS:
            courses.append({
                'CourseID': self.course_id,
                'CourseName': subject,
                'CourseCode': subject
            })
            self.course_id += 1
        return courses

    def create_daily_records(self, school_id, school_data):
        """Create attendance, class attendance and incident rows for every
        student on every day between the fall start and spring end dates
        (weekends and winter break included)."""
        date_range = pd.date_range(datetime.datetime.strptime(self.fall_semester_start_date, "%Y-%m-%d"), datetime.datetime.strptime(self.spring_semester_end_date, "%Y-%m-%d"))
        daily_attendance = []
        class_attendance = []
        incidents = []
        for student in school_data['Students']:
            for single_date in date_range:
                daily_attendance.append(self.create_daily_attendance_record(school_id, student, single_date))
                # BUG FIX: pass the student's id, not the whole student dict,
                # so ClassAttendance.StudentID holds an id like every other table.
                class_attendance.append(self.create_class_attendance_record(school_id, student['ID'], single_date, school_data['Courses']))
                if (random.randint(1, 100)) <= 10:  # 10% chance of an incident occurring
                    incidents.append(self.create_incident_record(school_id, student['ID'], single_date))
        return (daily_attendance, class_attendance, incidents)

    def create_class_attendance_record(self, school_id, student_id, date_value, courses):
        """Create one per-class attendance row for a random course."""
        # todo: fix term id to use the correct term id based on the date
        class_attendance = {
            'SchoolID': school_id,
            'AttendanceDate': date_value.strftime("%Y-%m-%d"),
            'StudentID': student_id,
            'Term': '1',
            'CourseID': random.choice(courses)['CourseID'],
            'AttendTypeID': random.choice(ATTENDANCE_TYPES)[0]
        }
        return class_attendance

    def create_incident_record(self, school_id, student_id, date_value):
        """Create one randomized disciplinary incident row."""
        # Only the codes are stored; the human-readable descriptions from
        # the (code, description) pairs are discarded. (The original bound
        # all three descriptions to one reused variable name.)
        incident_id, _ = random.choice(INCIDENTS)
        involvement_id, _ = random.choice(INVOLVEMENTS)
        action_id, _ = random.choice(ACTIONS)
        incident_record = {
            'StudentID': student_id,
            'SchoolID': school_id,
            'IncidentID': incident_id,
            'InvolvementID': involvement_id,
            'IncidentDate': date_value.strftime("%Y-%m-%d"),
            'ActionID': action_id
        }
        return incident_record

    def create_daily_attendance_record(self, school_id, student, date_value):
        """Create one whole-day attendance summary row for *student*."""
        possible_periods_in_day = 6
        # A student is either unexcused-absent all day (20%), or -- failing
        # that -- excused-absent all day with 30% probability.
        unexcused_all_day = random.choices([0, 1], weights=(80, 20))[0]
        if unexcused_all_day == 1:
            excused_all_day = 0
        else:
            excused_all_day = random.choices([0, 1], weights=(70, 30))[0]
        attendance_record = {
            'SchoolID': school_id,
            'AttendanceDate': date_value.strftime("%Y-%m-%d"),
            'StudentID': student['ID'],
            'NumofPossiblePeriods': possible_periods_in_day,
            'NumofTardies': random.choices([0, 1, 2, 3, 4, 5, 6], weights=(50, 20, 10, 5, 5, 5, 5))[0],
            'NumofUnexcusedAbsent': random.choices([0, 1, 2, 3], weights=(70, 10, 10, 10))[0],
            'NumofExcusedAbsent': random.choices([0, 1, 2, 3], weights=(60, 20, 10, 10))[0],
            'UnexcusedAllDay': unexcused_all_day,
            'ExcusedAllDay': excused_all_day,
            'Cumulative GPA': student['CumulativeGPA']
        }
        return attendance_record
| StarcoderdataPython |
6652494 | <filename>sklearn_wrapper/modules/Outputer.py<gh_stars>1-10
import importlib
from logging import getLogger
import numpy as np
import pandas as pd
logger = getLogger('predict').getChild('Outputer')
if 'ConfigReader' not in globals():
from .ConfigReader import ConfigReader
if 'LikeWrapper' not in globals():
from .commons.LikeWrapper import LikeWrapper
if 'MyKerasClassifier' not in globals():
from .commons.MyKeras import MyKerasClassifier
class Outputer(ConfigReader, LikeWrapper):
    """Compute predictions for X_test with a fitted estimator and export them.

    Typical flow: ``calc_predict_data()`` then ``get_predict_data()`` /
    ``write_predict_data()``.  Attributes such as ``id_col``, ``pred_cols``
    and the contents of ``configs`` are presumably supplied by the
    ConfigReader / LikeWrapper mix-ins or assigned externally -- they are
    not set in this class's ``__init__`` (confirm against ConfigReader).
    """

    def __init__(
        self,
        feature_columns, train_ids, test_ids,
        X_train, Y_train, X_test,
        train_cv, val_cv, scorer, classes, single_estimators, estimator,
        kernel=False
    ):
        # NOTE(review): train_cv, val_cv, scorer and single_estimators are
        # accepted for signature compatibility but never stored or used here.
        self.feature_columns = feature_columns
        self.train_ids = train_ids
        self.test_ids = test_ids
        self.X_train = X_train
        self.Y_train = Y_train
        self.X_test = X_test
        self.classes = classes
        self.estimator = estimator
        # In kernel mode, post-processing functions are expected to already
        # be in scope instead of being imported from modules.myfuncs.
        self.kernel = kernel
        self.configs = {}

    def get_predict_data(self):
        """Return the predictions computed by calc_predict_data().

        Raises AttributeError if calc_predict_data() has not run yet.
        """
        output = {
            'Y_pred': self.Y_pred,
            'Y_pred_proba': self.Y_pred_proba,
            'Y_pred_df': self.Y_pred_df,
            'Y_pred_proba_df': self.Y_pred_proba_df,
        }
        return output

    @classmethod
    def _trans_xy_for_predict(cls, estimator, X_train, Y_train, X_target):
        """Normalize inputs before prediction: flatten Y_train to 1d."""
        # First parameter renamed from ``self`` to ``cls``: this is a
        # classmethod, so it receives the class object, not an instance.
        Y_train = cls.ravel_like(Y_train)
        return X_train, Y_train, X_target

    @classmethod
    def predict_like(
        cls, train_mode, estimator, X_train, Y_train, X_target
    ):
        """Predict on X_target with *estimator*.

        Parameters
        ----------
        train_mode: 'clf' or 'reg'
        estimator: fitted estimator exposing predict() (and optionally
            predict_proba())
        X_train, Y_train: training data, used only to normalize inputs and
            to detect one-hot encoded Keras targets
        X_target: feature matrix to predict on

        Returns
        -------
        (Y_pred, Y_pred_proba) -- Y_pred_proba is None for regressors and
        for classifiers lacking predict_proba.

        Raises
        ------
        Exception if train_mode is neither 'clf' nor 'reg'.
        """
        X_train, Y_train, X_target = \
            cls._trans_xy_for_predict(estimator, X_train, Y_train, X_target)
        Y_pred_proba = None
        # clf
        if train_mode == 'clf':
            # keras classifier trained on a one-hot (2d multi-column)
            # target: the probability matrix is the prediction itself.
            if estimator.__class__ in [MyKerasClassifier] and \
                    Y_train.ndim == 2 and Y_train.shape[1] > 1:
                Y_pred = estimator.predict_proba(X_target)
            else:
                Y_pred = estimator.predict(X_target)
                # NOTE(review): probabilities are only computed on this
                # branch, assuming the keras one-hot case above already
                # returns them as Y_pred -- confirm intended placement.
                if hasattr(estimator, 'predict_proba'):
                    Y_pred_proba = estimator.predict_proba(
                        X_target)
        # reg
        elif train_mode == 'reg':
            Y_pred = estimator.predict(X_target)
        else:
            logger.error('TRAIN MODE SHOULD BE clf OR reg')
            raise Exception('NOT IMPLEMENTED')
        return Y_pred, Y_pred_proba

    def _inverse_translate_y_pre(self):
        """Undo the configured y pre-transformation (only 'log') on Y_pred.

        Applies only in regression mode when configs['pre']['y_pre'] is set.
        """
        if self.configs['pre']['train_mode'] != 'reg':
            return
        y_pre = self.configs['pre'].get('y_pre')
        if not y_pre:
            return
        logger.info('inverse translate y_pred with %s' % y_pre)
        if y_pre == 'log':
            # Training fitted on log(y); map predictions back via exp.
            self.Y_pred = np.array(list(map(np.exp, self.Y_pred)))
        else:
            logger.error('NOT IMPLEMENTED FIT Y_PRE: %s' % y_pre)
            raise Exception('NOT IMPLEMENTED')
        return

    def _calc_base_predict_df(self):
        """Build Y_pred_df / Y_pred_proba_df from the raw prediction arrays."""
        # Join ids to predictions positionally (both follow X_test order).
        self.Y_pred_df = pd.merge(
            pd.DataFrame(data=self.test_ids, columns=[self.id_col]),
            pd.DataFrame(data=self.Y_pred, columns=self.pred_cols),
            left_index=True, right_index=True)
        if self.Y_pred_proba is None:
            self.Y_pred_proba_df = None
            return
        if self.Y_pred_proba.shape[1] == self.classes.shape[0]:
            self.Y_pred_proba_df = pd.DataFrame(
                data=self.test_ids, columns=[self.id_col])
            if len(self.pred_cols) == 1:
                # Single target: one proba column per class, e.g. "y_0".
                proba_columns = list(map(
                    lambda x: '%s_%s' % (self.pred_cols[0], str(x)),
                    self.classes))
            else:
                proba_columns = self.pred_cols
            self.Y_pred_proba_df = pd.merge(
                self.Y_pred_proba_df,
                pd.DataFrame(
                    data=self.Y_pred_proba,
                    columns=proba_columns),
                left_index=True, right_index=True)
        else:
            # Shape mismatch: Y_pred_proba_df is left unassigned; only warn.
            logger.warning(
                'NOT MATCH DIMENSION OF Y_PRED_PROBA AND CLASSES')
        return

    def _calc_post_predict_df(self):
        """Apply the configured post-processing methods to the prediction dfs."""
        fit_post = self.configs['post']
        if not fit_post:
            return
        if not self.kernel:
            myfunc = importlib.import_module(
                'modules.myfuncs.%s' % fit_post['myfunc'])
        for method_name in fit_post['methods']:
            logger.info('fit post: %s' % method_name)
            if not self.kernel:
                method_name = 'myfunc.%s' % method_name
            # SECURITY NOTE: eval() executes names taken from the config;
            # the configuration must be trusted input.
            self.Y_pred_df, self.Y_pred_proba_df = eval(
                method_name)(self.Y_pred_df, self.Y_pred_proba_df)
        return

    def _round_predict_df(self):
        """Round the output dataframes to 5 decimals for stable CSV output."""
        if isinstance(self.Y_pred_df, pd.DataFrame):
            self.Y_pred_df = self.Y_pred_df.round(5)
        if isinstance(self.Y_pred_proba_df, pd.DataFrame):
            self.Y_pred_proba_df = self.Y_pred_proba_df.round(5)
        return

    def calc_predict_data(self):
        """Run the full prediction pipeline.

        Returns (Y_pred_df, Y_pred_proba_df) and caches all intermediate
        results on the instance for get_predict_data().
        """
        self.Y_pred, self.Y_pred_proba = self.predict_like(
            train_mode=self.configs['fit']['train_mode'],
            estimator=self.estimator, X_train=self.X_train,
            Y_train=self.Y_train, X_target=self.X_test)
        self._inverse_translate_y_pre()
        self._calc_base_predict_df()
        self._calc_post_predict_df()
        self._round_predict_df()
        return self.Y_pred_df, self.Y_pred_proba_df

    def write_predict_data(self):
        """Write the prediction (and probability) CSVs to the output dir.

        Returns the base filename used ('<modelname>.csv'); probabilities
        go to 'proba_<modelname>.csv'.
        """
        modelname = self.configs['fit'].get('modelname', 'tmp_model')
        filename = '%s.csv' % modelname
        output_path = self.configs['data']['output_dir']
        if isinstance(self.Y_pred_df, pd.DataFrame):
            self.Y_pred_df.to_csv(
                '%s/%s' % (output_path, filename), index=False)
        if isinstance(self.Y_pred_proba_df, pd.DataFrame):
            self.Y_pred_proba_df.to_csv(
                '%s/proba_%s' % (output_path, filename), index=False)
        return filename
| StarcoderdataPython |
# Per-repository query strings and URL templates for the OmicsDI service.
# '{}' placeholders take dataset accessions via str.format().
# (A dataset-extraction artifact fused to the first line was removed.)
OMICSDI = {
    'base_url': "https://www.omicsdi.org/ws/dataset/search?",
    'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/{}/{}.json",
    'metabolights': {
        'query': "repository:\"Metabolights\"",
        'dataset_url': "http://www.ebi.ac.uk/metabolights/{}",
        'omicsdi_url': "https://www.omicsdi.org/dataset/metabolights_dataset/{}",
        'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/metabolights_dataset/{}.json",
    },
    'pride': {
        'query': "repository:\"Pride\"",
        'dataset_url': "http://www.ebi.ac.uk/pride/archive/projects/{}",
        'omicsdi_url': "https://www.omicsdi.org/dataset/pride/{}",
        'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/pride/{}.json"
    },
    'arrayexpress': {
        'query': "repository:\"ArrayExpress\"",
        'dataset_url': "https://www.ebi.ac.uk/arrayexpress/experiments/{}",
        'omicsdi_url': "https://www.omicsdi.org/dataset/arrayexpress-repository/{}",
        'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/arrayexpress-repository/{}.json"
    },
    'eva': {
        'query': "repository:\"EVA\"",
        'dataset_url': "https://www.ebi.ac.uk/eva/?eva-study={}",
        'omicsdi_url': "https://www.omicsdi.org/dataset/eva/{}",
        'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/eva/{}.json"
    },
    'expression-atlas': {
        'query': "repository:\"ExpressionAtlas\"",
        'dataset_url': "http://www.ebi.ac.uk/gxa/experiments/{}",
        'omicsdi_url': "https://www.omicsdi.org/dataset/atlas-experiments/{}",
        'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/atlas-experiments/{}.json"
    },
    'biomodels': {
        'query': "repository:\"BioModels\"",
        'dataset_url': "https://www.ebi.ac.uk/biomodels/{}",
        'omicsdi_url': "https://www.omicsdi.org/dataset/biomodels/{}",
        'omicsdi_api_url': "https://www.omicsdi.org/ws/dataset/biomodels/{}.json"
    },
    # ENA has only a search query; no per-dataset URL templates defined.
    'ena': {
        'query': "repository:\"ENA\"",
    },
}
# Column order for the OMICSDI dataset index CSV.
OMICSDI_HEADERS = ['dataset', 'id', 'pub_date' , 'dataset_url',
                   'omicsdi_url', 'omicsdi_api_url','local_path']
# Column order for the per-file hash/metadata CSV.
HASH_HEADERS = ["type","id","name","dataset","bundle","size","timestamp",
                "crc32c","md5","sha256","sha512","trunc512","blake2b",
                "contents"]
# Hash algorithms computed for each file (a subset of HASH_HEADERS).
HASH_TYPES = ["crc32c","md5","sha256","sha512","trunc512","blake2b"]
# Per-repository filesystem and transfer-protocol path templates; '{}'
# placeholders take dataset accessions via str.format().
PATHS = {
    'metabolights': {
        'identifiers.org': "http://identifiers.org/metabolights:{}",
        'file': ['/ebi/ftp/pub/databases/metabolights/studies/public/{}'],
        'https': ['https://www.ebi.ac.uk/metabolights/{}/files/'],
        # BUG FIX: commas added between URL templates in the three lists
        # below -- the original adjacent string literals were implicitly
        # concatenated by Python into one garbled URL per list.
        'ftp': [
            'ftp://ftp.ebi.ac.uk/pub/databases/metabolights/studies/public/{}',
            'ftp://hh-gridftp-1.ebi.ac.uk:2811/gridftp/pub/databases/metabolights/studies/public/{}',
            'ftp://oy-gridftp-1.ebi.ac.uk:2811/pub/databases/metabolights/studies/public/{}',
        ],
        'gsiftp': [
            'gsiftp://hh-gridftp-1.ebi.ac.uk/gridftp/pub/databases/metabolights/studies/public/{}',
            'gsiftp://oy-gridftp-1.ebi.ac.uk/pub/databases/metabolights/studies/public/{}',
        ],
        'globus': [
            'globus://ebi#public/gridftp/pub/databases/metabolights/studies/public/{}',
            'globus://9e437f9e-7e22-11e5-9931-22000b96db58/gridftp/pub/databases/metabolights/studies/public/{}',
            'globus://ebi#pub/pub/databases/metabolights/studies/public/{}',
            'globus://ddb59cc9-6d04-11e5-ba46-22000b92c6ec/pub/databases/metabolights/studies/public/{}',
            'globus://ebi#09443db8-59e6-11e9-a621-0a54e005f950/pub/databases/metabolights/studies/public/{}',
        ],
    },
    'pride': {
        'identifiers.org': "http://identifiers.org/pride:{}",
        'file': ['/nfs/public/release/pride/prod/pride/data/archive/{}'],
        'https': ['https://www.ebi.ac.uk/pride/data/archive/{}'],
        'ftp': [
            'ftp://pg-gridftp-2.ebi.ac.uk/pride/data/archive/{}'
        ],
        'gsiftp': [
            'gsiftp://pg-gridftp-2.ebi.ac.uk/pride/data/archive/{}'
        ],
        'globus': [
            'globus://ddb59cab-6d04-11e5-ba46-22000b92c6ec/pride/data/archive/{}',
            'globus://ebi#pride/pride/data/archive/{}'
        ],
        # NOTE(review): '<EMAIL>' is a redaction placeholder from the data
        # source; the real aspera user@host was stripped -- confirm value.
        'aspera': [
            '<EMAIL>:pride/data/archive/{}'
        ]
    },
    'arrayexpress': {
        'identifiers.org': "http://identifiers.org/arrayexpress:{}",
        'file': [
            '/ebi/ftp/pub/databases/arrayexpress/data/experiment/{}',
            '/ebi/ftp/pub/databases/microarray/data/experiment/{}'
        ],
        'https': [
            "https://www.ebi.ac.uk/arrayexpress/files/{}"
        ],
    },
    'eva': {
        'file': ['/ebi/ftp/pub/databases/eva/{}'],
        'ftp': ["ftp://ftp.ebi.ac.uk/pub/databases/eva/{}"],
        'globus': ["globus://6f70c1b4-b824-11e9-98d7-0a63aa6b37da:/gridftp/pub/databases/eva/{}"]
    },
    'expression-atlas': {
        'file': [
            '/ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
            '/ebi/ftp/pub/databases/microarray/data/atlas/experiments/{}'
        ],
        'ftp': ["ftp://ftp.ebi.ac.uk/pub/databases/arrayexpress/data/atlas/experiments/{}"]
    },
    'omics_ena_project': {
        'file': ['/nfs/era-pub/vol1/{}'],
        # NOTE(review): the ftp/https entries below carry no URL scheme
        # prefix -- confirm whether that is intentional for downstream use.
        'ftp': ['ftp.sra.ebi.ac.uk/vol1/{}'],
        'https': ['ftp.sra.ebi.ac.uk/vol1/{}']
    }
}
# Per-repository DRS-style access methods: each entry maps a transfer type
# to a region/host, an on-host path template and a full access URL
# template ('{}' takes dataset accessions via str.format()).
# (A dataset-extraction artifact fused to the closing brace was removed.)
ACCESS_URLS = {
    'eva': [
        { 'type': 'file',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/eva/{}',
          'access_url': 'file:///ebi/ftp/pub/databases/eva/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'file',
          'region': 'sra-login.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/eva/{}',
          'access_url': 'file:///ebi/ftp/pub/databases/eva/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'sftp',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/eva/{}',
          'access_url': 'sftp://ebi-cli.ebi.ac.uk/ebi/ftp/pub/databases/eva/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'ftp',
          'region': 'ftp.ebi.ac.uk',
          'path': '/ftp/pub/databases/eva/{}',
          'access_url': 'ftp://ftp.ebi.ac.uk/pub/databases/eva/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'globus',
          'region': 'globus.ebi.ac.uk',
          'path': '/gridftp/pub/databases/eva/{}',
          'access_url': 'globus://fd9c190c-b824-11e9-98d7-0a63aa6b37da:/gridftp/pub/databases/eva/{}',
          'access_id': '',
          'headers': ''
        },
    ],
    'metabolights': [
        { 'type': 'file',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/metabolights/studies/public/{}',
          'access_url': 'file:///ebi/ftp/pub/databases/metabolights/studies/public/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'file',
          'region': 'sra-login.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/metabolights/studies/public/{}',
          'access_url': 'file:///ebi/ftp/pub/databases/metabolights/studies/public/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'sftp',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/metabolights/studies/public/{}',
          'access_url': 'sftp://ebi-cli.ebi.ac.uk/ebi/ftp/pub/databases/metabolights/studies/public/{}',
          'access_id': '',
          'headers': ''
        },
        # NOTE(review): the http access_url points at the MetaboLights web
        # app rather than the ftp path -- presumably intentional; confirm.
        { 'type': 'http',
          'region': 'ftp.ebi.ac.uk',
          'path': '/pub/databases/metabolights/studies/public/{}',
          'access_url': 'https://www.ebi.ac.uk/metabolights/{}/files/',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'ftp',
          'region': 'ftp.ebi.ac.uk',
          'path': '/pub/databases/metabolights/studies/public/{}',
          'access_url': 'ftp://ftp.ebi.ac.uk/pub/databases/metabolights/studies/public/{}',
          'access_id': '',
          'headers': ''
        },
        # BUG FIX: path previously pointed at the eva tree, contradicting
        # the metabolights access_url on the same entry.
        { 'type': 'globus',
          'region': 'globus.ebi.ac.uk',
          'path': '/gridftp/pub/databases/metabolights/studies/public/{}',
          'access_url': 'globus://fd9c190c-b824-11e9-98d7-0a63aa6b37da:/gridftp/pub/databases/metabolights/studies/public/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'aspera',
          'region': 'fasp.ebi.ac.uk',
          'path': '/studies/public/{}',
          'access_url': 'fasp://fasp_ml@fasp.ebi.ac.uk/studies/public/{}',
          'access_id': 'asperaweb_id_dsa.openssh',
          'headers': ''
        },
    ],
    'expression-atlas': [
        { 'type': 'file',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_url': 'file:///ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_id': '',
          'headers': ''
        },
        # BUG FIX: path previously pointed at the metabolights tree,
        # contradicting the arrayexpress access_url on the same entry.
        { 'type': 'file',
          'region': 'sra-login.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_url': 'file:///ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'sftp',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_url': 'sftp://ebi-cli.ebi.ac.uk/ebi/ftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'ftp',
          'region': 'ftp.ebi.ac.uk',
          'path': '/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_url': 'ftp://ftp.ebi.ac.uk/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'globus',
          'region': 'globus.ebi.ac.uk',
          'path': '/gridftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_url': 'globus://fd9c190c-b824-11e9-98d7-0a63aa6b37da:/gridftp/pub/databases/arrayexpress/data/atlas/experiments/{}',
          'access_id': '',
          'headers': ''
        },
    ],
    'ena': [
        { 'type': 'file',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp/era-pub/{}',
          'access_url': 'file:///ebi/ftp/era-pub/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'file',
          'region': 'sra-login.ebi.ac.uk',
          'path': '/ebi/ftp/era-pub/{}',
          'access_url': 'file:///ebi/ftp/era-pub/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'sftp',
          'region': 'ebi-cli.ebi.ac.uk',
          'path': '/ebi/ftp//era-pub/{}',
          'access_url': 'sftp://ebi-cli.ebi.ac.uk/ebi/ftp/era-pub/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'ftp',
          'region': 'ftp.era.ebi.ac.uk',
          'path': '/vol1/era-pub/{}',
          'access_url': 'ftp://ftp.era.ebi.ac.uk/vol1/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'ftp',
          'region': 'ftp.ebi.ac.uk',
          'path': '/era-pub/{}',
          'access_url': 'ftp://ftp.ebi.ac.uk/era-pub/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'gsiftp',
          'region': 'gsiftp.ebi.ac.uk',
          'path': '/era-pub/{}',
          'access_url': 'gsiftp://hx-gridftp-8.ebi.ac.uk/era-pub/{}',
          'access_id': '',
          'headers': ''
        },
        { 'type': 'globus',
          'region': 'globus.ebi.ac.uk',
          'path': '/gridftp/ena/{}',
          'access_url': 'globus://fd9c190c-b824-11e9-98d7-0a63aa6b37da:/gridftp/ena/{}',
          'access_id': '',
          'headers': ''
        },
    ],
}
6653944 | #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Tests for the HostRegistry class."""
import unittest
from htrun.host_tests_registry import HostRegistry
from htrun import BaseHostTest
class HostRegistryTestCase(unittest.TestCase):
class HostTestClassMock(BaseHostTest):
def setup(self):
pass
def result(self):
pass
def teardown(self):
pass
def setUp(self):
self.HOSTREGISTRY = HostRegistry()
def tearDown(self):
pass
def test_register_host_test(self):
self.HOSTREGISTRY.register_host_test(
"host_test_mock_auto", self.HostTestClassMock()
)
self.assertEqual(True, self.HOSTREGISTRY.is_host_test("host_test_mock_auto"))
def test_unregister_host_test(self):
self.HOSTREGISTRY.register_host_test(
"host_test_mock_2_auto", self.HostTestClassMock()
)
self.assertEqual(True, self.HOSTREGISTRY.is_host_test("host_test_mock_2_auto"))
self.assertNotEqual(
None, self.HOSTREGISTRY.get_host_test("host_test_mock_2_auto")
)
self.HOSTREGISTRY.unregister_host_test("host_test_mock_2_auto")
self.assertEqual(False, self.HOSTREGISTRY.is_host_test("host_test_mock_2_auto"))
def test_get_host_test(self):
self.HOSTREGISTRY.register_host_test(
"host_test_mock_3_auto", self.HostTestClassMock()
)
self.assertEqual(True, self.HOSTREGISTRY.is_host_test("host_test_mock_3_auto"))
self.assertNotEqual(
None, self.HOSTREGISTRY.get_host_test("host_test_mock_3_auto")
)
def test_is_host_test(self):
self.assertEqual(False, self.HOSTREGISTRY.is_host_test(""))
self.assertEqual(False, self.HOSTREGISTRY.is_host_test(None))
self.assertEqual(False, self.HOSTREGISTRY.is_host_test("xyz"))
def test_host_test_str_not_empty(self):
for ht_name in self.HOSTREGISTRY.HOST_TESTS:
ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]
self.assertNotEqual(None, ht)
def test_host_test_has_name_attribute(self):
for ht_name in self.HOSTREGISTRY.HOST_TESTS:
ht = self.HOSTREGISTRY.HOST_TESTS[ht_name]
self.assertTrue(hasattr(ht, "setup"))
self.assertTrue(hasattr(ht, "result"))
self.assertTrue(hasattr(ht, "teardown"))
if __name__ == "__main__":
unittest.main()
| StarcoderdataPython |
368717 | <filename>transforms.py
from albumentations import (
Compose,
OneOf,
CenterCrop,
GaussNoise,
Normalize,
HorizontalFlip,
Resize,
Rotate,
JpegCompression,
ChannelShuffle,
InvertImg,
RandomBrightnessContrast,
RGBShift,
RandomGamma,
HueSaturationValue,
MultiplicativeNoise,
)
from albumentations.pytorch import ToTensorV2
from torchvision import transforms
import cv2
def get_test_transform(image_size, mean, std):
return transforms.Compose(
[
transforms.Resize(image_size),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
def get_test_transform_albumentations(image_size, mean, std):
return Compose(
[
Resize(*image_size, interpolation=cv2.INTER_AREA),
Normalize(mean=mean, std=std),
ToTensorV2(),
]
)
def train_albumentations(image_size, mean, std):
    """Albumentations training pipeline with light geometric/photometric
    augmentation, followed by resize, an optional centre-crop "zoom",
    normalisation and tensor conversion.

    :param image_size: (height, width) fed to ``Resize``.
    :param mean: per-channel means for ``Normalize``.
    :param std: per-channel standard deviations for ``Normalize``.
    """
    return Compose(
        [
            Rotate(limit=6, p=0.5),
            HorizontalFlip(p=0.5),
            RandomBrightnessContrast(
                brightness_limit=(-0.1, 0.2), contrast_limit=(-0.2, 0.2), p=0.3
            ),
            RGBShift(5, 5, 5, p=0.3),
            HueSaturationValue(1, 10, 5, p=0.2),
            GaussNoise(10, p=0.25),
            MultiplicativeNoise((0.85, 1.05), per_channel=True, p=0.25),
            ChannelShuffle(p=0.05),
            Resize(*image_size, interpolation=cv2.INTER_AREA),
            # With probability 0.4, zoom in on a fixed 200x200 centre crop
            # (assumes images are at least 200px after resize -- TODO confirm).
            Compose(
                [
                    CenterCrop(width=200, height=200, p=1),
                    Resize(*image_size, interpolation=cv2.INTER_AREA),
                ],
                p=0.4,
            ),
            Normalize(mean=mean, std=std),
            ToTensorV2(),
        ]
    )
| StarcoderdataPython |
6402553 | <reponame>Sirrah91/Asteroid-spectra<gh_stars>0
# Parameters for the data collection.
# Only spectra within this wavelength range will be processed.
lambda_min = 450 # Minimum value of lambda; nm
lambda_max = 2450 # Maximum value of lambda; nm
resolution_max = 15 # Maximum acceptable step in wavelength resolution; nm
denoise = True # Denoise the spectrum? (convolution with the given kernel)
normalise = True # Normalise the spectrum?
resolution_final = 5 # Interpolated resolution; nm
normalised_at = 550 # Wavelength the spectrum is normalised at; nm
project_dir = '/home/dakorda/Python/NN/' # Directory which contains Datasets, Modules, etc.
path_relab = "".join((project_dir, '/Datasets/RELAB/')) # Path to RELAB dataset folder
path_relab_raw = "".join((project_dir, '/RELAB/')) # Path to raw RELAB folder
path_taxonomy = "".join((project_dir, '/Datasets/taxonomy/')) # Path to taxonomy folder
path_ctape = "".join((project_dir, '/Datasets/C-Tape/')) # Path to C-Tape folder
path_MGM = '/home/local/dakorda/MGM/david_mgm/input/' # Path to MGM folder
web_page = 'http://www.planetary.brown.edu/relabdata/data/'
# Number of subtypes per mineral: olivine, orthopyroxene, clinopyroxene, plagioclase
subtypes_CD = 2, 3, 3, 3
use_minerals_CD = 1, 1, 1, 1 # 1 = mineral used, 0 = mineral ignored
num_minerals_CD = sum(use_minerals_CD)
# One label per subtype of each used mineral, plus one label per used mineral.
num_labels_CD = sum([subtypes_CD[i] for i in range(len(subtypes_CD)) if use_minerals_CD[i]]) + num_minerals_CD
| StarcoderdataPython |
3470860 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 5 18:05:41 2016
@author: Olivier
"""
import numpy as np
class basisTab:
    """CSV import schema for Basis fitness-tracker data (SQL table ``basis``).

    ``m`` rows look like (column_name, flag, sql_type, key_flag); the exact
    meaning of the two numeric flags is not evident here -- TODO confirm.
    ``timeFormat`` is the strptime pattern of the source timestamps,
    ``linesToSkip`` the header lines to drop, ``maxcolumns`` the number of
    data columns read from the CSV.
    """
    m = np.array(
        [
            ["date", 0, "varchar", 1],
            ["calories", 0, "int", 0],
            ["gsr", 0, "int", 0],
            ["hearrate", 0, "int", 0],
            ["skintemp", 0, "int", 0],
            ["steps", 0, "int", 0],
            ["insttime", 0, "int", 0],
        ]
    )
    timeFormat = ["%Y-%m-%d %H:%MZ"]
    timeShift = 0
    tableName = "basis"
    linesToSkip = 1
    maxcolumns = 6
class painTab:
    """CSV import schema for the pain diary (SQL table ``pain``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); flag semantics
    are not evident here -- TODO confirm. ``timeShift`` of 86400 s
    presumably extends day-granular dates to a full day; verify in importer.
    """
    m = np.array(
        [
            ["year", 1, "int", 1],
            ["month", 1, "int", 1],
            ["day", 1, "int", 1],
            ["location", 0, "varchar", 0],
            ["side", 0, "varchar", 0],
            ["intensity", 0, "int", 0],
            ["comment", 0, "varchar", 0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%Y/%m/%d", "0"]
    timeShift = 86400
    tableName = "pain"
    linesToSkip = 2
    maxcolumns = 7
class sportTab:
    """CSV import schema for sports activities (SQL table ``sports``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); flag semantics
    are not evident here -- TODO confirm.
    """
    m = np.array(
        [
            ["year", 1, "int", 1],
            ["month", 1, "int", 1],
            ["day", 1, "int", 1],
            ["activity", 0, "varchar", 0],
            ["start", 0, "varchar", 0],
            ["duration", 0, "varchar", 0],
            ["km", 0, "varchar", 0],
            ["denivelation", 0, "int", 0],
            ["appdur", 0, "int", 0],
            ["appden", 0, "int", 0],
            ["painafter", 0, "int", 0],
            ["inreg", 0, "varchar", 0],
            ["side", 0, "varchar", 0],
            ["effortInt", 0, "int", 0],
            # ['other',0,'varchar',0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%Y/%m/%d", "0"]
    timeShift = 86400
    tableName = "sports"
    linesToSkip = 1
    maxcolumns = 14
class manicTimeTab:
    """CSV import schema for ManicTime activity logs (SQL table ``manicTime``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); the key_flag
    values 1/2 presumably mark start/end timestamp columns -- TODO confirm.
    """
    m = np.array(
        [
            ["name", 0, "varchar", 0],
            ["start", 0, "varchar", 1],
            ["end", 0, "varchar", 2],
            ["duration", 0, "varchar", 0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M:%S"]
    timeShift = 0
    tableName = "manicTime"
    linesToSkip = 1
    maxcolumns = 4
class screenSaverTab:
    """CSV import schema for screensaver sessions (SQL table ``screenSaver``).

    Same layout as the ManicTime schema: key_flag values 1/2 presumably
    mark start/end timestamp columns -- TODO confirm.
    """
    m = np.array(
        [
            ["name", 0, "varchar", 0],
            ["start", 0, "varchar", 1],
            ["end", 0, "varchar", 2],
            ["duration", 0, "varchar", 0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%d/%m/%Y %H:%M:%S", "%d/%m/%Y %H:%M:%S"]
    timeShift = 0
    tableName = "screenSaver"
    linesToSkip = 1
    maxcolumns = 4
class whatPulseTab:
    """CSV import schema for WhatPulse key/click counts (SQL table ``whatPulse``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); flag semantics
    are not evident here -- TODO confirm.
    """
    m = np.array(
        [
            ["date", 0, "varchar", 1],
            ["keys", 0, "int", 0],
            ["clicks", 0, "int", 0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%Y-%m-%d", "0"]
    timeShift = 86400
    tableName = "whatPulse"
    linesToSkip = 1
    maxcolumns = 3
class taplogTab:
    """CSV import schema for Taplog entries (SQL table ``taplog``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); flag semantics
    are not evident here -- TODO confirm. ``timeFormat`` of "0" presumably
    means the source already provides epoch values; verify in importer.
    """
    m = np.array(
        [
            ["ms", 0, "int", 0],
            ["timezoneoffset", 0, "int", 0],
            ["timestamp", 0, "varchar", 0],
            ["dayofyear", 0, "int", 0],
            ["dayofmonth", 0, "int", 0],
            ["dayofweek", 0, "varchar", 0],
            ["timeofday", 0, "int", 0],
            ["idd", 0, "int", 0],
            ["cat1", 0, "varchar", 0],
            ["cat2", 0, "varchar", 0],
            ["insttime", 0, "int", 0],
        ]
    )
    timeFormat = ["0"]
    timeShift = 0
    tableName = "taplog"
    linesToSkip = 1
    maxcolumns = 10
class general:
    """CSV import schema for daily general wellbeing data (SQL table ``general``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); flag semantics
    are not evident here -- TODO confirm.
    """
    m = np.array(
        [
            ["year", 1, "int", 1],
            ["month", 1, "int", 1],
            ["day", 1, "int", 1],
            ["stress", 1, "int", 0],
            ["mood", 1, "int", 0],
            ["socquant", 1, "varchar", 0],
            ["socqual", 1, "varchar", 0],
            ["weight", 1, "int", 0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%Y/%m/%d", "0"]
    timeShift = 86400
    tableName = "general"
    linesToSkip = 2
    maxcolumns = 8
class generalActivities:
    """CSV import schema for daily activity flags (SQL table ``generalactivities``).

    ``m`` rows are (column_name, flag, sql_type, key_flag); flag semantics
    are not evident here -- TODO confirm.
    """
    m = np.array(
        [
            ["year", 1, "int", 1],
            ["month", 1, "int", 1],
            ["day", 1, "int", 1],
            ["paper", 0, "int", 0],
            ["ubuntu", 0, "int", 0],
            ["driving", 0, "int", 0],
            ["store", 0, "int", 0],
            ["ridingcar", 0, "int", 0],
            ["starttime", 0, "int", 0],
            ["endtime", 0, "int", 0],
        ]
    )
    timeFormat = ["%Y/%m/%d", "0"]
    timeShift = 86400
    tableName = "generalactivities"
    linesToSkip = 2
    maxcolumns = 8
| StarcoderdataPython |
9648470 | <reponame>autorouting/main
# THIS IS THE CODE THAT CONSTANTLY RUNS IN THE BACKGROUND ON THE SERVER.
import sys
import time
import pickle
import networkx as nx
import osmnx as ox
import serialize
import socket
import concurrent.futures
# Load the road-network graph into memory once at startup.  Use a context
# manager so the file handle is closed deterministically instead of being
# leaked until garbage collection (the original `open(...)` was never closed).
with open('graph', 'rb') as _graph_file:
    G = pickle.load(_graph_file)
def generate_distance_matrix(coordpairs, G):
    """
    Given coordinates, calculate driving distance between every pair of locations
    Parameters:
        coordpairs (list): the coordinates of every location
        G (NetworkX graph): the network containing the locations
    Returns:
        list: a 2D matrix; each cell contains the distance from the location corresponding to the row to the location corresponding to the column.
    """
    # Map each coordinate pair to its nearest graph node (timed for logging).
    nodes = []
    start_time = time.perf_counter()
    for coords in coordpairs:
        nodes.append(ox.get_nearest_node(G, coords))
    end_time = time.perf_counter()
    print("Nodes generation time: " + str(end_time - start_time))
    start_time = time.perf_counter()
    MAX_DISTANCE = 7666432.01 # a constant rigging the distance matrix to force the optimizer to go to origin first
    # Initialise the NxN output matrix with None placeholders.
    output_list = [[None for j in range(len(nodes))] for i in range(len(nodes))]
    # Fill each cell concurrently; each worker writes one shortest-path length.
    def fill_cell(G, nodes, i, j):
        nonlocal output_list
        output_list[i][j] = nx.shortest_path_length(G, nodes[i], nodes[j], weight='length')
        return "done"
    workers = []
    # NOTE(review): this executor is never shut down, so its threads persist
    # after the call -- consider a `with` block; confirm before changing.
    executer = concurrent.futures.ThreadPoolExecutor(3)
    for i in range(len(nodes)):
        for j in range(len(nodes)):
            workers.append(executer.submit(fill_cell, G, nodes, i, j))
    concurrent.futures.wait(workers, return_when=concurrent.futures.ALL_COMPLETED)
    # Rig distances so the optimization algorithm chooses to visit the origin
    # (column 1) as soon as possible, right after the depot.
    for i in range(2, len(output_list)):
        output_list[i][1] = MAX_DISTANCE
    # Report timing and return the completed matrix.
    end_time = time.perf_counter()
    print("Distance calculation time: " + str(end_time - start_time))
    return (output_list)
# Create a TCP/IP socket and serve distance-matrix requests forever.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Bind the socket to the local port used by the CGI front-end.
server_address = ('localhost', 6000)
print('starting up on %s port %s' % server_address)
sock.bind(server_address)
# Listen for incoming connections (backlog of 1: one client at a time).
sock.listen(1)
while True:
    # Block until a client connects.
    print('waiting for a connection')
    connection, client_address = sock.accept()
    try:
        print('connection from', client_address)
        # Receive the request in 256-byte chunks.
        received = b''
        start_time = time.perf_counter()
        while True:
            print('start recieving')
            data = connection.recv(256)
            print(data)
            received += data
            # NOTE(review): end-of-message heuristic -- a short read or a
            # trailing newline (byte 10) ends the message.  This is fragile
            # for messages that are an exact multiple of 256 bytes; confirm
            # the framing contract with the CGI client.
            if len(data)<256 or data[-1]==10 :
                print('received "%s"' % data)
                break
        end_time = time.perf_counter()
        print("Recieve message time: " + str(end_time - start_time))
        #time.sleep(1)
        print("send reply")
        # Deserialize coordinates, compute the matrix, and send it back.
        message=generate_distance_matrix(serialize.deserializeCgiToServer(received), G)
        start_time = time.perf_counter()
        connection.sendall(serialize.serializeServerToCgi(message))
        print("done sending")
        end_time = time.perf_counter()
        print("Send message time: " + str(end_time - start_time))
    except Exception as err:
        # Broad catch keeps the server alive across bad requests; the error
        # is only printed, not logged with a traceback.
        print(err)
    finally:
        # Clean up the per-client connection; the listening socket stays open.
        print("close socket")
        connection.close()
| StarcoderdataPython |
5096800 | <filename>choosemybeer.py
#!/usr/bin/env python
#############################################################
# #
# ChooseMyBeer - find the keg that's right for you #
# written by <NAME> (<EMAIL>) #
# #
#############################################################
import argparse
import heapq
from urlparse import urlparse
from beerkeg import BeerKeg
from utils import get_html, is_num, unique
import lxml.html as lh
def get_parser():
    """Build the command-line argument parser for the keg search.

    Options: -a/--attempts, -f/--filter, -l/--limit, -p/--price,
    -t/--top, -u/--unfilter.  Defaults are applied later by the caller.
    """
    parser = argparse.ArgumentParser(description='find the keg that\'s right '
                                                 'for you')
    # (short flag, long flag, value type, nargs, help text)
    option_specs = (
        ('-a', '--attempts', int, '?',
         'number of attempts to resolve each ABV (default: 10)'),
        ('-f', '--filter', str, '*',
         'find kegs with descriptions matching these keywords'),
        ('-l', '--limit', int, '?',
         'limit number of kegs to crawl (default: 10000)'),
        ('-p', '--price', float, '?',
         'limit the price range'),
        ('-t', '--top', int, '?',
         'number of top kegs to display (default: 3)'),
        ('-u', '--unfilter', str, '*',
         'find kegs with descriptions not matching these keywords'),
    )
    for short_flag, long_flag, value_type, nargs, help_text in option_specs:
        parser.add_argument(short_flag, long_flag, type=value_type,
                            nargs=nargs, help=help_text)
    return parser
def get_optimal_kegs(args):
    ''' Gets kegs from bevmo.com
        finds the kegs with the optimal gallons of alcohol per USD
    '''
    num_kegs = args['top']
    beer_limit = args['limit']
    num_attempts = args['attempts']
    max_price = args['price']
    desc_filter = args['filter']
    desc_unfilter = args['unfilter']
    ''' The first url to crawl and its base url '''
    seed_url = 'http://www.bevmo.com/Shop/ProductList.aspx/\
Beer/Kegs/_/N-15Z1z141vn?DNID=Beer'
    base_url = '{url.scheme}://{url.netloc}'.format(url=urlparse(seed_url))
    ''' Get initial unique page links from the seed url
        append base_url to them
    '''
    ''' For info on XPaths, see:
        http://www.w3schools.com/xpath/xpath_syntax.asp
    '''
    init_page_links = []
    init_page_links[:] = unique(get_html(seed_url).xpath('//div[@class="Product\
ListPaging"]/a/@href'))
    if not init_page_links:
        print('Failed to retrieve the initial keg page links!')
        return None
    ''' Lists for holding links to pages of beer kegs '''
    # NOTE(review): relies on Python 2 map() returning a list; under
    # Python 3 this concatenation would raise TypeError -- confirm target.
    page_links = [seed_url] + map(lambda x: base_url + x, init_page_links)
    new_page_links = []
    ''' Lists for holding links to individual beer kegs '''
    # NOTE(review): beer_links is only ever appended to, so earlier links are
    # re-scanned each outer iteration (duplicates are skipped via crawled_beers).
    beer_links = []
    new_beer_links = []
    ''' To keep track of already crawled beer kegs '''
    crawled_beers = set()
    ''' List for matching --filter and --unfilter keyword arguments to
        keg descriptions
    '''
    matched = []
    ''' List to hold top beer kegs, the size of optimal_kegs is limited by the
        num_kegs argument
    '''
    optimal_kegs = []
    keg = None
    while len(page_links) > 0 and len(crawled_beers) < beer_limit:
        ''' Links are removed as they are crawled '''
        page_link = page_links.pop(0)
        ''' Beer keg links '''
        new_beer_links[:] = unique(get_html(page_link).xpath('//a[@class="Prod\
uctListItemLink"]\
                                                             /@href'))
        beer_links += [base_url + x for x in new_beer_links]
        ''' Crawl the beer keg links
            get the gallons of alcohol/USD ratio
        '''
        for link in beer_links:
            ''' Break if the number of crawled beers exceeds the limit '''
            if len(crawled_beers) >= beer_limit:
                break
            ''' Cache the BevMo beer id's to prevent duplicates '''
            beer_id = link.split('/')[-1]
            if beer_id not in crawled_beers:
                ''' Create BeerKeg object '''
                keg = BeerKeg(link, num_attempts, verbose=True)
                ''' Call keg.parse() then filter kegs by their descriptions
                    Calling keg.parse() produces fields keg.desc, keg.price, etc
                    keg.parse() will only parse once per keg object
                '''
                ''' Check if price is within range if one was given '''
                if max_price:
                    keg.parse()
                    if keg.price > max_price:
                        ''' Move onto the next keg and ignore this one '''
                        continue
                ''' args['filter'] has words that must be in the description '''
                ''' desc_filter has words that must be in the description '''
                if desc_filter:
                    keg.parse()
                    matched = [word in keg.desc for word in desc_filter]
                    ''' All keywords must be present for a match '''
                    if not all(matched):
                        ''' Move onto the next keg and ignore this one '''
                        continue
                ''' desc_unfilter has words that can't be in the description '''
                if desc_unfilter:
                    keg.parse()
                    matched = [word in keg.desc for word in desc_unfilter]
                    ''' Any keyword must be present to nullify a match '''
                    if any(matched):
                        ''' Move onto the next keg and ignore this one '''
                        continue
                ''' Add current beer to crawled beers '''
                crawled_beers.add(beer_id)
                ''' Print how many kegs have been crawled '''
                print('Keg {}'.format(len(crawled_beers)))
                ''' Gets the gallons of alcohol per USD for the keg '''
                ratio = keg.get_ratio()
                print('')
                ''' Maintain a sorted list of the current top 3 kegs using
                    heapq (heap queue algorithm)
                    optimal_kegs holds a tuple containing the ratio and keg
                    associated with it
                '''
                if optimal_kegs:
                    for opt_tuple in optimal_kegs:
                        ''' If ratio is greater than any keg ratio currently
                            in optimal_kegs, then add it
                        '''
                        if ratio > opt_tuple[0]:
                            if len(optimal_kegs) >= num_kegs:
                                ''' Adds new item to list
                                    removes the smallest to maintain size
                                '''
                                heapq.heappushpop(optimal_kegs, (ratio, keg))
                            else:
                                heapq.heappush(optimal_kegs, (ratio, keg))
                            break
                else:
                    ''' Will only occur for the very first keg crawled '''
                    heapq.heappush(optimal_kegs, (ratio, keg))
        ''' Typical link: Shop/ProductList.aspx/_/N-15Z1z141vn/No-100?DNID=Beer
            If No- is evenly divisible by 100, it leads to more pages to add
        '''
        if 'No-' in page_link:
            if int(page_link.split('No-')[1].split('?')[0]) % 100 == 0:
                ''' Unique new page links with their base url appended '''
                new_page_links[:] = unique(get_html(page_link).xpath('//div[@cl\
ass="Produ\
ctListPagi\
ng"]/a/@hr\
ef'))
                page_links += [base_url + x for x in new_page_links]
    ''' Sort the list in descending order by ratio
        (index 0 in the keg tuple)
    '''
    return sorted(optimal_kegs, key=lambda x: x[0], reverse=True)
def command_line_runner():
    """Parse CLI args, crawl for optimal kegs, and run an interactive menu.

    NOTE(review): uses ``raw_input`` and list-returning ``map`` elsewhere,
    so this script targets Python 2 -- confirm before porting.
    """
    parser = get_parser()
    args = vars(parser.parse_args())
    ''' Number of top kegs to display (default: 3) '''
    if not args['top']:
        args['top'] = 3
    ''' Number of keg pages to crawl (default: 10000) '''
    if not args['limit']:
        args['limit'] = 10000
    ''' Number of attempts to resolve each ABV (default: 10) '''
    if not args['attempts']:
        args['attempts'] = 10
    optimal_kegs = get_optimal_kegs(args)
    ratio = 0
    keg = None
    try:
        ''' Print a menu for the user to choose from the top kegs '''
        printing = True
        optimal_keg = None
        chosen_keg = -1
        quit = 0
        if not optimal_kegs:
            print('An error occurred during processing. \
Check your internet connection.')
        else:
            ''' Loop until user decides to quit '''
            while printing and chosen_keg != quit:
                ''' keg_tuple is ratio followed by BeerKeg object '''
                for i, keg_tuple in enumerate(optimal_kegs):
                    ratio = keg_tuple[0]
                    keg = keg_tuple[1]
                    print('\n{}. {}\tRatio: {}'.format(i, keg.name, ratio))
                    print('Available: {}\tVolume: {} Gal.\tPrice: ${}\n{}\
'.format(keg.num_avail, keg.volume, \
                           keg.price, keg.desc))
                    ''' Make quit always be the last menu option '''
                    quit = i+1
                print('\n{}. Quit'.format(quit))
                try:
                    chosen_keg = int(raw_input('Choose a keg: '))
                except Exception:
                    continue
                ''' If chosen keg is within the optimal kegs range
                    (quit is one outside), then open the link
                '''
                if chosen_keg >= 0 and chosen_keg < len(optimal_kegs):
                    optimal_keg = optimal_kegs[chosen_keg][1]
                    ''' Opens the link to the keg in a browser
                        using webbrowser
                    '''
                    optimal_keg.open()
    except KeyboardInterrupt:
        # Ctrl-C exits the menu quietly.
        pass
# Script entry point.
if __name__ == '__main__':
    command_line_runner()
| StarcoderdataPython |
94572 | <filename>curso em video/python/mundo 2/ex 067.py<gh_stars>0
#num1 = int(input('qual tabuada você deseja?'))
#num2 = 1
#while True:
# if num1 <= 0:
# break
# print(f'{num2} X {num1} ={num2*num1}')
# num2 += 1
# if num2>=11:
# num1 = int(input('qual tabuada você deseja?'))
# num2 = 1
#print('programa encerrado!')
# Repeatedly ask which multiplication table ("tabuada") to print;
# any negative number ends the program.
while True:
    num1 = int(input('qual tabuada você deseja?'))
    # Negative input is the exit sentinel.
    if num1 < 0:
        break
    print(30 * '-')
    # Print the 1..10 multiplication table for num1.
    for c in range(1,11):
        print(f'{num1} X {c} = {c*num1}')
    print(30*'-')
print('programa encerrado')
6441242 | # coding: utf-8
from fabkit import task, serial
from fablib.mysql import MySQL
@task
def setup():
    """Install and configure MySQL on the target hosts."""
    MySQL().setup()
    return {'status': 1}
@task
@serial
def setup_replication():
    """Configure MySQL replication, one host at a time (serial)."""
    MySQL().setup_replication()
    return {'status': 1}
| StarcoderdataPython |
3578080 | <reponame>eHealthAfrica/aether
#!/usr/bin/env python
# Copyright (C) 2019 by eHealth Africa : http://www.eHealthAfrica.org
#
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from io import open
from setuptools import setup, find_packages
def read(f):
    """Return the full contents of text file *f*, decoded as UTF-8.

    Uses a context manager so the file handle is closed deterministically
    (the original ``open(f).read()`` leaked the handle until GC).
    """
    with open(f, 'r', encoding='utf-8') as fh:
        return fh.read()
# Package metadata for aether.client.  The version string is produced by the
# build and mounted at /var/tmp/VERSION; the long description comes from the
# package README.
setup(
    version=read('/var/tmp/VERSION').strip(),
    name='aether.client',
    description='A python library with Aether Client functionality',
    long_description=read('README.md'),
    long_description_content_type='text/markdown',
    url='https://github.com/eHealthAfrica/aether/',
    author='eHealth Africa',
    author_email='<EMAIL>',
    license='Apache2 License',
    install_requires=[
        'bravado',
        'jsonschema[format]<4',
        'requests[security]',
        'requests_oauthlib'
    ],
    packages=find_packages(),
    include_package_data=True,
)
| StarcoderdataPython |
11221535 | <reponame>borisdayma/wav2vec-toolkit
import os
import sys
import textwrap
import pkg_resources
# Package root and the sub-package holding one module per language.
BASE_PATH = "wav2vec_toolkit"
LANG_PATH = "languages"
# Files every language sub-package must provide.
LANG_MODULE_REQUIREMENTS = ["normalizer.py", "README.md", "requirements.txt"]
def get_file_path(name: str):
    """Resolve *name* relative to this module's directory to an absolute path."""
    module_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(module_dir, name))
def parse_requirements(filename: str):
    """Return the stripped, non-empty, non-comment lines of *filename*.

    Opens the file with a context manager so the handle is closed
    deterministically (the original ``open(filename)`` generator leaked it).
    """
    with open(filename) as req_file:
        stripped = (line.strip() for line in req_file)
        return [line for line in stripped if line and not line.startswith("#")]
def load_module_from_lang(lang: str):
    """Import the language module for *lang* and return its ``Normalizer``.

    Verifies that the language directory contains every required file,
    checks its ``requirements.txt`` against installed distributions, then
    imports ``wav2vec_toolkit.languages.<lang>``.  Raises (after printing
    guidance) on missing files, dependency conflicts, or import failure.
    """
    lang_mod_path = f"{BASE_PATH}/{LANG_PATH}/{lang}"
    # Keep only "languages/<lang>" for filesystem lookups.
    lang_path = "/".join(lang_mod_path.split("/")[-2:])
    # Every language package must ship the required files.
    for path in LANG_MODULE_REQUIREMENTS:
        _path = get_file_path(os.path.join(lang_path, path))
        if not os.path.exists(_path):
            raise FileNotFoundError(
                textwrap.dedent(
                    f"""
                    The filename {path} not existed in `{lang}` directory {_path},
                    you can easily add a new language by instructions mentioned at repo.
                    https://github.com/anton-l/wav2vec-toolkit/tree/master#adding-new-languages
                    """
                )
            )
    # Validate the language's declared dependencies against the environment.
    requirements_txt = get_file_path(os.path.join(lang_path, "requirements.txt"))
    dependencies = parse_requirements(requirements_txt)
    try:
        pkg_resources.require(dependencies)
    except pkg_resources.VersionConflict as error:
        print(
            textwrap.dedent(
                f"""
                {error.dist} is installed but {error.req} is required,
                fastest solution `pip install -r lang/{lang}/requirements.txt`,
                you can easily add a new language by instructions mentioned at repo.
                https://github.com/anton-l/wav2vec-toolkit/tree/master#adding-new-languages
                """
            )
        )
        raise
    except pkg_resources.DistributionNotFound as error:
        print(
            textwrap.dedent(
                f"""
                The '{error.req}' distribution was not found and is required by {error.requirers_str},
                fastest solution `pip install -r lang/{lang}/requirements.txt`,
                you can easily add a new language by instructions mentioned at repo.
                https://github.com/anton-l/wav2vec-toolkit/tree/master#adding-new-languages
                """
            )
        )
        raise
    try:
        module = __import__(lang_mod_path.replace("/", "."), fromlist=["Normalizer"])
    except ModuleNotFoundError:
        print(
            textwrap.dedent(
                f"""
                something wrong happened with your language {lang},
                you can easily add a new language by instructions mentioned at repo.
                https://github.com/anton-l/wav2vec-toolkit/tree/master#adding-new-languages
                """
            )
        )
        raise
    # NOTE(review): getattr without a default raises AttributeError when the
    # module has no Normalizer, so the `else None` branch looks unreachable
    # -- confirm intended behaviour before changing.
    normalizer = module.Normalizer if getattr(module, "Normalizer") else None
    return normalizer
| StarcoderdataPython |
1938063 | <gh_stars>0
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Load dependencies for the google-cloud-cpp-pubsub library."""
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
def google_cloud_cpp_pubsub_deps():
    """Loads dependencies need to compile the google-cloud-cpp-pubsub library.

    Application developers can call this function from their WORKSPACE file
    to obtain all the necessary dependencies for google-cloud-cpp-pubsub,
    including gRPC and its dependencies. This function only loads
    dependencies that have not been previously loaded, allowing
    application developers to override the version of the dependencies
    they want to use.
    """

    # Load rules_cc, used by googletest
    if "rules_cc" not in native.existing_rules():
        http_archive(
            name = "rules_cc",
            strip_prefix = "rules_cc-a508235df92e71d537fcbae0c7c952ea6957a912",
            urls = [
                "https://github.com/bazelbuild/rules_cc/archive/a508235df92e71d537fcbae0c7c952ea6957a912.tar.gz",
            ],
            sha256 = "d21d38c4b8e81eed8fa95ede48dd69aba01a3b938be6ac03d2b9dc61886a7183",
        )

    # Load google-cloud-cpp-common.
    if "com_github_googleapis_google_cloud_cpp_common" not in native.existing_rules():
        http_archive(
            name = "com_github_googleapis_google_cloud_cpp_common",
            strip_prefix = "google-cloud-cpp-common-0.21.0",
            urls = [
                "https://github.com/googleapis/google-cloud-cpp-common/archive/v0.21.0.tar.gz",
            ],
            sha256 = "2e1cd2a97122a02fe3c58a997657a360e19ec9984b857197a9a193a07b4c092b",
        )

    # Load a version of googletest that we know works.
    if "com_google_googletest" not in native.existing_rules():
        http_archive(
            name = "com_google_googletest",
            strip_prefix = "googletest-release-1.10.0",
            urls = [
                "https://github.com/google/googletest/archive/release-1.10.0.tar.gz",
            ],
            sha256 = "9dc9157a9a1551ec7a7e43daea9a694a0bb5fb8bec81235d8a1e6ef64c716dcb",
        )

    # Load the googleapis dependency (proto definitions for Google APIs).
    if "com_google_googleapis" not in native.existing_rules():
        http_archive(
            name = "com_google_googleapis",
            urls = [
                "https://github.com/googleapis/googleapis/archive/e9e90a787703ec5d388902e2cb796aaed3a385b4.tar.gz",
            ],
            strip_prefix = "googleapis-e9e90a787703ec5d388902e2cb796aaed3a385b4",
            sha256 = "4c0ba761e943b818cc8b242ed05d0cfdaaac7c4035a43eeab0820461c77619f0",
            build_file = "@com_github_googleapis_google_cloud_cpp_pubsub//bazel:googleapis.BUILD",
        )

    # Load protobuf.
    if "com_google_protobuf" not in native.existing_rules():
        http_archive(
            name = "com_google_protobuf",
            strip_prefix = "protobuf-3.11.3",
            urls = [
                "https://github.com/google/protobuf/archive/v3.11.3.tar.gz",
            ],
            sha256 = "cf754718b0aa945b00550ed7962ddc167167bd922b842199eeb6505e6f344852",
        )

    # Load gRPC and its dependencies, using a similar pattern to this function.
    if "com_github_grpc_grpc" not in native.existing_rules():
        http_archive(
            name = "com_github_grpc_grpc",
            strip_prefix = "grpc-1.26.0",
            urls = [
                "https://github.com/grpc/grpc/archive/v1.26.0.tar.gz",
            ],
            sha256 = "2fcb7f1ab160d6fd3aaade64520be3e5446fc4c6fa7ba6581afdc4e26094bd81",
        )

    # We use the cc_proto_library() rule from @com_google_protobuf, which
    # assumes that grpc_cpp_plugin and grpc_lib are in the //external: module
    native.bind(
        name = "grpc_cpp_plugin",
        actual = "@com_github_grpc_grpc//src/compiler:grpc_cpp_plugin",
    )
    native.bind(
        name = "grpc_lib",
        actual = "@com_github_grpc_grpc//:grpc++",
    )
| StarcoderdataPython |
9680266 | #!/usr/bin/env python3
import base
import numpy as np
def tasks():
    """Return the task instances exported by this module."""
    return [ScoreMeanStd()]
class ScoreMeanStd(base.Task):
    """
    Calculates mean and standard averages of the number of mutations, hse
    score and dom score. A comparison is made between hse/dom scores of
    positions with and without mutations.
    """
    def __init__(self):
        """Register this task under 'score_mean_std', output under poster2014/."""
        super(ScoreMeanStd, self).__init__('score_mean_std')
        self._set_data_subdir('poster2014')
    def _run(self):
        """Query the database, print score statistics, and write CSV files
        for box-plotting scores of positions with/without mutations."""
        with base.connect() as con:
            c = con.cursor()
            # Get positions (may have gaps!)
            idx = []
            for r in c.execute('select idx from scn5a order by idx'):
                idx.append(r[0])
            idx = np.array(idx)
            # Get mutations for each position (exac publications excluded);
            # mut is a 0/1 mask over positions.
            q = 'select distinct idx, new from report where pub != "exac"'
            mut = np.zeros(idx.shape)
            for r in c.execute(q):
                mut[r[0] - 1] = 1 # Positions start at 1
            # Get Human-squid-eel and domain alignment score
            hse = np.zeros(idx.shape, dtype=float)
            dom = np.zeros(idx.shape, dtype=float)
            q = 'select idx, hse, dom from conservedness order by idx'
            for k, r in enumerate(c.execute(q)):
                assert(r[0] -1 == k) # Score should be stored for each idx
                hse[k] = r[1]
                dom[k] = r[2]
            #
            # 1. Overal mean mutation count and hse/dom scores
            # (Text output only)
            #
            hse_mean = np.mean(hse)
            hse_stdd = np.std(hse)
            dom_mean = np.mean(dom)
            dom_stdd = np.std(dom)
            print('Mean hse: ' + str(hse_mean) +', std: '+ str(hse_stdd))
            print('Mean dom: ' + str(dom_mean) +', std: '+ str(dom_stdd))
            #
            # 2. Position and dom/hse scores for positions with and without
            # mutations.
            # (Text output only)
            #
            idx_idx = idx[mut > 0]
            hse_idx = hse[mut > 0]
            dom_idx = dom[mut > 0]
            idx_neg = idx[mut == 0]
            hse_neg = hse[mut == 0]
            dom_neg = dom[mut == 0]
            hse_idx_mean = np.mean(hse_idx)
            hse_idx_stdd = np.std(hse_idx)
            dom_idx_mean = np.mean(dom_idx)
            dom_idx_stdd = np.std(dom_idx)
            hse_neg_mean = np.mean(hse_neg)
            hse_neg_stdd = np.std(hse_neg)
            dom_neg_mean = np.mean(dom_neg)
            dom_neg_stdd = np.std(dom_neg)
            print('HSE score:')
            print(' Mean, with mutations: ' + str(hse_idx_mean)
                  + ', std: ' + str(hse_idx_stdd))
            print(' Mean, no mutations : ' + str(hse_neg_mean)
                  + ', std: ' + str(hse_neg_stdd))
            print('DOM score:')
            print(' Mean, with mutations: ' + str(dom_idx_mean)
                  + ', std: ' + str(dom_idx_stdd))
            print(' Mean, no mutations : ' + str(dom_neg_mean)
                  + ', std: ' + str(dom_neg_stdd))
            #
            # 3. HSE and DOM score for positions with and without mutations
            # (For use in a box-plot)
            #
            basename = 'score-with-mutations'
            filename = self.data_out(basename + '.txt')
            print('Writing info to ' + filename)
            with open(filename, 'w') as f:
                f.write(
                    'Scores for positions with mutations (idx, hse, dom)')
            filename = self.data_out(basename + '.csv')
            print('Writing data to ' + filename)
            with open(filename, 'w') as f:
                c = self.csv_writer(f)
                c.writerow(['position', 'hse-score', 'dom-score'])
                h = iter(hse_idx)
                d = iter(dom_idx)
                for p in idx_idx:
                    c.writerow([p, next(h), next(d)])
            basename = 'score-without-mutations'
            filename = self.data_out(basename + '.txt')
            print('Writing info to ' + filename)
            with open(filename, 'w') as f:
                f.write(
                    'Scores for positions without mutations (idx, hse, dom)')
            filename = self.data_out(basename + '.csv')
            print('Writing data to ' + filename)
            with open(filename, 'w') as f:
                c = self.csv_writer(f)
                c.writerow(['position', 'hse-score', 'dom-score'])
                h = iter(hse_neg)
                d = iter(dom_neg)
                for p in idx_neg:
                    c.writerow([p, next(h), next(d)])
            # Write labels used to create box plots
            basename = 'score-with-without-mutations-labels'
            filename = self.data_out(basename + '.csv')
            print('Writing label info to ' + filename)
            with open(filename, 'w') as f:
                c = self.csv_writer(f)
                c.writerow(['HSE-'])
                c.writerow(['HSE+'])
                c.writerow(['DOM-'])
                c.writerow(['DOM+'])
# Register this module's tasks with the shared runner and execute them.
if __name__ == '__main__':
    t = base.TaskRunner()
    t.add_tasks(tasks())
    t.run()
| StarcoderdataPython |
31142 | <gh_stars>0
"""
Reads (article_id, [tokens]) from tokens.pickle and writes:
(article_id, w2v)
(article_id, bow)
"""
import json
import sys
import os
import pickle
import psycopg2
from multiprocessing.pool import Pool
import numpy as np
import zlib
# from gensim.models import Word2Vec
from gensim.models import Word2Vec, KeyedVectors
import src
# W2V_FILE = os.environ["MODEL_PATH"] + "/word2vec.model"
from src.visualization.console import StatusVisualization
# Paths to the shared vocabulary and word2vec model, rooted at env vars.
VOCABULARY_FILE = os.environ["DATA_PATH"] + "/interim/articles/vocabulary.pickle"
W2V_FILE = os.environ["MODEL_PATH"] + "/word2vec.model"
# Load the token vocabulary once at import time; the context manager closes
# the file handle deterministically (the original `open(...)` leaked it).
with open(VOCABULARY_FILE, "rb") as _vocab_file:
    vocabulary = pickle.load(_vocab_file)
def init_worker():
    """Pool initializer: load the word2vec model once per worker process."""
    global model
    model = KeyedVectors.load(W2V_FILE)
def count_tokens(tokens):
    """Bag-of-words vector over the module vocabulary, zlib-compressed.

    Tokens outside the vocabulary are ignored; the result is the float32
    count vector (one entry per vocabulary word, in order), compressed at
    level 9.
    """
    occurrences = dict.fromkeys(vocabulary, 0)
    for token in tokens:
        if token in occurrences:
            occurrences[token] += 1
    bow = np.fromiter((occurrences[word] for word in vocabulary),
                      dtype=np.float32)
    return zlib.compress(bow, 9)
def w2v_embed(tokens):
    """Mean word2vec embedding of *tokens*, zlib-compressed (level 9).

    Tokens absent from the model (the Word2Vec training filters some) are
    skipped; an empty token list yields the zero vector.
    """
    accumulator = np.zeros(2048, dtype=np.float32)
    for token in tokens:
        if token in model:
            accumulator += model[token]
    mean_vector = accumulator / (len(tokens) or 1)
    return zlib.compress(mean_vector, 9)
# Articles with at most this many tokens are rejected as too short.
MIN_TOKENS = 50


def extract_features(article):
    """Decode one (id, tokens_json) row into (status, id, bow, w2v).

    The feature payloads are None when the article has too few tokens.
    """
    article_id, tokens_json = article
    tokens = json.loads(tokens_json)
    if len(tokens) <= MIN_TOKENS:
        return "Too few tokens", article_id, None, None
    return "Success", article_id, count_tokens(tokens), w2v_embed(tokens)
def run():
    """Stream successfully-extracted articles from Postgres, compute bow/w2v
    features in a worker pool, and write the results back per article."""
    conn = psycopg2.connect(database="video_article_retrieval", user="postgres")
    article_cursor = conn.cursor()
    update_cursor = conn.cursor()
    article_cursor.execute("SELECT count(1) FROM articles WHERE text_extraction_status='Success'")
    article_count, = article_cursor.fetchone()
    # avoid loading all articles into memory.
    article_cursor.execute("SELECT id, tokens FROM articles WHERE text_extraction_status='Success'")
    crawling_progress = StatusVisualization(article_count, update_every=1000)
    # 8 workers, each loading the word2vec model once via init_worker.
    with Pool(8, initializer=init_worker) as pool:
        for status, article_id, compressed_bow, compressed_w2v in pool.imap_unordered(extract_features, article_cursor):
            if status == 'Success':
                update_cursor.execute(
                    "UPDATE articles SET bow_2048=%s, w2v_2048=%s, feature_extraction_status='Success' WHERE id=%s",
                    [compressed_bow, compressed_w2v, article_id])
            else:
                # Record the failure reason so the article is not retried blindly.
                update_cursor.execute(
                    "UPDATE articles SET feature_extraction_status=%s WHERE id=%s",
                    [status, article_id])
            crawling_progress.inc()
    # Single commit at the end; all updates land atomically.
    conn.commit()
# Run the feature extraction when executed as a script.
if __name__ == '__main__':
    run()
| StarcoderdataPython |
6652246 | import datetime
from pathlib import Path
import tempfile
import logging
import sys
from rich.logging import RichHandler
# Shared log-line layout: "<timestamp> - <module>.<function> - (<LEVEL>): <message>"
LOGGER_FORMAT = '%(asctime)s - ' \
                '%(module)s.%(funcName)s - ' \
                '(%(levelname)s): %(message)s'
FORMATTER = logging.Formatter(LOGGER_FORMAT)
def get_console_handler():
    """Return a stdout StreamHandler configured with the shared formatter.

    :rtype: logging.StreamHandler
    """
    console = logging.StreamHandler(sys.stdout)
    console.setFormatter(FORMATTER)
    return console
def get_file_handler(file_name):
    """Return a FileHandler writing to a dated log file under the OS temp dir.

    The log directory resolves to something like
    C:\\Users\\<USER_NAME>\\AppData\\Local\\Temp\\_LOG on Windows.
    ``mkdir(parents=True, exist_ok=True)`` is race-free, so the separate
    existence check the original performed is unnecessary and was itself a
    check-then-act race.

    :param file_name: base name of the log file (date suffix is appended).
    :rtype: logging.FileHandler
    """
    log_temp_location_path = Path(tempfile.gettempdir()).joinpath("_LOG")
    log_temp_location_path.mkdir(parents=True, exist_ok=True)
    today = str(datetime.date.today())
    log_file = log_temp_location_path.joinpath(f"{file_name}_{today}.log")
    file_handler = logging.FileHandler(log_file.as_posix())
    file_handler.setFormatter(FORMATTER)
    return file_handler
def get_logger(name=__name__):
    """Return a DEBUG-level logger with Rich console and file handlers.

    :param name: name of the logger (and of the log file); defaults to
        this module's ``__name__``
    :return: configured ``logging.Logger``
    """
    logger = logging.getLogger(name)
    logger.setLevel(logging.DEBUG)
    # logger.addHandler(get_console_handler())
    logger.addHandler(RichHandler())  # rich-formatted console output
    logger.addHandler(get_file_handler(name))
    # Stop records from also propagating to ancestor loggers (avoids
    # duplicate output on the root logger).
    logger.propagate = False
    return logger
| StarcoderdataPython |
23062 | import itertools
def next_step(board, i, j):
    """Return the next Game-of-Life state (0 or 1) of cell ``(i, j)``.

    Counts live cells among the up-to-8 neighbours of ``(i, j)``, clamped
    to the board edges, and applies Conway's rules:

    * a live cell survives with 2 or 3 live neighbours, otherwise dies;
    * a dead cell becomes alive with exactly 3 live neighbours.

    :param board: 2-D array with a ``.shape`` attribute (e.g. a numpy
        array) holding 0/1 cell states
    :param i: row index of the cell
    :param j: column index of the cell
    :return: 0 or 1, the cell's state in the next generation
    """
    # Clamp the 3x3 neighbourhood to the board boundaries.
    x_min = max(0, j - 1)
    x_max = min(board.shape[1] - 1, j + 1)
    y_min = max(0, i - 1)
    y_max = min(board.shape[0] - 1, i + 1)
    neighbours = list(itertools.product(range(y_min, y_max + 1),
                                        range(x_min, x_max + 1)))
    neighbours.remove((i, j))  # the cell is not its own neighbour
    # Count live neighbours (the original shadowed the builtin ``sum``).
    live_neighbours = sum(1 for (k, m) in neighbours if board[k, m] == 1)
    if board[i, j] == 1:
        # Live cell: survives only with 2 or 3 live neighbours.
        return 1 if 2 <= live_neighbours <= 3 else 0
    # Dead cell: born with exactly 3 live neighbours.
    return 1 if live_neighbours == 3 else 0
| StarcoderdataPython |
4847041 | <gh_stars>100-1000
from __future__ import unicode_literals
from django.core.exceptions import ObjectDoesNotExist, PermissionDenied
from django.db.models import Q
from django.utils import six
from djblets.webapi.errors import DOES_NOT_EXIST, WebAPIError
from djblets.webapi.fields import (BooleanFieldType,
ChoiceFieldType,
DateTimeFieldType,
DictFieldType,
IntFieldType,
ResourceFieldType,
StringFieldType)
from reviewboard.reviews.models import BaseComment
from reviewboard.webapi.base import ImportExtraDataError, WebAPIResource
from reviewboard.webapi.mixins import MarkdownFieldsMixin
from reviewboard.webapi.resources import resources
class BaseCommentResource(MarkdownFieldsMixin, WebAPIResource):
    """Base class for comment resources.

    Provides common fields and functionality for all comment resources.
    """
    added_in = '1.6'

    fields = {
        'id': {
            'type': IntFieldType,
            'description': 'The numeric ID of the comment.',
        },
        'extra_data': {
            'type': DictFieldType,
            'description': 'Extra data as part of the comment. This depends '
                           'on what is being commented on, and may be '
                           'used in conjunction with an extension.',
            'added_in': '2.0',
        },
        'issue_opened': {
            'type': BooleanFieldType,
            'description': 'Whether or not a comment opens an issue.',
        },
        'issue_status': {
            'type': ChoiceFieldType,
            'choices': tuple(six.iterkeys(BaseComment.ISSUE_STRING_TO_STATUS)),
            'description': 'The status of an issue.',
        },
        'public': {
            'type': BooleanFieldType,
            'description': 'Whether or not the comment is part of a public '
                           'review.',
            'added_in': '2.0',
        },
        'text': {
            'type': StringFieldType,
            'description': 'The comment text.',
            'supports_text_types': True,
            'added_in': '2.0',
        },
        'text_type': {
            'type': ChoiceFieldType,
            'choices': MarkdownFieldsMixin.TEXT_TYPES,
            'description': 'The mode for the comment text field.',
            'added_in': '2.0',
        },
        'timestamp': {
            'type': DateTimeFieldType,
            'description': 'The date and time that the comment was made.',
            'added_in': '2.0',
        },
        'user': {
            'type': ResourceFieldType,
            'resource': 'reviewboard.webapi.resources.user.UserResource',
            'description': 'The user who made the comment.',
            'added_in': '2.0',
        },
    }

    # Common field definitions for create/update requests
    _COMMON_REQUIRED_CREATE_FIELDS = {
        'text': {
            'type': StringFieldType,
            'description': 'The comment text.',
            'supports_text_types': True,
            'added_in': '2.0',
        },
    }

    _COMMON_OPTIONAL_CREATE_FIELDS = {
        'force_text_type': {
            'type': ChoiceFieldType,
            'choices': MarkdownFieldsMixin.TEXT_TYPES,
            'description': 'The text type, if any, to force for returned '
                           'text fields. The contents will be converted '
                           'to the requested type in the payload, but '
                           'will not be saved as that type.',
            'added_in': '2.0.9',
        },
        'text_type': {
            'type': ChoiceFieldType,
            'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
            'description': 'The content type for the comment text field. '
                           'The default is ``plain``.',
            'added_in': '2.0',
        },
    }

    _COMMON_OPTIONAL_UPDATE_FIELDS = {
        'force_text_type': {
            'type': ChoiceFieldType,
            'choices': MarkdownFieldsMixin.TEXT_TYPES,
            'description': 'The text type, if any, to force for returned '
                           'text fields. The contents will be converted '
                           'to the requested type in the payload, but '
                           'will not be saved as that type.',
            'added_in': '2.0.9',
        },
        'text': {
            'type': StringFieldType,
            'description': 'The comment text.',
            'supports_text_types': True,
            'added_in': '2.0',
        },
        'text_type': {
            'type': ChoiceFieldType,
            'choices': MarkdownFieldsMixin.SAVEABLE_TEXT_TYPES,
            'description': 'The new content type for the comment text field. '
                           'The default is to leave the type unchanged.',
            'added_in': '2.0',
        },
    }

    # Field definitions for top-level comment create/update requests
    REQUIRED_CREATE_FIELDS = _COMMON_REQUIRED_CREATE_FIELDS

    OPTIONAL_CREATE_FIELDS = dict({
        'issue_opened': {
            'type': BooleanFieldType,
            'description': 'Whether the comment opens an issue.',
            'added_in': '2.0',
        },
    }, **_COMMON_OPTIONAL_CREATE_FIELDS)

    OPTIONAL_UPDATE_FIELDS = dict({
        'issue_opened': {
            'type': BooleanFieldType,
            'description': 'Whether or not the comment opens an issue.',
            'added_in': '2.0',
        },
        'issue_status': {
            'type': ChoiceFieldType,
            'choices': tuple(six.iterkeys(BaseComment.ISSUE_STRING_TO_STATUS)),
            'description': 'The status of an open issue.',
            'added_in': '2.0',
        },
    }, **_COMMON_OPTIONAL_UPDATE_FIELDS)

    # Field definitions for comment reply create/update requests
    REPLY_REQUIRED_CREATE_FIELDS = dict({
        'reply_to_id': {
            'type': IntFieldType,
            'description': 'The ID of the comment being replied to.',
        },
    }, **_COMMON_REQUIRED_CREATE_FIELDS)

    REPLY_OPTIONAL_CREATE_FIELDS = _COMMON_OPTIONAL_CREATE_FIELDS
    REPLY_OPTIONAL_UPDATE_FIELDS = _COMMON_OPTIONAL_UPDATE_FIELDS

    def serialize_issue_status_field(self, obj, **kwargs):
        """Serialize the numeric issue status as its API string form."""
        return BaseComment.issue_status_to_string(obj.issue_status)

    def has_access_permissions(self, request, obj, *args, **kwargs):
        """Return whether the requesting user may view this comment."""
        return obj.is_accessible_by(request.user)

    def has_modify_permissions(self, request, obj, *args, **kwargs):
        """Return whether the requesting user may modify this comment."""
        return obj.is_mutable_by(request.user)

    def has_delete_permissions(self, request, obj, *args, **kwargs):
        """Return whether the requesting user may delete this comment."""
        return obj.is_mutable_by(request.user)

    def create_comment(self,
                       review,
                       fields,
                       text,
                       comments_m2m,
                       issue_opened=False,
                       text_type=MarkdownFieldsMixin.TEXT_TYPE_PLAIN,
                       extra_fields={},
                       save=True,
                       **kwargs):
        """Create a comment based on the requested data.

        This will construct a comment of the type represented by the resource,
        setting the issue states, text, extra_data, and any additional fields
        provided by the caller.

        Args:
            review (reviewboard.reviews.models.review.Review):
                The review owning the comment.

            fields (list of unicode):
                The model fields that can be set through the API.

            text (unicode):
                The comment text.

            comments_m2m (django.db.models.ManyToManyField):
                The review's comments relation, where the new comment will
                be added.

            issue_opened (bool, optional):
                Whether this comment opens an issue.

            text_type (unicode, optional):
                The text type for the comment. This defaults to plain text.

            extra_fields (dict, optional):
                Extra fields from the request not otherwise handled by the
                API resource. Any ``extra_data`` modifications from this will
                be applied to the comment.

            save (bool, optional):
                Whether or not to save the field and update ``comments_m2m``.
                If ``False``, the caller is responsible for performing the
                save.

            **kwargs (dict):
                Keyword arguments representing additional fields handled by
                the API resource. Any that are also listed in ``fields`` will
                be set on the model.

        Returns:
            tuple or djblets.webapi.errors.WebAPIError:
            Either a successful payload containing the comment, or an error
            payload.
        """
        # NOTE(review): ``extra_fields={}`` is a mutable default; it is only
        # read here, never mutated, so it is safe as written.
        comment_kwargs = {
            'issue_opened': bool(issue_opened),
            'rich_text': text_type == self.TEXT_TYPE_MARKDOWN,
            'text': text.strip(),
        }

        for field in fields:
            comment_kwargs[field] = kwargs.get(field)

        new_comment = self.model(**comment_kwargs)

        try:
            self.import_extra_data(new_comment, new_comment.extra_data,
                                   extra_fields)
        except ImportExtraDataError as e:
            return e.error_payload

        if issue_opened:
            new_comment.issue_status = BaseComment.OPEN
        else:
            # Comments that don't open an issue carry no issue status.
            new_comment.issue_status = None

        if save:
            new_comment.save()
            comments_m2m.add(new_comment)

        return 201, {
            self.item_result_key: new_comment,
        }

    def create_or_update_comment_reply(self, request, comment, reply,
                                       comments_m2m, default_attrs={},
                                       *args, **kwargs):
        """Create a reply to a comment based on the requested data.

        If there's an existing reply to a comment, that one will be updated
        instead.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            comment (reviewboard.reviews.models.base_comment.BaseComment):
                The comment being replied to.

            reply (reviewboard.reviews.models.review.Review):
                The review reply owning the comment.

            comments_m2m (django.db.models.ManyToManyField):
                The reply's comments relation, where the new comment will
                be added.

            default_attrs (dict, optional):
                Default attributes to add to the new comment reply, if an
                existing one does not exist.

            *args (tuple):
                Positional arguments from the caller.

            **kwargs (dict):
                Keyword arguments from the caller.

        Returns:
            tuple or djblets.webapi.errors.WebAPIError:
            Either a successful payload containing the comment, or an error
            payload.
        """
        q = self._get_queryset(request, *args, **kwargs)
        q = q.filter(Q(reply_to=comment) & Q(review=reply))

        try:
            new_comment = q.get()

            # This already exists. Go ahead and update, but we're going to
            # redirect the user to the right place.
            is_new = False
        except self.model.DoesNotExist:
            new_comment = self.model(reply_to=comment, **default_attrs)
            is_new = True

        rsp = self.update_comment(request=request,
                                  review=reply,
                                  comment=new_comment,
                                  is_reply=True,
                                  **kwargs)

        if isinstance(rsp, WebAPIError):
            return rsp

        data = rsp[1]

        if is_new:
            comments_m2m.add(new_comment)
            reply.save()

            return 201, data
        else:
            # 303 See Other: point the client at the existing reply.
            return 303, data, {
                'Location': self.get_href(new_comment, request, *args,
                                          **kwargs)
            }

    def update_comment(self, request, review, comment, update_fields=(),
                       extra_fields={}, is_reply=False, **kwargs):
        """Update an existing comment based on the requested data.

        This will modify a comment, setting new fields requested by the caller.

        Args:
            request (django.http.HttpRequest):
                The HTTP request from the client.

            review (reviewboard.reviews.models.review.Review):
                The review owning the comment.

            comment (reviewboard.reviews.models.base_comment.BaseComment):
                The comment to update.

            update_fields (tuple of unicode, optional):
                The model fields that can be updated through the API.
                Must be a tuple (it is concatenated with one below).

            extra_fields (dict, optional):
                Extra fields from the request not otherwise handled by the
                API resource. Any ``extra_data`` modifications from this will
                be applied to the comment.

            is_reply (bool, optional):
                Whether this is a reply to another comment.

            **kwargs (dict):
                Keyword arguments representing additional fields handled by
                the API resource. Any that are also listed in ``fields`` will
                be set on the model.

        Returns:
            tuple or djblets.webapi.errors.WebAPIError:
            Either a successful payload containing the comment, or an error
            payload.
        """
        if is_reply:
            if not resources.review_reply.has_modify_permissions(request,
                                                                 review):
                return self.get_no_access_error(request)
        else:
            # Determine whether or not we're updating the issue status.
            if self.should_update_issue_status(comment, **kwargs):
                return self.update_issue_status(request, self, **kwargs)

            if not resources.review.has_modify_permissions(request, review):
                return self.get_no_access_error(request)

            # If we've updated the comment from having no issue opened,
            # to having an issue opened, we need to set the issue status
            # to OPEN.
            if not comment.issue_opened and kwargs.get('issue_opened', False):
                comment.issue_status = BaseComment.OPEN

            # If we've updated the comment from having an issue opened to
            # having no issue opened, set the issue status back to null.
            if comment.issue_opened and not kwargs.get('issue_opened', True):
                comment.issue_status = None

        for field in ('issue_opened',) + update_fields:
            value = kwargs.get(field, None)

            if value is not None:
                if isinstance(value, six.string_types):
                    value = value.strip()

                setattr(comment, field, value)

        self.set_text_fields(comment, 'text', **kwargs)

        if not is_reply:
            try:
                self.import_extra_data(comment, comment.extra_data,
                                       extra_fields)
            except ImportExtraDataError as e:
                return e.error_payload

        comment.save()

        return 200, {
            self.item_result_key: comment,
        }

    def update_issue_status(self, request, comment_resource, *args, **kwargs):
        """Updates the issue status for a comment.

        Handles all of the logic for updating an issue status.
        """
        try:
            review_request = \
                resources.review_request.get_object(request, *args, **kwargs)
            comment = comment_resource.get_object(request, *args, **kwargs)
        except ObjectDoesNotExist:
            return DOES_NOT_EXIST

        # Check permissions to change the issue status
        if not comment.can_change_issue_status(request.user):
            return self.get_no_access_error(request)

        # We can only update the status of an issue if an issue has been
        # opened
        if not comment.issue_opened:
            raise PermissionDenied

        comment._review_request = review_request
        issue_status = \
            BaseComment.issue_string_to_status(kwargs.get('issue_status'))

        # If the issue requires verification, ensure that only people who are
        # authorized can close it.
        if (comment.require_verification and
            issue_status in (BaseComment.RESOLVED, BaseComment.DROPPED) and
            comment.issue_status in (BaseComment.OPEN,
                                     BaseComment.VERIFYING_RESOLVED,
                                     BaseComment.VERIFYING_DROPPED) and
            not comment.can_verify_issue_status(request.user)):
            return self.get_no_access_error(request)

        # We can only update the status of the issue
        comment.issue_status = issue_status
        comment.save(update_fields=['issue_status'])

        last_activity_time = \
            review_request.get_last_activity_info()['timestamp']

        return 200, {
            comment_resource.item_result_key: comment,
            'last_activity_time': last_activity_time.isoformat(),
        }

    def should_update_issue_status(self, comment, issue_status=None,
                                   issue_opened=None, **kwargs):
        """Returns True if the comment should have its issue status updated.

        Determines if a comment should have its issue status updated based
        on the current state of the comment, the review, and the arguments
        passed in the request.
        """
        if not issue_status:
            return False

        issue_status = BaseComment.issue_string_to_status(issue_status)

        # Only public reviews with an (about-to-be-)open issue and an
        # actually-changed status warrant the update path.
        return (comment.review.get().public and
                (comment.issue_opened or issue_opened) and
                issue_status != comment.issue_status)
| StarcoderdataPython |
360545 | <gh_stars>0
"""
Написать программу, которая выведет на экран все числа от 1 до 100 которые кратные n (n вводится с клавиатуры).
"""
n = int(input("Enter number: "))
for x in range(1, 101):
if x % n ==0:
print(x)
| StarcoderdataPython |
8041472 | import numpy as np
# scipy.stats.cramervonmises
# Suppose we wish to test whether the data generated by
# scipy.stats.norm.rvs were, in fact, drawn from the standard normal
# distribution. We choose a significance level alpha = 0.05.
from scipy import stats
rng = np.random.default_rng()
x = stats.norm.rvs(size=500, random_state=rng)
res = stats.cramervonmises(x, 'norm')
res.statistic, res.pvalue
# CramerVonMisesResult(statistic=0.1276613786697622, pvalue=0.46556649116631343)
# The p-value exceeds our chosen significance level, so we do not reject
# the null hypothesis that the observed sample is drawn from the
# standard normal distribution.
# Now, suppose we wish to check whether the same samples shifted by 2.1
# are consistent with having been drawn from a normal distribution with
# mean 2.
y = x + 2.1
res = stats.cramervonmises(y, 'norm', args=(2,))
# CramerVonMisesResult(statistic=0.7040268563073291, pvalue=0.012420322007088758)
# Here we used the ``args`` keyword to specify the mean (loc) of the
# normal distribution against which to test the data. This is equivalent
# to the following, where we create a normal distribution with mean 2
# and then pass its cdf method as an argument.
frozen_dist = stats.norm(loc=2)
res = stats.cramervonmises(y, frozen_dist.cdf)
res.statistic, res.pvalue
# (0.7040268563073291, 0.012420322007088758)
# In both cases we would reject the null hypothesis that the observed
# sample is drawn from a normal distribution with mean 2 (and standard
# variance 1) because the p-value 0.01 is smaller than our chosen
# significance level.
# scipy.stats.cramervonmises_2samp
# Suppose we wish to test whether two samples generated by
# scipy.stats.norm.rvs have the same distribution. Significance alpha = 0.05.
from scipy import stats
rng = np.random.default_rng()
x = stats.norm.rvs(size=100, random_state=rng)
y = stats.norm.rvs(size=70, random_state=rng)
res = stats.cramervonmises_2samp(x, y)
res.statistic, res.pvalue #(0.12726890756302467, 0.47115054777270216)
# The p-value exceeds our chosen significance level, so we do not reject
# the null hypothesis that the observed samples are drawn from the same
# distribution.
# For small sample sizes, exact p-values can be computed:
x = stats.norm.rvs(size=7, random_state=rng)
y = stats.t.rvs(df=2, size=6, random_state=rng)
res = stats.cramervonmises_2samp(x, y, method='exact')
res.statistic, res.pvalue #(0.042124542124541975, 0.9801864801864801)
# The p-value based on the asymptotic distribution is a good
# approximation even though the sample size is small.
res = stats.cramervonmises_2samp(x, y, method='asymptotic')
res.statistic, res.pvalue #(0.042124542124541975, 0.9937806294485269)
# Regardless of the method, the null hypothesis would not be rejected at
# the chosen significance level in this example.
x = stats.norm.rvs(size=700, random_state=rng)
y = stats.t.rvs(df=2, size=600, random_state=rng)
res = stats.cramervonmises_2samp(x, y)
print(res) #CramerVonMisesResult(statistic=0.6771188644688664, pvalue=0.014472209121915047)
#scipy.stats.kstest
from scipy import stats
rng = np.random.default_rng()
x = np.linspace(-15, 15, 9)
stats.kstest(x, 'norm')
# KstestResult(statistic=0.4443560271592436, pvalue=0.03885014008678778)
stats.kstest(stats.norm.rvs(size=100, random_state=rng), stats.norm.cdf)
# The lines above are equivalent to:
stats.kstest(stats.norm.rvs, 'norm', N=100)
# Testing t-distributed random variables against the normal distribution.
# With 100 degrees of freedom the t distribution looks close to the
# normal, and the KS test does not reject the hypothesis that the sample
# came from the normal distribution:
stats.kstest(stats.t.rvs(100, size=100, random_state=rng), 'norm')
# KstestResult(statistic=0.10694118810178882, pvalue=0.18878890547885985)
# With 3 degrees of freedom the t distribution differs enough from the
# normal distribution that we can reject the hypothesis that the sample
# came from the normal distribution at the 10% level:
stats.kstest(stats.t.rvs(3, size=100, random_state=rng), 'norm')
#KstestResult(statistic=0.11786287323060995, pvalue=0.11456645992107758)
#scipy.stats.ks_2samp
from scipy import stats
rng = np.random.default_rng()
n1 = 200 # size of the first sample
n2 = 300 # size of the second sample
# For a different distribution we can reject the null hypothesis since
# the p-value is below 1%:
rvs1 = stats.norm.rvs(size=n1, loc=0., scale=1, random_state=rng)
rvs2 = stats.norm.rvs(size=n2, loc=0.5, scale=1.5, random_state=rng)
stats.ks_2samp(rvs1, rvs2)
# KstestResult(statistic=0.24, pvalue=1.5876939054582095e-06)
# For a slightly different distribution we cannot reject the null
# hypothesis at alpha = 10% or lower, since the p-value of 0.219 is above 10%
rvs3 = stats.norm.rvs(size=n2, loc=0.01, scale=1.0, random_state=rng)
stats.ks_2samp(rvs1, rvs3)
# KstestResult(statistic=0.095, pvalue=0.2192140768654085)
# For an identical distribution we cannot reject the null hypothesis
# since the p-value is high, 41%:
rvs4 = stats.norm.rvs(size=n2, loc=0.0, scale=1.0, random_state=rng)
stats.ks_2samp(rvs1, rvs4)
#scipy.stats.anderson_ksamp
from scipy import stats
rng = np.random.default_rng()
# The null hypothesis that the two random samples come from the same
# distribution can be rejected at the 5% level because the returned test
# value is greater than the critical value for 5% (1.961), but not at
# the 2.5% level. Interpolation gives an approximate significance of 3.2%:
stats.anderson_ksamp([rng.normal(size=50),
rng.normal(loc=0.5, size=30)])
# p-value = significance_level = 0.07396028404997687
# The null hypothesis cannot be rejected for three samples from an
# identical distribution. The reported p-value (25%) was capped and may
# not be very accurate (since it corresponds to the value 0.449, whereas
# the statistic is -0.731):
stats.anderson_ksamp([rng.normal(size=50),
rng.normal(size=30), rng.normal(size=20)])
#Anderson_ksampResult(statistic=-0.5917988120678772, critical_values=array([0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856, 4.07210043, 5.56419101]), significance_level=0.25)
#scipy.stats.ansari
from scipy.stats import ansari
rng = np.random.default_rng()
# For these examples we create three random data sets. The first two,
# with sizes 35 and 25, are drawn from a normal distribution with mean 0
# and standard deviation 2. The third data set has size 25 and is drawn
# from a normal distribution with standard deviation 1.25.
x1 = rng.normal(loc=0, scale=2, size=35)
x2 = rng.normal(loc=0, scale=2, size=25)
x3 = rng.normal(loc=0, scale=1.25, size=25)
# First we apply ansari to x1 and x2. These samples are drawn from the
# same distribution, so we expect the Ansari-Bradley test not to lead us
# to conclude that the scales of the distributions differ.
ansari(x1, x2)
#AnsariResult(statistic=534.0, pvalue=0.811752031516162)
# With a p-value close to 1, we cannot conclude there is a significant
# difference in the scales (as expected).
# Now apply the test to x1 and x3:
ansari(x1, x3)
# AnsariResult(statistic=464.0, pvalue=0.01846645873767982)
# The probability of observing such an extreme value of the statistic
# under the null hypothesis of equal scales is only 1.84%. We take this
# as evidence against the null hypothesis in favor of the alternative:
# the scales of the distributions from which the samples were drawn are
# not equal.
# We can use the ``alternative`` parameter to perform a one-sided test. In
# the example above the scale of x1 is greater than x3, so the ratio of
# the scales of x1 and x3 is greater than 1. This means the p-value when
# alternative='greater' should be close to 0 and we should therefore be
# able to reject the null hypothesis:
ansari(x1, x3, alternative='greater')
# As we can see, the p-value is indeed quite low. Using
# alternative='less' should therefore produce a large p-value:
ansari(x1, x3, alternative='less')
# scipy.stats.fligner
# Test whether the lists a, b and c come from populations with equal variances.
from scipy.stats import fligner
a = [8.88, 9.12, 9.04, 8.98, 9.00, 9.08, 9.01, 8.85, 9.06, 8.99]
b = [8.88, 8.95, 9.29, 9.44, 9.15, 9.58, 8.36, 9.18, 8.67, 9.05]
c = [8.95, 9.12, 8.95, 8.85, 9.03, 8.84, 9.07, 8.98, 8.86, 8.98]
stat, p = fligner(a, b, c)
p #pvalue=0.00450826080004775
# The small p-value suggests the populations do not have equal variances.
# This is not surprising given that the sample variance of b is much
# larger than that of a and c:
[np.var(x, ddof=1) for x in [a, b, c]] #[0.007054444444444413, 0.13073888888888888, 0.008890000000000002]
# Other variance tests
from scipy.stats import bartlett, levene
bartlett(a, b, c)
#BartlettResult(statistic=22.789434813726768, pvalue=1.1254782518834628e-05)
levene(a, b, c)
#LeveneResult(statistic=7.584952754501659, pvalue=0.002431505967249681)
#scipy.stats.jarque_bera
from scipy import stats
rng = np.random.default_rng()
x = rng.normal(0, 1, 100000)
jarque_bera_test = stats.jarque_bera(x)
jarque_bera_test
# Jarque_beraResult(statistic=3.3415184718131554, pvalue= 0.18810419594996775)
jarque_bera_test.statistic
# 3.3415184718131554
jarque_bera_test.pvalue
# 0.18810419594996775
# scipy.stats.kurtosistest
from scipy.stats import kurtosistest
kurtosistest(list(range(20)))
# KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.08804338332528348)
kurtosistest(list(range(20)), alternative='less')
# KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.04402169166264174)
kurtosistest(list(range(20)), alternative='greater')
# KurtosistestResult(statistic=-1.7058104152122062, pvalue=0.9559783083373583)
rng = np.random.default_rng()
s = rng.normal(0, 1, 1000)
kurtosistest(s)
#KurtosistestResult(statistic=-0.3188545786000282, pvalue=0.7498367888656665)
# statsmodels package: fit an OLS model y = 1 + 0.1*x + 10*x^2 + noise
import numpy as np
import pandas as pd
import statsmodels.api as sm
nsample = 100
x = np.linspace(0, 10, 100)
X = np.column_stack((x, x ** 2))
print(X[0:3])
beta = np.array([1, 0.1, 10])
e = np.random.normal(size=nsample)
print(e[0:3])
X = sm.add_constant(X)
print(X[0:3])
y = np.dot(X, beta) + e
print(y[0:3])
model = sm.OLS(y, X)
results = model.fit()
print(results.summary())
# OLS Regression Results
# ==============================================================================
# Dep. Variable: y R-squared: 1.000
# Model: OLS Adj. R-squared: 1.000
# Method: Least Squares F-statistic: 4.409e+06
# Date: Wed, 20 Oct 2021 Prob (F-statistic): 3.21e-241
# Time: 19:47:11 Log-Likelihood: -141.92
# No. Observations: 100 AIC: 289.8
# Df Residuals: 97 BIC: 297.6
# Df Model: 2
# Covariance Type: nonrobust
# ==============================================================================
# coef std err t P>|t| [0.025 0.975]
# ------------------------------------------------------------------------------
# const 1.2849 0.299 4.302 0.000 0.692 1.878
# x1 -0.0167 0.138 -0.121 0.904 -0.291 0.257
# x2 10.0099 0.013 749.389 0.000 9.983 10.036
# ==============================================================================
# Omnibus: 3.186 Durbin-Watson: 2.138
# Prob(Omnibus): 0.203 Jarque-Bera (JB): 1.927
# Skew: -0.061 Prob(JB): 0.382
# Kurtosis: 2.331 Cond. No. 144.
# ==============================================================================
print("Parameters: ", results.params)
print("R2: ", results.rsquared)
6655020 | import numpy as np
from gym.spaces import Box, Dict
from multiworld.core.multitask_env import MultitaskEnv
from multiworld.envs.env_util import (
get_stat_in_paths,
create_stats_ordered_dict,
)
import railrl.torch.pytorch_util as ptu
from railrl.envs.wrappers import ProxyEnv
from railrl.torch.core import PyTorchModule
class Encoder(object):
    """Interface for objects that embed inputs into a latent space."""

    def encode(self, input):
        """Map ``input`` to latent distribution parameters.

        :param input: batch of inputs to embed
        :return: latent_distribution_params
        """
        raise NotImplementedError()

    @property
    def min_embedding(self):
        """Per-dimension lower bound of the embedding space."""
        raise NotImplementedError()

    @property
    def max_embedding(self):
        """Per-dimension upper bound of the embedding space."""
        raise NotImplementedError()
class EncoderFromMlp(Encoder, PyTorchModule):
    """Adapter exposing a torch MLP through the ``Encoder`` interface."""

    def __init__(self, mlp):
        # NOTE(review): super(PyTorchModule, self) starts the MRO lookup
        # *after* PyTorchModule, skipping its __init__ -- confirm intended.
        super(PyTorchModule, self).__init__()
        self._mlp = mlp
        self._output_size = mlp.output_size

    def encode(self, x):
        """Run numpy input ``x`` through the MLP; return a numpy embedding."""
        x_torch = ptu.from_numpy(x)
        embedding_torch = self._mlp(x_torch)
        return ptu.get_numpy(embedding_torch)

    @property
    def max_embedding(self):
        # TODO: fix hack
        return 9999 * np.ones(self._output_size)

    @property
    def min_embedding(self):
        # Mirror of max_embedding's placeholder bound.
        return -9999 * np.ones(self._output_size)
class EncoderWrappedEnv(ProxyEnv, MultitaskEnv):
"""This class wraps an environment with an encoder.
Reward is defined as distance in this latent space.
"""
ENCODER_DISTANCE_REWARD = 'encoder_distance'
VECTORIZED_ENCODER_DISTANCE_REWARD = 'vectorized_encoder_distance'
ENV_REWARD = 'env'
def __init__(
self,
wrapped_env,
encoder: Encoder,
encoder_input_prefix,
key_prefix='encoder',
reward_mode='encoder_distance',
):
"""
:param wrapped_env:
:param encoder:
:param encoder_input_prefix:
:param key_prefix:
:param reward_mode:
- 'encoder_distance': l1 distance in encoder distance
- 'vectorized_encoder_distance': vectorized l1 distance in encoder
distance, i.e. negative absolute value
- 'env': use the wrapped env's reward
"""
super().__init__(wrapped_env)
if reward_mode not in {
self.ENCODER_DISTANCE_REWARD,
self.VECTORIZED_ENCODER_DISTANCE_REWARD,
self.ENV_REWARD,
}:
raise ValueError(reward_mode)
self._encoder = encoder
self._encoder_input_obs_key = '{}_observation'.format(
encoder_input_prefix)
self._encoder_input_desired_goal_key = '{}_desired_goal'.format(
encoder_input_prefix
)
self._encoder_input_achieved_goal_key = '{}_achieved_goal'.format(
encoder_input_prefix
)
self._reward_mode = reward_mode
spaces = self.wrapped_env.observation_space.spaces
latent_space = Box(
encoder.min_embedding,
encoder.max_embedding,
dtype=np.float32,
)
self._embedding_size = encoder.min_embedding.size
self._obs_key = '{}_observation'.format(key_prefix)
self._desired_goal_key = '{}_desired_goal'.format(key_prefix)
self._achieved_goal_key = '{}_achieved_goal'.format(key_prefix)
self._distance_name = '{}_distance'.format(key_prefix)
self._key_prefix = key_prefix
self._desired_goal = {
self._desired_goal_key: np.zeros_like(latent_space.sample())
}
spaces[self._obs_key] = latent_space
spaces[self._desired_goal_key] = latent_space
spaces[self._achieved_goal_key] = latent_space
self.observation_space = Dict(spaces)
self._goal_sampling_mode = 'env'
def reset(self):
obs = self.wrapped_env.reset()
self._update_obs(obs)
self._desired_goal = {
self._desired_goal_key:
self._encode_one(obs[self._encoder_input_desired_goal_key]),
**self.wrapped_env.get_goal()
}
return obs
def step(self, action):
obs, reward, done, info = self.wrapped_env.step(action)
self._update_obs(obs)
new_reward = self.compute_reward(
action,
obs,
)
self._update_info(info, obs, new_reward)
return obs, new_reward, done, info
def _update_obs(self, obs):
encoded_obs = self._encode_one(obs[self._encoder_input_obs_key])
obs[self._obs_key] = encoded_obs
obs[self._achieved_goal_key] = encoded_obs
obs[self._desired_goal_key] = self._desired_goal[self._desired_goal_key]
obs['observation'] = encoded_obs
obs['achieved_goal'] = encoded_obs
obs['desired_goal'] = self._desired_goal[self._desired_goal_key]
return obs
def _update_info(self, info, obs, new_reward):
achieved_goals = obs[self._achieved_goal_key]
desired_goals = obs[self._desired_goal_key]
dist = np.linalg.norm(desired_goals - achieved_goals, ord=1)
info[self._distance_name] = dist
"""
Multitask functions
"""
def sample_goals(self, batch_size):
if self._goal_sampling_mode == 'env':
goals = self.wrapped_env.sample_goals(batch_size)
latent_goals = self._encode(
goals[self._encoder_input_desired_goal_key])
else:
raise RuntimeError("Invalid: {}".format(self._goal_sampling_mode))
goals['desired_goal'] = latent_goals
goals[self._desired_goal_key] = latent_goals
return goals
@property
def goal_sampling_mode(self):
return self._goal_sampling_mode
@goal_sampling_mode.setter
def goal_sampling_mode(self, mode):
assert mode in [
'env',
], "Invalid env mode: {}".format(mode)
self._goal_sampling_mode = mode
@property
def goal_dim(self):
return self._embedding_size
def get_goal(self):
return self._desired_goal
def set_goal(self, goal):
self._desired_goal = goal
self.wrapped_env.set_goal(goal)
def compute_reward(self, action, obs):
actions = action[None]
next_obs = {
k: v[None] for k, v in obs.items()
}
reward = self.compute_rewards(actions, next_obs)
return reward[0]
def compute_rewards(self, actions, obs):
achieved_goals = obs[self._achieved_goal_key]
desired_goals = obs[self._desired_goal_key]
if self._reward_mode == self.VECTORIZED_ENCODER_DISTANCE_REWARD:
dist = np.abs(desired_goals - achieved_goals)
rewards = - dist
elif self._reward_mode == self.ENCODER_DISTANCE_REWARD:
dist = np.linalg.norm(desired_goals - achieved_goals, ord=1, axis=1)
rewards = - dist
elif self._reward_mode == self.ENV_REWARD:
rewards = self.wrapped_env.compute_rewards(actions, obs)
else:
raise ValueError('iNvalid reward mode: {}'.format(
self._reward_mode
))
return rewards
"""
Other functions
"""
def get_diagnostics(self, paths, **kwargs):
statistics = self.wrapped_env.get_diagnostics(paths, **kwargs)
for stat_name_in_paths in [self._distance_name]:
stats = get_stat_in_paths(paths, 'env_infos', stat_name_in_paths)
statistics.update(create_stats_ordered_dict(
stat_name_in_paths,
stats,
always_show_all_stats=True,
))
final_stats = [s[-1] for s in stats]
statistics.update(create_stats_ordered_dict(
"Final " + stat_name_in_paths,
final_stats,
always_show_all_stats=True,
))
return statistics
    def _encode_one(self, ob):
        # Encode a single observation by adding a batch axis, then unbatching.
        return self._encode(ob[None])[0]
    def _encode(self, obs):
        # Delegate batched encoding to the underlying encoder object.
        return self._encoder.encode(obs)
| StarcoderdataPython |
215855 | """Samples are print to stdout"""
import random
import argparse
from pathlib import Path
# Display name -> suffix of the corresponding prediction file under RESULTS_DIR
# (file name pattern: cnndm<suffix>.pred).
SOURCES = {
    "Random@0.1": "_baseline_random_10",
    "Head@0.1": "_baseline_head_10",
    "TextRank@0.1": "_original_10",
    "BM25+eps.25@0.1": "_bm25pluseps025_10",
    "USE-base@0.1": "_use_base_10",
    "USE-large@0.1": "_use_large_10",
    "USE-xling@0.1": "_use_xling_10",
}
# Locations of the CNN/DM source/target files and the model predictions.
DATA_DIR = Path("data/")
RESULTS_DIR = Path("results/")
def main(n_samples):
    """Print *n_samples* random test articles with reference and model summaries.

    Each sample re-reads the (large) source file so that only one copy is
    held in memory at a time, matching the original behaviour.
    """
    for i in range(n_samples):
        print("=" * 20)
        print(f"Sample {i + 1}")
        print("=" * 20)
        with open(str(DATA_DIR / "cnndm" / "test.txt.src")) as fin:
            articles = fin.readlines()
        # randrange(n) yields [0, n); the original randint(0, len(articles))
        # could return len(articles) and raise IndexError.
        sample_idx = random.randrange(len(articles))
        print("\n" + articles[sample_idx] + "\n")
        del articles  # free the article list before loading summaries
        with open(str(DATA_DIR / "cnndm" / "test.txt.tgt.tagged")) as fin:
            summaries = fin.readlines()
        print("-" * 20)
        print("Reference")
        print("-" * 20)
        # Strip the <t>...</t> sentence tags from the reference summary.
        print(summaries[sample_idx].replace(
            "<t> ", "").replace("</t>", "") + "\n")
        for name, suffix in SOURCES.items():
            with open(str(RESULTS_DIR / f"cnndm{suffix}.pred")) as fin:
                summaries = fin.readlines()
            print("-" * 20)
            print(name)
            print("-" * 20)
            print(summaries[sample_idx] + "\n")
        print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='Collect and print sample predictions.')
parser.add_argument('n', type=int, default=1, nargs='?',
help='Number of samples.')
args = parser.parse_args()
main(args.n)
| StarcoderdataPython |
384163 | import datetime, pytz
import time
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import regexp_replace
from pyspark.sql.functions import split
from pyspark.sql.functions import udf
from pyspark.sql.types import *
import matplotlib.pyplot as plt
import sys,tweepy,csv,re
from textblob import TextBlob
# Local Spark session for this standalone sentiment-analysis job.
spark = SparkSession\
    .builder\
    .appName("HashtagCount")\
    .getOrCreate()
df = spark.read.json("E:/phase2/data2.json")  # one tweet JSON object per line
date= df.select("created_at")
def dateMTest(dateval):
    """Parse Twitter's ``created_at`` timestamp string into a datetime."""
    parsed = datetime.datetime.strptime(dateval, '%a %b %d %H:%M:%S +0000 %Y')
    return parsed
# Register the parser as a Spark UDF producing a DateType column.
d = udf(dateMTest , DateType())
df=df.withColumn("created_date",d(date.created_at))
df.createOrReplaceTempView("cricket")
# NOTE(review): the filter mixes case-insensitive upper(text) LIKE '%INDIA%'
# with case-sensitive LIKE '%cricket%' — confirm both were intended.
sqldf= spark.sql("SELECT id,text,created_date FROM cricket WHERE 1=1 AND (upper(text) LIKE '%INDIA%'AND text LIKE '%cricket%')")
# Classify each matching tweet's polarity with TextBlob.
i=0
positive=0
neutral=0
negative=0
for t in sqldf.select("text").collect():
    i=i+1
    # print("It is ",i,str(t.text))
    analysis = TextBlob(str((t.text).encode('ascii', 'ignore')))
    print(analysis.sentiment.polarity)
    if (analysis.sentiment.polarity<0):
        negative=negative+1
        print(i," in negative")
    elif(analysis.sentiment.polarity==0.0):
        neutral=neutral+1
        print(i," in neutral")
    elif(analysis.sentiment.polarity>0):
        positive=positive+1
        print(i," in positive")
# NOTE(review): if no tweet matches the query, i == 0 and the divisions
# below raise ZeroDivisionError.
print("Total negative % is",((negative)*100)/i)
print("Total neutral % is",((neutral)*100)/i)
print("Total positive % is",((positive)*100)/i)
negative_percent=((negative)*100)/i
positive_percent=((positive)*100)/i
neutral_percent=((neutral)*100)/i
#Draw a donut pie chart
size_of_groups=[negative_percent,positive_percent,neutral_percent]
names='negative_percent', 'positive_percent', 'neutral_percent'
# Create a pieplot
plt.pie(size_of_groups,labels=names, colors=['red','green','blue'])
# add a circle at the center
my_circle=plt.Circle( (0,0), 0.7, color='white')
p=plt.gcf()
p.gca().add_artist(my_circle)
plt.title("Supporters for India")
plt.show()
4988948 | <gh_stars>1-10
import pandas as pd
import os
import tqdm
import networkx as nx
import mxnet as mx
import torch
import numpy as np
import json
from mmdet.ops.nms.nms_wrapper import nms
def soft_bbox_vote(det, vote_thresh, score_thresh=0.01):
    """Merge overlapping detections via score-weighted box voting (soft-NMS flavor).

    :param det: (N, 5) array of rows [x1, y1, x2, y2, score].
    :param vote_thresh: IoU threshold at/above which boxes are merged together.
    :param score_thresh: decayed boxes below this score are discarded.
    :return: (M, 5) array of merged detections, sorted by descending score.
    """
    if det.shape[0] <= 1:
        # Nothing to merge with 0 or 1 boxes.
        return det
    # Process boxes in descending score order.
    order = det[:, 4].ravel().argsort()[::-1]
    det = det[order, :]
    dets = []
    while det.shape[0] > 0:
        # IOU
        # IoU of the current top-scoring box against all remaining boxes
        # (pixel-inclusive coordinates, hence the +1 terms).
        area = (det[:, 2] - det[:, 0] + 1) * (det[:, 3] - det[:, 1] + 1)
        xx1 = np.maximum(det[0, 0], det[:, 0])
        yy1 = np.maximum(det[0, 1], det[:, 1])
        xx2 = np.minimum(det[0, 2], det[:, 2])
        yy2 = np.minimum(det[0, 3], det[:, 3])
        w = np.maximum(0.0, xx2 - xx1 + 1)
        h = np.maximum(0.0, yy2 - yy1 + 1)
        inter = w * h
        o = inter / (area[0] + area[:] - inter)
        # get needed merge det and delete these det
        merge_index = np.where(o >= vote_thresh)[0]
        det_accu = det[merge_index, :]
        det_accu_iou = o[merge_index]
        det = np.delete(det, merge_index, 0)
        if merge_index.shape[0] <= 1:
            # Only the box itself matched: keep it unchanged.
            try:
                dets = np.row_stack((dets, det_accu))
            except:
                dets = det_accu
            continue
        else:
            # Soft-NMS: decay each merged box's score by (1 - IoU) and keep
            # the decayed copies that stay above score_thresh.
            soft_det_accu = det_accu.copy()
            soft_det_accu[:, 4] = soft_det_accu[:, 4] * (1 - det_accu_iou)
            soft_index = np.where(soft_det_accu[:, 4] >= score_thresh)[0]
            soft_det_accu = soft_det_accu[soft_index, :]
            # Score-weighted average of the merged coordinates.
            det_accu[:, 0:4] = det_accu[:, 0:4] * np.tile(
                det_accu[:, -1:], (1, 4))
            # NOTE(review): despite the name, this is the MEAN of the merged
            # scores, not their maximum.
            max_score = np.mean(det_accu[:, 4])
            det_accu_sum = np.zeros((1, 5))
            det_accu_sum[:, 0:4] = np.sum(
                det_accu[:, 0:4], axis=0) / np.sum(det_accu[:, -1:])
            det_accu_sum[:, 4] = max_score
            if soft_det_accu.shape[0] > 0:
                det_accu_sum = np.row_stack((det_accu_sum, soft_det_accu))
            try:
                dets = np.row_stack((dets, det_accu_sum))
            except:
                dets = det_accu_sum
    # Final output sorted by descending score.
    order = dets[:, 4].ravel().argsort()[::-1]
    dets = dets[order, :]
    return dets
root = "submit/"
fileter_thresh =0.01
res1 = pd.read_csv("submit/atss50_1.csv")
# res1=res1[res1["confidence"]>=0.001]
res1['area'] = res1[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res1.shape)
# res1 = res1[res1['area']<=20000*20000]
res1 = res1[res1['area']>=48*48]
res1 = res1[res1['confidence']>fileter_thresh]
print(res1.shape)
res2 = pd.read_csv("submit/atss50_2.csv")
# res2=res2[res2["confidence"]>=0.001]
res2['area'] = res2[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res2.shape)
# res2 = res2[res2['area']<=20000*20000]
# res2 = res2[res2['area']>=48*48]
res2 = res2[res2['confidence']>fileter_thresh]
print(res2.shape)
res3 = pd.read_csv("submit/atss50_3.csv")
# res2=res2[res2["confidence"]>=0.001]
res3['area'] = res3[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res3.shape)
res3 = res3[res3['area']<=500*500]
# res3 = res3[res3['area']>=48*48]
res3 = res3[res3['confidence']>fileter_thresh]
print(res3.shape)
res4 = pd.read_csv("submit/atss101_1.csv")
# res2=res2[res2["confidence"]>=0.001]
res4['area'] = res4[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res4.shape)
# res4 = res4[res4['area']<=20000*20000]
res4 = res4[res4['area']>=48*48]
res4 = res4[res4['confidence']>fileter_thresh]
print(res4.shape)
res5 = pd.read_csv("submit/atss101_2.csv")
# res2=res2[res2["confidence"]>=0.001]
res5['area'] = res5[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res5.shape)
# res5 = res5[res5['area']<=20000*20000]
# res5 = res5[res5['area']>=48*48]
res5 = res5[res5['confidence']>fileter_thresh]
print(res5.shape)
res6 = pd.read_csv("submit/atss101_3.csv")
# res2=res2[res2["confidence"]>=0.001]
res6['area'] = res6[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res6.shape)
res6 = res6[res6['area']<=500*500]
# res6 = res6[res6['area']>=48*48]
res6 = res6[res6['confidence']>fileter_thresh]
print(res6.shape)
res7 = pd.read_csv("submit/atss50_4.csv")
# res2=res2[res2["confidence"]>=0.001]
res7['area'] = res7[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res7.shape)
res7 = res7[res7['area']<=400*400]
# res7 = res7[res7['area']<=48*48]
res7 = res7[res7['confidence']>fileter_thresh]
print(res7.shape)
res8 = pd.read_csv("submit/atss101_4.csv")
# res2=res2[res2["confidence"]>=0.001]
res8['area'] = res8[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res8.shape)
res8 = res8[res8['area']<=400*400]
# res8 = res8[res8['area']>=48*48]
res8 = res8[res8['confidence']>fileter_thresh]
print(res8.shape)
# res9 = pd.read_csv("submit/atss50_5.csv")
# # res2=res2[res2["confidence"]>=0.001]
# res9['area'] = res9[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
# print(res9.shape)
# res9 = res9[res9['area']<=270*270]
# # res9 = res9[res9['area']<=48*48]
# res9 = res9[res9['confidence']>fileter_thresh]
# print(res9.shape)
#
# res10 = pd.read_csv("submit/atss101_5.csv")
# # res2=res2[res2["confidence"]>=0.001]
# res10['area'] = res10[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
# print(res10.shape)
# res10 = res10[res10['area']<=270*270]
# # res10 = res10[res10['area']>=48*48]
# res10 = res10[res10['confidence']>fileter_thresh]
# print(res10.shape)
res9 = pd.read_csv("submit/atss101_6.csv")
# res2=res2[res2["confidence"]>=0.001]
res9['area'] = res9[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res9.shape)
res9 = res9[res9['area']>=56*56]
# res9 = res9[res9['area']<=48*48]
res9 = res9[res9['confidence']>fileter_thresh]
print(res9.shape)
res10 = pd.read_csv("submit/atss101_7.csv")
# res2=res2[res2["confidence"]>=0.001]
res10['area'] = res10[['xmin','xmax','ymin','ymax']].apply(lambda x:(x.xmax-x.xmin+1)*(x.ymax-x.ymin+1),axis=1)
print(res10.shape)
res10 = res10[res10['area']>=48*48]
# res10 = res10[res10['area']>=48*48]
res10 = res10[res10['confidence']>fileter_thresh]
print(res10.shape)
deal_nms = pd.concat([res1,res2,res3,res4,res5,res6,res7,res8,res9,res10])
print(deal_nms.shape)
final = pd.DataFrame()
name = []
image_id = []
con = []
xmin = []
xmax = []
ymin = []
ymax = []
# Per image and per class, merge the pooled ensemble detections with
# soft box voting, then accumulate the survivors column-wise.
for filename in tqdm.tqdm(deal_nms['image_id'].unique()):
    for defect_label in ['echinus', 'starfish', 'scallop', 'holothurian']:
        base_dets = deal_nms[deal_nms['image_id'] == filename]
        base_dets = base_dets[base_dets['name'] == defect_label]
        dets = np.array(base_dets[['xmin', 'ymin', 'xmax', 'ymax', 'confidence']])
        iou_thr = 0.62
        keep_boxes = soft_bbox_vote(dets, iou_thr, fileter_thresh)
        # Each row is exactly [x1, y1, x2, y2, score]; the original wrapped
        # the iteration in a needless zip() and indexed bbox[0][:5].
        for x1, y1, x2, y2, score in keep_boxes:
            # NOTE(review): x coords are shifted to 1-based and clamped but
            # y coords are not — confirm this asymmetry is intended.
            xmin.append(max(1, 1 + round(x1)))
            xmax.append(max(1, 1 + round(x2)))
            ymin.append(round(y1))
            ymax.append(round(y2))
            con.append(score)
            name.append(defect_label)
            image_id.append(filename)
final['xmin'] = xmin
final['xmax'] = xmax
final['ymin'] = ymin
final['ymax'] = ymax
final['name'] = name
final['image_id'] = image_id
final['confidence'] = con
final[['name', 'image_id', 'confidence', 'xmin', 'ymin', 'xmax', 'ymax']].to_csv("submit/testB_10.csv", index=False)
| StarcoderdataPython |
3504992 | import numpy as np
import tensorflow as tf
from spektral.data.utils import (
batch_generator,
collate_labels_disjoint,
get_spec,
prepend_none,
sp_matrices_to_sp_tensors,
to_batch,
to_disjoint,
to_mixed,
to_tf_signature,
)
# tf.data-based loading needs TensorFlow >= 2.4.
version = tf.__version__.split(".")
major, minor = int(version[0]), int(version[1])
# Compare as a (major, minor) tuple: the original "major >= 2 and minor >= 4"
# wrongly reports False for versions like 3.0 (minor 0 < 4).
tf_loader_available = (major, minor) >= (2, 4)
class Loader:
    """
    Parent class for data loaders. The role of a Loader is to iterate over a
    Dataset and yield batches of graphs to feed your Keras Models.
    This is achieved by having a generator object that produces lists of Graphs,
    which are then collated together and returned as Tensors.
    The core of a Loader is the `collate(batch)` method.
    This takes as input a list of `Graph` objects and returns a list of Tensors,
    np.arrays, or SparseTensors.
    For instance, if all graphs have the same number of nodes and size of the
    attributes, a simple collation function can be:
    ```python
    def collate(self, batch):
        x = np.array([g.x for g in batch])
        a = np.array([g.a for g in batch)]
        return x, a
    ```
    The `load()` method of a Loader returns an object that can be passed to a Keras
    model when using the `fit`, `predict` and `evaluate` functions.
    You can use it as follows:
    ```python
    model.fit(loader.load(), steps_per_epoch=loader.steps_per_epoch)
    ```
    The `steps_per_epoch` property represents the number of batches that are in
    an epoch, and is a required keyword when calling `fit`, `predict` or `evaluate`
    with a Loader.
    If you are using a custom training function, you can specify the input signature
    of your batches with the tf.TypeSpec system to avoid unnecessary re-tracings.
    The signature is computed automatically by calling `loader.tf_signature()`.
    For example, a simple training step can be written as:
    ```python
    @tf.function(input_signature=loader.tf_signature())  # Specify signature here
    def train_step(inputs, target):
        with tf.GradientTape() as tape:
            predictions = model(inputs, training=True)
            loss = loss_fn(target, predictions)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    ```
    We can then train our model in a loop as follows:
    ```python
    for batch in loader:
        train_step(*batch)
    ```
    **Arguments**
    - `dataset`: a `spektral.data.Dataset` object;
    - `batch_size`: size of the mini-batches;
    - `epochs`: number of epochs to iterate over the dataset. By default (`None`)
    iterates indefinitely;
    - `shuffle`: whether to shuffle the dataset at the start of each epoch.
    """
    def __init__(self, dataset, batch_size=1, epochs=None, shuffle=True):
        self.dataset = dataset
        self.batch_size = batch_size
        self.epochs = epochs
        self.shuffle = shuffle
        # Generator of lists of Graphs; created once and consumed by __next__.
        self._generator = self.generator()
    def __iter__(self):
        return self
    def __next__(self):
        # Pull the next raw list of Graphs and collate it into tensors.
        nxt = self._generator.__next__()
        return self.collate(nxt)
    def generator(self):
        """
        Returns lists (batches) of `Graph` objects.
        """
        return batch_generator(
            self.dataset,
            batch_size=self.batch_size,
            epochs=self.epochs,
            shuffle=self.shuffle,
        )
    def collate(self, batch):
        """
        Converts a list of graph objects to Tensors or np.arrays representing the batch.
        :param batch: a list of `Graph` objects.
        """
        raise NotImplementedError
    def load(self):
        """
        Returns an object that can be passed to a Keras model when using the `fit`,
        `predict` and `evaluate` functions.
        By default, returns the Loader itself, which is a generator.
        """
        return self
    def tf_signature(self):
        """
        Returns the signature of the collated batches using the tf.TypeSpec system.
        By default, the signature is that of the dataset (`dataset.signature`):
        - Adjacency matrix has shape `[n_nodes, n_nodes]`
        - Node features have shape `[n_nodes, n_node_features]`
        - Edge features have shape `[n_edges, n_node_features]`
        - Targets have shape `[..., n_labels]`
        """
        signature = self.dataset.signature
        return to_tf_signature(signature)
    def pack(self, batch):
        """
        Given a batch of graphs, groups their attributes into separate lists and packs
        them in a dictionary.
        For instance, if a batch has three graphs g1, g2 and g3 with node
        features (x1, x2, x3) and adjacency matrices (a1, a2, a3), this method
        will return a dictionary:
        ```python
        >>> {'a_list': [a1, a2, a3], 'x_list': [x1, x2, x3]}
        ```
        :param batch: a list of `Graph` objects.
        """
        # Transpose [graph-wise tuples] into [attribute-wise lists].
        output = [list(elem) for elem in zip(*[g.numpy() for g in batch])]
        keys = [k + "_list" for k in self.dataset.signature.keys()]
        return dict(zip(keys, output))
    @property
    def steps_per_epoch(self):
        """
        :return: the number of batches of size `self.batch_size` in the dataset (i.e.,
        how many batches are in an epoch).
        """
        return int(np.ceil(len(self.dataset) / self.batch_size))
class SingleLoader(Loader):
    """
    A Loader for [single mode](https://graphneural.network/data-modes/#single-mode).
    This loader produces Tensors representing a single graph. As such, it can
    only be used with Datasets of length 1 and the `batch_size` cannot be set.
    The loader supports sample weights through the `sample_weights` argument.
    If given, then each batch will be a tuple `(inputs, labels, sample_weights)`.
    **Arguments**
    - `dataset`: a `spektral.data.Dataset` object with only one graph;
    - `epochs`: number of epochs to iterate over the dataset. By default (`None`)
    iterates indefinitely;
    - `shuffle`: whether to shuffle the data at the start of each epoch;
    - `sample_weights`: if given, these will be appended to the output
    automatically.
    **Output**
    Returns a tuple `(inputs, labels)` or `(inputs, labels, sample_weights)`.
    `inputs` is a tuple containing the data matrices of the graph, only if they
    are not `None`:
    - `x`: same as `dataset[0].x`;
    - `a`: same as `dataset[0].a` (scipy sparse matrices are converted to
    SparseTensors);
    - `e`: same as `dataset[0].e`;
    `labels` is the same as `dataset[0].y`.
    `sample_weights` is the same object passed to the constructor.
    """
    def __init__(self, dataset, epochs=None, sample_weights=None):
        if len(dataset) != 1:
            raise ValueError(
                "SingleLoader can only be used with Datasets that"
                "have a single graph."
            )
        self.sample_weights = sample_weights
        # Single mode: one graph per "batch", never shuffled.
        super().__init__(dataset, batch_size=1, epochs=epochs, shuffle=False)
    def collate(self, batch):
        packed = self.pack(batch)
        y = packed.pop("y_list", None)
        if y is not None:
            # With a single graph, labels are node-level by definition.
            y = collate_labels_disjoint(y, node_level=True)
        output = to_disjoint(**packed)
        output = output[:-1]  # Discard batch index
        output = sp_matrices_to_sp_tensors(output)
        if len(output) == 1:
            output = output[0]
        output = (output,)
        if y is not None:
            output += (y,)
        if self.sample_weights is not None:
            output += (self.sample_weights,)
        if len(output) == 1:
            output = output[0]  # Again, in case there are no targets and no SW
        return output
    def load(self):
        # Collate once and repeat the same tensors for `epochs` iterations.
        output = self.collate(self.dataset)
        return tf.data.Dataset.from_tensors(output).repeat(self.epochs)
class DisjointLoader(Loader):
    """
    A Loader for [disjoint mode](https://graphneural.network/data-modes/#disjoint-mode).
    This loader represents a batch of graphs via their disjoint union.
    The loader automatically computes a batch index tensor, containing integer
    indices that map each node to its corresponding graph in the batch.
    The adjacency matrix os returned as a SparseTensor, regardless of the input.
    If `node_level=False`, the labels are interpreted as graph-level labels and
    are stacked along an additional dimension.
    If `node_level=True`, then the labels are stacked vertically.
    **Note:** TensorFlow 2.4 or above is required to use this Loader's `load()`
    method in a Keras training loop.
    **Arguments**
    - `dataset`: a graph Dataset;
    - `batch_size`: size of the mini-batches;
    - `epochs`: number of epochs to iterate over the dataset. By default (`None`)
    iterates indefinitely;
    - `shuffle`: whether to shuffle the data at the start of each epoch.
    **Output**
    For each batch, returns a tuple `(inputs, labels)`.
    `inputs` is a tuple containing:
    - `x`: node attributes of shape `[n_nodes, n_node_features]`;
    - `a`: adjacency matrices of shape `[n_nodes, n_nodes]`;
    - `e`: edge attributes of shape `[n_edges, n_edge_features]`.
    `labels` have shape `[batch, n_labels]` if `node_level=False` or
    `[n_nodes, n_labels]` otherwise.
    """
    def __init__(
        self, dataset, node_level=False, batch_size=1, epochs=None, shuffle=True
    ):
        self.node_level = node_level
        super().__init__(dataset, batch_size=batch_size, epochs=epochs, shuffle=shuffle)
    def collate(self, batch):
        packed = self.pack(batch)
        y = packed.pop("y_list", None)
        if y is not None:
            y = collate_labels_disjoint(y, node_level=self.node_level)
        # Unlike SingleLoader, the batch index tensor is kept in the output.
        output = to_disjoint(**packed)
        output = sp_matrices_to_sp_tensors(output)
        if len(output) == 1:
            output = output[0]
        if y is None:
            return output
        else:
            return output, y
    def load(self):
        # output_signature for from_generator requires TF >= 2.4.
        if not tf_loader_available:
            raise RuntimeError(
                "Calling DisjointLoader.load() requires " "TensorFlow 2.4 or greater."
            )
        return tf.data.Dataset.from_generator(
            lambda: self, output_signature=self.tf_signature()
        )
    def tf_signature(self):
        """
        Adjacency matrix has shape [n_nodes, n_nodes]
        Node features have shape [n_nodes, n_node_features]
        Edge features have shape [n_edges, n_edge_features]
        Targets have shape [..., n_labels]
        """
        signature = self.dataset.signature
        if "y" in signature:
            signature["y"]["shape"] = prepend_none(signature["y"]["shape"])
        if "a" in signature:
            signature["a"]["spec"] = tf.SparseTensorSpec
        # "i" is the node-to-graph batch index produced by to_disjoint.
        signature["i"] = dict()
        signature["i"]["spec"] = tf.TensorSpec
        signature["i"]["shape"] = (None,)
        signature["i"]["dtype"] = tf.as_dtype(tf.int64)
        return to_tf_signature(signature)
class BatchLoader(Loader):
    """
    A Loader for [batch mode](https://graphneural.network/data-modes/#batch-mode).
    This loader returns batches of graphs stacked along an extra dimension,
    with all "node" dimensions padded to be equal among all graphs.
    If `n_max` is the number of nodes of the biggest graph in the batch, then
    the padding consist of adding zeros to the node features, adjacency matrix,
    and edge attributes of each graph so that they have shapes
    `(n_max, n_node_features)`, `(n_max, n_max)`, and
    `(n_max, n_max, n_edge_features)` respectively.
    The zero-padding is done batch-wise, which saves up memory at the cost of
    more computation. If latency is an issue but memory isn't, or if the
    dataset has graphs with a similar number of nodes, you can use
    the `PackedBatchLoader` that first zero-pads all the dataset and then
    iterates over it.
    Note that the adjacency matrix and edge attributes are returned as dense
    arrays (mostly due to the lack of support for sparse tensor operations for
    rank >2).
    Only graph-level labels are supported with this loader (i.e., labels are not
    zero-padded because they are assumed to have no "node" dimensions).
    **Arguments**
    - `dataset`: a graph Dataset;
    - `mask`: if True, node attributes will be extended with a binary mask that
    indicates valid nodes (the last feature of each node will be 1 if the node is valid
    and 0 otherwise). Use this flag in conjunction with layers.base.GraphMasking to
    start the propagation of masks in a model.
    - `batch_size`: size of the mini-batches;
    - `epochs`: number of epochs to iterate over the dataset. By default (`None`)
    iterates indefinitely;
    - `shuffle`: whether to shuffle the data at the start of each epoch.
    **Output**
    For each batch, returns a tuple `(inputs, labels)`.
    `inputs` is a tuple containing:
    - `x`: node attributes of shape `[batch, n_max, n_node_features]`;
    - `a`: adjacency matrices of shape `[batch, n_max, n_max]`;
    - `e`: edge attributes of shape `[batch, n_max, n_max, n_edge_features]`.
    `labels` have shape `[batch, n_labels]`.
    """
    def __init__(self, dataset, mask=False, batch_size=1, epochs=None, shuffle=True):
        self.mask = mask
        super().__init__(dataset, batch_size=batch_size, epochs=epochs, shuffle=shuffle)
    def collate(self, batch):
        packed = self.pack(batch)
        y = packed.pop("y_list", None)
        if y is not None:
            y = np.array(y)
        # to_batch appends the binary validity mask as an extra node feature
        # only when self.mask is True.
        output = to_batch(**packed, mask=self.mask)
        output = sp_matrices_to_sp_tensors(output)
        if len(output) == 1:
            output = output[0]
        if y is None:
            return output
        else:
            return output, y
    def tf_signature(self):
        """
        Adjacency matrix has shape [batch, n_nodes, n_nodes]
        Node features have shape [batch, n_nodes, n_node_features]
        Edge features have shape [batch, n_nodes, n_nodes, n_edge_features]
        Targets have shape [batch, ..., n_labels]
        """
        signature = self.dataset.signature
        for k in signature:
            signature[k]["shape"] = prepend_none(signature[k]["shape"])
        if "x" in signature and self.mask:
            # The mask adds one extra node feature, but ONLY when mask=True.
            # The original bumped the feature dimension unconditionally,
            # making the signature disagree with collate() for mask=False.
            signature["x"]["shape"] = signature["x"]["shape"][:-1] + (
                signature["x"]["shape"][-1] + 1,
            )
        if "a" in signature:
            # Adjacency matrix in batch mode is dense
            signature["a"]["spec"] = tf.TensorSpec
        if "e" in signature:
            # Edge attributes have an extra None dimension in batch mode
            signature["e"]["shape"] = prepend_none(signature["e"]["shape"])
        return to_tf_signature(signature)
class PackedBatchLoader(BatchLoader):
    """
    A `BatchLoader` that zero-pads the graphs before iterating over the dataset.
    This means that `n_max` is computed over the whole dataset and not just
    a single batch.
    While using more memory than `BatchLoader`, this loader should reduce the
    computational overhead of padding each batch independently.
    Use this loader if:
    - memory usage isn't an issue and you want to produce the batches as fast
    as possible;
    - the graphs in the dataset have similar sizes and there are no outliers in
    the dataset (i.e., anomalous graphs with many more nodes than the dataset
    average).
    **Arguments**
    - `dataset`: a graph Dataset;
    - `batch_size`: size of the mini-batches;
    - `epochs`: number of epochs to iterate over the dataset. By default (`None`)
    iterates indefinitely;
    - `shuffle`: whether to shuffle the data at the start of each epoch.
    **Output**
    For each batch, returns a tuple `(inputs, labels)`.
    `inputs` is a tuple containing:
    - `x`: node attributes of shape `[batch, n_max, n_node_features]`;
    - `a`: adjacency matrices of shape `[batch, n_max, n_max]`;
    - `e`: edge attributes of shape `[batch, n_max, n_max, n_edge_features]`.
    `labels` have shape `[batch, ..., n_labels]`.
    """
    def __init__(self, dataset, mask=False, batch_size=1, epochs=None, shuffle=True):
        super().__init__(
            dataset, mask=mask, batch_size=batch_size, epochs=epochs, shuffle=shuffle
        )
        # Drop the Dataset container and work on packed tensors directly
        packed = self.pack(self.dataset)
        y = packed.pop("y_list", None)
        if y is not None:
            y = np.array(y)
        # Keep the original signature around: self.dataset is replaced below.
        self.signature = dataset.signature
        self.dataset = to_batch(**packed, mask=mask)
        if y is not None:
            self.dataset += (y,)
        # Re-instantiate generator after packing dataset
        self._generator = self.generator()
    def collate(self, batch):
        if len(batch) == 2:
            # If there is only one input, i.e., batch = [x, y], we unpack it
            # like this because Keras does not support input lists with only
            # one tensor.
            return batch[0], batch[1]
        else:
            return batch[:-1], batch[-1]
    def tf_signature(self):
        """
        Adjacency matrix has shape [batch, n_nodes, n_nodes]
        Node features have shape [batch, n_nodes, n_node_features]
        Edge features have shape [batch, n_nodes, n_nodes, n_edge_features]
        Targets have shape [batch, ..., n_labels]
        """
        signature = self.signature
        for k in signature:
            signature[k]["shape"] = prepend_none(signature[k]["shape"])
        if "x" in signature and self.mask:
            # Same fix as BatchLoader: the mask feature exists only when
            # mask=True, so only then does the feature dimension grow by one.
            signature["x"]["shape"] = signature["x"]["shape"][:-1] + (
                signature["x"]["shape"][-1] + 1,
            )
        if "a" in signature:
            # Adjacency matrix in batch mode is dense
            signature["a"]["spec"] = tf.TensorSpec
        if "e" in signature:
            # Edge attributes have an extra None dimension in batch mode
            signature["e"]["shape"] = prepend_none(signature["e"]["shape"])
        return to_tf_signature(signature)
    @property
    def steps_per_epoch(self):
        # After packing, self.dataset is a tuple of stacked tensors whose
        # first element has one entry per graph.
        # NOTE(review): implicitly returns None for an empty dataset.
        if len(self.dataset) > 0:
            return int(np.ceil(len(self.dataset[0]) / self.batch_size))
class MixedLoader(Loader):
    """
    A Loader for [mixed mode](https://graphneural.network/data-modes/#mixed-mode).
    This loader returns batches where the node and edge attributes are stacked
    along an extra dimension, but the adjacency matrix is shared by all graphs.
    The loader expects all node and edge features to have the same number of
    nodes and edges.
    The dataset is pre-packed like in a PackedBatchLoader.
    **Arguments**
    - `dataset`: a graph Dataset;
    - `batch_size`: size of the mini-batches;
    - `epochs`: number of epochs to iterate over the dataset. By default (`None`)
    iterates indefinitely;
    - `shuffle`: whether to shuffle the data at the start of each epoch.
    **Output**
    For each batch, returns a tuple `(inputs, labels)`.
    `inputs` is a tuple containing:
    - `x`: node attributes of shape `[batch, n_nodes, n_node_features]`;
    - `a`: adjacency matrix of shape `[n_nodes, n_nodes]`;
    - `e`: edge attributes of shape `[batch, n_edges, n_edge_features]`.
    `labels` have shape `[batch, ..., n_labels]`.
    """
    def __init__(self, dataset, batch_size=1, epochs=None, shuffle=True):
        assert dataset.a is not None, (
            "Dataset must be in mixed mode, with only "
            "one adjacency matrix stored in the "
            "dataset's `a` attribute.\n"
            "If your dataset does not have an "
            "adjacency matrix, you can use a "
            "BatchLoader or PackedBatchLoader instead."
        )
        assert "a" not in dataset.signature, (
            "Datasets in mixed mode should not"
            "have the adjacency matrix stored"
            "in their Graph objects."
        )
        super().__init__(dataset, batch_size=batch_size, epochs=epochs, shuffle=shuffle)
    def collate(self, batch):
        packed = self.pack(batch)
        # Mixed mode: the single shared adjacency matrix lives on the dataset.
        packed["a"] = self.dataset.a
        y = packed.pop("y_list", None)
        if y is not None:
            y = np.array(y)
        output = to_mixed(**packed)
        output = sp_matrices_to_sp_tensors(output)
        if len(output) == 1:
            output = output[0]
        if y is None:
            return output
        else:
            return output, y
    def tf_signature(self):
        """
        Adjacency matrix has shape [n_nodes, n_nodes]
        Node features have shape [batch, n_nodes, n_node_features]
        Edge features have shape [batch, n_edges, n_edge_features]
        Targets have shape [batch, ..., n_labels]
        """
        signature = self.dataset.signature
        for k in ["x", "e", "y"]:
            if k in signature:
                signature[k]["shape"] = prepend_none(signature[k]["shape"])
        # The shared adjacency matrix has no batch dimension.
        signature["a"] = dict()
        signature["a"]["spec"] = get_spec(self.dataset.a)
        signature["a"]["shape"] = (None, None)
        signature["a"]["dtype"] = tf.as_dtype(self.dataset.a.dtype)
        return to_tf_signature(signature)
| StarcoderdataPython |
__author__ = 'rv'
from net.asserter.crawler.economic_indicator import YahooFinance
# Entry-point stub: the actual crawler calls below are left disabled.
if __name__ == "__main__":
    print("Hello")
    # yahoo_finance = yahoo_finance()
    # economic_indicators.cec(from_yahoo=True,is_save=True)
4906544 | #Faça um programa que tenha uma função chamada escreva(), que receba um texto qualquer como parâmetro e mostre uma
#mensagem com tamanho adaptável.
def txt(msg):
    """Print *msg* framed above and below by a dash ruler of matching width."""
    ruler = '-' * len(msg)
    print(ruler)
    print(msg)
    print(ruler)
# Demo: each banner adapts to its message's length.
txt('Oi')
txt('Como você está?')
txt('Obrigado!')
3321093 | from datetime import date
# Interactive script: collect worker data into a dict and report it.
trab = dict()
trab['nome'] = str(input('Nome: ').strip().title())
trab['Ano de Nascimento'] = int(input('Ano de nascimento: '))
# Age derived from the current year only (birthday not considered).
idade = date.today().year - trab['Ano de Nascimento']
trab['ctps'] = int(input('Carteira de trabalho(0 se não tiver): '))
print('-=+' * 20)
if trab['ctps'] == 0:
    # No work permit: print the collected fields, flagging the zeroed ctps.
    # NOTE(review): `i == trab['ctps']` compares against the value 0, so any
    # other field that happens to be 0 would also be flagged — confirm intent.
    for c, i in trab.items():
        if i == trab['ctps']:
            print(f' -o {c} tem valor {i} pois o usuário não tem carteira de trabalho')
        else:
            print(f' -o {c} é {i}')
else:
    # With a work permit, also collect hiring year and salary.
    trab['Ano de Contratação'] = int(input('Ano de contratação: '))
    trab['salário'] = float(input('Salário: R$'))
    print('-=+'*20)
    for c, i in trab.items():
        print(f' -o {c} é {i}')
    # NOTE(review): retirement age 68 is hard-coded; a user older than 68
    # gets a negative "years until retirement" — confirm intent.
    print(f'O usuário {trab["nome"]} tem {idade} anos\nSe aposenta com {68-idade} anos')
| StarcoderdataPython |
1917682 | #!/usr/bin/env python
from setuptools import setup,find_packages
import sys
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
    """`python setup.py test` command that delegates to pytest with coverage."""
    # Extra CLI arguments forwarded verbatim to pytest.
    user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
    def initialize_options(self):
        TestCommand.initialize_options(self)
        self.pytest_args = ""
    def run_tests(self):
        # import here, cause outside the eggs aren't loaded
        import shlex
        import pytest
        # Always collect coverage and JUnit XML; skip the vendored src tree.
        self.pytest_args += " --cov=em_stitch --cov-report html "\
            "--junitxml=test-reports/test.xml " \
            "--ignore=src"
        errno = pytest.main(shlex.split(self.pytest_args))
        # Propagate pytest's exit status to the shell.
        sys.exit(errno)
# Requirements are read from the pinned text files at build time.
with open('test_requirements.txt', 'r') as f:
    test_required = f.read().splitlines()
with open('requirements.txt', 'r') as f:
    required = f.read().splitlines()
# Package version is derived from git tags via setuptools_scm.
setup(name='em_stitch',
      use_scm_version=True,
      description='a python package for stitching EM images',
      author_email='<EMAIL>',
      url='https://github.com/AllenInstitute/em_stitch',
      packages=find_packages(),
      setup_requires=['setuptools_scm'],
      install_requires=required,
      tests_require=test_required,
      cmdclass={'test': PyTest})
| StarcoderdataPython |
6567435 | from pyspark import SparkConf, SparkContext
def loadMovieNames(path="ml-latest/movies.csv"):
    """Return a {movieId: title} dict read from the MovieLens movies CSV.

    Fixes two defects in the original:
    - ``line.split(",")`` broke on quoted titles that contain commas
      (e.g. ``"American President, The (1995)"``); the csv module parses
      them correctly.
    - ``fields[1].decode('ascii', 'ignore')`` is Python 2 only — str has
      no .decode() in Python 3, which this script otherwise targets.

    :param path: CSV location; defaults to the original hard-coded path so
        existing calls are unchanged.
    """
    import csv  # local import keeps this fix self-contained

    movieNames = {}
    with open(path, newline='') as f:
        reader = csv.reader(f)
        next(reader, None)  # skip the header row
        for fields in reader:
            movieNames[int(fields[0])] = fields[1]
    return movieNames
def map1(movie_list):
    """Build "stripes" of pair co-occurrence counts from one user's movie list.

    For each movie id m_i (sorted ascending) returns [m_i, {m_j: count}] for
    every later movie m_j in the list.

    Bug fix: the original loops (`range(l - 2)` and `range(i + 1, l - 1)`)
    skipped the last movie entirely, so every pair involving the largest id
    was dropped. Also stops shadowing the builtin `list`.

    :param movie_list: iterable of movie-id strings (or ints).
    :return: list of [movie_id, stripe-dict] pairs.
    """
    stripes = []
    movie_list = sorted(map(int, movie_list))
    l = len(movie_list)
    for i in range(l - 1):
        counts = {}
        for j in range(i + 1, l):
            counts[movie_list[j]] = counts.get(movie_list[j], 0) + 1
        stripes.append([movie_list[i], counts])
    return stripes
def reduce1(movie_dic, data):
    """Merge two stripes: add the counts from *data* into *movie_dic*.

    *movie_dic* is mutated in place and returned (reduceByKey combiner).
    """
    merged = movie_dic
    for movie, count in data.items():
        merged[movie] = merged.get(movie, 0) + count
    return merged
def map2(data):
    """Render every co-occurrence above the support threshold (50) as a CSV line.

    *data* is a ``(movie_id, stripe)`` pair; titles are resolved through the
    module-level ``movieNameDictionary``.
    """
    anchor, counts = data
    return [
        movieNameDictionary[anchor] + "," + movieNameDictionary[other] + "," + str(count)
        for other, count in counts.items()
        if count > 50
    ]
# Run locally on all available cores.
conf = SparkConf().setMaster("local[*]").setAppName("Frequent_Stripes")
sc = SparkContext(conf=conf)
text_file = sc.textFile("input/ratings.csv")
movieNameDictionary = loadMovieNames()
# Split each CSV line into fields and drop leading rows via zipWithIndex.
# NOTE(review): `tup[1] > 1` drops indices 0 AND 1, i.e. the header plus the
# first data row -- confirm whether that is intentional.
out1 = text_file.map(lambda line: line.strip().split(",")).zipWithIndex().filter(lambda tup: tup[1] > 1).map(lambda x:x[0])
# Keep only ratings of 4.0 stars or higher (fields: userId, movieId, rating).
out2 = out1.filter(lambda a: float(a[2]) >= 4.0)
# Collect each user's liked movie ids into one comma-separated string.
out3 = out2.map(lambda a: (a[0], a[1])).reduceByKey(lambda x, y: x + ',' + y).map(lambda x: x[1])
# Emit (movie, stripe) pairs for every user's movie list.
out4 = out3.map(lambda line: line.strip().split(",")).flatMap(map1)
# Merge stripes per movie across all users.
out5 = out4.reduceByKey(reduce1, numPartitions=16)
# Keep pairs seen more than 50 times, formatted as "title,title,count".
out6 = out5.flatMap(map2)
# NOTE(review): RDD.sample expects a fraction in [0, 1]; 20 looks like a
# bug -- confirm the intended sampling fraction before relying on output.
out6.sample(False, 20).saveAsTextFile("spark_output/stripes/100p")
1848798 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pathlib import Path
import typer
from toolz import pipe
import snappycli.auth as auth
import snappycli.client as client
app = typer.Typer()
def exception_handler(func):
    """Decorator for CLI helpers: echo any exception and abort the program.

    Any exception raised by *func* is printed via typer.echo and converted
    into typer.Abort so the CLI exits with a non-zero status instead of a
    traceback.
    """
    from functools import wraps

    # IMPROVEMENT: preserve the wrapped function's name/docstring so
    # introspection (and typer's signature inspection) keeps working.
    @wraps(func)
    def inner_func(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception as e:
            typer.echo(e)
            raise typer.Abort()
    return inner_func
@exception_handler
def _post_file(url: str, token: str, filepath: Path):
    """Upload *filepath* to *url* with bearer *token*; aborts the CLI on failure."""
    return client.post_file(
        url, token, filepath
    )
@exception_handler
def _login(url: str, username: str, password: str):
    """Fetch a JWT from the server, record it in the auth store, and persist it."""
    pipe(
        auth.add(client.token(f'{url}/auth/jwt/login', username, password)),
        auth.save
    )
@app.command()
def script_login(
    # Credentials are read from environment variables so this command can
    # run non-interactively (e.g. from CI or shell scripts).
    data_server_username: str = typer.Argument(
        ...,
        envvar = 'DATA_SERVER_USERNAME'
    ),
    data_server_password: str = typer.Argument(
        ...,
        envvar = 'DATA_SERVER_PASSWORD'
    ),
    data_server_url: str = typer.Option(
        'http://localhost:7070',
        envvar = 'DATA_SERVER_URL'
    )
):
    """
    Login to a snappy data server with a shell script
    using environment variables.
    """
    _login(
        data_server_url,
        data_server_username,
        data_server_password)
@app.command()
def login(
    username: str = typer.Option(
        ...,
        prompt=True,
        envvar = 'DATA_SERVER_USERNAME'
    ),
    # Password is prompted with hidden input and never echoed back.
    password: str = typer.Option(
        ...,
        prompt=True, hide_input=True, hidden=True,
    ),
    url: str = typer.Option(
        'http://localhost:7070',
        envvar = 'DATA_SERVER_URL'
    )
):
    """
    Login to a snappy data server with a prompt.
    """
    _login(url, username, password)
    typer.echo('login succeeded')
@app.command()
def post_file(
    filepath: Path =
        typer.Argument(...),
    url: str =
        typer.Option(
            'http://localhost:7070',
            envvar = 'DATA_SERVER_URL'
        )
):
    """Upload a file to the data server using the stored auth token."""
    # NOTE(review): "you're" in the user-facing message is a typo ("your");
    # left unchanged here since this edit only adds documentation.
    typer.echo(f"""you're file is at {
        _post_file(
            f'{url}/api',
            token = auth.token(auth.load()),
            filepath = filepath)
    }""")
@app.command()
def logout():
    """Remove the stored auth token and confirm on stdout."""
    auth.save(auth.rm())
    typer.echo('logged out of snappy')
@app.command()
def install():
    """Automatically add required system resource for snappy cli"""
    # Create the ~/.snappy config directory and write an empty auth store.
    Path(Path.home(), '.snappy').mkdir(exist_ok=True)
    auth.save(auth.rm())
    typer.echo('snappy ready to go!')
# CLI entry point when executed directly (not imported).
if __name__ == '__main__':
    app()
| StarcoderdataPython |
3405190 | from src.config.logger import AppLogger
from src.helpers.file_handler import FileHandler
import pandas as pd
import category_encoders as ce
class CategoricalDataEncoder(AppLogger):
    """One-hot encodes the categorical insurance features (sex/smoker/region).

    In training mode a new encoder is fitted and pickled; in inference mode
    the previously pickled encoder is loaded so train/serve encodings match.
    """

    # Categorical columns to one-hot encode.
    encoder_cols = ['sex', 'smoker', 'region']

    def __init__(self, dataset: pd.DataFrame, train: bool = False):
        super(CategoricalDataEncoder, self).__init__()
        self.cur_file_path = self.get_working_file_location()(__file__)
        self.dataset = dataset
        self.train = train

    def one_hot_encoder(self) -> pd.DataFrame:
        """Return *dataset* with ``encoder_cols`` one-hot encoded.

        Drops the first dummy of each encoded column ('sex_1', 'smoker_1',
        'region_1') -- presumably to avoid the dummy-variable trap; confirm
        against the downstream model's expectations.
        """
        self.log(f"{self.cur_file_path}\t\tInfo: one_hot_encoder method invoked!")
        self.log(f"{self.cur_file_path}\t\tInfo: Performing one hot encoder in {self.encoder_cols} features!")
        file_handler = FileHandler()
        if self.train:
            encoder = ce.OneHotEncoder(cols=self.encoder_cols, return_df=True)
            encoder.fit(self.dataset)
            # Persist the fitted encoder so inference uses the identical mapping.
            file_handler.save_pickle(encoder, 'category_encoder.pkl')
        else:
            encoder = file_handler.load_pickle('category_encoder.pkl')
        encoded_df = encoder.transform(self.dataset)
        return encoded_df.drop(['sex_1', 'smoker_1', 'region_1'], axis=1)
1793699 | import logging
import uuid
import datetime
from six.moves import http_client
from flask import request, g, abort, url_for, jsonify
from flask.views import MethodView
import marshmallow as ma
from flask_restx import reqparse
from flask_smorest import Blueprint
from drift.core.extensions.urlregistry import Endpoints
from drift.core.extensions.jwt import current_user
from drift.core.extensions.schemachecker import simple_schema_request
from driftbase.models.db import Friendship, FriendInvite, CorePlayer
# Friend invites expire after one hour unless tenant config overrides it.
DEFAULT_INVITE_EXPIRATION_TIME_SECONDS = 60 * 60 * 1

log = logging.getLogger(__name__)

bp = Blueprint("friendships", __name__, url_prefix="/friendships", description="Player to player relationships")
endpoints = Endpoints()
def on_message(queue_name, message):
    """Message-bus consumer: log whenever a new client connects."""
    if queue_name != 'clients':
        return
    if message['event'] == 'created':
        log.info("Friendship is forevur! This one just connected: %s", message['payload'])
def drift_init_extension(app, api, **kwargs):
    """Drift extension hook: register blueprint, endpoints, and bus consumer."""
    api.register_blueprint(bp)
    endpoints.init_app(app)
    app.messagebus.register_consumer(on_message, 'clients')
def get_player(player_id):
    """Fetch a CorePlayer row by primary key (None if it does not exist)."""
    player = g.db.query(CorePlayer).get(player_id)
    return player
@bp.route('/players/<int:player_id>', endpoint='list')
class FriendshipsAPI(MethodView):
    def get(self, player_id):
        """
        List my friends
        """
        # Players may only list their own friendships.
        if player_id != current_user["player_id"]:
            abort(http_client.FORBIDDEN, description="That is not your player!")
        # A friendship row stores the pair ordered as (player1_id, player2_id);
        # query both orientations and union them so the friend's id is always
        # the third column of each result row.
        left = g.db.query(Friendship.id, Friendship.player1_id, Friendship.player2_id).filter_by(player1_id=player_id, status="active")
        right = g.db.query(Friendship.id, Friendship.player2_id, Friendship.player1_id).filter_by(player2_id=player_id, status="active")
        friend_rows = left.union_all(right)
        friends = []
        for row in friend_rows:
            friendship_id = row[0]
            friend_id = row[2]
            friend = {
                "friend_id": friend_id,
                "player_url": url_for("players.entry", player_id=friend_id, _external=True),
                "friendship_url": url_for("friendships.entry", friendship_id=friendship_id, _external=True)
            }
            friends.append(friend)
        ret = friends
        return jsonify(ret)

    @simple_schema_request({
        "token": {"type": "string", },
    }, required=["token"])
    def post(self, player_id):
        """
        New friend
        """
        if player_id != current_user["player_id"]:
            abort(http_client.FORBIDDEN, description="That is not your player!")
        args = request.json
        invite_token = args.get("token")
        # The invite token must exist, be unexpired, and not be revoked.
        invite = g.db.query(FriendInvite).filter_by(token=invite_token).first()
        if invite is None:
            abort(http_client.NOT_FOUND, description="The invite was not found!")
        if invite.expiry_date < datetime.datetime.utcnow():
            abort(http_client.FORBIDDEN, description="The invite has expired!")
        if invite.deleted:
            abort(http_client.FORBIDDEN, description="The invite has been deleted!")
        friend_id = invite.issued_by_player_id
        # Normalize the pair so player1_id < player2_id: the friendship row
        # is then unique regardless of who befriended whom.
        left_id = player_id
        right_id = friend_id
        if left_id == right_id:
            abort(http_client.FORBIDDEN, description="You cannot befriend yourself!")
        if left_id > right_id:
            left_id, right_id = right_id, left_id
        existing_friendship = g.db.query(Friendship).filter(
            Friendship.player1_id == left_id,
            Friendship.player2_id == right_id
        ).first()
        if existing_friendship is not None:
            friendship = existing_friendship
            # Re-activate a soft-deleted friendship instead of inserting a
            # duplicate row; an already-active one is a no-op (200 OK).
            if friendship.status == "deleted":
                friendship.status = "active"
            else:
                return "{}", http_client.OK
        else:
            friendship = Friendship(player1_id=left_id, player2_id=right_id)
            g.db.add(friendship)
        g.db.commit()
        ret = {
            "friend_id": friend_id,
            "url": url_for("friendships.entry", friendship_id=friendship.id, _external=True),
            "messagequeue_url": url_for("messages.exchange", exchange="players", exchange_id=friend_id,
                                        _external=True) + "/{queue}",
        }
        return jsonify(ret), http_client.CREATED
@bp.route('/<int:friendship_id>', endpoint='entry')
class FriendshipAPI(MethodView):
    def delete(self, friendship_id):
        """
        Remove a friend
        """
        player_id = current_user["player_id"]
        friendship = g.db.query(Friendship).filter_by(id=friendship_id).first()
        if friendship is None:
            abort(http_client.NOT_FOUND)
        elif friendship.player1_id != player_id and friendship.player2_id != player_id:
            # Only a member of the friendship may delete it.
            abort(http_client.FORBIDDEN)
        elif friendship.status == "deleted":
            return "{}", http_client.GONE
        # Soft-delete: the row is kept so the friendship can be re-activated
        # later by redeeming a new invite (see FriendshipsAPI.post).
        if friendship:
            friendship.status = "deleted"
            g.db.commit()
        return "{}", http_client.NO_CONTENT
@bp.route('/invites', endpoint='invites')
class FriendInvitesAPI(MethodView):
    def post(self):
        """
        New Friend token

        Issues a random invite token for the current player; the token
        expires after DEFAULT_INVITE_EXPIRATION_TIME_SECONDS unless the
        tenant's 'friends' config overrides the duration.
        """
        player_id = current_user["player_id"]
        # BUG FIX: the source contained the anonymization placeholder
        # `str(<KEY>())`; restored to a random uuid4 token (the module
        # imports `uuid` solely for this purpose).
        token = str(uuid.uuid4())
        expires_seconds = DEFAULT_INVITE_EXPIRATION_TIME_SECONDS
        config = g.conf.tenant.get('friends')
        if config:
            expires_seconds = config['invite_expiration_seconds']
        expires = datetime.datetime.utcnow() + datetime.timedelta(seconds=expires_seconds)
        invite = FriendInvite(
            token=token,
            issued_by_player_id=player_id,
            expiry_date=expires
        )
        g.db.add(invite)
        g.db.commit()
        ret = jsonify({
            "token": token,
            "expires": expires,
            "url": url_for("friendships.invite", invite_id=invite.id, _external=True)
        }), http_client.CREATED
        return ret
@bp.route('/invites/<int:invite_id>', endpoint='invite')
class FriendInviteAPI(MethodView):
    def delete(self, invite_id):
        """
        Delete a friend token
        """
        player_id = current_user["player_id"]
        invite = g.db.query(FriendInvite).filter_by(id=invite_id).first()
        if not invite:
            abort(http_client.NOT_FOUND)
        elif invite.issued_by_player_id != player_id:
            # Only the issuing player may revoke their own invite.
            abort(http_client.FORBIDDEN)
        elif invite.deleted:
            return "{}", http_client.GONE
        # Soft-delete so the token row remains (revoked tokens are rejected
        # by FriendshipsAPI.post via the `deleted` flag).
        invite.deleted = True
        g.db.commit()
        return "{}", http_client.NO_CONTENT
@endpoints.register
def endpoint_info(*args):
    """Advertise friendship endpoints in the service root document."""
    ret = {}
    ret["my_friends"] = None
    ret["friend_invites"] = url_for("friendships.invites", _external=True)
    # The per-player friends URL only exists for authenticated callers.
    if current_user:
        ret["my_friends"] = url_for("friendships.list", player_id=current_user["player_id"], _external=True)
    return ret
| StarcoderdataPython |
5020074 | # BSD 2-Clause License
#
# Copyright (c) 2021, Hewlett Packard Enterprise
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
from functools import lru_cache
from pathlib import Path
import psutil
from ...error import SSConfigError
from ..utils.helpers import expand_exe_path
# Configuration Values
#
# These values can be set through environment variables to
# override the default behavior of SmartSim.
#
# RAI_PATH
# - Path to the RAI shared library
# - Default: /smartsim/smartsim/_core/lib/redisai.so
#
# REDIS_CONF
# - Path to the redis.conf file
# - Default: /SmartSim/smartsim/config/redis6.conf
#
# REDIS_PATH
# - Path to the redis-server executable
# - Default: /SmartSim/smartsim/bin/redis-server
#
# REDIS_CLI_PATH
# - Path to the redis-cli executable
# - Default: /SmartSim/smartsim/bin/redis-cli
#
# SMARTSIM_LOG_LEVEL
# - Log level for SmartSim
# - Default: info
#
# SMARTSIM_JM_INTERVAL
# - polling interval for communication with scheduler
# - default: 10 seconds
#
# Testing Configuration Values
#
# SMARTSIM_TEST_INTERFACE
# - Network interface to use for testing
# - Default: auto-detected
#
# SMARTSIM_TEST_LAUNCHER
# - type of launcher to use for testing
# - Default: Local
#
# SMARTSIM_TEST_DEVICE
# - CPU or GPU for model serving tests
# - Default: CPU
#
# SMARTSIM_TEST_ACCOUNT
# - Account used to run full launcher test suite on external systems
# - Default: None
class Config:
    """Resolves SmartSim paths and settings, with environment overrides.

    See the module-level comment block for the full list of recognized
    environment variables and their defaults.
    """

    def __init__(self):
        # SmartSim/smartsim/_core
        core_path = Path(os.path.abspath(__file__)).parent.parent
        self.lib_path = Path(core_path, "lib").resolve()
        self.bin_path = Path(core_path, "bin").resolve()
        self.conf_path = Path(core_path, "config", "redis6.conf")

    @property
    def redisai(self) -> str:
        """Path to the RedisAI shared library (override: RAI_PATH).

        Raises SSConfigError when the library file does not exist.
        """
        rai_path = self.lib_path / "redisai.so"
        redisai = Path(os.environ.get("RAI_PATH", rai_path)).resolve()
        if not redisai.is_file():
            raise SSConfigError(
                "RedisAI dependency not found. Build with `smart` cli or specify RAI_PATH"
            )
        return str(redisai)

    @property
    def redis_conf(self) -> str:
        """Path to the redis.conf file (override: REDIS_CONF)."""
        conf = Path(os.environ.get("REDIS_CONF", self.conf_path)).resolve()
        if not conf.is_file():
            raise SSConfigError(
                "Redis configuration file at REDIS_CONF could not be found"
            )
        return str(conf)

    @property
    def redis_exe(self) -> str:
        """Path to the redis-server executable (override: REDIS_PATH)."""
        try:
            redis_exe = self.bin_path / "redis-server"
            redis = Path(os.environ.get("REDIS_PATH", redis_exe)).resolve()
            exe = expand_exe_path(str(redis))
            return exe
        except (TypeError, FileNotFoundError) as e:
            raise SSConfigError(
                "Specified Redis binary at REDIS_PATH could not be used"
            ) from e

    @property
    def redis_cli(self) -> str:
        """Path to the redis-cli executable (override: REDIS_CLI_PATH)."""
        try:
            redis_cli_exe = self.bin_path / "redis-cli"
            redis_cli = Path(os.environ.get("REDIS_CLI_PATH", redis_cli_exe)).resolve()
            exe = expand_exe_path(str(redis_cli))
            return exe
        except (TypeError, FileNotFoundError) as e:
            raise SSConfigError(
                "Specified Redis binary at REDIS_CLI_PATH could not be used"
            ) from e

    @property
    def log_level(self) -> str:
        """SmartSim log level (override: SMARTSIM_LOG_LEVEL, default 'info')."""
        return os.environ.get("SMARTSIM_LOG_LEVEL", "info")

    @property
    def jm_interval(self) -> int:
        """Job-manager polling interval in seconds (override: SMARTSIM_JM_INTERVAL).

        BUG FIX: os.environ values are strings, so the declared ``int``
        return type was violated whenever the variable was set; convert
        explicitly.
        """
        return int(os.environ.get("SMARTSIM_JM_INTERVAL", 10))

    @property
    def test_launcher(self) -> str:
        """Launcher used by the test suite (override: SMARTSIM_TEST_LAUNCHER)."""
        return os.environ.get("SMARTSIM_TEST_LAUNCHER", "local")

    @property
    def test_device(self) -> str:
        """Device for model-serving tests (override: SMARTSIM_TEST_DEVICE)."""
        return os.environ.get("SMARTSIM_TEST_DEVICE", "CPU")

    @property
    def test_interface(self) -> str:
        """Network interface for tests; auto-detected when the env var is unset."""
        interface = os.environ.get("SMARTSIM_TEST_INTERFACE", None)
        if not interface:
            # try to pick a sensible one
            net_if_addrs = psutil.net_if_addrs()
            if "ipogif0" in net_if_addrs:
                return "ipogif0"
            elif "ib0" in net_if_addrs:
                return "ib0"
            # default to aries network
            return "ipogif0"
        else:
            return interface

    @property
    def test_account(self) -> str:
        """Scheduler account for full launcher tests (override: SMARTSIM_TEST_ACCOUNT)."""
        # no account by default
        return os.environ.get("SMARTSIM_TEST_ACCOUNT", "")
@lru_cache(maxsize=128, typed=False)
def get_config():
    """Return the process-wide Config instance (constructed once, then cached)."""
    # wrap into a function with a cached result
    return Config()
| StarcoderdataPython |
11258339 | import errno
import socket
from ..util.connection import create_connection
from ..util.ssl_ import ssl_wrap_socket
from ..util import selectors
from .. import util
from ._common import DEFAULT_SELECTOR, is_readable, LoopAbort
__all__ = ["SyncBackend"]
BUFSIZE = 65536
class SyncBackend(object):
    """Synchronous backend: opens TCP connections wrapped in SyncSocket."""
    def connect(self, host, port, connect_timeout,
                source_address=None, socket_options=None):
        """Open a TCP connection (blocking, bounded by connect_timeout)."""
        conn = create_connection(
            (host, port), connect_timeout,
            source_address=source_address, socket_options=socket_options)
        return SyncSocket(conn)
class SyncSocket(object):
    """Non-blocking socket wrapper driven by selector-based waits.

    The socket is kept in non-blocking mode at all times except during the
    TLS handshake; every potentially-blocking operation loops over
    recv/send and parks in ``_wait`` until the selector reports readiness.
    """
    # _selector is a hack for testing. Note that normally, we create a
    # new selector object each time we block, but if _selector is passed
    # we use the object every time. See test_sync_connection.py for the
    # tests that use this.
    def __init__(self, sock, _selector=None):
        self._sock = sock
        # We keep the socket in non-blocking mode, except during connect() and
        # during the SSL handshake:
        self._sock.setblocking(False)
        self._selector = _selector

    def start_tls(self, server_hostname, ssl_context):
        """Run the TLS handshake (blocking) and return a new wrapped SyncSocket."""
        self._sock.setblocking(True)
        wrapped = ssl_wrap_socket(
            self._sock,
            server_hostname=server_hostname, ssl_context=ssl_context)
        wrapped.setblocking(False)
        return SyncSocket(wrapped)

    # Only for SSL-wrapped sockets
    def getpeercert(self, binary_form=False):
        """Return the peer's TLS certificate (see ssl.SSLSocket.getpeercert)."""
        return self._sock.getpeercert(binary_form=binary_form)

    def _wait(self, readable, writable, read_timeout=None):
        """Block until the socket is ready for the requested event(s).

        Returns a (readable, writable) pair of truthy flags; raises
        socket.timeout when nothing becomes ready within read_timeout.
        """
        assert readable or writable
        s = self._selector or DEFAULT_SELECTOR()
        flags = 0
        if readable:
            flags |= selectors.EVENT_READ
        if writable:
            flags |= selectors.EVENT_WRITE
        s.register(self._sock, flags)
        events = s.select(timeout=read_timeout)
        if not events:
            raise socket.timeout()  # XX use a backend-agnostic exception
        _, event = events[0]
        return (event & selectors.EVENT_READ, event & selectors.EVENT_WRITE)

    def receive_some(self):
        """Receive up to BUFSIZE bytes, waiting for readiness as needed.

        Handles TLS's want-read/want-write signals: a renegotiating TLS
        socket may need to *write* before more data can be read.
        """
        while True:
            try:
                return self._sock.recv(BUFSIZE)
            except util.SSLWantReadError:
                self._wait(readable=True, writable=False)
            except util.SSLWantWriteError:
                self._wait(readable=False, writable=True)
            except (OSError, socket.error) as exc:
                if exc.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
                    self._wait(readable=True, writable=False)
                else:
                    raise

    def send_and_receive_for_a_while(
            self, produce_bytes, consume_bytes, read_timeout):
        """Full-duplex pump: send from produce_bytes() while feeding received
        data to consume_bytes(), until consume_bytes raises LoopAbort.

        produce_bytes() returning None marks the outgoing stream finished.
        """
        outgoing_finished = False
        outgoing = b""
        try:
            while True:
                if not outgoing_finished and not outgoing:
                    # Can exit loop here with error
                    b = produce_bytes()
                    if b is None:
                        outgoing = None
                        outgoing_finished = True
                    else:
                        assert b
                        outgoing = memoryview(b)

                # This controls whether or not we block
                made_progress = False
                # If we do block, then these determine what can wake us up
                want_read = False
                want_write = False

                # Important: we do recv before send. This is because we want
                # to make sure that after a send completes, we immediately
                # call produce_bytes before calling recv and potentially
                # getting a LoopAbort. This avoids a race condition -- see the
                # "subtle invariant" in the backend API documentation.
                try:
                    incoming = self._sock.recv(BUFSIZE)
                except util.SSLWantReadError:
                    want_read = True
                except util.SSLWantWriteError:
                    want_write = True
                except (OSError, socket.error) as exc:
                    if exc.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
                        want_read = True
                    else:
                        raise
                else:
                    made_progress = True
                    # Can exit loop here with LoopAbort
                    consume_bytes(incoming)

                if not outgoing_finished:
                    try:
                        sent = self._sock.send(outgoing)
                        outgoing = outgoing[sent:]
                    except util.SSLWantReadError:
                        want_read = True
                    except util.SSLWantWriteError:
                        want_write = True
                    except (OSError, socket.error) as exc:
                        if exc.errno in (errno.EWOULDBLOCK, errno.EAGAIN):
                            want_write = True
                        else:
                            raise
                    else:
                        made_progress = True

                # Only block when neither direction advanced this iteration.
                if not made_progress:
                    self._wait(want_read, want_write, read_timeout)
        except LoopAbort:
            # Raised by consume_bytes to signal completion -- normal exit.
            pass

    def forceful_close(self):
        """Close immediately, without a graceful shutdown handshake."""
        self._sock.close()

    def is_readable(self):
        """True if data is waiting to be read (delegates to _common helper)."""
        return is_readable(self._sock)

    def set_readable_watch_state(self, enabled):
        # The synchronous backend has no background readability watcher,
        # so this notification is intentionally a no-op.
        pass
| StarcoderdataPython |
150056 | <gh_stars>1-10
# Apparent truncation length of the prefix keys in EXCEPTION_PREFIXES below
# (most keys are 60 characters) -- confirm against the consuming code.
MAX_PREFIX_LEN = 60
EXCEPTION_PREFIXES = {
"1. Une attestation de la maîtrise foncière sur l'emprise de ": None,
"2. Un plan de l'exploitation à une échelle adaptée à la supe": None,
'3. Une note succincte indiquant la nature de la substance ex': None,
'4. Pour les carrières visées à la rubrique 2510-6, la justif': None,
"5. Une description des modalités d'extraction et de remise e": None,
'6. Les documents et registres prévus aux articles 3.5 et 4.7': None,
'7. Les résultats des dernières mesures sur les effluents et ': None,
"1. Le démantèlement des installations de production d'électr": None,
"2. L'excavation de la totalité des fondations, jusqu'à la ba": None,
'3. La remise en état qui consiste en le décaissement des air': None,
'I. ― Les aires de chargement et de déchargement des produits': 'caps',
"1. Les zones d'effets Z1 et Z2 définies par l'arrêté du 20 a": None,
"2. La zone d'effets Z3 définie par l'arrêté du 20 avril 2007": None,
"3. La zone d'effets Z4 définie par l'arrêté du 20 avril 2007": None,
"4. La zone d'effets Z5 (ou la zone d'effets Z4 dans le cas o": None,
'5. Les effets dominos de toute installation, équipement ou b': None,
"1. Lorsque les distances d'éloignement mentionnées au point ": None,
"1. Le flux horaire maximal en COV à l'exclusion du méthane, ": None,
"2. Le flux horaire maximal en COV à l'exclusion du méthane, ": None,
'1. Le contrôleur vérifie la présence des documents listés ai': None,
"2. L'effectif au jour du contrôle, selon le registre, l'extr": None,
"1. L'installation est maintenue en parfait état d'entretien,": None,
"2. L'exploitant justifie de la lutte contre la prolifération": None,
"1. Lorsqu'un forage alimente en eau l'installation, il est m": None,
"2. L'exploitant dispose d'un moyen pour surveiller sa consom": None,
"1. Les effluents d'élevage issus des bâtiments d'élevage et ": None,
"2. L'exploitant justifie que les capacités des équipements d": None,
'3. Tout écoulement direct des boues ou eaux polluées vers le': None,
"1. Le niveau sonore des bruits en provenance de l'élevage ne": None,
"2. L'émergence due aux bruits engendrés par l'installation r": None,
'Méthode acoustique pour le contrôle des réservoirs enterrés ': 'caps',
'Méthode hydraulique pour le contrôle des réservoirs enterrés': 'caps',
"1. Il existe un mode d'élimination des bidons de désinfectan": None,
"2. Le contrôleur s'assure que :": None,
'1. Les surfaces effectivement épandues ;': None,
'2. Hors zone vulnérable aux pollutions par les nitrates, les': None,
"3. Les dates d'épandage ;": None,
'4. La nature des cultures ;': None,
'5. Les rendements des cultures ;': None,
"6. Les volumes par nature d'effluents et les quantités d'azo": None,
"7. Le mode d'épandage et le délai d'enfouissement ;": None,
"8. Le traitement mis en œuvre pour atténuer les odeurs (s'il": None,
'1. Cas des turbines :': None,
'1. Cas des turbines.': None,
'2. Cas des moteurs.': None,
'2. Cas des moteurs :': None,
'3. Autres appareils de combustion :': None,
'1. Lorsque la puissance est inférieure à 10 MW :': None,
'2. Lorsque la puissance est supérieure ou égale à 10 MW :': None,
'1. Réception :': None,
'2. Expédition :': None,
"1. - = Courant d'électrolyse, en A": None,
"1. En ce qui concerne les reptiles, les sites d'implantation": 'caps',
"2. En ce qui concerne les amphibiens, l'implantation des tra": 'caps',
'1. La caractérisation des sous-produits ou effluents à épand': None,
'2. La liste des parcelles avec, pour chacune, son emplacemen': None,
"3. L'identification des contraintes liées au milieu naturel ": None,
'4. La description des caractéristiques des sols ;': None,
'5. Une analyse des sols portant sur les paramètres mentionné': None,
"6. La justification des doses d'apport et des fréquences d'é": None,
'7. La description des modalités techniques de réalisation de': None,
'8. La description des modalités de surveillance des opératio': None,
'9. La définition de la périodicité des analyses et sa justif': None,
'a) Si leurs concentrations en éléments pathogènes sont supér': None,
'b) Si les teneurs en éléments-traces métalliques dans les so': None,
"c) Dès lors que l'une des teneurs en éléments ou composés in": None,
'd) Dès lors que le flux, cumulé sur une durée de dix ans, ap': None,
'e) En outre, lorsque les déchets ou effluents sont épandus s': None,
'IV-1. Détail du cycle :': 'caps',
"IV-1.1. Cas des machines munies d'un distillateur :": 'numeric-d2',
'IV-1.2. Cas des machines sans distillateur :': 'numeric-d2',
'IV-1.2.1. Machines en plein bain :': 'numeric-d3',
'IV-1.2.2. Machines à pulvérisation :': 'numeric-d3',
'IV-2. Température de séchage :': 'caps',
'IV-3. Distillation :': 'caps',
'IV-4. Capacité machine :': 'caps',
'V-1. Concernant les charges textiles :': 'caps',
'V-2. Concernant la machine en essais :': 'caps',
'VI-1. Préparation de la machine :': 'caps',
'VI-1.1. Les séparateurs :': 'numeric-d2',
'VI-1.2. Pot à charbons actifs :': 'numeric-d2',
'VI-1.3. Fixation de la machine :': 'numeric-d2',
'VI-2. Pesée initiale (machine) :': 'caps',
'VI-3. Pesée initiale (charge textile) :': 'caps',
'VII-1. Déroulement :': 'caps',
'VII-2. Utilisation des charges textiles :': 'caps',
"VII-3. Renouvellement d'air :": 'caps',
"VII-4. Opérations d'entretien :": 'caps',
'VII-4.1. Nettoyage des filtres :': 'numeric-d2',
'VII-4.2. Distillateur :': 'numeric-d2',
'VIII-1. Séparateurs :': 'caps',
'VIII-2. Pot à charbons actifs :': 'caps',
'VIII-3. Pesée de la machine :': 'caps',
'VIII-4. Prise en compte du solvant recueilli du distillateur': 'caps',
'VIII-5. Prise en compte du solvant présent dans le pot à cha': 'caps',
"2. Prescriptions spécifiques à l'emploi de l'ammoniac (insta": 'roman',
'1. La surface maximale des îlots au sol est de 2 500 mètres ': None,
"2. Pour les stockages couverts, une surface maximale d'îlots": None,
"a) Sont des réservoirs à toit fixe reliés à l'URV conforméme": None,
'b) Sont conçues avec un toit flottant (externe ou interne) d': None,
"a) Reliés à une URV conformément aux dispositions de l'annex": None,
"b) Equipés d'un toit flottant interne doté d'un joint primai": None,
'1. Etre accrédité selon la norme NF EN ISO/CEI 17025 pour la': None,
'1. Etre accrédité selon la norme NF EN ISO CEI 17025 pour la': None,
"2. Respecter les limites de quantification listées à l'artic": None,
'a) Turbine ou moteur destiné uniquement à alimenter des syst': None,
'b) Turbine dont le fonctionnement est nécessaire pour assure': None,
"a) Les produits composés d'une matière végétale agricole ou ": None,
'b) Les déchets ci-après :': None,
'i) Déchets végétaux agricoles et forestiers ;': None,
"v) Déchets de bois, à l'exception des déchets de bois qui so": None,
"1. Dispositions générales relatives à l'entretien préventif ": 'numeric-d3',
"1. Dispositions générales relatives à l'entretien préventif": 'numeric-d3',
"2. Entretien préventif de l'installation": 'numeric-d3',
"3. Surveillance de l'installation": 'numeric-d3',
'1. Actions à mener si les résultats provisoires confirmés ou': 'numeric-d3',
"2. Actions à mener si les résultats d'analyse selon la norme": 'numeric-d3',
'3. Actions à mener si le dénombrement des Legionella pneumop': 'numeric-d3',
'4. En cas de dérives répétées, consécutives ou non, de la co': 'numeric-d3',
"1. Vérification de l'installation": 'numeric-d3',
'2. Carnet de suivi': 'numeric-d3',
"a) Seul ou en association avec d'autres agents, sans subir d": None,
'b) Comme agent de nettoyage pour dissoudre des salissures ;': None,
'c) Comme dissolvant ;': None,
'd) Comme dispersant ;': None,
'e) Comme correcteur de viscosité ;': None,
'f) Comme correcteur de tension superficielle ;': None,
'g) Comme plastifiant ;': None,
'h) Comme agent protecteur ;': None,
'1. Si le flux horaire total de COV(1) dépasse 2 kg/h, la val': 'numeric-d3',
'2. Si le flux horaire total des composés organiques listés c': 'numeric-d3',
'3. Substances de mentions de danger H340, H350, H350i, H360D': 'numeric-d3',
"4. Mise en œuvre d'un schéma de maîtrise des émissions de CO": 'numeric-d3',
'1. Oxydes de soufre (exprimés en dioxyde de soufre) : si le ': 'numeric-d3',
"2. Oxydes d'azote (exprimés en dioxyde d'azote) : si le flux": 'numeric-d3',
"3. Chlorure d'hydrogène et autres composés inorganiques gaze": 'numeric-d3',
'4. Fluor et composés inorganiques du fluor (gaz, vésicules e': 'numeric-d3',
'5. Métaux :': 'numeric-d3',
'1. Rejets de cadmium, mercure et thallium, et de leurs compo': None,
"2. Rejets d'arsenic, sélénium et tellure, et de leurs compos": None,
'3. Rejets de plomb et de ses composés : si le flux horaire t': None,
"4. Rejets d'antimoine, chrome, cobalt, cuivre, étain, mangan": None,
"1. Si la quantité d'explosif susceptible d'être présente dan": None,
"2. Si la quantité d'explosif susceptible d'être présente est": None,
'1. La surface des cellules peut dépasser 12 000 m2 si leurs ': None,
'2. La hauteur des cellules peut dépasser 23 m si leurs surfa': None,
'1. Soit des échantillonneurs monoflacons fixes ou portatifs ': None,
'2. Soit des échantillonneurs multiflacons fixes ou portatifs': None,
'1. Justesse et répétabilité du volume prélevé (volume minima': None,
"2. Vitesse de circulation de l'effluent dans les tuyaux supé": None,
'a) Aucune des moyennes arithmétiques de tous les relevés eff': None,
"b) Aucune des moyennes horaires n'est supérieure à 1,5 fois ": None,
'a) La moyenne de toutes les valeurs de mesure ne dépasse pas': None,
'a) turbine ou moteur destiné uniquement à alimenter des syst': None,
'b) turbine dont le fonctionnement est nécessaire pour assure': None,
"a) les produits composés d'une matière végétale agricole ou ": None,
'b) les déchets ci-après :': None,
'i) déchets végétaux agricoles et forestiers ;': None,
"v) déchets de bois, à l'exception des déchets de bois qui so": None,
'1° Surface maximale des îlots au sol : 500 m2 ;': None,
'2° Hauteur maximale de stockage : 8 mètres maximum ;': None,
'3° Largeurs des allées entre îlots : 2 mètres minimum.': None,
'1° Hauteur maximale de stockage : 10 mètres maximum ;': None,
'2° Largeurs des allées entre ensembles de rayonnages ou de p': None,
'7.7 Epandage': 'numeric-d2',
'D.1. Les apports de phosphore et de potasse, organique et mi': 'caps',
"D.2. Les cendres ne contiennent pas d'éléments ou substances": 'caps',
"D.3. Un programme prévisionnel annuel d'épandage est établi,": 'caps',
"D.4. L'épandage des cendres est mis en œuvre afin que les nu": 'caps',
'D.5. Sous réserve des prescriptions fixées en application de': 'caps',
"D.6. Les périodes d'épandage et les quantités épandues sont ": 'caps',
'D.7. Toute anomalie constatée sur les sols, les cultures et ': 'caps',
"E.1. Les ouvrages permanents d'entreposage des cendres sont ": 'caps',
"E.2. Le dépôt temporaire de déchets, sur les parcelles d'épa": 'caps',
'G.1. Des analyses sont effectuées, sur un échantillonnage re': 'caps',
'G.2. Seuils en éléments-traces métalliques et en substances ': 'caps',
"G.3. Les méthodes d'échantillonnage et d'analyse sont défini": 'caps',
"G.3. Les méthodes d'échantillonnage et d'analyse s'appuient ": 'caps',
'22-3. La hauteur des parois des rétentions est au minimum de': 'numeric-d3-dash',
'22-4. La distance entre les parois de la rétention et la par': 'numeric-d3-dash',
'22-5. Dans tous les cas, la surface nette (réservoirs déduit': 'numeric-d3-dash',
"22-6. Les rétentions sont accessibles aux moyens d'extinctio": 'numeric-d3-dash',
'22-8. Une pompe de liquides inflammables peut être placée da': 'numeric-d3-dash',
"22-9. Lorsqu'une perte de confinement sur un réservoir peut ": 'numeric-d3-dash',
"22-10. A l'exception du point 22-9 du présent arrêté, les di": 'numeric-d3-dash',
'Art. 2.1. - Au sens du présent arrêté on entend par :': 'roman',
"Art. 2.2. - I. - Le pétitionnaire et l'exploitant sont tenus": 'roman',
"Art. 2.3. - I. - L'exploitant tient à la disposition de l'in": 'roman',
'Art. 4-1.-I.-Afin de satisfaire au premier alinéa du présent': 'roman-dash',
'Art. 4-2.-I.-Afin de satisfaire au premier alinéa du présent': 'roman-dash',
'Art. 4-3.-Les règles applicables aux avis conformes du minis': 'roman-dash',
'a) Dans tous les cas, avant rejet au milieu naturel ou dans ': None,
"b) Dans le cas de rejet dans un réseau d'assainissement coll": None,
'c) Dans le cas de rejet dans le milieu naturel (ou dans un r': None,
"a) Des prises d'eau, poteaux ou bouches d'incendie normalisé": None,
"b) Des réserves d'eau, réalimentées ou non, disponibles pour": None,
"I. - Tout stockage d'un liquide susceptible de créer une pol": None,
"II. - La capacité de rétention est étanche aux produits qu'e": None,
"III. - Lorsque les stockages sont à l'air libre, les rétenti": None,
'IV. - Le sol des aires et des locaux de stockage ou de manip': None,
'V. - Les dispositions des points I à III ne sont pas applica': None,
"I. - La vitesse d'éjection des effluents gazeux en marche co": None,
'II. - Dans le cas de mesures périodiques, la moyenne de tout': None,
'2.10 Cuvettes de rétention': 'numeric-d2',
}
| StarcoderdataPython |
4897293 | #!/usr/bin/env python
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Generate djvused input for book metadata
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from collections import namedtuple
import re
import itertools
import sys
import io
# Parsed-state containers and the line grammar for the djvused generator.
BookMark = namedtuple('BookMark', ['title', 'book_page'])        # outline entry: title + book-page label
DjvuPage = namedtuple('DjvuPage', ['djvu', 'prefix', 'number'])  # djvu page anchored to book page prefix+number
metadata = list()    # list of "key<TAB>value" metadata strings
bookmarks = list()   # list of BookMark
djvu_pages = list()  # list of DjvuPage anchors, in file order
# Pre-compile the regexps; raw strings fix the invalid escape sequences
# ('\s', '\d' in plain strings are deprecated and will become SyntaxError).
comments_line = re.compile(r'^\s*#|^\s*$')
metadata_line = re.compile(r'^\s*meta\s+([^:]+):\s+(.*?)\s*$', re.IGNORECASE)
djvu_line = re.compile(r'^\s*djvu\s+(\d+)\s+(?:=|is)\s+book\s+(\S*?)(\d*)\s*$', re.IGNORECASE)
bookmark_line = re.compile(r'^(\S+)\s+(.*?)\s*$')
def parse_line(l : str) -> None:
    """Classify one control-file line and record it in the module-level state.

    Recognized forms, tried in order:
      * comment / blank line          -> ignored
      * "meta KEY: VALUE"             -> appended to ``metadata`` as "KEY<TAB>VALUE"
      * "djvu N = book PREFIXNUM"     -> appended to ``djvu_pages``
      * "PAGE TITLE"                  -> appended to ``bookmarks``
    Any line matching none of these raises RuntimeError.
    """
    if comments_line.match(l):
        return
    m = metadata_line.match(l)
    if m:
        metadata.append(m.expand('\\1\t\\2'))
        return
    m = djvu_line.match(l)
    if m:
        djvu_no = int(m.group(1))
        prefix = m.group(2)
        number = m.group(3)
        # An absent page number stays '' (falsy) for prefix-only pages.
        number = number and int(number)
        djvu_pages.append(DjvuPage(djvu_no, prefix, number))
        return
    m = bookmark_line.match(l)
    if m:
        bookmarks.append(BookMark(title=m.group(2), book_page=m.group(1)))
        return
    raise RuntimeError(f"Bad line <{l}>!!")
def generate_all_pages() -> dict:
    """Generate a map from book pages to djvu pages, for all pages in the book"""
    result = dict()

    def record(page: DjvuPage) -> None:
        # Key is the printable book-page label; an empty number yields the
        # bare prefix (e.g. cover pages declared without a digit).
        result[f"{page.prefix}{page.number}"] = page.djvu

    def successor(page: DjvuPage) -> DjvuPage:
        # Advance one physical page; a missing number starts counting at 1.
        djvu, prefix, number = page
        return DjvuPage(djvu + 1, prefix, (number or 1) + 1)

    last = len(djvu_pages) - 1
    for idx, current in enumerate(djvu_pages):
        if idx < last:
            # Fill every page between this anchor and the next one.
            nxt = djvu_pages[idx + 1]
            if current.djvu >= nxt.djvu:
                raise RuntimeError(f"{current} is after {nxt}!")
            while current.djvu < nxt.djvu:
                record(current)
                current = successor(current)
        else:
            # Final anchor contributes a single page.
            record(current)
    return result
# --- Script driver ---------------------------------------------------------
# Parse the control file named on the command line, then emit djvused
# commands (set-meta / set-outline / set-page-title) on stdout.
try:
    with io.open(sys.argv[1]) as infile:
        for l in infile.readlines():
            parse_line(l)
    all_pages = generate_all_pages()
    if metadata:
        # djvused set-meta: one "key<TAB>value" per line, terminated by ".".
        print('select; set-meta', *metadata, '.', sep='\n')
    if bookmarks:
        print('select; set-outline','(bookmarks', sep='\n')
        for mark in bookmarks:
            dpage = all_pages.get(mark.book_page,None)
            if not dpage:
                # A purely numeric book page may name a djvu page directly.
                if mark.book_page.isnumeric(): dpage = mark.book_page
                else: raise RuntimeError(f"bookmark {mark} not found!")
            print(f'("{mark.title}" "#{dpage}")')
        print(')','.', sep='\n')
    if all_pages:
        for bp,dp in all_pages.items():
            print(f'select {dp}; set-page-title "{bp}"')
except Exception as e:
    # NOTE(review): broad catch — any failure (missing argv[1], bad input
    # line, lookup error) prints the exception plus the usage hint and the
    # script then exits with status 0.
    print(e, file=sys.stderr)
    print('Usage: gen-dsed <infile>', file=sys.stderr)
| StarcoderdataPython |
3252646 | import tensorflow as tf
import argparse
import os
import statistics as stat
from models.utils import plot_test_images, plot_images, print_metrics
from models.espcn.model_espcn import ESPCN as espcn
from models.evsrnet.model_evsrnet import EVSRNet
from models.rtsrgan.model_generator import G_RTSRGAN as g_rtsrgan
from models.rtsrgan.model_discriminator import d_rtsrgan
from models.rtsrgan.model_gan import GAN
from models.rtvsrgan.model_generator import G_RTVSRGAN as g_rtvsrgan
from models.rtvsrgan.KnowledgeDistillation import Distiller
from models.rtvsrgan.model_discriminator import d_rtvsrgan, rad_rtvsrgan
from models.rtvsrgan.model_ragan import RaGAN
from models.percsr.model_discriminator import d_percsr, rad_percsr
from models.percsr.model_percsr import PercSR
from models.percsr.model_teacher import Teacher
from models.imdn.model_imdn import IMDN
from models.dataset import Dataset
from models.metrics import psnr, ssim, rmse, lpips
from models.losses import VGGLossNoActivation as VGGLoss, GANLoss
from models.save_img_callback import SaveImageCallback
from models.utils import scale_1 as scale
# ---------------------------------------------------------------------------
# Dataset location tables, keyed by content cluster
# (generic / game / sport / podcast).
# "hot test" folders hold a small fixed image set (LR 270p QP17 inputs and
# HR 1080p references) used for visual monitoring during training.
# ---------------------------------------------------------------------------
hot_test= {'hot_test_generic': {
    'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/generic/lr/270p_qp17/",
    'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/generic/hr/1080p/"
    },
    'hot_test_game': {
    'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/game/lr/270p_qp17/",
    'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/game/hr/1080p/"
    },
    'hot_test_sport': {
    'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/sport/lr/270p_qp17/",
    'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/sport/hr/1080p/"
    },
    'hot_test_podcast': {
    'lr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/podcast/lr/270p_qp17/",
    'hr_hot_test_path': "datasets/loaded_harmonic/img_hot_test/podcast/hr/1080p/"
    }}
# Full image-folder test sets plus per-cluster TensorBoard log directories.
# NOTE(review): absolute /home and /media paths are machine-specific.
test= {
    'test_generic': {
    'lr_test_path': "/home/joao/Documentos/projetos/sr-tf2/datasets/loaded_harmonic/img_test/lr/270p_qp17/",
    'hr_test_path': "/home/joao/Documentos/projetos/sr-tf2/datasets/loaded_harmonic/img_test/hr/1080p/",
    'logdir': "test_logdir/test/generic/"
    },
    'test_game': {
    'lr_test_path': "/media/joao/SAMSUNG/Youtube/game/img_test/lr/270p_qp17/",
    'hr_test_path': "/media/joao/SAMSUNG/Youtube/game/img_test/hr/1080p/",
    'logdir': "test_logdir/test/game/"
    },
    'test_sport': {
    'lr_test_path': "/media/joao/SAMSUNG/Youtube/sport/img_test/lr/270p_qp17/",
    'hr_test_path': "/media/joao/SAMSUNG/Youtube/sport/img_test/hr/1080p/",
    'logdir': "test_logdir/test/sport/"
    },
    'test_podcast': {
    'lr_test_path': "/media/joao/SAMSUNG/Youtube/podcast/img_test/lr/270p_qp17/",
    'hr_test_path': "/media/joao/SAMSUNG/Youtube/podcast/img_test/hr/1080p/",
    'logdir': "test_logdir/test/podcast/"
    }}
# Per-cluster TFRecord test datasets (4X upscale, QP17 sources).
test_datasets = {
    'test_generic': {
    'test_dataset_path': "datasets/loaded_harmonic/output/generic/test/4X/270p_qp17/dataset.tfrecords",
    'test_dataset_info_path': "datasets/loaded_harmonic/output/generic/test/4X/270p_qp17/dataset_info.txt"
    },
    'test_game': {
    'test_dataset_path': "datasets/loaded_harmonic/output/game/test/4X/270p_qp17/dataset.tfrecords",
    'test_dataset_info_path': "datasets/loaded_harmonic/output/game/test/4X/270p_qp17/dataset_info.txt"
    },
    'test_sport': {
    'test_dataset_path': "datasets/loaded_harmonic/output/sport/test/4X/270p_qp17/dataset.tfrecords",
    'test_dataset_info_path': "datasets/loaded_harmonic/output/sport/test/4X/270p_qp17/dataset_info.txt"
    },
    'test_podcast': {
    'test_dataset_path': "datasets/loaded_harmonic/output/podcast/test/4X/270p_qp17/dataset.tfrecords",
    'test_dataset_info_path': "datasets/loaded_harmonic/output/podcast/test/4X/270p_qp17/dataset_info.txt"
    }}
# ---------------------------------------------------------------------------
# CLI defaults (consumed by get_arguments(); every value can be overridden
# on the command line).
# ---------------------------------------------------------------------------
# Model / generator selection.
LIST_MODEL=['espcn','g_rtsrgan','rtsrgan','g_rtvsrgan','teacher','rtvsrgan','imdn','k_dist','percsr','evsrnet']
MODEL='rtvsrgan'
LIST_GENERATOR=[None,'espcn','g_rtsrgan','imdn','evsrnet','g_rtvsrgan']
GENERATOR=None
# Batch / dataset handling.
BATCH_SIZE = 32
VAL_BATCH_SIZE = 16
TEST_BATCH_SIZE = 4
SHUFFLE_BUFFER_SIZE = 64
LIST_TEST_CLUSTER = ['generic','game','sport','podcast']
TEST_CLUSTER = ['sport']
SCHEDULE_VALUES=[100]
# Knowledge distillation model
LOSS_FN='mae'
DISTILLATION_RATE=0.8
ALPHA=0.3
BETA=0.65
LIST_WEIGHTS=[1e-5,1e-2,1e-2]
# Optimization schedule.
TYPE_REDUCE_LR='schedules'
LEARNING_RATE = 1e-4
LEARNING_DECAY_RATE = 1e-1
LEARNING_DECAY_EPOCHS = 20
NUM_EPOCHS = 100
STEPS_PER_EPOCH = 100
VAL_STEPS = 1
TEST_STEPS = 0
EPOCHS_PER_SAVE = 5
# Logging / checkpoint locations.
LOGDIR = 'logdir'
CHECKPOINT = 'checkpoint/'
TRAINNABLE_LAYER = 'final'
PATH_TO_EVAL = 'test_logdir/stats.txt'
TEST_LOGDIR='test_logdir/'
# Default hot-test and dataset paths (2X upscale variants).
HOT_TEST_SIZE=5
LR_HOT_TEST_PATH="datasets/loaded_harmonic/img_test/lr/270p_qp28/"
HR_HOT_TEST_PATH="datasets/loaded_harmonic/img_test/hr/1080p/"
TRAIN_DATASET_PATH='datasets/loaded_harmonic/output/train/2X/270p_qp17/dataset.tfrecords'
TRAIN_DATASET_INFO_PATH='datasets/loaded_harmonic/output/train/2X/270p_qp17/dataset_info.txt'
VAL_DATASET_PATH='datasets/loaded_harmonic/output/val/2X/270p_qp17/dataset.tfrecords'
VAL_DATASET_INFO_PATH='datasets/loaded_harmonic/output/val/2X/270p_qp17/dataset_info.txt'
TEST_DATASET_PATH='datasets/loaded_harmonic/output/test/2X/270p_qp17/dataset.tfrecords'
TEST_DATASET_INFO_PATH='datasets/loaded_harmonic/output/test/2X/270p_qp17/dataset_info.txt'
def get_arguments():
    """Build and parse the command-line interface shared by all SR trainers.

    Returns:
        argparse.Namespace carrying every training/evaluation option; the
        defaults come from the module-level constants above.
    """
    parser = argparse.ArgumentParser(description='train one of the models for image and video super-resolution')
    # NOTE(review): several options combine required=True with a default;
    # the default is then dead because argparse insists on the flag.
    parser.add_argument('--model', type=str, default=MODEL, choices=LIST_MODEL,
                        help='What model to train', required=True)
    parser.add_argument('--generator', type=str, default=GENERATOR, choices=LIST_GENERATOR,
                        help='What model to train', required=False)
    parser.add_argument('--batch_size', type=int, default=BATCH_SIZE,
                        help='Number of images in batch', required=True)
    parser.add_argument('--train_dataset_path', type=str, default=TRAIN_DATASET_PATH,
                        help='Path to the train dataset', required=True)
    parser.add_argument('--train_dataset_info_path', type=str, default=TRAIN_DATASET_INFO_PATH,
                        help='Path to the train dataset info', required=True)
    parser.add_argument('--num_epochs', type=int, default=NUM_EPOCHS,
                        help='Number of training epochs', required=True)
    parser.add_argument('--steps_per_epoch', type=int, default=STEPS_PER_EPOCH,
                        help='Total number of steps (batches of samples) before declaring one epoch finished and starting the next epoch.')
    parser.add_argument('--val_batch_size', type=int, default=VAL_BATCH_SIZE,
                        help='Number of images in val batch')
    parser.add_argument('--val_dataset_path', type=str, default=VAL_DATASET_PATH,
                        help='Path to the val dataset')
    parser.add_argument('--val_dataset_info_path', type=str, default=VAL_DATASET_INFO_PATH,
                        help='Path to the val dataset info')
    parser.add_argument('--validation_steps', type=int, default=VAL_STEPS,
                        help='Total number of steps (batches of samples) to draw before stopping when performing validation at the end of every epoch.')
    parser.add_argument('--test_batch_size', type=int, default=TEST_BATCH_SIZE,
                        help='Number of images in test batch')
    parser.add_argument('--test_dataset_path', type=str, default=TEST_DATASET_PATH,
                        help='Path to the test dataset')
    parser.add_argument('--test_dataset_info_path', type=str, default=TEST_DATASET_INFO_PATH,
                        help='Path to the test dataset info')
    parser.add_argument('--test_steps', type=int, default=TEST_STEPS,
                        help='Total number of steps (batches of samples) to draw before stopping when performing evaluate at the end of every epoch.')
    parser.add_argument('--test_cluster', nargs='*', type=str, default=TEST_CLUSTER, choices=LIST_TEST_CLUSTER,
                        help='What cluster dataset to eval', required=False)
    parser.add_argument('--hot_test_size', type=int, default=HOT_TEST_SIZE,
                        help='Number of images in hot test')
    parser.add_argument('--lr_hot_test_path', type=str, default=LR_HOT_TEST_PATH,
                        help='Path to the hot test dataset')
    parser.add_argument('--hr_hot_test_path', type=str, default=HR_HOT_TEST_PATH,
                        help='Path to the hr hot test path')
    parser.add_argument('--ckpt_path', default=CHECKPOINT,
                        help='Path to the model checkpoint to evaluate')
    parser.add_argument('--load_weights', action='store_true',
                        help='Load weights')
    parser.add_argument('--load_weights_perc', action='store_true',
                        help='Load weights perceptual')
    parser.add_argument('--eval', action='store_true',
                        help='Avaluete model')
    parser.add_argument('--range_to_save', type=int, default=10,
                        help='Range of image to save for teste.' )
    parser.add_argument('--transfer_learning', action='store_true',
                        help='Transfer learning from lower-upscale model')
    parser.add_argument('--trainable_layer', type=str, default=TRAINNABLE_LAYER,
                        help='Transfer learning from lower-upscale model')
    parser.add_argument('--scaleFrom', type=int, default=2,
                        help='Perform transfer learning from lower-upscale model' )
    parser.add_argument('--shuffle_buffer_size', type=int, default=SHUFFLE_BUFFER_SIZE,
                        help='Buffer size used for shuffling examples in dataset')
    parser.add_argument('--learning_rate', type=float, default=LEARNING_RATE,
                        help='Learning rate used for training')
    parser.add_argument('--lr_decay_rate', type=float, default=LEARNING_DECAY_RATE,
                        help='Learning rate decay rate used in exponential decay')
    parser.add_argument('--lr_decay_epochs', type=int, default=LEARNING_DECAY_EPOCHS,
                        help='Number of epochs before full decay rate tick used in exponential decay')
    parser.add_argument('--type_reduce_lr', type=str, default=TYPE_REDUCE_LR, choices=['plateau','schedules'],
                        help='Type of reduce learning rate')
    parser.add_argument('--schedule_values',nargs='*', type=int, default=SCHEDULE_VALUES,
                        help='list of epochs values to reduce lr')
    parser.add_argument('--loss_fn', type=str, default=LOSS_FN, choices=['mse','mae','huber', 'fea'],
                        help='Set the loss function to knowledge distillation model')
    parser.add_argument('--distillation_rate', type=float, default=DISTILLATION_RATE,
                        help='Distillation rate in knowledge distillation model')
    parser.add_argument('--alpha', type=float, default=ALPHA,
                        help='Weight for distillation loss function in knowledge distillation model')
    parser.add_argument('--beta', type=float, default=BETA,
                        help='Weight for perceptual loss function in knowledge distillation model')
    parser.add_argument('--list_weights', nargs='*', type=float, default=LIST_WEIGHTS,
                        help='Auxiliary list to weight values')
    parser.add_argument('--inter_method', type=str, default=None, choices=['bilinear','lanczos3','lanczos5','bicubic','nearest','mitchellcubic'],
                        help='Type of interpolation resize used of same models')
    parser.add_argument('--epochs_per_save', type=int, default=EPOCHS_PER_SAVE,
                        help='How often to save checkpoints')
    parser.add_argument('--logdir', type=str, default=LOGDIR,
                        help='Where to save checkpoints and summaries')
    parser.add_argument('--test_logdir', type=str, default=TEST_LOGDIR,
                        help='Where to save tests images')
    parser.add_argument('--path_to_eval', type=str, default=PATH_TO_EVAL,
                        help='Path to save evals')
    return parser.parse_args()
def main():
    """Entry point: build datasets and callbacks, then dispatch to the
    per-model training routine selected by ``--model``."""
    args = get_arguments()
    # train dataset
    train_dataset = Dataset(args.batch_size,
                            args.train_dataset_path,
                            args.train_dataset_info_path,
                            args.shuffle_buffer_size)
    scale_factor = train_dataset.scale_factor
    if args.steps_per_epoch == 0:
        # NOTE(review): this yields steps_per_epoch == 0 when examples_num is
        # an exact multiple of batch_size — the condition looks inverted
        # (a ceil-division was probably intended). Confirm before relying on
        # --steps_per_epoch 0. The same pattern repeats for val/test below.
        steps_per_epoch = train_dataset.examples_num // args.batch_size \
            if train_dataset.examples_num % args.batch_size != 0 else 0
    else:
        steps_per_epoch = args.steps_per_epoch
    train_dataset = train_dataset.get_data(args.num_epochs)
    # Pipelines yield 4-tuples; only x1 (LR frame) and y (HR frame) are used.
    train_batch = train_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
    # val dataset
    val_dataset = Dataset(args.val_batch_size,
                          args.val_dataset_path,
                          args.val_dataset_info_path,
                          args.shuffle_buffer_size)
    if args.validation_steps == 0:
        validation_steps = val_dataset.examples_num // args.val_batch_size \
            if val_dataset.examples_num % args.val_batch_size != 0 else 0
    else:
        validation_steps = args.validation_steps
    val_dataset = val_dataset.get_data()
    val_batch = val_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
    # test dataset
    test_dataset = Dataset(args.test_batch_size,
                           args.test_dataset_path,
                           args.test_dataset_info_path,
                           args.shuffle_buffer_size)
    if args.test_steps == 0:
        test_steps = test_dataset.examples_num // args.test_batch_size \
            if test_dataset.examples_num % args.test_batch_size != 0 else 0
    else:
        test_steps = args.test_steps
    test_dataset = test_dataset.get_data()
    test_batch = test_dataset.map(lambda x0,x1,x2,y: (scale(x1),scale(y)))
    # hot test: first hot_test_size LR/HR image pairs, sorted by filename.
    lr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(args.lr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
    hr_img_paths=sorted([[dp+filename for filename in filenames] for dp, dn, filenames in os.walk(args.hr_hot_test_path) if len(filenames)!=0][0])[0:args.hot_test_size]
    test_print = [lr_img_paths,hr_img_paths]
    checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
    # Keep only the weights of the best (lowest val_loss) epoch.
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_paph,
        save_weights_only=True,
        monitor='val_loss',
        save_freq= 'epoch',
        mode='min',
        save_best_only=True)
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=args.logdir+"/"+args.model,
        histogram_freq=1,
        write_graph=True,
        write_images=True,
        write_steps_per_second=True,
        update_freq='batch')
    file_writer_cm = tf.summary.create_file_writer(args.logdir+"/"+args.model + '/validation')
    earlystopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_loss',
        min_delta=1e-5,
        patience=100, verbose=1,
        mode='min',
        restore_best_weights=True)
    # Learning-rate policy: plateau-based decay or fixed epoch schedule.
    if args.type_reduce_lr == 'plateau':
        reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_rmse', factor=args.lr_decay_rate,
                                                         patience=args.lr_decay_epochs, mode='min', min_lr=1e-6,verbose=1)
    elif args.type_reduce_lr == 'schedules':
        def scheduler(epoch, lr):
            # Multiply lr by exp(-0.1) at each epoch listed in schedule_values.
            if epoch in args.schedule_values:
                return lr * tf.math.exp(-0.1)
            else:
                return lr
        reduce_lr=tf.keras.callbacks.LearningRateScheduler(scheduler)
    else:
        print("--type_reduce_lr not valid!")
        exit(1)
    # Dispatch on model name; each branch trains, then appends the metrics
    # and measured runtime to the shared stats file via print_eval().
    if args.model == 'espcn':
        callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
        eval,run_time=train_espcn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'imdn':
        callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
        eval,run_time=train_imdn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'g_rtsrgan':
        callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
        eval, run_time=train_g_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'rtsrgan':
        # GAN training manages its own checkpointing; only TensorBoard here.
        callbacks=[tensorboard_callback]
        eval,run_time=train_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'evsrnet':
        callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
        eval,run_time=train_evsrnet(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    # Ours models
    elif args.model == 'g_rtvsrgan':
        callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
        eval,run_time=train_g_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'teacher':
        callbacks=[checkpoint_callback,tensorboard_callback,earlystopping,reduce_lr]
        eval,run_time=train_teacher(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'rtvsrgan':
        callbacks=[tensorboard_callback,reduce_lr]
        eval,run_time=train_rtvsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'k_dist':
        callbacks=[tensorboard_callback, reduce_lr]
        eval,run_time=train_k_distillation(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    elif args.model == 'percsr':
        callbacks=[tensorboard_callback, reduce_lr]
        print("CALLING MODEL {}".format(args.model))
        eval,run_time=train_percsr(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer=args.trainable_layer)
        print_eval(args.path_to_eval,eval,args.model+'_'+args.generator+"_{}X_q{}".format(str(scale_factor),str(args.train_dataset_path).split('_q')[-1]),run_time)
    else:
        # Unknown model name (should be prevented by argparse choices).
        exit(1)
def trainable_weights(model):
    """Log how many weight tensors *model* holds, split by trainability."""
    for label, collection in (
            ("Weights:", model.weights),
            ("Trainable_weights:", model.trainable_weights),
            ("Non_trainable_weights:", model.non_trainable_weights)):
        print(label, len(collection))
def trainable_layers(model, trainable_layer):
    """Freeze every layer that precedes the 1-based index *trainable_layer*.

    NOTE(review): unlike the name-based freezing inside the train_* helpers,
    this compares against a layer *position* — confirm callers pass an int.
    """
    for position, layer in enumerate(model.layers, start=1):
        if position == trainable_layer:
            break
        layer.trainable = False
def print_eval(file_stats, eval, model_name, run_time):
    """Append one evaluation record to the stats file *file_stats*.

    Writes three lines — model name, metric values, measured runtime — in
    append mode so successive runs accumulate in the same file.
    """
    # Context manager guarantees the handle is closed even if a write fails
    # (the original open()/close() pair leaked the handle on error).
    with open(file_stats, "a") as stats_file:
        print(model_name, file=stats_file)
        print(eval, file=stats_file)
        print(run_time, file=stats_file)
def saved_model(model, filepath):
    # Export the full model in TensorFlow SavedModel format; save_traces=True
    # also stores traced call functions so custom layers reload without code.
    tf.keras.models.save_model(model, filepath, save_traces=True)
def train_espcn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
                file_writer_cm,trainable_layer):
    """Train (or, with --eval, only evaluate) the ESPCN model.

    Returns:
        (eval, run_time): Keras evaluate() metrics and the model's measured
        inference runtime. Does not return when args.eval is set (exit(1)).
    """
    model = espcn(scale_factor=scale_factor)
    if args.load_weights:
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
    if args.transfer_learning:
        # Copy and freeze layers from the lower-scale pretrained model up to
        # (but not including) the layer named `trainable_layer`.
        checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
        print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        modelFrom = espcn(scale_factor=args.scaleFrom)
        modelFrom.load_weights(checkpoint_paph_from)
        for i in range(len(modelFrom.layers)):
            if(modelFrom.layers[i].name == trainable_layer):
                break
            else:
                print("Set_weights in: {} layer".format(model.layers[i].name))
                model.layers[i].set_weights(modelFrom.layers[i].get_weights())
                model.layers[i].trainable=False
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
    # NOTE(review): loss_fn == 'fea' (allowed by argparse) matches none of
    # these branches and leaves loss_fn unbound -> NameError at compile().
    if args.loss_fn == "mse":
        loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        loss_fn = tf.keras.losses.MeanAbsoluteError()
    model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
    trainable_weights(model)
    if(args.eval==True):
        # Evaluation-only path: reload best checkpoint, run the cluster test
        # suite, then terminate the process (exit code 1 even on success).
        print("Loading weights...")
        checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
        model.load_weights(checkpoint_paph)
        print("Evaluate model")
        get_test_dataset(model,scale_factor,args)
        exit(1)
    # Render the fixed hot-test images to TensorBoard every few epochs.
    save_img_callback = SaveImageCallback(
        model=model,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
              verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
    print("Evaluate model")
    eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(model, 'saved_model/{}/'.format(args.model))
    return eval,model.get_run_time()
def train_imdn(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
               file_writer_cm,trainable_layer):
    """Train (or, with --eval, only evaluate) the IMDN model.

    Same flow as train_espcn with the IMDN architecture; returns
    (eval, run_time), or terminates via exit(1) on the --eval path.
    """
    model = IMDN(scale_factor=scale_factor)
    if args.load_weights:
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
    if args.transfer_learning:
        # Copy and freeze layers from the lower-scale pretrained model up to
        # (but not including) the layer named `trainable_layer`.
        checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
        print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        modelFrom = IMDN(scale_factor=args.scaleFrom)
        modelFrom.load_weights(checkpoint_paph_from)
        for i in range(len(modelFrom.layers)):
            if(modelFrom.layers[i].name == trainable_layer):
                break
            else:
                print("Set_weights in: {} layer".format(model.layers[i].name))
                model.layers[i].set_weights(modelFrom.layers[i].get_weights())
                model.layers[i].trainable=False
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
    # NOTE(review): loss_fn == 'fea' matches no branch here -> NameError.
    if args.loss_fn == "mse":
        loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        loss_fn = tf.keras.losses.MeanAbsoluteError()
    model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
    trainable_weights(model)
    if(args.eval==True):
        # Evaluation-only path; terminates the process afterwards.
        print("Loading weights...")
        checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
        model.load_weights(checkpoint_paph)
        print("Evaluate model")
        get_test_dataset(model,scale_factor,args)
        exit(1)
    save_img_callback = SaveImageCallback(
        model=model,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
              verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
    print("Evaluate model")
    eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(model, 'saved_model/{}/'.format(args.model))
    return eval, model.get_run_time()
def train_g_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
                    file_writer_cm,trainable_layer):
    """Train (or, with --eval, only evaluate) the RT-SRGAN generator alone.

    Same flow as train_espcn with the g_rtsrgan architecture; returns
    (eval, run_time), or terminates via exit(1) on the --eval path.
    """
    model = g_rtsrgan(scale_factor=scale_factor)
    if args.load_weights:
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
    if args.transfer_learning:
        # Copy and freeze layers from the lower-scale pretrained model up to
        # (but not including) the layer named `trainable_layer`.
        checkpoint_paph_from="{}{}_{}x/model.ckpt".format("checkpoint/",args.model,args.scaleFrom)
        print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        modelFrom = g_rtsrgan(scale_factor=args.scaleFrom)
        modelFrom.load_weights(checkpoint_paph_from)
        for i in range(len(modelFrom.layers)):
            if(modelFrom.layers[i].name == trainable_layer):
                break
            else:
                print("Set_weights in: {} layer".format(model.layers[i].name))
                model.layers[i].set_weights(modelFrom.layers[i].get_weights())
                model.layers[i].trainable=False
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
    # NOTE(review): loss_fn == 'fea' matches no branch here -> NameError.
    if args.loss_fn == "mse":
        loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        loss_fn = tf.keras.losses.MeanAbsoluteError()
    model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
    trainable_weights(model)
    if(args.eval==True):
        # Evaluation-only path; terminates the process afterwards.
        print("Loading weights...")
        checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
        model.load_weights(checkpoint_paph)
        print("Evaluate model")
        get_test_dataset(model,scale_factor,args)
        exit(1)
    save_img_callback = SaveImageCallback(
        model=model,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
              verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
    print("Evaluate model")
    eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(model, 'saved_model/{}/'.format(args.model))
    return eval,model.get_run_time()
def train_rtsrgan(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,file_writer_cm,trainable_layer):
    """Adversarially train RT-SRGAN: g_rtsrgan generator vs. d_rtsrgan
    discriminator, with a combined perceptual/content/adversarial loss.

    Returns (eval, run_time) computed on the generator alone.
    """
    g=g_rtsrgan(scale_factor=scale_factor)
    g.compile(metrics=[psnr,ssim,rmse,lpips])
    # Discriminator sees HR-sized single-channel patches (36*scale square).
    d=d_rtsrgan(input_shape=(36*scale_factor,36*scale_factor,1))
    gan = GAN(discriminator = d, generator = g)
    # NOTE(review): loss_fn == 'fea' matches no branch -> cont_loss unbound.
    if args.loss_fn == "mse":
        cont_loss = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        cont_loss = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        cont_loss = tf.keras.losses.MeanAbsoluteError()
    shape_hr = (36*scale_factor,36*scale_factor,3)
    vgg_loss = VGGLoss(shape_hr,cont_loss)
    perc_loss = vgg_loss.custom_perceptual_loss
    adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
    # Fixed loss weights: lbd*perceptual, eta*content, mu*adversarial.
    lbd = 1 * 1e-5
    eta = 1 * 1e-2
    mu = 1 * 1e-2
    gan_loss=GANLoss(perc_loss, cont_loss, adv_loss,lbd,eta,mu)
    if (args.load_weights):
        # Warm-start the generator from the pretrained g_rtsrgan checkpoint
        # and freeze its layers up to `trainable_layer`.
        print("Loading weights...")
        checkpoint_paph="{}g_rtsrgan_{}x/model.ckpt".format(args.ckpt_path,scale_factor)
        gan.load_weights_gen(checkpoint_paph)
        for i in range(len(g.layers)):
            if(g.layers[i].name == trainable_layer):
                break
            else:
                g.layers[i].trainable=False
    gan.compile(d_optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
                g_optimizer = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0),
                d_loss = gan_loss.discriminator_loss,
                g_loss = gan_loss.generator_loss,
                metrics=[psnr,ssim,rmse,lpips])
    trainable_weights(gan)
    save_img_callback = SaveImageCallback(
        model=g,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    # Checkpoint on best (lowest) validation LPIPS of the whole GAN.
    checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_paph,
        save_weights_only=True,
        monitor='val_lpips',
        save_freq= 'epoch',
        mode='min',
        save_best_only=True)
    callbacks.append(checkpoint_callback)
    gan.fit(train_batch, epochs=args.num_epochs,callbacks=callbacks,verbose=1,steps_per_epoch=steps_per_epoch)
    # Persist the generator weights separately for later reuse.
    checkpoint_paph="{}{}_{}x/g_rtsrgan/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
    gan.save_weights_gen(checkpoint_paph)
    print("Evaluate model")
    eval = g.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(g, 'saved_model/{}/'.format(args.model))
    return eval, g.get_run_time()
def train_evsrnet(train_batch,steps_per_epoch, validation_steps,val_batch, test_batch, test_steps, test_print, scale_factor,args,callbacks,checkpoint_paph,
                  file_writer_cm,trainable_layer):
    """Train (or, with --eval, only evaluate) the EVSRNet model.

    Same flow as train_espcn (no transfer-learning branch); `inter_method`
    selects the resize interpolation inside the network. Returns
    (eval, run_time), or terminates via exit(1) on the --eval path.
    """
    model = EVSRNet(scale_factor=scale_factor,method=args.inter_method)
    # Build with dynamic spatial dims and a single (luma) channel.
    model.build((None, None, None,1))
    #print(model.summary())
    if args.load_weights:
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate,clipnorm=1.0)
    # NOTE(review): loss_fn == 'fea' matches no branch here -> NameError.
    if args.loss_fn == "mse":
        loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae": # default
        loss_fn = tf.keras.losses.MeanAbsoluteError()
    model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr,ssim,rmse,lpips])
    trainable_weights(model)
    if(args.eval==True):
        # Evaluation-only path; terminates the process afterwards.
        print("Loading weights...")
        checkpoint_paph="{}{}_{}x/model.ckpt".format(args.ckpt_path,args.model,scale_factor)
        model.load_weights(checkpoint_paph)
        print("Evaluate model")
        get_test_dataset(model,scale_factor,args)
        exit(1)
    save_img_callback = SaveImageCallback(
        model=model,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    model.fit(train_batch,epochs=args.num_epochs,callbacks=callbacks,
              verbose=1,steps_per_epoch=steps_per_epoch,validation_steps=validation_steps,validation_data=val_batch)
    print("Evaluate model")
    eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(model, 'saved_model/{}/'.format(args.model))
    return eval,model.get_run_time()
def train_teacher(train_batch, steps_per_epoch, validation_steps, val_batch, test_batch, test_steps, test_print, scale_factor, args, callbacks, checkpoint_paph, file_writer_cm, trainable_layer):
    """Train the Teacher network used later for knowledge distillation.

    Supports weight reloading, evaluation-only mode, transfer learning from a
    model trained at another scale, and an optional VGG perceptual ("fea") loss.

    Returns:
        (eval, run_time): test metrics (empty list for the "fea" loss, which has
        no directly comparable pixel metrics here) and ``model.get_run_time()``.
    """
    model = Teacher(channels=1, scale_factor=scale_factor, distillation_rate=args.distillation_rate)
    model.build((None, None, None, 1))  # NHWC, single channel
    print(model.summary())
    if args.load_weights:
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
    if (args.eval == True):
        # Evaluation-only mode: restore weights, run the test clusters, exit.
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
        print("Evaluate model")
        model.compile(metrics=[psnr, ssim, rmse, lpips])
        get_test_dataset(model, scale_factor, args)
        exit(1)
    if args.transfer_learning:
        # Copy (and freeze) layers from a model trained at args.scaleFrom,
        # stopping at the first layer named `trainable_layer`.
        checkpoint_paph_from = "{}{}_{}x/model.ckpt".format("checkpoint/", args.model, args.scaleFrom)
        print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)
        modelFrom.load_weights(checkpoint_paph_from)
        for i in range(len(modelFrom.layers)):
            if (modelFrom.layers[i].name == trainable_layer):
                break
            else:
                print("Set_weights in: {} layer".format(model.layers[i].name))
                model.layers[i].set_weights(modelFrom.layers[i].get_weights())
                model.layers[i].trainable = False
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0)
    # Pixel / perceptual loss selection. NOTE(review): an unrecognized
    # args.loss_fn leaves `loss_fn` unbound and raises NameError below.
    if args.loss_fn == "mse":
        loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        loss_fn = tf.keras.losses.MeanAbsoluteError()
    if args.loss_fn == "fea":
        # VGG-feature perceptual loss; the aux loss compares feature maps.
        loss_aux = tf.keras.losses.MeanAbsoluteError()
        shape_hr = (36 * scale_factor, 36 * scale_factor, 3)
        vgg_loss = VGGLoss(shape_hr, loss_aux)
        loss_fn = vgg_loss.custom_perceptual_loss
    model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr, ssim, rmse, lpips])
    trainable_weights(model)
    # Periodically renders sample SR images to TensorBoard during training.
    save_img_callback = SaveImageCallback(
        model=model,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    model.fit(train_batch, epochs=args.num_epochs, callbacks=callbacks,
              verbose=1, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_data=val_batch)
    print("Evaluate model")
    if args.loss_fn == "fea":
        eval = []
    else:
        eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(model, 'saved_model/{}/'.format(args.model))
    return eval, model.get_run_time()
def train_g_rtvsrgan(train_batch, steps_per_epoch, validation_steps, val_batch, test_batch, test_steps, test_print, scale_factor, args, callbacks, checkpoint_paph, file_writer_cm, trainable_layer):
    """Train the g_rtvsrgan generator alone (no adversarial discriminator).

    Supports weight reloading, transfer learning from another scale factor,
    and an evaluation-only mode triggered by ``args.eval``.

    Returns:
        (eval, run_time): ``model.evaluate`` results and ``model.get_run_time()``.
    """
    model = g_rtvsrgan(scale_factor=scale_factor, method=args.inter_method)
    if args.load_weights:
        print("Loading weights...")
        model.load_weights(checkpoint_paph)
    if args.transfer_learning:
        # Copy (and freeze) layers from a model trained at args.scaleFrom,
        # stopping at the first layer named `trainable_layer`.
        checkpoint_paph_from = "{}{}_{}x/model.ckpt".format("checkpoint/", args.model, args.scaleFrom)
        print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)
        modelFrom.load_weights(checkpoint_paph_from)
        for i in range(len(modelFrom.layers)):
            if (modelFrom.layers[i].name == trainable_layer):
                break
            else:
                print("Set_weights in: {} layer".format(model.layers[i].name))
                model.layers[i].set_weights(modelFrom.layers[i].get_weights())
                model.layers[i].trainable = False
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0)
    # Pixel loss selection. NOTE(review): an unrecognized args.loss_fn leaves
    # `loss_fn` unbound and raises NameError at compile() below.
    if args.loss_fn == "mse":
        loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        loss_fn = tf.keras.losses.MeanAbsoluteError()
    model.compile(optimizer=opt, loss=loss_fn, metrics=[psnr, ssim, rmse, lpips])
    trainable_weights(model)
    if (args.eval == True):
        # Evaluation-only mode: restore the checkpoint and exit after testing.
        print("Loading weights...")
        checkpoint_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, args.model, scale_factor)
        model.load_weights(checkpoint_paph)
        print("Evaluate model")
        get_test_dataset(model, scale_factor, args)
        exit(1)
    # Periodically renders sample SR images to TensorBoard during training.
    save_img_callback = SaveImageCallback(
        model=model,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    model.fit(train_batch, epochs=args.num_epochs, callbacks=callbacks,
              verbose=1, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_data=val_batch)
    print("Evaluate model")
    eval = model.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(model, 'saved_model/{}/'.format(args.model))
    return eval, model.get_run_time()
def train_k_distillation(train_batch, steps_per_epoch, validation_steps, val_batch, test_batch, test_steps, test_print, scale_factor, args, callbacks, checkpoint_paph, file_writer_cm, trainable_layer):
    """Distill a pretrained g_rtvsrgan teacher into a g_rtvsrgan student.

    Combines a student pixel loss, a teacher-distillation loss and a VGG
    perceptual loss (weighted by ``args.alpha`` / ``args.beta``) inside a
    ``Distiller`` wrapper, then trains with early stopping and best-LPIPS
    checkpointing.

    Returns:
        (eval, run_time): distiller test metrics and the student's run time.
    """
    opt = tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0)
    # Auxiliary loss used inside the VGG perceptual loss.
    # NOTE(review): an unrecognized args.loss_fn leaves `aux_loss_fn` unbound.
    if args.loss_fn == "mse":
        aux_loss_fn = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        aux_loss_fn = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        aux_loss_fn = tf.keras.losses.MeanAbsoluteError()
    student_loss_fn = tf.keras.losses.MeanSquaredError()
    distillation_loss_fn = tf.keras.losses.MeanAbsoluteError()
    shape_hr = (36 * scale_factor, 36 * scale_factor, 3)
    vgg_loss = VGGLoss(shape_hr, aux_loss_fn)
    perc_loss = vgg_loss.custom_perceptual_loss
    # Frozen teacher restored from its own checkpoint.
    teacher = g_rtvsrgan(channels=1, scale_factor=scale_factor)
    print("Loading teacher weights...")
    weights_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, 'g_rtvsrgan', scale_factor)
    teacher.load_weights(weights_paph)
    student = g_rtvsrgan(channels=1, scale_factor=scale_factor)
    student.build((None, None, None, 1))
    # Initialize and compile distiller
    distiller = Distiller(student=student, teacher=teacher)
    distiller.compile(
        optimizer=opt,
        metrics=[psnr, ssim, rmse, lpips],
        student_loss_fn=student_loss_fn,
        distillation_loss_fn=distillation_loss_fn,
        perc_loss_fn=perc_loss,
        alpha=args.alpha,
        beta=args.beta
    )
    trainable_weights(student)
    if args.load_weights:
        print("Loading student weights...")
        checkpoint_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, 'g_rtvsrgan', scale_factor)
        student.load_weights(checkpoint_paph)
        trainable_layers(student, len(student.layers) - 1)
        trainable_weights(student)
    if args.transfer_learning:
        checkpoint_paph_from = "{}{}_{}x/model.ckpt".format("checkpoint/", args.model, args.scaleFrom)
        print("Transfer learning from {}x-upscale model...".format(args.scaleFrom))
        # BUG FIX: the original called `student(scale_factor=...)`, but `student`
        # is a model *instance*, not the constructor. Build a fresh source model
        # exactly as the sibling train_* functions do.
        modelFrom = g_rtvsrgan(scale_factor=args.scaleFrom)
        modelFrom.load_weights(checkpoint_paph_from)
        for i in range(len(modelFrom.layers)):
            if (modelFrom.layers[i].name == trainable_layer):
                break
            else:
                print("Set_weights in: {} layer".format(student.layers[i].name))
                student.layers[i].set_weights(modelFrom.layers[i].get_weights())
                student.layers[i].trainable = False
    # Periodically renders sample SR images to TensorBoard during training.
    save_img_callback = SaveImageCallback(
        model=distiller.student,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    earlystopping = tf.keras.callbacks.EarlyStopping(
        monitor='val_rmse',
        min_delta=1e-5,
        patience=50, verbose=1,
        mode='min',
        restore_best_weights=True)
    callbacks.append(earlystopping)
    checkpoint_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, args.model, scale_factor)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_paph,
        save_weights_only=True,
        monitor='val_lpips',
        save_freq='epoch',
        mode='min',
        save_best_only=True)
    callbacks.append(checkpoint_callback)
    # Distill teacher to student
    distiller.fit(train_batch, epochs=args.num_epochs, callbacks=callbacks,
                  verbose=1, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_data=val_batch)
    checkpoint_paph = "{}{}_{}x/g_rtsrgan/model.ckpt".format(args.ckpt_path, args.model, scale_factor)
    student.save_weights(checkpoint_paph)
    print("Evaluate model")
    # Evaluate student on test dataset
    eval = distiller.evaluate(test_batch, verbose=1, steps=test_steps)
    saved_model(distiller.student, 'saved_model/{}/'.format(args.model))
    return eval, distiller.student.get_run_time()
def train_rtvsrgan(train_batch, steps_per_epoch, validation_steps, val_batch, test_batch, test_steps, test_print, scale_factor, args, callbacks, checkpoint_paph, file_writer_cm, trainable_layer):
    """Adversarially train g_rtvsrgan with a relativistic-average discriminator.

    Builds generator + discriminator, composes perceptual/content/adversarial
    losses weighted by ``args.list_weights`` and trains the RaGAN pair with
    best-LPIPS checkpointing.

    Returns:
        (eval, run_time): GAN test metrics and the generator's run time.
    """
    g = g_rtvsrgan(scale_factor=scale_factor)
    g.build((None, None, None, 1))
    d = d_rtvsrgan(input_shape=(36 * scale_factor, 36 * scale_factor, 1))
    ra_d = rad_rtvsrgan(discriminator=d, shape_hr=(36 * scale_factor, 36 * scale_factor, 1))
    # Auxiliary loss used inside the VGG perceptual loss.
    # NOTE(review): an unrecognized args.loss_fn leaves `aux_loss` unbound.
    if args.loss_fn == "mse":
        aux_loss = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        aux_loss = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        aux_loss = tf.keras.losses.MeanAbsoluteError()
    cont_loss = tf.keras.losses.MeanSquaredError()
    shape_hr = (36 * scale_factor, 36 * scale_factor, 3)
    vgg_loss = VGGLoss(shape_hr, aux_loss)
    perc_loss = vgg_loss.custom_perceptual_loss
    adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
    lbd = args.list_weights[0]
    eta = args.list_weights[1]
    mu = args.list_weights[2]
    # NOTE(review): train_percsr calls GANLoss with 8 arguments; confirm the
    # 6-argument form here matches an overload/default of GANLoss.__init__.
    gan_loss = GANLoss(perc_loss, cont_loss, adv_loss, lbd, eta, mu)
    ra_gan = RaGAN(ra_discriminator=ra_d, generator=g)
    ra_gan.compile(d_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0),
                   g_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0),
                   ra_d_loss=gan_loss.discriminator_loss,
                   g_loss=gan_loss.generator_loss,
                   metrics=[psnr, ssim, rmse, lpips])
    if (args.load_weights):
        # Warm-start the generator from the plain (non-adversarial) checkpoint.
        print("Loading weights...")
        checkpoint_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, 'g_rtvsrgan', scale_factor)
        ra_gan.load_weights_gen(checkpoint_paph)
        trainable_layers(g, len(g.layers) - 1)
        trainable_weights(g)
    # Periodically renders sample SR images to TensorBoard during training.
    save_img_callback = SaveImageCallback(
        model=g,
        model_name=args.model,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    checkpoint_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, args.model, scale_factor)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_paph,
        save_weights_only=True,
        monitor='val_lpips',
        save_freq='epoch',
        mode='min',
        save_best_only=True)
    callbacks.append(checkpoint_callback)
    ra_gan.fit(train_batch, epochs=args.num_epochs, callbacks=callbacks, verbose=1, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_data=val_batch)
    checkpoint_paph = "{}{}_{}x/g_rtvsrgan/model.ckpt".format(args.ckpt_path, args.model, scale_factor)
    ra_gan.save_weights_gen(checkpoint_paph)
    print("Evaluate model")
    eval = ra_gan.evaluate(test_batch, verbose=1)
    saved_model(ra_gan.generator, 'saved_model/{}/'.format(args.model))
    # BUG FIX: RaGAN is constructed with `generator=g` and has no `student`
    # attribute (that belongs to Distiller); the original `ra_gan.student`
    # raised AttributeError here. Use the generator, as train_percsr does.
    return eval, ra_gan.generator.get_run_time()
def model_generator(args=None, scale_factor=None):
    """Instantiate the generator network selected by ``args.generator``.

    Exits the process with status 1 when the name is not recognized,
    mirroring the behavior of the other CLI entry points in this script.
    """
    # Lazy constructors so only the selected network is ever built.
    factories = {
        'espcn': lambda: espcn(scale_factor=scale_factor),
        'g_rtsrgan': lambda: g_rtsrgan(scale_factor=scale_factor),
        'imdn': lambda: IMDN(scale_factor=scale_factor),
        'evsrnet': lambda: EVSRNet(scale_factor=scale_factor, method=args.inter_method),
        'g_rtvsrgan': lambda: g_rtvsrgan(scale_factor=scale_factor),
        'teacher': lambda: Teacher(channels=1, scale_factor=scale_factor,
                                   distillation_rate=args.distillation_rate),
    }
    build = factories.get(args.generator)
    if build is None:
        exit(1)
    return build()
def print_hot_test(lr_hot_test_path, hr_hot_test_path, model=None, model_name=None, args=None, scale_factor=2):
    """Render the given LR/HR test pairs through ``model`` and return the
    per-image inference times reported by ``plot_test_images``."""
    return plot_test_images(
        model,
        lr_hot_test_path,
        hr_hot_test_path,
        args.test_logdir,
        scale_factor=scale_factor,
        model_name=model_name,
        epoch=0,
    )
def get_test_dataset(model, scale_factor, args):
    """Evaluate ``model`` on every test cluster selected in ``args.test_cluster``.

    The original implementation repeated an identical ~40-line branch for the
    'generic', 'game', 'sport' and 'podcast' clusters; the shared logic now
    lives in :func:`_evaluate_test_cluster`.

    NOTE(review): ``bic`` is hard-coded True, so each selected cluster only
    prints the bicubic-baseline metrics and then terminates the process —
    exactly as the original code did.
    """
    bic = True
    for cluster in ('generic', 'game', 'sport', 'podcast'):
        if cluster in args.test_cluster:
            _evaluate_test_cluster(model, scale_factor, args, cluster, bic)


def _evaluate_test_cluster(model, scale_factor, args, cluster, bic):
    """Run the per-cluster evaluation shared by all test clusters.

    Args:
        model: compiled Keras model to evaluate.
        scale_factor: upscaling factor (used in names and metric computation).
        args: parsed CLI arguments.
        cluster: cluster name, one of 'generic'/'game'/'sport'/'podcast'.
        bic: when True, only print bicubic baseline metrics and exit(1).
    """
    # Build the tf.data pipeline for this cluster's packed test dataset.
    test_dataset_path = test_datasets['test_' + cluster]['test_dataset_path']
    test_dataset_info_path = test_datasets['test_' + cluster]['test_dataset_info_path']
    test_dataset = Dataset(
        args.test_batch_size,
        test_dataset_path,
        test_dataset_info_path,
        args.shuffle_buffer_size)
    test_dataset = test_dataset.get_data()
    # Keep only (lr, hr) from the 4-tuple records and normalize both.
    test_batch = test_dataset.map(lambda x0, x1, x2, y: (scale(x1), scale(y)))
    # Human-readable run identifier: <model>[_<generator>]_<set>_<scale>X_q<q>.
    suffix = "_{}_{}X_q{}".format(
        str(test_dataset_path).split('/')[3],
        str(scale_factor),
        str(test_dataset_path).split('_q')[-1])
    if args.generator != None:
        name_dataset = args.model + '_' + args.generator + suffix
    else:
        name_dataset = args.model + suffix
    print(name_dataset, args.path_to_eval)
    lr_path = test['test_' + cluster]['lr_test_path']
    hr_path = test['test_' + cluster]['hr_test_path']
    # First directory level under each root that actually contains files.
    lr_paths = sorted([[dp + filename for filename in filenames]
                       for dp, dn, filenames in os.walk(lr_path) if len(filenames) != 0][0])
    hr_paths = sorted([[dp + filename for filename in filenames]
                       for dp, dn, filenames in os.walk(hr_path) if len(filenames) != 0][0])
    if (bic):
        # Baseline-only mode: report bicubic metrics and stop the process.
        print_metrics(lr_paths, hr_paths, scale_factor=scale_factor)
        exit(1)
    # (Dead locals from the original — unused test_steps computation, logdir and
    # the hot-test image lists feeding commented-out plot_images calls — were
    # removed during the refactor.)
    run_time = print_hot_test(lr_paths, hr_paths, model=model, model_name=args.model,
                              args=args, scale_factor=scale_factor)
    eval = model.evaluate(test_batch, verbose=1)
    print_eval(args.path_to_eval, eval, name_dataset, stat.mean(run_time))
def train_percsr(train_batch, steps_per_epoch, validation_steps, val_batch, test_batch, test_steps, test_print, scale_factor, args, callbacks, checkpoint_paph, file_writer_cm, trainable_layer):
    """Train a generator adversarially (PercSR) with a relativistic-average
    discriminator and a pretrained Teacher providing distillation guidance.

    Combines pixel, VGG-feature, distillation and adversarial losses weighted
    by ``args.list_weights`` (alfa, eta, lbd, mu). Supports evaluation-only
    mode, warm-starting the generator, and resuming perceptual training.

    Returns:
        (eval, run_time): GAN test metrics and the generator's run time.
    """
    g = model_generator(scale_factor=scale_factor, args=args)
    g.build((None, None, None, 1))  # NHWC, single channel
    d = d_percsr(input_shape=(36 * scale_factor, 36 * scale_factor, 1))
    ra_d = rad_percsr(discriminator=d, shape_hr=(36 * scale_factor, 36 * scale_factor, 1))
    # Auxiliary loss used inside the VGG perceptual loss.
    # NOTE(review): an unrecognized args.loss_fn leaves `aux_loss` unbound.
    if args.loss_fn == "mse":
        aux_loss = tf.keras.losses.MeanSquaredError()
    if args.loss_fn == "huber":
        aux_loss = tf.keras.losses.Huber()
    if args.loss_fn == "mae":
        aux_loss = tf.keras.losses.MeanAbsoluteError()
    loss_pix = tf.keras.losses.MeanSquaredError()
    shape_hr = (36 * scale_factor, 36 * scale_factor, 3)
    vgg_loss = VGGLoss(shape_hr, aux_loss)
    loss_fea = vgg_loss.custom_perceptual_loss
    loss_dis = tf.keras.losses.MeanAbsoluteError()
    adv_loss = tf.keras.losses.BinaryCrossentropy(from_logits=False)
    # Loss-term weights, in the order expected by GANLoss.
    alfa = args.list_weights[0]
    eta = args.list_weights[1]
    lbd = args.list_weights[2]
    mu = args.list_weights[3]
    gan_loss = GANLoss(loss_pix, loss_fea, loss_dis, adv_loss, alfa, eta, lbd, mu)
    # Frozen Teacher restored from its own checkpoint for distillation.
    teacher = Teacher(channels=1, scale_factor=scale_factor, distillation_rate=args.distillation_rate)
    print("Loading teacher weights...")
    weights_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, 'teacher', scale_factor)
    teacher.load_weights(weights_paph)
    teacher.build((None, None, None, 1))
    ra_gan = PercSR(ra_discriminator=ra_d, generator=g, teacher=teacher)
    ra_gan.compile(d_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0),
                   g_optimizer=tf.keras.optimizers.Adam(learning_rate=args.learning_rate, clipnorm=1.0),
                   perc_loss=gan_loss.generative_loss,
                   metrics=[psnr, ssim, rmse, lpips])
    if (args.eval == True):
        # Evaluation-only mode: restore weights, test the generator, exit.
        print("Loading weights...")
        checkpoint_paph = "{}{}_{}x/{}/model.ckpt".format(args.ckpt_path, args.model, scale_factor, args.generator)
        ra_gan.load_weights(checkpoint_paph)
        print("Evaluate model")
        g.compile(metrics=[psnr, ssim, rmse, lpips])
        get_test_dataset(g, scale_factor, args)
        exit(1)
    if (args.load_weights):
        # Warm-start the generator from its standalone checkpoint.
        print("Loading weights...")
        checkpoint_paph = "{}{}_{}x/model.ckpt".format(args.ckpt_path, args.generator, scale_factor)
        ra_gan.load_weights_gen(checkpoint_paph)
        # trainable_layers(g, len(g.layers)-1)
        trainable_weights(g)
    if (args.load_weights_perc):
        # Resume a previous perceptual-training run and freeze generator layers
        # up to (but not including) `trainable_layer`.
        print("Loading weights perceptual...")
        checkpoint_paph = "{}{}_{}x/{}/model.ckpt".format(args.ckpt_path, args.model, scale_factor, args.generator)
        ra_gan.load_weights(checkpoint_paph)
        for i in range(len(g.layers)):
            print("Camada: {}".format(g.layers[i].name))
            if (g.layers[i].name == trainable_layer):
                break
            else:
                g.layers[i].trainable = False
        # trainable_layers(g, len(g.layers)-1)
        trainable_weights(g)
    # Periodically renders sample SR images to TensorBoard during training.
    save_img_callback = SaveImageCallback(
        model=g,
        model_name=args.model + '_' + args.generator,
        scale_factor=scale_factor,
        epochs_per_save=args.epochs_per_save,
        lr_paths=test_print[0],
        hr_paths=test_print[1],
        log_dir=args.test_logdir,
        file_writer_cm=file_writer_cm)
    callbacks.append(save_img_callback)
    checkpoint_paph = "{}{}_{}x/{}/model.ckpt".format(args.ckpt_path, args.model, scale_factor, args.generator)
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_paph,
        save_weights_only=True,
        monitor='val_lpips',
        save_freq='epoch',
        mode='min',
        save_best_only=True)
    callbacks.append(checkpoint_callback)
    ra_gan.fit(train_batch, epochs=args.num_epochs, callbacks=callbacks, verbose=1, steps_per_epoch=steps_per_epoch, validation_steps=validation_steps, validation_data=val_batch)
    checkpoint_paph = "{}{}_{}x/{}/{}/model.ckpt".format(args.ckpt_path, args.model, scale_factor, args.generator, 'generator')
    ra_gan.save_weights_gen(checkpoint_paph)
    print("Evaluate model")
    eval = ra_gan.evaluate(test_batch, verbose=1)
    saved_model(ra_gan.generator, 'saved_model/{}/'.format(args.model))
    return eval, ra_gan.generator.get_run_time()
# Script entry point: dispatch to the CLI driver.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4807885 | <reponame>knutsonchris/stacki
# @copyright@
# Copyright (c) 2006 - 2019 Teradata
# All rights reserved. Stacki(r) v5.x stacki.com
# https://github.com/Teradata/stacki/blob/master/LICENSE.txt
# @copyright@
#
# @rocks@
# Copyright (c) 2000 - 2010 The Regents of the University of California
# All rights reserved. Rocks(r) v5.4 www.rocksclusters.org
# https://github.com/Teradata/stacki/blob/master/LICENSE-ROCKS.txt
# @rocks@
import ipaddress
import stack.commands
from stack.exception import ParamRequired, CommandError
class Command(stack.commands.set.host.interface.command):
    """
    Sets the IP address for the named interface for one host.
    <arg type='string' name='host' optional='0'>
    A single host.
    </arg>
    <param type='string' name='interface'>
    Name of the interface.
    </param>
    <param type='string' name='mac'>
    MAC address of the interface.
    </param>
    <param type='string' name='network'>
    Network name of the interface.
    </param>
    <param type='string' name='ip' optional='0'>
    The IP address to set. Use ip=AUTO to let the system pick one for
    you or ip=NULL to clear the IP address.
    </param>
    <example cmd='set host interface ip backend-0-0 interface=eth1 ip=192.168.0.10'>
    Sets the IP Address for the eth1 device on host backend-0-0.
    </example>
    """

    def run(self, params, args):
        """Resolve the target interface, optionally auto-pick a free IP,
        and write the new address into the networks table."""
        host = self.getSingleHost(args)
        (ip, interface, mac, network) = self.fillParams([
            ('ip', None, True),
            ('interface', None),
            ('mac', None),
            ('network', None)
        ])

        # Gotta have one of these
        if not any([interface, mac, network]):
            raise ParamRequired(self, ('interface', 'mac', 'network'))

        # Make sure interface, mac, and/or network exist on our host
        self.validate([host], interface, mac, network)

        # If ip is an empty string or NULL, we are clearing it
        if not ip or ip.upper() == 'NULL':
            ip = None

        # See if we are picking the next IP address
        if ip and ip.upper() == 'AUTO':
            # We gotta have a network to get the IP space; if none was given,
            # infer it from the interface/mac the caller identified.
            if not network:
                for row in self.call('list.host.interface', []):
                    if (
                        (interface and row['interface'] == interface) or
                        (mac and row['mac'] == mac)
                    ):
                        network = row['network']
                        break

                # Make sure we were successful at getting a network
                if not network:
                    raise CommandError(self, 'unknown network for interface')

            # Now get our IP space
            data = self.call('list.network', [network])[0]
            ip_space = ipaddress.IPv4Network(f"{data['address']}/{data['mask']}")

            # And a set of all IPs already in use on this network
            existing = {
                row['ip']
                for row in self.call('list.host.interface', [])
                if row['network'] == network
            }

            # It would be bad to trample the gateway
            if data['gateway']:
                existing.add(data['gateway'])

            # Now run through the IP space and find the first unused IP
            ip = None
            for address in ip_space.hosts():
                if str(address) not in existing:
                    ip = str(address)
                    break

            # Error out if we couldn't find a free IP
            if not ip:
                raise CommandError(self, 'no free ip addresses left in the network')

        # Make the change in the DB. The base query differs depending on
        # whether a network constrains the lookup; interface/mac filters are
        # appended to either form below.
        if network:
            sql = """
                update networks,nodes,subnets set networks.ip=%s
                where nodes.name=%s and subnets.name=%s
                and networks.node=nodes.id and networks.subnet=subnets.id
            """
            values = [ip, host, network]
        else:
            sql = """
                update networks,nodes set networks.ip=%s
                where nodes.name=%s and networks.node=nodes.id
            """
            values = [ip, host]

        if interface:
            sql += " and networks.device=%s"
            values.append(interface)

        if mac:
            sql += " and networks.mac=%s"
            values.append(mac)

        self.db.execute(sql, values)
| StarcoderdataPython |
11372627 |
import abc
import enum
import typing as t
import weakref
from pathlib import Path
from nr.caching.api import KeyDoesNotExist
from nr.preconditions import check_instance_of, check_not_none
import craftr
from craftr.core.property import HavingProperties, collect_properties
from craftr.core.configurable import Closure
from craftr.core.util.collections import unique
from .state import calculate_task_hash, unwrap_file_property
if t.TYPE_CHECKING:
import craftr.core.actions
from craftr.core.actions import Action
from craftr.core.project import Project
# Namespace under which per-task hashes are stored in the context's
# key/value metadata store (see calculate_task_hash usage elsewhere).
TASK_HASH_NAMESPACE = 'task-hashes'


class TaskPropertyType(enum.Enum):
    # Markers classifying a task property as a plain input/output value or as
    # a file path that participates in up-to-date (hash) checks.
    Input = enum.auto()
    InputFile = enum.auto()
    Output = enum.auto()
    OutputFile = enum.auto()
class Task(abc.ABC):
    """
    The raw base class for tasks that represents a logically closed unit of work. It is common to subclass the
    #DefaultTask class instead.
    """

    #: A list of direct dependencies of this task.
    dependencies: t.List['Task']

    #: A list of actions to perform before any other actions in the task.
    do_first_actions: t.List['craftr.core.actions.Action']

    #: A list of actions to perform after any other actions in the task.
    do_last_actions: t.List['craftr.core.actions.Action']

    #: Whether the task should be included if no explicit set of tasks is selected for execution.
    #: This is `True` by default for all tasks (but can be overwritten by subclasses).
    default: bool = True

    #: A short description of the task.
    description: t.Optional[str] = None

    #: A name for the group that the task belongs to. Task groups are used to select tasks via
    #: common identifiers (e.g. `run`, `compile` or `debug` are generic terms that could apply to
    #: a variety of tasks).
    group: t.Optional[str] = None

    #: A boolean flag that indicates whether the task is always to be considered outdated.
    always_outdated: bool = False
    def __init__(self, project: 'Project', name: str) -> None:
        """Create the task under *project* with the given *name* and invoke #init()."""
        super().__init__()
        # Weak reference so a task does not keep its owning project alive.
        self._project = weakref.ref(project)
        self._name = name
        self._finalized = False
        self.dependencies = []
        self.do_first_actions: t.List['Action'] = []
        self.do_last_actions: t.List['Action'] = []
        self.init()
    def __repr__(self) -> str:
        """Debug representation showing the concrete type and fully qualified task path."""
        return f'{type(self).__name__}({self.path!r})'
    @property
    def project(self) -> 'Project':
        """The owning project; raises if the weakly referenced project was garbage collected."""
        return check_not_none(self._project(), 'lost reference to project')
    @property
    def name(self) -> str:
        """The task's name, unique within its project."""
        return self._name
@property
def path(self) -> str:
return f'{self.project.path}:{self.name}'
@property
def finalized(self) -> bool:
""" True if #finalize() was called. """
return self._finalized
def init(self) -> None:
"""
Called from #__init__().
"""
def finalize(self) -> None:
"""
Called to finalize the state of the task. Raises a #RuntimeError if the task has already been finalized.
"""
if self._finalized:
raise RuntimeError('Task already finalized')
self._finalized = True
def get_dependencies(self) -> t.List['Task']:
"""
Return a list of the task's dependencies. This does not not need to include #dependencies as they will be
taken into account by the executor automatically.
"""
return []
def get_actions(self) -> t.List['Action']:
"""
Return the actions that need to be executed for this task. This does not have to include #do_first_actions
and #do_last_actions as they will be handled separately by the executor.
"""
return []
def is_outdated(self) -> bool:
"""
Check if the task is outdated and needs to be re-run. This does not have to take into account #always_outdated,
because the executor can check it separately. The default implementation returns always #True.
Tasks should use the #Context.metadata_store to read and write previous information about itself.
"""
return True
def on_completed(self) -> None:
"""
Called when the task has finished executing.
"""
def depends_on(self, *tasks: t.Union[str, 'Task']) -> None:
"""
Specify that the task dependends on the specified other tasks. Strings are resolved from the tasks own project.
"""
for index, item in enumerate(tasks):
check_instance_of(item, (str, Task), lambda: 'task ' + str(index))
if isinstance(item, str):
self.dependencies += self.project.tasks.resolve(item)
elif isinstance(item, Task):
self.dependencies.append(item)
def do_first(self, action: t.Union['Action', Closure]) -> None:
from craftr.core.actions import Action, LambdaAction
check_instance_of(action, (Action, Closure), 'action')
if isinstance(action, Closure):
closure = action
action = LambdaAction(lambda context: closure(self, context).apply(self))
self.do_first_actions.append(action)
def do_last(self, action: t.Union['Action', Closure]) -> None:
from craftr.core.actions import Action, LambdaAction
check_instance_of(action, (Action, Closure), 'action')
if isinstance(action, Closure):
closure = action
action = LambdaAction(lambda context: closure(self, context).apply(self))
self.do_last_actions.append(action)
def __call__(self, closure: Closure) -> 'Task':
"""
Allows the task to be configured using a closure in Craftr DSL land.
"""
closure(self)
return self
class DefaultTask(Task, HavingProperties):
  """
  This task implementation is what is commonly used to implement custom tasks, as it provides capabilities to
  automatically deduce dependencies between tasks via property relationships (see #HavingProperties). If you
  use the property of one task to set the value of another, that first task becomes a dependency of the latter.
  Furthermore, the type of the property can define how the task's properties are handled in respect to its
  up-to-date calculation. E.g. if a property is marked as a #TaskPropertyType.OutputFile, the task is considered
  out-of-date if the output file does not exist or if any of the task's input files (marked with
  #TaskPropertyType.InputFile) have been changed since the output file was produced.
  """

  # Aliases so subclasses can annotate properties as e.g. `DefaultTask.Output`.
  Input = TaskPropertyType.Input
  InputFile = TaskPropertyType.InputFile
  Output = TaskPropertyType.Output
  OutputFile = TaskPropertyType.OutputFile

  def finalize(self) -> None:
    """
    Called to finalize the task. This is called automatically after the task is configured.
    Properties are finalized in this call. The subclass gets a chance to set any output properties
    that are derived other properties.
    """
    # NOTE(review): duplicates Task.finalize()'s guard instead of calling
    # super().finalize() -- presumably so the flag is set before the properties
    # are finalized; confirm before refactoring.
    if self._finalized:
      raise RuntimeError('Task already finalized')
    self._finalized = True
    for prop in self.get_properties().values():
      prop.finalize()

  def get_dependencies(self) -> t.List['Task']:
    """ Get all direct dependencies of the task, including those inherited through properties. """
    dependencies = self.dependencies[:]
    for prop in self.get_properties().values():
      # Output properties do not induce dependencies; for all other properties,
      # any Task that owns a property feeding into this one becomes a dependency.
      if TaskPropertyType.Output not in prop.annotations:
        dependencies.extend(t.cast(Task, p.owner) for p in collect_properties(prop) if isinstance(p.owner, Task))
    dependencies = list(unique(dependencies))
    # A task never depends on itself (can happen via self-referencing properties).
    try:
      dependencies.remove(self)
    except ValueError:
      pass
    return dependencies

  def is_outdated(self) -> bool:
    """
    Checks if the task is outdated.
    """
    if self.always_outdated:
      return True
    # Check if any of the output file(s) don't exist.
    for prop in self.get_properties().values():
      _is_input, is_output, files = unwrap_file_property(prop)
      if is_output and any(not Path(f).exists() for f in files):
        return True
    # TODO(NiklasRosenstein): If the task has no input file properties or does not produce output
    #   files should always be considered as outdated.
    # Compare the current property-derived hash against the one stored after the
    # last successful run (see on_completed()).
    hash_value = calculate_task_hash(self)
    try:
      stored_hash: t.Optional[str] = self.project.context.metadata_store.\
          namespace(TASK_HASH_NAMESPACE).load(self.path).decode()
    except KeyDoesNotExist:
      # No record of a previous run -> treat as outdated.
      stored_hash = None
    return hash_value != stored_hash

  def on_completed(self) -> None:
    """
    Called when the task was executed.
    """
    # Persist the task hash so the next is_outdated() check can compare against it.
    # Always-outdated tasks skip this; their hash would never be consulted.
    if not self.always_outdated:
      self.project.context.metadata_store.\
          namespace(TASK_HASH_NAMESPACE).store(self.path, calculate_task_hash(self).encode())
| StarcoderdataPython |
8013989 | import itertools
def longestWPI(hours: list[int]) -> int:
    """Return the length of the longest "well-performing interval".

    A day is *tiring* when it has more than 8 working hours; an interval is
    well-performing when its tiring days strictly outnumber the non-tiring
    ones (LeetCode 1124).

    Bug fix: the original return statement had dataset-concatenation junk
    fused onto it (``return highest | StarcoderdataPython``), which raised a
    NameError at runtime. The algorithm is also rewritten as the canonical
    single forward pass over prefix scores.

    Args:
        hours: Hours worked per day.

    Returns:
        Length of the longest well-performing interval; 0 if none exists.
    """
    score = 0  # running sum of +1 (tiring day) / -1 (non-tiring day)
    first_seen: dict[int, int] = {}  # score value -> earliest day index it occurred
    best = 0
    for day, worked in enumerate(hours):
        score += 1 if worked > 8 else -1
        if score > 0:
            # The whole prefix [0..day] is well-performing.
            best = day + 1
        elif score - 1 in first_seen:
            # The interval after the first index with sum score-1 has sum +1,
            # i.e. one more tiring than non-tiring day.
            best = max(best, day - first_seen[score - 1])
        # Record only the first occurrence of each score (longest intervals).
        first_seen.setdefault(score, day)
    return best
8178305 | from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.fields import GenericForeignKey
from django.db import models
class Pigeon(models.Model):
    """ A pigeon is a message that will be delivered to a number of users """
    # Reference to the object we are sending an email about.
    # Fix: on_delete is stated explicitly. CASCADE was the implicit default
    # before Django 2.0 and the argument is mandatory from 2.0 on, so this
    # preserves the original behaviour while keeping the model importable on
    # modern Django.
    source_content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    # Assumes the models have an integer primary key
    source_id = models.PositiveIntegerField(null=True, blank=True)
    source = GenericForeignKey('source_content_type', 'source_id')
    successes = models.IntegerField(default=0,
        help_text="Number of successful messages sent.")
    failures = models.IntegerField(default=0,
        help_text="Number of errors encountered while sending.")
    to_send = models.BooleanField(default=True,
        help_text="Whether this object should be sent (some time in the future).")
    sent_at = models.DateTimeField(null=True, blank=True,
        help_text="Indicates the time that this job was sent.")
    # Explicit CASCADE mirrors the pre-2.0 default (see note above).
    send_to = models.ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE,
        help_text="If specified, we call only call render_email_method for this user.")
    send_to_method = models.TextField(null=True, blank=True,
        help_text="If specified, we call send_to_method to get the users that will be called with render_email_method.")
    render_email_method = models.TextField(default="render_email",
        help_text="The name of the method to be called on the sender to generates an EmailMessage for each User.")
    scheduled_for = models.DateTimeField(
        help_text="The datetime when emails should be sent. Defaults to ASAP.")

    class Meta:
        ordering = ['scheduled_for']
class Outbox(models.Model):
    """ A per-user delivery record produced from a Pigeon. """
    # Fix: explicit on_delete=CASCADE matches the implicit pre-Django-2.0
    # default and is required from Django 2.0 onwards.
    pigeon = models.ForeignKey(Pigeon, null=True, blank=True, on_delete=models.CASCADE)
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    message = models.TextField()
    sent_at = models.DateTimeField(null=True, blank=True)
    succeeded = models.BooleanField(default=False)
    failures = models.IntegerField(default=0)

    class Meta:
        unique_together = ('pigeon', 'user')
        ordering = ['sent_at']
        verbose_name_plural = 'outboxes'
| StarcoderdataPython |
1806481 | # This code is taken from https://github.com/open-mmlab/mmediting
# Modified by <NAME>
import torch
import torch.nn as nn
from mmcv.parallel import MODULE_WRAPPERS, MMDistributedDataParallel
from mmcv.parallel.scatter_gather import scatter_kwargs
from torch.cuda._utils import _get_device_index
@MODULE_WRAPPERS.register_module()
class DistributedDataParallelWrapper(nn.Module):
    """A DistributedDataParallel wrapper for models.

    In mmderain, there is a need to wrap different modules in the models
    with separate DistributedDataParallel. Otherwise, it will cause
    errors for GAN training.
    More specific, the GAN model, usually has two sub-modules:
    generator and discriminator. If we wrap both of them in one
    standard DistributedDataParallel, it will cause errors during training,
    because when we update the parameters of the generator (or discriminator),
    the parameters of the discriminator (or generator) is not updated, which is
    not allowed for DistributedDataParallel.
    So we design this wrapper to separately wrap DistributedDataParallel
    for generator and discriminator.

    In this wrapper, we perform two operations:
    1. Wrap the modules in the models with separate MMDistributedDataParallel.
       Note that only modules with parameters will be wrapped.
    2. Do scatter operation for 'forward', 'train_step' and 'val_step'.

    Note that the arguments of this wrapper is the same as those in
    `torch.nn.parallel.distributed.DistributedDataParallel`.

    Args:
        module (nn.Module): Module that needs to be wrapped.
        device_ids (list[int | `torch.device`]): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
        dim (int, optional): Same as that in the official scatter function in
            pytorch. Defaults to 0.
        broadcast_buffers (bool): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
            Defaults to False.
        find_unused_parameters (bool, optional): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
            Traverse the autograd graph of all tensors contained in returned
            value of the wrapped module’s forward function. Defaults to False.
        kwargs (dict): Other arguments used in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
    """

    def __init__(self,
                 module,
                 device_ids,
                 dim=0,
                 broadcast_buffers=False,
                 find_unused_parameters=False,
                 **kwargs):
        super().__init__()
        # One process == one GPU is assumed throughout this wrapper.
        assert len(device_ids) == 1, (
            'Currently, DistributedDataParallelWrapper only supports one'
            'single CUDA device for each process.'
            f'The length of device_ids must be 1, but got {len(device_ids)}.')
        self.module = module
        self.dim = dim
        # Wrap the child modules in-place before any forward call.
        self.to_ddp(
            device_ids=device_ids,
            dim=dim,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=find_unused_parameters,
            **kwargs)
        self.output_device = _get_device_index(device_ids[0], True)

    def to_ddp(self, device_ids, dim, broadcast_buffers,
               find_unused_parameters, **kwargs):
        """Wrap models with separate MMDistributedDataParallel.

        It only wraps the modules with parameters.
        """
        for name, module in self.module._modules.items():
            if next(module.parameters(), None) is None:
                # Parameter-less module: nothing to synchronize, just move to GPU.
                module = module.cuda()
            elif all(not p.requires_grad for p in module.parameters()):
                # All parameters frozen: DDP gradient sync is unnecessary.
                module = module.cuda()
            else:
                # Trainable module: wrap in its own DDP instance.
                module = MMDistributedDataParallel(
                    module.cuda(),
                    device_ids=device_ids,
                    dim=dim,
                    broadcast_buffers=broadcast_buffers,
                    find_unused_parameters=find_unused_parameters,
                    **kwargs)
            # Replace the child module in-place on the wrapped model.
            self.module._modules[name] = module

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
            device_ids (int): Device id.
        """
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        """Forward function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        # Scatter onto this process's single device; index [0] selects its shard.
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])

    def train_step(self, *inputs, **kwargs):
        """Train step function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.train_step(*inputs[0], **kwargs[0])
        return output

    def val_step(self, *inputs, **kwargs):
        """Validation step function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for ``scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.val_step(*inputs[0], **kwargs[0])
        return output
| StarcoderdataPython |
6478438 | <filename>stubs.min/System/Windows/__init___parts/StaticResourceExtension.py
# NOTE: Auto-generated IronPython interop stub for WPF's
# System.Windows.StaticResourceExtension; method bodies are placeholders and
# the real implementation lives in the .NET assembly.
class StaticResourceExtension(MarkupExtension):
    """
    Implements a markup extension that supports static (XAML load time) resource references made from XAML.

    StaticResourceExtension(resourceKey: object)
    StaticResourceExtension()
    """
    def ProvideValue(self, serviceProvider):
        """
        ProvideValue(self: StaticResourceExtension,serviceProvider: IServiceProvider) -> object

        Returns an object that should be set on the property where this extension is
        applied. For System.Windows.StaticResourceExtension,this is the object found
        in a resource dictionary,where the object to find is identified by the
        System.Windows.StaticResourceExtension.ResourceKey.

        serviceProvider: Object that can provide services for the markup extension.
        Returns: The object value to set on the property where the markup extension provided
        value is evaluated.
        """
        # Stub body; dispatched to the CLR implementation at runtime.
        pass

    @staticmethod
    def __new__(self, resourceKey=None):
        """
        __new__(cls: type)
        __new__(cls: type,resourceKey: object)
        """
        # Stub body; the CLR constructor overloads are listed above.
        pass

    # Generated property stub; the lambdas are placeholders for the CLR getter/setter.
    ResourceKey = property(lambda self: object(), lambda self, v: None, lambda self: None)
    """Gets or sets the key value passed by this static resource reference. They key is used to return the object matching that key in resource dictionaries.

    Get: ResourceKey(self: StaticResourceExtension) -> object

    Set: ResourceKey(self: StaticResourceExtension)=value
    """
| StarcoderdataPython |
1914311 | <filename>e2e/cfgs/gen_single_im_cfgs.py
import os
import yaml
def gen_e2e_single(base_cfg_fname, yaml_out_dir,
                   dataset, model_depth, model_type,
                   data_type, data_loader,
                   yaml_out_fname=None):
    """Render one end-to-end single-model YAML config from the base template.

    Loads *base_cfg_fname*, fills in model/engine/data paths for the given
    (dataset, model_depth, model_type, data_type, data_loader) combination and
    writes the result to *yaml_out_dir* (or *yaml_out_fname* if given).

    NOTE(review): `model_type` is a double-templated string like
    'fullres_{}_ft.bs{{}}.onnx' -- the first .format() fills the depth, the
    second fills the batch size; confirm new templates follow this shape.
    """
    with open(base_cfg_fname, 'r') as f:
        cfg = yaml.safe_load(f.read())
    model_fname = model_type.format(model_depth)
    # Base name (without extension) with the batch size baked in, used for the
    # output YAML file name.
    model_base = os.path.splitext(model_fname)[0].format(64)
    os.makedirs(yaml_out_dir, exist_ok=True)
    if yaml_out_fname is None:
        yaml_out_fname = os.path.join(
            yaml_out_dir, '{}-{}-{}.yaml'.format(
                data_type, data_loader, model_base))
    print(yaml_out_fname)
    # Batch-size-64 model for throughput runs, batch-size-1 for latency runs.
    model_bs64 = model_fname.format(64)
    model_bs1 = model_fname.format(1)
    cfg['model-config']['model-single']['onnx-path'] = \
        cfg['model-config']['model-single']['onnx-path'].format(dataset, model_bs64)
    cfg['model-config']['model-single']['onnx-path-bs1'] = \
        cfg['model-config']['model-single']['onnx-path-bs1'].format(dataset, model_bs1)
    # Derive the TensorRT engine name from the bs64 ONNX name; note this
    # replaces the literal substring 'onnx' (the extension) in the file name.
    engine_fname = model_bs64.replace(
        'onnx', '{loader}-{dtype}.engine'.format(
            loader=data_loader, dtype=data_type))
    cfg['model-config']['model-single']['engine-path'] = \
        cfg['model-config']['model-single']['engine-path'].format(
            dataset, engine_fname)
    cfg['model-config']['model-single']['data-path'] = \
        cfg['model-config']['model-single']['data-path'].format(dataset, data_type)
    cfg['model-config']['model-single']['data-loader'] = data_loader
    if dataset == 'imagenet':
        # ImageNet runs are scaled up relative to the smaller datasets.
        cfg['experiment-config']['multiplier'] = 4
    with open(yaml_out_fname, 'w') as f:
        yaml.dump(cfg, f)
# FIXME: full only?
# FIXME: full only?
def gen_preproc_ablation(base_cfg_fname, dataset, condition, data_loader):
    """Render a preprocessing-ablation YAML config for *dataset*.

    Uses the full-resolution ResNet-18 model; inference and result writing are
    disabled so only the preprocessing stage named by *condition* is measured.
    """
    with open(base_cfg_fname, 'r') as f:
        cfg = yaml.safe_load(f.read())
    yaml_out_dir = os.path.join(dataset, 'preproc-ablation')
    os.makedirs(yaml_out_dir, exist_ok=True)
    yaml_out_fname = os.path.join(
        yaml_out_dir, 'full-{}-{}.yaml'.format(data_loader, condition))
    print(yaml_out_fname)
    # Ablations always run on the full-resolution RN18 fine-tuned model.
    cfg['model-config']['model-single']['onnx-path'] = \
        cfg['model-config']['model-single']['onnx-path'].format(dataset, 'fullres_rn18_ft.bs64.onnx')
    cfg['model-config']['model-single']['onnx-path-bs1'] = \
        cfg['model-config']['model-single']['onnx-path-bs1'].format(dataset, 'fullres_rn18_ft.bs1.onnx')
    cfg['model-config']['model-single']['engine-path'] = \
        cfg['model-config']['model-single']['engine-path'].format(dataset, 'fullres_rn18_ft.engine')
    cfg['model-config']['model-single']['data-path'] = \
        cfg['model-config']['model-single']['data-path'].format(dataset, 'full')
    cfg['model-config']['model-single']['data-loader'] = data_loader
    # Only preprocessing is benchmarked: skip inference and output writing.
    cfg['experiment-config']['run-infer'] = False
    cfg['experiment-config']['write-out'] = False
    cfg['experiment-config']['exp-type'] = condition
    if dataset == 'imagenet':
        # ImageNet runs are scaled up relative to the smaller datasets.
        cfg['experiment-config']['multiplier'] = 4
    with open(yaml_out_fname, 'w') as f:
        yaml.dump(cfg, f)
def gen_tahoma_single(base_cfg_fname, yaml_out_dir,
                      dataset, model_id,
                      yaml_out_fname=None):
    """Render a YAML config for one Tahoma cascade model (*model_id* is '0'..'7').

    Tahoma models always use the naive loader on full-resolution data.
    """
    with open(base_cfg_fname, 'r') as f:
        cfg = yaml.safe_load(f.read())
    os.makedirs(yaml_out_dir, exist_ok=True)
    if yaml_out_fname is None:
        yaml_out_fname = os.path.join(
            yaml_out_dir, '{}.yaml'.format(model_id))
    print(yaml_out_fname)
    # FIXME: HORRIBLE HACK
    # Models 0-3 take 30x30 inputs (resized from 34x34); the remaining models
    # keep the template's default dimensions.
    if int(model_id) < 4:
        cfg['model-config']['model-single']['input-dim'] = [30, 30]
        cfg['model-config']['model-single']['resize-dim'] = [34, 34]
    # NOTE(review): absolute paths are hard-coded here, unlike gen_e2e_single
    # which templates them from the base config -- confirm this is intended.
    cfg['model-config']['model-single']['onnx-path'] = \
        '/lfs/1/ddkang/vision-inf/data/models/tahoma/{}/{}.bs64.onnx'.format(dataset, model_id)
    cfg['model-config']['model-single']['onnx-path-bs1'] = \
        '/lfs/1/ddkang/vision-inf/data/models/tahoma/{}/{}.bs1.onnx'.format(dataset, model_id)
    # Replaces the literal substring 'onnx' (the extension) in the path.
    engine_fname = cfg['model-config']['model-single']['onnx-path'].replace(
        'onnx', '{loader}-{dtype}.engine'.format(
            loader='naive', dtype='full'))
    cfg['model-config']['model-single']['engine-path'] = engine_fname
    cfg['model-config']['model-single']['data-path'] = \
        cfg['model-config']['model-single']['data-path'].format(dataset, 'full')
    cfg['model-config']['model-single']['data-loader'] = 'naive'
    if dataset == 'imagenet':
        # ImageNet runs are scaled up relative to the smaller datasets.
        cfg['experiment-config']['multiplier'] = 4
    with open(yaml_out_fname, 'w') as f:
        yaml.dump(cfg, f)
def main():
    """Generate every YAML experiment configuration: end-to-end single-model
    runs, preprocessing ablations, and Tahoma cascade models."""
    base_cfg = './im-single-full-base.yaml'
    all_datasets = ['bike-bird', 'birds-200', 'animals-10', 'imagenet']
    model_templates = ['fullres_{}_ft.bs{{}}.onnx',
                       'thumbnail_{}_upsample_ft.bs{{}}.onnx']
    type_loader_pairs = [('full', 'naive'), ('full', 'opt-jpg'),
                         ('161-jpeg-75', 'opt-jpg'), ('161-jpeg-95', 'opt-jpg'),
                         ('161-png', 'opt-png')]

    # One end-to-end config per (dataset, depth, template, data-type/loader).
    for ds in all_datasets:
        for depth in ['rn18', 'rn34', 'rn50']:
            for template in model_templates:
                for dtype, loader in type_loader_pairs:
                    gen_e2e_single(base_cfg, os.path.join(ds, 'full'),
                                   ds, depth, template,
                                   dtype, loader)

    # Preprocessing-ablation configs.
    for ds in all_datasets:
        for cond in ['decode-only', 'decode-resize', 'decode-resize-norm', 'all']:
            for loader in ['opt-jpg', 'naive']:
                gen_preproc_ablation(base_cfg, ds, cond, loader)

    # Tahoma cascade-model configs (model ids '0'..'7').
    for ds in all_datasets:
        for mid in map(str, range(8)):
            gen_tahoma_single(base_cfg, os.path.join('tahoma', ds), ds, mid)
| StarcoderdataPython |
3258909 | <gh_stars>0
from typing import Sequence
from numbers import Number
from tabulate import tabulate
class Matrix(Sequence):
    """A 2D matrix of floats stored as a list of row lists.

    Supports ``+``, ``-``, unary ``-``, matrix-matrix and matrix-vector
    multiplication, and scalar multiplication from the left (``2 * m``).
    Matrix-vector products return a plain ``list`` of floats; all other
    operations return a new ``Matrix`` (the original interface is preserved).
    """

    def __init__(self, matrix: Sequence[Sequence[float]]):
        """Copy *matrix*, converting every entry to ``float``.

        Bug fix: the original assert tested ``isinstance(matrix, Sequence)``
        twice; the second check was clearly meant to validate the rows.
        """
        assert (isinstance(matrix, Sequence) and
                all(isinstance(row, Sequence) for row in matrix)), "Wrong data"
        self.__matrix = [[float(x) for x in row] for row in matrix]

    @staticmethod
    def one(rows: int, columns: int):
        """Return an identity-like ``rows`` x ``columns`` plain list of lists
        (1 on the main diagonal, 0 elsewhere)."""
        return [
            [1 if i == j else 0 for j in range(columns)] for i in range(rows)
        ]

    @staticmethod
    def zero(rows: int, columns: int):
        """Return an all-zero ``rows`` x ``columns`` plain list of lists."""
        return [[0] * columns for _ in range(rows)]

    def __repr__(self):
        return 'Matrix({})'.format(self.__matrix)

    def __str__(self):
        # Pretty table rendering; requires the third-party `tabulate` package.
        return tabulate(self.__matrix)

    def __len__(self):
        """Number of rows."""
        return len(self.__matrix)

    def __getitem__(self, item):
        """Index/slice rows; a row is a plain list of floats."""
        return self.__matrix.__getitem__(item)

    def __iter__(self):
        return iter(self.__matrix)

    def __mul__(self, other):
        """Multiply by a matrix (returns ``Matrix``) or a vector (returns ``list``)."""
        assert isinstance(other, Sequence)
        # Our column count must equal the number of rows / elements of `other`.
        assert len(self.__matrix[0]) == len(other), "Wrong data"
        if isinstance(other[0], Sequence):
            # Matrix x matrix: standard triple comprehension.
            return Matrix([
                [
                    sum(self[i][k] * other[k][j] for k in range(len(other))) for j in range(len(other[0]))
                ] for i in range(len(self))
            ])
        else:
            # Matrix x vector: one dot product per row.
            return [
                sum(x * y for x, y in zip(row, other)) for row in self
            ]

    def __rmul__(self, other):
        """Scalar multiplication from the left, e.g. ``2 * m``."""
        assert isinstance(other, Number)
        return Matrix([
            [other * x for x in row] for row in self.__matrix
        ])

    def __add__(self, other):
        assert (isinstance(other, Sequence) and
                isinstance(other[0], Sequence) and
                len(self) == len(other) and
                len(self[0]) == len(other[0])), "Wrong data"
        return Matrix([
            [x + y for x, y in zip(r1, r2)] for r1, r2 in zip(self.__matrix, other)
        ])

    def __neg__(self):
        return Matrix([
            [-x for x in row] for row in self.__matrix
        ])

    def __sub__(self, other):
        # Bug fix: the original assert required `other` to be square
        # (len(other) == len(row) for each of its rows) instead of requiring
        # it to match our own shape; use the same check as __add__.
        assert (isinstance(other, Sequence) and
                isinstance(other[0], Sequence) and
                len(self) == len(other) and
                len(self[0]) == len(other[0])), "Wrong data"
        return Matrix([
            [x - y for x, y in zip(r1, r2)] for r1, r2 in zip(self, other)
        ])

    @property
    def shape(self):
        """``(rows, columns)`` tuple."""
        return len(self.__matrix), len(self.__matrix[0])
if __name__ == '__main__':
    # Ad-hoc demo: multiply a 2x3 matrix by a 3x3 matrix and print the result.
    # Note: printing uses Matrix.__str__, which requires `tabulate` to be installed.
    m = Matrix([[1, 2, 1], [2, 3, 0]])
    a = Matrix([[1, 0, 0], [2, 1, 0], [1, 1, 0]])
    # print(m, m.shape)
    # print(a, a.shape)
    print(m * a)
| StarcoderdataPython |
8096102 | from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('restrito:home').
app_name = 'restrito'

# Bug fix: dataset-concatenation junk ("| StarcoderdataPython") was fused onto
# the closing bracket of this list, which made the module raise NameError on
# import; it has been removed. Route order is preserved.
urlpatterns = [
    path('', views.home, name='home'),
    # Enrolment (matrícula) listing / request / removal.
    path('matriculas/', views.matricula_lista, name="matricula_lista"),
    path('matriculas/solicitar/', views.matricula_solicitar, name="matricula_solicitar"),
    path('matriculas/solicitar/<int:id_do>/', views.matricula_solicitar, name="matricula_solicitar"),
    path('matriculas/remover/<int:id_sm>/', views.matricula_remover, name="matricula_remover"),
    # Class (turma) pages and their linked activities / submissions.
    path('turma/<int:id_do>/', views.turma, name='turma'),
    path('turma/<int:id_do>/atividade/', views.atividade_vinculada_form, name='vinculada_form'),
    path('turma/<int:id_do>/atividade/<int:id_vin>/', views.atividade_vinculada_form, name='vinculada_form'),
    path('turma/<int:id_do>/atividade/<int:id_vin>/remover/', views.atividade_vinculada_remover, name='vinculada_remover'),
    path('turma/<int:id_do>/atividade/<int:id_vin>/entregas/', views.entrega_listar, name='entrega_lista'),
    path('turma/<int:id_do>/atividade/<int:id_vin>/entregar/', views.entrega_form, name='entrega_form'),
    # Activity (atividade) CRUD.
    path('atividades/', views.atividade_lista, name='atividade_lista'),
    path('atividades/form/', views.atividade_form, name='atividade_form'),
    path('atividades/form/<int:id>/', views.atividade_form, name='atividade_form'),
    path('atividades/remover/<int:id>/', views.atividade_remover, name='atividade_remover'),
]
5109289 | <filename>plugins/xlsimg/xlsimg.py
import os
class Excel2imgPlugin(object):
    """Preprocessor plugin that exports an Excel sheet/range as a PNG figure
    and emits a markdown/HTML figure referencing the exported image."""

    def __init__(self, preprocessor):
        self.pp = preprocessor
        # NOTE(review): "<PASSWORD>" looks like an anonymisation artifact of
        # the original plugin token (presumably the "xlsimg" keyword used in
        # documents) -- confirm before relying on it.
        self.token = "<PASSWORD>"
        self.pp.register_plugin(self)

    def process(self, code, fname, sheet="", range="", title=None, div_style=None):
        """
        Snapshot specified range from Excel file as picture and dump it to png
        ```xlsimg("fname", "optional sheet", "optional range", "optional title")
        """
        # `range` shadows the builtin but is part of the public keyword
        # interface; `code` and `div_style` are accepted for plugin-API
        # compatibility and are currently unused.
        if title is None:
            # Default title: "<sheet>_<range>" or the file's base name when
            # neither sheet nor range is given.
            atitle = []
            if sheet != '':
                atitle.append(sheet)
            if range != '':
                atitle.append(range)
            if atitle == []:
                atitle.append(os.path.splitext(os.path.basename(fname))[0])
            title = '_'.join(atitle)
        fn_base = self.pp.tofname("%s_%s_%s" % (fname, sheet, range))
        fn_input, fname = self.pp.get_asset(fname, False)
        fn_out = os.path.join(self.pp.dirs[-1], self.pp.auto, fn_base + '.png')
        fn_out_relative = os.path.relpath(fn_out, self.pp.dirs[0])
        if sheet == '':
            sheet = None
        if range == '':
            range = None
        # Re-export only when the output PNG is missing or older than the input.
        if (not self.pp.exists_and_newer(fn_out, fn_input)):
            import excel2img
            excel2img.export_img(fn_input, fn_out, sheet, range)
            self.pp.timestamp(fn_out)
        # Bug fix: the original format string had only two %s placeholders for
        # four arguments and raised TypeError at runtime. The markdown image
        # syntax (alt, path, hover-title) is restored so all four arguments
        # (title, relative path, asset name, caption) are consumed.
        return "\n<div class=\"figure\">\n![%s](%s \"%s\")<p class=\"caption\">%s</p></div>" % (title, fn_out_relative, fname, title)
# Hook looked up by the preprocessor's plugin loader to instantiate this plugin.
new = Excel2imgPlugin
| StarcoderdataPython |
9670213 | from .const import *
# Bug fix: dataset-concatenation junk ("| StarcoderdataPython") was fused onto
# this line, making the module raise NameError on import; it has been removed.
# Public re-exports from .const (imported via the star-import above).
__all__ = ['INSERT_USER', 'GET_USER', 'GET_USERS', 'INIT_DATABASE', 'GET_PRMS']
3333088 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
# Public API of this generated module (emitted by the Pulumi tfgen bridge).
__all__ = ['LogsCustomPipelineArgs', 'LogsCustomPipeline']
@pulumi.input_type
class LogsCustomPipelineArgs:
    def __init__(__self__, *,
                 filters: pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineFilterArgs']]],
                 name: pulumi.Input[str],
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 processors: Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineProcessorArgs']]]] = None):
        """
        The set of arguments for constructing a LogsCustomPipeline resource.

        :param filters: Filter(s) selecting which logs enter the pipeline
               (see ``LogsCustomPipelineFilterArgs``).
        :param name: Name of the pipeline.
        :param is_enabled: Whether the pipeline is enabled.
        :param processors: Ordered processors applied to matching logs.
        """
        pulumi.set(__self__, "filters", filters)
        pulumi.set(__self__, "name", name)
        # Optional inputs are only set when explicitly provided.
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
        if processors is not None:
            pulumi.set(__self__, "processors", processors)

    @property
    @pulumi.getter
    def filters(self) -> pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineFilterArgs']]]:
        """Filter(s) selecting which logs enter the pipeline."""
        return pulumi.get(self, "filters")

    @filters.setter
    def filters(self, value: pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineFilterArgs']]]):
        pulumi.set(self, "filters", value)

    @property
    @pulumi.getter
    def name(self) -> pulumi.Input[str]:
        """Name of the pipeline."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: pulumi.Input[str]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the pipeline is enabled."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter
    def processors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineProcessorArgs']]]]:
        """Ordered processors applied to matching logs."""
        return pulumi.get(self, "processors")

    @processors.setter
    def processors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineProcessorArgs']]]]):
        pulumi.set(self, "processors", value)
@pulumi.input_type
class _LogsCustomPipelineState:
    def __init__(__self__, *,
                 filters: Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineFilterArgs']]]] = None,
                 is_enabled: Optional[pulumi.Input[bool]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 processors: Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineProcessorArgs']]]] = None):
        """
        Input properties used for looking up and filtering LogsCustomPipeline resources.

        All fields are optional: this state class mirrors LogsCustomPipelineArgs
        but is used for resource lookup/import rather than creation.
        """
        # Only set the fields that were explicitly provided.
        if filters is not None:
            pulumi.set(__self__, "filters", filters)
        if is_enabled is not None:
            pulumi.set(__self__, "is_enabled", is_enabled)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if processors is not None:
            pulumi.set(__self__, "processors", processors)

    @property
    @pulumi.getter
    def filters(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineFilterArgs']]]]:
        """Filter(s) selecting which logs enter the pipeline."""
        return pulumi.get(self, "filters")

    @filters.setter
    def filters(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineFilterArgs']]]]):
        pulumi.set(self, "filters", value)

    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> Optional[pulumi.Input[bool]]:
        """Whether the pipeline is enabled."""
        return pulumi.get(self, "is_enabled")

    @is_enabled.setter
    def is_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "is_enabled", value)

    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """Name of the pipeline."""
        return pulumi.get(self, "name")

    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)

    @property
    @pulumi.getter
    def processors(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineProcessorArgs']]]]:
        """Ordered processors applied to matching logs."""
        return pulumi.get(self, "processors")

    @processors.setter
    def processors(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['LogsCustomPipelineProcessorArgs']]]]):
        pulumi.set(self, "processors", value)
class LogsCustomPipeline(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsCustomPipelineFilterArgs']]]]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
processors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsCustomPipelineProcessorArgs']]]]] = None,
__props__=None):
"""
Provides a Datadog [Logs Pipeline API](https://docs.datadoghq.com/api/v1/logs-pipelines/) resource, which is used to create and manage Datadog logs custom pipelines. Each `LogsCustomPipeline` resource defines a complete pipeline. The order of the pipelines is maintained in a different resource: `LogsPipelineOrder`. When creating a new pipeline, you need to **explicitly** add this pipeline to the `LogsPipelineOrder` resource to track the pipeline. Similarly, when a pipeline needs to be destroyed, remove its references from the `LogsPipelineOrder` resource.
## Example Usage
```python
import pulumi
import pulumi_datadog as datadog
sample_pipeline = datadog.LogsCustomPipeline("samplePipeline",
filters=[datadog.LogsCustomPipelineFilterArgs(
query="source:foo",
)],
is_enabled=True,
name="sample pipeline",
processors=[
datadog.LogsCustomPipelineProcessorArgs(
arithmetic_processor=datadog.LogsCustomPipelineProcessorArithmeticProcessorArgs(
expression="(time1 - time2)*1000",
is_enabled=True,
is_replace_missing=True,
name="sample arithmetic processor",
target="my_arithmetic",
),
),
datadog.LogsCustomPipelineProcessorArgs(
attribute_remapper=datadog.LogsCustomPipelineProcessorAttributeRemapperArgs(
is_enabled=True,
name="sample attribute processor",
override_on_conflict=False,
preserve_source=True,
source_type="tag",
sources=["db.instance"],
target="db",
target_format="string",
target_type="attribute",
),
),
datadog.LogsCustomPipelineProcessorArgs(
category_processor=datadog.LogsCustomPipelineProcessorCategoryProcessorArgs(
category=[
{
"filter": {
"query": "@severity: \".\"",
},
"name": "debug",
},
{
"filter": {
"query": "@severity: \"-\"",
},
"name": "verbose",
},
],
is_enabled=True,
name="sample category processor",
target="foo.severity",
),
),
datadog.LogsCustomPipelineProcessorArgs(
date_remapper=datadog.LogsCustomPipelineProcessorDateRemapperArgs(
is_enabled=True,
name="sample date remapper",
sources=[
"_timestamp",
"published_date",
],
),
),
datadog.LogsCustomPipelineProcessorArgs(
geo_ip_parser=datadog.LogsCustomPipelineProcessorGeoIpParserArgs(
is_enabled=True,
name="sample geo ip parser",
sources=["network.client.ip"],
target="network.client.geoip",
),
),
datadog.LogsCustomPipelineProcessorArgs(
grok_parser=datadog.LogsCustomPipelineProcessorGrokParserArgs(
grok=datadog.LogsCustomPipelineProcessorGrokParserGrokArgs(
match_rules="Rule %{word:my_word2} %{number:my_float2}",
support_rules="",
),
is_enabled=True,
name="sample grok parser",
samples=["sample log 1"],
source="message",
),
),
datadog.LogsCustomPipelineProcessorArgs(
lookup_processor=datadog.LogsCustomPipelineProcessorLookupProcessorArgs(
default_lookup="unknown service",
is_enabled=True,
lookup_table=["1,my service"],
name="sample lookup processor",
source="service_id",
target="service_name",
),
),
datadog.LogsCustomPipelineProcessorArgs(
message_remapper=datadog.LogsCustomPipelineProcessorMessageRemapperArgs(
is_enabled=True,
name="sample message remapper",
sources=["msg"],
),
),
datadog.LogsCustomPipelineProcessorArgs(
pipeline=datadog.LogsCustomPipelineProcessorPipelineArgs(
filter=[{
"query": "source:foo",
}],
is_enabled=True,
name="nested pipeline",
processor=[{
"urlParser": {
"name": "sample url parser",
"normalizeEndingSlashes": True,
"sources": [
"url",
"extra",
],
"target": "http_url",
},
}],
),
),
datadog.LogsCustomPipelineProcessorArgs(
service_remapper=datadog.LogsCustomPipelineProcessorServiceRemapperArgs(
is_enabled=True,
name="sample service remapper",
sources=["service"],
),
),
datadog.LogsCustomPipelineProcessorArgs(
status_remapper=datadog.LogsCustomPipelineProcessorStatusRemapperArgs(
is_enabled=True,
name="sample status remapper",
sources=[
"info",
"trace",
],
),
),
datadog.LogsCustomPipelineProcessorArgs(
string_builder_processor=datadog.LogsCustomPipelineProcessorStringBuilderProcessorArgs(
is_enabled=True,
is_replace_missing=False,
name="sample string builder processor",
target="user_activity",
template="%{user.name} logged in at %{timestamp}",
),
),
datadog.LogsCustomPipelineProcessorArgs(
trace_id_remapper=datadog.LogsCustomPipelineProcessorTraceIdRemapperArgs(
is_enabled=True,
name="sample trace id remapper",
sources=["dd.trace_id"],
),
),
datadog.LogsCustomPipelineProcessorArgs(
user_agent_parser=datadog.LogsCustomPipelineProcessorUserAgentParserArgs(
is_enabled=True,
is_encoded=False,
name="sample user agent parser",
sources=[
"user",
"agent",
],
target="http_agent",
),
),
])
```
## Import
# To find the pipeline ID, click the "edit" button in the UI to open the pipeline details. # The pipeline ID is the last part of the URL.
```sh
$ pulumi import datadog:index/logsCustomPipeline:LogsCustomPipeline name> <pipelineID>
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: LogsCustomPipelineArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Provides a Datadog [Logs Pipeline API](https://docs.datadoghq.com/api/v1/logs-pipelines/) resource, which is used to create and manage Datadog logs custom pipelines. Each `LogsCustomPipeline` resource defines a complete pipeline. The order of the pipelines is maintained in a different resource: `LogsPipelineOrder`. When creating a new pipeline, you need to **explicitly** add this pipeline to the `LogsPipelineOrder` resource to track the pipeline. Similarly, when a pipeline needs to be destroyed, remove its references from the `LogsPipelineOrder` resource.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_datadog as datadog
        sample_pipeline = datadog.LogsCustomPipeline("samplePipeline",
            filters=[datadog.LogsCustomPipelineFilterArgs(
                query="source:foo",
            )],
            is_enabled=True,
            name="sample pipeline",
            processors=[
                datadog.LogsCustomPipelineProcessorArgs(
                    arithmetic_processor=datadog.LogsCustomPipelineProcessorArithmeticProcessorArgs(
                        expression="(time1 - time2)*1000",
                        is_enabled=True,
                        is_replace_missing=True,
                        name="sample arithmetic processor",
                        target="my_arithmetic",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    attribute_remapper=datadog.LogsCustomPipelineProcessorAttributeRemapperArgs(
                        is_enabled=True,
                        name="sample attribute processor",
                        override_on_conflict=False,
                        preserve_source=True,
                        source_type="tag",
                        sources=["db.instance"],
                        target="db",
                        target_format="string",
                        target_type="attribute",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    category_processor=datadog.LogsCustomPipelineProcessorCategoryProcessorArgs(
                        category=[
                            {
                                "filter": {
                                    "query": "@severity: \".\"",
                                },
                                "name": "debug",
                            },
                            {
                                "filter": {
                                    "query": "@severity: \"-\"",
                                },
                                "name": "verbose",
                            },
                        ],
                        is_enabled=True,
                        name="sample category processor",
                        target="foo.severity",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    date_remapper=datadog.LogsCustomPipelineProcessorDateRemapperArgs(
                        is_enabled=True,
                        name="sample date remapper",
                        sources=[
                            "_timestamp",
                            "published_date",
                        ],
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    geo_ip_parser=datadog.LogsCustomPipelineProcessorGeoIpParserArgs(
                        is_enabled=True,
                        name="sample geo ip parser",
                        sources=["network.client.ip"],
                        target="network.client.geoip",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    grok_parser=datadog.LogsCustomPipelineProcessorGrokParserArgs(
                        grok=datadog.LogsCustomPipelineProcessorGrokParserGrokArgs(
                            match_rules="Rule %{word:my_word2} %{number:my_float2}",
                            support_rules="",
                        ),
                        is_enabled=True,
                        name="sample grok parser",
                        samples=["sample log 1"],
                        source="message",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    lookup_processor=datadog.LogsCustomPipelineProcessorLookupProcessorArgs(
                        default_lookup="unknown service",
                        is_enabled=True,
                        lookup_table=["1,my service"],
                        name="sample lookup processor",
                        source="service_id",
                        target="service_name",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    message_remapper=datadog.LogsCustomPipelineProcessorMessageRemapperArgs(
                        is_enabled=True,
                        name="sample message remapper",
                        sources=["msg"],
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    pipeline=datadog.LogsCustomPipelineProcessorPipelineArgs(
                        filter=[{
                            "query": "source:foo",
                        }],
                        is_enabled=True,
                        name="nested pipeline",
                        processor=[{
                            "urlParser": {
                                "name": "sample url parser",
                                "normalizeEndingSlashes": True,
                                "sources": [
                                    "url",
                                    "extra",
                                ],
                                "target": "http_url",
                            },
                        }],
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    service_remapper=datadog.LogsCustomPipelineProcessorServiceRemapperArgs(
                        is_enabled=True,
                        name="sample service remapper",
                        sources=["service"],
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    status_remapper=datadog.LogsCustomPipelineProcessorStatusRemapperArgs(
                        is_enabled=True,
                        name="sample status remapper",
                        sources=[
                            "info",
                            "trace",
                        ],
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    string_builder_processor=datadog.LogsCustomPipelineProcessorStringBuilderProcessorArgs(
                        is_enabled=True,
                        is_replace_missing=False,
                        name="sample string builder processor",
                        target="user_activity",
                        template="%{user.name} logged in at %{timestamp}",
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    trace_id_remapper=datadog.LogsCustomPipelineProcessorTraceIdRemapperArgs(
                        is_enabled=True,
                        name="sample trace id remapper",
                        sources=["dd.trace_id"],
                    ),
                ),
                datadog.LogsCustomPipelineProcessorArgs(
                    user_agent_parser=datadog.LogsCustomPipelineProcessorUserAgentParserArgs(
                        is_enabled=True,
                        is_encoded=False,
                        name="sample user agent parser",
                        sources=[
                            "user",
                            "agent",
                        ],
                        target="http_agent",
                    ),
                ),
            ])
        ```
        ## Import
        To find the pipeline ID, click the "edit" button in the UI to open the pipeline details; the pipeline ID is the last part of the URL.
        ```sh
        $ pulumi import datadog:index/logsCustomPipeline:LogsCustomPipeline <name> <pipelineID>
        ```
        :param str resource_name: The name of the resource.
        :param LogsCustomPipelineArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(LogsCustomPipelineArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsCustomPipelineFilterArgs']]]]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
processors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsCustomPipelineProcessorArgs']]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = LogsCustomPipelineArgs.__new__(LogsCustomPipelineArgs)
if filters is None and not opts.urn:
raise TypeError("Missing required property 'filters'")
__props__.__dict__["filters"] = filters
__props__.__dict__["is_enabled"] = is_enabled
if name is None and not opts.urn:
raise TypeError("Missing required property 'name'")
__props__.__dict__["name"] = name
__props__.__dict__["processors"] = processors
super(LogsCustomPipeline, __self__).__init__(
'datadog:index/logsCustomPipeline:LogsCustomPipeline',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
filters: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsCustomPipelineFilterArgs']]]]] = None,
is_enabled: Optional[pulumi.Input[bool]] = None,
name: Optional[pulumi.Input[str]] = None,
processors: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['LogsCustomPipelineProcessorArgs']]]]] = None) -> 'LogsCustomPipeline':
"""
Get an existing LogsCustomPipeline resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _LogsCustomPipelineState.__new__(_LogsCustomPipelineState)
__props__.__dict__["filters"] = filters
__props__.__dict__["is_enabled"] = is_enabled
__props__.__dict__["name"] = name
__props__.__dict__["processors"] = processors
return LogsCustomPipeline(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def filters(self) -> pulumi.Output[Sequence['outputs.LogsCustomPipelineFilter']]:
        """The resolved `filters` output of this pipeline resource."""
        return pulumi.get(self, "filters")
    @property
    @pulumi.getter(name="isEnabled")
    def is_enabled(self) -> pulumi.Output[Optional[bool]]:
        """The resolved `is_enabled` output (wire name `isEnabled`); may be absent."""
        return pulumi.get(self, "is_enabled")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """The resolved `name` output of this pipeline resource."""
        return pulumi.get(self, "name")
    @property
    @pulumi.getter
    def processors(self) -> pulumi.Output[Optional[Sequence['outputs.LogsCustomPipelineProcessor']]]:
        """The resolved `processors` output of this pipeline resource; may be absent."""
        return pulumi.get(self, "processors")
| StarcoderdataPython |
import sys

# Copy the file named by the first command-line argument to stdout, line by line.
# A with-statement guarantees the file is closed even if a write raises;
# the original only closed it on the success path.
with open(sys.argv[1], mode='rt', encoding='utf-8') as f:
    for line in f:
        sys.stdout.write(line)
| StarcoderdataPython |
from flask import Blueprint

#
# @author: andy
#
from .measure_service import profiling_service

# Blueprint that exposes the profiling report under /profiler.
profiler_blueprint = Blueprint("profiler", __name__)


@profiler_blueprint.route("/profiler", methods=["GET"])
def index():
    """Return the current profiling report rendered as HTML."""
    return profiling_service.as_html()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.