| content<br>*stringlengths 0–1.05M* | origin<br>*stringclasses 2 values* | type<br>*stringclasses 2 values* |
|---|---|---|
# Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for wfa_cardinality_estimation_evaluation_framework.common.random."""
from absl.testing import absltest
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.common import random
class PlottingTest(absltest.TestCase):
def test_choice_fast_same_random_state_same_output(self):
rs1 = np.random.RandomState(1)
rs2 = np.random.RandomState(1)
a = random.choice_fast(10000, 5000, rs1)
b = random.choice_fast(10000, 5000, rs2)
self.assertSameElements(a, b)
def test_choice_fast_len_is_m(self):
for i in range(1000):
a = random.choice_fast(10000, i)
self.assertLen(a, i)
def test_choice_fast_choose_elements_from_list(self):
for i in range(50, 500):
# Get a random list of numbers from 0 to 5000 size i
elements = np.random.randint(0, 5000, i)
# Choose up to i elements from that list
chosen = random.choice_fast(elements, np.random.randint(1, i))
# Make sure chosen elements are actually from our original elements.
      for element in chosen:
        self.assertIn(element, elements)
def test_choice_fast_is_unique(self):
for i in range(50, 500):
chosen = random.choice_fast(500, i)
      no_repeats = set(chosen)
      self.assertEqual(len(chosen), len(no_repeats))
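# For reference, a minimal sketch of a choice_fast-style sampler that satisfies
# the contract these tests pin down (m distinct draws, reproducible from a given
# RandomState). This is an illustrative assumption, not the framework's actual
# implementation; it uses Floyd's algorithm for sampling without replacement.
def _choice_fast_sketch(elements, m, random_state=None):
  rs = random_state or np.random.RandomState()
  pool = None
  if isinstance(elements, int):
    n = elements
  else:
    n, pool = len(elements), elements
  chosen, result = set(), []
  for j in range(n - m, n):
    t = rs.randint(0, j + 1)  # uniform over [0, j]
    pick = t if t not in chosen else j  # j cannot have been chosen yet
    chosen.add(pick)
    result.append(pick if pool is None else pool[pick])
  return result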
if __name__ == '__main__':
absltest.main()
|
nilq/baby-python
|
python
|
from gbdxtools.images.worldview import WorldViewImage
from gbdxtools.images.drivers import WorldViewDriver
from gbdxtools.images.util import vector_services_query
from gbdxtools.rda.interface import RDA
rda = RDA()
band_types = {
'MS': 'BGRN',
'Panchromatic': 'PAN',
'Pan': 'PAN',
'pan': 'PAN'
}
class GeoEyeDriver(WorldViewDriver):
__image_option_defaults__ = {"correctionType": "DN"}
class GE01(WorldViewImage):
__Driver__ = GeoEyeDriver
@property
def _rgb_bands(self):
return [2,1,0]
|
nilq/baby-python
|
python
|
"""
README:
docs/everything-about-prop-delegators.zh.md
"""
# noinspection PyUnresolvedReferences,PyProtectedMember
from typing import _UnionGenericAlias as RealUnionType
from PySide6.QtQml import QQmlProperty
from .typehint import *
from ....qmlside import qmlside
from ....qmlside.qmlside import convert_name_case
from ....qmlside.qmlside import convert_primitive_type
_REGISTERED_NAMES = (
'qobj', 'name', 'prop', 'read', 'write', 'kiss', 'bind'
)
class PrimitivePropDelegator:
qobj: TQObject
name: TPropName
def __init__(self, qobj: TQObject, name: TPropName):
self.qobj = qobj
self.name = name
def read(self):
return self.qobj.property(convert_name_case(self.name))
def write(self, value):
self.qobj.setProperty(convert_name_case(self.name), value)
class PropDelegator:
qobj: TQObject
name: TPropName
prop: TProperty
def __init__(self, qobj: TQObject, name: TPropName):
self.qobj = qobj
self.name = name
self.prop = QQmlProperty(qobj, convert_name_case(name))
def __getattr__(self, item):
if item in _REGISTERED_NAMES or item.startswith('_'):
return super().__getattribute__(item)
else:
return self.__get_subprop__(item)
def __setattr__(self, key, value):
"""
Examples:
xxx.name = 'xxx'
xxx.width = 12
"""
if key in _REGISTERED_NAMES or key.startswith('_'):
super().__setattr__(key, value)
else:
self.__set_subprop__(key, value)
def __get_subprop__(self, name: TPropName):
raise NotImplementedError
def __set_subprop__(self, name, value):
raise NotImplementedError
def read(self):
return self.prop.read()
def write(self, value):
self.prop.write(value)
def kiss(self, value):
self.write(value)
def bind(self, abstract_prop_expression: tuple[TQObject, str]):
"""
Documents:
See `docs/black-magic-about-binding-mechanism.zh.md`
Notes:
Trying hard to complete dynamic binding feature. You cannot use
this method for now.
If you want to dynamically bind the others' properties, try the
following instead:
# WIP
<item_A>.<prop>.bind(<item_B>.<prop>)
# Workaround
<item_B>.<prop_changed>.connect(
lambda: <item_A>.<prop> = <item_B>.<prop>
)
"""
# last_frame = currentframe().f_back
# event, participants = self._extract_frame_info(last_frame)
        raise NotImplementedError
# @staticmethod
# def _extract_frame_info(frame):
# """
# Learning:
# source code of lk-logger
#
# TODO: much work (unittest & optimization) need to be done...
# """
# filename = frame.f_code.co_filename
# lineno = frame.f_lineno
# file = open(filename, 'r', encoding='utf-8')
# source_line = file.read().splitlines()[lineno - 1]
# file.close()
#
# assert (m := re.match(r'^ +(?:\w+\.)+\.bind\(', source_line)), '''
# Your binding statement is too complex to analyse!
# In current verison (v0.1.x) we can only parse format likes
# `<some_qobj>.<property_name>.bind(<expression>)`.
# Here's the position error happened FYI:
# Filename: {}
# Lineno: {}
# Source Line: {}
# '''.format(filename, lineno, source_line)
# source_line_stem = source_line[m.span()[0]:]
#
# from lk_logger.scanner import get_all_blocks
# from ...base_item import BaseItem # FIXME: not a good way
#
# segs = source_line_stem[1:].split(',')
# segs[-1] = segs[-1].rstrip(', ')
# event = ''
# participants = []
# locals_ = frame.f_locals()
# for match0 in get_all_blocks(source_line_stem):
# event = match0.fulltext.strip()
# break
# for match in get_all_blocks(*segs, end_mark=','):
# obj_name, prop_name, *_ = match.fulltext.split('.')
# # e.g. 'btn.x' -> 'btn'
# if obj_name in locals_:
# obj = locals_[obj_name]
# if isinstance(obj, BaseItem) and prop_name in obj.auth_props:
# participants.append(QQmlProperty(obj.qobj, prop_name))
#
# return event, participants
class PropDelegatorA(PropDelegator):
def __get_subprop__(self, name):
# e.g. xxx.width.color -> error
raise AttributeError(
'Illegal property: {}.{}!'.format(self.name, name),
'This property ({}) doesn\'t support accessing secondary property '
'from it.'.format(self.name),
'Did you mean `PropDelegatorB` or `PropDelegatorC`?'
)
def __set_subprop__(self, name, value):
# e.g. xxx.width.color = '#FFFFFF'
raise AttributeError(
'Illegal property: {}.{}!'.format(self.name, name),
'This property ({}) doesn\'t support setting a secondary property '
'value to it.'.format(self.name),
'Did you mean `PropDelegatorB` or `PropDelegatorC`?'
)
class PropDelegatorB(PropDelegator):
def __get_subprop__(self, name) -> PropDelegatorA:
# e.g. border.width -> PropDelegator(<border.width>)
# ^^^^^
# name
return PropDelegatorA(self.prop.read(), name)
def __set_subprop__(self, name, value):
# e.g. border.width = 12
# ^^^^^ ^^
# name value
prop = self.__get_subprop__(name)
if isinstance(value, PropDelegator):
prop.write(value.read())
else:
prop.write(getattr(value, 'qobj', value))
def read(self):
return self
class PropDelegatorC(PropDelegator):
def __get_subprop__(self, name):
# e.g. anchors.top -> QQmlSideProp(<anchors.top>)
return QmlSideProp(self.qobj, f'{self.name}.{name}')
def __set_subprop__(self, name, value: 'QmlSideProp'):
# e.g. anchors.top = xxx.anchors.bottom
self.__get_subprop__(name).write(value)
# t = self.__get_subprop__(name)
# s = value
# qmlside.bind_prop(t.qobj, t.prop_name, s.qobj, s.prop_name)
def read(self):
return self
def write(self, value: 'QmlSideProp'):
# e.g. anchors.write(xxx.anchors.top)
raise AttributeError('Property not writable: {}'.format(self.name))
class QmlSideProp:
def __init__(self, qobj: TQObject, prop_name: str, **kwargs):
self.qobj = qobj
self.prop_name = prop_name
for k, v in kwargs.items():
setattr(self, k, v)
def read(self):
return qmlside.eval_js('{{0}}.{}'.format(
convert_name_case(self.prop_name)
), self.qobj)
def write(self, value: 'QmlSideProp'):
t_obj, t_prop_name = self.qobj, self.prop_name
if isinstance(value, QmlSideProp):
s_obj, s_prop_name = value.qobj, value.prop_name
elif hasattr(value, 'qobj'):
s_obj, s_prop_name = value.qobj, ''
else:
s_obj, s_prop_name = convert_primitive_type(value), ''
if t_prop_name == 'anchors.center_in':
s_prop_name = ''
elif t_prop_name == 'anchors.fill':
pass
elif t_prop_name.startswith('anchors.'):
s_prop_name = s_prop_name.removeprefix('anchors.')
qmlside.bind_prop(t_obj, t_prop_name, s_obj, s_prop_name)
def __add__(self, other):
return self.read() + other
def __radd__(self, other):
return other + self.read()
def adapt_delegator(qobj: TQObject, name: TPropName,
constructor: TConstructor) -> TDelegator:
if type(constructor) is RealUnionType:
# e.g. Union[float, PropDelegatorA]
delegator = constructor.__args__[-1] # -> PropDelegatorA
# we had an agreement that always put `type:TDelegator` in the last
# position of `TConstructor`. see reason at [TODO] and some
# implementation code at `..authorized_props.ItemProps`.
else:
# noinspection PyTypeChecker
if issubclass(constructor, PropDelegator):
# e.g. constructor is PropDelegatorA
delegator = constructor
else:
# e.g. constructor is float
delegator = PrimitivePropDelegator
return delegator(qobj, name)
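# Illustrative sketch (an assumption, not part of this module): the same
# registered-names delegation pattern PropDelegator uses above, reproduced with
# plain Python objects so it runs without Qt.
class _DemoDelegator:
    _REGISTERED = ('target',)
    def __init__(self, target):
        self.target = target  # 'target' is registered, so it is stored on the delegator
    def __getattr__(self, item):
        # only called when normal lookup fails -> forward to the wrapped object
        return getattr(self.target, item)
    def __setattr__(self, key, value):
        if key in self._REGISTERED or key.startswith('_'):
            super().__setattr__(key, value)
        else:
            setattr(self.target, key, value)
class _Wrapped:
    pass
_w = _Wrapped()
_d = _DemoDelegator(_w)
_d.width = 12  # not a registered name -> forwarded to the wrapped object
assert _w.width == 12 and _d.width == 12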
|
nilq/baby-python
|
python
|
import time,calendar,os,json,sys,datetime
from requests import get
from subprocess import Popen,PIPE
from math import sqrt,log,exp
from scipy.optimize import minimize
import numpy as np
np.set_printoptions(precision=3,linewidth=120)
def datetoday(x):
t=time.strptime(x+'UTC','%Y-%m-%d%Z')
return calendar.timegm(t)//86400
def daytodate(r):
t=time.gmtime(r*86400)
return time.strftime('%Y-%m-%d',t)
def get_data(req):
url='https://api.coronavirus.data.gov.uk/v1/data?'
response = get(url+req, timeout=10)
if not response.ok:
raise RuntimeError(f'Request failed: { response.text }')
date=time.strftime('%Y-%m-%d',time.strptime(response.headers['Last-Modified'],'%a, %d %b %Y %H:%M:%S %Z'))# Not currently used
data=response.json()['data']
# Convert from list form to dictionary keyed by age
day=datetoday(data[0]['date'])
n=1
while n<len(data) and datetoday(data[n]['date'])==day-n: n+=1# Find maximal contiguous date range
data1=[]
for i in range(n-1,-1,-1):
d=data[i]
e={'date':d['date']}
for x in d:
if x!='date':
for y in d[x]:
if 'value' in y: val=y['value']
else: val=y['deaths']
e[y['age']]=e.get(y['age'],0)+val
data1.append(e)
return data1
req='filters=areaType=nation;areaName=england&structure={"date":"date","blah":"newDeaths28DaysByDeathDateAgeDemographics"}'; mortdata=get_data(req)
req='filters=areaType=nation;areaName=england&structure={"date":"date","blah":"cumAdmissionsByAge"}'; hospdata=get_data(req)
req='filters=areaType=nation;areaName=england&structure={"date":"date","male":"maleCases"}'; malecases=get_data(req)
req='filters=areaType=nation;areaName=england&structure={"date":"date","female":"femaleCases"}'; femalecases=get_data(req)
casedata=[]
for (m,f) in zip(malecases,femalecases):
d={'date': m['date']}
assert m['date']==f['date']
for s in [m,f]:
for x in s:
if x!='date': d[x]=d.get(x,0)+s[x]
casedata.append(d)
updatedate=casedata[-1]['date']
now=datetime.datetime.utcnow().strftime('%Y-%m-%d')
# Save case data because we might want to artificially implement cases-by-publication-date-and-age. (newCasesByPublishDateAgeDemographics not working)
fn=os.path.join('apidata',updatedate)
if len(sys.argv)==1 and os.path.isfile(fn): sys.exit(1)# Exit signalling no update needs to be done
os.makedirs('apidata', exist_ok=True)
with open(fn,'w') as fp:
json.dump(casedata,fp,indent=2)
def getdiff(data):
n=len(data)
newdata=[]
for i in range(1,n):
l={'date':data[i]['date']}
for age in data[i]:
if age!='date': l[age]=data[i][age]-data[i-1].get(age,0)
newdata.append(l)
return newdata
newhosp=getdiff(hospdata)
newcases=getdiff(casedata)
newmcases=getdiff(malecases)
newfcases=getdiff(femalecases)
newcases=newcases[:-1]# Last entry seems particularly unreliable, I think because it uses specimen date and there are biases with recent entries
newmcases=newmcases[:-1]
newfcases=newfcases[:-1]
# Convert (eg) string ages '15_19', '15_to_19', '60+' to (15,20), (15,20), (60,150) respectively
def parseage(x):
if x[-1]=='+': return (int(x[:-1]),150)
x=x.replace('_to_','_')# cater for 65_to_69 and 65_69 formats
aa=[int(y) for y in x.split("_")]
return (aa[0],aa[1]+1)
# Convert (eg) (15,20) to "15 - 19"
def unparse(r):
(a,b)=r
if b==150: return "%d+"%a
return "%d - %d"%(a,b)
# Convert dictionary from using '15_19' (etc) format to (15,20) format
# At the same time remove age brackets such as '60+' and '00_59' that strictly contain other age brackets, so avoiding overcounting
# Return list of ages
def convertages(dd):
ages0=[(x,parseage(x)) for x in dd[-1] if x!='date']
ages1={}
for (x,(a,b)) in ages0:
for (y,(c,d)) in ages0:
if c>=a and d<=b and (c>a or d<b): break
else: ages1[x]=(a,b)
ee=[]
for d in dd:
e={}
e['date']=d['date']
for x in ages1:
e[ages1[x]]=d.get(x,0)
ee.append(e)
ages2=sorted(ages1.values())
return (ee,ages2)
#date=max(hosp[-1]['date'],cases[-1]['date'])
#mindate=daytodate(datetoday(updatedate)-90)
mindate='2020-12-30'#daytodate(datetoday(updatedate)-90)
hosp,hospages=convertages(newhosp)
cases,caseages=convertages(newcases)
deaths,deathages=convertages(mortdata)
fcases,_=convertages(newfcases)
mcases,_=convertages(newmcases)
# For fancysmooth - not currently used
smoothness=1e6
def LL(rr,xx,lx):
L=0
n=len(rr)
er=np.exp(rr)
for i in range(7):
x=xx[i::7].sum()
ew=x/(er[i::7].sum())
L+=x*log(ew)
  # xx*lx is only a constant, but subtracting it makes LL more meaningful and keeps it in a better range of values
L+=(xx*(rr-lx)).sum()
dd=-rr[:-2]+2*rr[1:-1]-rr[2:]
t=(dd*dd).sum()
#s=(rr*rr).sum();L-=n*log(t/s)
L-=smoothness/2*t
# Seems that scaling down objective function to control precision works significantly better than reducing tolerance in SLSQP (option ftol)
return -L/n/300
# Not currently used
def fancysmooth1(data):
deb=0
ages=[x for x in data[0].keys() if x!='date']
xx=np.array([sum(d[age] for age in ages) for d in data])
lx=np.log(xx)
n=len(xx)
# Convenient to optimise in the 'gauge' rr.sum()=0 because it doesn't involve xx (minimize can't handle auxiliary variables?) but transform to other gauge afterwards
# (Actually, probably don't need this constraint)
constr={'type':'eq', 'fun':lambda rr: rr.sum()}
# bounds=[(-30,30) for d in range(n)]
res=minimize(LL,np.zeros(n),args=(xx,lx),method="SLSQP",constraints=[constr],options={"maxiter":10000})
if not res.success: raise RuntimeError(res.message)
if deb: print(res.nit,"iterations")
rr=res.x
if deb: print(LL(rr,xx,lx));print()
# Regauge to put underlying Poisson parameter on the same footing as original data
rr+=log(xx.sum()/np.exp(rr).sum())
er=np.exp(rr)
if deb:
ww=[log(xx[i::7].sum()/er[i::7].sum()) for i in range(7)]
vv=[ww[d%7] for d in range(n)]
ev=np.exp(vv)
print((-np.exp(vv+rr).sum()))
print((xx*(vv+rr-lx)).sum())
dd=-rr[:-2]+2*rr[1:-1]-rr[2:]
t=(dd*dd).sum()
s=(rr*rr).sum()
print(-smoothness/2*t,n*log(t/s))
aa=[xx[i::7].sum()/len(xx[i::7]) for i in range(7)]
bb=[aa[d%7] for d in range(n)]
yy=xx/bb
yy*=xx.sum()/yy.sum()
with open('temp','w') as fp:
for i in range(n):
print("%12g %12g %12g %12g %12g"%(xx[i],er[i],ev[i],er[i]*ev[i],yy[i]),file=fp)
return
def simplesmooth1(data):
n=len(data)
ages=[x for x in data[0].keys() if x!='date']
xx=np.array([sum(d[age] for age in ages) for d in data])
ww=[xx[i::7].sum()/len(xx[i::7]) for i in range(7)]
vv=np.array([ww[d%7] for d in range(n)])
vv*=(xx/vv).sum()/xx.sum()
smoothed=[]
for d in range(n):
di={'date': data[d]['date']}
for age in ages:
di[age]=data[d][age]/vv[d]
smoothed.append(di)
return smoothed
def simplesmooth2(data):
ages=[x for x in data[0].keys() if x!='date']
n=len(data)
smoothed=[]
for i in range(n):
d={'date': data[i]['date']}
j0=max(i-3,0)
j1=min(i+4,n)
for age in ages:
d[age]=sum(data[j][age] for j in range(j0,j1))/(j1-j0)
smoothed.append(d)
return smoothed
def smooth(data):
#return data
#return simplesmooth1(data)
#return simplesmooth2(data)
return simplesmooth2(simplesmooth1(data))
hosp=smooth(hosp)
cases=smooth(cases)
deaths=smooth(deaths)
mcases=smooth(mcases)
fcases=smooth(fcases)
def makegraph(title='A graph', data=[], mindate='0000-00-00', ylabel='', outfn='temp.png', extra=[]):
po=Popen("gnuplot",shell=True,stdin=PIPE);p=po.stdin
# Use this to cater for earlier versions of Python whose Popen()s don't have the 'encoding' keyword
def write(*s): p.write((' '.join(map(str,s))+'\n').encode('utf-8'))
write('set terminal pngcairo font "sans,13" size 1920,1280')
write('set bmargin 5;set lmargin 15;set rmargin 15;set tmargin 5')
write('set output "%s"'%outfn)
write('set for [i=9:16] linetype i dashtype (20,7)')
write('set key right')
write('set title "%s"'%title)
write('set ylabel "'+ylabel+'"')
write('set xdata time')
write('set format x "%Y-%m-%d"')
write('set timefmt "%Y-%m-%d"')
write('set tics scale 2,0.5')
write('set xtics "2020-01-06", 604800')#%startdate)# Date labels on Mondays
write('set xtics rotate by 45 right offset 0.5,0')
write('set grid xtics ytics lc rgb "#dddddd" lt 1')
write('set xtics nomirror')
for x in extra: write(x)
s='plot '
first=True
for dat in data:
if not first: s+=', '
first=False
s+='"-" using 1:2 with lines '+dat.get('extra','')+' lw 3 title "%s"'%(dat['title'])
write(s)
for dat in data:
for (date,val) in dat['values']:
if date>=mindate: write(date,val)
write("e")
p.close();po.wait()
print("Written graph to %s"%outfn)
if 0:
days=(range(330,340),[-1])
ll=[]
for (ages,numthings,desc) in [(caseages,cases,"cases"), (deathages,deaths,"deaths")]:
aa={}
dd={}
for end in [0,1]:
for cut in [x[0] for x in ages]+[150]:
dd[(end,cut)]=sum(numthings[day][age] for day in days[end] for age in ages if age[0]<cut)/len(days[end])
n=len(ages)
for c0 in range(n-2):
cut0=ages[c0][0]
for c1 in range(c0+1,n-1):
cut1=ages[c1][0]
for c2 in range(c1,n):
cut2=ages[c2][0]
rr=[]
for end in [0,1]:
rr.append(dd[(end,cut1)]-dd[(end,cut0)])
rr.append(dd[(end,150)] -dd[(end,cut2)])
if min(rr)>=10:
aa[cut0,cut1,cut2]=rr[1]/rr[0]/(rr[3]/rr[2])
ll.append(aa)
l=[]
for x in ll[0]:
if x in ll[1]:
l.append((sqrt(ll[0][x]*ll[1][x]),*x))
l.sort(reverse=True)
for (r,cut0,cut1,cut2) in l:
if cut2<=70: print("%2d %2d %2d %7.3f"%(cut0,cut1,cut2,r))
if r<0.9*l[0][0]: break
title='Hospital admissions and confirmed cases/deaths ratios for Covid-19 in England, adjusted to be 1 on 1st January 2021\\nLast few values subject to change. Source: https://coronavirus.data.gov.uk/ at '+now
data=[]
for (desc, dat, ages, cutoff0, cutoff1, cutoff2) in [
("Hospital admissions", hosp, hospages, 0, 18, 65),
("Confirmed cases", cases, caseages, 0, 50, 55),
("Deaths", deaths, deathages, 0, 50, 55)]:
lowages=[age for age in ages if age[0]>=cutoff0 and age[1]<=cutoff1]
highages=[age for age in ages if age[0]>=cutoff2]
for d in dat:
if d["date"]=="2021-01-01": break
f=sum(d[a] for a in highages)/sum(d[a] for a in lowages)
if desc=="Deaths": maxdate="2021-03-29"
else: maxdate="9999-99-99"
data.append({
'title': desc+": %.2g * (aged %s) / (aged %s)"%(1/f,unparse((highages[0][0],highages[-1][1])),unparse((lowages[0][0],lowages[-1][1]))),
'values': [(d['date'],sum(d[a] for a in highages)/sum(d[a] for a in lowages)/f) for d in dat if d['date']>=mindate and d['date']<=maxdate]
})
makegraph(title=title, data=data, mindate=mindate, ylabel='Adjusted Ratio', outfn='admissionandcaseageratios2.png')
#################################
# Old graphs (14 Jan - 5 March) #
#################################
title='Hospital admissions and confirmed cases/deaths ratios for Covid-19 in England. Last few values subject to change.\\nSource: https://coronavirus.data.gov.uk/ at '+now
cutoff0=65;cutoff1=150;cutoff2=80
data=[]
data.append({
'title': 'Hospital admissions: (aged 85+) / (aged 18-64 or 85+)',
'values': [(d['date'],(d[(85,150)])/(d[(18,65)]+d[(85,150)])*100) for d in hosp if d['date']>=mindate]
})
lowages=[age for age in caseages if age[0]>=cutoff0 and age[1]<=cutoff1]
highages=[age for age in caseages if age[0]>=cutoff2]
data.append({
'title': 'Confirmed cases: (aged %s) / (aged %s)'%(unparse((cutoff2,150)),unparse((cutoff0,cutoff1))),
'values': [(d['date'],sum(d[a] for a in highages)/sum(d[a] for a in lowages)*100) for d in cases if d['date']>=mindate]
})
lowages=[age for age in deathages if age[0]>=cutoff0 and age[1]<=cutoff1]
highages=[age for age in deathages if age[0]>=cutoff2]
data.append({
'title': 'Deaths: (aged %s) / (aged %s) - 25%%'%(unparse((cutoff2,150)),unparse((cutoff0,cutoff1))),
'values': [(d['date'],sum(d[a] for a in highages)/sum(d[a] for a in lowages)*100-25) for d in deaths if d['date']>=mindate],
#'extra': 'axis x1y2'
})
makegraph(title=title, data=data, mindate=mindate, ylabel='Percentage', outfn='admissionandcaseageratios.png')
########################
data=[]
lowages=[age for age in caseages if age[0]>=16 and age[1]<=65]
data.append({
'title': 'Confirmed cases: #(female aged 16-65) / #(male aged 16-65)',
'values': [(f['date'],sum(f[a] for a in lowages)/sum(m[a] for a in lowages)) for (f,m) in zip(fcases,mcases) if f['date']>=mindate]
})
makegraph(title=title, data=data, mindate=mindate, ylabel='Ratio', outfn='femalemalecaseratio.png')
########################
data=[]
for age in [(18,65), (65,85), (85,150)]:
data.append({
'title': unparse(age),
'values': [(d['date'],d[age]) for d in hosp]
})
title='Hospital admissions for Covid-19 in England by age group. Last few values subject to change.\\nSource: https://coronavirus.data.gov.uk/ at '+now
makegraph(title=title, data=data, mindate=mindate, ylabel='Number of age group admitted', outfn='hospitaladmissionsbyage-abs.png')
########################
# Todo when can be bothered: normalise this by number in each age group
data=[]
for ageband in range(0,90,10):
if ageband<80: lim=ageband+10
else: lim=150
data.append({
'title': unparse((ageband,lim)),
'values': [(d['date'],sum(d[age] for age in caseages if age[0]>=ageband and age[1]<=lim)) for d in cases]
})
title='Confirmed cases per day for Covid-19 in England by age group. Last few values subject to change.\\nSource: https://coronavirus.data.gov.uk/ at '+now
makegraph(title=title, data=data, mindate=mindate, ylabel='Number of cases per day', outfn='confirmedcasesbyage-abs.png')#, extra=['set logscale y'])
if 0:
# Looking at hospitalisations per case
ave=14
delay=10
for t in range(-ave,-250,-ave):
print(cases[t]['date']+":",end='')
for age in hospages:
print(" %s:"%str(age),end='')
nh=nc=0
for i in range(ave):
nh+=hosp[t+i][age]
c=cases[t+i-delay]
for a in c:
if a=='date': continue
if a[0]>=age[0] and a[1]<=age[1]: nc+=c[a]
print("%5.1f"%(nh/nc*100),end='')
print()
print()
for t in range(-ave,-250,-ave):
nh=nc=0
for i in range(ave):
nh+=sum(hosp[t+i][x] for x in hospages)
nc+=sum(cases[t+i-delay][x] for x in caseages)
print("%s: %5.1f"%(cases[t]['date'],nh/nc*100))
|
nilq/baby-python
|
python
|
#
# Copyright 2018 Asylo authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides a function to look up a toolchain installation path."""
def _fail_if_directory_does_not_exist(repository_ctx, path, what):
result = repository_ctx.execute(["test", "-d", path])
if result.return_code == 0:
return path
fail("Install path to " + what + " does not exist: " + path)
def _try_get_file_line1(repository_ctx, path):
result = repository_ctx.execute(["cat", path])
if result.return_code == 0:
# First line of output with no \n:
return result.stdout.split("\n", 1)[0]
return None
def installation_path(repository_ctx, file, user_defined, default, what):
"""Looks up an installation location.
Args:
repository_ctx: A repository_rule context object.
file: The file that should contain the installation location.
    user_defined: A path that the user may provide to override lookup (may be None).
default: When both |file| and |user_defined| are unavailable, fall back on
this value (may be None).
what: A string for the failure message to indicate which component could not
retrieve its installation location.
Returns:
string: A path to a directory.
"""
result = ""
if user_defined:
result = user_defined
if not result:
result = _try_get_file_line1(
repository_ctx,
repository_ctx.os.environ["HOME"] +
"/.asylo/" + file,
)
if not result:
result = _try_get_file_line1(
repository_ctx,
"/usr/local/share/asylo/" + file,
)
if not result:
result = default
what = what + " [default]"
test_result = repository_ctx.execute(["test", "-d", result])
if test_result.return_code != 0:
result = "/opt/asylo/toolchains/sgx_x86_64"
what = what + " [INTERNAL TRANSITION]"
if not result:
fail("Unknown install location for " + what)
return _fail_if_directory_does_not_exist(repository_ctx, result, what)
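# Lookup order implemented above, illustrated (paths are assumptions):
#   1. the user_defined override, if provided
#   2. the first line of $HOME/.asylo/<file>
#   3. the first line of /usr/local/share/asylo/<file>
#   4. the provided default (falling back to the internal toolchain path when
#      the default directory does not exist)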
|
nilq/baby-python
|
python
|
"""
Author : Varundev Suresh Babu
Version: 0.1
"""
import rospy
from std_msgs.msg import Float64
steering_publisher = rospy.Publisher("/servo/position", Float64, queue_size = 10)
throttle_publisher = rospy.Publisher("/motor/duty_cycle", Float64, queue_size = 10)
def steering_callback(data):
    global steering
    steering.data = (data.data + 100.0)/200.0
    # republish the converted position each time a new command arrives
    steering_publisher.publish(steering)
def throttle_callback(data):
    global throttle
    throttle = data
    throttle_publisher.publish(throttle)
if __name__ == '__main__':
global steering
global throttle
steering = Float64()
throttle = Float64()
rospy.init_node('basic_racecar_control_node')
rospy.Subscriber("steering_control", Float64, steering_callback)
rospy.Subscriber("throttle_control", Float64, throttle_callback)
steering_publisher.publish(steering)
throttle_publisher.publish(throttle)
rospy.spin()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# created by inhzus
from .smms import ImageHost
from .md_parser import parse_md
|
nilq/baby-python
|
python
|
def rawify_url(url):
if url.startswith("https://github.com"):
urlparts = url.replace("https://github.com", "", 1).strip('/').split('/') + [None] * 5
ownername, reponame, _, refvalue, *filename_parts = urlparts
filename = '/'.join([p for p in filename_parts if p is not None])
assert ownername is not None, "URL should include the name of the owner/organization."
assert reponame is not None, "URL should include the name of the repository."
if refvalue is None:
refvalue = "main"
if filename == '':
filename = "CITATION.cff"
return f"https://raw.githubusercontent.com/{ownername}/{reponame}/{refvalue}/{filename}"
# return unrecognized URLs as-is
return url
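# Illustrative checks (assumed example URLs, not from the original source):
assert (rawify_url("https://github.com/owner/repo")
        == "https://raw.githubusercontent.com/owner/repo/main/CITATION.cff")
assert (rawify_url("https://github.com/owner/repo/blob/v1.0/sub/CITATION.cff")
        == "https://raw.githubusercontent.com/owner/repo/v1.0/sub/CITATION.cff")
assert rawify_url("https://example.org/paper") == "https://example.org/paper"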
|
nilq/baby-python
|
python
|
"""Raw message parser implementations."""
from twisted.words.protocols.irc import ctcpExtract, parsemsg, X_DELIM
from . import Message
from ..hostmask import Hostmask
class RawMessageParser(object):
"""An implementation of the parsing rules for a specific version of
the IRC protocol.
In most cases, you should use the `~.Connection.parser` attribute of
a `.Connection` to retrieve an instance of this class.
"""
def __init__(self):
self.functions = {}
def command(self, *commands):
"""A decorator that registers a function as a parameter parser
for messages of the types given in *commands*."""
def decorator(function):
for command in commands:
self.functions[command] = function
return function
return decorator
def parse(self, connection, outgoing, raw, **kwargs):
"""Parse a raw IRC message string and return a corresponding
`.Message` object. Any keyword arguments override field values
returned by the parser."""
try:
prefix, command, params = parsemsg(raw)
except IndexError:
parsed_kwargs = {'action': 'unknown'}
else:
parsed_kwargs = {'actor': Hostmask.from_string(prefix)}
if command in self.functions:
try:
parsed_kwargs['action'] = command.lower()
parsed_kwargs.update(
self.functions[command](command, params))
except IndexError:
del parsed_kwargs['action']
if 'action' not in parsed_kwargs:
parsed_kwargs['action'] = 'unknown'
parsed_kwargs['subaction'] = command
splits = 2 if raw.startswith(':') else 1
params = raw.split(None, splits)
if len(params) > splits:
parsed_kwargs['content'] = params[splits]
else:
parsed_kwargs['content'] = ''
parsed_kwargs.update(kwargs)
return Message(connection, outgoing, raw=raw, **parsed_kwargs)
#: A parser for the standard IRC version 2 protocol.
IRCV2_PARSER = RawMessageParser()
@IRCV2_PARSER.command('QUIT', 'NICK')
def parse_undirected_message(command, params):
return {'content': params[0]}
@IRCV2_PARSER.command('TOPIC')
def parse_directed_message(command, params):
return {'venue': params[0], 'content': params[1]}
@IRCV2_PARSER.command('PRIVMSG', 'NOTICE')
def parse_ctcpable_directed_message(command, params):
kwargs = parse_directed_message(command, params)
if params[1].startswith(X_DELIM):
# CTCP extended message quoting is pathologically designed, but
# nobody actually sends more than one at a time. Thankfully.
tag, data = ctcpExtract(params[1])['extended'][0]
kwargs['content'] = data
if tag.lower() == 'action':
kwargs['action'] = 'action'
else:
kwargs['action'] = ('ctcpquery' if command == 'PRIVMSG'
else 'ctcpreply')
kwargs['subaction'] = tag
return kwargs
@IRCV2_PARSER.command('JOIN')
def parse_join(command, params):
return {'venue': params[0]}
@IRCV2_PARSER.command('PART', 'MODE')
def parse_part_mode(command, params):
return {'venue': params[0], 'content': ' '.join(params[1:])}
@IRCV2_PARSER.command('KICK')
def parse_kick(command, params):
return {'venue': params[0], 'target': params[1], 'content': params[2]}
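# Illustrative extension (an assumption, not in the original module): the same
# decorator API registers parsers for further commands, e.g. INVITE.
@IRCV2_PARSER.command('INVITE')
def parse_invite(command, params):
    return {'target': params[0], 'venue': params[1]}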
|
nilq/baby-python
|
python
|
#-*- coding: utf-8 -*-
from bgesdk.error import APIError
import pytest
import six
def check_result(result):
assert 'result' in result
assert 'count' in result
assert 'next_page' in result
next_page = result['next_page']
assert isinstance(result['result'], list)
assert isinstance(result['count'], int)
assert isinstance(next_page, int) or next_page is None
class TestTaxonAbundance:
@pytest.mark.parametrize('taxon_ids', [None, 'tx1', 'tx2'])
def test_result(self, api, logger, self_meta_biosample_id, taxon_ids):
"""正常返回的数据"""
ret = api.get_taxon_abundance(self_meta_biosample_id)
logger.debug(ret)
check_result(ret)
@pytest.mark.parametrize('taxon_ids', ['txdemo', 'tx', 'test'])
def test_invalid_txid(self, api, logger, self_meta_biosample_id, taxon_ids):
"""格式错误的 taxon 编号"""
ret = api.get_taxon_abundance(self_meta_biosample_id, taxon_ids)
logger.debug(ret)
check_result(ret)
assert ret['count'] == 0
@pytest.mark.parametrize('taxon_ids', ['txid815'])
def test_valid_txid(self, api, logger, self_meta_biosample_id, taxon_ids):
"""在平台类群丰度 taxon_id 集合内的编号"""
ret = api.get_taxon_abundance(self_meta_biosample_id, taxon_ids)
logger.debug(ret)
check_result(ret)
assert ret['count'] == 1
@pytest.mark.parametrize('taxon_ids', ['txid1323'])
def test_outter_txid(self, api, logger, self_meta_biosample_id, taxon_ids):
"""不在平台类群丰度 taxon_id 集合内的编号"""
ret = api.get_taxon_abundance(self_meta_biosample_id, taxon_ids)
logger.debug(ret)
check_result(ret)
assert ret['count'] == 0
class TestFuncAbundance:
@pytest.mark.parametrize('catalog', ['go', 'ko', 'eggnog', 'pfam',
'kegg-pwy', 'kegg-mdl', 'level4ec',
'metacyc-rxn', 'metacyc-pwy'])
def test_result(self, api, logger, self_meta_biosample_id, catalog):
"""正常返回的数据"""
try:
ret = api.get_func_abundance(self_meta_biosample_id, catalog)
except APIError as error:
with pytest.raises(APIError) as e:
raise error
            assert e.value.code == 41202
            assert e.value.msg == u'BGE 私有接口错误: 样品数据未入仓'
return
logger.debug(ret)
check_result(ret)
class TestGeneAbundance:
def check_result(self, result):
assert 'result' in result
assert 'count' in result
assert 'next_page' in result
next_page = result['next_page']
assert isinstance(result['result'], list)
assert isinstance(result['count'], int)
assert next_page is None or isinstance(next_page, six.text_type)
@pytest.mark.parametrize('catalog, data_type', [
('UniRef90_HUMAnN2_0.11', 'file')])
def test_result(self, api, logger, self_meta_biosample_id, catalog,
data_type):
"""正常返回的数据"""
ret = api.get_gene_abundance(self_meta_biosample_id, catalog, data_type)
logger.debug(ret)
self.check_result(ret)
@pytest.mark.parametrize('catalog, data_type', [
('UniRef90_HUMAnN2_0.11', 'list')])
def test_invalid_args(self, api, self_meta_biosample_id, catalog, data_type):
"""正常返回的数据"""
with pytest.raises(APIError) as e:
api.get_gene_abundance(self_meta_biosample_id, catalog, data_type)
assert e.value.code == 41001
assert e.value.msg == u'参数错误'
|
nilq/baby-python
|
python
|
# coding=utf-8
from .email import EmailFromTemplate
def send_email(name, ctx_dict, send_to=None, subject=u'Subject', **kwargs):
"""
Shortcut function for EmailFromTemplate class
@return: None
"""
eft = EmailFromTemplate(name=name)
eft.subject = subject
eft.context = ctx_dict
eft.get_object()
eft.render_message()
eft.send_email(send_to=send_to, **kwargs)
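# Example usage (illustrative; the template name, context, and address are assumptions):
#   send_email('registration/welcome', {'username': 'alice'},
#              send_to=['alice@example.com'], subject=u'Welcome')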
|
nilq/baby-python
|
python
|
import weakref
import uuid
from types import MethodType
from collections import OrderedDict
from Qt import QtGui
from Qt.QtWidgets import QPushButton
from Qt.QtWidgets import QGraphicsProxyWidget
from Qt.QtWidgets import QMenu
from PyFlow.Core.Common import *
from PyFlow.UI.Utils.Settings import *
from PyFlow.Core.NodeBase import NodeBase
from PyFlow import getPinDefaultValueByType
from PyFlow.Core.PyCodeCompiler import Py3FunctionCompiler
class pythonNode(NodeBase):
def __init__(self, name):
super(pythonNode, self).__init__(name)
self.currentComputeCode = ''
@staticmethod
def pinTypeHints():
return {'inputs': [], 'outputs': []}
def serialize(self):
default = super(pythonNode, self).serialize()
default['computeCode'] = self.currentComputeCode
return default
def postCreate(self, jsonTemplate=None):
super(pythonNode, self).postCreate(jsonTemplate)
if jsonTemplate is None:
return
if 'computeCode' in jsonTemplate:
self.currentComputeCode = jsonTemplate['computeCode']
compute = Py3FunctionCompiler(
'compute').compile(self.currentComputeCode)
self.compute = MethodType(compute, self)
# recreate pins
for i in jsonTemplate['inputs']:
inPin = self.createInputPin(i['name'],
i['dataType'],
getPinDefaultValueByType(i['dataType']))
inPin.setData(i['value'])
inPin.dirty = i['bDirty']
for o in jsonTemplate['outputs']:
compute = self.compute if o['dataType'] in ('AnyPin', 'ExecPin') else None
outPin = self.createOutputPin(o['name'],
o['dataType'],
getPinDefaultValueByType(o['dataType']),
compute)
self.autoAffectPins()
@staticmethod
def category():
return 'Common'
@staticmethod
def keywords():
return ['Code', 'Expression', 'py']
@staticmethod
def description():
return 'Python script node'
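# Illustrative shape of a jsonTemplate this node consumes in postCreate above
# (field names taken from the code; the values are assumptions):
#   {'computeCode': 'def compute(self):\n    pass',
#    'inputs': [{'name': 'a', 'dataType': 'FloatPin', 'value': 0.0, 'bDirty': True}],
#    'outputs': [{'name': 'out', 'dataType': 'FloatPin'}]}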
|
nilq/baby-python
|
python
|
import logging
from rest_framework import exceptions
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth import get_user_model
from galaxy.api import serializers
from galaxy.api.views import base_views
from galaxy.main import models
__all__ = [
'UserList',
'UserDetail',
'ActiveUserView',
'UserNotificationSecretList',
'UserRepositoriesList',
'UserRolesList',
'UserStarredList',
'UserSubscriptionList',
]
logger = logging.getLogger(__name__)
User = get_user_model()
class UserDetail(base_views.RetrieveUpdateAPIView):
model = User
serializer_class = serializers.UserSerializer
def get_object(self, qs=None):
obj = super(UserDetail, self).get_object()
if not obj.is_active:
raise exceptions.PermissionDenied()
return obj
class UserList(base_views.ListAPIView):
model = User
serializer_class = serializers.UserSerializer
def get_queryset(self):
qs = super(UserList, self).get_queryset()
return qs.filter(is_active=True)
class ActiveUserView(base_views.RetrieveAPIView):
model = User
serializer_class = serializers.ActiveUserSerializer
view_name = 'Me'
def get_object(self):
try:
obj = self.model.objects.get(pk=self.request.user.pk)
except ObjectDoesNotExist:
obj = AnonymousUser()
return obj
class UserRepositoriesList(base_views.SubListAPIView):
model = models.Repository
serializer_class = serializers.RepositorySerializer
parent_model = User
relationship = 'repositories'
class UserRolesList(base_views.SubListAPIView):
model = models.Content
serializer_class = serializers.RoleDetailSerializer
parent_model = User
relationship = 'roles'
def get_queryset(self):
qs = super(UserRolesList, self).get_queryset()
return qs.filter(active=True, is_valid=True)
class UserSubscriptionList(base_views.SubListAPIView):
model = models.Subscription
serializer_class = serializers.SubscriptionSerializer
parent_model = User
relationship = 'subscriptions'
class UserStarredList(base_views.SubListAPIView):
model = models.Stargazer
serializer_class = serializers.StargazerSerializer
parent_model = User
relationship = 'starred'
class UserNotificationSecretList(base_views.SubListAPIView):
model = models.NotificationSecret
serializer_class = serializers.NotificationSecretSerializer
parent_model = User
relationship = 'notification_secrets'
|
nilq/baby-python
|
python
|
import re
import pandas as pd
from dojo.models import Finding
__author__ = 'Matt Sicker'
class DsopParser:
def __init__(self, file, test):
self._test = test
self._items = []
f = pd.ExcelFile(file)
self.__parse_disa(pd.read_excel(f, sheet_name='OpenSCAP - DISA Compliance', parse_dates=['scanned_date'],
dtype={'result': 'category', 'severity': 'category'}))
self.__parse_oval(pd.read_excel(f, sheet_name='OpenSCAP - OVAL Results'))
self.__parse_twistlock(
pd.read_excel(f, sheet_name='Twistlock Vulnerability Results', dtype={'severity': 'category'}))
self.__parse_anchore(pd.read_excel(f, sheet_name='Anchore CVE Results', dtype={'severity': 'category'}))
self.__parse_anchore_compliance(
pd.read_excel(f, sheet_name='Anchore Compliance Results', dtype={'severity': 'category'}))
def __parse_disa(self, df: pd.DataFrame):
for row in df.itertuples(index=False):
if row.result not in ('fail', 'notchecked'):
continue
title = row.title
unique_id = row.ruleid
if row.severity == 'unknown':
severity = 'Info'
else:
severity = row.severity.title()
cve = row.identifiers
references = row.refs
description = row.desc
impact = row.rationale
date = row.scanned_date.date()
tags = "disa"
finding = Finding(title=title, date=date, cve=cve, severity=severity, description=description,
impact=impact, references=references, test=self._test, unique_id_from_tool=unique_id,
static_finding=True, dynamic_finding=False)
finding.unsaved_tags = tags
self._items.append(finding)
def __parse_oval(self, df: pd.DataFrame):
severity_pattern = re.compile(r'\((.*)\)')
for row in df.itertuples(index=False):
            if not row.result or row.result in ('false',):
continue
title = row.title
match = severity_pattern.search(title)
if match:
sev = match.group(1)
if sev == 'Important':
severity = 'High'
elif sev == 'Moderate':
severity = 'Medium'
elif sev == 'None':
severity = 'Info'
else:
severity = sev
else:
severity = 'Info'
unique_id = row.id
cve = row.ref
tags = "oval"
finding = Finding(title=title, cve=cve, severity=severity, unique_id_from_tool=unique_id,
test=self._test, static_finding=True, dynamic_finding=False)
finding.unsaved_tags = tags
self._items.append(finding)
def __parse_twistlock(self, df: pd.DataFrame):
for row in df.itertuples(index=False):
cve = row.id
description = row.desc
mitigation = row.status
url = row.link
component_name = row.packageName
component_version = row.packageVersion
title = '{}: {} - {}'.format(cve, component_name, component_version)
if row.severity == 'important':
severity = 'High'
elif row.severity == 'moderate':
severity = 'Medium'
else:
severity = row.severity.title()
severity_justification = row.vecStr
tags = "twistlock"
finding = Finding(title=title, cve=cve, url=url, severity=severity, description=description,
component_name=component_name, component_version=component_version,
severity_justification=severity_justification, test=self._test,
static_finding=True, dynamic_finding=False)
finding.unsaved_tags = tags
self._items.append(finding)
def __parse_anchore(self, df: pd.DataFrame):
for row in df.itertuples(index=False):
cve = row.cve
severity = row.severity
component = row.package
file_path = row.package_path
mitigation = row.fix
description = "Image affected: {}".format(row.tag)
title = '{}: {}'.format(cve, component)
tags = "anchore"
finding = Finding(title=title, cve=cve, severity=severity,
mitigation=mitigation, component_name=component,
description=description, test=self._test,
static_finding=True, dynamic_finding=False,
file_path=file_path)
finding.unsaved_tags = tags
self._items.append(finding)
def __parse_anchore_compliance(self, df: pd.DataFrame):
for row in df.itertuples(index=False):
if row.policy_id != "DoDFileChecks":
continue
if row.gate_action == "warn":
severity = "Medium"
elif row.gate_action == "stop":
severity = "Critical"
else:
severity = "Info"
mitigation = "To be investigated"
description = "Gate: {} (Trigger: {}): {}".format(row.gate, row.trigger, row.check_output)
title = '{}: {}'.format(row.policy_id, row.trigger_id)
tags = "anchore_compliance"
finding = Finding(title=title, severity=severity,
mitigation=mitigation,
description=description, test=self._test,
static_finding=True, dynamic_finding=False)
finding.unsaved_tags = tags
self._items.append(finding)
@property
def items(self):
return self._items
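# Illustrative usage (assumed report path and Dojo test object):
#   with open('dsop_scan_results.xlsx', 'rb') as report:
#       findings = DsopParser(report, test).items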
|
nilq/baby-python
|
python
|
from exterminate.Utilities import builtins
from exterminate.Gizoogle import translate
_print = builtins.print
builtins.print = lambda *args, **kwargs: _print(
translate(' '.join([str(x) for x in args])), **kwargs
)
|
nilq/baby-python
|
python
|
from builtins import object
import abc
from future.utils import with_metaclass
class Solver(with_metaclass(abc.ABCMeta, object)):
def __init__(self, **kwargs):
self.options = kwargs
if 'verbose' not in self.options:
self.options['verbose'] = False
@abc.abstractmethod
def solve(self, p):
"""Solve QP problem
"""
pass
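# Illustrative concrete subclass (an assumption, not part of the package):
class IdentitySolver(Solver):
    def solve(self, p):
        if self.options['verbose']:
            print('solving', p)  # honours the verbose option set in __init__
        return p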
|
nilq/baby-python
|
python
|
# Copyright (c) 2015 Microsoft Corporation
from z3 import *
set_option(auto_config=True)
x = Int('x')
y = Int('y')
f = Function('f', IntSort(), IntSort())
solve(f(f(x)) == x, f(x) == y, x != y)
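# Any model satisfying these constraints makes f swap two distinct values,
# e.g. [x = 0, y = 1, f = [0 -> 1, else -> 0]] (exact output varies by z3 version).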
|
nilq/baby-python
|
python
|
"""Capture synthesizer audio for each of a batch of random chords.
By default, prints the number of JACK xruns (buffer overruns or underruns)
produced during the MIDI playback and capture process.
"""
import cProfile
import datetime
import json
import os
import pstats
import time
import numpy as np
import scipy.io.wavfile
import muser.audio as audio
import muser.live as live
import muser.sequencer as sequencer
import muser.utils as utils
rnd = np.random.RandomState()
date = datetime.datetime.now().strftime("%y%m%d-%Hh%M")
## Output configuration
out_dir = '/tmp/muser/chord_batches'
# save each chord's captured audio data to a .wav file
wav_out = False
# profile the audio capture operation
profile_capture = False
## Chord generation and capture parameters
batches = 10
batch_size = 32
chord_size = 1 #lambda: rnd.randint(1, 4)
# function to generate random velocity vectors
chord_gen = sequencer.random_velocity_vector
# scalar or range of velocity
velocity = (30, 128)
# duration of silence captured before sending chord's events
init_silence = 0.1
# duration of capture, before and after chord release
chord_time = 2.0
release_time = 0.0
## Synthesizer parameters
pianoteq_stereo = dict(
name='Pianoteq55',
midi_inports=['Pianoteq55:midi_in'],
outports=['Pianoteq55:out_1', 'Pianoteq55:out_2'],
reset=(0xB0, 0, 0),
)
## File name and path formats
out_subdir = os.path.join(out_dir, date)
os.makedirs(out_subdir, exist_ok=True)
names = dict(
pickle='batch{}.pickle',
wav='batch{}-chord{}.wav',
start_log='params.json',
end_log='end_log',
capture_profile='capture_events-batch{}_chord{}-profile',
)
paths = {k: os.path.join(out_subdir, name) for k, name in names.items()}
## Data structure for chord batches
chord_dtype = np.dtype([('velocity_vector', np.float32, sequencer.N_PITCHES),
('captured_buffers', object)])
batch = np.ndarray([batch_size], dtype=chord_dtype)
## JACK client initialization
client = live.SynthInterfaceClient(synth_config=pianoteq_stereo)
blocksize, samplerate = client.blocksize, client.samplerate
## Write to parameter log---for file monitors
# TODO: update utils.FileMonitor to use JSON logs
with open(paths['start_log'], 'w') as start_log:
params = {'paths': paths, 'samplerate': samplerate, 'blocksize': blocksize,
'batches': batches, 'batch_size': batch_size,
'times': [init_silence, chord_time, release_time]}
start_log.write(json.dumps(params))
with client:
client.connect_synth()
start_clock = time.perf_counter()
for i_batch in range(batches):
# generate batch of random chords (velocity vectors)
batch['velocity_vector'] = [chord_gen(chord_size, velocity=velocity)
for _ in range(batch_size)]
for i_chord, chord in enumerate(batch):
init_pause = {'events': None, 'duration': init_silence}
# prepare the chord's MIDI events
velocity_vector = chord['velocity_vector']
notes_on = sequencer.vector_to_midi_events('ON', velocity_vector)
on_events = {'events': notes_on, 'duration': chord_time}
notes_off = sequencer.vector_to_midi_events('OFF', velocity_vector)
off_events = {'events': notes_off, 'duration': release_time}
# collate event groups for client.capture_events
event_groups = [init_pause, on_events, off_events]
# send the event groups to the client for capture
if profile_capture:
name_i = paths['capture_profile'].format(i_batch, i_chord)
cProfile.run('client.capture_events(event_groups)', name_i)
else:
client.capture_events(event_groups)
# retrieve the captured audio for the chord
chord['captured_buffers'] = client.drop_captured()
# save the chord audio data to a .wav file
if wav_out:
snd = audio.buffers_to_snd(chord['captured_buffers'])
wav_path = paths['wav'].format(i_batch, i_chord)
scipy.io.wavfile.write(wav_path, samplerate, snd)
batch.dump(paths['pickle'].format(i_batch))
## print profile of the capture operation
# TODO: statistics across chord profiles
if profile_capture:
# (currently prints profile for first captured chord only)
name = paths['capture_profile'].format(0, 0)
profile = pstats.Stats(name).strip_dirs()
profile.sort_stats('time').print_stats(10)
## generate and write post-capture log
log_str = "Captured {} batches of {} chords, at [s]:\n".format(batches,
batch_size)
log_str += utils.logs_entryexit(client.capture_log,
output_labels={None: 'Xrun'},
ref_clock=start_clock,
header=('Start', 'End'))
xrun_print_end = ', at:' if client.n_xruns else '.'
log_str += "\n\n{} total Xruns{}\n".format(client.n_xruns, xrun_print_end)
for xrun in client.xruns - start_clock:
log_str += '{:10.4f} s\n'.format(xrun[0])
print('\n' + log_str)
with open(paths['end_log'], 'w') as end_log:
end_log.write(log_str)
|
nilq/baby-python
|
python
|
# swap (cyclically rotate) 4 variables using arithmetic, with no temporary variable
w=int(input("enter any number: "))
x=int(input("enter any number: "))
y=int(input("enter any number: "))
z=int(input("enter any number: "))
print("w before swap :{}".format(w))
print("x before swap:{}".format(x))
print("y before swap :{}".format(y))
print("z before swap :{}".format(z))
w=w+x+y+z
x=w-x-y-z
print("x after swap is {}".format(x))
y=w-x-y-z
print("y after swap is {}".format(y))
z=w-x-y-z
print("z after swap is {}".format(z))
w=w-x-y-z
print("w after swap is {}".format(w))
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from django.http import HttpResponse
from django.template import Template, RequestContext
from django.shortcuts import render
from metahumans import models
# Create your views here.
def all_heroes(request):
return render(request, 'metahumans/list_heroes.html', {
'heroes': models.SuperHero.objects.select_related('team').all(),
'title': 'Listado de superhéroes',
})
def list_levels(request):
return render(request, 'metahumans/levels.html', {
'heroes': models.SuperHero.objects.only('name', 'level').all().order_by('-level'),
'title': 'Listado de superhéroes por niveles',
})
def hero_details(request, slug):
sh = models.SuperHero.objects.get(slug=slug)
return render(request, 'metahumans/hero_details.html', {
'superhero': sh,
'title': sh.name,
})
|
nilq/baby-python
|
python
|
import UpdateItem as ui
import UpdateChecker as uc
import UpdateFileReader as ufr
import tkinter
from tkinter import messagebox
is_verbose = True
root = tkinter.Tk()
root.withdraw()
userfile = "updateList.txt"
currentReader = ufr.UpdateFileReader(userfile, is_verbose)
while currentReader.getNextItem():
currentItem = currentReader.getCurrentItemData()
if currentItem:
currentSoftware = uc.UpdateChecker(currentItem, is_verbose)
if currentSoftware.status:
currentVersion = currentSoftware.getCurrentVersion()
if currentVersion.new_version:
msg_result = messagebox.askyesno("Update available for " + currentVersion.name,"Version " + currentVersion.version_info + " available for " + currentVersion.name + " (current: " + currentVersion.installed_version + ") Have you updated yet?")
if msg_result:
currentItem.installed_version = currentVersion.version_info
currentReader.updateCurrentItemData(currentItem)
|
nilq/baby-python
|
python
|
# Modified: 2022-06-02
# Description: Defines the FastAPI app
#
from pathlib import Path
from motor.motor_asyncio import AsyncIOMotorClient
from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from controllers import game_controller, player_controller
from db import db
from config import settings
# create the app
app = FastAPI()
# attach CORS middleware; current settings are only appropriate for development environments
origins = [
"http://localhost",
]
app.add_middleware(
CORSMiddleware,
allow_origins=origins,
allow_credentials=True,
allow_methods=["*"],
allow_headers=["*"],
)
class ReactStaticFiles(StaticFiles):
"""Extends StaticFiles to allow React SPA to handle 404s"""
async def get_response(self, path, scope):
res = await super().get_response(path, scope)
if res.status_code == 404:
# funnel 404s back to React App: source https://stackoverflow.com/a/68363904
res = await super().get_response('.', scope)
return res
# attach API endpoints
app.include_router(game_controller.router, tags=["game"], prefix="/api/game")
app.include_router(player_controller.router, tags=["player"], prefix="/api/player")
if settings.STATIC_CONTENT_SRV and Path(settings.STATIC_CONTENT_DIR).is_dir():
app.mount("/", ReactStaticFiles(directory=settings.STATIC_CONTENT_DIR, html=True), name="static")
# open an asynchronous database connection on startup
@app.on_event("startup")
async def open_mongodb_connection():
print("Connecting to MongoDB client...")
db.client = AsyncIOMotorClient(settings.MONGODB_URI)
await db.index() # index the db for faster lookups and to enforce uniqueness
print("Connection successful" if db.client else "Connection failed")
# close the asynchronous database connection on shutdown
@app.on_event("shutdown")
async def close_mongodb_connection():
print("Closing connection to MongoDB client...")
db.client.close()
|
nilq/baby-python
|
python
|
'''
Modified run-length encoding.
Modify the result of problem P10 in such a way that if an element
has no duplicates it is simply copied into the result list. Only
elements with duplicates are transferred as (N E) lists.
Example:
* (encode-modified '(a a a a b c c a a d e e e e))
((4 A) B (2 C) (2 A) D (4 E))
'''
#taking input of all list elements at once, separated by spaces, and splitting with the split() method
demo_list = input("Enter elements separated by space: ").split(' ')
#creating new lists
runLength_converted_list = list()
encoded_list = list()
previous_item = demo_list[0] #assigning first element of demo_list to previous_item
temp_list = list() #creating new list as temp_list
for current_item in demo_list: #iterating through all elements of demo_list
if current_item == previous_item: #checking if previously added element is same as current element of list, for checking repetative elements
temp_list.append(current_item) #appending current element to temp_list. for creation of sublist
else: #if not repetative element
runLength_converted_list.append(temp_list[:]) #appending previously created sublist(temp_list) copy to new_list
temp_list.clear() #clearing temp_list to create new sublist
temp_list.append(current_item) #appending current_item to temp_list
previous_item = current_item #assigning current_item to previous_item
else:
runLength_converted_list.append(temp_list[:]) #appending temp_list copy to new_list
for item in runLength_converted_list: #iterating through all elements of demo_list
count_sublist_items = len(item) #new_list contains sublist of repetative elements, so finding size of sublist and appending to temp_list.
if count_sublist_items == 1:
encoded_list.append(item[0])
else:
encoded_list.append([count_sublist_items,item[0]]) #appending temp_list to encoded_list
#printing demo_list and its encoded list
print(f"old list: {demo_list}")
print(f"encoded list: {encoded_list}")
|
nilq/baby-python
|
python
|
import dash_html_components as html
class Component:
def render(self) -> html.Div:
raise NotImplementedError
|
nilq/baby-python
|
python
|
# The MIT License (MIT)
#
# Copyright (c) 2014 Steve Milner
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
"""
SQLAlchemy backend.
"""
from sqlalchemy import (
Column, Integer, SmallInteger, String, ForeignKey, create_engine)
from sqlalchemy.orm import relationship, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from flagon import errors
from flagon.backends import Backend
Base = declarative_base()
class Feature(Base):
__tablename__ = 'features'
name = Column(String, primary_key=True)
active = Column(SmallInteger)
strategy = Column(String)
params = relationship('Param', backref='feature')
class Param(Base):
__tablename__ = 'params'
id = Column(Integer, primary_key=True)
name = Column(String)
value = Column(String)
feature_id = Column(Integer, ForeignKey('features.name'))
class SQLAlchemyBackend(Backend):
def __init__(self, connection_str):
"""
:param connection_str: information can be found at
http://docs.sqlalchemy.org/en/rel_0_9/core/engines.html
Example: sqlite:///test.db
:type connection_str: str
        :rtype: SQLAlchemyBackend
"""
self._engine = create_engine(connection_str, echo=False)
Base.metadata.create_all(self._engine)
self._session = sessionmaker(bind=self._engine).__call__()
def exists(self, name):
"""
Checks if a feature exists.
:param name: name of the feature.
:rtype: bool
"""
return bool(self._session.query(Feature).filter_by(name=name).count())
def is_active(self, name):
"""
Checks if a feature is on.
:param name: name of the feature.
:rtype: bool
:raises: UnknownFeatureError
"""
if not self.exists(name):
raise errors.UnknownFeatureError('Unknown feature: %s' % name)
feature = self._session.query(Feature).filter_by(name=name).first()
return bool(feature.active)
def _turn(self, name, value):
"""
        Turns a feature on or off.
:param name: name of the feature.
:param value: 0 or 1
:raises: UnknownFeatureError
"""
if not self.exists(name):
raise errors.UnknownFeatureError('Unknown feature: %s' % name)
self._session.merge(Feature(name=name, active=value))
self._session.commit()
turn_on = lambda s, name: s._turn(name, 1)
    turn_off = lambda s, name: s._turn(name, 0)
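# Illustrative usage (in-memory SQLite; the feature row is an assumption):
#   backend = SQLAlchemyBackend('sqlite:///:memory:')
#   backend._session.add(Feature(name='beta', active=0, strategy=''))
#   backend._session.commit()
#   backend.turn_on('beta')
#   assert backend.is_active('beta')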
|
nilq/baby-python
|
python
|
import smartpy as sp
FA12 = sp.io.import_script_from_url("file:Fa12.py", name="FA12")
"""
Possible states of the swap
"""
class State():
Waiting = 1
Initiated = 2
"""
Swap record -
hashedSecret(bytes): current swap hash
    initiator(address): initiator's tezos address
    initiator_eth_addr(string): initiator's ethereum address
participant(address): counter-party/participant's tezos address
refundTimestamp(timestamp): unix time(sec) after which the swap expires
value(nat): value of the swap in fa1.2 tokens
state(State): current state of swap
"""
Swap = sp.TRecord(hashedSecret=sp.TBytes, initiator_eth_addr=sp.TString, initiator=sp.TAddress,
participant=sp.TAddress, refundTimestamp=sp.TTimestamp, value=sp.TNat, state=sp.TInt)
"""
Contract Storage -
admin(address): tezos address of the admin
reward(nat): reward in basis points for swap response
fa12(address): fa1.2 contract address
active(bool): contract state [true:active, false:inactive]
swaps(big_map(bytes,Swap)): map of hashed secrets and swap details
"""
class TokenSwap(sp.Contract):
def __init__(self, _admin, _fa12):
self.init(admin=_admin, reward=sp.as_nat(15), fa12=_fa12, active=sp.bool(False),
swaps=sp.big_map(tkey=sp.TBytes, tvalue=Swap))
"""
ensures only admin can call a function
"""
def onlyByAdmin(self):
sp.verify(sp.sender == self.data.admin)
"""
ensures only initiator of the swap can call a function
args:
_hashedSecret: hashed secret of the swap
"""
def onlyByInitiator(self, _hashedSecret):
sp.verify(sp.sender == self.data.swaps[_hashedSecret].initiator)
"""
checks if the contract is active
"""
def contractIsActive(self):
sp.verify(self.data.active == sp.bool(True))
"""
checks whether a swap can be initiated
args:
_hashedSecret: hashed secret of the swap
_refundTimestamp: unix time(sec) after which the swap expires
"""
def isInitiable(self, _hashedSecret, _refundTimestamp):
sp.verify(~self.data.swaps.contains(_hashedSecret))
sp.verify(sp.now < _refundTimestamp)
"""
    ensures the current swap state matches the required `state`
args:
_hashedSecret: hashed secret of the swap
_state: state the current swap is expected to be in
"""
def checkState(self, _hashedSecret, _state):
sp.verify(self.data.swaps[_hashedSecret].state == _state)
"""
checks whether the swap can be redeemed
args:
_hashedSecret: hashed secret of the swap
_secret: secret for the swap which produced the corresponding hashedSecret
"""
def isRedeemable(self, _hashedSecret, _secret):
sp.verify(self.data.swaps[_hashedSecret].refundTimestamp > sp.now)
sp.verify(self.data.swaps[_hashedSecret].hashedSecret == sp.sha256(
sp.sha256(_secret)))
"""
    checks whether the swap can be refunded
args:
_hashedSecret: hashed secret of the swap
"""
def isRefundable(self, _hashedSecret):
sp.verify((self.data.swaps[_hashedSecret].state == State.Initiated) | (
self.data.swaps[_hashedSecret].state == State.Waiting))
sp.verify(self.data.swaps[_hashedSecret].refundTimestamp <= sp.now)
"""
Toggle contract active state
args:
        _active: boolean value [true:active, false:inactive] representing contract state
"""
@sp.entry_point
def toggleContractState(self, _active):
self.onlyByAdmin()
self.data.active = _active
"""
Update reward for swaps responses
args:
_reward: a value representing the reward basis points
"""
@sp.entry_point
def updateReward(self, _reward):
self.onlyByAdmin()
self.data.reward = _reward
"""
Initiate new swap without counterParty details
args:
_hashedSecret: hash of the current swap secret
        _initiator_eth_addr: ethereum address of the current swap initiator
_amount: amount of fa1.2 tokens exchanged in the swap
_refundTimestamp: unix time(sec) after which the swap expires
"""
@sp.entry_point
def initiateWait(self, _amount, _hashedSecret, _refundTimestamp, initiator_eth_addr):
self.contractIsActive()
self.isInitiable(_hashedSecret, _refundTimestamp)
c = sp.contract(sp.TRecord(from_=sp.TAddress, to_=sp.TAddress,
value=sp.TNat).layout(("from_ as from", ("to_ as to", "value"))), self.data.fa12, entry_point="transfer").open_some()
transferData = sp.record(
from_=sp.sender, to_=sp.self_address, value=_amount)
sp.transfer(transferData, sp.mutez(0), c)
self.data.swaps[_hashedSecret] = sp.record(hashedSecret=_hashedSecret, initiator_eth_addr=initiator_eth_addr, initiator=sp.sender,
participant=sp.sender, refundTimestamp=_refundTimestamp, value=_amount, state=State.Waiting)
"""
Add counter-party details to an existing(initiated) swap
args:
_hashedSecret: hashed secret of the swap being updated
_participant: participant/counter-party tezos address
"""
@sp.entry_point
def addCounterParty(self, _hashedSecret, _participant):
self.contractIsActive()
self.checkState(_hashedSecret, State.Waiting)
self.onlyByInitiator(_hashedSecret)
self.data.swaps[_hashedSecret].state = State.Initiated
self.data.swaps[_hashedSecret].participant = _participant
"""
Redeem the swap if possible
args:
_hashedSecret: hashed secret of the swap being redeemed
_secret: secret for the swap which produced the corresponding hashedSecret
"""
@sp.entry_point
def redeem(self, _hashedSecret, _secret):
self.checkState(_hashedSecret, State.Initiated)
self.isRedeemable(_hashedSecret, _secret)
c = sp.contract(sp.TRecord(from_=sp.TAddress, to_=sp.TAddress,
value=sp.TNat).layout(("from_ as from", ("to_ as to", "value"))), self.data.fa12, entry_point="transfer").open_some()
transferData = sp.record(
from_=sp.self_address, to_=self.data.swaps[_hashedSecret].participant, value=self.data.swaps[_hashedSecret].value)
sp.transfer(transferData, sp.mutez(0), c)
del self.data.swaps[_hashedSecret]
"""
Refund the swap if possible
args:
_hashedSecret: hashed secret of the swap being refunded
"""
@sp.entry_point
def refund(self, _hashedSecret):
self.isRefundable(_hashedSecret)
c = sp.contract(sp.TRecord(from_=sp.TAddress, to_=sp.TAddress,
value=sp.TNat).layout(("from_ as from", ("to_ as to", "value"))), self.data.fa12, entry_point="transfer").open_some()
transferData = sp.record(
from_=sp.self_address, to_=self.data.swaps[_hashedSecret].initiator, value=self.data.swaps[_hashedSecret].value)
sp.transfer(transferData, sp.mutez(0), c)
del self.data.swaps[_hashedSecret]
@sp.add_test(name="Token Swap")
def test():
admin = sp.test_account("Administrator")
alice = sp.test_account("Alice")
bob = sp.test_account("Bob")
init_eth = "0x91f79893E7B923410Ef1aEba6a67c6fab0sfsdgffd"
hashSecret = sp.sha256(sp.sha256(sp.bytes(
"0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")))
token_metadata = {
"decimals" : "18", # Mandatory by the spec
"name" : "My Great Token", # Recommended
"symbol" : "MGT", # Recommended
# Extra fields
"icon" : 'https://smartpy.io/static/img/logo-only.svg'
}
contract_metadata = {
"" : "ipfs://QmaiAUj1FFNGYTu8rLBjc3eeN9cSKwaF8EGMBNDmhzPNFd",
}
c2 = FA12.FA12(admin.address,
config = FA12.FA12_config(support_upgradable_metadata = True),
token_metadata = token_metadata,
contract_metadata = contract_metadata)
c1 = TokenSwap(_admin=admin.address, _fa12=c2.address)
scenario = sp.test_scenario()
scenario.table_of_contents()
scenario.h1("Atomic Swap")
scenario += c1
scenario.h2("Accounts")
scenario.show([admin, alice, bob])
scenario.h2("FA1.2")
scenario.h3("Entry points")
scenario += c2
scenario.h3("Admin mints a few coins")
scenario += c2.mint(address=alice.address, value=12).run(sender=admin)
scenario += c2.mint(address=alice.address, value=3).run(sender=admin)
scenario += c2.mint(address=alice.address, value=3).run(sender=admin)
scenario.h2("Alice approves Contract")
scenario += c2.approve(spender=c1.address, value=10).run(sender=alice)
scenario.h2("Swap[Wait] Testing")
# no operations work without contract being active
scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp(
159682500), _amount=5).run(sender=alice, now=sp.timestamp(159682400), valid=False)
# activate only by admin
scenario += c1.toggleContractState(True).run(sender=alice, valid=False)
scenario += c1.toggleContractState(True).run(sender=admin)
# update reward only by admin
scenario += c1.updateReward(50).run(sender=alice, valid=False)
scenario += c1.updateReward(50).run(sender=admin)
# initiate new swap
scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp(
159682500), _amount=5).run(sender=alice, now=sp.timestamp(159682400))
# balance check
scenario.verify(c2.data.balances[c1.address].balance == sp.nat(5))
scenario.verify(c2.data.balances[alice.address].balance == sp.nat(13))
# cannot redeem before it is activated & initiated
scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes(
"0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")).run(sender=bob, now=sp.timestamp(159682450), valid=False)
    # adding a participant fails when the caller is not the initiator
scenario += c1.addCounterParty(_hashedSecret=hashSecret,
_participant=bob.address).run(sender=bob, valid=False)
# successful add participant only by initiator
scenario += c1.addCounterParty(_hashedSecret=hashSecret,
_participant=bob.address).run(sender=alice)
# cannot be redeemed with wrong secret
scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes(
"0x12345678aa")).run(sender=bob, now=sp.timestamp(159682450), valid=False)
# cannot be redeemed after refundtime has come
scenario += c1.redeem(_hashedSecret=hashSecret, _secret=sp.bytes(
"0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")).run(sender=bob, now=sp.timestamp(159682550), valid=False)
# new swap with the same hash cannot be added unless the previous one is redeemed/refunded
scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp(
159682500), _amount=5).run(sender=alice, amount=sp.tez(2), now=sp.timestamp(159682400), valid=False)
    # successful redeem can be initiated by anyone but funds transferred to participant
scenario += c1.redeem(_hashedSecret=hashSecret,
_secret=sp.bytes("0x68656c6c6f666473667364666c64736a666c73646a6664736a6673646a6b666a")).run(sender=bob, now=sp.timestamp(159682450))
# balance check
scenario.verify(c2.data.balances[c1.address].balance == sp.nat(0))
scenario.verify(c2.data.balances[bob.address].balance == sp.nat(5))
# successful swap creation with same hash after redeem
scenario += c1.initiateWait(_hashedSecret=hashSecret, initiator_eth_addr=init_eth, _refundTimestamp=sp.timestamp(
159682500), _amount=5).run(sender=alice, now=sp.timestamp(159682400))
# balance check
scenario.verify(c2.data.balances[c1.address].balance == sp.nat(5))
scenario.verify(c2.data.balances[alice.address].balance == sp.nat(8))
# cannot be refunded before the refundtime
scenario += c1.refund(hashSecret).run(sender=bob,
now=sp.timestamp(159682450), valid=False)
scenario += c1.refund(hashSecret).run(sender=alice,
now=sp.timestamp(159682450), valid=False)
    # can be refunded in any initiated or waiting state once the refund time has come; can be done by anyone but funds are transferred only to the initiator
scenario += c1.refund(hashSecret).run(sender=bob,
now=sp.timestamp(159682550))
# cannot be refunded again once it has been refunded
scenario += c1.refund(hashSecret).run(sender=alice,
now=sp.timestamp(159682550), valid=False)
# balance check
scenario.verify(c2.data.balances[c1.address].balance == sp.nat(0))
scenario.verify(c2.data.balances[alice.address].balance == sp.nat(13))
sp.add_compilation_target("TokenSwap", TokenSwap(_admin=sp.address("tz1Y8UNsMSCXyDgma8Ya51eLx8Qu4AoLm8vt"), _fa12=sp.address("KT1Y8UNsMSCXyDgma8Ya51eLx8Qu4AoLm8vt")), storage=None)
|
nilq/baby-python
|
python
|
with open('Day10 input.txt') as f:
lines = f.readlines()
chunk_dict = {
'(':')',
'[':']',
'{':'}',
'<':'>'
}
score_dict = {
')':3,
']':57,
'}':1197,
'>':25137
}
corrupted = []
score = 0
for line in lines:
chunk = ''
for l in line:
if l in ['(','[','{','<']:
chunk+=l
print(chunk)
if l in [')',']','}','>']:
chunk, c = chunk[:-1], chunk[-1]
if chunk_dict[c] != l:
score += score_dict[l]
print('Found an unexpected '+l)
corrupted.append(line)
break
print(score)
incompletes = [x for x in lines if x not in corrupted]
inc_chunks = []
for inc in incompletes:
chunk = ''
for l in inc:
if l in ['(','[','{','<']:
chunk+=l
print(chunk)
if l in [')',']','}','>']:
chunk = chunk[:-1]
inc_chunks.append(chunk)
inc_score_dict = {
'(':1,
'[':2,
'{':3,
'<':4
}
inc_scores = []
for inc in inc_chunks:
score = 0
for i in inc[::-1]:
score *= 5
score += inc_score_dict[i]
inc_scores.append(score)
inc_scores.sort()
print(inc_scores[(len(inc_scores)//2)])
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# encoding: utf-8
import random
import torch
from torch.utils.data import Dataset
from torch.utils.data import sampler
import torchvision.transforms as transforms
import lmdb
import six
import sys
from PIL import Image
import numpy as np
# Notes on lmdb usage: this code originally targeted Python 2.x, so it works with
# bytestrings rather than unicode; under Python 3.x every key/value must be
# explicitly encoded and decoded.
# https://lmdb.readthedocs.io/en/release/
# uses bytestring to mean either the Python<=2.7 str() type, or the Python>=3.0 bytes() type, d
# Always explicitly encode and decode any Unicode values before passing them to LMDB.
class lmdbDataset(Dataset):
def __init__(self, root=None, transform=None, target_transform=None):
self.env = lmdb.open(
root,
max_readers=1,
readonly=True,
lock=False,
readahead=False,
meminit=False)
if not self.env:
            print('cannot create lmdb from %s' % (root))
sys.exit(0)
with self.env.begin(write=False) as txn:
nSamples = int(txn.get('num-samples'.encode()).decode())
self.nSamples = nSamples
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return self.nSamples
def __getitem__(self, index):
        assert index < len(self), 'index range error'
index += 1
with self.env.begin(write=False) as txn:
img_key = 'image-%09d' % index
imgbuf = txn.get(img_key.encode())
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
img = Image.open(buf).convert('L')
except IOError:
print('Corrupted image for %d' % index)
return self[index + 1]
if self.transform is not None:
img = self.transform(img)
label_key = 'label-%09d' % index
label = txn.get(label_key.encode()).decode()
if self.target_transform is not None:
label = self.target_transform(label)
return (img, label)
class resizeNormalize(object):
def __init__(self, size, interpolation=Image.BILINEAR):
self.size = size
self.interpolation = interpolation
self.toTensor = transforms.ToTensor()
def __call__(self, img):
img = img.resize(self.size, self.interpolation)
img = self.toTensor(img)
img.sub_(0.5).div_(0.5)
return img
class randomSequentialSampler(sampler.Sampler):
def __init__(self, data_source, batch_size):
self.num_samples = len(data_source)
self.batch_size = batch_size
def __iter__(self):
n_batch = len(self) // self.batch_size
tail = len(self) % self.batch_size
index = torch.LongTensor(len(self)).fill_(0)
for i in range(n_batch):
random_start = random.randint(0, len(self) - self.batch_size)
            batch_index = random_start + torch.arange(0, self.batch_size)
index[i * self.batch_size:(i + 1) * self.batch_size] = batch_index
# deal with tail
if tail:
random_start = random.randint(0, len(self) - self.batch_size)
            tail_index = random_start + torch.arange(0, tail)
index[(i + 1) * self.batch_size:] = tail_index
return iter(index)
def __len__(self):
return self.num_samples
class alignCollate(object):
def __init__(self, imgH=32, imgW=100, keep_ratio=False, min_ratio=1):
self.imgH = imgH
self.imgW = imgW
self.keep_ratio = keep_ratio
self.min_ratio = min_ratio
def __call__(self, batch):
images, labels = zip(*batch)
imgH = self.imgH
imgW = self.imgW
output_images = []
for image in images:
if self.keep_ratio:
w, h = image.size
ratio = w / float(h)
imgW = int(np.floor(ratio * imgH))
                imgW = max(imgH * self.min_ratio, imgW)  # ensure imgW is at least imgH * min_ratio
# resize to the same imgH
transform = resizeNormalize((imgW, imgH))
output_images.append(transform(image))
# padding
# image.shape i.e. (1, 32, 100)
max_image_width = max([image.shape[2] for image in output_images])
max_label_length = max([len(label) for label in labels])
batch_size = len(output_images)
channel_size = 1
inputs = np.zeros((batch_size, channel_size, imgH, max_image_width), dtype='float32')
# '_' for blank label
output_labels =[['_'] * max_label_length for _ in range(batch_size)]
for x in range(batch_size):
image = output_images[x]
width = image.shape[2]
inputs[x, :, :, :width] = image
output_labels[x][:len(labels[x])] = labels[x]
# list to str
output_labels = [''.join(x) for x in output_labels]
images = torch.cat([torch.from_numpy(t).unsqueeze(0) for t in inputs], 0)
return images, output_labels
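# A minimal sanity-check sketch (assumption: run as a script; two dummy
# grayscale PIL images with short labels pushed through alignCollate).
if __name__ == '__main__':
    batch = [(Image.new('L', (100, 32)), 'ab'), (Image.new('L', (60, 32)), 'xyz')]
    imgs, labs = alignCollate(imgH=32, imgW=100)(batch)
    print(imgs.shape, labs)  # torch.Size([2, 1, 32, 100]) ['ab_', 'xyz']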
|
nilq/baby-python
|
python
|
class Occurrence(object):
"""
An Occurrence is an incarnation of a recurring event for a given date.
"""
    def __init__(self, event, start, end):
self.event = event
self.start = start
self.end = end
def __unicode__(self):
        return "%s to %s" % (self.start, self.end)
def __cmp__(self, other):
rank = cmp(self.start, other.start)
if rank == 0:
return cmp(self.end, other.end)
return rank
|
nilq/baby-python
|
python
|
# some modules use the old-style import: explicitly include
# the new module when the old one is referenced
hiddenimports = ["email.mime.text", "email.mime.multipart"]
|
nilq/baby-python
|
python
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
from collections import namedtuple
from xnas.search_space.DARTS.ops import *
from torch.autograd import Variable
def channel_shuffle(x, groups):
batchsize, num_channels, height, width = x.data.size()
channels_per_group = num_channels // groups
# reshape
x = x.view(batchsize, groups,
channels_per_group, height, width)
x = torch.transpose(x, 1, 2).contiguous()
# flatten
x = x.view(batchsize, -1, height, width)
return x
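# A quick shape sanity check for channel_shuffle (assumption: run as a script;
# shuffling with 4 groups preserves the shape while interleaving channel groups).
if __name__ == '__main__':
    _x = torch.randn(2, 8, 4, 4)
    assert channel_shuffle(_x, 4).shape == _x.shape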
class PcMixedOp(nn.Module):
def __init__(self, C_in, C_out, stride, basic_op_list=None):
super().__init__()
self.k = 4
self.mp = nn.MaxPool2d(2, 2)
self._ops = nn.ModuleList()
assert basic_op_list is not None, "the basic op list cannot be none!"
basic_primitives = basic_op_list
for primitive in basic_primitives:
op = OPS_[primitive](C_in//self.k, C_out//self.k, stride, affine=False)
self._ops.append(op)
def forward(self, x, weights):
# channel proportion k=4
dim_2 = x.shape[1]
xtemp = x[:, : dim_2//self.k, :, :]
xtemp2 = x[:, dim_2//self.k:, :, :]
assert len(self._ops) == len(weights)
'''
temp1 = 0
for i, value in enumerate(weights):
if value == 1:
temp1 += self._ops[i](xtemp)
if 0 < value < 1:
temp1 += value * self._ops[i](xtemp)'''
_x = []
for i, value in enumerate(weights):
if value == 1:
_x.append(self._ops[i](xtemp))
if 0 < value < 1:
_x.append(value * self._ops[i](xtemp))
# reduction cell needs pooling before concat
part_x = sum(_x)
if part_x.shape[2] == x.shape[2]:
ans = torch.cat([part_x, xtemp2], dim=1)
else:
ans = torch.cat([part_x, self.mp(xtemp2)], dim=1)
ans = channel_shuffle(ans, self.k)
# ans = torch.cat([ans[ : , dim_2//4:, :, :],ans[ : , : dim_2//4, :, :]],dim=1)
        # besides channel shuffle, channel shift also works
return ans
# the search cell in darts
class PcDartsCell(nn.Module):
def __init__(self, n_nodes, C_pp, C_p, C, reduction_p, reduction, basic_op_list, multiplier):
"""
Args:
n_nodes: # of intermediate n_nodes
C_pp: C_out[k-2]
C_p : C_out[k-1]
C : C_in[k] (current)
reduction_p: flag for whether the previous cell is reduction cell or not
reduction: flag for whether the current cell is reduction cell or not
"""
super().__init__()
self.reduction = reduction
self.n_nodes = n_nodes
self._multiplier = multiplier
self.basic_op_list = basic_op_list
# If previous cell is reduction cell, current input size does not match with
# output size of cell[k-2]. So the output[k-2] should be reduced by preprocessing.
if reduction_p:
self.preproc0 = FactorizedReduce(C_pp, C, affine=False)
else:
self.preproc0 = ReluConvBn(C_pp, C, 1, 1, 0, affine=False)
self.preproc1 = ReluConvBn(C_p, C, 1, 1, 0, affine=False)
# generate dag
self.dag = nn.ModuleList()
for i in range(self.n_nodes):
self.dag.append(nn.ModuleList())
for j in range(2+i): # include 2 input nodes
# reduction should be used only for input node
stride = 2 if reduction and j < 2 else 1
op = PcMixedOp(C, C, stride, self.basic_op_list)
self.dag[i].append(op)
def forward(self, s0, s1, sample, sample2):
s0 = self.preproc0(s0)
s1 = self.preproc1(s1)
states = [s0, s1]
w_dag = darts_weight_unpack(sample, self.n_nodes)
w_w_dag = darts_weight_unpack(sample2, self.n_nodes)
for edges, w_list, w_w_list in zip(self.dag, w_dag, w_w_dag):
s_cur = sum(ww * edges[i](s, w)
for i, (s, w, ww) in enumerate(zip(states, w_list, w_w_list)))
states.append(s_cur)
s_out = torch.cat(states[-self._multiplier:], 1)
return s_out
# PcDartsCNN
class PcDartsCNN(nn.Module):
def __init__(self, C=16, n_classes=10, n_layers=8, n_nodes=4, basic_op_list=[], multiplier=4):
super().__init__()
stem_multiplier = 3
self._multiplier = multiplier
self.C_in = 3 # 3
self.C = C # 16
self.n_classes = n_classes # 10
self.n_layers = n_layers # 8
self.n_nodes = n_nodes # 4
self.basic_op_list = ['none','max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3',
'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5' ] if len(basic_op_list) == 0 else basic_op_list
C_cur = stem_multiplier * C # 3 * 16 = 48
self.stem = nn.Sequential(
nn.Conv2d(self.C_in, C_cur, 3, 1, 1, bias=False),
nn.BatchNorm2d(C_cur)
)
# for the first cell, stem is used for both s0 and s1
# [!] C_pp and C_p is output channel size, but C_cur is input channel size.
C_pp, C_p, C_cur = C_cur, C_cur, C
# 48 48 16
self.cells = nn.ModuleList()
reduction_p = False
for i in range(n_layers):
# Reduce featuremap size and double channels in 1/3 and 2/3 layer.
if i in [n_layers // 3, 2 * n_layers // 3]:
C_cur *= 2
reduction = True
else:
reduction = False
cell = PcDartsCell(n_nodes, C_pp, C_p, C_cur, reduction_p, reduction, self.basic_op_list, multiplier)
reduction_p = reduction
self.cells.append(cell)
C_cur_out = C_cur * n_nodes
C_pp, C_p = C_p, C_cur_out
self.gap = nn.AdaptiveAvgPool2d(1)
self.linear = nn.Linear(C_p, n_classes)
# number of edges per cell
self.num_edges = sum(list(range(2, self.n_nodes + 2)))
# whole edges
self.all_edges = 2 * self.num_edges
def forward(self, x, sample, sample2):
s0 = s1 = self.stem(x)
for i, cell in enumerate(self.cells):
if cell.reduction:
alphas_reduce = sample[self.num_edges:]
betas_reduce = sample2[self.num_edges:]
weights = F.softmax(alphas_reduce, dim=-1)
n = 3
start = 2
weights2 = F.softmax(betas_reduce[0:2], dim=-1)
                for _ in range(self.n_nodes - 1):
end = start + n
tw2 = F.softmax(betas_reduce[start:end], dim=-1)
start = end
n += 1
weights2 = torch.cat([weights2, tw2], dim=0)
else:
alphas_normal = sample[0:self.num_edges]
betas_normal = sample2[0:self.num_edges]
weights = F.softmax(alphas_normal, dim=-1)
n = 3
start = 2
weights2 = F.softmax(betas_normal[0:2], dim=-1)
                for _ in range(self.n_nodes - 1):
end = start + n
tw2 = F.softmax(betas_normal[start:end], dim=-1)
start = end
n += 1
weights2 = torch.cat([weights2, tw2], dim=0)
s0, s1 = s1, cell(s0, s1, weights, weights2)
out = self.gap(s1)
out = out.view(out.size(0), -1) # flatten
logits = self.linear(out)
return logits
def genotype(self, theta, theta2):
Genotype = namedtuple(
'Genotype', 'normal normal_concat reduce reduce_concat')
a_norm = theta[0:self.num_edges]
a_reduce = theta[self.num_edges:]
b_norm = theta2[0:self.num_edges]
b_reduce = theta2[self.num_edges:]
weightn = F.softmax(a_norm, dim=-1)
weightr = F.softmax(a_reduce, dim=-1)
n = 3
start = 2
weightsn2 = F.softmax(b_norm[0:2], dim=-1)
weightsr2 = F.softmax(b_reduce[0:2], dim=-1)
for i in range(self.n_nodes - 1):
end = start + n
tn2 = F.softmax(b_norm[start:end], dim=-1)
tw2 = F.softmax(b_reduce[start:end], dim=-1)
start = end
n += 1
weightsn2 = torch.cat([weightsn2, tn2], dim=0)
weightsr2 = torch.cat([weightsr2, tw2], dim=0)
theta_norm = darts_weight_unpack(weightn, self.n_nodes)
theta_reduce = darts_weight_unpack(weightr, self.n_nodes)
theta2_norm = darts_weight_unpack(weightsn2, self.n_nodes)
theta2_reduce = darts_weight_unpack(weightsr2, self.n_nodes)
for t, etheta in enumerate(theta_norm):
for tt, eetheta in enumerate(etheta):
theta_norm[t][tt] *= theta2_norm[t][tt]
for t, etheta in enumerate(theta_reduce):
for tt, eetheta in enumerate(etheta):
theta_reduce[t][tt] *= theta2_reduce[t][tt]
gene_normal = pc_parse_from_numpy(
theta_norm, k=2, basic_op_list=self.basic_op_list)
gene_reduce = pc_parse_from_numpy(
theta_reduce, k=2, basic_op_list=self.basic_op_list)
concat = range(2 + self.n_nodes - self._multiplier, 2 + self.n_nodes) # concat all intermediate nodes
return Genotype(normal=gene_normal, normal_concat=concat,
reduce=gene_reduce, reduce_concat=concat)
def pc_parse_from_numpy(alpha, k, basic_op_list=None):
"""
parse continuous alpha to discrete gene.
alpha is ParameterList:
ParameterList [
Parameter(n_edges1, n_ops),
Parameter(n_edges2, n_ops),
...
]
gene is list:
[
[('node1_ops_1', node_idx), ..., ('node1_ops_k', node_idx)],
[('node2_ops_1', node_idx), ..., ('node2_ops_k', node_idx)],
...
]
each node has two edges (k=2) in CNN.
"""
gene = []
assert basic_op_list[0] == 'none' # assume last PRIMITIVE is 'none'
# 1) Convert the mixed op to discrete edge (single op) by choosing top-1 weight edge
# 2) Choose top-k edges per node by edge score (top-1 weight in edge)
for edges in alpha:
# edges: Tensor(n_edges, n_ops)
edge_max, primitive_indices = torch.topk(
torch.tensor(edges[:, 1:]), 1) # ignore 'none'
topk_edge_values, topk_edge_indices = torch.topk(edge_max.view(-1), k)
node_gene = []
for edge_idx in topk_edge_indices:
prim_idx = primitive_indices[edge_idx]
prim = basic_op_list[prim_idx+1]
node_gene.append((prim, edge_idx.item()))
gene.append(node_gene)
return gene
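# A tiny smoke test for pc_parse_from_numpy (assumption: run as a script and
# the xnas ops import above resolves; random alphas for a 2-node cell over the
# default 8-op list).
if __name__ == '__main__':
    _ops = ['none', 'max_pool_3x3', 'avg_pool_3x3', 'skip_connect', 'sep_conv_3x3',
            'sep_conv_5x5', 'dil_conv_3x3', 'dil_conv_5x5']
    _alphas = [torch.rand(2, 8), torch.rand(3, 8)]
    print(pc_parse_from_numpy(_alphas, k=2, basic_op_list=_ops))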
def _PcdartsCNN():
from xnas.core.config import cfg
return PcDartsCNN(
C=cfg.SPACE.CHANNEL,
n_classes=cfg.SEARCH.NUM_CLASSES,
n_layers=cfg.SPACE.LAYERS,
n_nodes=cfg.SPACE.NODES,
basic_op_list=cfg.SPACE.BASIC_OP)
|
nilq/baby-python
|
python
|
# This is just a demo file
print("Hello world")
print("this is an update to my previous code")
|
nilq/baby-python
|
python
|
import os
import asyncio
import sys
from typing import Any, Dict, Union, List # noqa
from tomodachi.watcher import Watcher
def test_watcher_auto_root() -> None:
watcher = Watcher()
assert watcher.root == [os.path.realpath(sys.argv[0].rsplit('/', 1)[0])]
def test_watcher_empty_directory() -> None:
root_path = '{}/tests/watcher_root/empty'.format(os.path.realpath(os.getcwd()))
watcher = Watcher(root=[root_path])
assert len(watcher.root) == 1
assert isinstance(watcher.watched_files, dict)
assert len(watcher.watched_files) == 0
def test_watcher_default_ignored_directory() -> None:
root_path = '{}/tests/watcher_root/__tmp__'.format(os.path.realpath(os.getcwd()))
watcher = Watcher(root=[root_path])
assert len(watcher.root) == 1
assert isinstance(watcher.watched_files, dict)
assert len(watcher.watched_files) == 0
def test_watcher_configurable_ignored_directory() -> None:
root_path = '{}/tests/watcher_root/configurable_ignored'.format(os.path.realpath(os.getcwd()))
watcher = Watcher(root=[root_path])
assert len(watcher.root) == 1
assert isinstance(watcher.watched_files, dict)
assert len(watcher.watched_files) == 1
watcher = Watcher(root=[root_path], configuration={'options': {'watcher': {'ignored_dirs': ['configurable_ignored']}}})
assert len(watcher.root) == 1
assert isinstance(watcher.watched_files, dict)
assert len(watcher.watched_files) == 0
def test_watcher_callback(loop: Any) -> None:
root_path = '{}/tests/watcher_root'.format(os.path.realpath(os.getcwd()))
watcher = Watcher(root=[root_path])
assert len(watcher.root) == 1
assert isinstance(watcher.watched_files, dict)
assert len(watcher.watched_files) == 2
result = watcher.update_watched_files()
assert result == {}
watcher.watched_files = {'_test': 0}
watcher.watched_files_crc = {'_test': ''}
result = watcher.update_watched_files(reindex=True)
    assert len(result.get('added', [])) == 2
    assert len(result.get('removed', [])) == 1
    assert len(result.get('updated', [])) == 0
class Test():
callbacks_run = {} # type: Dict[int, bool]
@classmethod
async def _async(cls) -> None:
async def cb1(updated_files: Union[List, set]) -> None:
cls.callbacks_run[1] = True
async def cb2(updated_files: Union[List, set]) -> None:
cls.callbacks_run[2] = True
task = await watcher.watch(callback_func=cb1)
await asyncio.sleep(1.0)
task.cancel()
watcher.watched_files = {'_test': 0}
watcher.watched_files_crc = {'_test': ''}
task = await watcher.watch(callback_func=cb2)
await asyncio.sleep(1.0)
task.cancel()
assert cls.callbacks_run.get(1) is None
assert cls.callbacks_run.get(2) is True
loop.run_until_complete(Test._async())
|
nilq/baby-python
|
python
|
import numpy as np
import tensorflow as tf
from distributed_single_sentence_classification.model_interface import model_zoo
from metric import tf_metrics
from model_io import model_io
from optimizer import distributed_optimizer
from optimizer import optimizer
from task_module import classifier, pretrain, pretrain_albert, tsa_pretrain
from utils.bert import bert_seq_utils, bert_seq_sample_utils
def train_metric(input_ids, predicted_logits, features, **kargs):
labels = input_ids[:, 1:] # <S>,1,2,3,<T>,<PAD>, <PAD>
logits = predicted_logits[:, :-1] # 1,2,3,<T>, xxx, xxx
input_id_logits = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels,
logits=logits)
if kargs.get('mask_type', 'left2right') == 'left2right':
tf.logging.info("***** using left2right mask and loss *****")
sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:],
kargs.get('[PAD]', 0)))
elif kargs.get('mask_type', 'left2right') == 'seq2seq':
tf.logging.info("***** using seq2seq mask and loss *****")
sequence_mask = tf.to_float(features['segment_ids'][:, 1:])
if not kargs.get('use_tpu', False):
tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask))
# sequence_mask = tf.to_float(tf.not_equal(labels,
# kargs.get('[PAD]', 0)))
per_example_perplexity = tf.reduce_sum(input_id_logits * sequence_mask, axis=-1) # batch
per_example_perplexity /= tf.reduce_sum(sequence_mask, axis=-1) # batch
perplexity = tf.reduce_mean(tf.exp(per_example_perplexity))
lm_token_accuracy = tf.equal(
tf.cast(labels, tf.int32),
tf.cast(tf.argmax(logits, axis=-1), tf.int32))
lm_token_accuracy = tf.reduce_sum(tf.cast(lm_token_accuracy, tf.float32) * sequence_mask, axis=-1)
lm_token_accuracy /= tf.reduce_sum(sequence_mask, axis=-1) # batch
return {
"perplexity": perplexity,
"token_acc": tf.reduce_mean(lm_token_accuracy)
}
def eval_metric(input_ids, predicted_logits, sequence_mask, **kargs):
labels = input_ids[:, 1:] # <S>,1,2,3,<T>,<PAD>, <PAD>
logits = predicted_logits[:, :-1] # 1,2,3,<T>, xxx, xxx
input_id_logits = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels,
logits=logits)
# sequence_mask = tf.to_float(tf.not_equal(labels,
# kargs.get('[PAD]', 0)))
per_example_perplexity = tf.reduce_sum(input_id_logits * sequence_mask, axis=-1) # batch
per_example_perplexity /= tf.reduce_sum(sequence_mask, axis=-1) # batch
perplexity = tf.exp(per_example_perplexity)
ppl_avg = tf.metrics.mean(values=perplexity)
lm_token_accuracy = tf.metrics.accuracy(
labels=tf.cast(labels, tf.int32),
predictions=tf.cast(tf.argmax(logits, axis=-1), tf.int32),
weights=sequence_mask)
return {
"perplexity":ppl_avg,
"token_acc":lm_token_accuracy
}
def classifier_model_fn_builder(
model_config,
num_labels,
init_checkpoint,
model_reuse=None,
load_pretrained=True,
model_io_config={},
opt_config={},
exclude_scope="",
not_storage_params=[],
target="a",
**kargs):
def model_fn(features, labels, mode, params):
model_api = model_zoo(model_config)
seq_features = {}
for key in features:
seq_features[key] = features[key]
if 'input_ori_ids' in features:
seq_features['input_ids'] = features["input_ori_ids"]
else:
features['input_ori_ids'] = seq_features['input_ids']
model = model_api(model_config, seq_features, labels,
mode, target, reuse=tf.AUTO_REUSE,
**kargs)
if mode == tf.estimator.ModeKeys.TRAIN:
dropout_prob = model_config.dropout_prob
else:
dropout_prob = 0.0
if model_io_config.fix_lm == True:
scope = model_config.scope + "_finetuning"
else:
scope = model_config.scope
# if mode == tf.estimator.ModeKeys.TRAIN:
if kargs.get('mask_type', 'left2right') == 'left2right':
tf.logging.info("***** using left2right mask and loss *****")
sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:],
kargs.get('[PAD]', 0)))
elif kargs.get('mask_type', 'left2right') == 'seq2seq':
tf.logging.info("***** using seq2seq mask and loss *****")
sequence_mask = tf.to_float(features['segment_ids'][:, 1:])
if not kargs.get('use_tpu', False):
tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask))
# batch x seq_length
print(model.get_sequence_output_logits().get_shape(), "===logits shape===")
seq_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=features['input_ori_ids'][:, 1:],
logits=model.get_sequence_output_logits()[:, :-1])
per_example_loss = tf.reduce_sum(seq_loss*sequence_mask, axis=-1) / (tf.reduce_sum(sequence_mask, axis=-1)+1e-10)
loss = tf.reduce_mean(per_example_loss)
model_io_fn = model_io.ModelIO(model_io_config)
pretrained_tvars = model_io_fn.get_params(model_config.scope,
not_storage_params=not_storage_params)
lm_pretrain_tvars = model_io_fn.get_params("cls/predictions",
not_storage_params=not_storage_params)
pretrained_tvars.extend(lm_pretrain_tvars)
use_tpu = 1 if kargs.get('use_tpu', False) else 0
if load_pretrained == "yes":
scaffold_fn = model_io_fn.load_pretrained(pretrained_tvars,
init_checkpoint,
exclude_scope=exclude_scope,
use_tpu=use_tpu)
else:
scaffold_fn = None
if mode == tf.estimator.ModeKeys.TRAIN:
if kargs.get('use_tpu', False):
optimizer_fn = optimizer.Optimizer(opt_config)
use_tpu = 1
                tf.logging.info("***** using tpu with tpu-compatible optimizer *****")
else:
optimizer_fn = distributed_optimizer.Optimizer(opt_config)
use_tpu = 0
                tf.logging.info("***** using gpu with gpu-compatible optimizer *****")
tvars = pretrained_tvars
model_io_fn.print_params(tvars, string=", trainable params")
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
train_op = optimizer_fn.get_train_op(loss, tvars,
opt_config.init_lr,
opt_config.num_train_steps,
use_tpu=use_tpu)
train_metric_dict = train_metric(features['input_ori_ids'],
model.get_sequence_output_logits(),
seq_features,
**kargs)
if not kargs.get('use_tpu', False):
for key in train_metric_dict:
tf.summary.scalar(key, train_metric_dict[key])
tf.summary.scalar('learning_rate', optimizer_fn.learning_rate)
tf.logging.info("***** logging metric *****")
tf.summary.scalar("causal_attenion_mask_length", tf.reduce_sum(model.attention_mask))
tf.summary.scalar("bi_attenion_mask_length", tf.reduce_sum(model.bi_attention_mask))
if kargs.get('use_tpu', False):
estimator_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op,
scaffold_fn=scaffold_fn)
else:
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode,
loss=loss,
train_op=train_op)
return estimator_spec
elif mode == tf.estimator.ModeKeys.EVAL:
if kargs.get('mask_type', 'left2right') == 'left2right':
tf.logging.info("***** using left2right mask and loss *****")
sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:],
kargs.get('[PAD]', 0)))
elif kargs.get('mask_type', 'left2right') == 'seq2seq':
tf.logging.info("***** using seq2seq mask and loss *****")
sequence_mask = tf.to_float(features['segment_ids'][:, 1:])
if not kargs.get('use_tpu', False):
tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask))
gpu_eval_metrics = eval_metric(features['input_ori_ids'],
model.get_sequence_output_logits(),
sequence_mask,
mask_type=kargs.get('mask_type', 'left2right'))
else:
tpu_eval_metrics = (eval_metric, [
features['input_ori_ids'],
model.get_sequence_output_logits(),
sequence_mask
])
print("===tpu metric==", tpu_eval_metrics, "==tpu metric++")
if kargs.get('use_tpu', False):
estimator_spec = tf.contrib.tpu.TPUEstimatorSpec(
mode=mode,
loss=loss,
eval_metrics=tpu_eval_metrics,
scaffold_fn=scaffold_fn)
else:
estimator_spec = tf.estimator.EstimatorSpec(mode=mode,
loss=loss,
eval_metric_ops=gpu_eval_metrics)
return estimator_spec
elif mode == tf.estimator.ModeKeys.PREDICT:
if kargs.get('predict_type', 'sample_sequence') == 'sample_sequence':
results = bert_seq_sample_utils.sample_sequence(model_api,
model_config,
mode,
features,
target="",
start_token=kargs.get("start_token_id", 101),
batch_size=None,
context=features.get("context", None),
temperature=kargs.get("sample_temp", 1.0),
n_samples=kargs.get("n_samples", 1),
top_k=0,
end_token=kargs.get("end_token_id", 102),
greedy_or_sample="greedy",
gumbel_temp=0.01,
estimator="stop_gradient",
back_prop=True,
swap_memory=True,
seq_type=kargs.get("seq_type", "seq2seq"),
mask_type=kargs.get("mask_type", "seq2seq"),
attention_type=kargs.get('attention_type', 'normal_attention')
)
# stop_gradient output:
# samples, mask_sequence, presents, logits, final
sampled_token = results['samples']
sampled_token_logits = results['logits']
mask_sequence = results['mask_sequence']
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'token':sampled_token,
"logits":sampled_token_logits,
"mask_sequence":mask_sequence
},
export_outputs={
"output":tf.estimator.export.PredictOutput(
{
'token':sampled_token,
"logits":sampled_token_logits,
"mask_sequence":mask_sequence
}
)
}
)
return estimator_spec
elif kargs.get('predict_type', 'sample_sequence') == 'infer_inputs':
sequence_mask = tf.to_float(tf.not_equal(features['input_ids'][:, 1:],
kargs.get('[PAD]', 0)))
if kargs.get('mask_type', 'left2right') == 'left2right':
tf.logging.info("***** using left2right mask and loss *****")
sequence_mask = tf.to_float(tf.not_equal(features['input_ori_ids'][:, 1:],
kargs.get('[PAD]', 0)))
elif kargs.get('mask_type', 'left2right') == 'seq2seq':
tf.logging.info("***** using seq2seq mask and loss *****")
sequence_mask = tf.to_float(features['segment_ids'][:, 1:])
if not kargs.get('use_tpu', False):
tf.summary.scalar("loss mask", tf.reduce_mean(sequence_mask))
output_logits = model.get_sequence_output_logits()[:, :-1]
# output_logits = tf.nn.log_softmax(output_logits, axis=-1)
output_id_logits = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=features['input_ids'][:, 1:],
logits=output_logits)
per_example_perplexity = tf.reduce_sum(output_id_logits * sequence_mask,
axis=-1) # batch
per_example_perplexity /= tf.reduce_sum(sequence_mask, axis=-1) # batch
perplexity = tf.exp(per_example_perplexity)
estimator_spec = tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'token':features['input_ids'][:, 1:],
"logits":output_id_logits,
'perplexity':perplexity,
"all_logits":output_logits
},
export_outputs={
"output":tf.estimator.export.PredictOutput(
{
'token':features['input_ids'][:,1:],
"logits":output_id_logits,
'perplexity':perplexity,
"all_logits":output_logits
}
)
}
)
return estimator_spec
else:
raise NotImplementedError()
return model_fn
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
'''test cases for config_loader module'''
import unittest
import os
import shutil
import cray.craylib.config_loader as config_loader
from cray.craylib.generate_manager import GenerateManager
ROOT_DIR = os.path.join(os.path.dirname(__file__), "test_site")
SITE_DIR = os.path.join(os.path.dirname(__file__), "_site")
def get_test_suites():
'''Return test cases as a suite in this module'''
suite = unittest.TestSuite()
suite.addTest(SiteGenerationTestCase())
return suite
class SiteGenerationTestCase(unittest.TestCase):
'''Test case for post generation'''
def runTest(self):
'''Run test'''
if os.path.exists(SITE_DIR):
shutil.rmtree(SITE_DIR, ignore_errors=True)
conf_loader = config_loader.ConfigLoader(ROOT_DIR)
self.assertTrue(conf_loader.parse_config())
generate_manager = GenerateManager(ROOT_DIR)
generate_manager.read_config(conf_loader)
generate_manager.generate_site()
self.assertTrue(os.path.exists(SITE_DIR))
index_path = os.path.join(SITE_DIR, 'index.html')
about_path = os.path.join(SITE_DIR, 'about', 'index.html')
hello_post_path = os.path.join(SITE_DIR, 'post', '2017', '6', '2', 'hello-world', \
'index.html')
rss_path = os.path.join(SITE_DIR, 'feed.xml')
self.assertTrue(os.path.exists(index_path))
self.assertTrue(os.path.exists(about_path))
self.assertTrue(os.path.exists(hello_post_path))
self.assertTrue(os.path.exists(rss_path))
index_content = r'''<html>
<head>
<meta charset="utf-8">
<title>Index</title>
</head>
<body>
<header class="site-header">
<div class="wrapper">
<a class="site-title" href="/">Index</a>
<nav class="site-nav">
<!--
<a href="#" class="menu-icon">
<svg viewBox="0 0 18 15">
<path fill="#424242" d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.031C17.335,0,18,0.665,18,1.484L18,1.484z"/>
<path fill="#424242" d="M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0c0-0.82,0.665-1.484,1.484-1.484 h15.031C17.335,6.031,18,6.696,18,7.516L18,7.516z"/>
<path fill="#424242" d="M18,13.516C18,14.335,17.335,15,16.516,15H1.484C0.665,15,0,14.335,0,13.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.031C17.335,12.031,18,12.696,18,13.516L18,13.516z"/>
</svg>
</a>
-->
<div class="trigger">
<a class="page-link" href="/about/">about</a>
</div>
</nav>
</div>
</header>
<h1>Post list:</h1>
<ul id="navigation">
<li><a href="post/2017/6/2/hello-world">Welcome to Cray!</a></li>
</ul>
<footer>
<h3>Powered by Bolun 2013 - 2017</h3>
</footer>
</body>
</html>'''
about_content = r'''<html>
<head>
<meta charset="utf-8">
<title>about</title>
</head>
<body>
<header class="site-header">
<div class="wrapper">
<a class="site-title" href="/">Index</a>
<nav class="site-nav">
<!--
<a href="#" class="menu-icon">
<svg viewBox="0 0 18 15">
<path fill="#424242" d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.031C17.335,0,18,0.665,18,1.484L18,1.484z"/>
<path fill="#424242" d="M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0c0-0.82,0.665-1.484,1.484-1.484 h15.031C17.335,6.031,18,6.696,18,7.516L18,7.516z"/>
<path fill="#424242" d="M18,13.516C18,14.335,17.335,15,16.516,15H1.484C0.665,15,0,14.335,0,13.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.031C17.335,12.031,18,12.696,18,13.516L18,13.516z"/>
</svg>
</a>
-->
<div class="trigger">
<a class="page-link" href="/about/">about</a>
</div>
</nav>
</div>
</header>
<h1>about</h1>
<div><p>This is the first test page for test_site</p></div>
<footer>
<h3>Powered by Bolun 2013 - 2017</h3>
</footer>
</body>
</html>'''
hello_content = r'''<html>
<head>
<meta charset="utf-8">
<title>Welcome to Cray!</title>
</head>
<body>
<header class="site-header">
<div class="wrapper">
<a class="site-title" href="/">Index</a>
<nav class="site-nav">
<!--
<a href="#" class="menu-icon">
<svg viewBox="0 0 18 15">
<path fill="#424242" d="M18,1.484c0,0.82-0.665,1.484-1.484,1.484H1.484C0.665,2.969,0,2.304,0,1.484l0,0C0,0.665,0.665,0,1.484,0 h15.031C17.335,0,18,0.665,18,1.484L18,1.484z"/>
<path fill="#424242" d="M18,7.516C18,8.335,17.335,9,16.516,9H1.484C0.665,9,0,8.335,0,7.516l0,0c0-0.82,0.665-1.484,1.484-1.484 h15.031C17.335,6.031,18,6.696,18,7.516L18,7.516z"/>
<path fill="#424242" d="M18,13.516C18,14.335,17.335,15,16.516,15H1.484C0.665,15,0,14.335,0,13.516l0,0 c0-0.82,0.665-1.484,1.484-1.484h15.031C17.335,12.031,18,12.696,18,13.516L18,13.516z"/>
</svg>
</a>
-->
<div class="trigger">
<a class="page-link" href="/about/">about</a>
</div>
</nav>
</div>
</header>
<h1>Welcome to Cray!</h1>
<p>2017-06-02 22:22:22</p>
<div><p>hello world!</p></div>
<footer>
<h3>Powered by Bolun 2013 - 2017</h3>
</footer>
</body>
</html>'''
rss_title_regex = '<title>Demo</title>'
rss_description_regex = '<description>demo site description</description>'
rss_item_title_regex = '<title>Welcome to Cray!</title>'
rss_item_description_regex = r'<description>\s+hello world!</description>'
rss_item_link_regex = r'<link>http://www.demo.com/post/2017/6/2/hello-world</link>'
rss_item_guid_regex = r'<guid isPermaLink=\"false\">5876f9d8-bd18-3935-9d2f-5dc36c00ae5f</guid>'
rss_item_pubdate_regex = r'<pubDate>2017-06-02 22:22:22</pubDate>\s+</item>'
self.maxDiff = None
with open(index_path) as index_fd:
self.assertEqual(index_content, index_fd.read())
with open(about_path) as about_fd:
self.assertEqual(about_content, about_fd.read())
with open(hello_post_path) as hello_fd:
self.assertEqual(hello_content, hello_fd.read())
with open(rss_path) as rss_fd:
            content = rss_fd.read()
            self.assertRegex(content, rss_title_regex)
            self.assertRegex(content, rss_description_regex)
            self.assertRegex(content, rss_item_title_regex)
            self.assertRegex(content, rss_item_description_regex)
            self.assertRegex(content, rss_item_link_regex)
            self.assertRegex(content, rss_item_guid_regex)
            self.assertRegex(content, rss_item_pubdate_regex)
if os.path.exists(SITE_DIR):
shutil.rmtree(SITE_DIR, ignore_errors=True)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import sys
import time
import math
def go(n, partials):
    return (partials[-1] - partials[n]) % 10
def fft(l):
    """'Flawed Frequency Transmission' (AoC 2019 day 16): past the halfway
    offset each output digit is just a suffix sum mod 10, so the partial
    sums computed below are all that is needed."""
partials = [0]
sum = 0
for v in l:
sum += v
partials.append(sum)
x = []
    for i in range(len(l)):
        x.append(go(i, partials))
return x
def main(args):
orig_data = [int(x) for x in [s.strip() for s in sys.stdin][0]]
data = orig_data * 10000
offset = int(''.join(str(x) for x in data[:7]))
    assert offset*2 > len(data)  # the suffix-sum shortcut only works past the halfway point
data = data[offset:]
for i in range(100):
data = fft(data)
print(''.join(str(x) for x in data[:8]))
if __name__ == '__main__':
main(sys.argv)
|
nilq/baby-python
|
python
|
import enolib
def test_querying_an_existing_single_line_required_string_comment_from_a_section_produces_the_expected_result():
input = ("> comment\n"
"# section")
output = enolib.parse(input).section('section').required_string_comment()
expected = ("comment")
assert output == expected
def test_querying_an_existing_two_line_required_string_comment_from_a_section_produces_the_expected_result():
input = (">comment\n"
"> comment\n"
"# section")
output = enolib.parse(input).section('section').required_string_comment()
expected = ("comment\n"
" comment")
assert output == expected
def test_querying_an_existing_required_string_comment_with_blank_lines_from_a_section_produces_the_expected_result():
input = (">\n"
"> comment\n"
">\n"
"> comment\n"
">\n"
"> comment\n"
">\n"
"# section")
output = enolib.parse(input).section('section').required_string_comment()
expected = (" comment\n"
"\n"
" comment\n"
"\n"
"comment")
assert output == expected
def test_querying_an_optional_existing_string_comment_from_a_section_produces_the_expected_result():
input = ("> comment\n"
"# section")
output = enolib.parse(input).section('section').optional_string_comment()
expected = ("comment")
assert output == expected
def test_querying_an_optional_missing_string_comment_from_a_section_produces_the_expected_result():
input = ("# section")
output = enolib.parse(input).section('section').optional_string_comment()
    assert output is None
|
nilq/baby-python
|
python
|
"""
test_finger_pks.py
Copyright 2012 Andres Riancho
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
from nose.plugins.attrib import attr
from w3af.plugins.tests.helper import PluginTest, PluginConfig
class TestFingerPKS(PluginTest):
base_url = 'http://www.bonsai-sec.com/'
_run_configs = {
'cfg': {
'target': base_url,
'plugins': {'infrastructure': (PluginConfig('finger_pks'),)}
}
}
@attr('ci_fails')
def test_find_pks_email(self):
cfg = self._run_configs['cfg']
self._scan(cfg['target'], cfg['plugins'])
emails = self.kb.get('emails', 'emails')
self.assertEqual(len(emails), 2, emails)
|
nilq/baby-python
|
python
|
import numpy as np
import cv2
# 'uint8' assigns an 8bit unsigned integer to the colour values in the array
pic = np.zeros((512, 512, 3), dtype = 'uint8')
# Magenta colour (note: OpenCV uses BGR channel order), not color
colour = (255, 0, 255)
# Circles overview: https://www.khanacademy.org/math/basic-geo/basic-geo-area-and-perimeter/area-circumference-circle/a/radius-diameter-circumference
# Radius is "from the centre to any point on the circle itself"
# Diameter is "from any point on the circle through the centre itself all the way to the other side (which is 2x the radius!)"
# Circumference is "the distance of circle itself all the way around (diameter * 3.14159 or C/d = π)"
# Draws an unaliased circle with a radius of 128px (so a 256px diameter)
cv2.circle(pic, (256, 256), 128, colour)
# Learn more: https://docs.opencv.org/2.4/modules/core/doc/drawing_functions.html
# Antialiasing should be straightforward: https://stackoverflow.com/questions/11055837/drawing-a-line-in-opencv-with-cv-aa-flags-is-not-producing-an-anti-aliased-line#25420463
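# An antialiased variant for comparison (assumption: passing cv2.LINE_AA as the
# lineType smooths the circle's edge; drawn at a larger radius on the same canvas)
cv2.circle(pic, (256, 256), 160, colour, lineType=cv2.LINE_AA)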
cv2.imshow('Circle', pic)
cv2.waitKey(0)
cv2.destroyAllWindows()
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
# _*_ coding:utf-8 _*_
class Solution(object):
def generateParenthesis(self, n):
if n <= 0:
return []
if n == 1:
return ['()']
        # every valid n-pair string arises from inserting '()' somewhere in a
        # valid (n-1)-pair string; the set below removes duplicate insertions
        res = self.generateParenthesis(n - 1)
ret = set()
for v in res:
for i in range(len(v)):
ret.add(v[0: i] + '()' + v[i:])
return list(ret)
if __name__ == '__main__':
so = Solution()
# assert (so.generateParenthesis(0) == [])
# assert (so.generateParenthesis(1) == ['()'])
# print so.generateParenthesis(2)
a = so.generateParenthesis(3)
print 'n:3 sum:', len(a)
a = so.generateParenthesis(4)
print 'n:4 sum:', len(a)
a = so.generateParenthesis(5)
print 'n:5 sum:', len(a)
a = so.generateParenthesis(6)
print 'n:6 sum:', len(a)
|
nilq/baby-python
|
python
|
from django_codemod.constants import DJANGO_1_9, DJANGO_3_1
from django_codemod.visitors.base import BaseRenameTransformer
class PrettyNameTransformer(BaseRenameTransformer):
"""Replace `django.forms.forms.pretty_name` compatibility import."""
deprecated_in = DJANGO_1_9
removed_in = DJANGO_3_1
rename_from = "django.forms.forms.pretty_name"
rename_to = "django.forms.utils.pretty_name"
class BoundFieldTransformer(BaseRenameTransformer):
"""Replace `django.forms.forms.BoundField` compatibility import."""
deprecated_in = DJANGO_1_9
removed_in = DJANGO_3_1
rename_from = "django.forms.forms.BoundField"
rename_to = "django.forms.boundfield.BoundField"
|
nilq/baby-python
|
python
|
from yahoo import Quote, YahooQuote
stocks = ['AA', 'AXP', 'BA', 'BAC', 'CAT', 'CSCO', 'CVX', 'DD', 'DIS', 'GE', 'HD', 'HPQ', 'IBM', 'INTC', 'JNJ']
stocks += ['JPM', 'KO', 'MCD', 'MMM', 'MRK', 'MSFT', 'PFE', 'PG', 'T', 'TRV', 'UNH', 'UTX', 'VZ', 'WMT', 'XOM']
price = {}
quotes = {}
returns = {}
for s in stocks:
print 'Stock', s
for year in range(1993, 2015):
try:
quotes[year, s] = YahooQuote(s,'%s-01-01'%(str(year)), '%s-01-08'%(str(year)))
        except ValueError:
            continue  # skip years with no quote data instead of crashing below
for q in str(quotes[year, s]).split('\n'):
if q.split(',')[0] == s:
price[year, s] = float(q.split(',')[5])
break
for s in stocks:
for year in range(1994, 2015):
returns[year, s] = (price[year, s]-price[year -1, s])/price[year -1, s]
f = open('DJIA.dat', 'w')
f.write('set assets := ')
for s in stocks:
f.write(s+' ')
f.write(';\n')
f.write('param R :')
for s in stocks:
f.write(s+' ')
f.write(':=\n')
for year in range(1994, 2015):
f.write(str(year)+' ')
for s in stocks:
f.write('%.3f '%(returns[year, s]))
f.write('\n')
f.write(';\n')
f.close()
print 'param R :',
for s in stocks:
print s,
print ':='
for year in range(1994, 2015):
print year,
for s in stocks:
print '%.3f'%(returns[year, s]),
print
|
nilq/baby-python
|
python
|
"""Support for control of ElkM1 outputs (relays)."""
from homeassistant.components.switch import SwitchEntity
from . import ElkAttachedEntity, create_elk_entities
from .const import DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Create the Elk-M1 switch platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
entities = []
elk = elk_data["elk"]
create_elk_entities(elk_data, elk.outputs, "output", ElkOutput, entities)
async_add_entities(entities, True)
class ElkOutput(ElkAttachedEntity, SwitchEntity):
"""Elk output as switch."""
@property
def is_on(self) -> bool:
"""Get the current output status."""
return self._element.output_on
async def async_turn_on(self, **kwargs):
"""Turn on the output."""
self._element.turn_on(0)
async def async_turn_off(self, **kwargs):
"""Turn off the output."""
self._element.turn_off()
|
nilq/baby-python
|
python
|
import pymongo
import config
from . import connection, db
def create_indexes():
"""
Create mongodb indexes.
"""
# VCF collection indexes
db.vcfs.drop_indexes()
db.vcfs.create_index("name")
db.vcfs.create_index("samples")
db.vcfs.create_index( [ ("filename", pymongo.ASCENDING), ("fileformat", pymongo.ASCENDING), ("filedate", pymongo.ASCENDING) ], sparse=True )
db.vcfs.create_index("INFO")
db.vcfs.create_index("FORMAT")
db.vcfs.create_index("FILTER")
# Variant collection indexes
db.variants.drop_indexes()
db.variants.create_index("samples.sample")
db.variants.create_index([("samples.sample", pymongo.ASCENDING),("samples.filter", pymongo.ASCENDING)], sparse=True)
db.variants.create_index("samples.vcf_id")
# Filter indexes
db.variants.create_index([("chr",pymongo.ASCENDING),("samples.info.POS_RANGE",pymongo.ASCENDING),("orientation",pymongo.ASCENDING),("chr2",pymongo.ASCENDING),("remoteOrientation",pymongo.ASCENDING),("samples.sample",pymongo.ASCENDING)], sparse=True)
def resetdb():
"""
Drop database and recreate indexes.
"""
connection.drop_database(config.MONGODB_NAME)
create_indexes()
|
nilq/baby-python
|
python
|
#
# Copyright (C) 2018 SecurityCentral Contributors see LICENSE for license
#
"""
This base platform module exports platform related tasks.
"""
from securitycentralplatform.os_detection import platform_detection
class SecurityCentralPlatformTasks(platform_detection("tasks")):
pass
tasks = SecurityCentralPlatformTasks()
|
nilq/baby-python
|
python
|
from django import forms
from apps.link.models import Link, Advertise
from apps.post.models import Category, Post
class CategoryAddForm(forms.ModelForm):
class Meta:
model = Category
fields = "__all__"
class CategoryEditForm(forms.ModelForm):
pk = forms.CharField(max_length=100)
class Meta:
model = Category
fields = "__all__"
class PostAddForm(forms.ModelForm):
class Meta:
model = Post
exclude = ('read_num',)
class PostEditForm(forms.ModelForm):
pk = forms.CharField(max_length=100)
class Meta:
model = Post
exclude = ('read_num',)
class LinkAddForm(forms.ModelForm):
class Meta:
model = Link
fields = "__all__"
class LinkEditForm(forms.ModelForm):
pk = forms.CharField(max_length=100)
class Meta:
model = Link
fields = "__all__"
class AdvertiseAddForm(forms.ModelForm):
class Meta:
model = Advertise
fields = "__all__"
class AdvertiseEditForm(forms.ModelForm):
pk = forms.CharField(max_length=100)
class Meta:
model = Advertise
fields = "__all__"
class UserAddForm(forms.Form):
username = forms.CharField()
email = forms.EmailField()
password = forms.CharField(max_length=20, min_length=6)
class UserEditForm(forms.Form):
pk = forms.CharField()
username = forms.CharField()
email = forms.EmailField()
password = forms.CharField(max_length=20, min_length=6)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-08-18 23:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('climate_data', '0006_auto_20160816_1429'),
]
operations = [
migrations.AlterModelOptions(
name='stationsensorlink',
options={'ordering': ('station_order',)},
),
migrations.AddField(
model_name='stationsensorlink',
name='read_frequency',
field=models.PositiveSmallIntegerField(default=4),
),
]
|
nilq/baby-python
|
python
|
import doctest
import unittest
import zeit.cms.testing
def test_suite():
suite = unittest.TestSuite()
suite.addTest(doctest.DocFileSuite(
'content.txt',
package='zeit.cms'
))
suite.addTest(zeit.cms.testing.FunctionalDocFileSuite(
'cleanup.txt',
'cmscontent.txt',
package='zeit.cms'
))
return suite
|
nilq/baby-python
|
python
|
# https://stackoverflow.com/questions/31663288/how-do-i-properly-use-connection-pools-in-redis
# settings.py:
import redis
def get_redis_connection():
return redis.StrictRedis(host='localhost', port=6379, db=0)
# task1.py
import settings
connection = settings.get_redis_connection()
def do_something1():
return connection.hgetall(...)
# task2.py
import settings
connection = settings.get_redis_connection()
def do_something1():
return connection.hgetall(...)
# So each task file has its own redis instance (which presumably is very expensive).
# What's the best way of optimizing this process? Is it possible to use connection pools for this example?
# Redis-py provides a connection pool from which you can retrieve connections. A connection
# pool creates a set of connections that you can reuse as needed (and when done, the
# connection is returned to the pool). Trying to create connections on the fly without
# discarding them (i.e. not using a pool, or not using the pool correctly) will leave you
# with far too many connections to redis (until you hit the connection limit).
# You could choose to set up the connection pool in the init method and make the pool global
# (you can look at other options if uncomfortable with global).
import os

redis_pool = None
def init():
    global redis_pool
    print("PID %d: initializing redis pool..." % os.getpid())
    redis_pool = redis.ConnectionPool(host='10.0.0.1', port=6379, db=0)
# You can then retrieve a connection from the pool like this:
redis_conn = redis.Redis(connection_pool=redis_pool)
# Also, consider using hiredis along with redis-py, as it should improve performance in
# certain cases. Have you checked the number of connections open to the redis server with
# your existing setup? It most likely is quite high. Use the INFO command to get that
# information:
# redis-cli info
# Check the Clients section, where the "connected_clients" field tells you how many
# connections you have open to the redis server at that instant.
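# A minimal runnable sketch of the same pattern (module/function names here are
# illustrative, not from the question): build the pool once at import time and
# hand out lightweight Redis clients on demand.
import redis

POOL = redis.ConnectionPool(host='localhost', port=6379, db=0)

def get_conn():
    # redis.Redis objects are cheap wrappers; the TCP connections live in POOL
    # and are returned to it automatically after each command completes.
    return redis.Redis(connection_pool=POOL)

def do_task_work(key):
    return get_conn().hgetall(key)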
|
nilq/baby-python
|
python
|
import base64
import gzip
import io
import json
import re
import struct
from pathlib import Path
from typing import Any, BinaryIO, Dict, List, Optional, Tuple, Union
from backend import constants
_here = Path(__file__).parent
with open(_here/'exceptions/enchants.json') as f:
ENCHANT_EXCEPTIONS = json.load(f)
with open(_here/'exceptions/reforges.json') as f:
REFORGE_EXCEPTIONS = json.load(f)
def _pop_byte(bytes_f: BinaryIO) -> int:
return int.from_bytes(bytes_f.read(1),
byteorder='big', signed=True)
def _pop_ushort(bytes_f: BinaryIO) -> int:
return int.from_bytes(bytes_f.read(2),
byteorder='big', signed=False)
def _pop_short(bytes_f: BinaryIO) -> int:
return int.from_bytes(bytes_f.read(2),
byteorder='big', signed=True)
def _pop_int(bytes_f: BinaryIO) -> int:
return int.from_bytes(bytes_f.read(4),
byteorder='big', signed=True)
def _pop_long(bytes_f: BinaryIO) -> int:
return int.from_bytes(bytes_f.read(8),
byteorder='big', signed=True)
def _pop_string(bytes_f: BinaryIO) -> str:
payload = _pop_ushort(bytes_f)
return bytes_f.read(payload).decode('utf-8')
class NbtTag:
"""
Class defining an NbtTag: a value with an intrinsic name.
"""
name: str
value: Any
def __init__(self, name: str, value: Any):
"""
Construct an NbtTag instance.
:param name: The name of the NbtTag.
:param value: The value of the NbtTag.
"""
self.name = name
self.value = value
def __getitem__(self, key: Union[str, int]):
"""
Call __getitem__ on the NbtTag's value instance variable.
:param key: The desired key.
:return: The value of the key in the value instance variable.
"""
return self.value[key]
def parse_byte(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
return NbtTag(name, _pop_byte(bytes_f))
def parse_short(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
return NbtTag(name, _pop_short(bytes_f))
def parse_int(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
return NbtTag(name, _pop_int(bytes_f))
def parse_long(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
return NbtTag(name, _pop_long(bytes_f))
def parse_float(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
    return NbtTag(name, struct.unpack('>f', bytes_f.read(4))[0])
def parse_double(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
    return NbtTag(name, struct.unpack('>d', bytes_f.read(8))[0])
def parse_byte_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
payload = _pop_int(bytes_f)
arr = [_pop_byte(bytes_f) for _ in range(payload)]
return NbtTag(name, arr)
def parse_string(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
return NbtTag(name, _pop_string(bytes_f))
def parse_list(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
content_type = _pop_byte(bytes_f)
payload = _pop_int(bytes_f)
ret = []
for _ in range(payload):
ret.append(PARSERS[content_type](bytes_f, read_name=False))
return NbtTag(name, ret)
def parse_compound(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
tag_type = _pop_byte(bytes_f)
ret = {}
while tag_type != 0:
tag = PARSERS[tag_type](bytes_f)
ret[tag.name] = tag.value
tag_type = _pop_byte(bytes_f)
return NbtTag(name, ret)
def parse_int_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
payload = _pop_int(bytes_f)
arr = [_pop_int(bytes_f) for _ in range(payload)]
return NbtTag(name, arr)
def parse_long_array(bytes_f: BinaryIO, read_name: bool = True) -> NbtTag:
name = _pop_string(bytes_f) if read_name else ''
payload = _pop_int(bytes_f)
arr = [_pop_long(bytes_f) for _ in range(payload)]
return NbtTag(name, arr)
PARSERS = [
None,
parse_byte,
parse_short,
parse_int,
parse_long,
parse_float,
parse_double,
parse_byte_array,
parse_string,
parse_list,
parse_compound,
parse_int_array,
parse_long_array
]
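# A small illustrative note (hand-built payload, not real Skyblock data): the list
# index matches the NBT tag-type byte, so a type byte of 8 dispatches to
# parse_string and 10 to parse_compound. For example:
#   buf = io.BytesIO(b'\x00\x02id\x00\x03ABC')
#   PARSERS[8](buf).value  ->  'ABC'   (name 'id'; both fields are ushort-prefixed)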
def _without_nbt_style(s: str) -> str:
"""
Given a full string with NBT styling, return the string without coloring
and recomb symbols.
:param s: The given string.
:return: The given string without NBT styling.
"""
return re.sub('§ka|§.', '', s).strip()
def deserialize(b64: str) -> NbtTag:
"""
Decode the gzipped base-64 encoding of an item's metadata.
:param b64: The gzipped base-64 item metadata.
:return: A NbtTag with the decoded metadata.
"""
bytes_gz = base64.b64decode(b64)
bytes_f = io.BytesIO(gzip.decompress(bytes_gz))
# Pop the outer compound tag indicator
_pop_byte(bytes_f)
return parse_compound(bytes_f)
def _get_extra_attrs(nbt: NbtTag) -> Dict[str, Any]:
"""
Helper method to get the 'ExtraAttributes' tag compound from an item
NbtTag. Useful for other extraction methods.
:param nbt: The NbtTag to be read.
:return: The 'ExtraAttributes' tag compound.
"""
return nbt['i'][0]['tag']['ExtraAttributes']
def _get_pet_attrs(nbt: NbtTag) -> Dict[str, Any]:
"""
Helper method to get the 'petInfo' tag and parse it into a dictionary.
Returns an empty dictionary if no pet attributes are found.
:param nbt: The NbtTag to be read.
:return: Dictionary containing the pet attributes of the item.
"""
extra_attrs = _get_extra_attrs(nbt)
as_str = extra_attrs.get('petInfo', '{}')
return json.loads(as_str)
def extract_api_id(nbt: NbtTag) -> str:
"""
Get the API ID of an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The ID of the item, directly as it appears in the Skyblock API.
"""
extra_attrs = _get_extra_attrs(nbt)
return extra_attrs['id']
def extract_generic_base_name(nbt: NbtTag) -> str:
"""
Given the NbtTag corresponding to an item, return its generic base name.
This corresponds to removing special symbols and reforges from the raw
display name. Often, dropping the first word is enough to remove the
reforge, but some exceptions apply and are specified in REFORGE_EXCEPTIONS.
:param nbt: The NbtTag to be read.
:return: The name of the item with extra symbols removed and reforge
dropped, if applicable.
"""
name = re.sub('[✪⚚✦◆™©�]', '', extract_generic_display_name(nbt)).strip()
# No reforge, we are done
if not extract_reforge(nbt):
return name
general_case = name.split(' ', 1)[-1]
# If it's not an exception, just return the general case
return REFORGE_EXCEPTIONS.get(name, general_case)
def extract_generic_display_name(nbt: NbtTag) -> str:
"""
Extract the raw display name of an item (with NBT styling) from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The api_name of the item, as a string.
"""
return _without_nbt_style(nbt['i'][0]['tag']['display']['Name'])
def extract_identifiers(nbt: NbtTag) -> Tuple[str, str, str]:
"""
Extract the item ID, base name, and display name of an items from its
NbtTag.
:param nbt: The NbtTag to be read.
:return: A tuple describing the item ID, base name, and display name of the
item.
"""
api_id = extract_api_id(nbt)
# Specialization for single-enchantment books
if api_id == 'ENCHANTED_BOOK' and \
len(enchants := extract_enchants(nbt)) == 1:
enchant, lvl = enchants[0]
# Replace enchant if it matches an exception
enchant = ENCHANT_EXCEPTIONS.get(enchant, enchant)
item_id = f'{enchant.upper()}_{lvl}_BOOK'
base_name = item_id.title().replace('_', ' ')
display_name = base_name
# Specialization for runes
elif api_id == 'RUNE':
rune, lvl = extract_rune(nbt)
item_id = f'{rune}_RUNE_{lvl}'
base_name = extract_generic_base_name(nbt).rsplit(' ', 1)[0] \
+ f' {lvl}'
display_name = extract_generic_display_name(nbt)
# Specialization for pets
elif api_id == 'PET':
pet_type = extract_pet_type(nbt)
item_id = f'{pet_type}_PET'
base_name = item_id.title().replace('_', ' ')
display_name = extract_generic_display_name(nbt)
# Specialization for cake souls
elif api_id == 'CAKE_SOUL':
item_id = 'CAKE_SOUL'
base_name = 'Cake Soul'
display_name = extract_generic_display_name(nbt)
# General case
else:
# Drop the fragment prefix
item_id = api_id.removeprefix('STARRED_')
base_name = extract_generic_base_name(nbt)
display_name = extract_generic_display_name(nbt)
return item_id, base_name, display_name
def extract_stack_size(nbt: NbtTag) -> int:
"""
Get the number of items in an item stack from the associated NbtTag.
:param nbt: The NbtTag to be read.
:return: The number of items in the item stack.
"""
return nbt['i'][0]['Count']
def extract_rarity(nbt: NbtTag) -> str:
"""
Get the rarity of an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The rarity of the item.
"""
try:
lore = nbt['i'][0]['tag']['display']['Lore']
        rarity_line = lore[-1].value
# Some runes have a weird footer in their lore
if extract_api_id(nbt) == 'RUNE':
for tag in lore:
line = tag.value
if _without_nbt_style(line).endswith('COSMETIC'):
rarity_line = line
words = _without_nbt_style(rarity_line).split()
# Account for 'VERY_SPECIAL' case
rarity = words[0] if words[0] != 'VERY' else 'VERY_SPECIAL'
        return rarity if rarity in constants.DISPLAY_RARITIES else 'UNKNOWN'
except KeyError:
# Some weird items don't have lore for some reason
return 'UNKNOWN'
def extract_rune(nbt: NbtTag) -> Optional[Tuple[str, int]]:
"""
Get rune information of an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The rune of the item as a (rune name, level) pair, or None if no
rune is associated with the item.
"""
extra_attrs = _get_extra_attrs(nbt)
if 'runes' in extra_attrs:
return list(extra_attrs['runes'].items())[0]
return None
def extract_enchants(nbt: NbtTag) -> List[Tuple[str, int]]:
"""
Get enchantment information of an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: A list of (enchantment, level) pairs describing the enchantments
on the item
"""
extra_attrs = _get_extra_attrs(nbt)
enchantments = extra_attrs.get('enchantments', {}).items()
return [(ench, lvl) for ench, lvl in enchantments]
def extract_is_recombobulated(nbt: NbtTag) -> bool:
"""
Determine whether or not an item is recombobulated from its NbtTag.
:param nbt: The NbtTag to be read.
:return: Boolean, whether or not the item is recombobulated.
"""
extra_attrs = _get_extra_attrs(nbt)
return 'rarity_upgrades' in extra_attrs
def extract_is_fragged(nbt: NbtTag) -> bool:
"""
Determine whether or not an item has a Bonzo or Livid fragment applied to
it from its NbtTag.
:param nbt: The NbtTag to be read.
:return: Boolean, whether or not the item is fragged.
"""
return extract_api_id(nbt).startswith('STARRED_')
def extract_hot_potato_count(nbt: NbtTag) -> int:
"""
Determine the number of hot potato book upgrades on an item from its
NbtTag.
:param nbt: The NbtTag to be read.
:return: The number of hot potato book upgrades on the given item.
"""
extra_attrs = _get_extra_attrs(nbt)
return extra_attrs.get('hot_potato_count', 0)
def extract_reforge(nbt: NbtTag) -> Optional[str]:
"""
Get the reforge on an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The reforge of the item, or None if no reforge is present.
"""
extra_attrs = _get_extra_attrs(nbt)
return extra_attrs.get('modifier')
def extract_dungeon_stars(nbt: NbtTag) -> int:
"""
Get the number of dungeon stars on an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The number of dungeon stars on the item.
"""
extra_attrs = _get_extra_attrs(nbt)
return extra_attrs.get('dungeon_item_level', 0)
def extract_pet_type(nbt: NbtTag) -> Optional[str]:
"""
Get the pet type of an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The pet type of the item, if applicable.
"""
pet_attrs = _get_pet_attrs(nbt)
return pet_attrs.get('type')
def extract_pet_exp(nbt: NbtTag) -> float:
"""
Get the pet experience of an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The pet experience on the item.
"""
pet_attrs = _get_pet_attrs(nbt)
return pet_attrs.get('exp', 0)
def extract_pet_candy_used(nbt: NbtTag) -> int:
"""
Get the number of pet candies used on an item from its NbtTag.
:param nbt: The NbtTag to be read.
:return: The number of pet candies on the item.
"""
pet_attrs = _get_pet_attrs(nbt)
return pet_attrs.get('candyUsed', 0)
|
nilq/baby-python
|
python
|
from timeit import timeit
nTests=10000
print("Each operation performed {} times".format(nTests))
print("")
print("Custom Quaternion")
print("")
importQuatVec = '''
from MAPLEAF.Motion import Quaternion
from MAPLEAF.Motion import Vector
v1 = Vector(1, 1, 2)
'''
# Test Quaternion speed (init)
print("Initializing Quaternion (Axis-Angle):")
print(timeit("a = Quaternion(axisOfRotation=v1, angle=1.2)", setup=importQuatVec, number=nTests))
print("Initializing Quaternion (Components):")
print(timeit("a = Quaternion(components=[1, 1.2, 2.3, 4.5])", setup=importQuatVec, number=nTests))
setupRotQuat = '''
from MAPLEAF.Motion import Quaternion
from MAPLEAF.Motion import Vector
qRot = Quaternion(axisOfRotation=Vector(1, 1, 2), angle=1.2)
vec = Vector(1, 2, 3)
'''
# Test Quaternion speed (rotate)
print("Quaternion Rotating Vector:")
print(timeit("a = qRot.rotate(vec)", setup=setupRotQuat, number=nTests))
print("")
print("Scipy")
print("")
setupScipyRot = '''
from scipy.spatial.transform import Rotation as R
from MAPLEAF.Motion import Vector
v1 = list(Vector(1, 1, 2).normalize() * 1.2)
'''
# Test Scipy speed (init)
print("Initializing Scipy Rotation (Rotation Vector):")
print(timeit("a = R.from_rotvec(v1)", setup=setupScipyRot, number=nTests))
setupScipyRot = '''
from scipy.spatial.transform import Rotation as R
from MAPLEAF.Motion import Vector
v1 = list(Vector(1, 1, 2).normalize() * 1.2)
sRot = R.from_rotvec(v1)
vec = [1, 2, 3]
'''
# Test Scipy speed (rotation)
print("Scipy Rotating Vector:")
print(timeit("a = sRot.apply(vec)", setup=setupScipyRot, number=nTests))
print("")
print("Custom Vector")
print("")
setup = '''
from MAPLEAF.Motion import Vector
import numpy as np
a = [1,2,3]
v1 = Vector(1,2,3)
v2 = Vector(2,3,4)
nV1 = np.array([1,2,3])
nV2 = np.array([2,3,4])
'''
print("Initializing Vector (Components):")
print(timeit("v1 = Vector(1, 1, 2)", setup=setup, number=nTests))
print("Initializing Vector (list):")
print(timeit("v1 = Vector(*a)", setup=setup, number=nTests))
print("Initializing Vector (String):")
print(timeit("v1 = Vector('(1 1 2)')", setup=setup, number=nTests))
print("Dot Product:")
print(timeit("v3 = v1 * v2", setup=setup, number=nTests))
print("Cross Product:")
print(timeit("v1.crossProduct(v2)", setup=setup, number=nTests))
print("")
print("Numpy Vector")
print("")
print("Initializing Vector (Components):")
print(timeit("v1 = np.array([1,2,3])", setup=setup, number=nTests))
print("Dot Product:")
print(timeit("v3 = np.dot(nV1, nV2)", setup=setup, number=nTests))
print("Cross Product:")
print(timeit("v3 = np.cross(nV1, nV2)", setup=setup, number=nTests))
|
nilq/baby-python
|
python
|
# TI & TA
from pyti.smoothed_moving_average import smoothed_moving_average as pyti_smmoothed_ma
from pyti.simple_moving_average import simple_moving_average as pyti_sma
from pyti.bollinger_bands import lower_bollinger_band as pyti_lbb
from pyti.bollinger_bands import upper_bollinger_band as pyti_ubb
from pyti.accumulation_distribution import accumulation_distribution as acd
from pyti.aroon import aroon_up
from pyti.aroon import aroon_down
from pyti.rate_of_change import rate_of_change as roc
from pyti.relative_strength_index import relative_strength_index as pyti_rsi
from pyti.commodity_channel_index import commodity_channel_index
from pyti.exponential_moving_average import exponential_moving_average as pyti_ema
from pyjuque.Indicators.CustomIndicators.SuperTrend import ST
from pyjuque.Indicators.CustomIndicators.OTT import ott, smoothrng
from pyjuque.Indicators.CustomIndicators.HA import HA
from traceback import print_exc
def cci(df, period):
return commodity_channel_index(
df['close'].tolist(), df['high'].tolist(), df['low'].tolist(), period)
def sma(df, source, period):
return pyti_sma(df[source].tolist(), period)
def ema(df, source, period):
return pyti_ema(df[source].tolist(), period)
def lbb(df, source, period):
return pyti_lbb(df[source].tolist(), period)
def ubb(df, source, period):
return pyti_ubb(df[source].tolist(), period)
def rsi(df, source, period):
return pyti_rsi(df[source].tolist(), period)
def isSupport(df,i):
return df['low'][i] < df['low'][i-1] \
and df['low'][i] < df['low'][i+1] \
and df['low'][i+1] < df['low'][i+2] \
and df['low'][i-1] < df['low'][i-2]
def isResistance(df,i):
return df['high'][i] > df['high'][i-1] \
and df['high'][i] > df['high'][i+1] \
and df['high'][i+1] > df['high'][i+2] \
and df['high'][i-1] > df['high'][i-2]
INDICATOR_DICT = {
"sma": sma,
"ema": ema,
"lbb": lbb,
"ubb": ubb,
"cci": cci,
"rsi": rsi,
"smoothrng": smoothrng,
"ott": ott
}
def AddIndicator(df, indicator_name:str, col_name, *args):
# print("Args are", indicator_name, col_name)
# print(args)
try:
if indicator_name == "ott":
df[col_name[0]], df[col_name[1]] = ott(df, *args)
else:
df[col_name] = INDICATOR_DICT[indicator_name](df, *args)
except Exception as e:
print_exc()
print("\nException raised when trying to compute the", indicator_name, "indicator:\n")
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""
Tests for LinearRegionVisual
All images are of size (100,100) to keep a small file size
"""
import numpy as np
from vispy.scene import visuals
from vispy.testing import (requires_application, TestingCanvas,
run_tests_if_main)
from vispy.testing.image_tester import assert_image_approved
from vispy.testing import assert_raises
@requires_application()
def test_linear_region_vertical_horizontal():
"""Test vertical and horizontal LinearRegionVisual with a single color"""
# Definition of the region
pos = np.array([5, 15, 24, 36, 40, 42], dtype=np.float32)
# Expected internal pos buffer for vertical region
expected_pos_v = np.array([[5.0, -1.],
[5.0, 1.],
[15.0, -1.],
[15.0, 1.],
[24.0, -1.],
[24.0, 1.],
[36.0, -1.],
[36.0, 1.],
[40.0, -1.],
[40.0, 1.],
[42.0, -1.],
[42.0, 1.]], dtype=np.float32)
# Expected internal pos buffer for horizontal region
expected_pos_h = np.array([expected_pos_v[:, 1] * -1,
expected_pos_v[:, 0]], dtype=np.float32).T
# Test both horizontal and vertical region
for is_vertical, reference_image in [(True, 'linear_region1.png'),
(False, 'linear_region1_h.png')]:
expected_pos = expected_pos_v if is_vertical else expected_pos_h
with TestingCanvas() as c:
# Check set_data is working correctly within visual constructor
region = visuals.LinearRegion(pos=pos,
color=[0.0, 1.0, 0.0, 0.5],
vertical=is_vertical,
parent=c.scene)
assert np.all(region._pos == expected_pos)
assert np.all(region.pos == pos)
assert region.is_vertical == is_vertical
# Check set_data is working as expected when passing a list as
# pos argument
region.set_data(pos=list(pos))
assert np.all(region._pos == expected_pos)
assert np.all(region.pos == pos)
# Check set_data is working as expected when passing a tuple as
# pos argument
region.set_data(pos=tuple(pos))
assert np.all(region._pos == expected_pos)
assert np.all(region.pos == pos)
# Test with different dtypes that must be converted to float32
for t in [np.int64, np.float64, np.int32]:
region.set_data(pos=pos.astype(t))
assert np.all(region._pos == expected_pos)
assert np.all(region.pos == pos)
assert_image_approved(c.render(), 'visuals/%s' % reference_image)
# Check ValueError is raised when pos is not 1D
assert_raises(ValueError, region.set_data, pos=[[1, 2], [3, 4]])
@requires_application()
def test_linear_region_color():
"""Test the color argument of LinearRegionVisual.set_data() method
using a single color
"""
# Definition of the region
pos1 = [5, 42]
# Definition of the color of the region
color1 = np.array([0.0, 1.0, 0.0, 0.5], dtype=np.float32)
# Expected internal color buffer
color1_expected = np.array([color1, color1, color1, color1],
dtype=np.float32)
with TestingCanvas() as c:
# Check set_data is working correctly within visual constructor
region = visuals.LinearRegion(pos=pos1, color=color1, parent=c.scene)
assert np.all(region._color == color1_expected)
assert np.all(region.color == color1)
# Check set_data is working as expected when passing a list as
# color argument
region.set_data(color=list(color1))
assert np.all(region._color == color1_expected)
assert np.all(region.color == color1)
# Check set_data is working as expected when passing a tuple as
# color argument
region.set_data(color=tuple(color1))
assert np.all(region._color == color1_expected)
assert np.all(region.color == color1)
# Test with different dtypes that must be converted to float32
region.set_data(color=color1.astype(np.float64))
assert np.all(region._color == color1_expected)
assert np.all(region.color == color1)
assert_image_approved(c.render(), 'visuals/linear_region1.png')
# Check a ValueError is raised when the length of color argument
# is not 4.
assert_raises(ValueError, region.set_data, color=[1.0, 0.5, 0.5])
# Check a ValueError is raised when too many colors are provided
assert_raises(ValueError, region.set_data,
color=[color1, color1, color1])
@requires_application()
def test_linear_region_gradient():
"""Test LinearRegionVisual with a gradient as color"""
# Definition of the region
pos2 = [5, 42, 80]
# Definition of the color of the region
color2 = np.array([[0.0, 1.0, 0.0, 0.5],
[1.0, 0.0, 0.0, 0.75],
[0.0, 0.0, 1.0, 1.0]], dtype=np.float32)
# Expected internal color buffer
color2_expected = np.array([color2[0], color2[0],
color2[1], color2[1],
color2[2], color2[2]],
dtype=np.float32)
with TestingCanvas() as c:
# Check set_data is working correctly within visual constructor
region = visuals.LinearRegion(pos=pos2, color=color2, parent=c.scene)
assert np.all(region._color == color2_expected)
assert np.all(region.color == color2)
assert_image_approved(c.render(), 'visuals/linear_region2.png')
run_tests_if_main()
|
nilq/baby-python
|
python
|
import numpy as np
from heapq import heappush, heappop
from dataclasses import dataclass, field
import os
@dataclass(order=True)
class PosItem:
priority: int
pos: tuple[int, int] = field(compare=False)
path = os.path.join(os.path.dirname(__file__), "input.txt")
def find_path(arr):
pq = []
visited = set()
cost = np.zeros_like(arr, dtype=np.int32)
cost.fill(2 ** 31 - 1)
prev = np.zeros(shape=(cost.shape[0], cost.shape[1], 2), dtype=np.int32)
cost[0, 0] = 0
pq.append(PosItem(0, (0, 0)))
while pq:
item = heappop(pq)
r, c = item.pos
visited.add((r, c))
if (
(r + 1, c) not in visited
and r < arr.shape[0] - 1
and cost[r, c] + arr[r + 1, c] < cost[r + 1, c]
):
cost[r + 1, c] = cost[r, c] + arr[r + 1, c]
prev[r + 1, c, :] = [r, c]
heappush(pq, PosItem(cost[r + 1, c], (r + 1, c)))
if (
(r, c + 1) not in visited
and c < arr.shape[1] - 1
and cost[r, c] + arr[r, c + 1] < cost[r, c + 1]
):
cost[r, c + 1] = cost[r, c] + arr[r, c + 1]
prev[r, c + 1, :] = [r, c]
heappush(pq, PosItem(cost[r, c + 1], (r, c + 1)))
return prev, cost
if __name__ == "__main__":
with open(path) as file:
contents = file.read()
arr = np.asarray(
[[int(n) for n in line] for line in contents.split("\n")], dtype=np.int32
)
prev, cost = find_path(arr)
print(f"Lowest cost path is {cost[cost.shape[0]-1, cost.shape[1]-1]}")
|
nilq/baby-python
|
python
|
from selenium import webdriver
browser = webdriver.Firefox(executable_path=r"C:\Windows\geckodriver.exe")
browser.get("https://github.com")
browser.maximize_window()
browser.implicitly_wait(20)
sign_in = browser.find_element_by_link_text("Sign in")
sign_in.click()
user_name = browser.find_element_by_id("login_field")
user_name.send_keys("user_name")
password = browser.find_element_by_id("password")
password.send_keys("password")
password.submit()
profile_link = browser.find_element_by_class_name("user-profile-link")
link_label = profile_link.get_attribute("innerHTML")
assert "username" in link_label
browser.quit()
|
nilq/baby-python
|
python
|
import unittest
import Models
class BasicTestMethods(unittest.TestCase):
def test_asdf(self):
self.assertEqual(Models.asdf(), "asdf", 'nah')
self.assertNotEqual(Models.asdf(), "asdf1", 'nah')
#self.assertEqual(asdf(), "asdf1", 'nah')
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
"""AVIM build configuration"""
from os import path
from datetime import date
from build import BuildConfig
# Type of build to produce.
CONFIG = BuildConfig.RELEASE
# Incremented version number.
# See <https://developer.mozilla.org/en-US/docs/Toolkit_version_format>.
VERSION = (5, 8, 2)
# Build date.
DATE = None
# Name to use in the build's directories.
PACKAGE_NAME = "avim"
# Paths to directories that consitute the chrome JAR file.
CHROME_PROVIDERS = ["content", "locale", "skin"]
# Paths to miscellaneous files that should be included in the build's root
# directory. install.rdf and chrome.manifest are automatically included.
ROOT_FILES = ["LICENSE"]
# Paths to directories that should be included, uncompressed, in the build's
# root directory.
ROOT_DIRS = ["components", "defaults"]
# Paths to files to be preprocessed. These files contain placeholders that
# should be interpreted as variables.
VAR_FILES = ["install.rdf", "chrome.manifest", "LICENSE",
path.join("content", "options.xul")]
# File extensions of files to be preprocessed.
VAR_EXTS = ["js"]
# Names of files to be preprocessed.
VAR_NAMES = ["options.dtd"]
# Paths to directories that should be omitted from a release build.
DEBUG_DIRS = [path.join("originals"),
path.join("tests"),
path.join("content", "test"),
path.join("content", "skin", "test"), path.join("skin", "test"),
# Unmaintained localizations
path.join("locale", "fr"), path.join("locale", "zh-TW")]
# Names of localization files that should be omitted from a release build.
L10N_FILES = ["amo.dtd", "install.dtd"]
# Dictionary mapping subdirectories of locale/ to BabelZilla-compatible locale
# codes. Locale names that are already compatible can be omitted.
LOCALE_DIRS = {"en": "en-US", "es": "es-ES"}
# Name of the fallback locale that is guaranteed to contain translations for all
# the extension's strings and that contains documentation for each string.
MAIN_LOCALE = "en-US"
# Paths to the final XPI files.
XPI_FILES = ["%(package)s.xpi", "%(package)s-%(version)s.xpi"]
|
nilq/baby-python
|
python
|
import sys
sys.path.append('../src/')
print(sys.path)
import Histograms
import unittest
import numpy
import time
class MyTestCase(unittest.TestCase):
def setUp(self):
pass
def test_learnSingleton(self):
m = Histograms.Histograms({
"histograms": ["test"]
, "AllowLimit": 10
, "LearnLimit": 3
, "collectorId": "mygate"
, "minimumLearning": 100
})
for i in range(1000):
r = m.assess({'histograms': [[4, 4, 0, 1E10-1, 0, 0]]})
print(r)
print(m.mean)
self.assertLess(r[0], 0.25)
m.learn()
print(m.keys)
self.assertEqual(len(m.keys["test-01"]), 1)
self.assertAlmostEqual(m.mean[0][0], 1.0, delta=0.05)
self.assertLess(m.sdev[0][0], 0.2)
def test_store_load(self):
m = Histograms.Histograms({
"histograms": ["test"]
, "AllowLimit": 10
, "LearnLimit": 3
, "collectorId": "mygate"
, "minimumLearning": 100
})
for i in range(1000):
r = m.assess({'histograms': [[4, 4, 0, 1E10-1, 0, 0]]})
print (r)
self.assertLess(r[0], 0.25)
m.learn()
status = {}
m.crdstore(status)
print(status)
self.assertTrue("histograms" in status)
values = status["histograms"]
self.assertTrue(isinstance(values, dict))
self.assertTrue("_n" in values)
self.assertEqual(values["_n"], 1000)
self.assertTrue("test-01" in values)
val = values["test-01"]
self.assertTrue(isinstance(val, dict))
keys = list(val.keys())
self.assertEqual(len(keys), 1)
key = keys[0]
val = val[key]
self.assertTrue(isinstance(val, dict))
self.assertTrue("c" in val)
self.assertTrue("s" in val)
self.assertTrue("s2" in val)
self.assertAlmostEqual(1000, val["c"], delta=10)
self.assertAlmostEqual(1000, val["s"], delta=10)
self.assertAlmostEqual(1000, val["s2"], delta=10)
self.assertTrue("test-12" in values)
val = values["test-12"]
self.assertTrue(isinstance(val, dict))
keys = list(val.keys())
self.assertEqual(len(keys), 1)
key = keys[0]
val = val[key]
self.assertTrue(isinstance(val, dict))
self.assertTrue("c" in val)
self.assertTrue("s" in val)
self.assertTrue("s2" in val)
self.assertAlmostEqual(1000, val["c"], delta=10)
self.assertAlmostEqual(5000, val["s"], delta=10)
self.assertAlmostEqual(25000, val["s2"], delta=100)
self.assertTrue("test-23" in values)
val = values["test-23"]
self.assertTrue(isinstance(val, dict))
keys = list(val.keys())
self.assertEqual(len(keys), 1)
key = keys[0]
val = val[key]
self.assertTrue(isinstance(val, dict))
self.assertTrue("c" in val)
self.assertTrue("s" in val)
self.assertTrue("s2" in val)
self.assertAlmostEqual(1000, val["c"], delta=10)
self.assertAlmostEqual(1E-7, val["s"] , delta=1E-7)
self.assertAlmostEqual(1E-10, val["s2"], delta=1E-6)
self.assertTrue("test-34" in values)
val = values["test-34"]
self.assertTrue(isinstance(val, dict))
keys = list(val.keys())
self.assertGreaterEqual(len(keys), 1)
key = keys[0]
val = val[key]
self.assertTrue(isinstance(val, dict))
self.assertTrue("c" in val)
self.assertTrue("s" in val)
self.assertTrue("s2" in val)
self.assertAlmostEqual(1000, val["c"], delta=10)
self.assertAlmostEqual(1000000, val["s"] , delta=100)
self.assertAlmostEqual(10000000, val["s2"], delta=1000)
self.assertTrue("test-45" in values)
val = values["test-45"]
self.assertTrue(isinstance(val, dict))
keys = list(val.keys())
self.assertEqual(len(keys), 1)
key = keys[0]
val = val[key]
self.assertTrue(isinstance(val, dict))
self.assertTrue("c" in val)
self.assertTrue("s" in val)
self.assertTrue("s2" in val)
self.assertAlmostEqual(1000, val["c"], delta=10)
self.assertAlmostEqual(10, val["s"] , delta=1)
self.assertAlmostEqual(0.1, val["s2"], delta=0.01)
|
nilq/baby-python
|
python
|
"""User details and sex of patient added
Revision ID: 7d4bab0acebb
Revises: b4bb7697ace6
Create Date: 2017-09-14 14:53:07.958616
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7d4bab0acebb'
down_revision = 'b4bb7697ace6'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('patients', sa.Column('sex', sa.String(length=1), nullable=True))
op.add_column('users', sa.Column('f_name', sa.String(length=50), nullable=True))
op.add_column('users', sa.Column('initials', sa.String(length=10), nullable=True))
op.add_column('users', sa.Column('l_name', sa.String(length=50), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'l_name')
op.drop_column('users', 'initials')
op.drop_column('users', 'f_name')
op.drop_column('patients', 'sex')
# ### end Alembic commands ###
|
nilq/baby-python
|
python
|
"""
Author: Justin Cappos
Start date: October 9th, 2009
Purpose: A simple library that serializes and deserializes built-in repy types.
This includes strings, integers, floats, booleans, None, complex, tuples,
lists, sets, frozensets, and dictionaries.
There are no plans for including objects.
Note that all items are treated as separate references. This means things
like 'a = []; a.append(a)' will result in an infinite loop. If you have
'b = []; c = (b,b)' then 'c[0] is c[1]' is True. After deserialization
'c[0] is c[1]' is False.
I can add support or detection of this if desired.
"""
# The basic idea is simple. Say the type (a character) followed by the
# type specific data. This is adequate for simple types
# that do not contain other types. Types that contain other types have
# a length indicator and then the underlying items listed sequentially.
# For a dict, this is key1value1key2value2.
def serializedata(data):
"""
<Purpose>
Convert a data item of any type into a string such that we can
deserialize it later.
<Arguments>
    data: the thing to serialize. Can be of essentially any type except
objects.
<Exceptions>
TypeError if the type of 'data' isn't allowed
<Side Effects>
None.
<Returns>
A string suitable for deserialization.
"""
# this is essentially one huge case statement...
# None
if type(data) == type(None):
return 'N'
# Boolean
elif type(data) == type(True):
if data == True:
return 'BT'
else:
return 'BF'
# Integer / Long
elif type(data) is int or type(data) is long:
datastr = str(data)
return 'I'+datastr
# Float
elif type(data) is float:
datastr = str(data)
return 'F'+datastr
# Complex
elif type(data) is complex:
datastr = str(data)
if datastr[0] == '(' and datastr[-1] == ')':
datastr = datastr[1:-1]
return 'C'+datastr
# String
elif type(data) is str:
return 'S'+data
# List or tuple or set or frozenset
elif type(data) is list or type(data) is tuple or type(data) is set or type(data) is frozenset:
# the only impact is the first letter...
if type(data) is list:
mystr = 'L'
elif type(data) is tuple:
mystr = 'T'
elif type(data) is set:
mystr = 's'
elif type(data) is frozenset:
mystr = 'f'
else:
raise Exception("InternalError: not a known type after checking")
for item in data:
thisitem = serializedata(item)
# Append the length of the item, plus ':', plus the item. 1 -> '2:I1'
mystr = mystr + str(len(thisitem))+":"+thisitem
mystr = mystr + '0:'
return mystr
# dict
elif type(data) is dict:
mystr = 'D'
keysstr = serializedata(data.keys())
# Append the length of the list, plus ':', plus the list.
mystr = mystr + str(len(keysstr))+":"+keysstr
# just plop the values on the end.
valuestr = serializedata(data.values())
mystr = mystr + valuestr
return mystr
# Unknown!!!
else:
raise TypeError("Unknown type '"+str(type(data))+"' for data :"+str(data))
def deserializedata(datastr):
"""
<Purpose>
Convert a serialized data string back into its original types.
<Arguments>
    datastr: the string to deserialize.
<Exceptions>
ValueError if the string is corrupted
TypeError if the type of 'data' isn't allowed
<Side Effects>
None.
<Returns>
Items of the original type
"""
if type(datastr) != str:
raise TypeError("Cannot deserialize non-string of type '"+str(type(datastr))+"'")
typeindicator = datastr[0]
restofstring = datastr[1:]
# this is essentially one huge case statement...
# None
if typeindicator == 'N':
if restofstring != '':
raise ValueError("Malformed None string '"+restofstring+"'")
return None
# Boolean
elif typeindicator == 'B':
if restofstring == 'T':
return True
elif restofstring == 'F':
return False
raise ValueError("Malformed Boolean string '"+restofstring+"'")
# Integer / Long
elif typeindicator == 'I':
try:
return int(restofstring)
except ValueError:
raise ValueError("Malformed Integer string '"+restofstring+"'")
# Float
elif typeindicator == 'F':
try:
return float(restofstring)
except ValueError:
raise ValueError("Malformed Float string '"+restofstring+"'")
  # Complex
elif typeindicator == 'C':
try:
return complex(restofstring)
except ValueError:
raise ValueError("Malformed Complex string '"+restofstring+"'")
# String
elif typeindicator == 'S':
return restofstring
# List / Tuple / set / frozenset / dict
elif typeindicator == 'L' or typeindicator == 'T' or typeindicator == 's' or typeindicator == 'f':
# We'll split this and keep adding items to the list. At the end, we'll
# convert it to the right type
thislist = []
data = restofstring
# We'll use '0:' as our 'end separator'
while data != '0:':
lengthstr, restofdata = data.split(':', 1)
length = int(lengthstr)
      # take this item's serialized data, deserialize it, and append it to the list.
thisitemdata = restofdata[:length]
thisitem = deserializedata(thisitemdata)
thislist.append(thisitem)
# Now toss away the part we parsed.
data = restofdata[length:]
if typeindicator == 'L':
return thislist
elif typeindicator == 'T':
return tuple(thislist)
elif typeindicator == 's':
return set(thislist)
elif typeindicator == 'f':
return frozenset(thislist)
else:
raise Exception("InternalError: not a known type after checking")
elif typeindicator == 'D':
lengthstr, restofdata = restofstring.split(':', 1)
length = int(lengthstr)
    # extract the serialized keys list and deserialize it.
keysdata = restofdata[:length]
keys = deserializedata(keysdata)
# The rest should be the values list.
values = deserializedata(restofdata[length:])
if type(keys) != list or type(values) != list or len(keys) != len(values):
raise ValueError("Malformed Dict string '"+restofstring+"'")
thisdict = {}
for position in xrange(len(keys)):
thisdict[keys[position]] = values[position]
return thisdict
# Unknown!!!
else:
raise ValueError("Unknown typeindicator '"+str(typeindicator)+"' for data :"+str(restofstring))
|
nilq/baby-python
|
python
|
"""
Author: William Gabriel Carreras Oropesa
Date: April 19, 2020, Neuquén, Argentina
module body: This module implements a series of functions and objects
that are useful when solving the N-body problem.
"""
# necessary modules
import numpy as np
from copy import copy
class body(object):
def __init__(self, mass, rVec):
super(body, self).__init__()
self.mass = mass
self.rVec = rVec
self.vVec = np.array([0, 0], dtype=float)
def __str__(self):
return "body object: M = {}, R = ({}, {}), V = ({}, {})".format(self.mass,
self.rVec[0], self.rVec[1], self.vVec[0], self.vVec[1])
def setV(self, newV):
self.vVec = newV
def setR(self, newR):
self.rVec = newR
def gravitationForce(self, P):
return (P.mass * (P.rVec - self.rVec))/np.linalg.norm(P.rVec - self.rVec)**3
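if __name__ == "__main__":
    # A minimal sketch with made-up values (units where G = 1): the result is the
    # acceleration on `a` due to `b`, i.e. m_b * (r_b - r_a) / |r_b - r_a|**3.
    a = body(1.0, np.array([0.0, 0.0]))
    b = body(2.0, np.array([1.0, 0.0]))
    print(a)
    print("acceleration on a due to b:", a.gravitationForce(b))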
|
nilq/baby-python
|
python
|
import logging
import multiprocessing
import unicodedata
from argparse import Namespace
from contextlib import closing
from itertools import chain, repeat
from multiprocessing.pool import Pool
from tqdm import tqdm
from transformers.tokenization_roberta import RobertaTokenizer
logger = logging.getLogger(__name__)
class InputFeatures(object):
def __init__(
self,
unique_id,
example_index,
doc_span_index,
tokens,
mentions,
token_to_orig_map,
token_is_max_context,
word_ids,
word_segment_ids,
word_attention_mask,
entity_ids,
entity_position_ids,
entity_segment_ids,
entity_attention_mask,
start_positions,
end_positions,
):
self.unique_id = unique_id
self.example_index = example_index
self.doc_span_index = doc_span_index
self.tokens = tokens
self.mentions = mentions
self.token_to_orig_map = token_to_orig_map
self.token_is_max_context = token_is_max_context
self.word_ids = word_ids
self.word_segment_ids = word_segment_ids
self.word_attention_mask = word_attention_mask
self.entity_ids = entity_ids
self.entity_position_ids = entity_position_ids
self.entity_segment_ids = entity_segment_ids
self.entity_attention_mask = entity_attention_mask
self.start_positions = start_positions
self.end_positions = end_positions
def convert_examples_to_features(
examples,
tokenizer,
entity_vocab,
wiki_link_db,
model_redirect_mappings,
link_redirect_mappings,
max_seq_length,
max_mention_length,
doc_stride,
max_query_length,
min_mention_link_prob,
segment_b_id,
add_extra_sep_token,
is_training,
pool_size=multiprocessing.cpu_count(),
chunk_size=30,
):
passage_encoder = PassageEncoder(
tokenizer,
entity_vocab,
wiki_link_db,
model_redirect_mappings,
link_redirect_mappings,
max_mention_length,
min_mention_link_prob,
add_extra_sep_token,
segment_b_id,
)
worker_params = Namespace(
tokenizer=tokenizer,
max_seq_length=max_seq_length,
doc_stride=doc_stride,
max_query_length=max_query_length,
add_extra_sep_token=add_extra_sep_token,
passage_encoder=passage_encoder,
is_training=is_training,
)
features = []
unique_id = 1000000000
with closing(Pool(pool_size, initializer=_initialize_worker, initargs=(worker_params,))) as pool:
with tqdm(total=len(examples)) as pbar:
for ret in pool.imap(_process_example, enumerate(examples), chunksize=chunk_size):
for feature in ret:
feature.unique_id = unique_id
features.append(feature)
unique_id += 1
pbar.update()
return features
class PassageEncoder(object):
def __init__(
self,
tokenizer,
entity_vocab,
wiki_link_db,
model_redirect_mappings,
link_redirect_mappings,
max_mention_length,
min_mention_link_prob,
add_extra_sep_token,
segment_b_id,
):
self._tokenizer = tokenizer
self._entity_vocab = entity_vocab
self._wiki_link_db = wiki_link_db
self._model_redirect_mappings = model_redirect_mappings
self._link_redirect_mappings = link_redirect_mappings
self._max_mention_length = max_mention_length
self._add_extra_sep_token = add_extra_sep_token
self._segment_b_id = segment_b_id
self._min_mention_link_prob = min_mention_link_prob
def encode(self, title, tokens_a, tokens_b):
if self._add_extra_sep_token:
mid_sep_tokens = [self._tokenizer.sep_token] * 2
else:
mid_sep_tokens = [self._tokenizer.sep_token]
all_tokens = [self._tokenizer.cls_token] + tokens_a + mid_sep_tokens + tokens_b + [self._tokenizer.sep_token]
word_ids = self._tokenizer.convert_tokens_to_ids(all_tokens)
word_segment_ids = [0] * (len(tokens_a) + len(mid_sep_tokens) + 1) + [self._segment_b_id] * (len(tokens_b) + 1)
word_attention_mask = [1] * len(all_tokens)
try:
title = self._link_redirect_mappings.get(title, title)
mention_candidates = {}
ambiguous_mentions = set()
for link in self._wiki_link_db.get(title):
if link.link_prob < self._min_mention_link_prob:
continue
link_text = self._normalize_mention(link.text)
if link_text in mention_candidates and mention_candidates[link_text] != link.title:
ambiguous_mentions.add(link_text)
continue
mention_candidates[link_text] = link.title
for link_text in ambiguous_mentions:
del mention_candidates[link_text]
except KeyError:
mention_candidates = {}
logger.warning("Not found in the Dump DB: %s", title)
mentions_a = self._detect_mentions(tokens_a, mention_candidates)
mentions_b = self._detect_mentions(tokens_b, mention_candidates)
all_mentions = mentions_a + mentions_b
if not all_mentions:
entity_ids = [0, 0]
entity_segment_ids = [0, 0]
entity_attention_mask = [0, 0]
entity_position_ids = [[-1 for y in range(self._max_mention_length)]] * 2
else:
entity_ids = [0] * len(all_mentions)
entity_segment_ids = [0] * len(mentions_a) + [self._segment_b_id] * len(mentions_b)
entity_attention_mask = [1] * len(all_mentions)
entity_position_ids = [[-1 for y in range(self._max_mention_length)] for x in range(len(all_mentions))]
offset_a = 1
offset_b = len(tokens_a) + 2 # 2 for CLS and SEP tokens
if self._add_extra_sep_token:
offset_b += 1
for i, (offset, (entity_id, start, end)) in enumerate(
chain(zip(repeat(offset_a), mentions_a), zip(repeat(offset_b), mentions_b))
):
entity_ids[i] = entity_id
entity_position_ids[i][: end - start] = range(start + offset, end + offset)
if len(all_mentions) == 1:
entity_ids.append(0)
entity_segment_ids.append(0)
entity_attention_mask.append(0)
entity_position_ids.append([-1 for y in range(self._max_mention_length)])
return dict(
tokens=all_tokens,
mentions=all_mentions,
word_ids=word_ids,
word_segment_ids=word_segment_ids,
word_attention_mask=word_attention_mask,
entity_ids=entity_ids,
entity_position_ids=entity_position_ids,
entity_segment_ids=entity_segment_ids,
entity_attention_mask=entity_attention_mask,
)
def _detect_mentions(self, tokens, mention_candidates):
mentions = []
cur = 0
for start, token in enumerate(tokens):
if start < cur:
continue
if self._is_subword(token):
continue
for end in range(min(start + self._max_mention_length, len(tokens)), start, -1):
if end < len(tokens) and self._is_subword(tokens[end]):
continue
mention_text = self._tokenizer.convert_tokens_to_string(tokens[start:end])
mention_text = self._normalize_mention(mention_text)
if mention_text in mention_candidates:
cur = end
title = mention_candidates[mention_text]
title = self._model_redirect_mappings.get(title, title) # resolve mismatch between two dumps
if title in self._entity_vocab:
mentions.append((self._entity_vocab[title], start, end))
break
return mentions
def _is_subword(self, token):
if isinstance(self._tokenizer, RobertaTokenizer):
token = self._tokenizer.convert_tokens_to_string(token)
if not token.startswith(" ") and not self._is_punctuation(token[0]):
return True
elif token.startswith("##"):
return True
return False
@staticmethod
def _is_punctuation(char):
# obtained from:
# https://github.com/huggingface/transformers/blob/5f25a5f367497278bf19c9994569db43f96d5278/transformers/tokenization_bert.py#L489
cp = ord(char)
if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
return True
cat = unicodedata.category(char)
if cat.startswith("P"):
return True
return False
@staticmethod
def _normalize_mention(text):
return " ".join(text.lower().split(" ")).strip()
params = None
def _initialize_worker(_params):
global params
params = _params
def _process_example(args):
example_index, example = args
tokenizer = params.tokenizer
query_tokens = _tokenize(example.question_text)
if len(query_tokens) > params.max_query_length:
query_tokens = query_tokens[0 : params.max_query_length]
tok_to_orig_index = []
orig_to_tok_index = []
all_doc_tokens = []
for i, token in enumerate(example.doc_tokens):
orig_to_tok_index.append(len(all_doc_tokens))
sub_tokens = _tokenize(token)
for sub_token in sub_tokens:
tok_to_orig_index.append(i)
all_doc_tokens.append(sub_token)
tok_start_positions = []
tok_end_positions = []
if params.is_training and not example.is_impossible:
for start, end, answer_text in zip(example.start_positions, example.end_positions, example.answer_texts):
tok_start = orig_to_tok_index[start]
if end < len(example.doc_tokens) - 1:
tok_end = orig_to_tok_index[end + 1] - 1
else:
tok_end = len(all_doc_tokens) - 1
tok_start, tok_end = _improve_answer_span(all_doc_tokens, tok_start, tok_end, tokenizer, answer_text)
tok_start_positions.append(tok_start)
tok_end_positions.append(tok_end)
max_tokens_for_doc = params.max_seq_length - len(query_tokens) - 3
if params.add_extra_sep_token:
max_tokens_for_doc -= 1
doc_spans = []
start_offset = 0
while start_offset < len(all_doc_tokens):
length = len(all_doc_tokens) - start_offset
if length > max_tokens_for_doc:
length = max_tokens_for_doc
doc_spans.append(dict(start=start_offset, length=length))
if start_offset + length == len(all_doc_tokens):
break
start_offset += min(length, params.doc_stride)
features = []
for doc_span_index, doc_span in enumerate(doc_spans):
token_to_orig_map = {}
token_is_max_context = {}
answer_tokens = []
answer_offset = len(query_tokens) + 2
if params.add_extra_sep_token:
answer_offset += 1
for i in range(doc_span["length"]):
split_token_index = doc_span["start"] + i
token_to_orig_map[answer_offset + i] = tok_to_orig_index[split_token_index]
is_max_context = _check_is_max_context(doc_spans, doc_span_index, split_token_index)
token_is_max_context[answer_offset + i] = is_max_context
answer_tokens.append(all_doc_tokens[split_token_index])
start_positions = []
end_positions = []
if params.is_training:
if example.is_impossible:
start_positions = [0]
end_positions = [0]
else:
doc_start = doc_span["start"]
doc_end = doc_span["start"] + doc_span["length"] - 1
for tok_start, tok_end in zip(tok_start_positions, tok_end_positions):
if not (tok_start >= doc_start and tok_end <= doc_end):
continue
doc_offset = len(query_tokens) + 2
if params.add_extra_sep_token:
doc_offset += 1
start_positions.append(tok_start - doc_start + doc_offset)
end_positions.append(tok_end - doc_start + doc_offset)
if not start_positions:
start_positions = [0]
end_positions = [0]
features.append(
InputFeatures(
unique_id=None,
example_index=example_index,
doc_span_index=doc_span_index,
token_to_orig_map=token_to_orig_map,
token_is_max_context=token_is_max_context,
start_positions=start_positions,
end_positions=end_positions,
**params.passage_encoder.encode(example.title, query_tokens, answer_tokens)
)
)
return features
def _tokenize(text):
if isinstance(params.tokenizer, RobertaTokenizer):
return params.tokenizer.tokenize(text, add_prefix_space=True)
else:
return params.tokenizer.tokenize(text)
def _improve_answer_span(doc_tokens, input_start, input_end, tokenizer, orig_answer_text):
"""Returns tokenized answer spans that better match the annotated answer.
Original version was obtained from here:
https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L25
"""
tok_answer_text = tokenizer.convert_tokens_to_string(_tokenize(orig_answer_text)).strip()
for new_start in range(input_start, input_end + 1):
for new_end in range(input_end, new_start - 1, -1):
text_span = tokenizer.convert_tokens_to_string(doc_tokens[new_start : (new_end + 1)]).strip()
if text_span == tok_answer_text:
return new_start, new_end
return input_start, input_end
def _check_is_max_context(doc_spans, cur_span_index, position):
"""Check if this is the 'max context' doc span for the token.
Original version was obtained from here:
https://github.com/huggingface/transformers/blob/23c6998bf46e43092fc59543ea7795074a720f08/src/transformers/data/processors/squad.py#L38
"""
best_score = None
best_span_index = None
for span_index, doc_span in enumerate(doc_spans):
end = doc_span["start"] + doc_span["length"] - 1
if position < doc_span["start"]:
continue
if position > end:
continue
num_left_context = position - doc_span["start"]
num_right_context = end - position
score = min(num_left_context, num_right_context) + 0.01 * doc_span["length"]
if best_score is None or score > best_score:
best_score = score
best_span_index = span_index
return cur_span_index == best_span_index
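# A small illustrative note (values assumed, not from the original): with
#   doc_spans = [dict(start=0, length=100), dict(start=50, length=100)]
# token position 95 has only 4 tokens of right context in span 0 but 45 tokens of
# minimum context in span 1, so _check_is_max_context(doc_spans, 0, 95) is False
# while _check_is_max_context(doc_spans, 1, 95) is True.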
|
nilq/baby-python
|
python
|
name = input("Hello! What's your name? ")
print('Nice to meet you \033[31m{}\033[m!'.format(name))
|
nilq/baby-python
|
python
|
"""
This file handles Reservation related HTTP request.
"""
from flask import request
from flask_restplus import Resource
from flask_jwt_extended import jwt_required
from flask_jwt_extended.exceptions import NoAuthorizationError,InvalidHeaderError,RevokedTokenError
from jwt import ExpiredSignatureError, InvalidTokenError, InvalidAudienceError
# local imports
from api.v1.main.service.rsvp_service import save_new_rsvp
from api.v1.main.util.rvsp_dto import RsvpDto
api = RsvpDto.api
rsvp = RsvpDto.rsvp
@api.route('/<int:meetup_id>/rsvp')
@api.param('meetup_id', 'Meetup Identification')
@api.errorhandler(NoAuthorizationError)
@api.errorhandler(RevokedTokenError)
@api.errorhandler(ExpiredSignatureError)
@api.errorhandler(InvalidTokenError)
@api.errorhandler(InvalidHeaderError)
class CreateQuestion(Resource):
@api.response(201, 'You have successfully reserved a meetup')
@api.doc('Reserve a meetup')
@api.expect(rsvp, validate=True)
@api.doc(security='Bearer Auth')
@jwt_required
def post(self, meetup_id):
"""
Reserve a meetup
"""
input_data = request.json
return save_new_rsvp(user_input=input_data, meetup_id=meetup_id)
|
nilq/baby-python
|
python
|
from wtforms import Form, StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email
from flask_wtf import FlaskForm
class RegistrationForm(FlaskForm):
email = StringField(
'Email', [DataRequired(), Email(), Length(min=6, max=36)])
username = StringField(
'Username', [DataRequired(), Length(min=3, max=36)])
password = PasswordField(
'Password', [DataRequired(), Length(min=8, max=36)])
remember_me = BooleanField('Remember Me')
submit = SubmitField('Sign Up')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# coding=utf-8
"""
Calcs for HW3
"""
from __future__ import print_function
import sys
import numpy as np
from common import GOOD_RET, R_J, temp_c_to_k, k_at_new_temp, R_ATM, make_fig
__author__ = 'hbmayes'
def pfr_design_eq(x_out, x_in, vol, nuo, k):
"""
PFR design eq for HW3 problem 1, set up for f(Xi) = 0 for fsolve function
:param x_in: initial conversion (unitless)
:param x_out: final conversion (unitless)
:param vol: PFR volume in L
:param nuo: volumetric flow in L/min
:param k: rate coefficient in 1/min
:return: function residual (want close to zero)
"""
return vol - nuo / k * (4.0 * np.log(1 / (1 - x_out)) - 3.0 * x_out - 4.0 * np.log(1 / (1 - x_in)) + 3.0 * x_in)
def cstr_design_eq(x_out, x_in, vol, nuo, k):
"""
PFR design eq for HW3 problem 1, set up for f(Xi) = 0 for fsolve function
:param x_in: initial conversion (unitless)
:param x_out: final conversion (unitless)
:param vol: PFR volume in L
:param nuo: volumetric flow in L/min
:param k: rate coefficient in 1/min
:return: function residual (want close to zero)
"""
return vol - nuo / k * (x_out - x_in) * (1 + 3 * x_out) / (1 - x_out)
def r_dis_a(k, cao, x, k_equil):
"""
rate of consumption (disappearance) of species A for HW3 prob 1
    :param k: rate coefficient at temp of interest (1/min)
    :param cao: initial concentration of A in mol/L
    :param x: conversion of A
    :param k_equil: equilibrium constant (L/mol)
    :return: rate in mol/L-min
"""
return 2.0 * k * cao * (cao * np.square(1 - x) - x / (2 * k_equil))
def pfr_design(k, cao, x, k_equil, nuo):
"""
    Levenspiel-plot ordinate F_A0/(-r_A) for HW3 prob 1
    :param k: rate coefficient at temp of interest (1/min)
    :param cao: initial concentration of A in mol/L
    :param x: conversion of A
    :param k_equil: equilibrium constant (L/mol)
    :param nuo: volumetric flow in L/min
    :return: F_A0/(-r_A) in L
"""
return nuo / (2.0 * k * (cao * np.square(1 - x) - x / (2 * k_equil)))
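# Sanity check (derived from the constants used in prob1a below): the rate above is
# zero at equilibrium, where cao*(1 - x)**2 = x/(2*k_equil). With cao = 0.2 mol/L and
# k_equil = 20 L/mol this gives 8*x**2 - 17*x + 8 = 0, i.e. Xe ~ 0.70, so the plotted
# conversion range X <= 0.65 stays just below equilibrium.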
# noinspection PyTypeChecker
def prob1a():
"""
Given a few points, makes a line
:return: nothing--saves a file with the graph
"""
cao = 0.2 # mol / L
nuo = 10.0 # L / s
k_equil = 20.0 # L / mol
k = 0.2 # L / mol s
fao = cao * nuo
vol = 600.0 # L
tau = vol / nuo # s
# x_in = 0.0
# x_out = 0.65
x_in = np.zeros(4)
x_out = np.empty(4)
print(x_in)
x_begin = 0.0
x_end = 0.65
x_cstr = np.array([x_begin, x_end])
    x_pfr = np.linspace(x_begin, x_end, 10001)
neg_ra = r_dis_a(k, cao, x_pfr, k_equil)
leven_cstr = np.empty(2)
leven_cstr.fill(fao / neg_ra[-1])
leven_pfr = fao / neg_ra
fig_name = 'lect06_alt'
volume_limit = 2000
make_fig(fig_name, x_pfr, leven_pfr,
x_label=r'conversion (X, unitless)', y_label=r'$\displaystyle\frac{F_{A0}}{-r_A} \left(L\right)$',
x_lima=0.0, x_limb=0.65,
y_lima=0.0, y_limb=volume_limit,
color1="black",
x_fill=x_cstr,
y_fill=leven_cstr,
x2_fill=x_pfr, y2_fill=leven_pfr,
# fill1_label="CSTR", fill2_label="PFR",
)
print("yo")
def main():
""" Runs the main program.
"""
prob1a()
return GOOD_RET # success
if __name__ == '__main__':
status = main()
sys.exit(status)
|
nilq/baby-python
|
python
|
'''Wrapper for nviz.h
Generated with:
./ctypesgen.py --cpp gcc -E -I/Applications/GRASS-7.8.app/Contents/Resources/include -D_Nullable= -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -I/Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include -D__GLIBC_HAVE_LONG_LONG -lgrass_nviz.7.8 /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h -o OBJ.x86_64-apple-darwin18.7.0/nviz.py
Do not modify this file.
'''
__docformat__ = 'restructuredtext'
_libs = {}
_libdirs = []
from .ctypes_preamble import *
from .ctypes_preamble import _variadic_function
from .ctypes_loader import *
add_library_search_dirs([])
# Begin libraries
_libs["grass_nviz.7.8"] = load_library("grass_nviz.7.8")
# 1 libraries
# End libraries
# No modules
# /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/CGLTypes.h: 45
class struct__CGLContextObject(Structure):
pass
CGLContextObj = POINTER(struct__CGLContextObject) # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/CGLTypes.h: 45
GLubyte = c_uint8 # /Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk/System/Library/Frameworks/OpenGL.framework/Headers/gltypes.h: 18
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 76
class struct_anon_1348(Structure):
pass
struct_anon_1348.__slots__ = [
'id',
'brt',
'r',
'g',
'b',
'ar',
'ag',
'ab',
'x',
'y',
'z',
'w',
]
struct_anon_1348._fields_ = [
('id', c_int),
('brt', c_float),
('r', c_float),
('g', c_float),
('b', c_float),
('ar', c_float),
('ag', c_float),
('ab', c_float),
('x', c_float),
('y', c_float),
('z', c_float),
('w', c_float),
]
light_data = struct_anon_1348 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 76
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 78
class struct_fringe_data(Structure):
pass
struct_fringe_data.__slots__ = [
'id',
'color',
'elev',
'where',
]
struct_fringe_data._fields_ = [
('id', c_int),
('color', c_ulong),
('elev', c_float),
('where', c_int * 4),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 86
class struct_arrow_data(Structure):
pass
struct_arrow_data.__slots__ = [
'color',
'size',
'where',
]
struct_arrow_data._fields_ = [
('color', c_ulong),
('size', c_float),
('where', c_float * 3),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 93
class struct_scalebar_data(Structure):
pass
struct_scalebar_data.__slots__ = [
'id',
'color',
'size',
'where',
]
struct_scalebar_data._fields_ = [
('id', c_int),
('color', c_ulong),
('size', c_float),
('where', c_float * 3),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 130
class struct_anon_1349(Structure):
pass
struct_anon_1349.__slots__ = [
'zrange',
'xyrange',
'num_cplanes',
'cur_cplane',
'cp_on',
'cp_trans',
'cp_rot',
'light',
'num_fringes',
'fringe',
'draw_arrow',
'arrow',
'num_scalebars',
'scalebar',
'bgcolor',
]
struct_anon_1349._fields_ = [
('zrange', c_float),
('xyrange', c_float),
('num_cplanes', c_int),
('cur_cplane', c_int),
('cp_on', c_int * 6),
('cp_trans', (c_float * 3) * 6),
('cp_rot', (c_float * 3) * 6),
('light', light_data * 3),
('num_fringes', c_int),
('fringe', POINTER(POINTER(struct_fringe_data))),
('draw_arrow', c_int),
('arrow', POINTER(struct_arrow_data)),
('num_scalebars', c_int),
('scalebar', POINTER(POINTER(struct_scalebar_data))),
('bgcolor', c_int),
]
nv_data = struct_anon_1349 # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 130
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 132
class struct_render_window(Structure):
pass
struct_render_window.__slots__ = [
'contextId',
'width',
'height',
]
struct_render_window._fields_ = [
('contextId', CGLContextObj),
('width', c_int),
('height', c_int),
]
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 5
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_resize_window'):
continue
Nviz_resize_window = _lib.Nviz_resize_window
Nviz_resize_window.argtypes = [c_int, c_int]
Nviz_resize_window.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 6
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_update_ranges'):
continue
Nviz_update_ranges = _lib.Nviz_update_ranges
Nviz_update_ranges.argtypes = [POINTER(nv_data)]
Nviz_update_ranges.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 7
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_viewpoint_position'):
continue
Nviz_set_viewpoint_position = _lib.Nviz_set_viewpoint_position
Nviz_set_viewpoint_position.argtypes = [c_double, c_double]
Nviz_set_viewpoint_position.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 8
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_viewpoint_position'):
continue
Nviz_get_viewpoint_position = _lib.Nviz_get_viewpoint_position
Nviz_get_viewpoint_position.argtypes = [POINTER(c_double), POINTER(c_double)]
Nviz_get_viewpoint_position.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 9
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_viewpoint_height'):
continue
Nviz_set_viewpoint_height = _lib.Nviz_set_viewpoint_height
Nviz_set_viewpoint_height.argtypes = [c_double]
Nviz_set_viewpoint_height.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 10
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_viewpoint_height'):
continue
Nviz_get_viewpoint_height = _lib.Nviz_get_viewpoint_height
Nviz_get_viewpoint_height.argtypes = [POINTER(c_double)]
Nviz_get_viewpoint_height.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 11
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_viewpoint_persp'):
continue
Nviz_set_viewpoint_persp = _lib.Nviz_set_viewpoint_persp
Nviz_set_viewpoint_persp.argtypes = [c_int]
Nviz_set_viewpoint_persp.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 12
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_viewpoint_twist'):
continue
Nviz_set_viewpoint_twist = _lib.Nviz_set_viewpoint_twist
Nviz_set_viewpoint_twist.argtypes = [c_int]
Nviz_set_viewpoint_twist.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 13
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_change_exag'):
continue
Nviz_change_exag = _lib.Nviz_change_exag
Nviz_change_exag.argtypes = [POINTER(nv_data), c_double]
Nviz_change_exag.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 14
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_look_here'):
continue
Nviz_look_here = _lib.Nviz_look_here
Nviz_look_here.argtypes = [c_double, c_double]
Nviz_look_here.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 15
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_modelview'):
continue
Nviz_get_modelview = _lib.Nviz_get_modelview
Nviz_get_modelview.argtypes = [POINTER(c_double)]
Nviz_get_modelview.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 16
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_rotation'):
continue
Nviz_set_rotation = _lib.Nviz_set_rotation
Nviz_set_rotation.argtypes = [c_double, c_double, c_double, c_double]
Nviz_set_rotation.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 17
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_unset_rotation'):
continue
Nviz_unset_rotation = _lib.Nviz_unset_rotation
Nviz_unset_rotation.argtypes = []
Nviz_unset_rotation.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 18
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_init_rotation'):
continue
Nviz_init_rotation = _lib.Nviz_init_rotation
Nviz_init_rotation.argtypes = []
Nviz_init_rotation.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 19
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_flythrough'):
continue
Nviz_flythrough = _lib.Nviz_flythrough
Nviz_flythrough.argtypes = [POINTER(nv_data), POINTER(c_float), POINTER(c_int), c_int]
Nviz_flythrough.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 22
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_new_cplane'):
continue
Nviz_new_cplane = _lib.Nviz_new_cplane
Nviz_new_cplane.argtypes = [POINTER(nv_data), c_int]
Nviz_new_cplane.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 23
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_on_cplane'):
continue
Nviz_on_cplane = _lib.Nviz_on_cplane
Nviz_on_cplane.argtypes = [POINTER(nv_data), c_int]
Nviz_on_cplane.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 24
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_off_cplane'):
continue
Nviz_off_cplane = _lib.Nviz_off_cplane
Nviz_off_cplane.argtypes = [POINTER(nv_data), c_int]
Nviz_off_cplane.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 25
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_cplane'):
continue
Nviz_draw_cplane = _lib.Nviz_draw_cplane
Nviz_draw_cplane.argtypes = [POINTER(nv_data), c_int, c_int]
Nviz_draw_cplane.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 26
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_num_cplanes'):
continue
Nviz_num_cplanes = _lib.Nviz_num_cplanes
Nviz_num_cplanes.argtypes = [POINTER(nv_data)]
Nviz_num_cplanes.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 27
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_current_cplane'):
continue
Nviz_get_current_cplane = _lib.Nviz_get_current_cplane
Nviz_get_current_cplane.argtypes = [POINTER(nv_data)]
Nviz_get_current_cplane.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 28
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_cplane_rotation'):
continue
Nviz_set_cplane_rotation = _lib.Nviz_set_cplane_rotation
Nviz_set_cplane_rotation.argtypes = [POINTER(nv_data), c_int, c_float, c_float, c_float]
Nviz_set_cplane_rotation.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 29
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_cplane_rotation'):
continue
Nviz_get_cplane_rotation = _lib.Nviz_get_cplane_rotation
Nviz_get_cplane_rotation.argtypes = [POINTER(nv_data), c_int, POINTER(c_float), POINTER(c_float), POINTER(c_float)]
Nviz_get_cplane_rotation.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 30
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_cplane_translation'):
continue
Nviz_set_cplane_translation = _lib.Nviz_set_cplane_translation
Nviz_set_cplane_translation.argtypes = [POINTER(nv_data), c_int, c_float, c_float, c_float]
Nviz_set_cplane_translation.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 31
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_cplane_translation'):
continue
Nviz_get_cplane_translation = _lib.Nviz_get_cplane_translation
Nviz_get_cplane_translation.argtypes = [POINTER(nv_data), c_int, POINTER(c_float), POINTER(c_float), POINTER(c_float)]
Nviz_get_cplane_translation.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 32
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_fence_color'):
continue
Nviz_set_fence_color = _lib.Nviz_set_fence_color
Nviz_set_fence_color.argtypes = [POINTER(nv_data), c_int]
Nviz_set_fence_color.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 33
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_cplane_here'):
continue
Nviz_set_cplane_here = _lib.Nviz_set_cplane_here
Nviz_set_cplane_here.argtypes = [POINTER(nv_data), c_int, c_float, c_float]
Nviz_set_cplane_here.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 37
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_all_surf'):
continue
Nviz_draw_all_surf = _lib.Nviz_draw_all_surf
Nviz_draw_all_surf.argtypes = [POINTER(nv_data)]
Nviz_draw_all_surf.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 38
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_all_vect'):
continue
Nviz_draw_all_vect = _lib.Nviz_draw_all_vect
Nviz_draw_all_vect.argtypes = []
Nviz_draw_all_vect.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 39
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_all_site'):
continue
Nviz_draw_all_site = _lib.Nviz_draw_all_site
Nviz_draw_all_site.argtypes = []
Nviz_draw_all_site.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 40
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_all_vol'):
continue
Nviz_draw_all_vol = _lib.Nviz_draw_all_vol
Nviz_draw_all_vol.argtypes = []
Nviz_draw_all_vol.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 41
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_all'):
continue
Nviz_draw_all = _lib.Nviz_draw_all
Nviz_draw_all.argtypes = [POINTER(nv_data)]
Nviz_draw_all.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 42
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_quick'):
continue
Nviz_draw_quick = _lib.Nviz_draw_quick
Nviz_draw_quick.argtypes = [POINTER(nv_data), c_int]
Nviz_draw_quick.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 43
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_load_image'):
continue
Nviz_load_image = _lib.Nviz_load_image
Nviz_load_image.argtypes = [POINTER(GLubyte), c_int, c_int, c_int]
Nviz_load_image.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 44
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_image'):
continue
Nviz_draw_image = _lib.Nviz_draw_image
Nviz_draw_image.argtypes = [c_int, c_int, c_int, c_int, c_int]
Nviz_draw_image.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 45
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_2D'):
continue
Nviz_set_2D = _lib.Nviz_set_2D
Nviz_set_2D.argtypes = [c_int, c_int]
Nviz_set_2D.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 46
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_del_texture'):
continue
Nviz_del_texture = _lib.Nviz_del_texture
Nviz_del_texture.argtypes = [c_int]
Nviz_del_texture.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 47
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_max_texture'):
continue
Nviz_get_max_texture = _lib.Nviz_get_max_texture
Nviz_get_max_texture.argtypes = [POINTER(c_int)]
Nviz_get_max_texture.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 50
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_exag_height'):
continue
Nviz_get_exag_height = _lib.Nviz_get_exag_height
Nviz_get_exag_height.argtypes = [POINTER(c_double), POINTER(c_double), POINTER(c_double)]
Nviz_get_exag_height.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 51
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_exag'):
continue
Nviz_get_exag = _lib.Nviz_get_exag
Nviz_get_exag.argtypes = []
Nviz_get_exag.restype = c_double
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 54
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_light_position'):
continue
Nviz_set_light_position = _lib.Nviz_set_light_position
Nviz_set_light_position.argtypes = [POINTER(nv_data), c_int, c_double, c_double, c_double, c_double]
Nviz_set_light_position.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 55
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_light_bright'):
continue
Nviz_set_light_bright = _lib.Nviz_set_light_bright
Nviz_set_light_bright.argtypes = [POINTER(nv_data), c_int, c_double]
Nviz_set_light_bright.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 56
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_light_color'):
continue
Nviz_set_light_color = _lib.Nviz_set_light_color
Nviz_set_light_color.argtypes = [POINTER(nv_data), c_int, c_int, c_int, c_int]
Nviz_set_light_color.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 57
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_light_ambient'):
continue
Nviz_set_light_ambient = _lib.Nviz_set_light_ambient
Nviz_set_light_ambient.argtypes = [POINTER(nv_data), c_int, c_double]
Nviz_set_light_ambient.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 58
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_init_light'):
continue
Nviz_init_light = _lib.Nviz_init_light
Nviz_init_light.argtypes = [POINTER(nv_data), c_int]
Nviz_init_light.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 59
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_new_light'):
continue
Nviz_new_light = _lib.Nviz_new_light
Nviz_new_light.argtypes = [POINTER(nv_data)]
Nviz_new_light.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 60
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_model'):
continue
Nviz_draw_model = _lib.Nviz_draw_model
Nviz_draw_model.argtypes = [POINTER(nv_data)]
Nviz_draw_model.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 63
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_new_map_obj'):
continue
Nviz_new_map_obj = _lib.Nviz_new_map_obj
Nviz_new_map_obj.argtypes = [c_int, String, c_double, POINTER(nv_data)]
Nviz_new_map_obj.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 64
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_attr'):
continue
Nviz_set_attr = _lib.Nviz_set_attr
Nviz_set_attr.argtypes = [c_int, c_int, c_int, c_int, String, c_double, POINTER(nv_data)]
Nviz_set_attr.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 65
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_surface_attr_default'):
continue
Nviz_set_surface_attr_default = _lib.Nviz_set_surface_attr_default
Nviz_set_surface_attr_default.argtypes = []
Nviz_set_surface_attr_default.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 66
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_vpoint_attr_default'):
continue
Nviz_set_vpoint_attr_default = _lib.Nviz_set_vpoint_attr_default
Nviz_set_vpoint_attr_default.argtypes = []
Nviz_set_vpoint_attr_default.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 67
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_volume_attr_default'):
continue
Nviz_set_volume_attr_default = _lib.Nviz_set_volume_attr_default
Nviz_set_volume_attr_default.argtypes = []
Nviz_set_volume_attr_default.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 68
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_unset_attr'):
continue
Nviz_unset_attr = _lib.Nviz_unset_attr
Nviz_unset_attr.argtypes = [c_int, c_int, c_int]
Nviz_unset_attr.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 71
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_init_data'):
continue
Nviz_init_data = _lib.Nviz_init_data
Nviz_init_data.argtypes = [POINTER(nv_data)]
Nviz_init_data.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 72
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_destroy_data'):
continue
Nviz_destroy_data = _lib.Nviz_destroy_data
Nviz_destroy_data.argtypes = [POINTER(nv_data)]
Nviz_destroy_data.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 73
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_bgcolor'):
continue
Nviz_set_bgcolor = _lib.Nviz_set_bgcolor
Nviz_set_bgcolor.argtypes = [POINTER(nv_data), c_int]
Nviz_set_bgcolor.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 74
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_bgcolor'):
continue
Nviz_get_bgcolor = _lib.Nviz_get_bgcolor
Nviz_get_bgcolor.argtypes = [POINTER(nv_data)]
Nviz_get_bgcolor.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 75
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_color_from_str'):
continue
Nviz_color_from_str = _lib.Nviz_color_from_str
Nviz_color_from_str.argtypes = [String]
Nviz_color_from_str.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 76
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_new_fringe'):
continue
Nviz_new_fringe = _lib.Nviz_new_fringe
Nviz_new_fringe.argtypes = [POINTER(nv_data), c_int, c_ulong, c_double, c_int, c_int, c_int, c_int]
Nviz_new_fringe.restype = POINTER(struct_fringe_data)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 78
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_fringe'):
continue
Nviz_set_fringe = _lib.Nviz_set_fringe
Nviz_set_fringe.argtypes = [POINTER(nv_data), c_int, c_ulong, c_double, c_int, c_int, c_int, c_int]
Nviz_set_fringe.restype = POINTER(struct_fringe_data)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 80
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_fringe'):
continue
Nviz_draw_fringe = _lib.Nviz_draw_fringe
Nviz_draw_fringe.argtypes = [POINTER(nv_data)]
Nviz_draw_fringe.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 81
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_arrow'):
continue
Nviz_draw_arrow = _lib.Nviz_draw_arrow
Nviz_draw_arrow.argtypes = [POINTER(nv_data)]
Nviz_draw_arrow.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 82
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_arrow'):
continue
Nviz_set_arrow = _lib.Nviz_set_arrow
Nviz_set_arrow.argtypes = [POINTER(nv_data), c_int, c_int, c_float, c_uint]
Nviz_set_arrow.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 83
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_delete_arrow'):
continue
Nviz_delete_arrow = _lib.Nviz_delete_arrow
Nviz_delete_arrow.argtypes = [POINTER(nv_data)]
Nviz_delete_arrow.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 84
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_new_scalebar'):
continue
Nviz_new_scalebar = _lib.Nviz_new_scalebar
Nviz_new_scalebar.argtypes = [POINTER(nv_data), c_int, POINTER(c_float), c_float, c_uint]
Nviz_new_scalebar.restype = POINTER(struct_scalebar_data)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 85
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_scalebar'):
continue
Nviz_set_scalebar = _lib.Nviz_set_scalebar
Nviz_set_scalebar.argtypes = [POINTER(nv_data), c_int, c_int, c_int, c_float, c_uint]
Nviz_set_scalebar.restype = POINTER(struct_scalebar_data)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 86
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_draw_scalebar'):
continue
Nviz_draw_scalebar = _lib.Nviz_draw_scalebar
Nviz_draw_scalebar.argtypes = [POINTER(nv_data)]
Nviz_draw_scalebar.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 87
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_delete_scalebar'):
continue
Nviz_delete_scalebar = _lib.Nviz_delete_scalebar
Nviz_delete_scalebar.argtypes = [POINTER(nv_data), c_int]
Nviz_delete_scalebar.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 90
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_init_view'):
continue
Nviz_init_view = _lib.Nviz_init_view
Nviz_init_view.argtypes = [POINTER(nv_data)]
Nviz_init_view.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 91
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_focus_state'):
continue
Nviz_set_focus_state = _lib.Nviz_set_focus_state
Nviz_set_focus_state.argtypes = [c_int]
Nviz_set_focus_state.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 92
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_focus_map'):
continue
Nviz_set_focus_map = _lib.Nviz_set_focus_map
Nviz_set_focus_map.argtypes = [c_int, c_int]
Nviz_set_focus_map.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 93
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_has_focus'):
continue
Nviz_has_focus = _lib.Nviz_has_focus
Nviz_has_focus.argtypes = [POINTER(nv_data)]
Nviz_has_focus.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 94
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_set_focus'):
continue
Nviz_set_focus = _lib.Nviz_set_focus
Nviz_set_focus.argtypes = [POINTER(nv_data), c_float, c_float, c_float]
Nviz_set_focus.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 95
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_focus'):
continue
Nviz_get_focus = _lib.Nviz_get_focus
Nviz_get_focus.argtypes = [POINTER(nv_data), POINTER(c_float), POINTER(c_float), POINTER(c_float)]
Nviz_get_focus.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 96
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_xyrange'):
continue
Nviz_get_xyrange = _lib.Nviz_get_xyrange
Nviz_get_xyrange.argtypes = [POINTER(nv_data)]
Nviz_get_xyrange.restype = c_float
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 97
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_zrange'):
continue
Nviz_get_zrange = _lib.Nviz_get_zrange
Nviz_get_zrange.argtypes = [POINTER(nv_data), POINTER(c_float), POINTER(c_float)]
Nviz_get_zrange.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 98
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_get_longdim'):
continue
Nviz_get_longdim = _lib.Nviz_get_longdim
Nviz_get_longdim.argtypes = [POINTER(nv_data)]
Nviz_get_longdim.restype = c_float
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 101
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_new_render_window'):
continue
Nviz_new_render_window = _lib.Nviz_new_render_window
Nviz_new_render_window.argtypes = []
Nviz_new_render_window.restype = POINTER(struct_render_window)
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 102
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_init_render_window'):
continue
Nviz_init_render_window = _lib.Nviz_init_render_window
Nviz_init_render_window.argtypes = [POINTER(struct_render_window)]
Nviz_init_render_window.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 103
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_destroy_render_window'):
continue
Nviz_destroy_render_window = _lib.Nviz_destroy_render_window
Nviz_destroy_render_window.argtypes = [POINTER(struct_render_window)]
Nviz_destroy_render_window.restype = None
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 104
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_create_render_window'):
continue
Nviz_create_render_window = _lib.Nviz_create_render_window
Nviz_create_render_window.argtypes = [POINTER(struct_render_window), POINTER(None), c_int, c_int]
Nviz_create_render_window.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/defs/nviz.h: 105
for _lib in six.itervalues(_libs):
if not hasattr(_lib, 'Nviz_make_current_render_window'):
continue
Nviz_make_current_render_window = _lib.Nviz_make_current_render_window
Nviz_make_current_render_window.argtypes = [POINTER(struct_render_window)]
Nviz_make_current_render_window.restype = c_int
break
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/ogsf.h: 30
try:
GS_UNIT_SIZE = 1000.0
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 42
try:
MAP_OBJ_UNDEFINED = 0
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 43
try:
MAP_OBJ_SURF = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 44
try:
MAP_OBJ_VOL = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 45
try:
MAP_OBJ_VECT = 3
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 46
try:
MAP_OBJ_SITE = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 48
try:
DRAW_COARSE = 0
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 49
try:
DRAW_FINE = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 50
try:
DRAW_BOTH = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 53
try:
DRAW_QUICK_SURFACE = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 54
try:
DRAW_QUICK_VLINES = 2
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 55
try:
DRAW_QUICK_VPOINTS = 4
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 56
try:
DRAW_QUICK_VOLUME = 8
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 58
try:
RANGE = (5 * GS_UNIT_SIZE)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 59
try:
RANGE_OFFSET = (2 * GS_UNIT_SIZE)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 60
try:
ZRANGE = (3 * GS_UNIT_SIZE)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 61
try:
ZRANGE_OFFSET = (1 * GS_UNIT_SIZE)
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 63
try:
DEFAULT_SURF_COLOR = 3390463
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 65
try:
FORMAT_PPM = 1
except:
pass
# /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 66
try:
FORMAT_TIF = 2
except:
pass
fringe_data = struct_fringe_data # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 78
arrow_data = struct_arrow_data # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 86
scalebar_data = struct_scalebar_data # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 93
render_window = struct_render_window # /Users/cmbarton/grass_source/grass-7.8.3/dist.x86_64-apple-darwin18.7.0/include/grass/nviz.h: 132
# No inserted files
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 13 12:02:28 2021
@author: Clau
"""
'''
Paper: Energy sufficiency (SDEWES LA 2022)
User: School B - LOWLANDS
'''
from core import User, np
User_list = []
#Defining users
SB = User("School type B", 1)
User_list.append(SB)
#Appliances
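# The positional Appliance arguments below are read, as an assumption, against the
# RAMP model's signature: (user, number, power [W], num. of windows, total daily
# functioning time [min], time-of-use random variability, minimum functioning
# cycle [min]); check core.User.Appliance before relying on this interpretation.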
SB_indoor_bulb = SB.Appliance(SB,12,7,2,120,0.25,30)
SB_indoor_bulb.windows([480,780],[840,1140],0.35)
SB_outdoor_bulb = SB.Appliance(SB,3,13,1,60,0.2,10)
SB_outdoor_bulb.windows([960,1080],[0,0],0.35)
SB_TV = SB.Appliance(SB,1,60,2,120,0.1,5, occasional_use = 0.5)
SB_TV.windows([480,780],[840,1140],0.2)
SB_radio = SB.Appliance(SB,3,4,2,120,0.1,5, occasional_use = 0.5)
SB_radio.windows([480,780],[840,1140],0.2)
SB_DVD = SB.Appliance(SB,2,8,2,120,0.1,5, occasional_use = 0.5)
SB_DVD.windows([480,780],[840,1140],0.2)
SB_Freezer = SB.Appliance(SB,1,200,1,1440,0,30, 'yes',3)
SB_Freezer.windows([0,1440])
SB_Freezer.specific_cycle_1(200,20,5,10)
SB_Freezer.specific_cycle_2(200,15,5,15)
SB_Freezer.specific_cycle_3(200,10,5,20)
SB_Freezer.cycle_behaviour([580,1200],[0,0],[510,579],[0,0],[0,509],[1201,1440])
SB_PC = SB.Appliance(SB,1,50,2,210,0.1,10)
SB_PC.windows([480,780],[840,1140],0.35)
SB_Phone_charger = SB.Appliance(SB,3,2,2,180,0.2,5)
SB_Phone_charger.windows([480,780],[840,1140],0.35)
|
nilq/baby-python
|
python
|
# encoding: UTF-8
#
# Copyright (c) 2015 Facility for Rare Isotope Beams
#
"""
Lattice Model application package.
"""
|
nilq/baby-python
|
python
|
import fnmatch
import os
def locate(pattern, root=os.getcwd()):
    """Recursively yield absolute paths of files under root matching pattern.

    Note: the default for root is evaluated once, at import time.
    """
    for path, dirs, files in os.walk(root):
        for filename in files:
            if fnmatch.fnmatch(filename, pattern):
                yield os.path.abspath(os.path.join(path, filename))
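# Example usage (the pattern is hypothetical):
#     for path in locate('*.log'):
#         print(path)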
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""
OPENQASM interpreter.
Author: Andrew Cross
"""
import math
import copy
from ._unrollerexception import UnrollerException
class Unroller(object):
"""OPENQASM interpreter object that unrolls subroutines and loops."""
def __init__(self, ast, backend=None):
"""Initialize interpreter's data."""
# Abstract syntax tree from parser
self.ast = ast
# Backend object
self.backend = backend
# OPENQASM version number
self.version = 0.0
# Dict of qreg names and sizes
self.qregs = {}
# Dict of creg names and sizes
self.cregs = {}
# Dict of gates names and properties
self.gates = {}
# List of dictionaries mapping local parameter ids to real values
self.arg_stack = [{}]
# List of dictionaries mapping local bit ids to global ids (name,idx)
self.bit_stack = [{}]
def _process_bit_id(self, node):
"""Process an Id or IndexedId node as a bit or register type.
Return a list of tuples (name,index).
"""
if node.type == "indexed_id":
# An indexed bit or qubit
return [(node.name, node.index)]
elif node.type == "id":
# A qubit or qreg or creg
if len(self.bit_stack[-1]) == 0:
# Global scope
if node.name in self.qregs:
return [(node.name, j)
for j in range(self.qregs[node.name])]
elif node.name in self.cregs:
return [(node.name, j)
for j in range(self.cregs[node.name])]
else:
raise UnrollerException("expected qreg or creg name:",
"line=%s" % node.line,
"file=%s" % node.file)
else:
# local scope
if node.name in self.bit_stack[-1]:
return [self.bit_stack[-1][node.name]]
else:
raise UnrollerException("excepted local bit name:",
"line=%s" % node.line,
"file=%s" % node.file)
def _process_local_id(self, node):
"""Process an Id node as a local id."""
# The id must be in arg_stack i.e. the id is inside a gate_body
id_dict = self.arg_stack[-1]
if node.name in id_dict:
return float(id_dict[node.name])
else:
raise UnrollerException("expected local parameter name:",
"line=%s" % node.line,
"file=%s" % node.file)
def _process_custom_unitary(self, node):
"""Process a custom unitary node."""
name = node.name
if node.arguments is not None:
args = self._process_node(node.arguments)
else:
args = []
bits = [self._process_bit_id(node_element)
for node_element in node.bitlist.children]
if name in self.gates:
gargs = self.gates[name]["args"]
gbits = self.gates[name]["bits"]
gbody = self.gates[name]["body"]
# Loop over register arguments, if any.
maxidx = max(map(len, bits))
for idx in range(maxidx):
self.arg_stack.append({gargs[j]: args[j]
for j in range(len(gargs))})
# Only index into register arguments.
element = list(map(lambda x: idx * x,
[len(bits[j]) > 1 for j in range(len(bits))]))
self.bit_stack.append({gbits[j]: bits[j][element[j]]
for j in range(len(gbits))})
self.backend.start_gate(name,
[self.arg_stack[-1][s] for s in gargs],
[self.bit_stack[-1][s] for s in gbits])
if not self.gates[name]["opaque"]:
self._process_children(gbody)
self.backend.end_gate(name,
[self.arg_stack[-1][s] for s in gargs],
[self.bit_stack[-1][s] for s in gbits])
self.arg_stack.pop()
self.bit_stack.pop()
else:
raise UnrollerException("internal error undefined gate:",
"line=%s" % node.line, "file=%s" % node.file)
def _process_gate(self, node, opaque=False):
"""Process a gate node.
If opaque is True, process the node as an opaque gate node.
"""
self.gates[node.name] = {}
de = self.gates[node.name]
de["opaque"] = opaque
de["n_args"] = node.n_args()
de["n_bits"] = node.n_bits()
if node.n_args() > 0:
de["args"] = [element.name for element in node.arguments.children]
else:
de["args"] = []
de["bits"] = [c.name for c in node.bitlist.children]
if opaque:
de["body"] = None
else:
de["body"] = node.body
self.backend.define_gate(node.name, copy.deepcopy(de))
def _process_cnot(self, node):
"""Process a CNOT gate node."""
id0 = self._process_bit_id(node.children[0])
id1 = self._process_bit_id(node.children[1])
if not(len(id0) == len(id1) or len(id0) == 1 or len(id1) == 1):
raise UnrollerException("internal error: qreg size mismatch",
"line=%s" % node.line, "file=%s" % node.file)
maxidx = max([len(id0), len(id1)])
for idx in range(maxidx):
if len(id0) > 1 and len(id1) > 1:
self.backend.cx(id0[idx], id1[idx])
elif len(id0) > 1:
self.backend.cx(id0[idx], id1[0])
else:
self.backend.cx(id0[0], id1[idx])
def _process_binop(self, node):
"""Process a binary operation node."""
operation = node.children[0]
lexpr = node.children[1]
rexpr = node.children[2]
if operation == '+':
return self._process_node(lexpr) + self._process_node(rexpr)
elif operation == '-':
return self._process_node(lexpr) - self._process_node(rexpr)
elif operation == '*':
return self._process_node(lexpr) * self._process_node(rexpr)
elif operation == '/':
return self._process_node(lexpr) / self._process_node(rexpr)
elif operation == '^':
return self._process_node(lexpr) ** self._process_node(rexpr)
else:
raise UnrollerException("internal error: undefined binop",
"line=%s" % node.line, "file=%s" % node.file)
def _process_prefix(self, node):
"""Process a prefix node."""
operation = node.children[0]
expr = node.children[1]
if operation == '+':
return self._process_node(expr)
elif operation == '-':
return -self._process_node(expr)
else:
raise UnrollerException("internal error: undefined prefix",
"line=%s" % node.line, "file=%s" % node.file)
def _process_measure(self, node):
"""Process a measurement node."""
id0 = self._process_bit_id(node.children[0])
id1 = self._process_bit_id(node.children[1])
if len(id0) != len(id1):
raise UnrollerException("internal error: reg size mismatch",
"line=%s" % node.line, "file=%s" % node.file)
for idx, idy in zip(id0, id1):
self.backend.measure(idx, idy)
def _process_if(self, node):
"""Process an if node."""
creg = node.children[0].name
cval = node.children[1]
self.backend.set_condition(creg, cval)
self._process_node(node.children[2])
self.backend.drop_condition()
def _process_external(self, n):
"""Process an external function node n."""
op = n.children[0].name
expr = n.children[1]
dispatch = {
'sin': math.sin,
'cos': math.cos,
'tan': math.tan,
'exp': math.exp,
'ln': math.log,
'sqrt': math.sqrt
}
if op in dispatch:
return dispatch[op](self._process_node(expr))
else:
raise UnrollerException("internal error: undefined external",
"line=%s" % n.line, "file=%s" % n.file)
def _process_children(self, node):
"""Call process_node for all children of node."""
for c in node.children:
self._process_node(c)
def _process_node(self, node):
"""Carry out the action associated with node n."""
if node.type == "program":
self._process_children(node)
elif node.type == "qreg":
self.qregs[node.name] = int(node.index)
self.backend.new_qreg(node.name, int(node.index))
elif node.type == "creg":
self.cregs[node.name] = int(node.index)
self.backend.new_creg(node.name, int(node.index))
elif node.type == "id":
return self._process_local_id(node)
elif node.type == "int":
# We process int nodes when they are leaves of expressions
# and cast them to float to avoid, for example, 3/2 = 1.
return float(node.value)
elif node.type == "real":
return float(node.value)
elif node.type == "indexed_id":
# We should not get here.
raise UnrollerException("internal error n.type == indexed_id:",
"line=%s" % node.line,
"file=%s" % node.file)
elif node.type == "id_list":
# We process id_list nodes when they are leaves of barriers.
return [self._process_bit_id(node_children)
for node_children in node.children]
elif node.type == "primary_list":
# We should only be called for a barrier.
return [self._process_bit_id(m) for m in node.children]
elif node.type == "gate":
self._process_gate(node)
elif node.type == "custom_unitary":
self._process_custom_unitary(node)
elif node.type == "universal_unitary":
args = tuple(self._process_node(node.children[0]))
qid = self._process_bit_id(node.children[1])
for element in qid:
self.backend.u(args, element)
elif node.type == "cnot":
self._process_cnot(node)
elif node.type == "expression_list":
return [self._process_node(node_children)
for node_children in node.children]
elif node.type == "binop":
return self._process_binop(node)
elif node.type == "prefix":
return self._process_prefix(node)
elif node.type == "measure":
self._process_measure(node)
elif node.type == "magic":
self.version = float(node.children[0])
self.backend.version(node.children[0])
elif node.type == "barrier":
ids = self._process_node(node.children[0])
self.backend.barrier(ids)
elif node.type == "reset":
id0 = self._process_bit_id(node.children[0])
for idx in range(len(id0)):
self.backend.reset(id0[idx])
elif node.type == "if":
self._process_if(node)
elif node.type == "opaque":
self._process_gate(node, opaque=True)
elif node.type == "external":
return self._process_external(node)
else:
raise UnrollerException("internal error: undefined node type",
node.type, "line=%s" % node.line,
"file=%s" % node.file)
def set_backend(self, backend):
"""Set the backend object."""
self.backend = backend
def execute(self):
"""Interpret OPENQASM and make appropriate backend calls."""
if self.backend is not None:
self._process_node(self.ast)
else:
raise UnrollerException("backend not attached")
|
nilq/baby-python
|
python
|
import pytest
from beagle.nodes import File, Process
from beagle.transformers.evtx_transformer import WinEVTXTransformer
@pytest.fixture
def transformer() -> WinEVTXTransformer:
return WinEVTXTransformer(None)
def test_process_creation(transformer):
input_event = {
"provider_name": "Microsoft-Windows-Security-Auditing",
"provider_guid": "{54849625-5478-4994-a5ba-3e3b0328c30d}",
"eventid_qualifiers": "4688",
"version": "1",
"level": "0",
"task": "13312",
"opcode": "0",
"keywords": "0x8020000000000000",
"timecreated_systemtime": 1_474_410_459,
"eventrecordid": "13344",
"correlation_activityid": "",
"correlation_relatedactivityid": "",
"execution_processid": "4",
"execution_threadid": "60",
"channel": "Security",
"computer": "IE10Win7",
"security_userid": "",
"system": None,
"data_name_subjectusersid": "S-1-5-18",
"data_name_subjectusername": "IE10WIN7$",
"data_name_subjectdomainname": "WORKGROUP",
"data_name_subjectlogonid": "0x00000000000003e7",
"data_name_newprocessid": "0x00000dec",
"data_name_newprocessname": "C:\\Windows\\System32\\dllhost.exe",
"data_name_tokenelevationtype": "%%1938",
"data_name_processid": "0x00000248",
"data_name_commandline": "C:\\Windows\\system32\\DllHost.exe /Processid:{AB8902B4-09CA-4BB6-B78D-A8F59079A8D5}",
"eventdata": None,
"event": None,
}
nodes = transformer.transform(input_event)
proc: Process = nodes[0]
proc_file: File = nodes[1]
parent: Process = nodes[2]
assert proc.process_id == 3564
assert proc.process_image == "dllhost.exe"
assert proc.process_image_path == "C:\\Windows\\System32"
assert (
proc.command_line
== "C:\\Windows\\system32\\DllHost.exe /Processid:{AB8902B4-09CA-4BB6-B78D-A8F59079A8D5}"
)
assert proc.host == "IE10Win7"
assert parent.process_id == 584
assert proc_file.file_name == "dllhost.exe"
assert {"timestamp": 1_474_410_459} in parent.launched[proc]
|
nilq/baby-python
|
python
|
# Exercise 2: To exercise our capacity for abstraction, let's model some parts of geometry software. How could we model a rectangle object?
class Rectangle:
    def __init__(self, width, height):
        self._width = width
        self._height = height
    def area(self):
        return self._width * self._height
    def perimeter(self):
        return 2 * (self._width + self._height)
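# Example usage:
#     r = Rectangle(3, 4)
#     r.area()       # 12
#     r.perimeter()  # 14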
|
nilq/baby-python
|
python
|
import os
import matplotlib
from tqdm import tqdm
import numpy as np
from model import FasterRCNNVGG16
from trainer import FasterRCNNTrainer
from utils.config import opt
import data.dataset
import data.util
import torch
from torch.autograd import Variable
from torch.utils import data as data_
import torchvision.transforms as transforms
from utils import array_tool as at
from utils.vis_tool import visdom_bbox
import PIL
import PIL.ImageDraw
import PIL.ImageFont
#rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
#resource.setrlimit(resource.RLIMIT_NOFILE, (20480, rlimit[1]))
class PlasticDetector:
def __init__(self, model_path, useGPU, n_fg_classes=2):
''' Creates a new detection model using the weights
stored in the file MODEL_PATH and initializes the GPU
if USEGPU is set to true.
MODEL_PATH: path to a trained detection model.
USEGPU: if true, the GPU will be used for faster computations.
'''
torch.set_num_threads(1)
opt.load_path = model_path
self.faster_rcnn = FasterRCNNVGG16(n_fg_class=n_fg_classes, anchor_scales=[1])
self.trainer = FasterRCNNTrainer(self.faster_rcnn, n_fg_class=n_fg_classes)
if useGPU:
self.trainer = self.trainer.cuda()
old_state = self.trainer.load(model_path)
self.transforms = transforms.ToTensor()
self.useGPU = useGPU
def predict_image(self, img, topk):
''' Detects objects in the provided testing images.
IMG: PIL image fitting the input of the trained model
TOPK: the number of bounding boxes to return. We return the
most confident bounding boxes first.
RETURNs: (BBOXES, CONFS) where BBOXES is a n x 4 array,
where each line corresponds to one bounding box. The
        bounding box coordinates are stored in the format
[x_min, y_min, x_max, y_max], where x corresponds to the width
and y to the height. CONFS are the confidence values for
each bounding box and are a n x m array. Each row corresponds
to the bounding box in the same row of BBOXES and provides
the scores for the m classes, that the model was trained to detect.
'''
pred_bboxes, pred_labels, pred_scores = self._run_prediction(img)
return pred_bboxes[:topk, [1,0,3,2]], pred_scores[:topk]
def annotate_image(self, img, topk):
''' Detects objects in the provided testing images.
IMG: PIL image fitting the input of the trained model
TOPK: the number of bounding boxes to return. We return the
most confident bounding boxes first.
RETURNS: IMG: a PIL image with the detected bounding boxes
annotated as rectangles.
'''
pred_bboxes, pred_labels, pred_scores = self._run_prediction(img)
draw = PIL.ImageDraw.Draw(img)
colors = [(255,0,0),(0,255,0)]
for bbox, label, score in zip(pred_bboxes, pred_labels, pred_scores):
draw.rectangle(bbox[[1,0,3,2]], outline=colors[label])
#font = PIL.ImageFont.truetype("sans-serif.ttf", 16)
#draw.text(bbox[[1,0]],"Sample Text",colors[label])
return img
def _run_prediction(self, img):
        ''' Run the detector on one image.
        IMG: PIL image
        RETURN: (BBOXES, LABELS, SCORES) for all detections whose
        confidence score exceeds 0.7. Internally the image is converted
        to a CHW float array and normalized with
        data.dataset.caffe_normalize before prediction.
        '''
img = img.convert('RGB')
img = np.asarray(img, dtype=np.float32)
if img.ndim == 2:
# reshape (H, W) -> (1, H, W)
img = img[np.newaxis]
else:
# transpose (H, W, C) -> (C, H, W)
img = img.transpose((2, 0, 1))
proc_img = data.dataset.caffe_normalize(img/255.)
tensor_img = torch.from_numpy(proc_img).unsqueeze(0)
if self.useGPU:
tensor_img = tensor_img.cuda()
# This preset filters bounding boxes with a score < 0.7
        # and has to be set every time before using predict()
self.faster_rcnn.use_preset('visualize')
pred_bboxes, pred_labels, pred_scores = self.faster_rcnn.predict(tensor_img, [(img.shape[1], img.shape[2])])
box_filter = np.array(pred_scores[0]) > 0.7
return pred_bboxes[0][box_filter], pred_labels[0][box_filter], pred_scores[0][box_filter]
if __name__ == '__main__':
det = PlasticDetector('checkpoints/fasterrcnn_07122125_0.5273599762268979', True)
print('Loaded model.')
image_path = 'misc/demo.jpg'
test_image = PIL.Image.open(image_path)
print('Working on image {}'.format(image_path))
print(det.predict_image(test_image, 5))
pred_bboxes, pred_scores = det.predict_image(test_image, 1000)
pred_img = visdom_bbox(np.array(test_image.convert('RGB')).transpose((2, 0, 1)),
at.tonumpy(pred_bboxes[:,[1,0,3,2]]),
at.tonumpy([1 for _ in pred_bboxes]),
at.tonumpy(pred_scores),
label_names=['Animal', 'BG'])
PIL.Image.fromarray((255*pred_img).transpose((1,2,0)).astype(np.uint8)).save('output.jpg')
det.annotate_image(test_image, 5).save('output-annotate.jpg')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
"""
Implement the singleton pattern via the __new__ method
"""
class SingleTon(object):
"""继承该父类的类都是单例类,即重写类的new方法"""
_instance = {} # 用来保存自己类的实例
def __new__(cls, *args, **kwargs):
# 如果没有创建过该实例则创建一个自身的实例
if cls not in cls._instance:
cls._instance[cls] = super().__new__(cls)
return cls._instance[cls]
class Tony(SingleTon):
class_val = "class_method"
def __init__(self, name):
self.name = name
def print_name(self):
print(self.name)
@staticmethod
def print_static():
print("static method")
@classmethod
def print_class_method(cls):
print(cls.class_val)
if __name__ == '__main__':
tony = Tony("tony")
jim = Tony("jim")
print(tony is jim)
print(tony.name)
print(jim.name)
tony.print_name()
jim.print_name()
tony.print_static()
jim.print_static()
tony.print_class_method()
jim.print_class_method()
|
nilq/baby-python
|
python
|
class PathgeoTwitter:
'''
createXLSX: convert tweets array into xlsx file
input
1. *tweets (array)
2. *cols (array): which columns in tweets you want to export
3. *outputPath (String)
4. *fileName (String): with XLSX extension, such as "test.xlsx"
5. ?keyword (string)
6. ?sheetTitle (string)
return filepath (string)
'''
def createXLSX(self, tweets, cols, outputPath, fileName, keyword=None, sheetTitle='Tweets'):
from openpyxl import Workbook
from BeautifulSoup import BeautifulSoup as BS
try:
book = Workbook()
sheet = book.get_active_sheet()
sheet.title = sheetTitle
#create columns
for indx, col in enumerate(cols):
sheet.cell(row=0, column=indx).value = col.upper()
#read tweets
for rowIndx, tweet in enumerate(tweets):
for colIndx, col in enumerate(cols):
if col not in tweet:
continue
val = ''
if col in ('urls', 'hashtags'):
if 'entities' in tweet and col in tweet['entities'] and tweet['entities'][col]:
if type(tweet['entities'][col][0]) in (str, unicode):
val = ', '.join(tweet['entities'][col])
elif col == 'urls':
val = ', '.join(map(lambda item: item['expanded_url'], tweet['entities'][col]))
elif col == 'hashtags':
val = ', '.join(map(lambda item: item['text'], tweet['entities'][col]))
if col == 'is_retweet':
val = '' if 'retweeted_id' not in tweet and 'user' not in tweet else bool(tweet.get('retweeted_id', None))
if col == 'retweeted_id':
val = tweet.get('retweeted_id', '')
if col == 'retweet_count':
val = tweet.get('retweet_count', '')
if col == 'time_zone' and 'user' in tweet:
val = tweet['user'].get('time_zone', '')
if col == 'followers_count' and 'user' in tweet:
val = tweet['user'].get('followers_count', '')
if col == 'friends_count' and 'user' in tweet:
val = tweet['user'].get('friends_count', '')
if col == 'statuses_count' and 'user' in tweet:
val = tweet['user'].get('statuses_count', '')
if col == 'language':
val = tweet.get('lang', None)
val = val or tweet.get('iso_language_code', None)
if col == 'location':
if 'location' in tweet:
val = tweet[col]
elif 'user' in tweet and 'location' in tweet['user']:
val = tweet['user']['location']
if col == 'from_user':
if 'from_user' in tweet:
val = tweet[col]
elif 'user' in tweet and type(tweet['user']) is dict and 'screen_name' in tweet['user']:
val = tweet['user']['screen_name']
if col == 'from_user_name':
if 'from_user_name' in tweet:
val = tweet[col]
elif 'user' in tweet and type(tweet['user']) is dict and 'name' in tweet['user']:
val = tweet['user']['name']
if col == "keyword":
val = keyword
elif col == "city":
val = ', '.join([item['name'] for item in tweet['search_info']['search_areas']])
elif col == "geo" and tweet['geo']:
val = "%f,%f" % (tweet['geo']['coordinates'][0], tweet['geo']['coordinates'][1])
elif col in ("created_at", "created_at_local"):
val = str(tweet[col])
elif col == 'source':
                        # strip away HTML tags with BeautifulSoup
                        val = BS(tweet[col], 'html.parser').text
elif col in tweet:
val = tweet[col]
                    if type(val) not in (list, dict) and col not in ("_id", 'search_info', 'entities'):
                        sheet.cell(row=rowIndx + 2, column=colIndx + 1).value = val
            outPath = os.path.join(outputPath, fileName)
            book.save(outPath)
            return outPath
        except Exception as e:
            import traceback
            print(e)
            traceback.print_exc()
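# Illustrative call (added; the tweet dicts are assumed to follow the Twitter API
# shape handled above, and the path/file names are made up for the example):
# PathgeoTwitter().createXLSX(tweets,
#                             cols=['created_at', 'from_user', 'text', 'hashtags'],
#                             outputPath='exports', fileName='tweets.xlsx',
#                             keyword='earthquake')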
|
nilq/baby-python
|
python
|
# Minimum Window Substring: https://leetcode.com/problems/minimum-window-substring/
# Given two strings s and t of lengths m and n respectively, return the minimum window substring of s such that every character in t (including duplicates) is included in the window. If there is no such substring, return the empty string "".
# The testcases will be generated such that the answer is unique.
# A substring is a contiguous sequence of characters within the string.
from collections import Counter
class Solution:
def minWindow(self, s: str, t: str) -> str:
# Count what we have in some sort of bit array
needed = Counter(t)
remainingNeeded = sum(needed.values())
left, right = None, None
start = 0
# Loop through all characters in s
for end in range(len(s)):
currentChar = s[end]
if currentChar in needed:
if needed[currentChar] > 0:
remainingNeeded -= 1
needed[currentChar] -= 1
if remainingNeeded == 0:
# Pop off characters
while start < end and remainingNeeded <= 0:
removeChar = s[start]
if removeChar in needed:
needed[removeChar] += 1
if needed[removeChar] > 0:
remainingNeeded += 1
break
start += 1
                # Check if the current window is smaller than the previous best
if left is None or (end + 1 - start) < (right - left):
left, right = start, end + 1
start += 1
# If nothing was found return ""
return "" if left is None else s[left:right]
# Can this be improved upon? Instead of advancing `start` one character at a time
# and popping every value, we could keep a queue of the indices of needed
# characters and jump straight to the next one, doing the equivalent bookkeeping.
# This solution runs in O(S + T) time, where S and T are the lengths of s and t,
# since we scan all of s and count all of t; space is O(T) for the tracked characters.
# Score Card
# Did I need hints? N
# Did you finish within 30 min? Y
# Was the solution optimal? Kind of; there is one slight improvement that I think can be made
# Were there any bugs? Nope!
# 4 5 3 3 = 3.75
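# Quick sanity check using the classic LeetCode examples (added as an
# illustration; not part of the original solution):
if __name__ == '__main__':
    assert Solution().minWindow("ADOBECODEBANC", "ABC") == "BANC"
    assert Solution().minWindow("a", "aa") == ""
    print("minWindow checks passed")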
|
nilq/baby-python
|
python
|
from webapp.forms import SForm
from django.views.generic.edit import FormView
from django import forms
class HomePageView(FormView):
template_name = 'home.html'
form_class = SForm
success_url = '/'
    # NOTE: a class-level dict is shared across all instances (and therefore all
    # requests); fine for this demo, but per-request state belongs on the instance.
    ctx = dict()
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
answer = form.check_string()
self.ctx['answer'] = answer
return super(HomePageView, self).form_valid(form)
def get_context_data(self, **kwargs):
context = super(HomePageView, self).get_context_data(**kwargs)
if 'answer' in self.ctx:
context['answer'] = self.ctx['answer']
return context
|
nilq/baby-python
|
python
|
from .default.params.Params import (Choice, TransitionChoice,
Array, Scalar, Log, Tuple,
Instrumentation, Dict)
|
nilq/baby-python
|
python
|
from flask import current_app as app
class Purchase:
def __init__(self, id, uid, pid, time_purchased, name, price, quantity, status):
self.id = id
self.uid = uid
self.pid = pid
self.time_purchased = time_purchased
self.name = name
self.price = price
self.quantity = quantity
self.status = status
@staticmethod
def get(id):
rows = app.db.execute('''
SELECT o.id, o.uid, i.product_id, o.time_purchased, p.name, i.price, i.quantity, i.status
FROM Items_ordered i, Orders o, Products p
WHERE o.id = i.order_id AND o.id = :id AND i.product_id = p.id
''',
id=id)
return Purchase(*(rows[0])) if rows else None
@staticmethod
def get_all_by_uid_since(uid, since):
rows = app.db.execute('''
SELECT o.id, o.uid, i.product_id, o.time_purchased, p.name, i.price, i.quantity, i.status
FROM Items_ordered i, Orders o, Products p
WHERE o.id = i.order_id AND uid = :uid AND i.product_id = p.id
AND time_purchased >= :since
ORDER BY time_purchased DESC
''',
uid=uid,
since=since)
return [Purchase(*row) for row in rows]
@staticmethod
    def get_all_by_uid_sort(uid, since, order):
        # Whitelist of ORDER BY clauses keyed by sort code; the clause is taken
        # from this dict only, so the string concatenation below is injection-safe.
        order_by = {
            'n': 'p.name',
            'timeD': 'time_purchased DESC',
            'timeA': 'time_purchased',
            'priceL': 'i.price',
            'priceH': 'i.price DESC',
        }
        if order not in order_by:
            return None
        try:
            rows = app.db.execute('''
SELECT o.id, o.uid, i.product_id, o.time_purchased, p.name, i.price, i.quantity, i.status
FROM Items_ordered i, Orders o, Products p
WHERE o.id = i.order_id AND uid = :uid AND i.product_id = p.id
AND time_purchased >= :since
ORDER BY ''' + order_by[order],
                                  uid=uid,
                                  since=since)
            return [Purchase(*row) for row in rows]
        except Exception as e:
            print(e)
            return None
@staticmethod
def get_all_by_uid_search(uid, since, search):
rows = app.db.execute('''
SELECT o.id, o.uid, i.product_id, o.time_purchased, p.name, i.price, i.quantity, i.status
FROM Items_ordered i, Orders o, Products p
WHERE o.id = i.order_id AND uid = :uid AND i.product_id = p.id
AND time_purchased >= :since AND p.name LIKE '%' || :search || '%'
ORDER BY time_purchased DESC
''',
uid=uid,
since=since,
search=search)
return [Purchase(*row) for row in rows]
@staticmethod
def place_order(uid):
try:
cost = app.db.execute(
'''
SELECT SUM(c.quantity * i.price) AS total
FROM Cart c, Inventory i
WHERE c.pid = i.product_id AND c.seller_id = i.seller_id AND c.id = :uid;
''',
uid = uid
)
totalcost = float(cost[0][0])
balance = app.db.execute(
'''
SELECT balance FROM Users WHERE id = :uid
''',
uid = uid
)
            balance = float(balance[0][0])
            # Abort before touching inventory or seller balances if the buyer
            # cannot cover the order (previously stock was deducted regardless).
            if balance < totalcost:
                return
items = app.db.execute(
'''
SELECT c.pid, c.seller_id, c.quantity, i.price
FROM Cart c, Inventory i
WHERE id = :uid AND c.pid = i.product_id AND c.seller_id = i.seller_id
''',
uid = uid
)
for item in items:
pid = item[0]
seller_id = item[1]
quant = int(item[2])
price = float(item[3])
total_price = float(price * quant)
rows = app.db.execute(
'''
UPDATE Inventory
SET quantity = quantity - :quant
WHERE product_id = :pid AND seller_id = :seller_id
RETURNING product_id
''',
pid = pid,
seller_id = seller_id,
quant = quant
)
rows1 = app.db.execute(
'''
UPDATE Users
SET balance = balance + :total_price
WHERE id = :seller_id
RETURNING id
''',
seller_id = seller_id,
total_price = total_price
)
if balance >= totalcost:
removeBalance = app.db.execute('''
UPDATE Users
SET balance = :new_balance
WHERE id = :uid
RETURNING balance
''',
new_balance = balance - totalcost,
uid = uid
)
                # NOTE: COUNT(id)+1 as an id generator is racy under concurrent
                # orders; a SERIAL/sequence column would be safer.
                generateID = app.db.execute('''
                SELECT COUNT(id) FROM Orders
                ''')
                order_id = int(generateID[0][0]) + 1
rows = app.db.execute('''
INSERT INTO ORDERS(id, uid)
VALUES(:id, :uid)
RETURNING id
''',
id = order_id,
uid = uid
)
id = rows[0][0]
rows = app.db.execute('''
INSERT INTO Items_Ordered(order_id, product_id, seller_id, price, quantity, status)
SELECT :order_id, c.pid, c.seller_id, i.price, c.quantity, :status
FROM Inventory i, Cart c
WHERE c.id = :uid AND c.pid = i.product_id AND c.seller_id = i.seller_id
RETURNING order_id
''',
uid = uid,
order_id = id,
status = 0
)
app.db.execute('''
DELETE FROM Cart WHERE id = :uid
''',
uid = uid
)
except Exception as e:
print(e)
@staticmethod
def can_place_order(uid):
try:
cost = app.db.execute(
'''
SELECT SUM(c.quantity * i.price) AS total
FROM Cart c, Inventory i
WHERE c.pid = i.product_id AND c.seller_id = i.seller_id AND c.id = :uid;
''',
uid = uid
)
totalcost = float(cost[0][0])
balance = app.db.execute(
'''
SELECT balance FROM Users WHERE id = :uid
''',
uid = uid
)
balance = float(balance[0][0])
            return balance >= totalcost
except Exception as e:
print(e)
|
nilq/baby-python
|
python
|
from heapq import nlargest
def popular_shop(l, r, make_dict):
for i in range(l, r+1):
make_dict[i] += 1
t = int(input())
for j in range(t):
n_m = list(map(int, input().strip().split()))
n = n_m[0]
m = n_m[1]
make_dict = {i + 1: 0 for i in range(n)}
for i in range(m):
arr_el = list(map(int, input().strip().split()))
l = arr_el[0]
r = arr_el[1]
popular_shop(l, r, make_dict)
three_largest = nlargest(3, make_dict, key=make_dict.get)
three_largest.sort()
for i in three_largest:
print(i, end=" ")
# input
# 1
# 6 5
# 3 5
# 2 3
# 4 6
# 1 6
# 5 6
# output
# 3 4 5
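# Note (added): incrementing every index in [l, r] makes popular_shop O(n * m)
# overall. A standard difference-array / prefix-sum variant does the same
# counting in O(n + m); a minimal sketch, assuming the same 1-based shop ids:
def popular_shop_fast(queries, n):
    diff = [0] * (n + 2)
    for l, r in queries:
        diff[l] += 1      # range [l, r] starts contributing at l
        diff[r + 1] -= 1  # ...and stops contributing after r
    counts, running = {}, 0
    for shop in range(1, n + 1):
        running += diff[shop]
        counts[shop] = running
    return counts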
|
nilq/baby-python
|
python
|
import sys, os
from lxml import objectify
usage = """
Usage is:
py admx2oma.py <your.admx> <ADMX-OMA-URI>
  <ADMX-OMA-URI> : The OMA-URI you specified in Intune when ingesting the admx file
Take care, the OMA-URI is case sensitive.
<your.admx> : The admx file you ingested
"""
def run():
if len(sys.argv) < 3:
print(usage)
sys.exit()
admxFile = sys.argv[1]
admxOMA_URI = sys.argv[2]
if not os.path.exists(admxFile):
print("file not found: " + admxFile)
sys.exit()
templatestring = "./<scope>/Vendor/MSFT/Policy/Config/<area>/<policy>"
catHierarchie = {}
try:
(AppName, SettingType, id_or_admxName) = admxOMA_URI.partition("/ADMXInstall/")[2].split("/")
except BaseException:
print()
print("ERROR: Bad OMA-URI: " + admxOMA_URI)
print(usage)
sys.exit()
admx = objectify.parse(admxFile)
r = admx.getroot()
for category in r.categories.getchildren():
ref = category.parentCategory.get('ref') if hasattr(category, "parentCategory") else ":"
catHierarchie[category.get("name")] = ref
for policy in r.policies.findall("policy", namespaces=r.nsmap):
out = templatestring
out = out.replace("<policy>", policy.get("name"))
hierarchie = policy.parentCategory.get("ref")
nextCat = catHierarchie[policy.parentCategory.get("ref")]
while nextCat.find(":") == -1:
hierarchie = '~'.join((nextCat, hierarchie))
if not nextCat in catHierarchie:
break
nextCat = catHierarchie[nextCat]
hierarchie = '~'.join((AppName, SettingType, hierarchie))
out = out.replace("<area>", hierarchie)
p = PolicyOutput(policy.get("name"))
if policy.get("class") in ("Both", "User"):
p.omaUser = out.replace("<scope>", "User")
if policy.get("class") in ("Both", "Machine"):
p.omaDevice = out.replace("<scope>", "Device")
if hasattr(policy, "elements"):
for element in policy.elements.getchildren():
v = PolicyOutput.PolicyValue(element.get('id'), element.tag, element.get('valueName') or element.get('id'), element.get('required'))
p.values.append(v)
if element.tag in ('enum'):
for item in element.getchildren():
val = item.value.getchildren()[0]
v.valEnumOptions.append(str(val.get("value") if val.get("value") is not None else val.text))
v.value = v.valEnumOptions[0]
if element.tag in ('boolean'):
v.valEnumOptions.append('true')
v.valEnumOptions.append('false')
v.value = v.valEnumOptions[0]
p.print()
class PolicyOutput:
class PolicyValue:
def __init__(self, valID = '', valType = 'text', valName = None, required = None, value = ''):
self.valID = valID
self.valType = valType
self.valName = valName or valID
self.value = value
self.valEnumOptions = []
self.required = required
def __init__(self, name = ""):
self.polName = name
self.omaDevice = 'No device policy'
self.omaUser = 'No user policy'
self.values = []
templatestring = "./<scope>/Vendor/MSFT/Policy/Config/<area>/<policy>"
def print(self):
print(polTemplate.format(**self.__dict__))
dataTagList = []
for value in self.values:
dataTagList.append(dataTagTemplate.format(**value.__dict__))
out = {}
out.update({'valEnumOptionsOut': '(%s)'% '|'.join(value.valEnumOptions) if len(value.valEnumOptions) else ''})
out.update({'requiredOut': 'required' if value.required else 'optional'})
out.update({'dataTag': dataTagList[-1]})
out.update(value.__dict__)
print(valTemplate.format(**out))
        if dataTagList:
            dataTagList.insert(0, '')
print(recordTemplate.format(**{'dataTags': '\n'.join(dataTagList)}))
polTemplate = """
===============================
Policy: {polName}
===============================
{omaUser}
{omaDevice}
Enabled value: <enabled/>
Disabled value: <disabled/>
""".rstrip()
polTemplate = """
===============================
Policy: {polName}
===============================
{omaUser}
{omaDevice}
(<enabled/>|<disabled/>)
""".rstrip()
dataTagTemplate = """
<data id='{valID}' value='{value}'/>
""".strip()
valTemplate = """
-------------------------------
{valName} ({requiredOut})
Value type: {valType} {valEnumOptionsOut}
{dataTag}
""".strip()
valTemplate = """
-------------------------------
Key Name: {valName}
Key ID: {valID}
Value type: {valType} {valEnumOptionsOut}
""".strip()
recordTemplate = """
----------- Example -----------
<enabled/>{dataTags}
""".strip()
if __name__ == "__main__":
run()
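# Example invocation (illustrative; the OMA-URI segments after /ADMXInstall/
# are read as <AppName>/<SettingType>/<FileUid>, matching what was ingested):
# py admx2oma.py MyApp.admx "./Device/Vendor/MSFT/Policy/ConfigOperations/ADMXInstall/MyApp/Policy/MyAppAdmx"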
|
nilq/baby-python
|
python
|
import logging
import sys
import ast
from typing import Optional
from logistik.config import RedisKeys
from ttldict import TTLOrderedDict
from logistik.cache import ICache
from logistik.db.reprs.handler import HandlerConf
from logistik.environ import GNEnvironment
ONE_HOUR = 60 * 60
class CacheRedis(ICache):
def __init__(self, env: GNEnvironment, host: str, port: int = None, db: int = None):
self.env = env
self.ttl_dict = TTLOrderedDict(default_ttl=60 * 5) # five minutes
self.logger = logging.getLogger(__name__)
if host == "mock":
from fakeredis import FakeRedis
self.redis = FakeRedis()
else:
from redis import Redis
self.redis = Redis(host=host, port=port, db=db)
def get_response_for(self, handler: HandlerConf, request: dict) -> Optional[dict]:
try:
key = self.get_response_key_from_request(handler, request)
response = self.redis.get(key)
if response is None:
return None
response = str(response, "utf-8")
return ast.literal_eval(response)
except Exception as e:
self.logger.error(f"could not get response from redis: {str(e)}")
self.logger.exception(e)
self.env.capture_exception(sys.exc_info())
return None
def set_response_for(self, handler: HandlerConf, request: dict, response: dict) -> None:
try:
# if rest api returns [response, error_code]
            if isinstance(response, list):
response = response[0]
key = self.get_response_key_from_request(handler, request)
self.redis.set(key, str(response))
self.redis.expire(key, 2 * ONE_HOUR)
except Exception as e:
self.logger.error(f"could not set response from redis: {str(e)}")
self.logger.exception(e)
self.env.capture_exception(sys.exc_info())
def _hash_for(self, handler_conf: HandlerConf):
return handler_conf.node_id()
def get_response_key_from_request(self, handler: HandlerConf, request: dict):
handler_hash = self._hash_for(handler)
provider_id = request.get("provider", dict()).get("id", "-1")
user_id = request.get("actor", dict()).get("id", "-1")
image_id = request.get("object", dict()).get("url", "").split("/")[-1].split(".")[0]
return RedisKeys.response_for(
provider_id=provider_id,
user_id=user_id,
image_id=image_id,
handler_hash=handler_hash
)
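# Illustrative usage (added; names are assumptions — host="mock" selects FakeRedis):
# cache = CacheRedis(env, host="mock")
# cache.set_response_for(handler_conf, request, {"status": "OK"})
# print(cache.get_response_for(handler_conf, request))  # -> {'status': 'OK'}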
|
nilq/baby-python
|
python
|
from django.template.response import TemplateResponse
from .forms import QuestionForm
# Create your views here.
def index(request) :
form = QuestionForm()
# print(request.context)
data_service = request.context
template_context = data_service.to_dict()
template_context.update(form=form)
return TemplateResponse(request, 'question_answering/index.html',
context=template_context)
|
nilq/baby-python
|
python
|
"""Create social table.
Revision ID: fe9c31ba1c0e
Revises: 7512bb631d1c
Create Date: 2020-04-15 16:12:02.211522
"""
import sqlalchemy as sa
import sqlalchemy_utils as sau
from sqlalchemy.dialects import postgresql
from modist.models.common import SocialType
from alembic import op
# revision identifiers, used by Alembic.
revision = "fe9c31ba1c0e"
down_revision = "7512bb631d1c"
branch_labels = None
depends_on = None
def upgrade():
"""Pushes changes into the database."""
op.create_table(
"social",
sa.Column(
"id",
postgresql.UUID(as_uuid=True),
server_default=sa.text("uuid_generate_v4()"),
nullable=False,
),
sa.Column(
"created_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column(
"updated_at",
sa.DateTime(timezone=True),
server_default=sa.text("now()"),
nullable=False,
),
sa.Column("is_active", sa.Boolean(), server_default="true", nullable=False),
sa.Column(
"type", sa.Enum(SocialType), nullable=False, default=SocialType.GENERIC
),
sa.Column("url", sau.types.url.URLType(), nullable=False),
sa.PrimaryKeyConstraint("id"),
)
op.create_unique_constraint("uq_social_type", "social", ["type", "url"])
op.create_refresh_updated_at_trigger("social")
def downgrade():
"""Reverts changes performed by upgrade()."""
op.drop_refresh_updated_at_trigger("social")
op.drop_constraint("uq_social_type", "social")
op.drop_table("social")
sa.Enum(SocialType).drop(bind=op.get_bind())
|
nilq/baby-python
|
python
|
import bpy
class ahs_maincurve_volume_down(bpy.types.Operator):
bl_idname = 'object.ahs_maincurve_volume_down'
bl_label = "肉付けを削除"
bl_description = "選択カーブの設定したテーパー/ベベルを削除"
bl_options = {'REGISTER', 'UNDO'}
@classmethod
def poll(cls, context):
try:
for ob in context.selected_objects:
if ob.type != 'CURVE': continue
if ob.data.taper_object or ob.data.bevel_object: break
else: return False
except: return False
return True
def execute(self, context):
for ob in context.selected_objects:
if ob.type != 'CURVE': continue
if ob.data.taper_object:
o, c = ob.data.taper_object, ob.data.taper_object.data
if o: context.blend_data.objects.remove(o, do_unlink=True)
if c: context.blend_data.curves.remove(c, do_unlink=True)
if ob.data.bevel_object:
o, c = ob.data.bevel_object, ob.data.bevel_object.data
if o: context.blend_data.objects.remove(o, do_unlink=True)
if c: context.blend_data.curves.remove(c, do_unlink=True)
for area in context.screen.areas: area.tag_redraw()
return {'FINISHED'}
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
# Script for generating a general_pipeline_alternative.glsl that
# handles filling two mip levels, for the given warps-per-workgroup
# and 2nd level mipmap tile size per workgroup.
# Hard to explain, but hopefully the output is more sensible.
from sys import argv, exit, stderr
import os
from pathlib import Path
os.chdir(os.path.split(__file__)[0])
try:
warps = int(argv[1])
threads = warps * 32
tile_2_x = int(argv[2])
tile_2_y = int(argv[3])
tile_1_x = tile_2_x * 2 + 1
tile_1_y = tile_2_y * 2 + 1
name = f"py2_{warps}_{tile_2_x}_{tile_2_y}"
if tile_2_x != tile_2_y:
print("Warning: not tested for non-square tiles, probably buggy.",
file=stderr)
except Exception as e:
print(e)
print("args: [warps] [tile x] [tile y]")
exit(1)
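# Example (illustrative): `python this_script.py 8 16 16` emits
# ./py2_8_16_16/general_pipeline_alternative.glsl and ./py2_8_16_16/py2_8_16_16.cpp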
Path(f"./{name}").mkdir(parents=True, exist_ok=True)
general_pipeline_alternative_file = open(
f"./{name}/general_pipeline_alternative.glsl", 'w', encoding='utf-8')
dispatcher_file = open(
f"./{name}/{name}.cpp", 'w', encoding='utf-8')
def fill_tile_vars(width, height, indentation):
spaces = " " * indentation
candidate_columns = threads // height
candidate_rows = threads // width
# Candidate 1: fill horizontally (imagine a tall sliding window)
if candidate_columns * height > candidate_rows * width:
initThreadOffset = "ivec2(localIdx_ / %iu, localIdx_ %% %iu)" % (height, height)
step = "ivec2(%i, 0)" % candidate_columns
base_iterations = width // candidate_columns
remainder_columns = width % candidate_columns
comment_text = f"Fill in {base_iterations} {candidate_columns}x{height} steps"
iterations = str(base_iterations)
if remainder_columns != 0:
comment_text += f" and 1 {remainder_columns}x{height} step"
iterations = f"localIdx_ < {remainder_columns} * {height} ? {base_iterations + 1} : {iterations}"
if candidate_columns * height != threads:
idle_threads = threads - candidate_columns * height
comment_text += f" ({idle_threads} idle threads)"
iterations = f"localIdx_ >= {candidate_columns} * {height} ? 0 : {iterations}"
# Candidate 2: fill vertically (imagine a wide sliding window)
else:
initThreadOffset = "ivec2(localIdx_ %% %iu, localIdx_ / %iu)" % (width, width)
step = "ivec2(0, %i)" % candidate_rows
base_iterations = height // candidate_rows
remainder_rows = height % candidate_rows
comment_text = f"Fill in {base_iterations} {width}x{candidate_rows} steps"
iterations = str(base_iterations)
if remainder_rows != 0:
comment_text += f" and 1 {width}x{remainder_rows} step"
iterations = f"localIdx_ < {remainder_rows} * {width} ? {base_iterations + 1} : {iterations}"
if candidate_rows * width != threads:
idle_threads = threads - candidate_rows * width
comment_text += f" ({idle_threads} idle threads)"
iterations = f"localIdx_ >= {candidate_rows} * {width} ? 0 : {iterations}"
return f"""\
{spaces}// {comment_text}
{spaces}initThreadOffset_ = {initThreadOffset};
{spaces}step_ = {step};
{spaces}iterations_ = {iterations};"""
source_code = f"""\
// General-case shader for generating 1 or 2 levels of the mip pyramid.
// When generating 1 level, each workgroup handles up to {threads} samples of the
// output mip level. When generating 2 levels, each workgroup handles
// a {tile_2_x}x{tile_2_y} tile of the last (2nd) output mip level, generating up to
// {tile_1_x}x{tile_1_y} samples of the intermediate (1st) output mip level along the way.
//
// Dispatch with y, z = 1
layout(local_size_x = {warps} * 32) in;
// When generating 2 levels, the results of generating the intermediate
// level (first level generated) are cached here; this is the input tile
// needed to generate the {tile_2_x}x{tile_2_y} tile of the second level generated.
shared NVPRO_PYRAMID_SHARED_TYPE sharedLevel_[{tile_1_y}][{tile_1_x}]; // [y][x]
ivec2 kernelSizeFromInputSize_(ivec2 inputSize_)
{{
return ivec2(inputSize_.x == 1 ? 1 : (2 | (inputSize_.x & 1)),
inputSize_.y == 1 ? 1 : (2 | (inputSize_.y & 1)));
}}
NVPRO_PYRAMID_TYPE
loadSample_(ivec2 srcCoord_, int srcLevel_, bool loadFromShared_);
// Handle loading and reducing a rectangle of size kernelSize_
// with the given upper-left coordinate srcCoord_. Samples read from
// mip level srcLevel_ if !loadFromShared_, sharedLevel_ otherwise.
//
// kernelSize_ must range from 1x1 to 3x3.
//
// Once computed, the sample is written to the given coordinate of the
// specified destination mip level, and returned. The destination
// image size is needed to compute the kernel weights.
NVPRO_PYRAMID_TYPE reduceStoreSample_(ivec2 srcCoord_, int srcLevel_,
bool loadFromShared_,
ivec2 kernelSize_,
ivec2 dstImageSize_,
ivec2 dstCoord_, int dstLevel_)
{{
bool lfs_ = loadFromShared_;
float n_ = dstImageSize_.y;
float rcp_ = 1.0f / (2 * n_ + 1);
float w0_ = rcp_ * (n_ - dstCoord_.y);
float w1_ = rcp_ * n_;
float w2_ = 1.0f - w0_ - w1_;
NVPRO_PYRAMID_TYPE v0_, v1_, v2_, h0_, h1_, h2_, out_;
// Reduce vertically up to 3 times (depending on kernel horizontal size)
switch (kernelSize_.x)
{{
case 3:
switch (kernelSize_.y)
{{
case 3: v2_ = loadSample_(srcCoord_ + ivec2(2, 2), srcLevel_, lfs_);
case 2: v1_ = loadSample_(srcCoord_ + ivec2(2, 1), srcLevel_, lfs_);
case 1: v0_ = loadSample_(srcCoord_ + ivec2(2, 0), srcLevel_, lfs_);
}}
switch (kernelSize_.y)
{{
case 3: NVPRO_PYRAMID_REDUCE(w0_, v0_, w1_, v1_, w2_, v2_, h2_); break;
case 2: NVPRO_PYRAMID_REDUCE2(v0_, v1_, h2_); break;
case 1: h2_ = v0_; break;
}}
// fallthru
case 2:
switch (kernelSize_.y)
{{
case 3: v2_ = loadSample_(srcCoord_ + ivec2(1, 2), srcLevel_, lfs_);
case 2: v1_ = loadSample_(srcCoord_ + ivec2(1, 1), srcLevel_, lfs_);
case 1: v0_ = loadSample_(srcCoord_ + ivec2(1, 0), srcLevel_, lfs_);
}}
switch (kernelSize_.y)
{{
case 3: NVPRO_PYRAMID_REDUCE(w0_, v0_, w1_, v1_, w2_, v2_, h1_); break;
case 2: NVPRO_PYRAMID_REDUCE2(v0_, v1_, h1_); break;
case 1: h1_ = v0_; break;
}}
case 1:
switch (kernelSize_.y)
{{
case 3: v2_ = loadSample_(srcCoord_ + ivec2(0, 2), srcLevel_, lfs_);
case 2: v1_ = loadSample_(srcCoord_ + ivec2(0, 1), srcLevel_, lfs_);
case 1: v0_ = loadSample_(srcCoord_ + ivec2(0, 0), srcLevel_, lfs_);
}}
switch (kernelSize_.y)
{{
case 3: NVPRO_PYRAMID_REDUCE(w0_, v0_, w1_, v1_, w2_, v2_, h0_); break;
case 2: NVPRO_PYRAMID_REDUCE2(v0_, v1_, h0_); break;
case 1: h0_ = v0_; break;
}}
}}
// Reduce up to 3 samples horizontally.
switch (kernelSize_.x)
{{
case 3:
n_ = dstImageSize_.x;
rcp_ = 1.0f / (2 * n_ + 1);
w0_ = rcp_ * (n_ - dstCoord_.x);
w1_ = rcp_ * n_;
w2_ = 1.0f - w0_ - w1_;
NVPRO_PYRAMID_REDUCE(w0_, h0_, w1_, h1_, w2_, h2_, out_);
break;
case 2:
NVPRO_PYRAMID_REDUCE2(h0_, h1_, out_);
break;
case 1:
out_ = h0_;
}}
// Write out sample.
NVPRO_PYRAMID_STORE(dstCoord_, dstLevel_, out_);
return out_;
}}
NVPRO_PYRAMID_TYPE
loadSample_(ivec2 srcCoord_, int srcLevel_, bool loadFromShared_)
{{
NVPRO_PYRAMID_TYPE loaded_;
if (loadFromShared_)
{{
NVPRO_PYRAMID_SHARED_LOAD((sharedLevel_[srcCoord_.y][srcCoord_.x]), loaded_);
}}
else
{{
NVPRO_PYRAMID_LOAD(srcCoord_, srcLevel_, loaded_);
}}
return loaded_;
}}
// Compute and write out (to the 1st mip level generated) the samples
// at coordinates
// initDstCoord_,
// initDstCoord_ + step_, ...
// initDstCoord_ + (iterations_-1) * step_
// and cache them at in the sharedLevel_ tile at coordinates
// initSharedCoord_,
// initSharedCoord_ + step_, ...
// initSharedCoord_ + (iterations_-1) * step_
// If boundsCheck_ is true, skip coordinates that are out of bounds.
void intermediateLevelLoop_(ivec2 initDstCoord_,
ivec2 initSharedCoord_,
ivec2 step_,
int iterations_,
bool boundsCheck_)
{{
ivec2 dstCoord_ = initDstCoord_;
ivec2 sharedCoord_ = initSharedCoord_;
int srcLevel_ = int(NVPRO_PYRAMID_INPUT_LEVEL_);
int dstLevel_ = srcLevel_ + 1;
ivec2 srcImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE(srcLevel_);
ivec2 dstImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE(dstLevel_);
ivec2 kernelSize_ = kernelSizeFromInputSize_(srcImageSize_);
for (int i_ = 0; i_ < iterations_; ++i_)
{{
ivec2 srcCoord_ = dstCoord_ * 2;
// Optional bounds check.
if (boundsCheck_)
{{
if (uint(dstCoord_.x) >= uint(dstImageSize_.x)) continue;
if (uint(dstCoord_.y) >= uint(dstImageSize_.y)) continue;
}}
bool loadFromShared_ = false;
NVPRO_PYRAMID_TYPE sample_ =
reduceStoreSample_(srcCoord_, srcLevel_, loadFromShared_, kernelSize_,
dstImageSize_, dstCoord_, dstLevel_);
// Above function handles writing to the actual output; manually
// cache into shared memory here.
NVPRO_PYRAMID_SHARED_STORE((sharedLevel_[sharedCoord_.y][sharedCoord_.x]),
sample_);
dstCoord_ += step_;
sharedCoord_ += step_;
}}
}}
// Function for the workgroup that handles filling the intermediate level
// (caching it in shared memory as well).
//
// We need somewhere from {tile_1_x - 1}x{tile_1_y - 1} to {tile_1_x}x{tile_1_y} samples, depending
// on what the kernel size for the 2nd mip level generation will be.
//
// dstTileCoord_ : upper left coordinate of the tile to generate.
// boundsCheck_ : whether to skip samples that are out-of-bounds.
void fillIntermediateTile_(ivec2 dstTileCoord_, bool boundsCheck_)
{{
uint localIdx_ = int(gl_LocalInvocationIndex);
ivec2 initThreadOffset_;
ivec2 step_;
int iterations_;
ivec2 dstImageSize_ =
NVPRO_PYRAMID_LEVEL_SIZE((int(NVPRO_PYRAMID_INPUT_LEVEL_) + 1));
ivec2 futureKernelSize_ = kernelSizeFromInputSize_(dstImageSize_);
if (futureKernelSize_.x == 3)
{{
if (futureKernelSize_.y == 3)
{{
{fill_tile_vars(tile_1_x, tile_1_y, indentation = 6)}
}}
else // Future 3x[2,1] kernel
{{
{fill_tile_vars(tile_1_x, tile_1_y - 1, indentation = 6)}
}}
}}
else
{{
if (futureKernelSize_.y == 3)
{{
{fill_tile_vars(tile_1_x - 1, tile_1_y, indentation = 6)}
}}
else
{{
{fill_tile_vars(tile_1_x - 1, tile_1_y - 1, indentation = 6)}
}}
}}
intermediateLevelLoop_(dstTileCoord_ + initThreadOffset_, initThreadOffset_,
step_, iterations_, boundsCheck_);
}}
"""
lastLevelLoop_source = f""" \
// Compute and write out (to the 2nd mip level generated) the samples
// at coordinates
// initDstCoord_,
// initDstCoord_ + step_, ...
// initDstCoord_ + (iterations_-1) * step_
// using as inputs the 1x1 to 3x3 tiles of shared memory at coordinates
// initSharedSrcCoord_,
// initSharedSrcCoord_ + 2 * step_, ...
// initSharedSrcCoord_ + (iterations_-1) * 2 * step_
// If boundsCheck_ is true, skip coordinates that are out of bounds.
void lastLevelLoop_(ivec2 initSharedSrcCoord_,
ivec2 initDstCoord_,
ivec2 step_,
int iterations_,
bool boundsCheck_)
{{
ivec2 dstCoord_ = initDstCoord_;
ivec2 srcCoord_ = initSharedSrcCoord_;
int dstLevel_ = int(NVPRO_PYRAMID_INPUT_LEVEL_ + 2);
ivec2 srcImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE((dstLevel_ - 1));
ivec2 dstImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE(dstLevel_);
ivec2 kernelSize_ = kernelSizeFromInputSize_(srcImageSize_);
for (int i_ = 0; i_ < iterations_; ++i_)
{{
// Optional bounds check.
if (boundsCheck_)
{{
if (uint(dstCoord_.x) >= uint(dstImageSize_.x)) continue;
if (uint(dstCoord_.y) >= uint(dstImageSize_.y)) continue;
}}
bool loadFromShared_ = true;
reduceStoreSample_(srcCoord_, 0, loadFromShared_, kernelSize_,
dstImageSize_, dstCoord_, dstLevel_);
dstCoord_ += step_;
srcCoord_ += 2 * step_;
}}
}}
// Function for the workgroup that handles filling the last level tile
// (2nd level after the original input level), using as input the
// tile in shared memory.
//
// dstTileCoord_ : upper left coordinate of the tile to generate.
// boundsCheck_ : whether to skip samples that are out-of-bounds.
void fillLastTile_(ivec2 dstTileCoord_, bool boundsCheck_)
{{
uint localIdx_ = int(gl_LocalInvocationIndex);
ivec2 initThreadOffset_;
ivec2 step_;
int iterations_;
{fill_tile_vars(tile_2_x, tile_2_y, indentation = 2)}
lastLevelLoop_(initThreadOffset_ * 2, dstTileCoord_ + initThreadOffset_,
step_, iterations_, boundsCheck_);
}}
"""
no_lastLevelLoop_source = f"""\
// Function for the workgroup that handles filling the last level tile
// (2nd level after the original input level), using as input the
// tile in shared memory.
//
// dstTileCoord_ : upper left coordinate of the tile to generate.
// boundsCheck_ : whether to skip samples that are out-of-bounds.
void fillLastTile_(ivec2 dstTileCoord_, bool boundsCheck_)
{{
uint localIdx_ = gl_LocalInvocationIndex;
if (localIdx_ < {tile_2_x} * {tile_2_y})
{{
ivec2 threadOffset_ = ivec2(localIdx_ % {tile_2_x}u, localIdx_ / {tile_2_x}u);
int srcLevel_ = int(NVPRO_PYRAMID_INPUT_LEVEL_) + 1;
int dstLevel_ = int(NVPRO_PYRAMID_INPUT_LEVEL_) + 2;
ivec2 srcImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE(srcLevel_);
ivec2 dstImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE(dstLevel_);
ivec2 srcSharedCoord_ = threadOffset_ * 2;
bool loadFromShared_ = true;
ivec2 kernelSize_ = kernelSizeFromInputSize_(srcImageSize_);
ivec2 dstCoord_ = threadOffset_ + dstTileCoord_;
bool inBounds_ = true;
if (boundsCheck_)
{{
inBounds_ = (uint(dstCoord_.x) < uint(dstImageSize_.x))
&& (uint(dstCoord_.y) < uint(dstImageSize_.y));
}}
if (inBounds_)
{{
reduceStoreSample_(srcSharedCoord_, 0, loadFromShared_, kernelSize_,
dstImageSize_, dstCoord_, dstLevel_);
}}
}}
}}
"""
if tile_2_x * tile_2_y > threads:
source_code += lastLevelLoop_source
else:
source_code += no_lastLevelLoop_source
source_code += f"""\
void nvproPyramidMain()
{{
int inputLevel_ = int(NVPRO_PYRAMID_INPUT_LEVEL_);
if (NVPRO_PYRAMID_LEVEL_COUNT_ == 1u)
{{
ivec2 kernelSize_ =
kernelSizeFromInputSize_(NVPRO_PYRAMID_LEVEL_SIZE(inputLevel_));
ivec2 dstImageSize_ = NVPRO_PYRAMID_LEVEL_SIZE((inputLevel_ + 1));
ivec2 dstCoord_ = ivec2(int(gl_GlobalInvocationID.x) % dstImageSize_.x,
int(gl_GlobalInvocationID.x) / dstImageSize_.x);
ivec2 srcCoord_ = dstCoord_ * 2;
if (dstCoord_.y < dstImageSize_.y)
{{
reduceStoreSample_(srcCoord_, inputLevel_, false, kernelSize_,
dstImageSize_, dstCoord_, inputLevel_ + 1);
}}
}}
else // Handling two levels.
{{
// Assign a {tile_2_x}x{tile_2_y} tile of mip level inputLevel_ + 2 to this workgroup.
int level2_ = inputLevel_ + 2;
ivec2 level2Size_ = NVPRO_PYRAMID_LEVEL_SIZE(level2_);
ivec2 tileCount_;
tileCount_.x = int(uint(level2Size_.x + {tile_2_x - 1}) / {tile_2_x}u);
tileCount_.y = int(uint(level2Size_.y + {tile_2_y - 1}) / {tile_2_y}u);
ivec2 tileIdx_ = ivec2(gl_WorkGroupID.x % uint(tileCount_.x),
gl_WorkGroupID.x / uint(tileCount_.x));
uint localIdx_ = gl_LocalInvocationIndex;
// Determine if bounds checking is needed; this is only the case
// for tiles at the right or bottom fringe that might be cut off
// by the image border. Note that later, I use if statements rather
// than passing boundsCheck_ directly to convince the compiler
// to inline everything.
bool boundsCheck_ = tileIdx_.x >= tileCount_.x - 1 ||
tileIdx_.y >= tileCount_.y - 1;
if (boundsCheck_)
{{
// Compute the tile in level inputLevel_ + 1 that's needed to
// compute the above {tile_2_x}x{tile_2_y} tile.
fillIntermediateTile_(tileIdx_ * 2 * ivec2({tile_2_x}, {tile_2_y}), true);
barrier();
// Compute the inputLevel_ + 2 tile of size {tile_2_x}x{tile_2_y}, loading
      // inputs from shared memory.
fillLastTile_(tileIdx_ * ivec2({tile_2_x}, {tile_2_y}), true);
}}
else
{{
// Same with no bounds checking.
fillIntermediateTile_(tileIdx_ * 2 * ivec2({tile_2_x}, {tile_2_y}), false);
barrier();
fillLastTile_(tileIdx_ * ivec2({tile_2_x}, {tile_2_y}), false);
}}
}}
}}
"""
general_pipeline_alternative_file.write(source_code)
dispatcher_file.write(f"""\
#include "nvpro_pyramid_dispatch_alternative.hpp"
#include "../py2_dispatch_impl.hpp"
NVPRO_PYRAMID_ADD_GENERAL_DISPATCHER(py2_{warps}_{tile_2_x}_{tile_2_y},
(py2_dispatch_impl<{warps}, {tile_2_x}, {tile_2_y}>))
""")
|
nilq/baby-python
|
python
|
from django.shortcuts import redirect, render
from django.urls import reverse
def home(request):
"""
This bounces home page requests to an appropriate place.
"""
if request.user.is_authenticated:
return redirect(reverse("page", kwargs={'path': 'index'}))
else:
return redirect(reverse("login"))
|
nilq/baby-python
|
python
|
import pygame as pg
from .utils import init_events, check_name_eligibility, str_to_tuple, find_font
from .base_icon import BaseIcon
class Canvas(BaseIcon):
defaults = {'type' : 'Canvas',
'name' : None,
'width' : 200,
'height' : 200,
'x' : None,
'y' : None,
'bg_color' : [255, 255, 255],
'enabled' : True,
'visible' : True}
updated = defaults.copy()
def __init__(self, form, x, y, exception_handler):
super().__init__(exception_handler)
self.__dict__.update(self.defaults)
self.form = form
self.x = x
self.y = y
self.abs_x = x + self.form.x
self.abs_y = y + self.form.y
self.events = init_events()
self.surface = None
self.draw_function = None
def draw(self):
self.surface = pg.Surface((self.width, self.height))
if self.is_selected:
self.surface.fill((255, 0, 0))
else:
self.surface.fill((0, 0, 0))
pg.draw.rect(self.surface, self.bg_color, (1, 1, self.width - 2, self.height - 2))
if self.draw_function is not None:
self.draw_function()
self.form.surface.blit(self.surface, (self.x, self.y))
self.abs_x = self.x + self.form.x
self.abs_y = self.y + self.form.y
def copy(self):
        copied = Canvas(self.form, self.x, self.y, self.exception_handler)
for key in self.updated:
copied.__dict__[key] = self.__dict__[key]
return copied
|
nilq/baby-python
|
python
|
URL = "https://github.com/General-101/Halo-Asset-Blender-Development-Toolset/issues/new"
EMAIL = "halo-asset-toolset@protonmail.com"
ENABLE_DEBUG = False
ENABLE_DEBUGGING_PM = False
ENABLE_PROFILING = False
ENABLE_CRASH_REPORT = True
|
nilq/baby-python
|
python
|
from mrjob.job import MRJob
from mrjob.step import MRStep
class SpendByCustomerSorted(MRJob):
def steps(self):
return [
MRStep(mapper=self.mapper_get_orders,
reducer=self.reducer_totals_by_customer),
MRStep(mapper=self.mapper_make_amounts_key,
reducer=self.reducer_output_results)
]
def mapper_get_orders(self, _, line):
(customerID, itemID, orderAmount) = line.split(',')
yield customerID, float(orderAmount)
def reducer_totals_by_customer(self, customerID, orders):
yield customerID, sum(orders)
def mapper_make_amounts_key(self, customerID, orderTotal):
yield '%04.02f'%float(orderTotal), customerID
def reducer_output_results(self, orderTotal, customerIDs):
for customerID in customerIDs:
yield customerID, orderTotal
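# Note (added): '%04.02f' pads only to width 4, so the shuffle's lexicographic
# sort matches numeric order only for totals with the same number of integer
# digits; a fixed wider pad (e.g. '%08.02f') would sort correctly in general.
# Typical invocation (mrjob convention; the input file name is illustrative):
#   python spend_by_customer_sorted.py customer-orders.csv
# where each input line looks like: 44,8602,37.19  (customerID,itemID,orderAmount)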
if __name__ == '__main__':
SpendByCustomerSorted.run()
|
nilq/baby-python
|
python
|
import aws_cdk.core as cdk
import aws_cdk.aws_s3 as s3
import aws_cdk.aws_s3_deployment as s3_deployment
import aws_cdk.aws_ssm as ssm
import aws_cdk.aws_lambda as lambda_
import aws_cdk.aws_iam as iam
import aws_cdk.aws_kms as kms
class CfnNag(cdk.Stack):
def __init__(self, scope: cdk.Construct, id: str, general_config: dict, **kwargs):
super().__init__(scope, id, **kwargs)
lambda_role = iam.Role(self, "cfn-nag-role", assumed_by=iam.ServicePrincipal("lambda.amazonaws.com"))
lambda_role.add_managed_policy(
iam.ManagedPolicy.from_managed_policy_arn(
self, "lambda-service-basic-role", "arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole"
)
)
lambda_policy = iam.Policy(
self,
"lambda-role-policy",
statements=[
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["codepipeline:PutJobSuccessResult", "codepipeline:PutJobFailureResult"],
resources=["*"],
)
],
)
cfn_policy = lambda_policy.node.default_child
cfn_policy.cfn_options.metadata = {
"cfn_nag": {
"rules_to_suppress": [
{"id": "W12", "reason": "Circular dependency, pipeline is not deployed yet"},
]
}
}
lambda_policy.attach_to_role(lambda_role)
encryption_key = kms.Key(self, "cfn-nag-rules-key", enable_key_rotation=True)
encryption_key.add_to_resource_policy(
iam.PolicyStatement(
effect=iam.Effect.ALLOW,
actions=["kms:Decrypt", "kms:DescribeKey"],
resources=["*"],
principals=[iam.ArnPrincipal(lambda_role.role_arn)],
)
)
rules_bucket = s3.Bucket(
self,
id="cfn-nag-rules-bucket",
bucket_name=f"cfn-nag-rules-{self.account}",
removal_policy=cdk.RemovalPolicy.DESTROY,
block_public_access=s3.BlockPublicAccess.BLOCK_ALL,
encryption=s3.BucketEncryption.KMS,
encryption_key=encryption_key,
)
cdk.Tags.of(rules_bucket).add("resource-owner", "cfn-nag")
s3_deployment.BucketDeployment(
self,
id="cfn-nag-rules-deployment",
destination_bucket=rules_bucket,
sources=[s3_deployment.Source.asset("./devsecops_quickstart/cfn_nag/rules")],
memory_limit=128,
)
rules_bucket.add_to_resource_policy(
iam.PolicyStatement(
actions=["s3:List*", "s3:GetObject*", "s3:GetBucket*"],
resources=[
rules_bucket.bucket_arn,
f"{rules_bucket.bucket_arn}/*",
],
principals=[iam.ArnPrincipal(lambda_role.role_arn)],
)
)
handler = lambda_.Function(
self,
"cfn-nag-handler",
function_name="cfn-nag",
runtime=lambda_.Runtime.RUBY_2_5,
memory_size=1024,
timeout=cdk.Duration.seconds(300),
handler="handler.handler",
role=lambda_role,
code=lambda_.Code.from_bucket(
bucket=s3.Bucket.from_bucket_name(
self, "code-bucket", bucket_name=general_config["cfn_nag"]["code"]["bucket_name"]
),
key=general_config["cfn_nag"]["code"]["key"],
),
environment={"RULE_BUCKET_NAME": rules_bucket.bucket_name, "RuleBucketPrefix": ""},
)
cfn_nag_params = general_config["parameter_name"]["cfn_nag"]
ssm.StringParameter(
self,
"rules-bucket-url-ssm-param",
parameter_name=cfn_nag_params["rules_bucket"],
string_value=rules_bucket.bucket_name,
)
ssm.StringParameter(
self,
"lambda-arn-ssm-param",
parameter_name=cfn_nag_params["lambda_arn"],
string_value=handler.function_arn,
)
ssm.StringParameter(
self,
"role-arn-ssm-param",
parameter_name=cfn_nag_params["role_arn"],
string_value=lambda_role.role_arn,
)
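# Illustrative wiring (added; names and config keys are assumptions inferred
# from the lookups above, not a definitive app entry point):
# app = cdk.App()
# CfnNag(app, "cfn-nag", general_config={
#     "cfn_nag": {"code": {"bucket_name": "my-lambda-code-bucket", "key": "cfn-nag.zip"}},
#     "parameter_name": {"cfn_nag": {
#         "rules_bucket": "/cfn-nag/rules-bucket",
#         "lambda_arn": "/cfn-nag/lambda-arn",
#         "role_arn": "/cfn-nag/role-arn",
#     }},
# })
# app.synth()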
|
nilq/baby-python
|
python
|
"""
PASSIVE Plugin for Testing for Captcha (OWASP-AT-008)
"""
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Google Hacking for CAPTCHA"
def run(PluginInfo):
resource = get_resources("PassiveCAPTCHALnk")
Content = plugin_helper.resource_linklist("Online Resources", resource)
return Content
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
#
# diffoscope: in-depth comparison of files, archives, and directories
#
# Copyright © 2016 Chris Lamb <lamby@debian.org>
#
# diffoscope is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# diffoscope is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with diffoscope. If not, see <https://www.gnu.org/licenses/>.
import os
import re
import abc
import magic
import logging
import subprocess
from diffoscope.exc import (
RequiredToolNotFound,
OutputParsingError,
ContainerExtractionError,
)
from diffoscope.tools import tool_required
from diffoscope.config import Config
from diffoscope.profiling import profile
from diffoscope.difference import Difference
try:
import tlsh
except ImportError: # noqa
tlsh = None
SMALL_FILE_THRESHOLD = 65536 # 64 kiB
logger = logging.getLogger(__name__)
def path_apparent_size(path=".", visited=None):
# should output the same as `du --apparent-size -bs "$path"`
if not visited:
stat = os.stat(path, follow_symlinks=False)
visited = {stat.st_ino: stat.st_size}
if os.path.isdir(path) and not os.path.islink(path):
for entry in os.scandir(path):
inode = entry.inode()
if inode in visited:
continue
visited[inode] = entry.stat(follow_symlinks=False).st_size
if entry.is_dir(follow_symlinks=False):
path_apparent_size(entry.path, visited)
return sum(visited.values())
def _run_tests(fold, tests):
return fold(t(y, x) for x, t, y in tests)
class File(object, metaclass=abc.ABCMeta):
if hasattr(magic, 'open'): # use Magic-file-extensions from file
@classmethod
def guess_file_type(self, path):
if not hasattr(self, '_mimedb'):
self._mimedb = magic.open(magic.NONE)
self._mimedb.load()
return self._mimedb.file(
path.encode('utf-8', errors='surrogateescape')
)
@classmethod
def guess_encoding(self, path):
if not hasattr(self, '_mimedb_encoding'):
self._mimedb_encoding = magic.open(magic.MAGIC_MIME_ENCODING)
self._mimedb_encoding.load()
return self._mimedb_encoding.file(path)
else: # use python-magic
@classmethod
def guess_file_type(self, path):
if not hasattr(self, '_mimedb'):
self._mimedb = magic.Magic()
return maybe_decode(self._mimedb.from_file(path))
@classmethod
def guess_encoding(self, path):
if not hasattr(self, '_mimedb_encoding'):
self._mimedb_encoding = magic.Magic(mime_encoding=True)
return maybe_decode(self._mimedb_encoding.from_file(path))
def __init__(self, container=None):
self._container = container
def __repr__(self):
return '<%s %s>' % (self.__class__, self.name)
# This should return a path that allows to access the file content
@property
@abc.abstractmethod
def path(self):
raise NotImplementedError()
# Remove any temporary data associated with the file. The function
# should be idempotent and work during the destructor.
def cleanup(self):
if hasattr(self, '_as_container'):
del self._as_container
def __del__(self):
self.cleanup()
FILE_EXTENSION_SUFFIX = None
FILE_TYPE_RE = None
FILE_TYPE_HEADER_PREFIX = None
@classmethod
def recognizes(cls, file):
"""Check if a file's type matches the one represented by this class.
The default test returns True if the file matches these tests:
(cls.FILE_TYPE_RE OR
cls.FILE_TYPE_HEADER_PREFIX) AND
(cls.FILE_EXTENSION_SUFFIX)
If any test is None then the test is ignored and effectively deleted
from the above definition.
By default, the tests are all None and the test returns False for all
files. Subclasses may override them with specific values, or override
this method to implement a totally different test.
"""
# The structure below allows us to construct a boolean tree of tests
# that can be combined with all() and any(). Tests that are not defined
# for a class are filtered out, so that we don't get into a "vacuous
# truth" situation like a naive all([]) invocation would give.
file_type_tests = [
test
for test in (
(
cls.FILE_TYPE_RE,
lambda m, t: t.search(m),
file.magic_file_type,
),
(
cls.FILE_TYPE_HEADER_PREFIX,
bytes.startswith,
file.file_header,
),
)
if test[0]
] # filter out undefined tests
all_tests = [
test
for test in (
(cls.FILE_EXTENSION_SUFFIX, str.endswith, file.name),
(file_type_tests, _run_tests, any),
)
if test[0]
] # filter out undefined tests, inc. file_type_tests if it's empty
return _run_tests(all, all_tests) if all_tests else False
ENABLE_FALLBACK_RECOGONIZES = True
FALLBACK_FILE_EXTENSION_SUFFIX = None
FALLBACK_FILE_TYPE_HEADER_PREFIX = None
@classmethod
def fallback_recognizes(cls, file):
"""This is checked if the file could not be identified by recognizes().
This helps to work around bugs in file(1), see Debian bug #876316.
The default test returns True if the file matches these tests:
(cls.FALLBACK_FILE_EXTENSION_SUFFIX AND cls.FILE_EXTENSION_SUFFIX) AND
(cls.FALLBACK_FILE_TYPE_HEADER_PREFIX AND cls.FILE_TYPE_HEADER_PREFIX)
We also AND-compare with the non-fallback versions to ensure that
subclasses don't "accidentally match" (e.g. IpkFile vs GzipFile).
"""
if cls.recognizes.__func__ != File.recognizes.__func__:
# If the class has overridden the default recognizes() then the
# logic below about AND-comparing with the non-fallback versions is
# not valid, they have to re-implement it
return False
if not cls.ENABLE_FALLBACK_RECOGONIZES:
return False
all_tests = [
test
for test in (
(cls.FALLBACK_FILE_EXTENSION_SUFFIX, str.endswith, file.name),
(cls.FILE_EXTENSION_SUFFIX, str.endswith, file.name),
(
cls.FALLBACK_FILE_TYPE_HEADER_PREFIX,
bytes.startswith,
file.file_header,
),
(
cls.FILE_TYPE_HEADER_PREFIX,
bytes.startswith,
file.file_header,
),
)
if test[0]
] # filter out undefined tests, inc. file_type_tests if it's empty
return _run_tests(all, all_tests) if all_tests else False
# This might be different from path and is used to do file extension matching
@property
def name(self):
return self._name
@property
def container(self):
return self._container
@property
def as_container(self):
if not hasattr(self.__class__, 'CONTAINER_CLASS'):
if hasattr(self, '_other_file'):
return self._other_file.__class__.CONTAINER_CLASS(self)
return None
if not hasattr(self, '_as_container'):
logger.debug(
'instantiating %s for %s', self.__class__.CONTAINER_CLASS, self
)
try:
self._as_container = self.__class__.CONTAINER_CLASS(self)
except RequiredToolNotFound:
return None
logger.debug(
"Returning a %s for %s",
self._as_container.__class__.__name__,
self,
)
return self._as_container
@property
def progress_name(self):
x = self._name
return x[1:] if x.startswith('./') else x
@property
def magic_file_type(self):
if not hasattr(self, '_magic_file_type'):
self._magic_file_type = File.guess_file_type(self.path)
return self._magic_file_type
@property
def file_header(self):
if not hasattr(self, '_file_header'):
with open(self.path, 'rb') as f:
self._file_header = f.read(16)
return self._file_header
@property
def file_type(self):
for x, y in (
(self.is_device, "device"),
(self.is_symlink, "symlink"),
(self.is_directory, "directory"),
):
if x():
return y
return "file"
if tlsh:
@property
def fuzzy_hash(self):
if not hasattr(self, '_fuzzy_hash'):
# tlsh is not meaningful with files smaller than 512 bytes
if os.stat(self.path).st_size >= 512:
h = tlsh.Tlsh()
with open(self.path, 'rb') as f:
for buf in iter(lambda: f.read(32768), b''):
h.update(buf)
h.final()
try:
self._fuzzy_hash = h.hexdigest()
except ValueError:
# File must contain a certain amount of randomness.
self._fuzzy_hash = None
else:
self._fuzzy_hash = None
return self._fuzzy_hash
    @abc.abstractmethod
    def is_directory(self):
        raise NotImplementedError()
    @abc.abstractmethod
    def is_symlink(self):
        raise NotImplementedError()
    @abc.abstractmethod
    def is_device(self):
        raise NotImplementedError()
def compare_bytes(self, other, source=None):
from .compare import compare_binary_files
# Don't attempt to compare directories with any other type as binaries
if os.path.isdir(self.path) or os.path.isdir(other.path):
return Difference.from_text(
"type: {}".format(self.file_type),
"type: {}".format(other.file_type),
self.name,
other.name,
source,
)
return compare_binary_files(self, other, source)
def _compare_using_details(self, other, source):
details = []
difference = Difference(None, self.name, other.name, source=source)
if hasattr(self, 'compare_details'):
details.extend(self.compare_details(other, source))
if self.as_container:
if self.as_container.auto_diff_metadata:
details.extend(
[
Difference.from_text(
self.magic_file_type,
other.magic_file_type,
self,
other,
source='filetype from file(1)',
),
Difference.from_text(
self.__class__.__name__,
other.__class__.__name__,
self,
other,
source='filetype from diffoscope',
),
]
)
# Don't recurse forever on archive quines, etc.
depth = self._as_container.depth
no_recurse = depth >= Config().max_container_depth
if no_recurse:
msg = "Reached max container depth ({})".format(depth)
logger.debug(msg)
difference.add_comment(msg)
details.extend(
self.as_container.compare(
other.as_container, no_recurse=no_recurse
)
)
details = [x for x in details if x]
if not details:
return None
difference.add_details(details)
return difference
def has_same_content_as(self, other):
logger.debug('File.has_same_content: %s %s', self, other)
if os.path.isdir(self.path) or os.path.isdir(other.path):
return False
# try comparing small files directly first
try:
my_size = os.path.getsize(self.path)
other_size = os.path.getsize(other.path)
except OSError:
# files not readable (e.g. broken symlinks) or something else,
# just assume they are different
return False
if my_size == other_size and my_size <= SMALL_FILE_THRESHOLD:
try:
with profile('command', 'cmp (internal)'):
with open(self.path, 'rb') as file1, open(
other.path, 'rb'
) as file2:
return file1.read() == file2.read()
except OSError:
# one or both files could not be opened for some reason,
# assume they are different
return False
return self.cmp_external(other)
@tool_required('cmp')
def cmp_external(self, other):
return (
subprocess.call(
('cmp', '-s', self.path, other.path),
shell=False,
close_fds=True,
)
== 0
)
# To be specialized directly, or by implementing compare_details
def compare(self, other, source=None):
if hasattr(self, 'compare_details') or self.as_container:
try:
difference = self._compare_using_details(other, source)
# no differences detected inside? let's at least do a binary diff
if difference is None:
difference = self.compare_bytes(other, source=source)
if difference is None:
return None
try:
infix = type(self).DESCRIPTION
except AttributeError:
infix = 'this file format'
suffix = ''
if self.magic_file_type != 'data':
suffix = ' file(1) reports: {}'.format(
self.magic_file_type
)
difference.add_comment(
"Format-specific differences are supported for {} but "
"no file-specific differences were detected; falling "
"back to a binary diff.{}".format(infix, suffix)
)
except subprocess.CalledProcessError as e:
difference = self.compare_bytes(other, source=source)
if e.output:
output = re.sub(r'^', ' ', e.output, flags=re.MULTILINE)
else:
output = '<none>'
cmd = ' '.join(e.cmd)
if difference is None:
return None
difference.add_comment(
"Command `%s` exited with %d. Output:\n%s"
% (cmd, e.returncode, output)
)
except RequiredToolNotFound as e:
difference = self.compare_bytes(other, source=source)
if difference is None:
return None
difference.add_comment(
"'%s' not available in path. Falling back to binary comparison."
% e.command
)
package = e.get_package()
if package:
difference.add_comment(
"Install '%s' to get a better output." % package
)
except OutputParsingError as e:
difference = self.compare_bytes(other, source=source)
if difference is None:
return None
difference.add_comment(
"Error parsing output of `%s` for %s"
% (e.command, e.object_class)
)
except ContainerExtractionError as e:
difference = self.compare_bytes(other, source=source)
if difference is None:
return None
difference.add_comment(
"Error extracting '{}', falling back to "
"binary comparison ('{}')".format(
e.pathname, e.wrapped_exc
)
)
return difference
return self.compare_bytes(other, source)
def maybe_decode(s):
"""
Helper function to convert to bytes if necessary.
"""
if type(s) is bytes:
return s.decode('utf-8')
return s
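# Illustrative sketch (added; not part of diffoscope): a minimal subclass using
# the declarative matching hooks documented in File.recognizes(). The values
# below are assumptions for the example, not diffoscope's real PNG comparator.
# class PngFile(File):
#     DESCRIPTION = "PNG images"
#     FILE_EXTENSION_SUFFIX = '.png'
#     FILE_TYPE_RE = re.compile(r'^PNG image data\b')
#     FILE_TYPE_HEADER_PREFIX = b'\x89PNG'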
|
nilq/baby-python
|
python
|
# python3
"""Parse a pyi file using typed_ast."""
import hashlib
import sys
import typing
from typing import Any, List, Optional, Tuple, Union
import dataclasses
from pytype import utils
from pytype.ast import debug
from pytype.pyi import classdef
from pytype.pyi import conditions
from pytype.pyi import definitions
from pytype.pyi import function
from pytype.pyi import modules
from pytype.pyi import types
from pytype.pyi import visitor
from pytype.pytd import pep484
from pytype.pytd import pytd
from pytype.pytd import pytd_utils
from pytype.pytd import visitors
from pytype.pytd.codegen import decorate
from pytype.pytd.codegen import pytdgen
from typed_ast import ast3
_DEFAULT_PLATFORM = "linux"
# reexport as parser.ParseError
ParseError = types.ParseError
_TYPEVAR_IDS = ("TypeVar", "typing.TypeVar")
_PARAMSPEC_IDS = (
"ParamSpec", "typing.ParamSpec", "typing_extensions.ParamSpec")
_TYPING_NAMEDTUPLE_IDS = ("NamedTuple", "typing.NamedTuple")
_COLL_NAMEDTUPLE_IDS = ("namedtuple", "collections.namedtuple")
_TYPEDDICT_IDS = (
"TypedDict", "typing.TypedDict", "typing_extensions.TypedDict")
_NEWTYPE_IDS = ("NewType", "typing.NewType")
#------------------------------------------------------
# imports
def _tuple_of_import(alias: ast3.AST) -> Tuple[str, str]:
"""Convert a typedast import into one that add_import expects."""
if alias.asname is None:
return alias.name
return alias.name, alias.asname
def _import_from_module(module: Optional[str], level: int) -> str:
"""Convert a typedast import's 'from' into one that add_import expects."""
if module is None:
return {1: "__PACKAGE__", 2: "__PARENT__"}[level]
prefix = "." * level
return prefix + module
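# For example (values derived from the mapping above):
#   _import_from_module(None, 1)  -> "__PACKAGE__"   # "from . import x"
#   _import_from_module(None, 2)  -> "__PARENT__"    # "from .. import x"
#   _import_from_module("a.b", 2) -> "..a.b"         # "from ..a.b import x"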
#------------------------------------------------------
# typevars
@dataclasses.dataclass
class _TypeVar:
"""Internal representation of typevars."""
name: str
bound: Optional[str]
constraints: List[Any]
@classmethod
def from_call(cls, node: ast3.AST) -> "_TypeVar":
"""Construct a _TypeVar from an ast.Call node."""
name, *constraints = node.args
bound = None
# 'bound' is the only keyword argument we currently use.
# TODO(rechen): We should enforce the PEP 484 guideline that
# len(constraints) != 1. However, this guideline is currently violated
# in typeshed (see https://github.com/python/typeshed/pull/806).
kws = {x.arg for x in node.keywords}
extra = kws - {"bound", "covariant", "contravariant"}
if extra:
raise ParseError("Unrecognized keyword(s): %s" % ", ".join(extra))
for kw in node.keywords:
if kw.arg == "bound":
bound = kw.value
return cls(name, bound, constraints)
@dataclasses.dataclass
class _ParamSpec:
"""Internal representation of ParamSpecs."""
name: str
@classmethod
def from_call(cls, node: ast3.AST) -> "_ParamSpec":
name, = node.args
return cls(name)
#------------------------------------------------------
# pytd utils
#------------------------------------------------------
# Main tree visitor and generator code
def _attribute_to_name(node: ast3.Attribute) -> ast3.Name:
"""Recursively convert Attributes to Names."""
val = node.value
if isinstance(val, ast3.Name):
prefix = val.id
elif isinstance(val, ast3.Attribute):
prefix = _attribute_to_name(val).id
elif isinstance(val, (pytd.NamedType, pytd.Module)):
prefix = val.name
else:
msg = "Unexpected attribute access on %r [%s]" % (val, type(val))
raise ParseError(msg)
return ast3.Name(prefix + "." + node.attr)
class AnnotationVisitor(visitor.BaseVisitor):
"""Converts typed_ast annotations to pytd."""
def show(self, node):
print(debug.dump(node, ast3, include_attributes=False))
def convert_late_annotation(self, annotation):
try:
# Late annotations may need to be parsed into an AST first
if annotation.isalpha():
return self.defs.new_type(annotation)
a = ast3.parse(annotation)
# Unwrap the module the parser puts around the source string
typ = a.body[0].value
return self.visit(typ)
except ParseError as e:
      # Clear out position information since it is relative to the type comment.
e.clear_position()
raise e
def visit_Tuple(self, node):
return tuple(node.elts)
def visit_List(self, node):
return list(node.elts)
def visit_Name(self, node):
if self.subscripted and (node is self.subscripted[-1]):
# This is needed because
# Foo[X]
# parses to
# Subscript(Name(id = Foo), Name(id = X))
# so we see visit_Name(Foo) before visit_Subscript(Foo[X]).
# If Foo resolves to a generic type we want to know if it is being passed
# params in this context (in which case we simply resolve the type here,
# and create a new type when we get the param list in visit_Subscript) or
# if it is just being used as a bare Foo, in which case we need to create
# the new type Foo[Any] below.
return self.defs.resolve_type(node.id)
else:
return self.defs.new_type(node.id)
def enter_Subscript(self, node):
if isinstance(node.value, ast3.Attribute):
node.value = _attribute_to_name(node.value).id
self.subscripted.append(node.value)
def visit_Subscript(self, node):
params = node.slice.value
if type(params) is not tuple: # pylint: disable=unidiomatic-typecheck
params = (params,)
return self.defs.new_type(node.value, params)
def leave_Subscript(self, node):
self.subscripted.pop()
def visit_Attribute(self, node):
annotation = _attribute_to_name(node).id
return self.defs.new_type(annotation)
def visit_BinOp(self, node):
if isinstance(node.op, ast3.BitOr):
return self.defs.new_type("typing.Union", [node.left, node.right])
else:
raise ParseError(f"Unexpected operator {node.op}")
def visit_BoolOp(self, node):
if isinstance(node.op, ast3.Or):
raise ParseError("Deprecated syntax `x or y`; use `Union[x, y]` instead")
else:
raise ParseError(f"Unexpected operator {node.op}")
def _flatten_splices(body: List[Any]) -> List[Any]:
"""Flatten a list with nested Splices."""
if not any(isinstance(x, Splice) for x in body):
return body
out = []
for x in body:
if isinstance(x, Splice):
# This technically needn't be recursive because of how we build Splices
# but better not to have the class assume that.
out.extend(_flatten_splices(x.body))
else:
out.append(x)
return out
class Splice:
"""Splice a list into a node body."""
def __init__(self, body):
self.body = _flatten_splices(body)
def __str__(self):
return "Splice(\n" + ",\n ".join([str(x) for x in self.body]) + "\n)"
def __repr__(self):
return str(self)
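# Sketch of how Splice composes (hypothetical values): a visitor can return
# Splice([a, b]) for a statement that expands to several definitions, and
# _flatten_splices([x, Splice([a, b]), y]) then yields [x, a, b, y].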
class GeneratePytdVisitor(visitor.BaseVisitor):
"""Converts a typed_ast tree to a pytd tree."""
def __init__(self, src, filename, module_name, version, platform):
defs = definitions.Definitions(modules.Module(filename, module_name))
super().__init__(defs=defs, filename=filename)
self.src_code = src
self.module_name = module_name
self.version = version
self.platform = platform or _DEFAULT_PLATFORM
self.level = 0
self.in_function = False # pyi will not have nested defs
self.annotation_visitor = AnnotationVisitor(defs=defs, filename=filename)
def show(self, node):
print(debug.dump(node, ast3, include_attributes=False))
def convert_node(self, node):
# Converting a node via a visitor will convert the subnodes, but if the
# argument node itself needs conversion, we need to use the pattern
# node = annotation_visitor.visit(node)
# However, the AnnotationVisitor returns None if it does not trigger on the
# root node it is passed, so call it via this method instead.
ret = self.annotation_visitor.visit(node)
return ret if ret is not None else node
def convert_node_annotations(self, node):
"""Transform type annotations to pytd."""
if getattr(node, "annotation", None):
node.annotation = self.convert_node(node.annotation)
elif getattr(node, "type_comment", None):
node.type_comment = self.annotation_visitor.convert_late_annotation(
node.type_comment)
def resolve_name(self, name):
"""Resolve an alias or create a NamedType."""
return self.defs.type_map.get(name) or pytd.NamedType(name)
def visit_Module(self, node):
node.body = _flatten_splices(node.body)
return self.defs.build_type_decl_unit(node.body)
def visit_Pass(self, node):
return self.defs.ELLIPSIS
def visit_Expr(self, node):
# Handle some special cases of expressions that can occur in class and
# module bodies.
if node.value == self.defs.ELLIPSIS:
# class x: ...
return node.value
elif types.Constant.is_str(node.value):
# docstrings
return Splice([])
def visit_arg(self, node):
self.convert_node_annotations(node)
def _preprocess_decorator_list(self, node):
decorators = []
for d in node.decorator_list:
if isinstance(d, ast3.Name):
decorators.append(d.id)
elif isinstance(d, ast3.Attribute):
decorators.append(f"{d.value.id}.{d.attr}")
else:
raise ParseError(f"Unexpected decorator: {d}")
node.decorator_list = decorators
def _preprocess_function(self, node):
node.args = self.convert_node(node.args)
node.returns = self.convert_node(node.returns)
self._preprocess_decorator_list(node)
node.body = _flatten_splices(node.body)
def visit_FunctionDef(self, node):
self._preprocess_function(node)
return function.NameAndSig.from_function(node, False)
def visit_AsyncFunctionDef(self, node):
self._preprocess_function(node)
return function.NameAndSig.from_function(node, True)
def new_alias_or_constant(self, name, value):
"""Build an alias or constant."""
# This is here rather than in _Definitions because we need to build a
# constant or alias from a partially converted typed_ast subtree.
if name == "__slots__":
if not (isinstance(value, ast3.List) and
all(types.Constant.is_str(x) for x in value.elts)):
raise ParseError("__slots__ must be a list of strings")
return types.SlotDecl(tuple(x.value for x in value.elts))
elif isinstance(value, types.Constant):
return pytd.Constant(name, value.to_pytd())
elif isinstance(value, types.Ellipsis):
return pytd.Constant(name, pytd.AnythingType())
elif isinstance(value, pytd.NamedType):
res = self.defs.resolve_type(value.name)
return pytd.Alias(name, res)
elif isinstance(value, ast3.List):
if name != "__all__":
raise ParseError("Only __slots__ and __all__ can be literal lists")
return pytd.Constant(name, pytdgen.pytd_list("str"))
elif isinstance(value, ast3.Tuple):
# TODO(mdemello): Consistent with the current parser, but should it
# properly be Tuple[Type]?
return pytd.Constant(name, pytd.NamedType("tuple"))
elif isinstance(value, ast3.Name):
value = self.defs.resolve_type(value.id)
return pytd.Alias(name, value)
else:
# TODO(mdemello): add a case for TypeVar()
# Convert any complex type aliases
value = self.convert_node(value)
return pytd.Alias(name, value)
def enter_AnnAssign(self, node):
self.convert_node_annotations(node)
def visit_AnnAssign(self, node):
name = node.target.id
typ = node.annotation
val = self.convert_node(node.value)
if val and not types.is_any(val):
msg = f"Default value for {name}: {typ.name} can only be '...', got {val}"
raise ParseError(msg)
return pytd.Constant(name, typ, val)
def visit_Assign(self, node):
targets = node.targets
if len(targets) > 1 or isinstance(targets[0], ast3.Tuple):
msg = "Assignments must be of the form 'name = value'"
raise ParseError(msg)
self.convert_node_annotations(node)
target = targets[0]
name = target.id
# Record and erase TypeVar and ParamSpec definitions.
if isinstance(node.value, _TypeVar):
self.defs.add_type_var(name, node.value)
return Splice([])
elif isinstance(node.value, _ParamSpec):
self.defs.add_param_spec(name, node.value)
return Splice([])
if node.type_comment:
      # TODO(mdemello): can pyi files have aliases with type comments?
ret = pytd.Constant(name, node.type_comment)
else:
ret = self.new_alias_or_constant(name, node.value)
if self.in_function:
# Should never happen, but this keeps pytype happy.
if isinstance(ret, types.SlotDecl):
raise ParseError("Cannot change the type of __slots__")
return function.Mutator(name, ret.type)
if self.level == 0:
self.defs.add_alias_or_constant(ret)
return ret
def visit_ClassDef(self, node):
class_name = node.name
self.defs.type_map[class_name] = pytd.NamedType(class_name)
# Convert decorators to named types
self._preprocess_decorator_list(node)
decorators = classdef.get_decorators(
node.decorator_list, self.defs.type_map)
self.annotation_visitor.visit(node.bases)
self.annotation_visitor.visit(node.keywords)
defs = _flatten_splices(node.body)
return self.defs.build_class(
class_name, node.bases, node.keywords, decorators, defs)
def enter_If(self, node):
# Evaluate the test and preemptively remove the invalid branch so we don't
# waste time traversing it.
node.test = conditions.evaluate(node.test, self.version, self.platform)
if not isinstance(node.test, bool):
raise ParseError("Unexpected if statement" + debug.dump(node, ast3))
if node.test:
node.orelse = []
else:
node.body = []
def visit_If(self, node):
if not isinstance(node.test, bool):
raise ParseError("Unexpected if statement" + debug.dump(node, ast3))
if node.test:
return Splice(node.body)
else:
return Splice(node.orelse)
def visit_Import(self, node):
if self.level > 0:
raise ParseError("Import statements need to be at module level")
imports = [_tuple_of_import(x) for x in node.names]
self.defs.add_import(None, imports)
return Splice([])
def visit_ImportFrom(self, node):
if self.level > 0:
raise ParseError("Import statements need to be at module level")
imports = [_tuple_of_import(x) for x in node.names]
module = _import_from_module(node.module, node.level)
self.defs.add_import(module, imports)
return Splice([])
def _convert_newtype_args(self, node: ast3.AST):
if len(node.args) != 2:
msg = "Wrong args: expected NewType(name, [(field, type), ...])"
raise ParseError(msg)
name, typ = node.args
typ = self.convert_node(typ)
node.args = [name.s, typ]
def _convert_typing_namedtuple_args(self, node: ast3.AST):
# TODO(mdemello): handle NamedTuple("X", a=int, b=str, ...)
if len(node.args) != 2:
msg = "Wrong args: expected NamedTuple(name, [(field, type), ...])"
raise ParseError(msg)
name, fields = node.args
fields = self.convert_node(fields)
fields = [(types.string_value(n), t) for (n, t) in fields]
node.args = [name.s, fields]
def _convert_collections_namedtuple_args(self, node: ast3.AST):
if len(node.args) != 2:
msg = "Wrong args: expected namedtuple(name, [field, ...])"
raise ParseError(msg)
name, fields = node.args
fields = self.convert_node(fields)
fields = [(types.string_value(n), pytd.AnythingType()) for n in fields]
node.args = [name.s, fields]
def _convert_typevar_args(self, node):
self.annotation_visitor.visit(node.keywords)
if not node.args:
raise ParseError("Missing arguments to TypeVar")
name, *rest = node.args
if not isinstance(name, ast3.Str):
raise ParseError("Bad arguments to TypeVar")
node.args = [name.s] + [self.convert_node(x) for x in rest]
# Special-case late types in bound since typeshed uses it.
for kw in node.keywords:
if kw.arg == "bound":
if isinstance(kw.value, types.Constant):
val = types.string_value(kw.value, context="TypeVar bound")
kw.value = self.annotation_visitor.convert_late_annotation(val)
def _convert_paramspec_args(self, node):
name, = node.args
node.args = [name.s]
def _convert_typed_dict_args(self, node: ast3.AST):
# TODO(b/157603915): new_typed_dict currently doesn't do anything with the
# args, so we don't bother converting them fully.
msg = "Wrong args: expected TypedDict(name, {field: type, ...})"
if len(node.args) != 2:
raise ParseError(msg)
name, fields = node.args
if not (isinstance(name, ast3.Str) and isinstance(fields, ast3.Dict)):
raise ParseError(msg)
def enter_Call(self, node):
# Some function arguments need to be converted from strings to types when
# entering the node, rather than bottom-up when they would already have been
# converted to types.Constant.
# We also convert some literal string nodes that are not meant to be types
# (e.g. the first arg to TypeVar()) to their bare values since we are
# passing them to internal functions directly in visit_Call.
if isinstance(node.func, ast3.Attribute):
node.func = _attribute_to_name(node.func)
if node.func.id in _TYPEVAR_IDS:
self._convert_typevar_args(node)
elif node.func.id in _PARAMSPEC_IDS:
self._convert_paramspec_args(node)
elif node.func.id in _TYPING_NAMEDTUPLE_IDS:
self._convert_typing_namedtuple_args(node)
elif node.func.id in _COLL_NAMEDTUPLE_IDS:
self._convert_collections_namedtuple_args(node)
elif node.func.id in _TYPEDDICT_IDS:
self._convert_typed_dict_args(node)
elif node.func.id in _NEWTYPE_IDS:
return self._convert_newtype_args(node)
def visit_Call(self, node):
if node.func.id in _TYPEVAR_IDS:
if self.level > 0:
raise ParseError("TypeVars need to be defined at module level")
return _TypeVar.from_call(node)
elif node.func.id in _PARAMSPEC_IDS:
return _ParamSpec.from_call(node)
elif node.func.id in _TYPING_NAMEDTUPLE_IDS + _COLL_NAMEDTUPLE_IDS:
return self.defs.new_named_tuple(*node.args)
elif node.func.id in _TYPEDDICT_IDS:
return self.defs.new_typed_dict(*node.args, total=False)
elif node.func.id in _NEWTYPE_IDS:
return self.defs.new_new_type(*node.args)
# Convert all other calls to NamedTypes; for example:
# * typing.pyi uses things like
# List = _Alias()
# * pytd extensions allow both
# raise Exception
# and
# raise Exception()
return pytd.NamedType(node.func.id)
def visit_Raise(self, node):
ret = self.convert_node(node.exc)
return types.Raise(ret)
# Track nesting level
def enter_FunctionDef(self, node):
self.level += 1
self.in_function = True
def leave_FunctionDef(self, node):
self.level -= 1
self.in_function = False
def enter_AsyncFunctionDef(self, node):
self.enter_FunctionDef(node)
def leave_AsyncFunctionDef(self, node):
self.leave_FunctionDef(node)
def enter_ClassDef(self, node):
self.level += 1
def leave_ClassDef(self, node):
self.level -= 1
def post_process_ast(ast, src, name=None):
"""Post-process the parsed AST."""
ast = definitions.finalize_ast(ast)
ast = ast.Visit(pep484.ConvertTypingToNative(name))
if name:
ast = ast.Replace(name=name)
ast = ast.Visit(visitors.AddNamePrefix())
else:
    # If there's no unique name, hash the source code.
ast = ast.Replace(name=hashlib.md5(src.encode("utf-8")).hexdigest())
ast = ast.Visit(visitors.StripExternalNamePrefix())
# Now that we have resolved external names, validate any class decorators that
# do code generation. (We will generate the class lazily, but we should check
# for errors at parse time so they can be reported early.)
try:
ast = ast.Visit(decorate.ValidateDecoratedClassVisitor())
except TypeError as e:
# Convert errors into ParseError. Unfortunately we no longer have location
# information if an error is raised during transformation of a class node.
raise ParseError.from_exc(e)
# Typeshed files that explicitly import and refer to "__builtin__" need to
# have that rewritten to builtins
ast = ast.Visit(visitors.RenameBuiltinsPrefix())
return ast
def _parse(src: str, feature_version: int, filename: str = ""):
"""Call the typed_ast parser with the appropriate feature version."""
try:
ast_root_node = ast3.parse(src, filename, feature_version=feature_version)
except SyntaxError as e:
raise ParseError(e.msg, line=e.lineno, filename=filename) from e
return ast_root_node
# Python version input type.
VersionType = Union[int, Tuple[int, ...]]
def _feature_version(python_version: VersionType) -> int:
"""Get the python feature version for the parser."""
def from_major(v):
# We only use this to set the feature version, and all pyi files need to
# parse as at least python 3.6
if v == 2:
return 6
    else:
      # We don't support running under host Python 2, so sys.version_info is 3.x.
      return sys.version_info.minor
if isinstance(python_version, int):
return from_major(python_version)
else:
python_version = typing.cast(Tuple[int, ...], python_version)
if len(python_version) == 1:
return from_major(python_version[0])
else:
if python_version[0] == 2:
return 6
return python_version[1]
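# Illustrative mapping, derived from the logic above (host Python assumed 3.x):
#   _feature_version(2)       -> 6
#   _feature_version((2, 7))  -> 6
#   _feature_version((3, 9))  -> 9
#   _feature_version(3)       -> sys.version_info.minor of the host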
def parse_string(
src: str,
python_version: VersionType,
name: Optional[str] = None,
filename: Optional[str] = None,
platform: Optional[str] = None
):
return parse_pyi(src, filename=filename, module_name=name,
platform=platform, python_version=python_version)
def parse_pyi(
src: str,
filename: Optional[str],
module_name: str,
python_version: VersionType,
platform: Optional[str] = None
) -> pytd.TypeDeclUnit:
"""Parse a pyi string."""
filename = filename or ""
feature_version = _feature_version(python_version)
python_version = utils.normalize_version(python_version)
root = _parse(src, feature_version, filename)
gen_pytd = GeneratePytdVisitor(
src, filename, module_name, python_version, platform)
root = gen_pytd.visit(root)
root = post_process_ast(root, src, module_name)
return root
def parse_pyi_debug(
src: str,
filename: str,
module_name: str,
python_version: VersionType,
platform: Optional[str] = None
) -> Tuple[pytd.TypeDeclUnit, GeneratePytdVisitor]:
"""Debug version of parse_pyi."""
feature_version = _feature_version(python_version)
python_version = utils.normalize_version(python_version)
root = _parse(src, feature_version, filename)
print(debug.dump(root, ast3, include_attributes=False))
gen_pytd = GeneratePytdVisitor(
src, filename, module_name, python_version, platform)
root = gen_pytd.visit(root)
print("---transformed parse tree--------------------")
print(root)
root = post_process_ast(root, src, module_name)
print("---post-processed---------------------")
print(root)
print("------------------------")
print(gen_pytd.defs.type_map)
print(gen_pytd.defs.module_path_map)
return root, gen_pytd
def canonical_pyi(pyi, python_version, multiline_args=False):
"""Rewrite a pyi in canonical form."""
ast = parse_string(pyi, python_version=python_version)
ast = ast.Visit(visitors.ClassTypeToNamedType())
ast = ast.Visit(visitors.CanonicalOrderingVisitor(sort_signatures=True))
ast.Visit(visitors.VerifyVisitor())
return pytd_utils.Print(ast, multiline_args)
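# Minimal usage sketch (not part of the module; the pyi source and module
# name are made up for illustration):
#   src = "x: int\ndef f(a: str) -> bool: ..."
#   unit = parse_string(src, python_version=(3, 8), name="mymod")
#   print(canonical_pyi(src, python_version=(3, 8)))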
|
nilq/baby-python
|
python
|
#
# Copyright 2022 Logical Clocks AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import humps
import json
from hopsworks.core import kafka_api
from hopsworks import util
class KafkaSchema:
def __init__(
self,
id=None,
subject=None,
version=None,
schema=None,
project_id=None,
type=None,
):
self._id = id
self._subject = subject
self._version = version
self._schema = schema
self._kafka_api = kafka_api.KafkaApi(project_id)
@classmethod
def from_response_json(cls, json_dict, project_id):
json_decamelized = humps.decamelize(json_dict)
if "count" not in json_decamelized:
return cls(**json_decamelized, project_id=project_id)
elif json_decamelized["count"] == 0:
return []
else:
            return [
                cls(**kafka_schema, project_id=project_id)
                for kafka_schema in json_decamelized["items"]
            ]
def update_from_response_json(self, json_dict):
json_decamelized = humps.decamelize(json_dict)
self.__init__(**json_decamelized)
return self
@property
def id(self):
"""Id of the kafka schema"""
return self._id
@property
def subject(self):
"""Name of the subject for the schema"""
return self._subject
@property
def version(self):
"""Version of the schema"""
return self._version
@property
def schema(self):
"""Schema definition"""
return self._schema
def delete(self):
"""Delete the schema
!!! danger "Potentially dangerous operation"
This operation deletes the schema.
# Raises
`RestAPIError`.
"""
self._kafka_api._delete_subject_version(self.subject, self.version)
def json(self):
return json.dumps(self, cls=util.Encoder)
def __str__(self):
return self.json()
def __repr__(self):
return f"KafkaSchema({self._subject!r}, {self._version!r})"
|
nilq/baby-python
|
python
|
import json
import os
import threading
import time
from functools import wraps
import speech_recognition as sr
class BaseCredentials:
def __init__(self):
pass
def __call__(self):
raise NotImplementedError
@property
def name(self):
raise NotImplementedError
class GoogleCloudCredentials(BaseCredentials):
def __init__(self, credentials=os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', None)):
super().__init__()
self.credentials = credentials
if self.credentials and os.path.isfile(self.credentials):
with open(self.credentials, 'r') as f:
self.credentials = json.dumps(json.load(f))
def __call__(self):
return {'credentials_json': self.credentials}
@property
def name(self):
return 'Google Cloud Speech'
class MicrosoftBingCredentials(BaseCredentials):
def __init__(self, key=os.environ.get('BING_KEY', None)):
super().__init__()
self.key = key
def __call__(self):
return {'key': self.key}
@property
def name(self):
return 'Microsoft Bing Voice Recognition'
class IBMCredentials(BaseCredentials):
def __init__(self, username=os.environ.get('IBM_USERNAME', None), password=os.environ.get('IBM_PASSWORD', None)):
super().__init__()
self.username = username
self.password = password
def __call__(self):
return {'username': self.username, 'password': self.password}
@property
def name(self):
return 'IBM Speech to Text'
def rate_limited(max_per_second):
"""Rate-limits the decorated function locally, for one process.
from: https://gist.github.com/gregburek/1441055 """
lock = threading.Lock()
min_interval = 1.0 / max_per_second
def decorate(func):
last_time_called = time.perf_counter()
@wraps(func)
def rate_limited_function(*args, **kwargs):
lock.acquire()
nonlocal last_time_called
try:
elapsed = time.perf_counter() - last_time_called
left_to_wait = min_interval - elapsed
if left_to_wait > 0:
time.sleep(left_to_wait)
return func(*args, **kwargs)
finally:
last_time_called = time.perf_counter()
lock.release()
return rate_limited_function
return decorate
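# Usage sketch for the decorator above (hypothetical function; allows at most
# two calls per second across all threads of this process):
#   @rate_limited(2)
#   def transcribe_chunk(chunk):
#       ...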
class SpeechRecognitionAPI:
def __init__(self, api='gcp', lang='pt-BR', **kwargs):
self._r = sr.Recognizer()
self.lang = lang
if api == 'gcp':
            self.credentials = GoogleCloudCredentials(**kwargs)
self._recognize = self._r.recognize_google_cloud
elif api == 'bing':
            self.credentials = MicrosoftBingCredentials(**kwargs)
self._recognize = self._r.recognize_bing
        elif api == 'ibm':
            self.credentials = IBMCredentials(**kwargs)
            self._recognize = self._r.recognize_ibm
        else:
            raise ValueError(f"Unsupported api {api!r}; expected 'gcp', 'bing', or 'ibm'")
@rate_limited(5)
def recognize(self, audio, safe=True):
if not isinstance(audio, sr.AudioData):
with sr.AudioFile(audio) as source:
audio = self._r.record(source)
try:
return self._recognize(audio, language=self.lang, **self.credentials())
except sr.UnknownValueError as e:
if not safe:
raise e
return "{} could not understand audio".format(self.credentials.name)
except sr.RequestError as e:
if not safe:
raise e
return "Could not request results from {} service; {}".format(self.credentials.name, e)
|
nilq/baby-python
|
python
|
#
# Copyright (c) 2018 Wind River Systems, Inc.
#
# SPDX-License-Identifier: Apache-2.0
#
from django.conf.urls import url
from starlingx_dashboard.dashboards.dc_admin.dc_software_management.views \
import CreateCloudPatchConfigView
from starlingx_dashboard.dashboards.dc_admin.dc_software_management.views \
import CreateCloudPatchStrategyView
from starlingx_dashboard.dashboards.dc_admin.dc_software_management.views \
import DetailPatchView
from starlingx_dashboard.dashboards.dc_admin.dc_software_management.views \
import EditCloudPatchConfigView
from starlingx_dashboard.dashboards.dc_admin.dc_software_management.views \
import IndexView
from starlingx_dashboard.dashboards.dc_admin.dc_software_management.views \
import UploadPatchView
urlpatterns = [
url(r'^$', IndexView.as_view(), name='index'),
url(r'^(?P<patch_id>[^/]+)/patchdetail/$',
DetailPatchView.as_view(), name='dc_patchdetail'),
url(r'^dc_patchupload/$', UploadPatchView.as_view(),
name='dc_patchupload'),
url(r'^createcloudpatchstrategy/$', CreateCloudPatchStrategyView.as_view(),
name='createcloudpatchstrategy'),
url(r'^createcloudpatchconfig/$', CreateCloudPatchConfigView.as_view(),
name='createcloudpatchconfig'),
url(r'^(?P<subcloud>[^/]+)/editcloudpatchconfig/$',
EditCloudPatchConfigView.as_view(),
name='editcloudpatchconfig'),
]
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.7 on 2021-09-24 18:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('users', '0001_initial'),
('recipes', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='recipe',
name='likes',
field=models.ManyToManyField(blank=True, related_name='likes', to='users.Profile'),
),
migrations.AddField(
model_name='recipe',
name='submitted_by',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.profile'),
),
migrations.AddField(
model_name='rating',
name='recipe',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='ratings', to='recipes.recipe'),
),
migrations.AddField(
model_name='rating',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='users.profile'),
),
migrations.AddField(
model_name='image',
name='album',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='recipes.imagealbum'),
),
migrations.AddField(
model_name='image',
name='submitted_by',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='users.profile'),
),
]
|
nilq/baby-python
|
python
|
from minpiler.std import M
x: int
y: str
z: int = 20
M.print(z)
# > print 20
|
nilq/baby-python
|
python
|