code
stringlengths 22
1.05M
| apis
listlengths 1
3.31k
| extract_api
stringlengths 75
3.25M
|
|---|---|---|
from textwrap import dedent
from tests import check_as_expected
# Dotted-path prefix of the helper module whose message counts are checked.
ROOT = 'superhelp.helpers.sorting_reversing_help.'
def test_misc():
    """Check sorting/reversing helpers fire the expected number of times.

    Each test_conf entry pairs a code snippet with a mapping from helper
    dotted path to the number of messages that helper should produce for
    that snippet. The config is checked both with and without executing
    the snippets.
    """
    test_conf = [
        (
            dedent("""\
                pet = 'cat'
                """),
            {
                ROOT + 'sorting_reversing_overview': 0,
                ROOT + 'list_sort_as_value': 0,
            }
        ),
        (
            dedent("""\
                pets = sorted(['cat', 'dog', 'budgie'])
                """),
            {
                ROOT + 'sorting_reversing_overview': 1,
                ROOT + 'list_sort_as_value': 0,
            }
        ),
        (
            # Two sorts -> two overview messages
            dedent("""\
                my_pets = sorted(['cat', 'dog', 'budgie'])
                your_pets = sorted(['cat', 'dog', 'budgie'])
                """),
            {
                ROOT + 'sorting_reversing_overview': 2,
                ROOT + 'list_sort_as_value': 0,
            }
        ),
        (
            # reversed() counts towards the overview as well
            dedent("""\
                my_pets = reversed(['cat', 'dog', 'budgie'])
                your_pets = sorted(['cat', 'dog', 'budgie'])
                """),
            {
                ROOT + 'sorting_reversing_overview': 2,
                ROOT + 'list_sort_as_value': 0,
            }
        ),
        (
            # Inside a loop the repeated sorts collapse to one message
            dedent("""\
                for i in range(2):
                    my_pets = reversed(['cat', 'dog', 'budgie'])
                    your_pets = sorted(['cat', 'dog', 'budgie'])
                """),
            {
                ROOT + 'sorting_reversing_overview': 1,
                ROOT + 'list_sort_as_value': 0,
            }
        ),
        (
            # Assigning list.sort()'s None return should trigger the warning
            dedent("""\
                demo = [1, 2].sort()
                """),
            {
                ROOT + 'sorting_reversing_overview': 1,
                ROOT + 'list_sort_as_value': 1,
            }
        ),
        (
            dedent("""\
                for i in range(2):
                    demo = [1, 2].sort()
                """),
            {
                ROOT + 'sorting_reversing_overview': 1,
                ROOT + 'list_sort_as_value': 1,
            }
        ),
        (
            dedent("""\
                my_pets = reversed(['cat', 'dog', 'budgie'])
                for i in range(2):
                    demo = [1, 2].sort()
                """),
            {
                ROOT + 'sorting_reversing_overview': 2,
                ROOT + 'list_sort_as_value': 1,
            }
        ),
        (
            # Unrelated code: no sorting/reversing messages expected
            dedent("""\
                hours, mins, secs = Utils._get_time_parts_since_t1(t1)
                """),
            {
                ROOT + 'sorting_reversing_overview': 0,
                ROOT + 'list_sort_as_value': 0,
            }
        ),
    ]
    check_as_expected(test_conf, execute_code=True)
    check_as_expected(test_conf, execute_code=False)
# test_misc()
|
[
"textwrap.dedent",
"tests.check_as_expected"
] |
[((2666, 2713), 'tests.check_as_expected', 'check_as_expected', (['test_conf'], {'execute_code': '(True)'}), '(test_conf, execute_code=True)\n', (2683, 2713), False, 'from tests import check_as_expected\n'), ((2718, 2766), 'tests.check_as_expected', 'check_as_expected', (['test_conf'], {'execute_code': '(False)'}), '(test_conf, execute_code=False)\n', (2735, 2766), False, 'from tests import check_as_expected\n'), ((175, 222), 'textwrap.dedent', 'dedent', (['""" pet = \'cat\'\n """'], {}), '(" pet = \'cat\'\\n ")\n', (181, 222), False, 'from textwrap import dedent\n'), ((394, 469), 'textwrap.dedent', 'dedent', (['""" pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """'], {}), '(" pets = sorted([\'cat\', \'dog\', \'budgie\'])\\n ")\n', (400, 469), False, 'from textwrap import dedent\n'), ((641, 789), 'textwrap.dedent', 'dedent', (['""" my_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n your_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """'], {}), '(\n """ my_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n your_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """\n )\n', (647, 789), False, 'from textwrap import dedent\n'), ((948, 1098), 'textwrap.dedent', 'dedent', (['""" my_pets = reversed([\'cat\', \'dog\', \'budgie\'])\n your_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """'], {}), '(\n """ my_pets = reversed([\'cat\', \'dog\', \'budgie\'])\n your_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """\n )\n', (954, 1098), False, 'from textwrap import dedent\n'), ((1257, 1446), 'textwrap.dedent', 'dedent', (['""" for i in range(2):\n my_pets = reversed([\'cat\', \'dog\', \'budgie\'])\n your_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """'], {}), '(\n """ for i in range(2):\n my_pets = reversed([\'cat\', \'dog\', \'budgie\'])\n your_pets = sorted([\'cat\', \'dog\', \'budgie\'])\n """\n )\n', (1263, 1446), False, 'from textwrap import dedent\n'), ((1605, 1661), 'textwrap.dedent', 'dedent', (['""" demo = [1, 2].sort()\n """'], {}), "(' demo = [1, 2].sort()\\n 
')\n", (1611, 1661), False, 'from textwrap import dedent\n'), ((1833, 1937), 'textwrap.dedent', 'dedent', (['""" for i in range(2):\n demo = [1, 2].sort()\n """'], {}), '(\n """ for i in range(2):\n demo = [1, 2].sort()\n """\n )\n', (1839, 1937), False, 'from textwrap import dedent\n'), ((2096, 2257), 'textwrap.dedent', 'dedent', (['""" my_pets = reversed([\'cat\', \'dog\', \'budgie\'])\n for i in range(2):\n demo = [1, 2].sort()\n """'], {}), '(\n """ my_pets = reversed([\'cat\', \'dog\', \'budgie\'])\n for i in range(2):\n demo = [1, 2].sort()\n """\n )\n', (2102, 2257), False, 'from textwrap import dedent\n'), ((2416, 2516), 'textwrap.dedent', 'dedent', (['""" hours, mins, secs = Utils._get_time_parts_since_t1(t1)\n """'], {}), "(\n ' hours, mins, secs = Utils._get_time_parts_since_t1(t1)\\n '\n )\n", (2422, 2516), False, 'from textwrap import dedent\n')]
|
import tkinter
import time
import math
# Primitive render modes accepted by ShapeBuffer.
LINES=0
TRIANGLES=1
# Projection types accepted by Graphics.projection().
ORTOGRAPHIC=0
PERSPECTIVE=1
# Small value used to avoid division by zero and degenerate pitch angles.
EPSILON=0.0001
class BaseCamera:
    """Interface every camera passed to Graphics.camera() must implement."""

    def __init__(self):
        pass

    def __setup_cam__(self, e):
        """Install any key/mouse bindings on the Graphics instance ``e``."""
        raise NotImplementedError

    def __update__(self, dt):
        """Advance the camera state by ``dt`` seconds."""
        raise NotImplementedError

    def __updated__(self):
        """Report whether the view matrix must be rebuilt."""
        raise NotImplementedError

    def __recalc_matrix__(self, e):
        """Produce the 12-element view matrix tuple."""
        raise NotImplementedError
class BaseCanvasElement:
    """Interface every drawable passed to Graphics.draw() must implement."""

    def __init__(self):
        pass

    def __updated__(self):
        """Report whether the cached flattened data is stale."""
        raise NotImplementedError

    def __recalc_flatten__(self, e):
        """Re-project this element's geometry through the Graphics instance ``e``."""
        raise NotImplementedError

    def __flatten_data__(self):
        """Return the cached (lines, triangles) draw data."""
        raise NotImplementedError
class StaticCamera(BaseCamera):
    """Immobile look-at camera: eye at (x, y, z) aimed at (tx, ty, tz) with up vector (ux, uy, uz)."""
    def __init__(self, x, y, z, tx, ty, tz, ux, uy, uz):
        self.x = x
        self.y = y
        self.z = z
        self.tx = tx
        self.ty = ty
        self.tz = tz
        self.ux = ux
        self.uy = uy
        self.uz = uz
        self._u = True  # dirty flag: view matrix needs recomputation

    def __setup_cam__(self, e):
        pass  # a static camera installs no input bindings

    def __update__(self, dt):
        pass  # never moves

    def __updated__(self):
        return self._u

    def __recalc_matrix__(self, e):
        """Build and return the look-at view matrix as a 12-tuple:
        (xx,xy,xz, yx,yy,yz, zx,zy,zz, tx,ty,tz) — 3x3 rotation rows plus translation.
        """
        self._u = False
        # Forward (z) axis: eye -> target, normalised
        zx = self.tx - self.x
        zy = self.ty - self.y
        zz = self.tz - self.z
        m = math.sqrt(zx**2 + zy**2 + zz**2)
        zx /= m
        zy /= m
        zz /= m
        # Right (x) axis: up x forward
        xx = self.uy*zz - self.uz*zy
        xy = self.uz*zx - self.ux*zz
        xz = self.ux*zy - self.uy*zx
        m = math.sqrt(xx**2 + xy**2 + xz**2)
        # FIX: guard the degenerate case (up parallel to forward) instead of
        # dividing by zero — mirrors the identical guard in OrbitalCamera.
        if m == 0:
            m = 1
        xx /= m
        xy /= m
        xz /= m
        # True up (y) axis: forward x right
        yx = zy*xz - zz*xy
        yy = zz*xx - zx*xz
        yz = zx*xy - zy*xx
        return (xx, xy, xz, yx, yy, yz, zx, zy, zz,
                -xx*self.x - xy*self.y - xz*self.z,
                -yx*self.x - yy*self.y - yz*self.z,
                -zx*self.x - zy*self.y - zz*self.z)
class OrbitalCamera(BaseCamera):
    """Camera orbiting a target point at distance d, with pitch/yaw angles.

    Interaction (installed via __setup_cam__): Up/Down keys zoom, mouse
    drag changes pitch; rotate_around() enables continuous auto-rotation.
    """
    def __init__(self,x,y,z,rx,ry,d,ux,uy,uz):
        # Target point the camera orbits
        self.x=x
        self.y=y
        self.z=z
        # Spherical angles (rx = pitch/polar, ry = yaw/azimuth) and orbit distance
        self.rx=rx
        self.ry=ry
        self.d=d
        # Up vector
        self.ux=ux
        self.uy=uy
        self.uz=uz
        self._ra=0      # auto-rotation speed (radians/second); 0 = off
        self._l=True    # when falsy, zoom/drag input is ignored
        self._d=None    # last drag position (x, y), or None when not dragging
        self._u=True    # dirty flag: view matrix needs recomputation
    def lock(self,l):
        # NOTE(review): _l gates input when falsy, so lock(True) ENABLES
        # input despite the name — confirm intent with callers.
        self._l=l
    def rotate_around(self,t):
        """Auto-rotate one full revolution around the target every t seconds."""
        self._ra=2*math.pi/t
    def __setup_cam__(self,e):
        """Install zoom (Up/Down keys) and pitch-drag (mouse button 1) bindings on e."""
        def _up(_):
            if (self._l):
                self.d=min(self.d+0.05,20)  # zoom out, clamped at 20
                self._u=True
        def _down(_):
            if (self._l):
                self.d=max(self.d-0.05,0.05)  # zoom in, clamped at 0.05
                self._u=True
        def _drag(a):
            if (self._d is not None):
                if (self._l):
                    # Pitch clamped to (0, pi] to avoid flipping at the poles
                    self.rx=min(max(self.rx-(a.y-self._d[1])*0.01,EPSILON),math.pi)
                    # self.ry+=(a.x-self._d[0])*0.01
                    self._u=True
            self._d=(a.x,a.y)  # always record position so the next event has a reference
        def _drag_stop(_):
            self._d=None
        e.bind_key("<Up>",_up)
        e.bind_key("<Down>",_down)
        e.bind_key("<B1-Motion>",_drag)
        e.bind_key("<ButtonRelease-1>",_drag_stop)
    def __update__(self,dt):
        self.ry+=self._ra*dt
        if (self._ra):
            self._u=True
    def __updated__(self):
        return self._u
    def __recalc_matrix__(self,e):
        """Build the look-at view matrix (12-tuple) for the current orbit state."""
        self._u=False
        # Camera offset from the target, from spherical coordinates
        px=math.sin(self.rx)*math.cos(self.ry)*self.d
        py=math.cos(self.rx)*self.d
        pz=math.sin(self.rx)*math.sin(self.ry)*self.d
        m=math.sqrt(px**2+py**2+pz**2)
        # Forward (z) axis points from the camera towards the target
        zx=-px/m
        zy=-py/m
        zz=-pz/m
        # Absolute camera position
        px+=self.x
        py+=self.y
        pz+=self.z
        # Right (x) axis: up x forward
        xx=self.uy*zz-self.uz*zy
        xy=self.uz*zx-self.ux*zz
        xz=self.ux*zy-self.uy*zx
        m=math.sqrt(xx**2+xy**2+xz**2)
        if (m==0):
            # Degenerate when up is parallel to forward; avoid division by zero
            m=1
        xx/=m
        xy/=m
        xz/=m
        # True up (y) axis: forward x right
        yx=zy*xz-zz*xy
        yy=zz*xx-zx*xz
        yz=zx*xy-zy*xx
        # 3x3 rotation rows followed by the translation column
        return (xx,xy,xz,yx,yy,yz,zx,zy,zz,-xx*px-xy*py-xz*pz,-yx*px-yy*py-yz*pz,-zx*px-zy*py-zz*pz)
class ShapeBuffer(BaseCanvasElement):
    """Static indexed mesh drawn as a wireframe (LINES) or filled (TRIANGLES)."""

    def __init__(self, vl, il, m=LINES, cl="#ffffff"):
        self.vl = vl         # vertex list: (x, y, z) triples
        self.il = il         # index list: flat triples of vertex indices
        self.m = m           # render mode
        self.cl = cl         # wireframe colour
        self._u = True       # dirty flag
        self._dt = ((), ())  # cached (lines, triangles) draw data

    def __updated__(self):
        return self._u

    def __recalc_flatten__(self, e):
        """Project every vertex through ``e`` and rebuild the cached draw data."""
        self._u = False
        projected = [e._transform(*vertex) for vertex in self.vl]
        if self.m == LINES:
            segments = []
            # Each consecutive index triple is a triangle; emit its three edges.
            for base in range(0, len(self.il), 3):
                a, b, c = self.il[base:base + 3]
                for p, q in ((a, b), (b, c), (c, a)):
                    segments.append((projected[p][0], projected[p][1],
                                     projected[q][0], projected[q][1], self.cl))
            self._dt = (tuple(segments), ())
        elif self.m == TRIANGLES:
            print("Tri")

    def __flatten_data__(self):
        return self._dt
class Graphics:
    """Minimal tkinter 3D wireframe renderer.

    Owns the Tk window and canvas, the active camera, the projection and the
    per-frame draw lists. Typical use: camera(), projection(), then
    display(cb), where cb(dt) calls draw() on scene elements every frame.
    """
    def __init__(self, w, h, bg="#000000"):
        self.w = w
        self.h = h
        self._r = tkinter.Tk()
        self._r.title("")
        self._r.geometry(f"{w}x{h}")
        self._c = tkinter.Canvas(self._r, width=w, height=h, highlightthickness=0, background=bg)
        self._c.pack()
        self._r.update_idletasks()
        self._cm = None     # active camera (BaseCamera)
        self._c_m = ()      # cached camera/view matrix
        # BUG FIX: this attribute was misspelled `self._pr`; every reader
        # (projection(), _update_p(), _transform()) uses `_pm`.
        self._pm = ()       # projection parameters
        self._p_u = False   # projection flagged for recomputation
        self._p_m = ()      # cached projection matrix
        self._cb = None     # per-frame user callback
        self._lt = 0        # previous frame timestamp (0 = first frame)
        self._ll = []       # line draw list entries: (x0, y0, x1, y1, colour)
        self._tl = []       # triangle draw list entries
        self._f_u = False   # force all elements to re-flatten this frame
        self._kb = {}       # tk event sequence -> list of callbacks

    @property
    def window(self):
        """The underlying tkinter root window."""
        return self._r

    def background(self, bg):
        """Change the canvas background colour."""
        self._c["background"] = bg

    def bind_key(self, k, f):
        """Bind callback f to tk event sequence k; multiple callbacks per key are supported."""
        def _cb(a, k=k):
            for f in self._kb[k]:
                f(a)
        if k not in self._kb:
            self._r.bind(k, lambda a: _cb(a))
            self._kb[k] = [f]
        else:
            self._kb[k] += [f]

    def camera(self, c):
        """Set the active camera and let it install its input bindings."""
        if not isinstance(c, BaseCamera):
            # BUG FIX: the message previously referenced undefined name `e`,
            # raising NameError instead of the intended RuntimeError.
            raise RuntimeError(f"Camera of Type '{c.__class__.__name__}' is not compatible!")
        self._cm = c
        self._cm.__setup_cam__(self)

    def projection(self, t, *a):
        """Configure the projection.

        ORTOGRAPHIC expects (top, left, bottom, right, near, far);
        PERSPECTIVE expects (fov_degrees, near, far).
        """
        if t == ORTOGRAPHIC:
            if len(a) != 6:
                raise TypeError("Ortographic Projection requires 6 Arguments (top, left, bottom, right, near, far)!")
            self._pm = (t, a[0], a[1], a[2], a[3], a[4], a[5])
        elif t == PERSPECTIVE:
            if len(a) != 3:
                raise TypeError("Perspective Projection requires 3 Arguments (fov, near, far)!")
            # cot(fov/2), with fov given in degrees (fov/360*pi == radians(fov)/2)
            self._pm = (t, math.cos(a[0]/360*math.pi)/math.sin(a[0]/360*math.pi), a[1], a[2])
        else:
            raise NameError(f"Unknown Projection Type Value '{t}'!")
        self._update_p()

    def draw(self, e):
        """Queue a canvas element for this frame, re-flattening it if stale."""
        if not isinstance(e, BaseCanvasElement):
            raise RuntimeError(f"Element of Type '{e.__class__.__name__}' is not compatible!")
        if e.__updated__() or self._f_u:
            e.__recalc_flatten__(self)
        ll, tl = e.__flatten_data__()
        self._ll.extend(ll)
        self._tl.extend(tl)

    def display(self, cb):
        """Start the render loop; cb(dt) is called once per frame."""
        self._cb = cb
        self._r.after(1, self._loop)
        self._r.mainloop()

    def close(self):
        """Destroy the window (ends the mainloop)."""
        self._r.destroy()

    def _loop(self):
        """One frame: update the camera, run the user callback, redraw the canvas."""
        tm = time.time()
        if self._lt == 0:
            self._lt = tm  # first frame renders with dt == 0
        self._ll = []
        self._tl = []
        self._f_u = False
        self._cm.__update__(tm - self._lt)
        if self._cm.__updated__():
            self._c_m = self._cm.__recalc_matrix__(self)
            self._f_u = True  # camera moved: every element must re-flatten
        self._cb(tm - self._lt)
        self._c.delete(tkinter.ALL)
        if self._p_u:
            self._p_u = False
            self._update_p()
        for e in self._ll:
            self._c.create_line(e[0], e[1], e[2], e[3], fill=e[4])
        for e in self._tl:
            self._c.create_polygon(*e, fill="#00ff00")
        self._lt = tm
        self._r.after(1, self._loop)

    def _update_p(self):
        """Rebuild the cached projection matrix from self._pm."""
        if self._pm[0] == ORTOGRAPHIC:
            t, l, b, r = self._pm[1:5]
            # _pm[5]/_pm[6] are near/far
            self._p_m = (2/(r-l), 0, 0, 0, 2/(t-b), 0, 0, 0, -2/(self._pm[6]-self._pm[5]),
                         -(r+l)/(r-l), -(t+b)/(t-b), (self._pm[6]+self._pm[5])/(self._pm[6]-self._pm[5]))
        else:
            # _pm = (PERSPECTIVE, cot(fov/2), near, far)
            self._p_m = (self._pm[1]/self.w*self.h, 0, 0, 0, self._pm[1], 0, 0, 0,
                         (self._pm[3]+self._pm[2])/(self._pm[3]-self._pm[2]), 0, 0,
                         (2*self._pm[2]*self._pm[3])/(self._pm[2]-self._pm[3]))

    def _transform(self, x, y, z):
        """Transform world point (x, y, z) to (screen_x, screen_y, depth)."""
        # Camera (view) transform
        nx = x*self._c_m[0] + y*self._c_m[3] + z*self._c_m[6] + self._c_m[9]
        ny = x*self._c_m[1] + y*self._c_m[4] + z*self._c_m[7] + self._c_m[10]
        nz = x*self._c_m[2] + y*self._c_m[5] + z*self._c_m[8] + self._c_m[11]
        # Perspective divides by depth (EPSILON guards z == 0); orthographic does not
        div = (nz if nz != 0 else EPSILON) if self._pm[0] == PERSPECTIVE else 1
        nnx = (nx*self._p_m[0] + ny*self._p_m[3] + nz*self._p_m[6] + self._p_m[9]) / div
        nny = (nx*self._p_m[1] + ny*self._p_m[4] + nz*self._p_m[7] + self._p_m[10]) / div
        # Map normalised coordinates to pixel coordinates centred on the canvas
        return ((nnx*self.w)/(2*nz if nz != 0 else EPSILON) + self.w/2,
                (nny*self.h)/(2*nz if nz != 0 else EPSILON) + self.h/2,
                (nx*self._p_m[2] + ny*self._p_m[5] + nz*self._p_m[8] + self._p_m[11]) / div)
|
[
"tkinter.Canvas",
"math.sqrt",
"math.sin",
"time.time",
"math.cos",
"tkinter.Tk"
] |
[((1062, 1100), 'math.sqrt', 'math.sqrt', (['(zx ** 2 + zy ** 2 + zz ** 2)'], {}), '(zx ** 2 + zy ** 2 + zz ** 2)\n', (1071, 1100), False, 'import math\n'), ((1200, 1238), 'math.sqrt', 'math.sqrt', (['(xx ** 2 + xy ** 2 + xz ** 2)'], {}), '(xx ** 2 + xy ** 2 + xz ** 2)\n', (1209, 1238), False, 'import math\n'), ((2652, 2690), 'math.sqrt', 'math.sqrt', (['(px ** 2 + py ** 2 + pz ** 2)'], {}), '(px ** 2 + py ** 2 + pz ** 2)\n', (2661, 2690), False, 'import math\n'), ((2838, 2876), 'math.sqrt', 'math.sqrt', (['(xx ** 2 + xy ** 2 + xz ** 2)'], {}), '(xx ** 2 + xy ** 2 + xz ** 2)\n', (2847, 2876), False, 'import math\n'), ((3918, 3930), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (3928, 3930), False, 'import tkinter\n'), ((3992, 4071), 'tkinter.Canvas', 'tkinter.Canvas', (['self._r'], {'width': 'w', 'height': 'h', 'highlightthickness': '(0)', 'background': 'bg'}), '(self._r, width=w, height=h, highlightthickness=0, background=bg)\n', (4006, 4071), False, 'import tkinter\n'), ((5738, 5749), 'time.time', 'time.time', ([], {}), '()\n', (5747, 5749), False, 'import time\n'), ((2575, 2592), 'math.cos', 'math.cos', (['self.rx'], {}), '(self.rx)\n', (2583, 2592), False, 'import math\n'), ((2527, 2544), 'math.sin', 'math.sin', (['self.rx'], {}), '(self.rx)\n', (2535, 2544), False, 'import math\n'), ((2545, 2562), 'math.cos', 'math.cos', (['self.ry'], {}), '(self.ry)\n', (2553, 2562), False, 'import math\n'), ((2605, 2622), 'math.sin', 'math.sin', (['self.rx'], {}), '(self.rx)\n', (2613, 2622), False, 'import math\n'), ((2623, 2640), 'math.sin', 'math.sin', (['self.ry'], {}), '(self.ry)\n', (2631, 2640), False, 'import math\n'), ((5136, 5166), 'math.cos', 'math.cos', (['(a[0] / 360 * math.pi)'], {}), '(a[0] / 360 * math.pi)\n', (5144, 5166), False, 'import math\n'), ((5163, 5193), 'math.sin', 'math.sin', (['(a[0] / 360 * math.pi)'], {}), '(a[0] / 360 * math.pi)\n', (5171, 5193), False, 'import math\n')]
|
from datetime import datetime
from blog.models import Article
def retrieveArticles(drafts, future, limit=-1):
    """Retrieve and filter blog articles.

    Args:
        drafts: include draft articles when truthy.
        future: include articles dated after today when truthy.
        limit: maximum number of articles to return; non-positive means no limit.

    Returns:
        A queryset of Article objects ordered by date descending.
    """
    latest_blog_posts = Article.objects.all()
    if not drafts:
        # Exclude drafts (idiomatic truthiness test instead of `== False`)
        latest_blog_posts = latest_blog_posts.filter(draft=False)
    if not future:
        # Date less than or equal to today, so future articles aren't publicly visible.
        # NOTE(review): datetime.today() is timezone-naive — confirm the project
        # doesn't store tz-aware dates (USE_TZ) before relying on this comparison.
        latest_blog_posts = latest_blog_posts.filter(date__lte=datetime.today())
    # Ordered by date DESC
    latest_blog_posts = latest_blog_posts.order_by('-date')
    if limit > 0:
        latest_blog_posts = latest_blog_posts[:limit]
    return latest_blog_posts
|
[
"blog.models.Article.objects.all",
"datetime.datetime.today"
] |
[((218, 239), 'blog.models.Article.objects.all', 'Article.objects.all', ([], {}), '()\n', (237, 239), False, 'from blog.models import Article\n'), ((534, 550), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (548, 550), False, 'from datetime import datetime\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import annotations
import collections
import copy
import itertools
import math
from typing import List, Optional
from .elements import elements_dict
from .exceptions import *
from .grid import *
from .molecule import Molecule, Atom, ATOM_RADIUS
from .schem_random import SChemRandom
from .waldo import Waldo, Instruction, InstructionType
# Dimensions (cols, rows) of components that differ from the standard dimensions for their type
COMPONENT_SHAPES = {
    # SC stores co-ordinates as col, row
    'research-input': (1, 1),
    'research-output': (1, 1),
    'disabled-output': (1, 1),
    'drag-silo-input': (5, 5),
    'drag-atmospheric-input': (2, 2),
    'drag-oceanic-input': (2, 2),
    'drag-powerplant-input': (14, 15),
    'drag-mining-input': (3, 2),
    'drag-ancient-input': (2, 2),
    'drag-spaceship-input': (2, 2)}  # TODO: Actually (2,3) but its pipe isn't in the middle, which breaks our pipe-centring assumptions
# Custom and ResNet research levels use the custom research reactor type and specify all features directly in the
# level JSON. Specify the features of all other reactor types.
DEFAULT_RESEARCH_REACTOR_TYPE = 'custom-research-reactor'
REACTOR_TYPES = {
    # Standard production/research reactor types
    'drag-starter-reactor': {'bonder-count': 4},
    'drag-disassembly-reactor': {'bonder-minus-count': 4, 'has-bottom-input': False},
    'drag-assembly-reactor': {'bonder-plus-count': 4, 'has-bottom-output': False},
    'drag-advanced-reactor': {'bonder-count': 4, 'has-sensor': True},
    'drag-fusion-reactor': {'bonder-count': 4, 'has-fuser': True},
    'drag-superbonder-reactor': {'bonder-count': 8},
    'drag-nuclear-reactor': {'bonder-count': 4, 'has-fuser': True, 'has-splitter': True},
    'drag-quantum-reactor': {'bonder-count': 4, 'has-sensor': True, 'has-teleporter': True,
                             'quantum-walls-y': {'5': [0, 1, 2, 3, 4, 5, 6, 7]}},
    'drag-sandbox-reactor': {'bonder-count': 8, 'has-sensor': True, 'has-fuser': True, 'has-splitter': True},
    # Reactor types that only appear in official non-ResNet research levels.
    'empty-research-reactor': {},  # 1-1 - 1-3
    'tutorial-research-reactor-2': {'bonder-count': 4},  # 1-4
    'drag-reduced-reactor': {'bonder-count': 2},  # 6-1, 7-2
    'drag-largeoutput-reactor': {'bonder-count': 4, 'has-sensor': True, 'has-large-output': True},  # 6-3, 7-4, 8-2
    'drag-advancedfusion-reactor': {'bonder-count': 2, 'has-sensor': True, 'has-fuser': True},  # 7-3
    'drag-quantum-reactor-x': {'has-teleporter': True, 'quantum-walls-y': {'5': [0, 1, 2, 3, 4, 5, 6, 7]}},  # QT-1
    'drag-quantum-reactor-s': {'bonder-count': 2, 'has-sensor': True, 'has-teleporter': True,  # QT-2 - QT-4
                               'quantum-walls-y': {'5': [0, 1, 2, 3, 4, 5, 6, 7]}}}
class Pipe:
    """A SpaceChem component's pipe. All posns are relative to the parent component's posn.

    Implemented as a timed queue: each molecule is stored with the cycle it
    entered, and becomes available at the pipe's end once its age in cycles
    is >= len(pipe) - 1. This avoids any per-cycle movement bookkeeping.
    """
    __slots__ = 'posns', '_molecules', '_add_cycles', '_last_pop_cycle'

    def __init__(self, posns: List[Position]):
        """Construct a pipe. posns should be defined relative to the pipe's parent component posn."""
        self.posns = posns
        # To avoid incurring any performance costs for moving molecules through the pipe, implement a pipe as a timed
        # queue, storing the cycle each molecule was added on and only allowing them to exit the pipe if their age in
        # cycles is >= len(self) - 1 (e.g. for a 1-long pipe, they are available in the same cycle).
        self._molecules = collections.deque()
        self._add_cycles = collections.deque()  # Kept in lockstep with _molecules
        # To ensure reactors can't input from a pipe twice in a cycle, track the last cycle of a successful pop()
        self._last_pop_cycle = -1

    def __len__(self):
        # A pipe's length is its number of grid positions, not its molecule count
        return len(self.posns)

    def get(self, idx: int, cycle: int) -> Optional[Molecule]:
        """Return the molecule at the given index, else None.

        Only idx 0 (pipe entrance) and -1 (pipe exit) are O(1); any other
        index falls back to materializing the full layout via to_list().
        """
        if not self._molecules:
            return None
        # Provide O(1) access for either end which is all we use, else fall back to O(N)
        elif idx == 0:
            if (len(self._molecules) == len(self)  # Pipe is full
                    or cycle == self._add_cycles[0]):  # A molecule was already added this cycle
                return self._molecules[0]
            else:
                return None
        elif idx == -1:
            # Make sure it's had time to reach the end
            if cycle - self._add_cycles[-1] >= len(self) - 1 and cycle != self._last_pop_cycle:
                return self._molecules[-1]
            else:
                return None
        else:
            return self.to_list(cycle)[idx]

    def push(self, molecule: Molecule, cycle: int) -> bool:
        """Attempt to pass the given molecule to the pipe. Return False if there is no room at the front of the pipe.
        Note that since cycle incrementation controls pipe movement, if a molecule is being added that shouldn't be
        moved in the same cycle (namely, all components except reactor), cycle + 1 should be given as the cycle the
        molecule was added instead.
        """
        if self._molecules and (len(self._molecules) == len(self)  # Pipe is full
                                or cycle == self._add_cycles[0]):  # A molecule was already added this cycle
            return False
        else:
            self._molecules.appendleft(molecule)
            self._add_cycles.appendleft(cycle)
            return True

    def pop(self, cycle: int) -> Optional[Molecule]:
        """Remove and return the molecule at the end of the pipe, or None if there is none."""
        if (not self._molecules
                or cycle - self._add_cycles[-1] < len(self) - 1  # Make sure it's had time to reach the end
                or cycle == self._last_pop_cycle):  # Make sure a component can't extract two molecules in a cycle
            return None
        else:
            self._last_pop_cycle = cycle
            self._add_cycles.pop()
            return self._molecules.pop()

    def to_list(self, cycle: int) -> list:
        """Return a list representing the current positions of molecule in the pipe, with None for empty spaces."""
        result = [None for _ in self.posns]
        # Insert molecules starting from the back, to account for backed up molecules
        cur_pipe_idx = len(self) - 1
        for molecule, cycle_added in zip(reversed(self._molecules), reversed(self._add_cycles)):
            # Add this molecule in the farthest position it could have travelled to, accounting for clogs
            if cycle - cycle_added >= cur_pipe_idx:
                result[cur_pipe_idx] = molecule
                # Update the current pipe idx
                cur_pipe_idx -= 1
            else:  # Once we are past any clogged molecules, we will never need cur_pipe_idx again
                result[cycle - cycle_added] = molecule
        return result

    @classmethod
    def from_preset_string(cls, start_posn: Position, dirns_str: str) -> Pipe:
        """Construct a pipe from the given CE pipe string, e.g. 'RRDRUULR', moving in the indicated directions
        (U = Up, R = Right, D = Down, L = Left) from the start_posn (should be relative to the parent component's posn).
        """
        posns = [start_posn]
        char_to_dirn = {'U': UP, 'R': RIGHT, 'D': DOWN, 'L': LEFT}
        for dirn_char in dirns_str:
            posns.append(posns[-1] + char_to_dirn[dirn_char])
        assert len(posns) == len(set(posns)), "Pipe overlaps with itself"
        return Pipe(posns)

    @classmethod
    def from_export_str(cls, export_str: str):
        """Note that a pipe's solution lines might not be contiguous. It is expected that the caller filters
        out the lines for a single pipe and passes them as a single string to this method.
        """
        lines = [s for s in export_str.split('\n') if s]  # Split into non-empty lines
        # Ensure all non-empty lines are valid and for the same-indexed pipe
        assert all(s.startswith('PIPE:0,') for s in lines) or all(s.startswith('PIPE:1,') for s in lines), \
            "Invalid lines in pipe export string"
        # Extract and store the pipe's positions, checking for discontinuities in the given pipe positions
        posns = []
        for line in lines:
            fields = line.split(',')
            assert len(fields) == 3, f"Invalid num fields in PIPE line:\n{line}"
            posn = Position(col=int(fields[1]), row=int(fields[2]))
            if posns:
                # Each position must be exactly one grid step from the previous one
                assert abs(posn - posns[-1]) in ((0, 1), (1, 0)), "Pipe is not contiguous"
            posns.append(posn)
        assert posns, "Expected at least one PIPE line"
        assert len(posns) == len(set(posns)), "Pipe overlaps with itself"
        return Pipe(posns)

    def export_str(self, pipe_idx: int = 0) -> str:
        """Represent this pipe in solution export string format."""
        return '\n'.join(f'PIPE:{pipe_idx},{posn.col},{posn.row}' for posn in self.posns)

    def reset(self):
        """Empty this pipe."""
        self._molecules = collections.deque()
        self._add_cycles = collections.deque()
        self._last_pop_cycle = -1
class Component:
    """Informal Interface class defining methods overworld objects will implement one or more of."""
    __slots__ = 'type', 'posn', 'dimensions', 'in_pipes', 'out_pipes'

    def __new__(cls, component_dict=None, _type=None, **kwargs):
        """Return a new object of the appropriate subclass based on the component type."""
        # If this is being called from a child class, behave like a normal __new__ implementation (to avoid recursion)
        if cls != Component:
            return object.__new__(cls)
        if _type is None:
            _type = component_dict['type']
        # Dispatch on marker words within the hyphenated type string, or on exact type names
        parts = _type.split('-')
        if 'reactor' in parts:
            return super().__new__(Reactor)
        elif 'input' in parts:
            return super().__new__(Input)
        elif _type == 'drag-printer-output':
            return super().__new__(OutputPrinter)
        elif _type == 'drag-printer-passthrough':
            return super().__new__(PassThroughPrinter)
        elif 'output' in parts or 'production-target' in _type:
            return super().__new__(Output)
        elif _type == 'drag-recycler':
            return super().__new__(Recycler)
        elif _type == 'drag-storage-tank':
            return super().__new__(StorageTank)
        elif _type == 'drag-storage-tank-infinite':
            return super().__new__(InfiniteStorageTank)
        elif _type == 'freeform-counter':
            return super().__new__(PassThroughCounter)
        elif _type == 'drag-qpipe-in':
            return super().__new__(TeleporterInput)
        elif _type == 'drag-qpipe-out':
            return super().__new__(TeleporterOutput)
        elif 'weapon' in parts:
            return super().__new__(Weapon)
        else:
            raise ValueError(f"Unrecognized component type {_type}")

    def __init__(self, component_dict=None, _type=None, posn=None, num_in_pipes=0, num_out_pipes=0):
        """Initialize type, position, dimensions and pipes.

        Either component_dict (level JSON, with 'type'/'x'/'y' keys) or
        explicit _type/posn must be provided.
        """
        self.type = _type if _type is not None else component_dict['type']
        self.posn = Position(*posn) if posn is not None else Position(col=component_dict['x'], row=component_dict['y'])
        self.dimensions = COMPONENT_SHAPES[self.type] if self.type in COMPONENT_SHAPES else self.DEFAULT_SHAPE
        self.in_pipes = [None for _ in range(num_in_pipes)]
        # Initialize output pipes in middle, rounded down, accounting for any level-preset pipes
        self.out_pipes = []
        pipe_start_posn = Position(col=self.dimensions[0], row=(self.dimensions[1] - 1) // 2)
        if component_dict is not None and 'output-pipes' in component_dict:
            assert len(component_dict['output-pipes']) == num_out_pipes, f"Unexpected number of output pipes for {self.type}"
            for pipe_dirns_str in component_dict['output-pipes']:
                self.out_pipes.append(Pipe.from_preset_string(pipe_start_posn, pipe_dirns_str))
                pipe_start_posn += DOWN
        else:
            for _ in range(num_out_pipes):
                self.out_pipes.append(Pipe(posns=[pipe_start_posn]))
                pipe_start_posn += DOWN

    @classmethod
    def parse_metadata(cls, s):
        """Given a component export string or its COMPONENT line, return its component type and posn."""
        component_line = s.strip('\n').split('\n', maxsplit=1)[0]  # Get first non-empty line
        # Parse COMPONENT line
        assert component_line.startswith('COMPONENT:'), "Missing COMPONENT line in export string"
        fields = component_line.split(',')
        assert len(fields) == 4, f"Unrecognized component line format:\n{component_line}"
        component_type = fields[0][len('COMPONENT:'):].strip("'")
        component_posn = Position(int(fields[1]), int(fields[2]))
        # TODO: Still don't know what the 4th field does...
        return component_type, component_posn

    def update_from_export_str(self, export_str, update_pipes=True):
        """Given a matching export string, update this component. Optionally ignore pipe updates (namely necessary
        for Ω-Pseudoethyne which disallows mutating a 1-long pipe where custom levels do not.
        """
        component_line, *pipe_lines = (s for s in export_str.split('\n') if s)  # Remove empty lines and get first line
        _, component_posn = self.parse_metadata(component_line)
        assert component_posn == self.posn, f"No component at posn {component_posn}"
        # TODO: Is ignoring component type checks unsafe?
        #assert component_type == self.type, \
        #    f"Component of type {self.type} cannot be overwritten with component of type {component_type}"
        # Check that any pipe lines are superficially valid (all PIPE:0 or PIPE:1), which SC does even if
        # the component does not accept pipe updates (e.g. research reactors)
        # Ensure all non-empty lines are valid
        for pipe_line in pipe_lines:
            if not (pipe_line.startswith('PIPE:0') or pipe_line.startswith('PIPE:1')):
                raise ValueError(f"Unexpected line in component pipes: `{pipe_line}`")
        if update_pipes and self.out_pipes:
            # Expect the remaining lines to define the component's output pipes
            # If the pipes on an existing component are updatable, all of them must be specified during an update
            # (as testable by playing around with preset reactors in CE production levels)
            # Whereas when updating presets with non-updatable pipes (e.g. research reactors), all pipes must be included
            assert pipe_lines, f"Some pipes are missing for component {self.type}"
            pipe_export_strs = ['\n'.join(s for s in pipe_lines if s.startswith(f'PIPE:{i},'))
                                for i in range(2)]
            new_out_pipes = [Pipe.from_export_str(s) for s in pipe_export_strs if s]
            assert len(new_out_pipes) == len(self.out_pipes), f"Unexpected number of pipes for component {self.type}"
            for i, pipe in enumerate(new_out_pipes):
                # Preset pipes of length > 1 are immutable
                if len(self.out_pipes[i]) == 1:
                    # Ensure this pipe starts from the correct position
                    assert pipe.posns[0] == Position(col=self.dimensions[0], row=((self.dimensions[1] - 1) // 2) + i), \
                        f"Invalid start position for pipe {i} of component {self.type}"
                    self.out_pipes[i] = pipe

    def __str__(self):
        return f'{self.type},{self.posn}'

    def do_instant_actions(self, _):
        """Do any instant actions (e.g. execute waldo instructions, spawn/consume molecules)."""
        return

    def move_contents(self, _):
        """Move the contents of this object (e.g. waldos/molecules)."""
        pass

    def reset(self):
        """Reset this component and its pipe's contents as if it has never been run."""
        for pipe in self.out_pipes:
            pipe.reset()
        return self

    def export_str(self):
        """Represent this component in solution export string format."""
        # By SC's convention, set the first lines of the export to be the first segment of each pipe
        # Reverse zip to separate the first lines of each export
        first_segment_lines, remainders = [], []
        for i, pipe in enumerate(self.out_pipes):
            first_segment_line, *remainder = pipe.export_str(pipe_idx=i).split('\n', maxsplit=1)
            first_segment_lines.append(first_segment_line)
            remainders.extend(remainder)
        return '\n'.join([f"COMPONENT:'{self.type}',{self.posn.col},{self.posn.row},''",
                          *first_segment_lines, *remainders])
class Input(Component):
    """Component that emits copies of a fixed molecule into its single output pipe."""
    DEFAULT_SHAPE = (2, 3)
    __slots__ = 'molecules', 'input_rate', 'num_inputs'

    # Convenience accessors for the single output pipe of an input component
    @property
    def out_pipe(self):
        return self.out_pipes[0]

    @out_pipe.setter
    def out_pipe(self, p):
        self.out_pipes[0] = p

    def __new__(cls, input_dict, *args, **kwargs):
        """Dispatch construction to ProgrammedInput/RandomInput where the dict calls for it."""
        if 'repeating-molecules' in input_dict:
            return object.__new__(ProgrammedInput)
        key = 'inputs' if 'inputs' in input_dict else 'molecules'
        # A single molecule stays a fixed Input; multiple molecules need a RandomInput
        return object.__new__(cls if len(input_dict[key]) <= 1 else RandomInput)

    def __init__(self, input_dict, _type=None, posn=None, is_research=False):
        super().__init__(input_dict, _type=_type, posn=posn, num_out_pipes=1)
        # Accept both vanilla ('inputs') and Community Edition ('molecules') key names
        key = 'inputs' if 'inputs' in input_dict else 'molecules'
        assert len(input_dict[key]) != 0, "No molecules in input dict"
        self.molecules = [Molecule.from_json_string(mol_dict['molecule'])
                          for mol_dict in input_dict[key]]
        if is_research:
            self.input_rate = 1
        else:
            self.input_rate = input_dict.get('production-delay', 10)
        self.num_inputs = 0

    def move_contents(self, cycle):
        """Create a new molecule if on the correct cycle and the pipe has room."""
        # -1 necessary since the starting cycle is 1 not 0, while mod == 1 would break on rate = 1
        if (cycle - 1) % self.input_rate == 0 and self.out_pipe.get(0, cycle) is None:
            self.out_pipe.push(copy.deepcopy(self.molecules[0]), cycle + 1)
            self.num_inputs += 1

    def reset(self):
        super().reset()
        self.num_inputs = 0
        return self
class RandomInput(Input):
    """Input that draws its next molecule pseudo-randomly from a balance bucket."""
    __slots__ = 'seed', 'random_generator', 'input_counts', 'random_bucket'

    def __init__(self, input_dict, _type=None, posn=None, is_research=False):
        super().__init__(input_dict, _type=_type, posn=posn, is_research=is_research)
        assert len(self.molecules) > 1, "Fixed input passed to RandomInput ctor"
        # Seeded generator; most levels default to seed 0
        self.seed = input_dict.get('random-seed', 0)
        self.random_generator = SChemRandom(seed=self.seed)
        self.random_bucket = []  # molecule indices remaining in the current balance bucket
        key = 'inputs' if 'inputs' in input_dict else 'molecules'
        self.input_counts = [mol_dict['count'] for mol_dict in input_dict[key]]

    def get_next_molecule_idx(self):
        """Get the next input molecule's index. Exposed to allow for tracking branches in random level states."""
        # Refill the balance bucket when it runs dry: each molecule index
        # appears `count` times per bucket.
        if not self.random_bucket:
            # TODO: Check this method of drawing from the bucket matches results from research levels with two random
            #       inputs (if I recall, both draw from e.g. the 'bottom 6th' of the inputs at the same time regardless
            #       of if that is a 1/6 chance or a larger chance molecule in the respective zone - which is why I think
            #       this implementation is correct)
            for idx, count in enumerate(self.input_counts):
                self.random_bucket.extend([idx] * count)
        # Draw one entry at random from the remaining bucket
        draw = self.random_generator.next(len(self.random_bucket))
        return self.random_bucket.pop(draw)

    def move_contents(self, cycle):
        # -1 necessary since the starting cycle is 1 not 0, while mod == 1 would break on rate = 1
        if (cycle - 1) % self.input_rate == 0 and self.out_pipe.get(0, cycle) is None:
            next_mol = self.molecules[self.get_next_molecule_idx()]
            self.out_pipe.push(copy.deepcopy(next_mol), cycle + 1)
            self.num_inputs += 1

    def reset(self):
        super().reset()
        self.random_generator = SChemRandom(seed=self.seed)
        self.random_bucket = []
        return self
class ProgrammedInput(Input):
    """An input that emits a fixed starting sequence of molecules once, then loops a repeating sequence."""
    __slots__ = 'starting_molecules', 'starting_idx', 'repeating_molecules', 'repeating_idx'

    def __init__(self, input_dict, _type=None, posn=None, is_research=False):
        # Deliberately skip Input.__init__ (different dict layout); initialize Component directly
        super(Input, self).__init__(input_dict, _type=_type, posn=posn, num_out_pipes=1)
        assert len(input_dict['repeating-molecules']) != 0, "No repeating molecules in input dict"

        self.starting_molecules = [Molecule.from_json_string(s) for s in input_dict['starting-molecules']]
        self.starting_idx = 0
        self.repeating_molecules = [Molecule.from_json_string(s) for s in input_dict['repeating-molecules']]
        self.repeating_idx = 0

        if is_research:
            self.input_rate = 1
        elif 'production-delay' in input_dict:
            self.input_rate = input_dict['production-delay']
        else:
            self.input_rate = 10
        self.num_inputs = 0

    def move_contents(self, cycle):
        # -1 necessary since starting cycle is 1 not 0, while mod == 1 would break on rate = 1
        if (cycle - 1) % self.input_rate != 0 or self.out_pipe.get(0, cycle) is not None:
            return

        if self.starting_idx < len(self.starting_molecules):
            # Still working through the one-time starting sequence
            self.out_pipe.push(copy.deepcopy(self.starting_molecules[self.starting_idx]), cycle + 1)
            self.starting_idx += 1
        else:
            # Starting sequence exhausted; cycle through the repeating sequence forever
            self.out_pipe.push(copy.deepcopy(self.repeating_molecules[self.repeating_idx]), cycle + 1)
            self.repeating_idx = (self.repeating_idx + 1) % len(self.repeating_molecules)
        self.num_inputs += 1

    def reset(self):
        """Rewind both molecule sequences to their beginnings."""
        super().reset()
        self.starting_idx = self.repeating_idx = 0
        return self
class Output(Component):
    """An output zone: consumes molecules matching its target and counts them toward a completion total."""
    DEFAULT_SHAPE = (2, 3)
    __slots__ = 'output_molecule', 'target_count', 'current_count'

    # Convenience property for when we know we're dealing with an Output
    @property
    def in_pipe(self):
        return self.in_pipes[0]

    @in_pipe.setter
    def in_pipe(self, p):
        self.in_pipes[0] = p

    def __init__(self, output_dict, _type=None, posn=None):
        super().__init__(output_dict, _type=_type, posn=posn, num_in_pipes=1)

        # CE output components are abstracted one level higher than vanilla output zones; unwrap if needed
        if 'output-target' in output_dict:
            output_dict = output_dict['output-target']

        self.output_molecule = Molecule.from_json_string(output_dict['molecule'])
        self.target_count = output_dict['count']
        self.current_count = 0

    def do_instant_actions(self, cycle):
        """Check for and process any incoming molecule, and return True if this output just completed (in which case
        the caller should check if the other outputs are also done). This avoids checking all output counts every cycle.
        """
        if self.in_pipe is None:
            return False

        molecule = self.in_pipe.pop(cycle)
        if molecule is None:
            return False

        if not molecule.isomorphic(self.output_molecule):
            raise InvalidOutputError(f"Invalid output molecule; expected:\n{self.output_molecule}\n\nbut got:\n{molecule}")

        if self.current_count < self.target_count:
            self.current_count += 1
        # Note: this also reports True for any valid molecule received once the target is already met,
        # since the count stops increasing at target_count (matches the original control flow)
        return self.current_count == self.target_count

    def reset(self):
        super().reset()
        self.current_count = 0
        return self
class PassThroughCounter(Output):
    """An Output that counts molecules toward its target while also forwarding them out a downstream pipe."""
    __slots__ = 'stored_molecule',

    def __init__(self, output_dict):
        # Deliberately skip Output.__init__ (different dict layout); initialize Component with both pipe ends
        super(Output, self).__init__(output_dict, num_in_pipes=1, num_out_pipes=1)

        target_dict = output_dict['target']
        self.output_molecule = Molecule.from_json_string(target_dict['molecule'])
        self.target_count = target_dict['count']
        self.current_count = 0
        self.stored_molecule = None  # Single-slot buffer between counting and forwarding

    @property
    def out_pipe(self):
        return self.out_pipes[0]

    @out_pipe.setter
    def out_pipe(self, p):
        self.out_pipes[0] = p

    def do_instant_actions(self, cycle):
        """Accept, validate and count an incoming molecule when the internal slot is free, storing it for
        forwarding. Return True exactly when the target count is reached, as Output does.
        """
        if self.in_pipe is None:
            return False

        # Only accept a new molecule when the internal slot is free; counting happens on acceptance
        if self.in_pipe.get(-1, cycle) is not None and self.stored_molecule is None:
            self.stored_molecule = self.in_pipe.get(-1, cycle)
            return super().do_instant_actions(cycle)  # This will remove the molecule from the pipe
        return False

    def move_contents(self, cycle):
        """Forward the stored molecule (if any) into the output pipe once it has room."""
        if self.stored_molecule is not None and self.out_pipe.get(0, cycle) is None:
            self.out_pipe.push(self.stored_molecule, cycle + 1)
            self.stored_molecule = None

    def reset(self):
        super().reset()
        self.stored_molecule = None
        return self
# It's less confusing for output counting and user-facing purposes if this is not an Output subclass
class DisabledOutput(Component):
    """Used by research levels, which actually crash if a wrong output is used unlike assembly reactors."""
    DEFAULT_SHAPE = (1, 1)
    __slots__ = ()

    @property
    def in_pipe(self):
        return self.in_pipes[0]

    def __init__(self, *, _type, posn):
        super().__init__(_type=_type, posn=posn, num_in_pipes=1)

    def do_instant_actions(self, cycle):
        # Technically should check for `in_pipe is None` first, but disabled outputs only appear in research
        # levels, where it should be impossible to not connect to the disabled output - so a crash there
        # would itself be informative
        if self.in_pipe.get(-1, cycle) is not None:
            raise InvalidOutputError("A molecule was passed to a disabled output.")
class OutputPrinter(Component):
    """Displays the last 3 molecules passed to it. For now this is effectively going to be a recycler..."""
    DEFAULT_SHAPE = (2, 3)
    __slots__ = ()

    def __init__(self, component_dict=None, _type=None, posn=None):
        super().__init__(component_dict, _type=_type, posn=posn, num_in_pipes=1)

    @property
    def in_pipe(self):
        return self.in_pipes[0]

    @in_pipe.setter
    def in_pipe(self, p):
        self.in_pipes[0] = p

    def do_instant_actions(self, cycle):
        """Consume (and eventually display) any incoming molecule."""
        if self.in_pipe is None:
            return
        # TODO: Print received molecules when in --debug somehow
        self.in_pipe.pop(cycle)
class PassThroughPrinter(OutputPrinter):
    """Displays the last 3 molecules passed to it and passes them on."""
    __slots__ = 'stored_molecule',

    def __init__(self, component_dict=None, _type=None, posn=None):
        # Deliberately skip OutputPrinter.__init__ so we can also request an output pipe
        super(OutputPrinter, self).__init__(component_dict, _type=_type, posn=posn,
                                            num_in_pipes=1, num_out_pipes=1)
        self.stored_molecule = None  # Single-slot buffer between printing and forwarding

    @property
    def out_pipe(self):
        return self.out_pipes[0]

    @out_pipe.setter
    def out_pipe(self, p):
        self.out_pipes[0] = p

    def do_instant_actions(self, cycle):
        """Forward the stored molecule when the output pipe has room, then accept and 'print' the next
        incoming molecule if the internal slot is free.
        """
        if self.in_pipe is None:
            return

        # If there is a molecule stored (possibly stored just now), put it in the output pipe if possible
        if self.stored_molecule is not None and self.out_pipe.get(0, cycle) is None:
            self.out_pipe.push(self.stored_molecule, cycle + 1)
            self.stored_molecule = None

        # If the stored slot is empty, store the next molecule and 'print' it while we do so
        if self.in_pipe.get(-1, cycle) is not None and self.stored_molecule is None:
            self.stored_molecule = self.in_pipe.get(-1, cycle)
            super().do_instant_actions(cycle)  # This will consume and print the input molecule

    def reset(self):
        super().reset()
        self.stored_molecule = None
        return self
class Recycler(Component):
    """Destroys every molecule arriving on any of its (up to three) input pipes."""
    DEFAULT_SHAPE = (5, 5)
    __slots__ = ()

    def __init__(self, component_dict=None, _type=None, posn=None):
        super().__init__(component_dict=component_dict, _type=_type, posn=posn, num_in_pipes=3)

    def do_instant_actions(self, cycle):
        for in_pipe in self.in_pipes:
            if in_pipe is not None:
                in_pipe.pop(cycle)
# TODO: Ideally this would subclass both deque and Component but doing so gives me
# "multiple bases have instance lay-out conflict". Need to investigate.
class StorageTank(Component):
    """A FIFO buffer: accepts molecules from its input pipe (up to MAX_CAPACITY) and releases them
    to its output pipe in arrival order.
    """
    DEFAULT_SHAPE = (3, 3)
    MAX_CAPACITY = 25  # Molecules beyond this remain waiting in the input pipe
    __slots__ = 'contents',

    def __init__(self, component_dict=None, _type=None, posn=None):
        super().__init__(component_dict=component_dict, _type=_type, posn=posn, num_in_pipes=1, num_out_pipes=1)
        # Left end = newest arrival, right end = oldest (popped first)
        self.contents = collections.deque()

    # Convenience properties
    @property
    def in_pipe(self):
        return self.in_pipes[0]

    @in_pipe.setter
    def in_pipe(self, p):
        self.in_pipes[0] = p

    @property
    def out_pipe(self):
        return self.out_pipes[0]

    @out_pipe.setter
    def out_pipe(self, p):
        self.out_pipes[0] = p

    def do_instant_actions(self, cycle):
        """Pull a waiting molecule from the input pipe into storage if there is room."""
        if self.in_pipe is None:
            return

        if self.in_pipe.get(-1, cycle) is not None and len(self.contents) < self.MAX_CAPACITY:
            self.contents.appendleft(self.in_pipe.pop(cycle))

    def move_contents(self, cycle):
        """Add a molecule to the output pipe if the storage tank is not empty."""
        if self.out_pipe.get(0, cycle) is None and self.contents:
            self.out_pipe.push(self.contents.pop(), cycle + 1)

    @classmethod
    def from_export_str(cls, export_str):
        """Construct a StorageTank (including its output pipe) from an export string.

        The first line must be the COMPONENT line; the remainder is the pipe's export.
        Raises AssertionError on a malformed COMPONENT line.
        """
        # First line must be the COMPONENT line
        component_line, pipe_str = export_str.strip().split('\n', maxsplit=1)
        assert component_line.startswith('COMPONENT:'), "StorageTank.from_export_str expects COMPONENT line included"

        fields = component_line.split(',')
        assert len(fields) == 4, f"Unrecognized component line format:\n{component_line}"

        component_type = fields[0][len('COMPONENT:'):].strip("'")
        component_posn = Position(int(fields[1]), int(fields[2]))

        # Bug fix: __init__'s signature is (component_dict, _type, posn) and has no `out_pipe` kwarg,
        # so the old call `cls(component_type, component_posn, out_pipe=...)` raised TypeError.
        # Construct via the keyword parameters, then attach the parsed pipe.
        tank = cls(_type=component_type, posn=component_posn)
        tank.out_pipes[0] = Pipe.from_export_str(pipe_str)
        return tank

    def reset(self):
        super().reset()
        self.contents = collections.deque()
        return self
class InfiniteStorageTank(StorageTank):
    """A StorageTank with no cap on the number of stored molecules."""
    MAX_CAPACITY = math.inf
class TeleporterInput(Component):
    """The sending half of a teleporter pair: accepts single atoms and hands them to its paired output."""
    DEFAULT_SHAPE = (3, 1)
    __slots__ = 'destination',

    def __init__(self, component_dict):
        super().__init__(component_dict, num_in_pipes=1)
        self.destination = None  # The paired TeleporterOutput; wired up externally after construction

    # Convenience properties
    @property
    def in_pipe(self):
        return self.in_pipes[0]

    @in_pipe.setter
    def in_pipe(self, p):
        self.in_pipes[0] = p

    def do_instant_actions(self, cycle):
        """Note that the teleporter pair behaves differently from a pass-through counter insofar as the pass-through
        counter stores any molecule it receives internally when its output pipe is clogged, whereas the teleporter
        refuses to accept the next molecule until the output pipe is clear (i.e. behaves like a single discontinuous
        pipe that also happens to only allow single atoms through).
        """
        if self.in_pipe is None:
            return

        # Refuse to accept a molecule until the destination's pipe has room
        if self.destination.out_pipe.get(0, cycle) is not None:
            return

        molecule = self.in_pipe.pop(cycle)
        if molecule is None:
            return

        assert len(molecule) == 1, f"An invalid molecule was passed to Teleporter (Input): {molecule}"
        self.destination.out_pipe.push(molecule, cycle + 1)
class TeleporterOutput(Component):
    """The receiving half of a teleporter pair; its paired TeleporterInput pushes atoms directly
    into this component's output pipe (see TeleporterInput.do_instant_actions).
    """
    DEFAULT_SHAPE = (3, 1)
    __slots__ = ()
    def __init__(self, component_dict):
        super().__init__(component_dict, num_out_pipes=1)
    # Convenience properties
    @property
    def out_pipe(self):
        return self.out_pipes[0]
    @out_pipe.setter
    def out_pipe(self, p):
        self.out_pipes[0] = p
class Reactor(Component):
    """A reactor component: a NUM_ROWS x NUM_COLS grid in which two waldos run instruction loops
    to manipulate molecules, with feature cells (bonders, sensors, fusers, splitters, swappers).
    """
    DEFAULT_SHAPE = (4, 4)  # Size in overworld
    # For convenience during float-precision rotation co-ordinates, we consider the center of the
    # top-left cell to be at (0,0), and hence the top-left reactor corner is (-0.5, -0.5).
    # Further, treat the walls as being one atom radius closer, so that we can efficiently check if an atom will collide
    # with them given only the atom's center co-ordinates
    NUM_COLS = 10
    NUM_ROWS = 8
    NUM_WALDOS = 2
    NUM_MOVE_CHECKS = 10  # Number of times to check for collisions during molecule movement
    walls = {UP: -0.5 + ATOM_RADIUS, DOWN: 7.5 - ATOM_RADIUS,
             LEFT: -0.5 + ATOM_RADIUS, RIGHT: 9.5 - ATOM_RADIUS}
    # Names of features as stored in attributes
    FEATURE_NAMES = ('bonders', 'sensors', 'fusers', 'splitters', 'swappers')
    __slots__ = ('in_pipes', 'out_pipes',
                 'waldos', 'molecules',
                 'large_output', *FEATURE_NAMES,
                 'bond_plus_pairs', 'bond_minus_pairs',
                 'quantum_walls_x', 'quantum_walls_y', 'disallowed_instrs',
                 'annotations', 'debug')
    def __init__(self, component_dict=None, _type=None, posn=None):
        """Initialize a reactor from only its component dict, doing e.g. default placements of features. Used for
        levels with preset reactors.
        """
        if component_dict is None:
            component_dict = {}
        # If the reactor type is known, look up its properties and merge them (with lower priority) into the given dict
        _type = _type if _type is not None else component_dict['type']
        if _type in REACTOR_TYPES:
            component_dict = {**REACTOR_TYPES[_type], **component_dict}  # TODO: Use py3.9's dict union operator
        # If the has-bottom attributes are unspecified, they default to True, unlike most attribute flags
        num_in_pipes = 1 if 'has-bottom-input' in component_dict and not component_dict['has-bottom-input'] else 2
        num_out_pipes = 1 if 'has-bottom-output' in component_dict and not component_dict['has-bottom-output'] else 2
        super().__init__(component_dict,
                         _type=_type, posn=posn,
                         num_in_pipes=num_in_pipes, num_out_pipes=num_out_pipes)
        # Place all features
        cur_col = 0  # For simplicity we will put each feature type in its own column(s)
        # Place bonders. Different bonder types go in the same struct so they can share a priority index
        self.bonders = []
        for feature_name, abbrev in (('bonder', '+-'), ('bonder-plus', '+'), ('bonder-minus', '-')):
            if f'{feature_name}-count' in component_dict:
                self.bonders.extend([(Position(cur_col, i), abbrev)
                                     for i in range(component_dict[f'{feature_name}-count'])])
                cur_col += 1
        # Place remaining features
        for attr_name, feature_name, feature_width, default_count in (('sensors', 'sensor', 1, 1),
                                                                      ('fusers', 'fuser', 2, 1),
                                                                      ('splitters', 'splitter', 2, 1),
                                                                      ('swappers', 'teleporter', 1, 2)):
            if f'{feature_name}-count' in component_dict:
                setattr(self, attr_name, [Position(cur_col, i) for i in range(component_dict[f'{feature_name}-count'])])
            elif f'has-{feature_name}' in component_dict and component_dict[f'has-{feature_name}']:
                setattr(self, attr_name, [Position(cur_col, i) for i in range(default_count)])
            else:
                setattr(self, attr_name, [])
            cur_col += feature_width
        # Pre-compute active bond pairs
        self.bond_plus_pairs, self.bond_minus_pairs = self.bond_pairs()
        self.large_output = 'has-large-output' in component_dict and component_dict['has-large-output']
        # Place Waldo starts at default locations
        self.waldos = [Waldo(idx=i, instr_map={Position(4, 1 + 5*i): (None, Instruction(InstructionType.START,
                                                                                        direction=LEFT))})
                       for i in range(self.NUM_WALDOS)]
        # Parse any quantum walls from the reactor definition
        self.quantum_walls_x = []
        self.quantum_walls_y = []
        for quantum_walls_key, out_list in [['quantum-walls-x', self.quantum_walls_x],
                                            ['quantum-walls-y', self.quantum_walls_y]]:
            if quantum_walls_key in component_dict and component_dict[quantum_walls_key] is not None:
                # a/b abstract row/col vs col/row while handling the two quantum wall orientations
                # E.g. "quantum-walls-x": {"row1": [col1, col2, col3]}, "quantum-walls-y": {"col1": [row1, row2, row3]}
                for a, bs in component_dict[quantum_walls_key].items():
                    assert len(bs) > 0, "Unexpected empty list in quantum wall definitions"
                    # Since we consider (0, 0) to be the center of the reactor's top-left cell, all quantum walls are on
                    # the half-coordinate grid edges
                    a = int(a) - 0.5  # Unstringify the json key and convert to reactor co-ordinates
                    # Store consecutive quantum walls as one entity. This will reduce collision check operations
                    bs.sort()
                    wall_min = wall_max = bs[0]
                    for b in bs:
                        # If there was a break in the wall, store the last wall and reset. Else extend the wall
                        if b > wall_max + 1:
                            out_list.append((a, (wall_min - 0.5, wall_max + 0.5)))
                            wall_min = wall_max = b
                        else:
                            wall_max = b
                    # Store the remaining wall
                    out_list.append((a, (wall_min - 0.5, wall_max + 0.5)))
        self.disallowed_instrs = set() if 'disallowed-instructions' not in component_dict else set(component_dict['disallowed-instructions'])
        # Raw ANNOTATION export lines (text notes attached to the reactor), populated by update_from_export_str
        self.annotations = []
        # Store molecules as dict keys to be ordered (preserving Spacechem's hidden 'least recently modified' rule)
        # and to have O(1) add/delete. Dict values are ignored.
        self.molecules = {}
def bond_pairs(self):
"""For each of + and - bond commands, return a tuple of (bonder_A_posn, bonder_B_posn, dirn) triplets,
sorted in priority order.
"""
pair_lists = []
for bond_type in ('+', '-'):
# Store the relevant types of bonders in a dict paired up with their indices for fast lookup/sorting (below)
bonders = {posn: i for i, (posn, bond_types) in enumerate(self.bonders) if bond_type in bond_types}
pair_lists.append(tuple((posn, neighbor_posn, direction)
for posn in bonders
for neighbor_posn, direction in
sorted([(posn + direction, direction)
for direction in (RIGHT, DOWN)
if posn + direction in bonders],
key=lambda x: bonders[x[0]])))
return pair_lists
    def update_from_export_str(self, export_str, update_pipes=True):
        """Update this reactor's features, waldo instruction maps, pipes, and annotations from a solution
        export string, validating member positions and feature counts against this reactor's limits.
        Raises ValueError for disallowed instructions or missing MEMBER lines.
        """
        features = {'bonders': [], 'sensors': [], 'fusers': [], 'splitters': [], 'swappers': []}
        # One map for each waldo, of positions to pairs of arrows (directions) and/or non-arrow instructions
        # TODO: usage might be cleaner if separate arrow_maps and instr_maps... but probably more space
        waldo_instr_maps = [{} for _ in range(self.NUM_WALDOS)]  # Can't use * or else dict gets multi-referenced
        feature_posns = set()  # for verifying features were not placed illegally
        # Break the component string up into its individual sections, while removing empty lines
        component_line, *lines = (s for s in export_str.split('\n') if s)
        # Member lines
        pipes_idx = next((i for i, s in enumerate(lines) if s.startswith('PIPE:')), len(lines))
        member_lines, lines = lines[:pipes_idx], lines[pipes_idx:]
        if not member_lines:
            raise ValueError("Missing MEMBER lines in reactor component")
        # Pipe and annotation lines
        annotations_idx = next((i for i, s in enumerate(lines) if s.startswith('ANNOTATION:')), len(lines))
        pipe_lines, annotation_lines = lines[:annotations_idx], lines[annotations_idx:]
        # Validates COMPONENT line and updates pipes
        super().update_from_export_str(component_line + '\n' + '\n'.join(pipe_lines), update_pipes=update_pipes)
        # Add members (features and instructions)
        for line in member_lines:
            assert line.startswith('MEMBER:'), f"Unexpected line in reactor members: `{line}`"
            fields = line.split(',')
            if len(fields) != 8:
                raise Exception(f"Unrecognized member line format:\n{line}")
            member_name = fields[0][len('MEMBER:'):].strip("'")
            # Game stores directions in degrees, with right = 0, up = -90 (reversed so sin math works on
            # the reversed vertical axis)
            direction = None if int(fields[1]) == -1 else Direction(1 + int(fields[1]) // 90)
            # Red has a field which is 64 for arrows, 128 for instructions
            # The same field in Blue is 16 for arrows, 32 for instructions
            waldo_idx = 0 if int(fields[3]) >= 64 else 1
            position = Position(int(fields[4]), int(fields[5]))
            assert 0 <= position.col < self.NUM_COLS and 0 <= position.row < self.NUM_ROWS, \
                f"Member {member_name} is out-of-bounds"
            if member_name.startswith('feature-'):
                if position in feature_posns:
                    raise Exception(f"Overlapping features at {position}")
                feature_posns.add(position)
                # Sanity check the other half of double-size features
                if member_name in ('feature-fuser', 'feature-splitter'):
                    position2 = position + RIGHT
                    assert position2.col < self.NUM_COLS, f"Member {member_name} is out-of-bounds"
                    if position2 in feature_posns:
                        raise Exception(f"Overlapping features at {position2}")
                    feature_posns.add(position2)
                if member_name == 'feature-bonder':
                    features['bonders'].append((position, '+-'))
                elif member_name == 'feature-sensor':
                    features['sensors'].append(position)
                elif member_name == 'feature-fuser':
                    features['fusers'].append(position)
                elif member_name == 'feature-splitter':
                    features['splitters'].append(position)
                elif member_name == 'feature-tunnel':
                    features['swappers'].append(position)
                elif member_name == 'feature-bonder-plus':
                    features['bonders'].append((position, '+'))
                elif member_name == 'feature-bonder-minus':
                    features['bonders'].append((position, '-'))
                else:
                    raise Exception(f"Unrecognized member type {member_name}")
                continue
            # Make sure this instruction is legal
            if member_name in self.disallowed_instrs:
                raise ValueError(f"Disallowed instruction type: {repr(member_name)}")
            # Since this member is an instr and not a feature, prep a slot in the instr map
            if position not in waldo_instr_maps[waldo_idx]:
                waldo_instr_maps[waldo_idx][position] = [None, None]
            if member_name == 'instr-arrow':
                assert waldo_instr_maps[waldo_idx][position][0] is None, f"Overlapping arrows at {position}"
                waldo_instr_maps[waldo_idx][position][0] = direction
                continue
            assert waldo_instr_maps[waldo_idx][position][1] is None, f"Overlapping commands at {position}"
            if member_name == 'instr-start':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.START, direction=direction)
                continue
            # Note: Some similar instructions have the same name but are sub-typed by the
            # second integer field
            instr_sub_type = int(fields[2])
            if member_name == 'instr-input':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.INPUT, target_idx=instr_sub_type)
            elif member_name == 'instr-output':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.OUTPUT,
                                                                       target_idx=instr_sub_type)
            elif member_name == 'instr-grab':
                if instr_sub_type == 0:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.GRAB_DROP)
                elif instr_sub_type == 1:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.GRAB)
                else:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.DROP)
            elif member_name == 'instr-rotate':
                if instr_sub_type == 0:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.ROTATE,
                                                                           direction=Direction.CLOCKWISE)
                else:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.ROTATE,
                                                                           direction=Direction.COUNTER_CLOCKWISE)
            elif member_name == 'instr-sync':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.SYNC)
            elif member_name == 'instr-bond':
                if instr_sub_type == 0:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.BOND_PLUS)
                else:
                    waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.BOND_MINUS)
            elif member_name == 'instr-sensor':
                # The last CSV field is used by the sensor for the target atomic number
                atomic_num = int(fields[7])
                if atomic_num not in elements_dict:
                    raise Exception(f"Invalid atomic number {atomic_num} on sensor command.")
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.SENSE,
                                                                       direction=direction,
                                                                       target_idx=atomic_num)
            elif member_name == 'instr-fuse':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.FUSE)
            elif member_name == 'instr-split':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.SPLIT)
            elif member_name == 'instr-swap':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.SWAP)
            elif member_name == 'instr-toggle':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.FLIP_FLOP, direction=direction)
            elif member_name == 'instr-debug':
                waldo_instr_maps[waldo_idx][position][1] = Instruction(InstructionType.PAUSE)
            else:
                raise Exception(f"Unrecognized member type {member_name}")
        self.waldos = [Waldo(idx=i, instr_map=waldo_instr_maps[i]) for i in range(self.NUM_WALDOS)]
        # Since bonders of different types get stored together to share a priority idx, check their individual counts
        # match the existing counts
        for bonder_type, feature_name in (('+-', 'bonders'),
                                          ('+', 'bonder-pluses'),
                                          ('-', 'bonder-minuses')):
            actual_count = sum(1 for _, bt in features['bonders'] if bt == bonder_type)
            expected_count = sum(1 for _, bt in self.bonders if bt == bonder_type)
            assert actual_count == expected_count, \
                f"Expected {expected_count} {feature_name} for {self.type} reactor but got {actual_count}"
        # Sanity-check and set features
        for feature_name, posns in features.items():
            assert len(posns) == len(getattr(self, feature_name)), \
                f"Expected {len(getattr(self, feature_name))} {feature_name} for {self.type} reactor but got {len(posns)}"
            setattr(self, feature_name, posns)
        self.bond_plus_pairs, self.bond_minus_pairs = self.bond_pairs()  # Re-precompute bond pairings
        # Store the annotations sorted by output idx (the first field)
        self.annotations = sorted(annotation_lines)
def export_str(self):
"""Represent this reactor in solution export string format."""
# Generate the generic component export, and separate it into the metadata and pipe lines
component_line, *pipes = super().export_str().split('\n', maxsplit=1)
# By SC's convention, MEMBER lines are ordered as waldo starts, then features, then remaining waldo instructions
# Grab the waldo start lines from the front of each waldo's export
waldo_starts, waldo_instrs = [], []
for waldo in self.waldos:
start, *instrs = waldo.export_str().split('\n', maxsplit=1)
waldo_starts.append(start)
waldo_instrs.extend(instrs)
# TODO: Make reactors more agnostic of feature types
features = []
for (posn, bond_types) in self.bonders:
if bond_types == '+-':
feature_name = 'bonder'
elif bond_types == '+':
feature_name = 'bonder-plus'
elif bond_types == '-':
feature_name = 'bonder-minus'
else:
raise Exception("Invalid bonder type in internal data")
features.append(f"MEMBER:'feature-{feature_name}',-1,0,1,{posn.col},{posn.row},0,0")
for posn in self.sensors:
features.append(f"MEMBER:'feature-sensor',-1,0,1,{posn.col},{posn.row},0,0")
for posn in self.fusers:
features.append(f"MEMBER:'feature-fuser',-1,0,1,{posn.col},{posn.row},0,0")
for posn in self.splitters:
features.append(f"MEMBER:'feature-splitter',-1,0,1,{posn.col},{posn.row},0,0")
for posn in self.swappers:
features.append(f"MEMBER:'feature-tunnel',-1,0,1,{posn.col},{posn.row},0,0")
return '\n'.join([component_line, *waldo_starts, *features, *waldo_instrs, *pipes, *self.annotations])
def __hash__(self):
"""Hash of the current reactor state."""
return hash((tuple(molecule.hashable_repr() for molecule in self.molecules),
tuple(self.waldos)))
    def __str__(self, flash_features=True, show_instructions=False):
        """Return a rich-format pretty-print string representing this reactor.

        flash_features: highlight feature cells being activated by a waldo command this cycle.
        show_instructions: draw waldo command glyphs in the cells that hold them.
        """
        # Each cell gets two characters, + 1 space between cells (we'll use that space to show waldos reticles)
        cells = [[[' ', ' '] for _ in range(self.NUM_COLS)] for _ in range(self.NUM_ROWS)]
        borders = [[' ' for _ in range(self.NUM_COLS + 1)] for _ in range(self.NUM_ROWS)]  # Waldos and zone edges
        # Add faint lines at edges of input/output zones (horizontal border excluded)
        for c in (4, 6):
            for r in range(self.NUM_ROWS):
                if borders[r][c] == ' ':  # Don't overwrite waldos
                    borders[r][c] = '[light_slate_grey]│[/]'
        # Add faint traces of the waldo cmd paths
        waldo_traces = [waldo.trace_path(num_cols=self.NUM_COLS, num_rows=self.NUM_ROWS) for waldo in self.waldos]
        for i, (waldo, color) in enumerate(zip(self.waldos, ('dim red', 'dim blue'))):
            for posn, dirns in waldo_traces[i].items():
                c, r = posn
                path_char = Waldo.dirns_to_char[frozenset(dirns)]
                # Fill in this waldo's half of the cell
                cells[r][c][i] = f'[{color}]{path_char}[/]'
                # If the other waldo has nothing to draw in this cell and our directions include right (red) or left
                # (blue) fill in the other waldo's spot with an extending line
                if i == 0 and RIGHT in dirns and posn not in waldo_traces[1]:
                    cells[r][c][1 - i] = f'[{color}]─[/]'
                elif i == 1 and LEFT in dirns and posn not in waldo_traces[0]:
                    cells[r][c][1 - i] = f'[{color}]─[/]'
                # Extend a line through the border to our left for as far as won't cross or touch the other waldo
                if (borders[r][c] == ' '
                        and LEFT in dirns
                        and (posn not in waldo_traces[1 - i] or i == 0)
                        and (posn + LEFT not in waldo_traces[1 - i]
                             or RIGHT not in waldo_traces[1 - i][posn + LEFT]  # Ok to use border if we won't touch them
                             or i == 1)):
                    borders[r][c] = f'[{color}]─[/]'
                # Vice versa for right
                if (borders[r][c + 1] == ' '
                        and RIGHT in dirns
                        and (posn not in waldo_traces[1 - i] or i == 1)  # Blue can cheat
                        and (posn + RIGHT not in waldo_traces[1 - i]
                             or LEFT not in waldo_traces[1 - i][posn + RIGHT]  # Ok to use border if we won't touch them
                             or i == 0)):
                    borders[r][c + 1] = f'[{color}]─[/]'
        # Add waldo instructions (priority over waldo paths)
        if show_instructions:
            for i, (waldo, color) in enumerate(zip(self.waldos, ('red', 'blue'))):
                for (c, r), (_, cmd) in waldo.instr_map.items():
                    if cmd is not None:
                        cells[r][c][i] = f'[{color}]{cmd}[/]'
        # Add waldo reticles
        for i, (waldo, color) in enumerate(zip(self.waldos, ('bold red', 'bold blue'))):
            c, r = waldo.position
            waldo_chars = ('|', '|') if waldo.molecule is None else ('(', ')')
            for j in range(2):
                # Color purple where two waldos overlap
                mixed_color = 'bold purple' if '[bold red]' in borders[r][c + j] else color
                borders[r][c + j] = f'[{mixed_color}]{waldo_chars[j]}[/]'
        # Map out the molecules in the reactor (priority over waldo paths/cmds)
        for molecule in self.molecules:
            for (c, r), atom in molecule.atom_map.items():
                # Round co-ordinates in case we are printing mid-rotate
                cell = cells[round(r)][round(c)]
                cell[0] = atom.element.symbol[0]
                if len(atom.element.symbol) >= 2:
                    cell[1] = atom.element.symbol[1]
        # Use grey background for feature cells (bonders, fusers, etc.)
        # Though they're stored together for priority reasons in Reactor.bonders, color + and - bonders separately
        # from regular bonders so as to distinguish them during + vs - commands
        feature_colors = {k: 'light_slate_grey'
                          for k in list(Reactor.FEATURE_NAMES) + ['bonder_pluses', 'bonder_minuses']}
        input_colors = {}
        output_colors = {}
        # Flash the appropriate feature background cells on waldo input, output, bond +/-, etc.
        if flash_features:
            for i, (waldo, waldo_color) in enumerate(zip(self.waldos, ('red', 'blue'))):
                if waldo.position not in waldo.instr_map:
                    continue
                cmd = waldo.instr_map[waldo.position][1]
                if cmd is None:
                    continue
                if cmd.type == InstructionType.INPUT:
                    input_colors[cmd.target_idx] = waldo_color if cmd.target_idx not in input_colors else 'purple'
                elif cmd.type == InstructionType.OUTPUT:
                    output_colors[cmd.target_idx] = waldo_color if cmd.target_idx not in output_colors else 'purple'
                elif cmd.type == InstructionType.BOND_PLUS:
                    feature_colors['bonders'] = waldo_color if feature_colors['bonders'] != 'red' else 'purple'
                    feature_colors['bonder_pluses'] = waldo_color if feature_colors['bonder_pluses'] != 'red' else 'purple'
                elif cmd.type == InstructionType.BOND_MINUS:
                    feature_colors['bonders'] = waldo_color if feature_colors['bonders'] != 'red' else 'purple'
                    feature_colors['bonder_minuses'] = waldo_color if feature_colors['bonder_minuses'] != 'red' else 'purple'
                elif cmd.type == InstructionType.FUSE:
                    feature_colors['fusers'] = waldo_color if feature_colors['fusers'] != 'red' else 'purple'
                elif cmd.type == InstructionType.SPLIT:
                    feature_colors['splitters'] = waldo_color if feature_colors['splitters'] != 'red' else 'purple'
                elif cmd.type == InstructionType.SWAP:
                    feature_colors['swappers'] = waldo_color if feature_colors['swappers'] != 'red' else 'purple'
                elif cmd.type == InstructionType.SENSE:
                    feature_colors['sensors'] = waldo_color if feature_colors['sensors'] != 'red' else 'purple'
        # Color background of feature cells
        for feature_name, feature_color in feature_colors.items():
            # Extract individual bonder types from their shared struct
            if feature_name == 'bonders':
                cell_posns = set(p for p, bond_types in self.bonders if bond_types == '+-')
            elif feature_name == 'bonder_pluses':
                cell_posns = set(p for p, bond_types in self.bonders if bond_types == '+')
            elif feature_name == 'bonder_minuses':
                cell_posns = set(p for p, bond_types in self.bonders if bond_types == '-')
            else:
                cell_posns = set(getattr(self, feature_name))
            for c, r in cell_posns:
                cells[r][c][0] = f'[on {feature_color}]{cells[r][c][0]}[/]'
                cells[r][c][1] = f'[on {feature_color}]{cells[r][c][1]}[/]'
                # Add the second posn and the border for double-length features
                if feature_name in ('fusers', 'splitters'):
                    # Merging these would be nicer to rich.print but can get screwy if any cell is overridden after
                    borders[r][c + 1] = f'[on {feature_color}]{borders[r][c + 1]}[/]'
                    cells[r][c + 1][0] = f'[on {feature_color}]{cells[r][c + 1][0]}[/]'
                    cells[r][c + 1][1] = f'[on {feature_color}]{cells[r][c + 1][1]}[/]'
                # Fill in the borders of adjacent bonders
                elif 'bonder' in feature_name:
                    if (c + 1, r) in cell_posns:
                        borders[r][c + 1] = f'[on {feature_color}]{borders[r][c + 1]}[/]'
        # Color background of inputs/outputs when activated, excepting already-colored features
        for input_idx, input_color in input_colors.items():
            for c, r in itertools.product(range(4), (range(4) if input_idx == 0 else range(4, 8))):
                if not cells[r][c][0].startswith('[on '):
                    cells[r][c][0] = f'[on {input_color}]{cells[r][c][0]}[/]'
                    cells[r][c][1] = f'[on {input_color}]{cells[r][c][1]}[/]'
            for c, r in itertools.product(range(1, 4), (range(4) if input_idx == 0 else range(4, 8))):
                if not borders[r][c].startswith('[on '):
                    borders[r][c] = f'[on {input_color}]{borders[r][c]}[/]'
        for output_idx, output_color in output_colors.items():
            for c, r in itertools.product(range(6, 10), (range(4) if output_idx == 0 else range(4, 8))):
                if not cells[r][c][0].startswith('[on '):
                    cells[r][c][0] = f'[on {output_color}]{cells[r][c][0]}[/]'
                    cells[r][c][1] = f'[on {output_color}]{cells[r][c][1]}[/]'
            for c, r in itertools.product(range(7, 10), (range(4) if output_idx == 0 else range(4, 8))):
                if not borders[r][c].startswith('[on '):
                    borders[r][c] = f'[on {output_color}]{borders[r][c]}[/]'
        result = f" {self.NUM_COLS * '___'}_ \n"
        for r in range(self.NUM_ROWS):
            result += f"│{''.join(b + c[0] + c[1] for b, c in zip(borders[r], cells[r] + [['', '']]))}│\n"
        result += f" {self.NUM_COLS * '‾‾‾'}‾ "
        return result
def do_instant_actions(self, cycle):
for waldo in self.waldos:
self.exec_instrs(waldo, cycle)
    def move_contents(self, cycle):
        """Move all waldos in this reactor and any molecules they are holding.

        Three paths, in decreasing cost:
        * at least one waldo is rotating a molecule: all held molecules are advanced in
          NUM_MOVE_CHECKS fine steps with a full collision check per step;
        * only straight moves: molecules jump a whole cell with a single lazy collision
          check at the destination (plus a mid-step quantum-wall check when needed);
        * nothing held is moving: only waldo positions/stall flags are updated.

        Raises:
            ReactionError: on molecule/molecule or molecule/wall collisions, or when the
                two waldos pull a shared molecule apart.

        Note: `cycle` is not used in this body; it mirrors do_instant_actions' signature.
        """
        # If the waldo is facing a wall, mark it as stalled (may also be stalled due to sync, input, etc.)
        for waldo in self.waldos:
            if ((waldo.direction == UP and waldo.position.row == 0)
                    or (waldo.direction == DOWN and waldo.position.row == 7)
                    or (waldo.direction == LEFT and waldo.position.col == 0)
                    or (waldo.direction == RIGHT and waldo.position.col == 9)):
                waldo.is_stalled = True
        # If any waldo is about to rotate a molecule, don't skimp on collision checks
        # Note that a waldo might be marked as rotating (and stalled accordingly) while holding nothing, in the case
        # that red hits a rotate and then has its atom fused or swapped away by blue in the same cycle
        # Hence the waldo.molecule is not None check is necessary
        if any(waldo.is_rotating and waldo.molecule is not None for waldo in self.waldos):
            # If both waldos are holding the same molecule and either of them is rotating, a crash occurs
            # (even if they're in the same position and rotating the same direction)
            if self.waldos[0].molecule is self.waldos[1].molecule:
                raise ReactionError("Molecule pulled apart")
            # Otherwise, move each waldo's molecule partway at a time and check for collisions each time
            step_radians = math.pi / (2 * self.NUM_MOVE_CHECKS)
            step_distance = 1 / self.NUM_MOVE_CHECKS
            for _ in range(self.NUM_MOVE_CHECKS):
                # Move all molecules currently being held by a waldo forward a step
                for waldo in self.waldos:
                    if waldo.molecule is not None and not waldo.is_stalled:
                        waldo.molecule.move(waldo.direction, distance=step_distance)
                    elif waldo.is_rotating:
                        waldo.molecule.rotate_fine(pivot_pos=waldo.position,
                                                   direction=waldo.cur_cmd().direction,
                                                   radians=step_radians)
                # After moving all molecules, check each rotated molecule for collisions with walls or other molecules
                # Though all molecules had to move, only the rotating one(s) needs to do checks at each step, since we
                # know the other waldo will only have static molecules left to check against, and translation movements
                # can't clip through a static atom without ending on top of it
                # Note: This only holds true for <= 2 waldos and since we checked that at least one waldo is rotating
                for waldo in self.waldos:
                    if waldo.is_rotating:
                        self.check_collisions(waldo.molecule)
            # After completing all steps of the movement, convert moved molecules back to integer co-ordinates and do
            # any final checks/updates
            for waldo in self.waldos:
                if waldo.molecule is not None and not waldo.is_stalled:
                    waldo.molecule.round_posns()
                    # Do the final check we skipped for non-rotating molecules
                    self.check_collisions_lazy(waldo.molecule)
                elif waldo.is_rotating:
                    waldo.molecule.round_posns()
                    # Rotate atom bonds
                    waldo.molecule.rotate_bonds(waldo.cur_cmd().direction)
        elif any(waldo.molecule is not None and not waldo.is_stalled for waldo in self.waldos):
            # If we are not doing any rotates, we can skip the full collision checks
            # Non-rotating molecules can cause collisions/errors if:
            # * The waldos are pulling a molecule apart
            # * OR The final destination of a moved molecule overlaps any other molecule after the move
            # * OR The final destination of a moved molecule overlaps the initial position of another moving molecule,
            #      and the offending waldos were not moving in the same direction
            if self.waldos[0].molecule is self.waldos[1].molecule:
                # Given that we know one is moving, if the waldos share a molecule they must move in the same direction
                if (any(waldo.is_stalled for waldo in self.waldos)
                        or self.waldos[0].direction != self.waldos[1].direction):
                    raise ReactionError("A molecule has been grabbed by both waldos and pulled apart.")
                # Only mark one waldo as moving a molecule so we don't move their molecule twice
                waldos_moving_molecules = [self.waldos[0]]
            else:
                waldos_moving_molecules = [w for w in self.waldos if not w.is_stalled and w.molecule is not None]
                # (skipped if both waldos holding same molecule)
                # Check if a molecule being moved will bump into the back of another moving molecule
                if (len(waldos_moving_molecules) == 2 and self.waldos[0].direction != self.waldos[1].direction):
                    for waldo in self.waldos:
                        # Intersect the target positions of this waldo's molecule with the current positions of the
                        # other waldo's molecules
                        other_waldo = self.waldos[1 - waldo.idx]
                        target_posns = set(posn + waldo.direction for posn in waldo.molecule.atom_map)
                        if not target_posns.isdisjoint(other_waldo.molecule.atom_map):
                            raise ReactionError("Collision between molecules")
            # Move all molecules
            for waldo in waldos_moving_molecules:
                # If we're moving perpendicular to any quantum walls, check for collisions with them
                if ((self.quantum_walls_y and waldo.direction in (LEFT, RIGHT))
                        or (self.quantum_walls_x and waldo.direction in (UP, DOWN))):
                    # Move the molecule halfway, check for quantum wall collisions, then move the last half
                    waldo.molecule.move(waldo.direction, distance=0.5)
                    self.check_quantum_wall_collisions(waldo.molecule)
                    waldo.molecule.move(waldo.direction, distance=0.5)
                    waldo.molecule.round_posns()
                else:
                    waldo.molecule.move(waldo.direction)
            # Perform collision checks against the moved molecules
            for waldo in self.waldos:
                if waldo.molecule is not None and not waldo.is_stalled:
                    self.check_collisions_lazy(waldo.molecule)
        # Move waldos and mark them as no longer stalled. Note that is_rotated must be left alone to tell it not to
        # rotate twice
        for waldo in self.waldos:
            if not waldo.is_stalled:
                waldo.position += waldo.direction
            waldo.is_stalled = False
def check_molecule_collisions_lazy(self, molecule):
"""Raise an exception if the given molecule collides with any other molecules.
Assumes integer co-ordinates in all molecules.
"""
for other_molecule in self.molecules:
molecule.check_collisions_lazy(other_molecule) # Implicitly ignores self
def check_wall_collisions(self, molecule):
"""Raise an exception if the given molecule collides with any walls."""
if not all(self.walls[UP] < p.row < self.walls[DOWN]
and self.walls[LEFT] < p.col < self.walls[RIGHT]
for p in molecule.atom_map):
raise ReactionError("A molecule has collided with a wall")
def check_quantum_wall_collisions(self, molecule):
for r, (c1, c2) in self.quantum_walls_x:
for p in molecule.atom_map:
# If the atom's center (p) is in line with the wall, check its not too close to the wall segment
if c1 < p.col < c2:
if abs(p.row - r) < ATOM_RADIUS:
raise ReactionError("A molecule has collided with a quantum wall")
# If p is not in line with the wall, we just need to make sure it's not near the wall's endpoints
elif max((p.col - c1)**2, (p.col - c2)**2) + (p.row - r)**2 < ATOM_RADIUS**2:
raise ReactionError("A molecule has collided with a quantum wall")
for c, (r1, r2) in self.quantum_walls_y:
for p in molecule.atom_map:
# If the atom's center (p) is in line with the wall, check its not too close to the wall segment
if r1 < p.row < r2:
if abs(p.col - c) < ATOM_RADIUS:
raise ReactionError("A molecule has collided with a quantum wall")
# If p is not in line with the wall, we just need to make sure it's not near the wall's endpoints
elif max((p.row - r1)**2, (p.row - r2)**2) + (p.col - c)**2 < ATOM_RADIUS**2:
raise ReactionError("A molecule has collided with a quantum wall")
def check_collisions_lazy(self, molecule):
"""Raise an exception if the given molecule collides with any other molecules or walls.
Assumes integer co-ordinates in all molecules.
"""
self.check_molecule_collisions_lazy(molecule)
self.check_wall_collisions(molecule)
# Quantum wall collision checks may be skipped since they should only lie on grid edges
def check_collisions(self, molecule):
"""Check that the given molecule isn't colliding with any walls or other molecules.
Raise an exception if it is.
"""
for other_molecule in self.molecules:
molecule.check_collisions(other_molecule) # Implicitly ignores self
self.check_wall_collisions(molecule)
self.check_quantum_wall_collisions(molecule)
    def exec_instrs(self, waldo, cycle):
        """Execute the arrow and/or command in `waldo`'s current cell.

        No-op when the cell has no entry in the waldo's instruction map. The arrow (if any)
        is applied before the command. May set waldo.is_stalled / is_rotating as side
        effects, and raises PauseException on a Pause command.
        """
        if waldo.position not in waldo.instr_map:
            return
        arrow_direction, cmd = waldo.instr_map[waldo.position]
        # Update the waldo's direction based on any arrow in this cell
        if arrow_direction is not None:
            waldo.direction = arrow_direction
        # Execute the non-arrow instruction
        if cmd is None:
            return
        elif cmd.type == InstructionType.INPUT:
            self.input(waldo, cmd.target_idx, cycle)
        elif cmd.type == InstructionType.OUTPUT:
            self.output(waldo, cmd.target_idx, cycle)
        elif cmd.type == InstructionType.GRAB:
            self.grab(waldo)
        elif cmd.type == InstructionType.DROP:
            self.drop(waldo)
        elif cmd.type == InstructionType.GRAB_DROP:
            if waldo.molecule is None:
                self.grab(waldo)
            else:
                self.drop(waldo)
        elif cmd.type == InstructionType.ROTATE:
            # If we are holding a molecule and weren't just rotating, start rotating
            # In all other cases, stop rotating
            waldo.is_rotating = waldo.is_stalled = waldo.molecule is not None and not waldo.is_rotating
        elif cmd.type == InstructionType.BOND_PLUS:
            self.bond_plus()
        elif cmd.type == InstructionType.BOND_MINUS:
            self.bond_minus()
        elif cmd.type == InstructionType.SYNC:
            # Mark this waldo as stalled if both waldos aren't on a Sync
            other_waldo = self.waldos[1 - waldo.idx]
            waldo.is_stalled = other_waldo.cur_cmd() is None or other_waldo.cur_cmd().type != InstructionType.SYNC
        elif cmd.type == InstructionType.FUSE:
            self.fuse()
        elif cmd.type == InstructionType.SPLIT:
            self.split()
        elif cmd.type == InstructionType.SENSE:
            # Redirect the waldo if any sensor holds an atom of the sensed element
            for posn in self.sensors:
                molecule = self.get_molecule(posn)
                if molecule is not None and molecule.atom_map[posn].element.atomic_num == cmd.target_idx:
                    waldo.direction = cmd.direction
                    break
        elif cmd.type == InstructionType.FLIP_FLOP:
            # Update the waldo's direction if the flip-flop is on
            if waldo.flipflop_states[waldo.position]:
                waldo.direction = cmd.direction
            waldo.flipflop_states[waldo.position] = not waldo.flipflop_states[waldo.position] # ...flip it
        elif cmd.type == InstructionType.SWAP:
            self.swap()
        elif cmd.type == InstructionType.PAUSE:
            raise PauseException("Pause command encountered")
    def input(self, waldo, input_idx, cycle):
        """Pull the next molecule from input pipe `input_idx` into the reactor.

        Stalls `waldo` (without raising) when the pipe is missing or has no molecule
        available this cycle. Shifts the incoming molecule's co-ordinates into the
        correct input zone, then checks it for collisions.
        """
        # If there is no such pipe or it has no molecule available, stall the waldo
        if (input_idx > len(self.in_pipes) - 1
                or self.in_pipes[input_idx] is None
                or self.in_pipes[input_idx].get(-1, cycle) is None):
            waldo.is_stalled = True
            return
        # Grab the molecule from the appropriate pipe or stall if no such molecule (or no pipe)
        new_molecule = self.in_pipes[input_idx].pop(cycle)
        sample_posn = next(iter(new_molecule.atom_map))
        # If the molecule came from a previous reactor, shift its columns from output to input co-ordinates
        # We don't do this immediately on output to save a little work when the molecule is going to an output component
        # anyway (since output checks are agnostic of absolute co-ordinates)
        if sample_posn.col >= 6:
            new_molecule.move(LEFT, 6)
        # Update the molecule's co-ordinates to those of the correct zone if it came from an opposite output zone
        if input_idx == 0 and sample_posn.row >= 4:
            new_molecule.move(UP, 4)
        elif input_idx == 1 and sample_posn.row < 4:
            new_molecule.move(DOWN, 4)
        self.molecules[new_molecule] = None # Dummy value
        self.check_molecule_collisions_lazy(new_molecule)
    def output(self, waldo, output_idx, cycle):
        """Attempt to output one ungrabbed molecule lying in output zone `output_idx`.

        No-op when the output pipe doesn't exist. Stalls `waldo` while any outputtable
        molecule remains in the zone, whether or not one was pushed this cycle.
        """
        # If the there is no such output pipe (e.g. assembly reactor, large output research), do nothing
        if (output_idx > len(self.out_pipes) - 1
                or self.out_pipes[output_idx] is None):
            return
        # TODO: It'd be nice to only have to calculate this for molecules that have been
        #       debonded or dropped, etc. However, the cost of pre-computing it every time
        #       we do such an action is probably not worth the cost of just doing it once
        #       over all molecules whenever output is called.
        # TODO 2: On the other hand, solutions with a waldo wall-stalling on output just
        #         took a significant performance hit
        # This manual iter is a little awkward but helps ensure we don't iterate more than once into this dict while
        # we're deleting from it
        molecules_in_zone = iter(molecule for molecule in self.molecules
                                 # Ignore grabbed molecules
                                 if not any(waldo.molecule is molecule for waldo in self.waldos)
                                 and molecule.output_zone_idx(large_output=self.large_output) == output_idx)
        molecule = next(molecules_in_zone, None)
        # Try to output the first molecule in the zone if an output hasn't already been done this cycle
        if molecule is not None:
            # Most components (input, storage tanks, quantum pipes, etc.) behave as though the molecule gets put into
            # their pipe in the movement phase, with an animation of the molecule moving out of the component, and the
            # molecule not being available to the downstream component until the next cycle's instant actions phase.
            # Reactors behave differently, with the spawned molecule appearing directly in the next segment without
            # movement animation, and being available immediately during the same instant actions phase for the next
            # component to consume. To support this with the updated dependence of pipes on the cycle count, treat the
            # molecule as though it was pushed during the previous movement phase (-1 to cycle count since instant
            # actions come before movement in a cycle).
            # TODO: Should figure out if there's a way to make this less awkward.
            if self.out_pipes[output_idx].get(0, cycle) is None:
                # Put the molecule in the pipe and remove it from the reactor
                self.out_pipes[output_idx].push(molecule, cycle)
                # Look for any other outputable molecule
                molecule = next(molecules_in_zone, None)
                # Remove the just-output molecule from the reactor (AFTER finishing with the dict iterator)
                del self.molecules[self.out_pipes[output_idx].get(0, cycle)]
        # If there is any output(s) remaining in this zone (regardless of whether we outputted), stall this waldo
        waldo.is_stalled = molecule is not None
def get_molecule(self, position):
"""Select the molecule at the given grid position, or None if no such molecule.
Used by Grab, Bond+/-, Fuse, etc.
"""
return next((molecule for molecule in self.molecules if position in molecule), None)
def grab(self, waldo):
if waldo.molecule is None:
waldo.molecule = self.get_molecule(waldo.position)
def drop(self, waldo):
waldo.molecule = None # Remove the reference to the molecule
    def bond_plus(self):
        """Apply every bonder+ pair: increment the bond between adjacent bonders that both
        hold atoms (capped at bond size 3 and each atom's max bond count), merging the two
        molecules when the new bond joins them. Mutated molecules are moved to the back of
        the reactor's priority order, matching SpaceChem's quirks.
        """
        for position, neighbor_posn, direction in self.bond_plus_pairs:
            # Identify the molecule on each bonder (may be same, doesn't matter for now)
            molecule_A = self.get_molecule(position)
            if molecule_A is None:
                continue
            molecule_B = self.get_molecule(neighbor_posn)
            if molecule_B is None:
                continue
            atom_A = molecule_A[position]
            # If the bond being increased is already at the max bond size of 3, don't do
            # anything. However, due to weirdness of Spacechem's bonding algorithm, we still
            # mark the molecule as modified below
            if direction not in atom_A.bonds or atom_A.bonds[direction] != 3:
                atom_B = molecule_B[neighbor_posn]
                # Do nothing if either atom is at (or above) its bond limit (spacechem does not mark any molecules as
                # modified in this case unless the bond was size 3)
                if (sum(atom_A.bonds.values()) >= atom_A.element.max_bonds
                        or sum(atom_B.bonds.values()) >= atom_B.element.max_bonds):
                    continue
                direction_B = direction.opposite()
                if direction not in atom_A.bonds:
                    atom_A.bonds[direction] = 0
                atom_A.bonds[direction] += 1
                if direction_B not in atom_B.bonds:
                    atom_B.bonds[direction_B] = 0
                atom_B.bonds[direction_B] += 1
            if molecule_A is molecule_B:
                # Mark molecule as modified by popping it to the back of the reactor's queue
                del self.molecules[molecule_A]
                self.molecules[molecule_A] = None # dummy value
            else:
                # Add the smaller molecule to the larger one (faster), then delete the smaller
                # and mark the larger as modified
                molecules = [molecule_A, molecule_B]
                molecules.sort(key=len)
                molecules[1] += molecules[0]
                # Also make sure that any waldos holding the to-be-deleted molecule are updated
                # to point at the combined molecule
                for waldo in self.waldos:
                    if waldo.molecule is molecules[0]:
                        waldo.molecule = molecules[1]
                del self.molecules[molecules[0]]
                del self.molecules[molecules[1]]
                self.molecules[molecules[1]] = None # dummy value
    def bond_minus(self):
        """Apply every bonder- pair: decrement the bond (if any) between adjacent bonders.

        If removing the bond breaks the molecule in two, the split-off piece is added to
        the reactor and any waldo holding that piece is re-pointed to it.
        """
        for position, _, direction in self.bond_minus_pairs:
            molecule = self.get_molecule(position)
            # Skip if there isn't a molecule with a bond over this pair
            if molecule is None or direction not in molecule[position].bonds:
                continue
            # Now that we know for sure the molecule will be mutated, debond the molecule
            # and check if this broke the molecule in two
            split_off_molecule = molecule.debond(position, direction)
            # Mark the molecule as modified
            del self.molecules[molecule]
            self.molecules[molecule] = None # Dummy value
            # If a new molecule broke off, add it to the reactor molecules
            if split_off_molecule is not None:
                self.molecules[split_off_molecule] = None # Dummy value
                # If the molecule got broken apart, ensure any waldos holding it are now holding
                # the correct piece of it
                for waldo in self.waldos:
                    if waldo.molecule is molecule and waldo.position in split_off_molecule:
                        waldo.molecule = split_off_molecule
def defrag_molecule(self, molecule, posn):
"""Given a molecule that has had some of its bonds broken from the given position, update reactor.molecules
based on any molecules that broke off. Note that this always at least moves the molecule to the back of the
priority queue, even if it did not break apart (this should be safe since defrag should only be called when the
molecule is modified).
"""
# Update the reactor molecules based on how the molecule broke apart
del self.molecules[molecule]
for new_molecule in molecule.defrag(posn):
self.molecules[new_molecule] = None # Dummy value
# Update the references of any waldos that were holding the molecule
for waldo in self.waldos:
if waldo.molecule is molecule and waldo.position in new_molecule:
waldo.molecule = new_molecule
def delete_atom_bonds(self, posn):
"""Helper used by fuse and swap to remove all bonds from an atom and break up its molecule if needed.
If no atom at the given position, does nothing.
"""
molecule = self.get_molecule(posn)
if molecule is None:
return
atom = molecule.atom_map[posn]
for dirn in CARDINAL_DIRECTIONS:
if dirn in atom.bonds:
neighbor_atom = molecule.atom_map[posn + dirn]
del atom.bonds[dirn]
del neighbor_atom.bonds[dirn.opposite()]
self.defrag_molecule(molecule, posn)
    def reduce_excess_bonds(self, posn):
        """Helper used by fuse and split to reduce bonds on a mutated atom down to its new max count, and break up its
        molecule if needed. The reduction order (largest bonds first, scanning R, L, U, D) is
        deliberately fixed to match observed SpaceChem behavior.
        """
        molecule = self.get_molecule(posn)
        atom = molecule.atom_map[posn]
        excess_bonds = sum(atom.bonds.values()) - atom.element.max_bonds
        max_bond_size = max(atom.bonds.values(), default=0)
        bonds_broke = False
        neighbor_atoms = {} # So we don't have to repeatedly incur the get_molecule cost
        while excess_bonds > 0:
            # The order here is deliberately hardcoded to match empirical observations of SpaceChem's behavior
            for dirn in (RIGHT, LEFT, UP, DOWN):
                # Reduce triple bonds first, then double bonds, etc.
                if dirn in atom.bonds and atom.bonds[dirn] == max_bond_size:
                    if dirn not in neighbor_atoms:
                        neighbor_posn = posn + dirn
                        neighbor_atoms[dirn] = self.get_molecule(neighbor_posn)[neighbor_posn]
                    atom.bonds[dirn] -= 1
                    neighbor_atoms[dirn].bonds[dirn.opposite()] -= 1
                    if atom.bonds[dirn] == 0:
                        del atom.bonds[dirn]
                        del neighbor_atoms[dirn].bonds[dirn.opposite()]
                        bonds_broke = True
                    excess_bonds -= 1
                    if excess_bonds == 0:
                        break
            # All bonds of the current size have been reduced; move on to the next size down
            max_bond_size -= 1
        if bonds_broke:
            # Update the reactor molecules based on how the molecule broke apart (if at all)
            self.defrag_molecule(molecule, posn)
        else:
            # If no bonds broke we can save a little work and just directly mark the molecule as updated
            del self.molecules[molecule]
            self.molecules[molecule] = None # Dummy value
    def fuse(self):
        """Apply every fuser: combine the atoms on a fuser's left and right cells into one.

        The left atom is consumed (its bonds removed, its single-atom molecule deleted and
        dropped from any waldo) and its atomic number is added to the right atom, whose
        bonds are then reduced if they exceed the fused element's max. Skipped when either
        cell is empty or the fusion would exceed atomic number 109.
        """
        for left_posn in self.fusers:
            left_molecule = self.get_molecule(left_posn)
            if left_molecule is None:
                continue
            right_posn = left_posn + RIGHT
            right_molecule = self.get_molecule(right_posn)
            if right_molecule is None:
                continue
            left_atom = left_molecule[left_posn]
            right_atom = right_molecule[right_posn]
            # If the target atoms can't be legally fused, do nothing
            fused_atomic_num = left_atom.element.atomic_num + right_atom.element.atomic_num
            if fused_atomic_num > 109:
                continue
            # Remove all bonds from the left atom
            self.delete_atom_bonds(left_posn)
            # Delete the left molecule (now just a single atom). Note that the molecule handle will have changed after
            # delete_atom_bonds. The right atom may be part of a new molecule but its handle shouldn't have changed
            left_molecule = self.get_molecule(left_posn)
            for waldo in self.waldos:
                if waldo.molecule is left_molecule:
                    waldo.molecule = None
            del self.molecules[left_molecule]
            # Update the right atom's element, reducing its bonds as needed
            right_atom.element = elements_dict[fused_atomic_num]
            self.reduce_excess_bonds(right_posn)
    def split(self):
        """Apply every splitter: halve the atom on the splitter (integer division), spawning
        the removed half as a new single-atom molecule in the cell to the right.

        Skipped when the splitter cell is empty or holds hydrogen (atomic number <= 1).
        """
        for splitter_posn in self.splitters:
            split_molecule = self.get_molecule(splitter_posn)
            if split_molecule is None:
                continue
            split_atom = split_molecule[splitter_posn]
            if split_atom.element.atomic_num <= 1:
                continue
            # Split the left atom
            new_atomic_num = split_atom.element.atomic_num // 2
            split_atom.element = elements_dict[split_atom.element.atomic_num - new_atomic_num]
            self.reduce_excess_bonds(splitter_posn) # Reduce the left atom's bonds if its new bond count is too low
            # Lastly create the new molecule (and check for collisions in its cell)
            new_molecule = Molecule(atom_map={splitter_posn + RIGHT: Atom(element=elements_dict[new_atomic_num])})
            self.check_molecule_collisions_lazy(new_molecule)
            self.molecules[new_molecule] = None # Dummy value
    def swap(self):
        """Swap atoms between swappers. Note that the order of operations here was carefully chosen to modify the
        internal priority order of reactor molecules the same way that SpaceChem does.
        Each swapper's atom is first fully debonded, then shifted to the next swapper in
        the list (cyclically, via the (i + 1) % len index).
        """
        # Debond all atoms on swappers from their neighbors
        for posn in self.swappers:
            self.delete_atom_bonds(posn) # Does nothing if no atom on the swapper
        # Swap the atoms, ensuring that waldos don't drop if their held atom is replaced
        # Make sure we get all the molecules to be swapped before we mess up get_molecule by moving them
        for i, (posn, molecule) in enumerate([(p, self.get_molecule(p)) for p in self.swappers]):
            next_posn = self.swappers[(i + 1) % len(self.swappers)]
            if molecule is not None:
                # Update the molecule's atom position and move it to the back of the reactor priority queue
                molecule.atom_map[next_posn] = molecule.atom_map[posn]
                del molecule.atom_map[posn]
                del self.molecules[molecule]
                self.molecules[molecule] = None # Dummy value
            # If there are any waldos holding something on the next swapper, update their contents
            for waldo in self.waldos:
                if waldo.position == next_posn and waldo.molecule is not None:
                    waldo.molecule = molecule # May be None, which handles the no molecule case correctly
def reset(self):
super().reset()
self.molecules = {}
for waldo in self.waldos:
waldo.reset()
return self
# Component used in defense levels to damage a boss
# In order to re-use as much existing logic as possible, we can treat these like outputs, which damage the boss when
# completed if particular conditions are met.
# 'drag-weapon-canister': the crash canister (see CrashCanister below).
# 'drag-weapon-consumer': despite the weapon name, appears to behave as just a typed recycler (unconfirmed).
class Weapon(Output):
    """Base class for defense-level weapon components.

    Treated like outputs so existing output logic can be re-used; completing one is what
    damages the boss when the level's conditions are met.
    """
    __slots__ = () # TODO: 'boss' and any other properties common to other defense level weapons once implemented
    def __new__(cls, component_dict, _type=None, **kwargs):
        """Convert to the specific weapon subclass based on component name.

        `_type`, when given, overrides `component_dict['type']`.
        """
        _type = component_dict['type'] if _type is None else _type
        if _type == 'drag-weapon-canister':
            return super().__new__(CrashCanister)
        elif _type == 'drag-weapon-consumer':
            return super().__new__(InternalStorageTank)
        else:
            # Fix: report the type that was actually checked; previously this always showed
            # component_dict['type'], which is wrong when `_type` was passed explicitly
            raise ValueError(f"Invalid weapon type `{_type}`")
    def __init__(self, component_dict, *args, **kwargs):
        super().__init__(output_dict=component_dict, *args, **kwargs)
class CrashCanister(Weapon):
    """Collapsar. While it's categorized as a weapon, it's effectively just an output with some end delay."""
    __slots__ = 'canister_drop_cycle',
    DEFAULT_SHAPE = (4, 4)
    def __init__(self, component_dict, *args, **kwargs):
        super().__init__(component_dict, *args, **kwargs)
        # Cycle on which the output count was first met (canister starts dropping); None until then
        self.canister_drop_cycle = None
    # For most defense levels, the animation at the end halts all waldos, causing them to repeat the last cycle until
    # the animation completes
    # However in collapsar, when the output is complete it stops accepting molecules, while the canister drops, but the
    # solution keeps running as normal and must not crash due to any clogs that result.
    # To simulate this, instead of using an end-animation-cycles var like we'll do for other defense levels, have the
    # canister not mark itself as complete until 2000 cycles after the 40th output
    def do_instant_actions(self, cycle):
        """Behave as a regular output until complete; return True exactly once, 2000 cycles later."""
        # Behave like a normal output until complete
        if self.canister_drop_cycle is None:
            if super().do_instant_actions(cycle):
                # Once complete, save the cycle and artificially lower the complete count to ensure the solution can't
                # end even if another output completes (since we're treating the hydrogen tank as a 0-count output)
                self.current_count -= 1
                self.canister_drop_cycle = cycle
        elif cycle == self.canister_drop_cycle + 2000:
            # Once the target count is met, stop processing inputs and wait for 2000 cycles before setting the target
            # count back to its true value and indicating to the caller that the output just completed
            self.current_count += 1
            return True
        return False
    def reset(self):
        """Reset output state and forget any in-progress canister drop."""
        super().reset()
        self.canister_drop_cycle = None
class InternalStorageTank(Weapon):
    """Collapsar. Despite its weapon component name, this behaves as a 0-count output (a typed storage sink)."""
|
[
"copy.deepcopy",
"collections.deque"
] |
[((3553, 3572), 'collections.deque', 'collections.deque', ([], {}), '()\n', (3570, 3572), False, 'import collections\n'), ((3600, 3619), 'collections.deque', 'collections.deque', ([], {}), '()\n', (3617, 3619), False, 'import collections\n'), ((9157, 9176), 'collections.deque', 'collections.deque', ([], {}), '()\n', (9174, 9176), False, 'import collections\n'), ((9204, 9223), 'collections.deque', 'collections.deque', ([], {}), '()\n', (9221, 9223), False, 'import collections\n'), ((30663, 30682), 'collections.deque', 'collections.deque', ([], {}), '()\n', (30680, 30682), False, 'import collections\n'), ((32242, 32261), 'collections.deque', 'collections.deque', ([], {}), '()\n', (32259, 32261), False, 'import collections\n'), ((18798, 18830), 'copy.deepcopy', 'copy.deepcopy', (['self.molecules[0]'], {}), '(self.molecules[0])\n', (18811, 18830), False, 'import copy\n'), ((22636, 22695), 'copy.deepcopy', 'copy.deepcopy', (['self.repeating_molecules[self.repeating_idx]'], {}), '(self.repeating_molecules[self.repeating_idx])\n', (22649, 22695), False, 'import copy\n'), ((22855, 22912), 'copy.deepcopy', 'copy.deepcopy', (['self.starting_molecules[self.starting_idx]'], {}), '(self.starting_molecules[self.starting_idx])\n', (22868, 22912), False, 'import copy\n')]
|
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
Test nn.probability.distribution.Geometric.
"""
import pytest
import mindspore.nn as nn
import mindspore.nn.probability.distribution as msd
from mindspore import dtype
from mindspore import Tensor
def test_arguments():
    """
    Args passing during initialization.
    """
    dist = msd.Geometric()
    assert isinstance(dist, msd.Distribution)
    dist = msd.Geometric([0.1, 0.3, 0.5, 0.9], dtype=dtype.int32)
    assert isinstance(dist, msd.Distribution)
def test_type():
    """Constructing Geometric with this float dtype is expected to raise a TypeError."""
    with pytest.raises(TypeError):
        msd.Geometric([0.1], dtype=dtype.float32)
def test_name():
    """A non-string `name` argument is expected to raise a TypeError."""
    with pytest.raises(TypeError):
        msd.Geometric([0.1], name=1.0)
def test_seed():
    """A non-integer `seed` argument is expected to raise a TypeError."""
    with pytest.raises(TypeError):
        msd.Geometric([0.1], seed='seed')
def test_prob():
    """
    Invalid probability.
    """
    # Success probabilities outside the open interval (0, 1) must be rejected.
    for bad_probs in ([-0.1], [1.1], [0.0], [1.0]):
        with pytest.raises(ValueError):
            msd.Geometric(bad_probs, dtype=dtype.int32)
class GeometricProb(nn.Cell):
    """
    Cell evaluating the probability functions of Geometric(0.5), probs fixed at init.
    """
    def __init__(self):
        super().__init__()
        self.geom = msd.Geometric(0.5, dtype=dtype.int32)
    def construct(self, value):
        """Evaluate all six probability functions at `value` and sum the results."""
        prob = self.geom.prob(value)
        log_prob = self.geom.log_prob(value)
        cdf = self.geom.cdf(value)
        log_cdf = self.geom.log_cdf(value)
        sf = self.geom.survival_function(value)
        log_sf = self.geom.log_survival(value)
        return prob + log_prob + cdf + log_cdf + sf + log_sf
def test_geometric_prob():
    """
    Test probability functions: passing value through construct.
    """
    prob_net = GeometricProb()
    ans = prob_net(Tensor([3, 4, 5, 6, 7], dtype=dtype.float32))
    assert isinstance(ans, Tensor)
class GeometricProb1(nn.Cell):
    """
    Cell evaluating Geometric probability functions with probs supplied at call time.
    """
    def __init__(self):
        super().__init__()
        self.geom = msd.Geometric(dtype=dtype.int32)
    def construct(self, value, probs):
        """Evaluate all six probability functions at `value` under `probs` and sum them."""
        prob = self.geom.prob(value, probs)
        log_prob = self.geom.log_prob(value, probs)
        cdf = self.geom.cdf(value, probs)
        log_cdf = self.geom.log_cdf(value, probs)
        sf = self.geom.survival_function(value, probs)
        log_sf = self.geom.log_survival(value, probs)
        return prob + log_prob + cdf + log_cdf + sf + log_sf
def test_geometric_prob1():
    """
    Test probability functions: passing value/probs through construct.
    """
    prob_net = GeometricProb1()
    ans = prob_net(Tensor([3, 4, 5, 6, 7], dtype=dtype.float32),
                   Tensor([0.5], dtype=dtype.float32))
    assert isinstance(ans, Tensor)
class GeometricKl(nn.Cell):
    """
    Cell computing kl_loss between Geometric distributions, with and without init-time probs.
    """
    def __init__(self):
        super().__init__()
        self.geom_fixed = msd.Geometric(0.7, dtype=dtype.int32)
        self.geom_passed = msd.Geometric(dtype=dtype.int32)
    def construct(self, probs_b, probs_a):
        """Sum kl_loss against probs fixed at init and kl_loss with probs_a passed in."""
        kl_fixed = self.geom_fixed.kl_loss('Geometric', probs_b)
        kl_passed = self.geom_passed.kl_loss('Geometric', probs_b, probs_a)
        return kl_fixed + kl_passed
def test_kl():
    """
    Test kl_loss function.
    """
    kl_net = GeometricKl()
    ans = kl_net(Tensor([0.3], dtype=dtype.float32),
                 Tensor([0.7], dtype=dtype.float32))
    assert isinstance(ans, Tensor)
class GeometricCrossEntropy(nn.Cell):
    """
    Cell computing cross_entropy of Geometric distributions, with and without init-time probs.
    """
    def __init__(self):
        super().__init__()
        self.geom_fixed = msd.Geometric(0.3, dtype=dtype.int32)
        self.geom_passed = msd.Geometric(dtype=dtype.int32)
    def construct(self, probs_b, probs_a):
        """Sum cross_entropy against probs fixed at init and with probs_a passed in."""
        ce_fixed = self.geom_fixed.cross_entropy('Geometric', probs_b)
        ce_passed = self.geom_passed.cross_entropy('Geometric', probs_b, probs_a)
        return ce_fixed + ce_passed
def test_cross_entropy():
    """
    Test cross_entropy between Geometric distributions.
    """
    ce_net = GeometricCrossEntropy()
    ans = ce_net(Tensor([0.3], dtype=dtype.float32),
                 Tensor([0.7], dtype=dtype.float32))
    assert isinstance(ans, Tensor)
class GeometricBasics(nn.Cell):
    """
    Cell evaluating the basic statistics (mean/sd/var/mode/entropy) of a Geometric distribution.
    """
    def __init__(self):
        super().__init__()
        self.geom = msd.Geometric([0.3, 0.5], dtype=dtype.int32)
    def construct(self):
        """Sum the five basic statistics of the distribution."""
        mean = self.geom.mean()
        sd = self.geom.sd()
        var = self.geom.var()
        mode = self.geom.mode()
        entropy = self.geom.entropy()
        return mean + sd + var + mode + entropy
def test_bascis():
    """
    Test mean/sd/mode/entropy functionality of Geometric distribution.
    """
    # NOTE: the function name typo ("bascis") is kept so existing references stay valid.
    ans = GeometricBasics()()
    assert isinstance(ans, Tensor)
class GeoConstruct(nn.Cell):
    """
    Geometric distribution: calling the distribution cell directly through construct.
    (Docstring fixed: this exercises Geometric, not Bernoulli.)
    """
    def __init__(self):
        super().__init__()
        self.geom = msd.Geometric(0.5, dtype=dtype.int32)
        self.geom_no_probs = msd.Geometric(dtype=dtype.int32)
    def construct(self, value, probs):
        """Call the cells in 'prob' mode with default, overridden, and call-time probs."""
        p_default = self.geom('prob', value)
        p_override = self.geom('prob', value, probs)
        p_passed = self.geom_no_probs('prob', value, probs)
        return p_default + p_override + p_passed
def test_geo_construct():
"""
Test probability function going through construct.
"""
net = GeoConstruct()
value = Tensor([0, 0, 0, 0, 0], dtype=dtype.float32)
probs = Tensor([0.5], dtype=dtype.float32)
ans = net(value, probs)
assert isinstance(ans, Tensor)
|
[
"mindspore.Tensor",
"pytest.raises",
"mindspore.nn.probability.distribution.Geometric"
] |
[((957, 972), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', ([], {}), '()\n', (970, 972), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1024, 1078), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[0.1, 0.3, 0.5, 0.9]'], {'dtype': 'dtype.int32'}), '([0.1, 0.3, 0.5, 0.9], dtype=dtype.int32)\n', (1037, 1078), True, 'import mindspore.nn.probability.distribution as msd\n'), ((2509, 2553), 'mindspore.Tensor', 'Tensor', (['[3, 4, 5, 6, 7]'], {'dtype': 'dtype.float32'}), '([3, 4, 5, 6, 7], dtype=dtype.float32)\n', (2515, 2553), False, 'from mindspore import Tensor\n'), ((3368, 3412), 'mindspore.Tensor', 'Tensor', (['[3, 4, 5, 6, 7]'], {'dtype': 'dtype.float32'}), '([3, 4, 5, 6, 7], dtype=dtype.float32)\n', (3374, 3412), False, 'from mindspore import Tensor\n'), ((3425, 3459), 'mindspore.Tensor', 'Tensor', (['[0.5]'], {'dtype': 'dtype.float32'}), '([0.5], dtype=dtype.float32)\n', (3431, 3459), False, 'from mindspore import Tensor\n'), ((4084, 4118), 'mindspore.Tensor', 'Tensor', (['[0.3]'], {'dtype': 'dtype.float32'}), '([0.3], dtype=dtype.float32)\n', (4090, 4118), False, 'from mindspore import Tensor\n'), ((4133, 4167), 'mindspore.Tensor', 'Tensor', (['[0.7]'], {'dtype': 'dtype.float32'}), '([0.7], dtype=dtype.float32)\n', (4139, 4167), False, 'from mindspore import Tensor\n'), ((4873, 4907), 'mindspore.Tensor', 'Tensor', (['[0.3]'], {'dtype': 'dtype.float32'}), '([0.3], dtype=dtype.float32)\n', (4879, 4907), False, 'from mindspore import Tensor\n'), ((4922, 4956), 'mindspore.Tensor', 'Tensor', (['[0.7]'], {'dtype': 'dtype.float32'}), '([0.7], dtype=dtype.float32)\n', (4928, 4956), False, 'from mindspore import Tensor\n'), ((6279, 6323), 'mindspore.Tensor', 'Tensor', (['[0, 0, 0, 0, 0]'], {'dtype': 'dtype.float32'}), '([0, 0, 0, 0, 0], dtype=dtype.float32)\n', (6285, 6323), False, 'from mindspore import Tensor\n'), ((6336, 6370), 'mindspore.Tensor', 'Tensor', (['[0.5]'], {'dtype': 'dtype.float32'}), 
'([0.5], dtype=dtype.float32)\n', (6342, 6370), False, 'from mindspore import Tensor\n'), ((1149, 1173), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1162, 1173), False, 'import pytest\n'), ((1183, 1224), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[0.1]'], {'dtype': 'dtype.float32'}), '([0.1], dtype=dtype.float32)\n', (1196, 1224), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1252, 1276), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1265, 1276), False, 'import pytest\n'), ((1286, 1316), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[0.1]'], {'name': '(1.0)'}), '([0.1], name=1.0)\n', (1299, 1316), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1344, 1368), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (1357, 1368), False, 'import pytest\n'), ((1378, 1411), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[0.1]'], {'seed': '"""seed"""'}), "([0.1], seed='seed')\n", (1391, 1411), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1480, 1505), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1493, 1505), False, 'import pytest\n'), ((1515, 1555), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[-0.1]'], {'dtype': 'dtype.int32'}), '([-0.1], dtype=dtype.int32)\n', (1528, 1555), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1565, 1590), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1578, 1590), False, 'import pytest\n'), ((1600, 1639), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[1.1]'], {'dtype': 'dtype.int32'}), '([1.1], dtype=dtype.int32)\n', (1613, 1639), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1649, 1674), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1662, 1674), False, 'import 
pytest\n'), ((1684, 1723), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[0.0]'], {'dtype': 'dtype.int32'}), '([0.0], dtype=dtype.int32)\n', (1697, 1723), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1733, 1758), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1746, 1758), False, 'import pytest\n'), ((1768, 1807), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[1.0]'], {'dtype': 'dtype.int32'}), '([1.0], dtype=dtype.int32)\n', (1781, 1807), True, 'import mindspore.nn.probability.distribution as msd\n'), ((1993, 2030), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['(0.5)'], {'dtype': 'dtype.int32'}), '(0.5, dtype=dtype.int32)\n', (2006, 2030), True, 'import mindspore.nn.probability.distribution as msd\n'), ((2800, 2832), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', ([], {'dtype': 'dtype.int32'}), '(dtype=dtype.int32)\n', (2813, 2832), True, 'import mindspore.nn.probability.distribution as msd\n'), ((3712, 3749), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['(0.7)'], {'dtype': 'dtype.int32'}), '(0.7, dtype=dtype.int32)\n', (3725, 3749), True, 'import mindspore.nn.probability.distribution as msd\n'), ((3768, 3800), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', ([], {'dtype': 'dtype.int32'}), '(dtype=dtype.int32)\n', (3781, 3800), True, 'import mindspore.nn.probability.distribution as msd\n'), ((4447, 4484), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['(0.3)'], {'dtype': 'dtype.int32'}), '(0.3, dtype=dtype.int32)\n', (4460, 4484), True, 'import mindspore.nn.probability.distribution as msd\n'), ((4503, 4535), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', ([], {'dtype': 'dtype.int32'}), '(dtype=dtype.int32)\n', (4516, 4535), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5215, 5259), 
'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['[0.3, 0.5]'], {'dtype': 'dtype.int32'}), '([0.3, 0.5], dtype=dtype.int32)\n', (5228, 5259), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5851, 5888), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', (['(0.5)'], {'dtype': 'dtype.int32'}), '(0.5, dtype=dtype.int32)\n', (5864, 5888), True, 'import mindspore.nn.probability.distribution as msd\n'), ((5907, 5939), 'mindspore.nn.probability.distribution.Geometric', 'msd.Geometric', ([], {'dtype': 'dtype.int32'}), '(dtype=dtype.int32)\n', (5920, 5939), True, 'import mindspore.nn.probability.distribution as msd\n')]
|
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import DoximityProvider
urlpatterns = default_urlpatterns(DoximityProvider)
|
[
"allauth.socialaccount.providers.oauth2.urls.default_urlpatterns"
] |
[((131, 168), 'allauth.socialaccount.providers.oauth2.urls.default_urlpatterns', 'default_urlpatterns', (['DoximityProvider'], {}), '(DoximityProvider)\n', (150, 168), False, 'from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns\n')]
|
from urllib.request import urlopen
from bs4 import BeautifulSoup
html = urlopen("http://www.pythonscraping.com/pages/warandpeace.html")
bsObj = BeautifulSoup(html, "html.parser")
nameList = bsObj.findAll("span", {"class":{"green", "red"}})
for name in nameList:
print(name.get_text())
|
[
"bs4.BeautifulSoup",
"urllib.request.urlopen"
] |
[((73, 136), 'urllib.request.urlopen', 'urlopen', (['"""http://www.pythonscraping.com/pages/warandpeace.html"""'], {}), "('http://www.pythonscraping.com/pages/warandpeace.html')\n", (80, 136), False, 'from urllib.request import urlopen\n'), ((145, 179), 'bs4.BeautifulSoup', 'BeautifulSoup', (['html', '"""html.parser"""'], {}), "(html, 'html.parser')\n", (158, 179), False, 'from bs4 import BeautifulSoup\n')]
|
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pushes 180 degree rotations around axes in the XY plane later in the circuit.
"""
from cirq import _compat, circuits, transformers
@_compat.deprecated_class(deadline='v1.0', fix='Use cirq.eject_phased_paulis instead.')
class EjectPhasedPaulis:
"""Pushes X, Y, and PhasedX gates towards the end of the circuit.
As the gates get pushed, they may absorb Z gates, cancel against other
X, Y, or PhasedX gates with exponent=1, get merged into measurements (as
output bit flips), and cause phase kickback operations across CZs (which can
then be removed by the EjectZ optimization).
"""
def __init__(self, tolerance: float = 1e-8, eject_parameterized: bool = False) -> None:
"""Inits EjectPhasedPaulis.
Args:
tolerance: Maximum absolute error tolerance. The optimization is
permitted to simply drop negligible combinations gates with a
threshold determined by this tolerance.
eject_parameterized: If True, the optimization will attempt to eject
parameterized gates as well. This may result in other gates
parameterized by symbolic expressions.
"""
self.tolerance = tolerance
self.eject_parameterized = eject_parameterized
def optimize_circuit(self, circuit: circuits.Circuit):
circuit._moments = [
*transformers.eject_phased_paulis(
circuit, atol=self.tolerance, eject_parameterized=self.eject_parameterized
)
]
|
[
"cirq.transformers.eject_phased_paulis",
"cirq._compat.deprecated_class"
] |
[((723, 814), 'cirq._compat.deprecated_class', '_compat.deprecated_class', ([], {'deadline': '"""v1.0"""', 'fix': '"""Use cirq.eject_phased_paulis instead."""'}), "(deadline='v1.0', fix=\n 'Use cirq.eject_phased_paulis instead.')\n", (747, 814), False, 'from cirq import _compat, circuits, transformers\n'), ((1970, 2082), 'cirq.transformers.eject_phased_paulis', 'transformers.eject_phased_paulis', (['circuit'], {'atol': 'self.tolerance', 'eject_parameterized': 'self.eject_parameterized'}), '(circuit, atol=self.tolerance,\n eject_parameterized=self.eject_parameterized)\n', (2002, 2082), False, 'from cirq import _compat, circuits, transformers\n')]
|
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Train retinanet and get checkpoint files."""
import os
import argparse
import ast
import mindspore
import mindspore.nn as nn
from mindspore import context, Tensor
from mindspore.communication.management import init, get_rank
from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor, Callback
from mindspore.train import Model
from mindspore.context import ParallelMode
from mindspore.train.serialization import load_checkpoint, load_param_into_net
from mindspore.common import set_seed
from src.retinahead import retinanetWithLossCell, TrainingWrapper, retinahead
from src.backbone import resnet152
from src.config import config
from src.dataset import create_retinanet_dataset, create_mindrecord
from src.lr_schedule import get_lr
from src.init_params import init_net_param, filter_checkpoint_parameter
set_seed(1)
class Monitor(Callback):
"""
Monitor loss and time.
Args:
lr_init (numpy array): train lr
Returns:
None
Examples:
>>> Monitor(100,lr_init=Tensor([0.05]*100).asnumpy())
"""
def __init__(self, lr_init=None):
super(Monitor, self).__init__()
self.lr_init = lr_init
self.lr_init_len = len(lr_init)
def step_end(self, run_context):
cb_params = run_context.original_args()
print("lr:[{:8.6f}]".format(self.lr_init[cb_params.cur_step_num-1]), flush=True)
def main():
parser = argparse.ArgumentParser(description="retinanet training")
parser.add_argument("--only_create_dataset", type=ast.literal_eval, default=False,
help="If set it true, only create Mindrecord, default is False.")
parser.add_argument("--distribute", type=ast.literal_eval, default=False,
help="Run distribute, default is False.")
parser.add_argument("--device_id", type=int, default=0, help="Device id, default is 0.")
parser.add_argument("--device_num", type=int, default=1, help="Use device nums, default is 1.")
parser.add_argument("--lr", type=float, default=0.1, help="Learning rate, default is 0.1.")
parser.add_argument("--mode", type=str, default="sink", help="Run sink mode or not, default is sink.")
parser.add_argument("--dataset", type=str, default="coco", help="Dataset, default is coco.")
parser.add_argument("--epoch_size", type=int, default=500, help="Epoch size, default is 500.")
parser.add_argument("--batch_size", type=int, default=16, help="Batch size, default is 32.")
parser.add_argument("--pre_trained", type=str, default=None, help="Pretrained Checkpoint file path.")
parser.add_argument("--pre_trained_epoch_size", type=int, default=0, help="Pretrained epoch size.")
parser.add_argument("--save_checkpoint_epochs", type=int, default=1, help="Save checkpoint epochs, default is 1.")
parser.add_argument("--loss_scale", type=int, default=1024, help="Loss scale, default is 1024.")
parser.add_argument("--filter_weight", type=ast.literal_eval, default=False,
help="Filter weight parameters, default is False.")
parser.add_argument("--run_platform", type=str, default="Ascend", choices=("Ascend"),
help="run platform, only support Ascend.")
args_opt = parser.parse_args()
if args_opt.run_platform == "Ascend":
context.set_context(mode=context.GRAPH_MODE, device_target="Ascend")
if args_opt.distribute:
if os.getenv("DEVICE_ID", "not_set").isdigit():
context.set_context(device_id=int(os.getenv("DEVICE_ID")))
init()
device_num = args_opt.device_num
rank = get_rank()
context.set_auto_parallel_context(parallel_mode=ParallelMode.DATA_PARALLEL, gradients_mean=True,
device_num=device_num)
else:
rank = 0
device_num = 1
context.set_context(device_id=args_opt.device_id)
else:
raise ValueError("Unsupported platform.")
mindrecord_file = create_mindrecord(args_opt.dataset, "retina6402.mindrecord", True)
if not args_opt.only_create_dataset:
loss_scale = float(args_opt.loss_scale)
# When create MindDataset, using the fitst mindrecord file, such as retinanet.mindrecord0.
dataset = create_retinanet_dataset(mindrecord_file, repeat_num=1,
batch_size=args_opt.batch_size, device_num=device_num, rank=rank)
dataset_size = dataset.get_dataset_size()
print("Create dataset done!")
backbone = resnet152(config.num_classes)
retinanet = retinahead(backbone, config)
net = retinanetWithLossCell(retinanet, config)
net.to_float(mindspore.float16)
init_net_param(net)
if args_opt.pre_trained:
if args_opt.pre_trained_epoch_size <= 0:
raise KeyError("pre_trained_epoch_size must be greater than 0.")
param_dict = load_checkpoint(args_opt.pre_trained)
if args_opt.filter_weight:
filter_checkpoint_parameter(param_dict)
load_param_into_net(net, param_dict)
lr = Tensor(get_lr(global_step=config.global_step,
lr_init=config.lr_init, lr_end=config.lr_end_rate * args_opt.lr, lr_max=args_opt.lr,
warmup_epochs1=config.warmup_epochs1, warmup_epochs2=config.warmup_epochs2,
warmup_epochs3=config.warmup_epochs3, warmup_epochs4=config.warmup_epochs4,
warmup_epochs5=config.warmup_epochs5, total_epochs=args_opt.epoch_size,
steps_per_epoch=dataset_size))
opt = nn.Momentum(filter(lambda x: x.requires_grad, net.get_parameters()), lr,
config.momentum, config.weight_decay, loss_scale)
net = TrainingWrapper(net, opt, loss_scale)
model = Model(net)
print("Start train retinanet, the first epoch will be slower because of the graph compilation.")
cb = [TimeMonitor(), LossMonitor()]
cb += [Monitor(lr_init=lr.asnumpy())]
config_ck = CheckpointConfig(save_checkpoint_steps=dataset_size * args_opt.save_checkpoint_epochs,
keep_checkpoint_max=config.keep_checkpoint_max)
ckpt_cb = ModelCheckpoint(prefix="retinanet", directory=config.save_checkpoint_path, config=config_ck)
if args_opt.distribute:
if rank == 0:
cb += [ckpt_cb]
model.train(args_opt.epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
else:
cb += [ckpt_cb]
model.train(args_opt.epoch_size, dataset, callbacks=cb, dataset_sink_mode=True)
if __name__ == '__main__':
main()
|
[
"src.retinahead.retinanetWithLossCell",
"argparse.ArgumentParser",
"mindspore.train.callback.ModelCheckpoint",
"mindspore.train.serialization.load_checkpoint",
"mindspore.train.serialization.load_param_into_net",
"mindspore.context.set_context",
"mindspore.train.callback.LossMonitor",
"mindspore.context.set_auto_parallel_context",
"src.lr_schedule.get_lr",
"mindspore.common.set_seed",
"src.dataset.create_retinanet_dataset",
"mindspore.communication.management.get_rank",
"mindspore.train.callback.CheckpointConfig",
"src.retinahead.TrainingWrapper",
"mindspore.communication.management.init",
"src.dataset.create_mindrecord",
"mindspore.train.callback.TimeMonitor",
"src.init_params.filter_checkpoint_parameter",
"os.getenv",
"mindspore.train.Model",
"src.init_params.init_net_param",
"src.retinahead.retinahead",
"src.backbone.resnet152"
] |
[((1519, 1530), 'mindspore.common.set_seed', 'set_seed', (['(1)'], {}), '(1)\n', (1527, 1530), False, 'from mindspore.common import set_seed\n'), ((2104, 2161), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""retinanet training"""'}), "(description='retinanet training')\n", (2127, 2161), False, 'import argparse\n'), ((4718, 4784), 'src.dataset.create_mindrecord', 'create_mindrecord', (['args_opt.dataset', '"""retina6402.mindrecord"""', '(True)'], {}), "(args_opt.dataset, 'retina6402.mindrecord', True)\n", (4735, 4784), False, 'from src.dataset import create_retinanet_dataset, create_mindrecord\n'), ((4002, 4070), 'mindspore.context.set_context', 'context.set_context', ([], {'mode': 'context.GRAPH_MODE', 'device_target': '"""Ascend"""'}), "(mode=context.GRAPH_MODE, device_target='Ascend')\n", (4021, 4070), False, 'from mindspore import context, Tensor\n'), ((4993, 5119), 'src.dataset.create_retinanet_dataset', 'create_retinanet_dataset', (['mindrecord_file'], {'repeat_num': '(1)', 'batch_size': 'args_opt.batch_size', 'device_num': 'device_num', 'rank': 'rank'}), '(mindrecord_file, repeat_num=1, batch_size=args_opt\n .batch_size, device_num=device_num, rank=rank)\n', (5017, 5119), False, 'from src.dataset import create_retinanet_dataset, create_mindrecord\n'), ((5268, 5297), 'src.backbone.resnet152', 'resnet152', (['config.num_classes'], {}), '(config.num_classes)\n', (5277, 5297), False, 'from src.backbone import resnet152\n'), ((5318, 5346), 'src.retinahead.retinahead', 'retinahead', (['backbone', 'config'], {}), '(backbone, config)\n', (5328, 5346), False, 'from src.retinahead import retinanetWithLossCell, TrainingWrapper, retinahead\n'), ((5361, 5401), 'src.retinahead.retinanetWithLossCell', 'retinanetWithLossCell', (['retinanet', 'config'], {}), '(retinanet, config)\n', (5382, 5401), False, 'from src.retinahead import retinanetWithLossCell, TrainingWrapper, retinahead\n'), ((5450, 5469), 'src.init_params.init_net_param', 
'init_net_param', (['net'], {}), '(net)\n', (5464, 5469), False, 'from src.init_params import init_net_param, filter_checkpoint_parameter\n'), ((6557, 6594), 'src.retinahead.TrainingWrapper', 'TrainingWrapper', (['net', 'opt', 'loss_scale'], {}), '(net, opt, loss_scale)\n', (6572, 6594), False, 'from src.retinahead import retinanetWithLossCell, TrainingWrapper, retinahead\n'), ((6611, 6621), 'mindspore.train.Model', 'Model', (['net'], {}), '(net)\n', (6616, 6621), False, 'from mindspore.train import Model\n'), ((6837, 6976), 'mindspore.train.callback.CheckpointConfig', 'CheckpointConfig', ([], {'save_checkpoint_steps': '(dataset_size * args_opt.save_checkpoint_epochs)', 'keep_checkpoint_max': 'config.keep_checkpoint_max'}), '(save_checkpoint_steps=dataset_size * args_opt.\n save_checkpoint_epochs, keep_checkpoint_max=config.keep_checkpoint_max)\n', (6853, 6976), False, 'from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor, Callback\n'), ((7027, 7123), 'mindspore.train.callback.ModelCheckpoint', 'ModelCheckpoint', ([], {'prefix': '"""retinanet"""', 'directory': 'config.save_checkpoint_path', 'config': 'config_ck'}), "(prefix='retinanet', directory=config.save_checkpoint_path,\n config=config_ck)\n", (7042, 7123), False, 'from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor, Callback\n'), ((4250, 4256), 'mindspore.communication.management.init', 'init', ([], {}), '()\n', (4254, 4256), False, 'from mindspore.communication.management import init, get_rank\n'), ((4321, 4331), 'mindspore.communication.management.get_rank', 'get_rank', ([], {}), '()\n', (4329, 4331), False, 'from mindspore.communication.management import init, get_rank\n'), ((4344, 4467), 'mindspore.context.set_auto_parallel_context', 'context.set_auto_parallel_context', ([], {'parallel_mode': 'ParallelMode.DATA_PARALLEL', 'gradients_mean': '(True)', 'device_num': 'device_num'}), 
'(parallel_mode=ParallelMode.DATA_PARALLEL,\n gradients_mean=True, device_num=device_num)\n', (4377, 4467), False, 'from mindspore import context, Tensor\n'), ((4584, 4633), 'mindspore.context.set_context', 'context.set_context', ([], {'device_id': 'args_opt.device_id'}), '(device_id=args_opt.device_id)\n', (4603, 4633), False, 'from mindspore import context, Tensor\n'), ((5663, 5700), 'mindspore.train.serialization.load_checkpoint', 'load_checkpoint', (['args_opt.pre_trained'], {}), '(args_opt.pre_trained)\n', (5678, 5700), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((5808, 5844), 'mindspore.train.serialization.load_param_into_net', 'load_param_into_net', (['net', 'param_dict'], {}), '(net, param_dict)\n', (5827, 5844), False, 'from mindspore.train.serialization import load_checkpoint, load_param_into_net\n'), ((5866, 6267), 'src.lr_schedule.get_lr', 'get_lr', ([], {'global_step': 'config.global_step', 'lr_init': 'config.lr_init', 'lr_end': '(config.lr_end_rate * args_opt.lr)', 'lr_max': 'args_opt.lr', 'warmup_epochs1': 'config.warmup_epochs1', 'warmup_epochs2': 'config.warmup_epochs2', 'warmup_epochs3': 'config.warmup_epochs3', 'warmup_epochs4': 'config.warmup_epochs4', 'warmup_epochs5': 'config.warmup_epochs5', 'total_epochs': 'args_opt.epoch_size', 'steps_per_epoch': 'dataset_size'}), '(global_step=config.global_step, lr_init=config.lr_init, lr_end=\n config.lr_end_rate * args_opt.lr, lr_max=args_opt.lr, warmup_epochs1=\n config.warmup_epochs1, warmup_epochs2=config.warmup_epochs2,\n warmup_epochs3=config.warmup_epochs3, warmup_epochs4=config.\n warmup_epochs4, warmup_epochs5=config.warmup_epochs5, total_epochs=\n args_opt.epoch_size, steps_per_epoch=dataset_size)\n', (5872, 6267), False, 'from src.lr_schedule import get_lr\n'), ((6741, 6754), 'mindspore.train.callback.TimeMonitor', 'TimeMonitor', ([], {}), '()\n', (6752, 6754), False, 'from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, 
LossMonitor, TimeMonitor, Callback\n'), ((6756, 6769), 'mindspore.train.callback.LossMonitor', 'LossMonitor', ([], {}), '()\n', (6767, 6769), False, 'from mindspore.train.callback import CheckpointConfig, ModelCheckpoint, LossMonitor, TimeMonitor, Callback\n'), ((5756, 5795), 'src.init_params.filter_checkpoint_parameter', 'filter_checkpoint_parameter', (['param_dict'], {}), '(param_dict)\n', (5783, 5795), False, 'from src.init_params import init_net_param, filter_checkpoint_parameter\n'), ((4118, 4151), 'os.getenv', 'os.getenv', (['"""DEVICE_ID"""', '"""not_set"""'], {}), "('DEVICE_ID', 'not_set')\n", (4127, 4151), False, 'import os\n'), ((4213, 4235), 'os.getenv', 'os.getenv', (['"""DEVICE_ID"""'], {}), "('DEVICE_ID')\n", (4222, 4235), False, 'import os\n')]
|
"""
flask application
"""
# # use to retrieve environment variable
import os
# import flask app
from application import app
# run application
if __name__ == '__main__':
port = os.getenv('FLASK_PORT', 5000)
app.run(port=port, host='0.0.0.0', use_reloader=False)
|
[
"os.getenv",
"application.app.run"
] |
[((182, 211), 'os.getenv', 'os.getenv', (['"""FLASK_PORT"""', '(5000)'], {}), "('FLASK_PORT', 5000)\n", (191, 211), False, 'import os\n'), ((216, 270), 'application.app.run', 'app.run', ([], {'port': 'port', 'host': '"""0.0.0.0"""', 'use_reloader': '(False)'}), "(port=port, host='0.0.0.0', use_reloader=False)\n", (223, 270), False, 'from application import app\n')]
|
import logging
LOG_FORMAT = "[%(asctime)s] [%(levelname)s] - %(message)s"
logging.basicConfig(level=logging.INFO, format=LOG_FORMAT)
logging.info('loading dependencies')
import os
import numpy as np
from PIL import Image
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
tf.logging.set_verbosity(tf.logging.ERROR)
__dirname = os.path.dirname(__file__)
model_path = os.path.join(__dirname, './model')
import sys
sys.path.append(os.path.join(__dirname))
logging.info('loading database')
import database
filename = np.load(os.path.join(__dirname, './filename.npy'))
logging.info('loading graph')
import preprocess_image as pi
import main
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=True)
session_config = tf.ConfigProto(gpu_options=gpu_options)
run_config = tf.estimator.RunConfig().replace(session_config=session_config)
clf = tf.estimator.Estimator(
model_fn=main.model_fn, model_dir=model_path, config=run_config,
params={
'resnet_size': 50,
'data_format': None,
'batch_size': 32,
'multi_gpu': False,
'version': 2,
})
next_image = None
def gen():
yield Image.new('RGB', (pi._IMAGE_SIZE, pi._IMAGE_SIZE), (0, 0, 0))
while True:
yield next_image
def input_fn(generator):
return tf.data.Dataset.from_generator(generator, (tf.float32), (256, 256, 3)).batch(1)
result = clf.predict(lambda: input_fn(gen))
logging.info('loading session')
next(result)
logging.info('ready')
def process_image(image_path, ymin, xmin, ymax, xmax):
global next_image
im = Image.open(image_path)
# crop to bbox
im = im.crop((xmin, ymin, xmax, ymax))
# aspect preserve resize
width, height = im.size
bigger = max(width, height)
ratio = pi._IMAGE_SIZE / bigger
width = int(ratio * width)
height = int(ratio * height)
im = im.resize((width, height))
# pad to given size
temp = Image.new('RGB', (pi._IMAGE_SIZE, pi._IMAGE_SIZE), (0, 0, 0))
width_gap = (pi._IMAGE_SIZE - width) // 2
height_gap = (pi._IMAGE_SIZE - height) // 2
temp.paste(im, (width_gap, height_gap))
next_image = temp
def similar_cloth(image_path, ymin, xmin, ymax, xmax, top=5, method='cosine'):
"""
Get clothes similar to the given one from the database
Args:
image_path: The path of the input cloth image
ymin: The ordinate of the upper left point of the bounding box
xmin: The abscissa of the upper left point of the bounding box
ymin: The ordinate of the lower right point of the bounding box
ymin: The abscissa of the lower right point of the bounding box
top: Number of the similar clothes, default to 5
method: method to calculate distance, default to 'cosine'
Returns:
list of filenames of the most similar cloths, like
[
'img/WOMEN/Blouses_Shirts/id_00000001/02_1_front.jpg',
'img/WOMEN/Blouses_Shirts/id_00000001/02_2_side.jpg'
]
"""
process_image(image_path, ymin, xmin, ymax, xmax)
vector = next(result)['logits']
top = database.topN(vector, n=top, method=method)
return filename[top]
|
[
"PIL.Image.new",
"logging.basicConfig",
"tensorflow.estimator.RunConfig",
"os.path.dirname",
"tensorflow.logging.set_verbosity",
"PIL.Image.open",
"logging.info",
"tensorflow.ConfigProto",
"tensorflow.estimator.Estimator",
"database.topN",
"tensorflow.data.Dataset.from_generator",
"tensorflow.GPUOptions",
"os.path.join"
] |
[((74, 132), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': 'LOG_FORMAT'}), '(level=logging.INFO, format=LOG_FORMAT)\n', (93, 132), False, 'import logging\n'), ((134, 170), 'logging.info', 'logging.info', (['"""loading dependencies"""'], {}), "('loading dependencies')\n", (146, 170), False, 'import logging\n'), ((288, 330), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.ERROR'], {}), '(tf.logging.ERROR)\n', (312, 330), True, 'import tensorflow as tf\n'), ((344, 369), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (359, 369), False, 'import os\n'), ((383, 417), 'os.path.join', 'os.path.join', (['__dirname', '"""./model"""'], {}), "(__dirname, './model')\n", (395, 417), False, 'import os\n'), ((472, 504), 'logging.info', 'logging.info', (['"""loading database"""'], {}), "('loading database')\n", (484, 504), False, 'import logging\n'), ((584, 613), 'logging.info', 'logging.info', (['"""loading graph"""'], {}), "('loading graph')\n", (596, 613), False, 'import logging\n'), ((671, 722), 'tensorflow.GPUOptions', 'tf.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(True)'}), '(per_process_gpu_memory_fraction=True)\n', (684, 722), True, 'import tensorflow as tf\n'), ((740, 779), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {'gpu_options': 'gpu_options'}), '(gpu_options=gpu_options)\n', (754, 779), True, 'import tensorflow as tf\n'), ((863, 1060), 'tensorflow.estimator.Estimator', 'tf.estimator.Estimator', ([], {'model_fn': 'main.model_fn', 'model_dir': 'model_path', 'config': 'run_config', 'params': "{'resnet_size': 50, 'data_format': None, 'batch_size': 32, 'multi_gpu': \n False, 'version': 2}"}), "(model_fn=main.model_fn, model_dir=model_path, config\n =run_config, params={'resnet_size': 50, 'data_format': None,\n 'batch_size': 32, 'multi_gpu': False, 'version': 2})\n", (885, 1060), True, 'import tensorflow as tf\n'), ((1416, 1447), 'logging.info', 'logging.info', 
(['"""loading session"""'], {}), "('loading session')\n", (1428, 1447), False, 'import logging\n'), ((1461, 1482), 'logging.info', 'logging.info', (['"""ready"""'], {}), "('ready')\n", (1473, 1482), False, 'import logging\n'), ((446, 469), 'os.path.join', 'os.path.join', (['__dirname'], {}), '(__dirname)\n', (458, 469), False, 'import os\n'), ((540, 581), 'os.path.join', 'os.path.join', (['__dirname', '"""./filename.npy"""'], {}), "(__dirname, './filename.npy')\n", (552, 581), False, 'import os\n'), ((1570, 1592), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (1580, 1592), False, 'from PIL import Image\n'), ((1915, 1976), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(pi._IMAGE_SIZE, pi._IMAGE_SIZE)', '(0, 0, 0)'], {}), "('RGB', (pi._IMAGE_SIZE, pi._IMAGE_SIZE), (0, 0, 0))\n", (1924, 1976), False, 'from PIL import Image\n'), ((3091, 3134), 'database.topN', 'database.topN', (['vector'], {'n': 'top', 'method': 'method'}), '(vector, n=top, method=method)\n', (3104, 3134), False, 'import database\n'), ((793, 817), 'tensorflow.estimator.RunConfig', 'tf.estimator.RunConfig', ([], {}), '()\n', (815, 817), True, 'import tensorflow as tf\n'), ((1148, 1209), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(pi._IMAGE_SIZE, pi._IMAGE_SIZE)', '(0, 0, 0)'], {}), "('RGB', (pi._IMAGE_SIZE, pi._IMAGE_SIZE), (0, 0, 0))\n", (1157, 1209), False, 'from PIL import Image\n'), ((1289, 1357), 'tensorflow.data.Dataset.from_generator', 'tf.data.Dataset.from_generator', (['generator', 'tf.float32', '(256, 256, 3)'], {}), '(generator, tf.float32, (256, 256, 3))\n', (1319, 1357), True, 'import tensorflow as tf\n')]
|
# coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
import re
import json
from ..utils import sanitize_for_serialization
class VoicemailOrganizationPolicy(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.

    Organization-wide voicemail policy: enablement, ring timeout, PIN
    configuration, retrieval extension, email notifications and the last
    modification timestamp.
    """

    def __init__(self):
        """
        VoicemailOrganizationPolicy - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
            and the value is attribute type.
        :param dict attributeMap: The key is attribute name
            and the value is json key in definition.
        """
        # Maps python attribute name -> swagger type name (used by to_dict()).
        self.swagger_types = {
            'enabled': 'bool',
            'alert_timeout_seconds': 'int',
            'pin_configuration': 'PINConfiguration',
            'voicemail_extension': 'str',
            'pin_required': 'bool',
            'send_email_notifications': 'bool',
            'modified_date': 'datetime'
        }

        # Maps python attribute name -> JSON key in the API payload.
        self.attribute_map = {
            'enabled': 'enabled',
            'alert_timeout_seconds': 'alertTimeoutSeconds',
            'pin_configuration': 'pinConfiguration',
            'voicemail_extension': 'voicemailExtension',
            'pin_required': 'pinRequired',
            'send_email_notifications': 'sendEmailNotifications',
            'modified_date': 'modifiedDate'
        }

        self._enabled = None
        self._alert_timeout_seconds = None
        self._pin_configuration = None
        self._voicemail_extension = None
        self._pin_required = None
        self._send_email_notifications = None
        self._modified_date = None

    @property
    def enabled(self):
        """
        Gets the enabled of this VoicemailOrganizationPolicy.
        Whether voicemail is enable for this organization

        :return: The enabled of this VoicemailOrganizationPolicy.
        :rtype: bool
        """
        return self._enabled

    @enabled.setter
    def enabled(self, enabled):
        """
        Sets the enabled of this VoicemailOrganizationPolicy.
        Whether voicemail is enable for this organization

        :param enabled: The enabled of this VoicemailOrganizationPolicy.
        :type: bool
        """
        self._enabled = enabled

    @property
    def alert_timeout_seconds(self):
        """
        Gets the alert_timeout_seconds of this VoicemailOrganizationPolicy.
        The organization's default number of seconds to ring a user's phone before a call is transfered to voicemail

        :return: The alert_timeout_seconds of this VoicemailOrganizationPolicy.
        :rtype: int
        """
        return self._alert_timeout_seconds

    @alert_timeout_seconds.setter
    def alert_timeout_seconds(self, alert_timeout_seconds):
        """
        Sets the alert_timeout_seconds of this VoicemailOrganizationPolicy.
        The organization's default number of seconds to ring a user's phone before a call is transfered to voicemail

        :param alert_timeout_seconds: The alert_timeout_seconds of this VoicemailOrganizationPolicy.
        :type: int
        """
        self._alert_timeout_seconds = alert_timeout_seconds

    @property
    def pin_configuration(self):
        """
        Gets the pin_configuration of this VoicemailOrganizationPolicy.
        The configuration for user PINs to access their voicemail from a phone

        :return: The pin_configuration of this VoicemailOrganizationPolicy.
        :rtype: PINConfiguration
        """
        return self._pin_configuration

    @pin_configuration.setter
    def pin_configuration(self, pin_configuration):
        """
        Sets the pin_configuration of this VoicemailOrganizationPolicy.
        The configuration for user PINs to access their voicemail from a phone

        :param pin_configuration: The pin_configuration of this VoicemailOrganizationPolicy.
        :type: PINConfiguration
        """
        self._pin_configuration = pin_configuration

    @property
    def voicemail_extension(self):
        """
        Gets the voicemail_extension of this VoicemailOrganizationPolicy.
        The extension for voicemail retrieval.  The default value is *86.

        :return: The voicemail_extension of this VoicemailOrganizationPolicy.
        :rtype: str
        """
        return self._voicemail_extension

    @voicemail_extension.setter
    def voicemail_extension(self, voicemail_extension):
        """
        Sets the voicemail_extension of this VoicemailOrganizationPolicy.
        The extension for voicemail retrieval.  The default value is *86.

        :param voicemail_extension: The voicemail_extension of this VoicemailOrganizationPolicy.
        :type: str
        """
        self._voicemail_extension = voicemail_extension

    @property
    def pin_required(self):
        """
        Gets the pin_required of this VoicemailOrganizationPolicy.
        If this is true, a PIN is required when accessing a user's voicemail from a phone.

        :return: The pin_required of this VoicemailOrganizationPolicy.
        :rtype: bool
        """
        return self._pin_required

    @pin_required.setter
    def pin_required(self, pin_required):
        """
        Sets the pin_required of this VoicemailOrganizationPolicy.
        If this is true, a PIN is required when accessing a user's voicemail from a phone.

        :param pin_required: The pin_required of this VoicemailOrganizationPolicy.
        :type: bool
        """
        self._pin_required = pin_required

    @property
    def send_email_notifications(self):
        """
        Gets the send_email_notifications of this VoicemailOrganizationPolicy.
        Whether email notifications are sent for new voicemails in the organization. If false, new voicemail email notifications are not be sent for the organization overriding any user or group setting.

        :return: The send_email_notifications of this VoicemailOrganizationPolicy.
        :rtype: bool
        """
        return self._send_email_notifications

    @send_email_notifications.setter
    def send_email_notifications(self, send_email_notifications):
        """
        Sets the send_email_notifications of this VoicemailOrganizationPolicy.
        Whether email notifications are sent for new voicemails in the organization. If false, new voicemail email notifications are not be sent for the organization overriding any user or group setting.

        :param send_email_notifications: The send_email_notifications of this VoicemailOrganizationPolicy.
        :type: bool
        """
        self._send_email_notifications = send_email_notifications

    @property
    def modified_date(self):
        """
        Gets the modified_date of this VoicemailOrganizationPolicy.
        The date the policy was last modified. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :return: The modified_date of this VoicemailOrganizationPolicy.
        :rtype: datetime
        """
        return self._modified_date

    @modified_date.setter
    def modified_date(self, modified_date):
        """
        Sets the modified_date of this VoicemailOrganizationPolicy.
        The date the policy was last modified. Date time is represented as an ISO-8601 string. For example: yyyy-MM-ddTHH:mm:ss[.mmm]Z

        :param modified_date: The modified_date of this VoicemailOrganizationPolicy.
        :type: datetime
        """
        self._modified_date = modified_date

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        # dict.items() works on both Python 2 and 3; six.iteritems is not needed.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_json(self):
        """
        Returns the model as raw JSON
        """
        return json.dumps(sanitize_for_serialization(self.to_dict()))

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Comparing against an unrelated type returns False instead of
        raising AttributeError (the previous implementation accessed
        ``other.__dict__`` unconditionally, so ``policy == 42`` crashed).
        """
        if not isinstance(other, VoicemailOrganizationPolicy):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
|
[
"six.iteritems"
] |
[((8409, 8438), 'six.iteritems', 'iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (8418, 8438), False, 'from six import iteritems\n')]
|
from collections import OrderedDict
import pytest
import sys
import yaml
from apispec import APISpec, BasePlugin
from apispec.exceptions import (
APISpecError,
DuplicateComponentNameError,
DuplicateParameterError,
InvalidParameterError,
)
from .utils import (
get_schemas,
get_paths,
get_parameters,
get_responses,
get_security_schemes,
build_ref,
)
# Sample description reused by the metadata tests below.
# NOTE: the fragments must be wrapped in parentheses so Python's implicit
# string concatenation joins them into one value. Without the parentheses the
# three trailing string literals were standalone expression statements and
# ``description`` only held the first sentence fragment.
description = (
    "This is a sample Petstore server. You can find out more "
    'about Swagger at <a href="http://swagger.wordnik.com">http://swagger.wordnik.com</a> '
    "or on irc.freenode.net, #swagger. For this sample, you can use the api "
    'key "special-key" to test the authorization filters'
)
@pytest.fixture(params=("2.0", "3.0.0"))
def spec(request):
    """Build an APISpec for the parametrized OpenAPI version.

    For 2.0 the security data is passed at the top level; for 3.x it is
    nested under ``components`` alongside a reusable ErrorResponse schema.
    """
    version = request.param
    if version == "2.0":
        extra_kwargs = {"security": [{"apiKey": []}]}
    else:
        bearer_scheme = {"type": "http", "scheme": "bearer", "bearerFormat": "JWT"}
        error_schema = {
            "type": "object",
            "properties": {
                "ok": {
                    "type": "boolean",
                    "description": "status indicator",
                    "example": False,
                }
            },
            "required": ["ok"],
        }
        extra_kwargs = {
            "components": {
                "securitySchemes": {"bearerAuth": bearer_scheme},
                "schemas": {"ErrorResponse": error_schema},
            }
        }
    return APISpec(
        title="Swagger Petstore",
        version="1.0.0",
        openapi_version=version,
        info={"description": description},
        **extra_kwargs
    )
class TestAPISpecInit:
    """Constructor validation."""

    def test_raises_wrong_apispec_version(self):
        """An unsupported OpenAPI version must raise APISpecError."""
        with pytest.raises(APISpecError, match="Not a valid OpenAPI version number:"):
            APISpec(
                "Swagger Petstore",
                version="1.0.0",
                openapi_version="4.0",  # 4.0 is not supported
                info={"description": description},
                security=[{"apiKey": []}],
            )
class TestMetadata:
    """Checks on the top-level document produced by ``spec.to_dict()``."""

    def test_openapi_metadata(self, spec):
        """Title/version/description land in ``info``; security data sits at
        the top level for OpenAPI 2.0 and under ``components`` for 3.x."""
        metadata = spec.to_dict()
        assert metadata["info"]["title"] == "Swagger Petstore"
        assert metadata["info"]["version"] == "1.0.0"
        assert metadata["info"]["description"] == description
        if spec.openapi_version.major < 3:
            assert metadata["swagger"] == spec.openapi_version.vstring
            assert metadata["security"] == [{"apiKey": []}]
        else:
            assert metadata["openapi"] == spec.openapi_version.vstring
            security_schemes = {
                "bearerAuth": dict(type="http", scheme="bearer", bearerFormat="JWT")
            }
            assert metadata["components"]["securitySchemes"] == security_schemes
            assert metadata["components"]["schemas"].get("ErrorResponse", False)
            # NOTE(review): the three asserts below duplicate the checks at the
            # top of the test — harmless, possibly leftover from a refactor.
            assert metadata["info"]["title"] == "Swagger Petstore"
            assert metadata["info"]["version"] == "1.0.0"
            assert metadata["info"]["description"] == description

    @pytest.mark.parametrize("spec", ("3.0.0",), indirect=True)
    def test_openapi_metadata_merge_v3(self, spec):
        """A schema registered via the API merges with the schemas the
        fixture already placed under ``components`` (v3 only)."""
        properties = {
            "ok": {
                "type": "boolean",
                "description": "property description",
                "example": True,
            }
        }
        spec.components.schema(
            "definition", {"properties": properties, "description": "description"}
        )
        metadata = spec.to_dict()
        assert metadata["components"]["schemas"].get("ErrorResponse", False)
        assert metadata["components"]["schemas"].get("definition", False)
class TestTags:
    """Tag registration via ``spec.tag()``."""

    tag = {
        "name": "MyTag",
        "description": "This tag gathers all API endpoints which are mine.",
    }

    def test_tag(self, spec):
        """A registered tag appears in the rendered ``tags`` list."""
        spec.tag(self.tag)
        assert self.tag in spec.to_dict()["tags"]

    def test_tag_is_chainable(self, spec):
        """``tag()`` returns the spec, so calls chain and preserve order."""
        spec.tag({"name": "tag1"}).tag({"name": "tag2"})
        rendered = spec.to_dict()["tags"]
        assert rendered == [{"name": "tag1"}, {"name": "tag2"}]
class TestComponents:
    """Registration of reusable components: schemas, parameters, responses
    and security schemes, including duplicate-name rejection."""

    # Shared schema properties used by the schema tests below.
    properties = {
        "id": {"type": "integer", "format": "int64"},
        "name": {"type": "string", "example": "doggie"},
    }

    def test_schema(self, spec):
        """A registered schema is retrievable with its properties intact."""
        spec.components.schema("Pet", {"properties": self.properties})
        defs = get_schemas(spec)
        assert "Pet" in defs
        assert defs["Pet"]["properties"] == self.properties

    def test_schema_is_chainable(self, spec):
        """``schema()`` returns the components object, so calls chain."""
        spec.components.schema("Pet", {"properties": {}}).schema(
            "Plant", {"properties": {}}
        )
        defs = get_schemas(spec)
        assert "Pet" in defs
        assert "Plant" in defs

    def test_schema_description(self, spec):
        """The ``description`` field of a schema is preserved."""
        model_description = "An animal which lives with humans."
        spec.components.schema(
            "Pet", {"properties": self.properties, "description": model_description}
        )
        defs = get_schemas(spec)
        assert defs["Pet"]["description"] == model_description

    def test_schema_stores_enum(self, spec):
        """The ``enum`` field of a schema is preserved."""
        enum = ["name", "photoUrls"]
        spec.components.schema("Pet", {"properties": self.properties, "enum": enum})
        defs = get_schemas(spec)
        assert defs["Pet"]["enum"] == enum

    def test_schema_discriminator(self, spec):
        """The ``discriminator`` field of a schema is preserved."""
        spec.components.schema(
            "Pet", {"properties": self.properties, "discriminator": "name"}
        )
        defs = get_schemas(spec)
        assert defs["Pet"]["discriminator"] == "name"

    def test_schema_duplicate_name(self, spec):
        """Registering two schemas under one name raises."""
        spec.components.schema("Pet", {"properties": self.properties})
        with pytest.raises(
            DuplicateComponentNameError,
            match='Another schema with name "Pet" is already registered.',
        ):
            spec.components.schema("Pet", properties=self.properties)

    def test_parameter(self, spec):
        """A registered parameter gains ``in``/``name``/``required`` fields."""
        parameter = {"format": "int64", "type": "integer"}
        spec.components.parameter("PetId", "path", parameter)
        params = get_parameters(spec)
        assert params["PetId"] == {
            "format": "int64",
            "type": "integer",
            "in": "path",
            "name": "PetId",
            "required": True,
        }

    def test_parameter_is_chainable(self, spec):
        """``parameter()`` returns the components object, so calls chain."""
        spec.components.parameter("param1", "path").parameter("param2", "path")
        params = get_parameters(spec)
        assert "param1" in params
        assert "param2" in params

    def test_parameter_duplicate_name(self, spec):
        """Registering two parameters under one name raises."""
        spec.components.parameter("test_parameter", "path")
        with pytest.raises(
            DuplicateComponentNameError,
            match='Another parameter with name "test_parameter" is already registered.',
        ):
            spec.components.parameter("test_parameter", "path")

    def test_response(self, spec):
        """A registered response is retrievable unchanged."""
        response = {"description": "Pet not found"}
        spec.components.response("NotFound", response)
        responses = get_responses(spec)
        assert responses["NotFound"] == response

    def test_response_is_chainable(self, spec):
        """``response()`` returns the components object, so calls chain."""
        spec.components.response("resp1").response("resp2")
        responses = get_responses(spec)
        assert "resp1" in responses
        assert "resp2" in responses

    def test_response_duplicate_name(self, spec):
        """Registering two responses under one name raises."""
        spec.components.response("test_response")
        with pytest.raises(
            DuplicateComponentNameError,
            match='Another response with name "test_response" is already registered.',
        ):
            spec.components.response("test_response")

    def test_security_scheme(self, spec):
        """A registered security scheme is retrievable unchanged."""
        sec_scheme = {"type": "apiKey", "in": "header", "name": "X-API-Key"}
        spec.components.security_scheme("ApiKeyAuth", sec_scheme)
        assert get_security_schemes(spec)["ApiKeyAuth"] == sec_scheme

    def test_security_scheme_is_chainable(self, spec):
        """``security_scheme()`` returns the components object, so calls chain."""
        spec.components.security_scheme("sec_1", {}).security_scheme("sec_2", {})
        security_schemes = get_security_schemes(spec)
        assert "sec_1" in security_schemes
        assert "sec_2" in security_schemes

    def test_security_scheme_duplicate_name(self, spec):
        """Registering two security schemes under one name raises."""
        sec_scheme_1 = {"type": "apiKey", "in": "header", "name": "X-API-Key"}
        sec_scheme_2 = {"type": "apiKey", "in": "header", "name": "X-API-Key-2"}
        spec.components.security_scheme("ApiKeyAuth", sec_scheme_1)
        with pytest.raises(
            DuplicateComponentNameError,
            match='Another security scheme with name "ApiKeyAuth" is already registered.',
        ):
            spec.components.security_scheme("ApiKeyAuth", sec_scheme_2)

    def test_to_yaml(self, spec):
        """YAML output round-trips to the same dict as ``to_dict()``."""
        enum = ["name", "photoUrls"]
        spec.components.schema("Pet", properties=self.properties, enum=enum)
        assert spec.to_dict() == yaml.safe_load(spec.to_yaml())
class TestPath:
    """Behaviour of ``spec.path()``: registration, ordering, merging,
    parameter/response resolution and input validation."""

    # Reference path item reused across the tests in this class.
    paths = {
        "/pet/{petId}": {
            "get": {
                "parameters": [
                    {
                        "required": True,
                        "format": "int64",
                        "name": "petId",
                        "in": "path",
                        "type": "integer",
                        "description": "ID of pet that needs to be fetched",
                    }
                ],
                "responses": {
                    "200": {"schema": "Pet", "description": "successful operation"},
                    "400": {"description": "Invalid ID supplied"},
                    "404": {"description": "Pet not found"},
                },
                "produces": ["application/json", "application/xml"],
                "operationId": "getPetById",
                "summary": "Find pet by ID",
                "description": (
                    "Returns a pet when ID < 10. "
                    "ID > 10 or nonintegers will simulate API error conditions"
                ),
                "tags": ["pet"],
            }
        }
    }

    def test_path(self, spec):
        """All operation fields survive registration unchanged."""
        route_spec = self.paths["/pet/{petId}"]["get"]
        spec.path(
            path="/pet/{petId}",
            operations=dict(
                get=dict(
                    parameters=route_spec["parameters"],
                    responses=route_spec["responses"],
                    produces=route_spec["produces"],
                    operationId=route_spec["operationId"],
                    summary=route_spec["summary"],
                    description=route_spec["description"],
                    tags=route_spec["tags"],
                )
            ),
        )
        p = get_paths(spec)["/pet/{petId}"]["get"]
        assert p["parameters"] == route_spec["parameters"]
        assert p["responses"] == route_spec["responses"]
        assert p["operationId"] == route_spec["operationId"]
        assert p["summary"] == route_spec["summary"]
        assert p["description"] == route_spec["description"]
        assert p["tags"] == route_spec["tags"]

    def test_paths_maintain_order(self, spec):
        """Paths are rendered in registration order."""
        spec.path(path="/path1")
        spec.path(path="/path2")
        spec.path(path="/path3")
        spec.path(path="/path4")
        assert list(spec.to_dict()["paths"].keys()) == [
            "/path1",
            "/path2",
            "/path3",
            "/path4",
        ]

    def test_paths_is_chainable(self, spec):
        """``path()`` returns the spec, so calls chain."""
        spec.path(path="/path1").path("/path2")
        assert list(spec.to_dict()["paths"].keys()) == ["/path1", "/path2"]

    def test_methods_maintain_order(self, spec):
        """HTTP methods within a path are rendered in registration order."""
        methods = ["get", "post", "put", "patch", "delete", "head", "options"]
        for method in methods:
            spec.path(path="/path", operations=OrderedDict({method: {}}))
        assert list(spec.to_dict()["paths"]["/path"]) == methods

    def test_path_merges_paths(self, spec):
        """Test that adding a second HTTP method to an existing path performs
        a merge operation instead of an overwrite"""
        path = "/pet/{petId}"
        route_spec = self.paths[path]["get"]
        spec.path(path=path, operations=dict(get=route_spec))
        spec.path(
            path=path,
            operations=dict(
                put=dict(
                    parameters=route_spec["parameters"],
                    responses=route_spec["responses"],
                    produces=route_spec["produces"],
                    operationId="updatePet",
                    summary="Updates an existing Pet",
                    description="Use this method to make changes to Pet `petId`",
                    tags=route_spec["tags"],
                )
            ),
        )
        p = get_paths(spec)[path]
        assert "get" in p
        assert "put" in p

    @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0"))
    def test_path_called_twice_with_same_operations_parameters(self, openapi_version):
        """Test calling path twice with same operations or parameters
        operations and parameters being mutated by clean_operations and plugin helpers
        should not make path fail on second call
        """

        class TestPlugin(BasePlugin):
            def path_helper(self, path, operations, parameters, **kwargs):
                """Mutate operations and parameters"""
                operations.update({"post": {"responses": {"201": "201ResponseRef"}}})
                parameters.append("ParamRef_3")
                return path

        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version=openapi_version,
            plugins=[TestPlugin()],
        )
        path = "/pet/{petId}"
        parameters = ["ParamRef_1"]
        operation = {
            "parameters": ["ParamRef_2"],
            "responses": {"200": "200ResponseRef"},
        }
        spec.path(path=path, operations={"get": operation}, parameters=parameters)
        spec.path(path=path, operations={"put": operation}, parameters=parameters)
        operations = (get_paths(spec))[path]
        assert (
            operations["get"]
            == operations["put"]
            == {
                "parameters": [build_ref(spec, "parameter", "ParamRef_2")],
                "responses": {"200": build_ref(spec, "response", "200ResponseRef")},
            }
        )
        assert operations["parameters"] == [
            build_ref(spec, "parameter", "ParamRef_1"),
            build_ref(spec, "parameter", "ParamRef_3"),
        ]

    def test_path_ensures_path_parameters_required(self, spec):
        """Path-located parameters are forced to ``required: True``."""
        path = "/pet/{petId}"
        spec.path(
            path=path,
            operations=dict(put=dict(parameters=[{"name": "petId", "in": "path"}])),
        )
        assert get_paths(spec)[path]["put"]["parameters"][0]["required"] is True

    def test_path_with_no_path_raises_error(self, spec):
        """Calling ``path()`` without a path template raises."""
        message = "Path template is not specified"
        with pytest.raises(APISpecError, match=message):
            spec.path()

    def test_path_summary_description(self, spec):
        """Path-level summary and description are preserved."""
        summary = "Operations on a Pet"
        description = "Operations on a Pet identified by its ID"
        spec.path(path="/pet/{petId}", summary=summary, description=description)
        p = get_paths(spec)["/pet/{petId}"]
        assert p["summary"] == summary
        assert p["description"] == description

    def test_parameter(self, spec):
        """A parameter referenced by name resolves to a ``$ref`` in the path
        and its definition is stored per the OpenAPI major version."""
        route_spec = self.paths["/pet/{petId}"]["get"]
        spec.components.parameter("test_parameter", "path", route_spec["parameters"][0])
        spec.path(
            path="/pet/{petId}", operations={"get": {"parameters": ["test_parameter"]}}
        )
        metadata = spec.to_dict()
        p = get_paths(spec)["/pet/{petId}"]["get"]
        assert p["parameters"][0] == build_ref(spec, "parameter", "test_parameter")
        if spec.openapi_version.major < 3:
            assert (
                route_spec["parameters"][0] == metadata["parameters"]["test_parameter"]
            )
        else:
            assert (
                route_spec["parameters"][0]
                == metadata["components"]["parameters"]["test_parameter"]
            )

    @pytest.mark.parametrize(
        "parameters",
        ([{"name": "petId"}], [{"in": "path"}]),  # first case missing "in", second missing "name"
    )
    def test_invalid_parameter(self, spec, parameters):
        """A parameter lacking ``name`` or ``in`` is rejected."""
        path = "/pet/{petId}"
        with pytest.raises(InvalidParameterError):
            spec.path(path=path, operations=dict(put={}, get={}), parameters=parameters)

    def test_parameter_duplicate(self, spec):
        """Same name in different locations is fine; same name in the same
        location raises."""
        spec.path(
            path="/pet/{petId}",
            operations={
                "get": {
                    "parameters": [
                        {"name": "petId", "in": "path"},
                        {"name": "petId", "in": "query"},
                    ]
                }
            },
        )
        with pytest.raises(DuplicateParameterError):
            spec.path(
                path="/pet/{petId}",
                operations={
                    "get": {
                        "parameters": [
                            {"name": "petId", "in": "path"},
                            {"name": "petId", "in": "path"},
                        ]
                    }
                },
            )

    def test_global_parameters(self, spec):
        """Path-level (shared) parameters accept inline dicts and refs."""
        path = "/pet/{petId}"
        route_spec = self.paths["/pet/{petId}"]["get"]
        spec.components.parameter("test_parameter", "path", route_spec["parameters"][0])
        spec.path(
            path=path,
            operations=dict(put={}, get={}),
            parameters=[{"name": "petId", "in": "path"}, "test_parameter"],
        )
        assert get_paths(spec)[path]["parameters"] == [
            {"name": "petId", "in": "path", "required": True},
            build_ref(spec, "parameter", "test_parameter"),
        ]

    def test_global_parameter_duplicate(self, spec):
        """Duplicate name+location in path-level parameters raises."""
        path = "/pet/{petId}"
        spec.path(
            path=path,
            operations=dict(put={}, get={}),
            parameters=[
                {"name": "petId", "in": "path"},
                {"name": "petId", "in": "query"},
            ],
        )
        assert get_paths(spec)[path]["parameters"] == [
            {"name": "petId", "in": "path", "required": True},
            {"name": "petId", "in": "query"},
        ]
        with pytest.raises(DuplicateParameterError):
            spec.path(
                path=path,
                operations=dict(put={}, get={}),
                parameters=[
                    {"name": "petId", "in": "path"},
                    {"name": "petId", "in": "path"},
                    "test_parameter",
                ],
            )

    def test_response(self, spec):
        """A response referenced by name resolves to a ``$ref`` and its
        definition is stored per the OpenAPI major version."""
        route_spec = self.paths["/pet/{petId}"]["get"]
        spec.components.response("test_response", route_spec["responses"]["200"])
        spec.path(
            path="/pet/{petId}",
            operations={"get": {"responses": {"200": "test_response"}}},
        )
        metadata = spec.to_dict()
        p = get_paths(spec)["/pet/{petId}"]["get"]
        assert p["responses"]["200"] == build_ref(spec, "response", "test_response")
        if spec.openapi_version.major < 3:
            assert (
                route_spec["responses"]["200"] == metadata["responses"]["test_response"]
            )
        else:
            assert (
                route_spec["responses"]["200"]
                == metadata["components"]["responses"]["test_response"]
            )

    @pytest.mark.skipif(
        int(sys.version[0]) == 2, reason="HTTPStatus only available in Python3"
    )
    def test_response_with_HTTPStatus_code(self, spec):
        """An ``http.HTTPStatus`` response key is normalized to a string."""
        from http import HTTPStatus

        code = HTTPStatus(200)
        spec.path(
            path="/pet/{petId}",
            operations={"get": {"responses": {code: "test_response"}}},
        )
        assert "200" in get_paths(spec)["/pet/{petId}"]["get"]["responses"]

    def test_response_with_status_code_range(self, spec, recwarn):
        """Status ranges like ``2XX`` pass through, with a warning on 2.0."""
        status_code = "2XX"
        spec.path(
            path="/pet/{petId}",
            operations={"get": {"responses": {status_code: "test_response"}}},
        )
        if spec.openapi_version.major < 3:
            assert len(recwarn) == 1
            assert recwarn.pop(UserWarning)
        assert status_code in get_paths(spec)["/pet/{petId}"]["get"]["responses"]

    def test_path_check_invalid_http_method(self, spec):
        """Unknown HTTP methods raise, except ``x-`` extension keys."""
        spec.path("/pet/{petId}", operations={"get": {}})
        spec.path("/pet/{petId}", operations={"x-dummy": {}})
        message = "One or more HTTP methods are invalid"
        with pytest.raises(APISpecError, match=message):
            spec.path("/pet/{petId}", operations={"dummy": {}})
class TestPlugins:
    """Each plugin helper hook is invoked and its return value applied."""

    @staticmethod
    def test_plugin_factory(return_none=False):
        """Build a throwaway plugin exercising every helper hook.

        With ``return_none=True`` every helper (except operation_helper)
        returns None, so the library must fall back to its defaults.
        """

        class TestPlugin(BasePlugin):
            def schema_helper(self, name, definition, **kwargs):
                if not return_none:
                    return {"properties": {"name": {"type": "string"}}}

            def parameter_helper(self, parameter, **kwargs):
                if not return_none:
                    return {"description": "some parameter"}

            def response_helper(self, response, **kwargs):
                if not return_none:
                    return {"description": "42"}

            def path_helper(self, path, operations, parameters, **kwargs):
                if not return_none:
                    if path == "/path_1":
                        operations.update({"get": {"responses": {"200": {}}}})
                        parameters.append({"name": "page", "in": "query"})
                        return "/path_1_modified"

            def operation_helper(self, path, operations, **kwargs):
                if path == "/path_2":
                    operations["post"] = {"responses": {"201": {}}}

        return TestPlugin()

    @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0"))
    @pytest.mark.parametrize("return_none", (True, False))
    def test_plugin_schema_helper_is_used(self, openapi_version, return_none):
        """schema_helper output is merged into the registered schema."""
        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version=openapi_version,
            plugins=(self.test_plugin_factory(return_none),),
        )
        spec.components.schema("Pet")
        definitions = get_schemas(spec)
        if return_none:
            assert definitions["Pet"] == {}
        else:
            assert definitions["Pet"] == {"properties": {"name": {"type": "string"}}}

    @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0"))
    @pytest.mark.parametrize("return_none", (True, False))
    def test_plugin_parameter_helper_is_used(self, openapi_version, return_none):
        """parameter_helper output is merged into the registered parameter."""
        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version=openapi_version,
            plugins=(self.test_plugin_factory(return_none),),
        )
        spec.components.parameter("Pet", "body", {})
        parameters = get_parameters(spec)
        if return_none:
            assert parameters["Pet"] == {"in": "body", "name": "Pet"}
        else:
            assert parameters["Pet"] == {
                "in": "body",
                "name": "Pet",
                "description": "some parameter",
            }

    @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0"))
    @pytest.mark.parametrize("return_none", (True, False))
    def test_plugin_response_helper_is_used(self, openapi_version, return_none):
        """response_helper output is merged into the registered response."""
        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version=openapi_version,
            plugins=(self.test_plugin_factory(return_none),),
        )
        spec.components.response("Pet", {})
        responses = get_responses(spec)
        if return_none:
            assert responses["Pet"] == {}
        else:
            assert responses["Pet"] == {"description": "42"}

    @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0"))
    @pytest.mark.parametrize("return_none", (True, False))
    def test_plugin_path_helper_is_used(self, openapi_version, return_none):
        """path_helper may rename the path and mutate operations/parameters."""
        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version=openapi_version,
            plugins=(self.test_plugin_factory(return_none),),
        )
        spec.path("/path_1")
        paths = get_paths(spec)
        assert len(paths) == 1
        if return_none:
            assert paths["/path_1"] == {}
        else:
            assert paths["/path_1_modified"] == {
                "get": {"responses": {"200": {}}},
                "parameters": [{"in": "query", "name": "page"}],
            }

    @pytest.mark.parametrize("openapi_version", ("2.0", "3.0.0"))
    def test_plugin_operation_helper_is_used(self, openapi_version):
        """operation_helper may mutate the operations mapping in place."""
        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version=openapi_version,
            plugins=(self.test_plugin_factory(),),
        )
        spec.path("/path_2", operations={"post": {"responses": {"200": {}}}})
        paths = get_paths(spec)
        assert len(paths) == 1
        assert paths["/path_2"] == {"post": {"responses": {"201": {}}}}
class TestPluginsOrder:
    """Plugin helpers run grouped by phase, in plugin registration order."""

    class OrderedPlugin(BasePlugin):
        """Plugin that records each helper invocation into a shared list."""

        def __init__(self, index, output):
            super(TestPluginsOrder.OrderedPlugin, self).__init__()
            self.index = index
            self.output = output

        def path_helper(self, path, operations, **kwargs):
            self.output.append("plugin_{}_path".format(self.index))

        def operation_helper(self, path, operations, **kwargs):
            self.output.append("plugin_{}_operations".format(self.index))

    def test_plugins_order(self):
        """Test plugins execution order in APISpec.path
        - All path helpers are called, then all operation helpers, then all response helpers.
        - At each step, helpers are executed in the order the plugins are passed to APISpec.
        """
        calls = []
        first = self.OrderedPlugin(1, calls)
        second = self.OrderedPlugin(2, calls)
        spec = APISpec(
            title="Swagger Petstore",
            version="1.0.0",
            openapi_version="3.0.0",
            plugins=(first, second),
        )
        spec.path("/path", operations={"get": {"responses": {200: {}}}})
        expected = [
            "plugin_1_path",
            "plugin_2_path",
            "plugin_1_operations",
            "plugin_2_operations",
        ]
        assert calls == expected
|
[
"apispec.APISpec",
"pytest.fixture",
"pytest.raises",
"collections.OrderedDict",
"pytest.mark.parametrize",
"http.HTTPStatus"
] |
[((689, 728), 'pytest.fixture', 'pytest.fixture', ([], {'params': "('2.0', '3.0.0')"}), "(params=('2.0', '3.0.0'))\n", (703, 728), False, 'import pytest\n'), ((1616, 1758), 'apispec.APISpec', 'APISpec', ([], {'title': '"""Swagger Petstore"""', 'version': '"""1.0.0"""', 'openapi_version': 'openapi_version', 'info': "{'description': description}"}), "(title='Swagger Petstore', version='1.0.0', openapi_version=\n openapi_version, info={'description': description}, **security_kwargs)\n", (1623, 1758), False, 'from apispec import APISpec, BasePlugin\n'), ((3276, 3334), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""spec"""', "('3.0.0',)"], {'indirect': '(True)'}), "('spec', ('3.0.0',), indirect=True)\n", (3299, 3334), False, 'import pytest\n'), ((13059, 13119), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""openapi_version"""', "('2.0', '3.0.0')"], {}), "('openapi_version', ('2.0', '3.0.0'))\n", (13082, 13119), False, 'import pytest\n'), ((16476, 16554), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""parameters"""', "([{'name': 'petId'}], [{'in': 'path'}])"], {}), "('parameters', ([{'name': 'petId'}], [{'in': 'path'}]))\n", (16499, 16554), False, 'import pytest\n'), ((22293, 22353), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""openapi_version"""', "('2.0', '3.0.0')"], {}), "('openapi_version', ('2.0', '3.0.0'))\n", (22316, 22353), False, 'import pytest\n'), ((22359, 22412), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""return_none"""', '(True, False)'], {}), "('return_none', (True, False))\n", (22382, 22412), False, 'import pytest\n'), ((22952, 23012), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""openapi_version"""', "('2.0', '3.0.0')"], {}), "('openapi_version', ('2.0', '3.0.0'))\n", (22975, 23012), False, 'import pytest\n'), ((23018, 23071), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""return_none"""', '(True, False)'], {}), "('return_none', (True, False))\n", (23041, 
23071), False, 'import pytest\n'), ((23737, 23797), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""openapi_version"""', "('2.0', '3.0.0')"], {}), "('openapi_version', ('2.0', '3.0.0'))\n", (23760, 23797), False, 'import pytest\n'), ((23803, 23856), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""return_none"""', '(True, False)'], {}), "('return_none', (True, False))\n", (23826, 23856), False, 'import pytest\n'), ((24377, 24437), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""openapi_version"""', "('2.0', '3.0.0')"], {}), "('openapi_version', ('2.0', '3.0.0'))\n", (24400, 24437), False, 'import pytest\n'), ((24443, 24496), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""return_none"""', '(True, False)'], {}), "('return_none', (True, False))\n", (24466, 24496), False, 'import pytest\n'), ((25140, 25200), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""openapi_version"""', "('2.0', '3.0.0')"], {}), "('openapi_version', ('2.0', '3.0.0'))\n", (25163, 25200), False, 'import pytest\n'), ((20094, 20109), 'http.HTTPStatus', 'HTTPStatus', (['(200)'], {}), '(200)\n', (20104, 20109), False, 'from http import HTTPStatus\n'), ((1943, 1985), 'pytest.raises', 'pytest.raises', (['APISpecError'], {'match': 'message'}), '(APISpecError, match=message)\n', (1956, 1985), False, 'import pytest\n'), ((1999, 2133), 'apispec.APISpec', 'APISpec', (['"""Swagger Petstore"""'], {'version': '"""1.0.0"""', 'openapi_version': '"""4.0"""', 'info': "{'description': description}", 'security': "[{'apiKey': []}]"}), "('Swagger Petstore', version='1.0.0', openapi_version='4.0', info={\n 'description': description}, security=[{'apiKey': []}])\n", (2006, 2133), False, 'from apispec import APISpec, BasePlugin\n'), ((5951, 6061), 'pytest.raises', 'pytest.raises', (['DuplicateComponentNameError'], {'match': '"""Another schema with name "Pet" is already registered."""'}), '(DuplicateComponentNameError, match=\n \'Another schema with name "Pet" is 
already registered.\')\n', (5964, 6061), False, 'import pytest\n'), ((6913, 7037), 'pytest.raises', 'pytest.raises', (['DuplicateComponentNameError'], {'match': '"""Another parameter with name "test_parameter" is already registered."""'}), '(DuplicateComponentNameError, match=\n \'Another parameter with name "test_parameter" is already registered.\')\n', (6926, 7037), False, 'import pytest\n'), ((7700, 7822), 'pytest.raises', 'pytest.raises', (['DuplicateComponentNameError'], {'match': '"""Another response with name "test_response" is already registered."""'}), '(DuplicateComponentNameError, match=\n \'Another response with name "test_response" is already registered.\')\n', (7713, 7822), False, 'import pytest\n'), ((8741, 8867), 'pytest.raises', 'pytest.raises', (['DuplicateComponentNameError'], {'match': '"""Another security scheme with name "ApiKeyAuth" is already registered."""'}), '(DuplicateComponentNameError, match=\n \'Another security scheme with name "ApiKeyAuth" is already registered.\')\n', (8754, 8867), False, 'import pytest\n'), ((15229, 15271), 'pytest.raises', 'pytest.raises', (['APISpecError'], {'match': 'message'}), '(APISpecError, match=message)\n', (15242, 15271), False, 'import pytest\n'), ((16712, 16748), 'pytest.raises', 'pytest.raises', (['InvalidParameterError'], {}), '(InvalidParameterError)\n', (16725, 16748), False, 'import pytest\n'), ((17218, 17256), 'pytest.raises', 'pytest.raises', (['DuplicateParameterError'], {}), '(DuplicateParameterError)\n', (17231, 17256), False, 'import pytest\n'), ((18712, 18750), 'pytest.raises', 'pytest.raises', (['DuplicateParameterError'], {}), '(DuplicateParameterError)\n', (18725, 18750), False, 'import pytest\n'), ((21015, 21057), 'pytest.raises', 'pytest.raises', (['APISpecError'], {'match': 'message'}), '(APISpecError, match=message)\n', (21028, 21057), False, 'import pytest\n'), ((12029, 12054), 'collections.OrderedDict', 'OrderedDict', (['{method: {}}'], {}), '({method: {}})\n', (12040, 12054), 
False, 'from collections import OrderedDict\n')]
|
# -*- mode: python; coding: utf-8 -*-
import sys
import logging, logging.config
import json
LOGGER_NAME = 'pg-perfect-ticker'
_logger = None
def try_print(*args, **kwargs):
    """Best-effort ``print``: swallow OSError (e.g. a closed stdout) and return None."""
    try:
        result = print(*args, **kwargs)
    except OSError:
        return None
    return result
def log(level, msg):
    """Emit *msg* through the configured logger, or to stderr/stdout before init()."""
    global _logger
    if _logger is not None:
        # init() already configured a logger; delegate entirely to it.
        _logger.log(level, msg)
    elif level >= logging.WARNING:
        # No logger yet: warnings and above go to stderr.
        try_print(msg, file=sys.stderr)
    else:
        try_print(msg)
def init(log_config_path):
    """Optionally apply a JSON dictConfig file, then bind the module-level logger."""
    global _logger
    if log_config_path is not None:
        with open(log_config_path, encoding='utf-8') as fh:
            config = json.load(fh)
        logging.config.dictConfig(config)
    _logger = logging.getLogger(LOGGER_NAME)
|
[
"json.load",
"logging.config.dictConfig",
"logging.getLogger"
] |
[((714, 749), 'logging.config.dictConfig', 'logging.config.dictConfig', (['log_dict'], {}), '(log_dict)\n', (739, 749), False, 'import logging, logging.config\n'), ((768, 798), 'logging.getLogger', 'logging.getLogger', (['LOGGER_NAME'], {}), '(LOGGER_NAME)\n', (785, 798), False, 'import logging, logging.config\n'), ((688, 705), 'json.load', 'json.load', (['log_fd'], {}), '(log_fd)\n', (697, 705), False, 'import json\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import datetime
import sys
from sys import stdout
# Diagnostic banner for frozen executables: interpreter details, arguments,
# and the module search path, written line by line to stdout.
stdout.write('Hello from cx_Freeze\n')
now = datetime.today().strftime('%B %d, %Y %H:%M:%S')
stdout.write('The current date is %s\n\n' % now)
for label, value in (('Executable', sys.executable), ('Prefix', sys.prefix)):
    stdout.write('%s: %r\n' % (label, value))
stdout.write('Default encoding: %r\n' % sys.getdefaultencoding())
stdout.write('File system encoding: %r\n\n' % sys.getfilesystemencoding())
for title, items in (('ARGUMENTS', sys.argv), ('PATH', sys.path)):
    stdout.write('%s:\n' % title)
    for item in items:
        stdout.write('%s\n' % item)
    stdout.write('\n')
|
[
"sys.stdout.write",
"sys.getdefaultencoding",
"datetime.datetime.today",
"sys.getfilesystemencoding"
] |
[((112, 150), 'sys.stdout.write', 'stdout.write', (['"""Hello from cx_Freeze\n"""'], {}), "('Hello from cx_Freeze\\n')\n", (124, 150), False, 'from sys import stdout\n'), ((258, 307), 'sys.stdout.write', 'stdout.write', (["('Executable: %r\\n' % sys.executable)"], {}), "('Executable: %r\\n' % sys.executable)\n", (270, 307), False, 'from sys import stdout\n'), ((308, 349), 'sys.stdout.write', 'stdout.write', (["('Prefix: %r\\n' % sys.prefix)"], {}), "('Prefix: %r\\n' % sys.prefix)\n", (320, 349), False, 'from sys import stdout\n'), ((492, 520), 'sys.stdout.write', 'stdout.write', (['"""ARGUMENTS:\n"""'], {}), "('ARGUMENTS:\\n')\n", (504, 520), False, 'from sys import stdout\n'), ((569, 587), 'sys.stdout.write', 'stdout.write', (['"""\n"""'], {}), "('\\n')\n", (581, 587), False, 'from sys import stdout\n'), ((589, 612), 'sys.stdout.write', 'stdout.write', (['"""PATH:\n"""'], {}), "('PATH:\\n')\n", (601, 612), False, 'from sys import stdout\n'), ((661, 679), 'sys.stdout.write', 'stdout.write', (['"""\n"""'], {}), "('\\n')\n", (673, 679), False, 'from sys import stdout\n'), ((544, 568), 'sys.stdout.write', 'stdout.write', (["('%s\\n' % a)"], {}), "('%s\\n' % a)\n", (556, 568), False, 'from sys import stdout\n'), ((636, 660), 'sys.stdout.write', 'stdout.write', (["('%s\\n' % p)"], {}), "('%s\\n' % p)\n", (648, 660), False, 'from sys import stdout\n'), ((390, 414), 'sys.getdefaultencoding', 'sys.getdefaultencoding', ([], {}), '()\n', (412, 414), False, 'import sys\n'), ((462, 489), 'sys.getfilesystemencoding', 'sys.getfilesystemencoding', ([], {}), '()\n', (487, 489), False, 'import sys\n'), ((208, 224), 'datetime.datetime.today', 'datetime.today', ([], {}), '()\n', (222, 224), False, 'from datetime import datetime\n')]
|
import numpy as np
import os
from PIL import Image
from torch.utils.data import Dataset
# Recognised raster-image suffixes for dataset files.
EXTENSIONS = ['.jpg', '.png']


def load_image(file):
    """Open *file* (path or binary file object) as a PIL image."""
    img = Image.open(file)
    return img


def is_image(filename):
    """Return True when *filename* ends with one of the known image extensions."""
    return filename.endswith(tuple(EXTENSIONS))
def is_label(filename):
    """Return True for Cityscapes label files, which end in ``_labelTrainIds.png``."""
    suffix = "_labelTrainIds.png"
    return filename.endswith(suffix)
def image_path(root, basename, extension):
    """Join *root* with ``basename + extension`` into one path."""
    filename = f'{basename}{extension}'
    return os.path.join(root, filename)
def image_path_city(root, name):
    """Join *root* and *name* into one path (Cityscapes file names keep their subdirs)."""
    return os.path.join(root, str(name))
def image_basename(filename):
    """Return the file name of *filename* without its directory or extension."""
    stem, _ext = os.path.splitext(filename)
    return os.path.basename(stem)
class VOC12(Dataset):
    """(image, label) pairs read from ``<root>/images`` (.jpg) and ``<root>/labels`` (.png).

    Only the red channel of the label image is kept as the target map.
    """

    def __init__(self, root, input_transform=None, target_transform=None):
        self.images_root = os.path.join(root, 'images')
        self.labels_root = os.path.join(root, 'labels')
        # Index samples by basename so image/label files can be matched up.
        self.filenames = sorted(
            image_basename(f)
            for f in os.listdir(self.labels_root) if is_image(f))
        self.input_transform = input_transform
        self.target_transform = target_transform

    def __getitem__(self, index):
        filename = self.filenames[index]
        with open(image_path(self.images_root, filename, '.jpg'), 'rb') as f:
            image = load_image(f).convert('RGB')
        with open(image_path(self.labels_root, filename, '.png'), 'rb') as f:
            label = load_image(f).convert('RGB')
        # Keep only the red channel as the single-channel label map.
        label, _g, _b = label.split()
        if self.input_transform is not None:
            image = self.input_transform(image)
        if self.target_transform is not None:
            label = self.target_transform(label)
        return image, label

    def __len__(self):
        return len(self.filenames)
class cityscapes(Dataset):
    """(image, fine-label) pairs discovered under ``<root>/leftImg8bit/<subset>``
    and ``<root>/gtFine/<subset>``.

    Only the red channel of the label image (the trainIds map) is kept.
    """

    def __init__(self, root, co_transform=None, subset='train'):
        self.images_root = os.path.join(root, 'leftImg8bit/') + subset
        #self.labels_root = os.path.join(root, 'gtCoarse/') + subset
        self.labels_root = os.path.join(root, 'gtFine/') + subset
        print(self.images_root)
        # Recursively collect matching files; both lists are sorted so that
        # image i corresponds to label i.
        self.filenames = self._collect(self.images_root, is_image)
        self.filenamesGt = self._collect(self.labels_root, is_label)
        self.co_transform = co_transform  # ADDED THIS

    @staticmethod
    def _collect(top, keep):
        """Walk *top* (with ``~`` expanded) and return sorted paths whose name passes *keep*."""
        found = [os.path.join(dirpath, name)
                 for dirpath, _dirs, names in os.walk(os.path.expanduser(top))
                 for name in names if keep(name)]
        found.sort()
        return found

    def __getitem__(self, index):
        filename = self.filenames[index]
        filenameGt = self.filenamesGt[index]
        with open(image_path_city(self.images_root, filename), 'rb') as f:
            image = load_image(f).convert('RGB')
        with open(image_path_city(self.labels_root, filenameGt), 'rb') as f:
            label = load_image(f).convert('RGB')
        # Labels are stored as RGB; the red channel carries the train ids.
        label, _g, _b = label.split()
        if self.co_transform is not None:
            image, label = self.co_transform(image, label)
        return image, label

    def __len__(self):
        return len(self.filenames)
|
[
"os.path.expanduser",
"PIL.Image.open",
"os.path.splitext",
"os.path.join",
"os.listdir"
] |
[((155, 171), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (165, 171), False, 'from PIL import Image\n'), ((389, 433), 'os.path.join', 'os.path.join', (['root', 'f"""{basename}{extension}"""'], {}), "(root, f'{basename}{extension}')\n", (401, 433), False, 'import os\n'), ((479, 508), 'os.path.join', 'os.path.join', (['root', 'f"""{name}"""'], {}), "(root, f'{name}')\n", (491, 508), False, 'import os\n'), ((725, 753), 'os.path.join', 'os.path.join', (['root', '"""images"""'], {}), "(root, 'images')\n", (737, 753), False, 'import os\n'), ((781, 809), 'os.path.join', 'os.path.join', (['root', '"""labels"""'], {}), "(root, 'labels')\n", (793, 809), False, 'import os\n'), ((1832, 1866), 'os.path.join', 'os.path.join', (['root', '"""leftImg8bit/"""'], {}), "(root, 'leftImg8bit/')\n", (1844, 1866), False, 'import os\n'), ((1894, 1923), 'os.path.join', 'os.path.join', (['root', '"""gtFine/"""'], {}), "(root, 'gtFine/')\n", (1906, 1923), False, 'import os\n'), ((568, 594), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (584, 594), False, 'import os\n'), ((2221, 2240), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (2233, 2240), False, 'import os\n'), ((2587, 2606), 'os.path.join', 'os.path.join', (['dp', 'f'], {}), '(dp, f)\n', (2599, 2606), False, 'import os\n'), ((876, 904), 'os.listdir', 'os.listdir', (['self.labels_root'], {}), '(self.labels_root)\n', (886, 904), False, 'import os\n'), ((2267, 2303), 'os.path.expanduser', 'os.path.expanduser', (['self.images_root'], {}), '(self.images_root)\n', (2285, 2303), False, 'import os\n'), ((2633, 2669), 'os.path.expanduser', 'os.path.expanduser', (['self.labels_root'], {}), '(self.labels_root)\n', (2651, 2669), False, 'import os\n')]
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
sys.path.append('../')
from auto_scan_test import AutoScanTest, IgnoreReasons
from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place
import unittest
import hypothesis
from hypothesis import given, settings, seed, example, assume
import hypothesis.strategies as st
import argparse
import numpy as np
from functools import partial
class TestLinspaceOp(AutoScanTest):
    """Auto-scan test for the Paddle-Lite ``linspace`` op on the Host/FP32/NCHW place."""

    def __init__(self, *args, **kwargs):
        AutoScanTest.__init__(self, *args, **kwargs)
        self.enable_testing_on_place(
            TargetType.Host,
            PrecisionType.FP32,
            DataLayoutType.NCHW,
            thread=[1, 2])

    def is_program_valid(self,
                         program_config: ProgramConfig,
                         predictor_config: CxxConfig) -> bool:
        # Every sampled program is considered runnable; no constraints to filter.
        return True

    def sample_program_configs(self, draw):
        """Draw one random linspace program: start in [0,5], stop in [50,60], num in [1,10]."""
        start_id = draw(st.integers(min_value=0, max_value=5))
        stop_id = draw(st.integers(min_value=50, max_value=60))
        num_data = draw(st.integers(min_value=1, max_value=10))
        op_type_str = draw(st.sampled_from(
            [5]))  # dtype attr: 2 = int32, 5 = float32; Lite only supports float here

        # Data generators feed the Start/Stop/Num input tensors; the *1 variants
        # produce float32, the *2 variants int32 (only used when dtype == 2).
        def generate_start1(*args, **kwargs):
            return np.array([float(start_id)]).astype(np.float32)
        def generate_start2(*args, **kwargs):
            return np.array([int(start_id)]).astype(np.int32)
        def generate_stop1(*args, **kwargs):
            return np.array([float(stop_id)]).astype(np.float32)
        def generate_stop2(*args, **kwargs):
            return np.array([int(stop_id)]).astype(np.int32)
        def generate_num(*args, **kwargs):
            return np.array([int(num_data)]).astype(np.int32)
        build_ops = OpConfig(
            type="linspace",
            inputs={
                "Start": ["start_data"],
                "Stop": ["stop_data"],
                "Num": ["num_data"],
            },
            outputs={"Out": ["output_data"], },
            attrs={"dtype": int(op_type_str)})
        # NOTE(review): the int branch (dtype == 2) is currently dead because
        # sampled_from only offers 5; kept for when int support is enabled.
        if op_type_str == 2:
            program_config = ProgramConfig(
                ops=[build_ops],
                weights={},
                inputs={
                    "start_data":
                    TensorConfig(data_gen=partial(generate_start2)),
                    "stop_data":
                    TensorConfig(data_gen=partial(generate_stop2)),
                    "num_data": TensorConfig(data_gen=partial(generate_num)),
                },
                outputs=["output_data"])
        elif op_type_str == 5:
            program_config = ProgramConfig(
                ops=[build_ops],
                weights={},
                inputs={
                    "start_data":
                    TensorConfig(data_gen=partial(generate_start1)),
                    "stop_data":
                    TensorConfig(data_gen=partial(generate_stop1)),
                    "num_data": TensorConfig(data_gen=partial(generate_num)),
                },
                outputs=["output_data"])
        return program_config

    def sample_predictor_configs(self):
        # (configs, op list, (atol, rtol)) expected by the auto-scan framework.
        return self.get_predictor_configs(), ["linspace"], (1e-5, 1e-5)

    def add_ignore_pass_case(self):
        pass

    def test(self, *args, **kwargs):
        self.run_and_statis(quant=False, max_examples=25)
if __name__ == "__main__":
unittest.main(argv=[''])
|
[
"sys.path.append",
"unittest.main",
"functools.partial",
"auto_scan_test.AutoScanTest.__init__",
"hypothesis.strategies.sampled_from",
"hypothesis.strategies.integers"
] |
[((622, 644), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (637, 644), False, 'import sys\n'), ((4006, 4030), 'unittest.main', 'unittest.main', ([], {'argv': "['']"}), "(argv=[''])\n", (4019, 4030), False, 'import unittest\n'), ((1111, 1155), 'auto_scan_test.AutoScanTest.__init__', 'AutoScanTest.__init__', (['self', '*args'], {}), '(self, *args, **kwargs)\n', (1132, 1155), False, 'from auto_scan_test import AutoScanTest, IgnoreReasons\n'), ((1555, 1592), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(0)', 'max_value': '(5)'}), '(min_value=0, max_value=5)\n', (1566, 1592), True, 'import hypothesis.strategies as st\n'), ((1617, 1656), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(50)', 'max_value': '(60)'}), '(min_value=50, max_value=60)\n', (1628, 1656), True, 'import hypothesis.strategies as st\n'), ((1682, 1720), 'hypothesis.strategies.integers', 'st.integers', ([], {'min_value': '(1)', 'max_value': '(10)'}), '(min_value=1, max_value=10)\n', (1693, 1720), True, 'import hypothesis.strategies as st\n'), ((1749, 1769), 'hypothesis.strategies.sampled_from', 'st.sampled_from', (['[5]'], {}), '([5])\n', (1764, 1769), True, 'import hypothesis.strategies as st\n'), ((2915, 2939), 'functools.partial', 'partial', (['generate_start2'], {}), '(generate_start2)\n', (2922, 2939), False, 'from functools import partial\n'), ((3017, 3040), 'functools.partial', 'partial', (['generate_stop2'], {}), '(generate_stop2)\n', (3024, 3040), False, 'from functools import partial\n'), ((3097, 3118), 'functools.partial', 'partial', (['generate_num'], {}), '(generate_num)\n', (3104, 3118), False, 'from functools import partial\n'), ((3418, 3442), 'functools.partial', 'partial', (['generate_start1'], {}), '(generate_start1)\n', (3425, 3442), False, 'from functools import partial\n'), ((3520, 3543), 'functools.partial', 'partial', (['generate_stop1'], {}), '(generate_stop1)\n', (3527, 3543), False, 'from functools import 
partial\n'), ((3600, 3621), 'functools.partial', 'partial', (['generate_num'], {}), '(generate_num)\n', (3607, 3621), False, 'from functools import partial\n')]
|
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy import Column, Integer, String, Boolean
from todo.database import Base
from todo import app
# Bind SQLAlchemy to the Flask application instance.
db = SQLAlchemy(app)


class Entry(db.Model):
    """A single todo item persisted in the ``entries`` table."""

    __tablename__ = "entries"

    id = Column(Integer, primary_key=True)
    title = Column(String)
    order = Column(Integer)
    completed = Column(Boolean)

    def __init__(self, title=None, order=None):
        self.title = title
        self.order = order
        # New entries always start out unfinished.
        self.completed = False

    def __repr__(self):
        return f"<Entry: {self.title}>"


# Create the schema up front and flush the session so the table exists
# before the first request is served.
db.create_all()
db.session.commit()
|
[
"flask_sqlalchemy.SQLAlchemy",
"sqlalchemy.Column"
] |
[((154, 169), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (164, 169), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((234, 267), 'sqlalchemy.Column', 'Column', (['Integer'], {'primary_key': '(True)'}), '(Integer, primary_key=True)\n', (240, 267), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((280, 294), 'sqlalchemy.Column', 'Column', (['String'], {}), '(String)\n', (286, 294), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((307, 322), 'sqlalchemy.Column', 'Column', (['Integer'], {}), '(Integer)\n', (313, 322), False, 'from sqlalchemy import Column, Integer, String, Boolean\n'), ((339, 354), 'sqlalchemy.Column', 'Column', (['Boolean'], {}), '(Boolean)\n', (345, 354), False, 'from sqlalchemy import Column, Integer, String, Boolean\n')]
|
# Copyright (c) 2015 Uber Technologies, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
import pytest
from tchannel import TChannel, Request, Response, schemes
from tchannel.response import TransportHeaders
@pytest.mark.gen_test
@pytest.mark.call
def test_call_should_get_response():
    """A raw call round-trips headers/body and exposes transport metadata."""
    svc = TChannel(name='server')

    @svc.raw.register
    def endpoint(request):
        # The server side sees exactly what the client sent.
        assert isinstance(request, Request)
        assert request.headers == 'req headers'
        assert request.body == 'req body'
        return Response('resp body', headers='resp headers')

    svc.listen()

    client = TChannel(name='client')
    resp = yield client.raw(
        service='server',
        endpoint='endpoint',
        headers='req headers',
        body='req body',
        hostport=svc.hostport,
    )

    # The client observes the server's response verbatim.
    assert isinstance(resp, Response)
    assert resp.headers == 'resp headers'
    assert resp.body == 'resp body'

    # Transport headers carry the scheme and the (absent) failure domain.
    assert isinstance(resp.transport, TransportHeaders)
    assert resp.transport.scheme == schemes.RAW
    assert resp.transport.failure_domain is None
@pytest.mark.gen_test
@pytest.mark.call
def test_register_should_work_with_different_endpoint():
    """Registering under an explicit name ('foo') routes calls to the handler."""
    svc = TChannel(name='server')

    @svc.raw.register('foo')
    def endpoint(request):
        return 'resp body'

    svc.listen()

    client = TChannel(name='client')
    resp = yield client.raw(
        service='server',
        endpoint='foo',
        hostport=svc.hostport,
    )
    assert resp.body == 'resp body'
|
[
"tchannel.TChannel",
"tchannel.Response"
] |
[((1439, 1462), 'tchannel.TChannel', 'TChannel', ([], {'name': '"""server"""'}), "(name='server')\n", (1447, 1462), False, 'from tchannel import TChannel, Request, Response, schemes\n'), ((1770, 1793), 'tchannel.TChannel', 'TChannel', ([], {'name': '"""client"""'}), "(name='client')\n", (1778, 1793), False, 'from tchannel import TChannel, Request, Response, schemes\n'), ((2454, 2477), 'tchannel.TChannel', 'TChannel', ([], {'name': '"""server"""'}), "(name='server')\n", (2462, 2477), False, 'from tchannel import TChannel, Request, Response, schemes\n'), ((2622, 2645), 'tchannel.TChannel', 'TChannel', ([], {'name': '"""client"""'}), "(name='client')\n", (2630, 2645), False, 'from tchannel import TChannel, Request, Response, schemes\n'), ((1667, 1712), 'tchannel.Response', 'Response', (['"""resp body"""'], {'headers': '"""resp headers"""'}), "('resp body', headers='resp headers')\n", (1675, 1712), False, 'from tchannel import TChannel, Request, Response, schemes\n')]
|
import time
import os
import shutil
from Dataload.dataload_SST_binary import *
from DataUtils.alphabet import CreateAlphabet
import torch
from DataUtils.batch_iterator import *
from DataUtils.Embed import Embed
from DataUtils.common import paddingkey
from model.Text_Classification import *
def get_learning_algorithm(config):
    """Pick the optimizer name from the config flags.

    :param config: config object with boolean ``adam`` / ``sgd`` attributes
    :return: "Adam", "SGD", or None when neither flag is set
    """
    if config.adam is True:
        algorithm = "Adam"
    elif config.sgd is True:
        algorithm = "SGD"
    else:
        algorithm = None
    print("the learning algorithm is {}.".format(algorithm))
    return algorithm
def get_params(config, alphabet):
    """Copy vocabulary sizes and the padding id from *alphabet* onto *config*.

    :param config: config object (mutated in place)
    :param alphabet: alphabet holding word/label vocabularies
    """
    config.learning_algorithm = get_learning_algorithm(config)
    config.save_best_model_path = config.save_best_model_dir
    # In training mode, wipe any stale best-model directory before starting.
    if config.test is False and os.path.exists(config.save_best_model_path):
        shutil.rmtree(config.save_best_model_path)
    config.embed_num = alphabet.word_alphabet.vocab_size    # word number
    config.label_num = alphabet.label_alphabet.vocab_size   # label number
    config.paddingId = alphabet.word_paddingId
    config.alphabet = alphabet
    print("embed_num : {},class_num : {}".format(config.embed_num,config.label_num))
    print("PaddingID {}".format(config.paddingId))
def save_dict2file(dict, path):
    """Write one ``word<TAB>index`` line per entry of *dict* to *path* (UTF-8).

    :param dict: mapping from word to index (iteration order is preserved);
        name kept for interface compatibility even though it shadows the builtin
    :param path: destination file; truncated/overwritten if it already exists
    """
    print("Saving dictionary.........")
    if os.path.exists(path):
        # Mode "w" truncates, so an existing file is simply overwritten.
        # (The old message claimed the file was "deleted", which never happened.)
        print("path {} already exists, it will be overwritten.".format(path))
    # Context manager guarantees the file is closed even if a write fails.
    with open(path, encoding="utf-8", mode="w") as file:
        for word, index in dict.items():
            file.write(str(word) + "\t" + str(index) + "\n")
    print("Save dictionary has been finished.........")
def save_dictionary(config):
    """Dump the word/label dictionaries to ``config.dict_directory`` and copy
    that directory under ``config.save_dir``.

    :param config: config object; no-op unless ``config.save_dict`` is True
    """
    if config.save_dict is not True:
        return
    # Start from an empty dictionary directory.
    if os.path.exists(config.dict_directory):
        shutil.rmtree(config.dict_directory)
    if not os.path.isdir(config.dict_directory):
        os.makedirs(config.dict_directory)
    config.word_dict_path = "/".join([config.dict_directory, config.word_dict])
    config.label_dict_path = "/".join([config.dict_directory, config.label_dict])
    print("word_dict_directory :{}".format(config.word_dict_path))
    print("label_dict_directory : {} ".format(config.label_dict_path))
    save_dict2file(config.alphabet.word_alphabet.words2id, config.word_dict_path)
    save_dict2file(config.alphabet.label_alphabet.words2id, config.label_dict_path)
    # Mirror the dictionary directory into the run's save directory.
    print("copy dictionaconry to {}".format(config.save_dir))
    shutil.copytree(config.dict_directory, "/".join([config.save_dir, config.dict_directory]))
def preprocessing(config):
    """Load the raw datasets, build the alphabet, and create batch iterators.

    Each intermediate product is optionally pickled via ``torch.save`` when
    ``config.save_pkl`` is set.

    :param config: config with file paths, batch sizes and pickling options
    :return: (train_iter, dev_iter, test_iter, alphabet)
    """
    print("processing data............")
    # read file
    data_loader = DataLoader(path=[config.train_file, config.dev_file, config.test_file], shuffle=True, config=config)
    train_data, dev_data, test_data = data_loader.dataload()
    print("train sentence {},dev sentence {},test sentence {}.".format(len(train_data), len(dev_data), len(test_data)))
    data_dict = {"train_data": train_data, "dev_data": dev_data, "test_data": test_data}
    if config.save_pkl:
        torch.save(obj=data_dict, f=os.path.join(config.pkl_directory, config.pkl_data))
    # Build the alphabet: with fine-tuned embeddings only the train split
    # contributes to the vocabulary, otherwise all three splits do.
    alphabet = None
    if config.embed_finetune is False:
        alphabet = CreateAlphabet(min_freq=config.min_freq, train_data=train_data, dev_data=dev_data, test_data=test_data, config=config)
        alphabet.build_vocab()
    if config.embed_finetune is True:
        alphabet = CreateAlphabet(min_freq=config.min_freq, train_data=train_data, config=config)
        alphabet.build_vocab()
    alphabet_dict = {"alphabet": alphabet}
    if config.save_pkl:
        torch.save(obj=alphabet_dict, f=os.path.join(config.pkl_directory, config.pkl_alphabet))
    # Wrap each split in a batch iterator keyed to the alphabet.
    create_iter = Iterators(batch_size=[config.batch_size, config.dev_batch_size, config.test_batch_size],
                          data=[train_data, dev_data, test_data], operator=alphabet, config=config)
    train_iter, dev_iter, test_iter = create_iter.createIterator()
    iter_dict = {"train_iter": train_iter, "dev_iter": dev_iter, "test_iter": test_iter}
    if config.save_pkl:
        torch.save(obj=iter_dict, f=os.path.join(config.pkl_directory, config.pkl_iter))
    return train_iter, dev_iter, test_iter, alphabet
def pre_embed(config, alphabet):
    """Load pre-trained word embeddings (if configured) and pickle the result.

    :param config: config with pretrained-embedding flags and paths
    :param alphabet: alphabet providing the id->word mapping
    :return: the embedding matrix, or None when no pre-trained embed is used
    """
    print("............................")
    pretrain_embed = None
    embed_types = ""
    if config.pretrained_embed:
        # First matching initialisation strategy wins.
        if config.zeros:
            embed_types = "zeros"
        elif config.avg:
            embed_types = "avg"
        elif config.uniform:
            embed_types = "uniform"
        elif config.nnembed:
            embed_types = "nn"
    if config.pretrained_embed is True:
        loader = Embed(path=config.pretrained_embed_file,
                      words_dict=alphabet.word_alphabet.id2words,
                      embed_type=embed_types,
                      pad=paddingkey)
        pretrain_embed = loader.get_embed()

    embed_dict = {"pretrain_embed": pretrain_embed}
    # pcl.save(obj=embed_dict, path=os.path.join(config.pkl_directory, config.pkl_embed))
    torch.save(obj=embed_dict, f=os.path.join(config.pkl_directory, config.pkl_embed))
    return pretrain_embed
def load_model(config):
    """Build the text-classification model, moved to GPU when ``config.use_cuda`` is set.

    :param config: config
    :return: nn model
    """
    print("********************************************************")
    model = Text_Classification(config)
    return model.cuda() if config.use_cuda is True else model
def load_data(config):
    """Process the raw data (when both ``train`` and ``process`` are set) and
    return the batch iterators plus the alphabet.

    :param config: config
    :return: (train_iter, dev_iter, test_iter, alphabet)

    NOTE(review): when ``config.train`` or ``config.process`` is False the
    iterator variables are never bound and the final ``return`` raises
    NameError — presumably callers only invoke this in process mode; verify.
    """
    print("load data for process or pkl data")
    alphabet=None
    start_time = time.time()
    if(config.train is True)and(config.process is True):
        print('PROCESS DATA:')
        # Rebuild the pickle directory from scratch for this run.
        if os.path.exists(config.pkl_directory): shutil.rmtree(config.pkl_directory)
        if not os.path.isdir(config.pkl_directory):os.makedirs(config.pkl_directory)
        train_iter, dev_iter, test_iter, alphabet = preprocessing(config)
        config.pretrained_weight = pre_embed(config=config, alphabet=alphabet)
    end_time = time.time()
    print("All Data/Alphabet/Iterator Use Time {:.4}".format(end_time - start_time))
    print("***************************************")
    return train_iter, dev_iter, test_iter, alphabet
|
[
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"time.time",
"DataUtils.Embed.Embed",
"shutil.rmtree",
"DataUtils.alphabet.CreateAlphabet",
"os.path.join"
] |
[((1605, 1625), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1619, 1625), False, 'import os\n'), ((6188, 6199), 'time.time', 'time.time', ([], {}), '()\n', (6197, 6199), False, 'import time\n'), ((6295, 6331), 'os.path.exists', 'os.path.exists', (['config.pkl_directory'], {}), '(config.pkl_directory)\n', (6309, 6331), False, 'import os\n'), ((6610, 6621), 'time.time', 'time.time', ([], {}), '()\n', (6619, 6621), False, 'import time\n'), ((958, 1001), 'os.path.exists', 'os.path.exists', (['config.save_best_model_path'], {}), '(config.save_best_model_path)\n', (972, 1001), False, 'import os\n'), ((2047, 2084), 'os.path.exists', 'os.path.exists', (['config.dict_directory'], {}), '(config.dict_directory)\n', (2061, 2084), False, 'import os\n'), ((3663, 3786), 'DataUtils.alphabet.CreateAlphabet', 'CreateAlphabet', ([], {'min_freq': 'config.min_freq', 'train_data': 'train_data', 'dev_data': 'dev_data', 'test_data': 'test_data', 'config': 'config'}), '(min_freq=config.min_freq, train_data=train_data, dev_data=\n dev_data, test_data=test_data, config=config)\n', (3677, 3786), False, 'from DataUtils.alphabet import CreateAlphabet\n'), ((3870, 3948), 'DataUtils.alphabet.CreateAlphabet', 'CreateAlphabet', ([], {'min_freq': 'config.min_freq', 'train_data': 'train_data', 'config': 'config'}), '(min_freq=config.min_freq, train_data=train_data, config=config)\n', (3884, 3948), False, 'from DataUtils.alphabet import CreateAlphabet\n'), ((5264, 5393), 'DataUtils.Embed.Embed', 'Embed', ([], {'path': 'config.pretrained_embed_file', 'words_dict': 'alphabet.word_alphabet.id2words', 'embed_type': 'embed_types', 'pad': 'paddingkey'}), '(path=config.pretrained_embed_file, words_dict=alphabet.word_alphabet.\n id2words, embed_type=embed_types, pad=paddingkey)\n', (5269, 5393), False, 'from DataUtils.Embed import Embed\n'), ((6333, 6368), 'shutil.rmtree', 'shutil.rmtree', (['config.pkl_directory'], {}), '(config.pkl_directory)\n', (6346, 6368), False, 'import shutil\n'), 
((6380, 6415), 'os.path.isdir', 'os.path.isdir', (['config.pkl_directory'], {}), '(config.pkl_directory)\n', (6393, 6415), False, 'import os\n'), ((6416, 6449), 'os.makedirs', 'os.makedirs', (['config.pkl_directory'], {}), '(config.pkl_directory)\n', (6427, 6449), False, 'import os\n'), ((1015, 1057), 'shutil.rmtree', 'shutil.rmtree', (['config.save_best_model_path'], {}), '(config.save_best_model_path)\n', (1028, 1057), False, 'import shutil\n'), ((2098, 2134), 'shutil.rmtree', 'shutil.rmtree', (['config.dict_directory'], {}), '(config.dict_directory)\n', (2111, 2134), False, 'import shutil\n'), ((2150, 2186), 'os.path.isdir', 'os.path.isdir', (['config.dict_directory'], {}), '(config.dict_directory)\n', (2163, 2186), False, 'import os\n'), ((2200, 2234), 'os.makedirs', 'os.makedirs', (['config.dict_directory'], {}), '(config.dict_directory)\n', (2211, 2234), False, 'import os\n'), ((3505, 3556), 'os.path.join', 'os.path.join', (['config.pkl_directory', 'config.pkl_data'], {}), '(config.pkl_directory, config.pkl_data)\n', (3517, 3556), False, 'import os\n'), ((4087, 4142), 'os.path.join', 'os.path.join', (['config.pkl_directory', 'config.pkl_alphabet'], {}), '(config.pkl_directory, config.pkl_alphabet)\n', (4099, 4142), False, 'import os\n'), ((4592, 4643), 'os.path.join', 'os.path.join', (['config.pkl_directory', 'config.pkl_iter'], {}), '(config.pkl_directory, config.pkl_iter)\n', (4604, 4643), False, 'import os\n'), ((5634, 5686), 'os.path.join', 'os.path.join', (['config.pkl_directory', 'config.pkl_embed'], {}), '(config.pkl_directory, config.pkl_embed)\n', (5646, 5686), False, 'import os\n')]
|
from keras import backend as K
def reshape2d_by_image_dim_ordering(X, size=96):
    """Reshape flat 2-D image rows into Keras' expected 4-D layout.

    The channel axis is placed where the active ``image_dim_ordering`` expects
    it: channels-first ('th') yields ``(n, 1, size, size)``, otherwise
    channels-last ``(n, size, size, 1)``.

    :param X: array of flattened square grayscale images, one per row
    :param size: image side length in pixels; default 96 preserves the
        previously hard-coded behaviour for existing callers
    :return: tuple of (reshaped X, input_shape without the batch axis)
    """
    if K.image_dim_ordering() == 'th':
        X = X.reshape(X.shape[0], 1, size, size)
        input_shape = (1, size, size)
    else:
        X = X.reshape(X.shape[0], size, size, 1)
        input_shape = (size, size, 1)
    return X, input_shape
|
[
"keras.backend.image_dim_ordering"
] |
[((131, 153), 'keras.backend.image_dim_ordering', 'K.image_dim_ordering', ([], {}), '()\n', (151, 153), True, 'from keras import backend as K\n')]
|
#!/usr/bin/env python
"""This is part of the MEA-Calendar Webex-Teams bot functionality.
It overcomes the limitation of the ngrok free account limitation of tunnel expiration every 8 hours,
by automatically stopping the current ngrok and botkit every 7.5 hours and then recreating a new ngrok
session, getting the new session's URL, and restarting botkit using the new URL.
The logic is scheduled to run every 7.5 hours natively within the script. No need for external cron scheduler.
"""
import subprocess
import requests
import json
import time
import schedule
import threading
import sys
__author__ = "<NAME>"
__copyright__ = "Copyright 2019 Cisco and/or its affiliates"
__license__ = "CISCO SAMPLE CODE LICENSE"
__version__ = "1.1"
__email__ = "<EMAIL>"
def bot_run():
    """Query the local ngrok API for the current tunnel URL and start botkit with it."""
    api_url = "http://localhost:4040/api/tunnels"
    tunnels = json.loads(requests.get(api_url).text)
    # Index 1 is the tunnel used for the bot's public HTTPS endpoint.
    new_tunnel_url = tunnels['tunnels'][1]['public_url']
    time.sleep(10)
    print("%s: %s" % (time.asctime(time.localtime(time.time())), "Starting a new bot..."))
    # shell=True so the leading PUBLIC_URL=... environment assignment is honoured.
    subprocess.run("PUBLIC_URL=%s node bot.js" % new_tunnel_url, shell=True)
def main():
    """Stop botkit and ngrok, start a fresh ngrok tunnel, then relaunch botkit."""

    def announce(message):
        # Timestamped status line on stdout.
        print("%s: %s" % (time.asctime(time.localtime(time.time())), message))

    announce("Stopping the current bot...")
    subprocess.run("pkill -9 node", shell=True)
    time.sleep(10)

    announce("Stopping the current ngrok session...")
    subprocess.run("pkill -9 ngrok".split(), stdout=subprocess.PIPE)
    time.sleep(10)

    announce("Starting a new ngrok session...")
    # Tunnel http port 3000, which botkit listens on.
    ngrok_proc = subprocess.Popen(['ngrok', 'http', '3000'], stdout=subprocess.PIPE)
    time.sleep(10)

    # Run botkit in a daemon thread so this function returns while the bot
    # keeps serving (running it inline would require shell=True and block).
    worker = threading.Thread(target=bot_run)
    worker.daemon = True
    worker.start()
if __name__ == "__main__":
# Run the program now then repeatedly every 7hours 30 minutes = 450 minutes
main()
schedule.every(450).minutes.do(main)
while True:
try:
schedule.run_pending()
time.sleep(1)
except (KeyboardInterrupt, EOFError):
sys.exit(1)
|
[
"schedule.run_pending",
"threading.Thread",
"subprocess.run",
"subprocess.Popen",
"json.loads",
"time.sleep",
"time.time",
"requests.get",
"schedule.every",
"sys.exit"
] |
[((1130, 1144), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1140, 1144), False, 'import time\n'), ((1308, 1343), 'subprocess.run', 'subprocess.run', (['command'], {'shell': '(True)'}), '(command, shell=True)\n', (1322, 1343), False, 'import subprocess\n'), ((1524, 1567), 'subprocess.run', 'subprocess.run', (['"""pkill -9 node"""'], {'shell': '(True)'}), "('pkill -9 node', shell=True)\n", (1538, 1567), False, 'import subprocess\n'), ((1573, 1587), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1583, 1587), False, 'import time\n'), ((1837, 1851), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1847, 1851), False, 'import time\n'), ((2038, 2105), 'subprocess.Popen', 'subprocess.Popen', (["['ngrok', 'http', '3000']"], {'stdout': 'subprocess.PIPE'}), "(['ngrok', 'http', '3000'], stdout=subprocess.PIPE)\n", (2054, 2105), False, 'import subprocess\n'), ((2112, 2126), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2122, 2126), False, 'import time\n'), ((2309, 2341), 'threading.Thread', 'threading.Thread', ([], {'target': 'bot_run'}), '(target=bot_run)\n', (2325, 2341), False, 'import threading\n'), ((957, 984), 'requests.get', 'requests.get', (['ngrok_api_url'], {}), '(ngrok_api_url)\n', (969, 984), False, 'import requests\n'), ((2607, 2629), 'schedule.run_pending', 'schedule.run_pending', ([], {}), '()\n', (2627, 2629), False, 'import schedule\n'), ((2643, 2656), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2653, 2656), False, 'import time\n'), ((1058, 1096), 'json.loads', 'json.loads', (['ngrok_new_session_response'], {}), '(ngrok_new_session_response)\n', (1068, 1096), False, 'import json\n'), ((2526, 2545), 'schedule.every', 'schedule.every', (['(450)'], {}), '(450)\n', (2540, 2545), False, 'import schedule\n'), ((2717, 2728), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2725, 2728), False, 'import sys\n'), ((1196, 1207), 'time.time', 'time.time', ([], {}), '()\n', (1205, 1207), False, 'import time\n'), ((1459, 1470), 
'time.time', 'time.time', ([], {}), '()\n', (1468, 1470), False, 'import time\n'), ((1690, 1701), 'time.time', 'time.time', ([], {}), '()\n', (1699, 1701), False, 'import time\n'), ((1974, 1985), 'time.time', 'time.time', ([], {}), '()\n', (1983, 1985), False, 'import time\n')]
|
from django.conf import settings
from django.http import HttpResponsePermanentRedirect
from django.template import Context
from django.template.exceptions import TemplateDoesNotExist
from django.template.loader import get_template
from django.utils.deprecation import MiddlewareMixin
from django_boost.http.response import (HttpExceptionBase,
HttpRedirectExceptionBase)
class RedirectCorrectHostnameMiddleware(MiddlewareMixin):
    """Permanently redirect requests whose hostname is not ``settings.CORRECT_HOST``.

    Active only when ``DEBUG`` is off and ``CORRECT_HOST`` is configured.
    """
    # Evaluated once at import time: redirects only apply outside DEBUG
    # and when a target hostname has been configured.
    conditions = not settings.DEBUG and hasattr(settings, 'CORRECT_HOST')
    def __call__(self, request):
        if self.conditions:
            expected_host = settings.CORRECT_HOST
            if request.get_host() != expected_host:
                target = '{scheme}://{host}{path}'.format(
                    scheme=request.scheme,
                    host=expected_host,
                    path=request.get_full_path(),
                )
                return HttpResponsePermanentRedirect(target)
        return self.get_response(request)
class HttpStatusCodeExceptionMiddleware(MiddlewareMixin):
    """Convert django_boost HTTP status-code exceptions into responses.

    Works similarly to Django's built-in ``Http404`` handling: raising an
    ``HttpExceptionBase`` subclass anywhere in a view produces the matching
    response, optionally rendered from a ``<status_code>.html`` template.
    """
    def get_template_from_status_code(self, status_code):
        """Render ``"<status_code>.html"`` if such a template exists.

        Falls back to the bare status code string as the response body when
        the template cannot be found.
        """
        try:
            file_name = "%s.html" % status_code
            t = get_template(file_name)
            # BUG FIX: templates returned by ``get_template`` expect a plain
            # dict context (or none); passing ``django.template.Context``
            # raises TypeError on Django >= 1.11.
            return t.render({})
        except TemplateDoesNotExist:
            return "%s" % status_code
    def process_exception(self, request, e):
        """Turn redirect/status-code exceptions into their responses.

        Returns ``None`` for any other exception type so Django's normal
        exception handling proceeds.
        """
        if isinstance(e, HttpRedirectExceptionBase):
            return e.response_class(e.url)
        if isinstance(e, HttpExceptionBase):
            response_text = self.get_template_from_status_code(e.status_code)
            return e.response_class(response_text)
        return None
|
[
"django.template.Context",
"django.template.loader.get_template"
] |
[((1414, 1437), 'django.template.loader.get_template', 'get_template', (['file_name'], {}), '(file_name)\n', (1426, 1437), False, 'from django.template.loader import get_template\n'), ((1460, 1469), 'django.template.Context', 'Context', ([], {}), '()\n', (1467, 1469), False, 'from django.template import Context\n')]
|
"""Space switching without constraints or extra DAG nodes.
Contains functions to create a space switching network as well as seamlessly switching
between spaces.
Example Usage
=============
::
import cmt.rig.spaceswitch as spaceswitch
# Create the space switch
spaceswitch.create_space_switch(
pole_vector_control,
[(ik_control, "foot"), (root_control, "root"), (world_control, "world")],
switch_attribute="space",
use_rotate=False,
)
# Seamless switch
spaceswitch.switch_space(pole_vector_control, "space", 1, create_keys=False)
"""
import maya.cmds as cmds
import maya.api.OpenMaya as OpenMaya
from cmt.dge import dge
import cmt.rig.common as common
import cmt.shortcuts as shortcuts
def create_space_switch(
    node, drivers, switch_attribute=None, use_translate=True, use_rotate=True
):
    """Creates a space switch network.

    The network uses the offsetParentMatrix attribute and does not create any
    constraints or new dag nodes.

    :param node: Transform to drive
    :param drivers: List of tuples: [(driver1, "spaceName1"), (driver2, "spaceName2")]
    :param switch_attribute: Name of the switch attribute to create on the target node.
    :param use_translate: Default state of the translation toggle attribute.
    :param use_rotate: Default state of the rotation toggle attribute.
    """
    if switch_attribute is None:
        switch_attribute = "space"
    # Recreate the enum attribute from scratch so its names match the drivers.
    if cmds.objExists("{}.{}".format(node, switch_attribute)):
        cmds.deleteAttr(node, at=switch_attribute)
    names = [d[1] for d in drivers]
    cmds.addAttr(node, ln=switch_attribute, at="enum", en=":".join(names), keyable=True)
    # Create attribute to toggle translation in the matrices
    enable_translate_attr = _create_bool_attribute(
        node, "{}UseTranslate".format(switch_attribute), use_translate
    )
    # Create attribute to toggle rotation in the matrices
    enable_rotate_attr = _create_bool_attribute(
        node, "{}UseRotate".format(switch_attribute), use_rotate
    )
    blend = cmds.createNode("blendMatrix", name="{}_spaceswitch".format(node))
    # Get the current offset parent matrix. This is used as the starting blend point
    m = OpenMaya.MMatrix(cmds.getAttr("{}.offsetParentMatrix".format(node)))
    cmds.setAttr("{}.inputMatrix".format(blend), list(m), type="matrix")
    parent = cmds.listRelatives(node, parent=True, path=True)
    to_parent_local = "{}.worldInverseMatrix[0]".format(parent[0]) if parent else None
    for i, driver in enumerate(drivers):
        driver = driver[0]
        _connect_driver_matrix_network(blend, node, driver, i, to_parent_local)
        target_attr = "{}.target[{}]".format(blend, i)
        # Hook up the weight toggle when switching spaces
        dge(
            "x = switch == {} ? 1 : 0".format(i),
            x="{}.weight".format(target_attr),
            switch="{}.{}".format(node, switch_attribute),
        )
        # Connect the translation, rotation toggles
        cmds.connectAttr(enable_translate_attr, "{}.useTranslate".format(target_attr))
        # BUG FIX: this format call previously passed a stray, unused second
        # argument (i).
        cmds.connectAttr(enable_rotate_attr, "{}.useRotate".format(target_attr))
    cmds.connectAttr(
        "{}.outputMatrix".format(blend), "{}.offsetParentMatrix".format(node)
    )
def _create_bool_attribute(node, attribute, default_value):
    """Add a keyable boolean attribute to *node* and return its plug path."""
    cmds.addAttr(node, ln=attribute, at="bool", keyable=True,
                 defaultValue=default_value)
    return "{}.{}".format(node, attribute)
def _connect_driver_matrix_network(blend, node, driver, index, to_parent_local):
    """Wire one driver transform into slot *index* of the blendMatrix node.

    :param blend: Name of the blendMatrix node.
    :param node: Driven transform (used to compute the static offset).
    :param driver: Driver transform for this space.
    :param index: Target slot index on the blendMatrix node.
    :param to_parent_local: Plug of the driven node's parent
        worldInverseMatrix, or None when the node has no parent.
    """
    # The multMatrix node will calculate the transformation to blend to when driven
    # by this driver transform
    mult = cmds.createNode(
        "multMatrix", name="spaceswitch_{}_to_{}".format(node, driver)
    )
    # Static offset captured at setup time: the node's exclusive (parent)
    # matrix expressed relative to the driver's world matrix.
    offset = (
        shortcuts.get_dag_path2(node).exclusiveMatrix()
        * OpenMaya.MMatrix(cmds.getAttr("{}.worldInverseMatrix[0]".format(driver)))
    )
    cmds.setAttr("{}.matrixIn[0]".format(mult), list(offset), type="matrix")
    # Live connection so the target follows the driver after setup.
    cmds.connectAttr("{}.worldMatrix[0]".format(driver), "{}.matrixIn[1]".format(mult))
    if to_parent_local:
        # Bring the result back into the driven node's parent space.
        cmds.connectAttr(to_parent_local, "{}.matrixIn[2]".format(mult))
    cmds.connectAttr(
        "{}.matrixSum".format(mult), "{}.target[{}].targetMatrix".format(blend, index)
    )
def switch_space(node, attribute, space, create_keys=False):
    """Seamlessly switch between spaces.

    The node's world transform is captured before the switch and restored
    afterwards so the control does not pop.

    :param node: Node to switch
    :param attribute: Space switching attribute on node
    :param space: Space index in the space attribute
    :param create_keys: True to create switching keys (currently unused)
    """
    world_matrix = cmds.xform(node, q=True, ws=True, m=True)
    cmds.setAttr("{}.{}".format(node, attribute), space)
    cmds.xform(node, ws=True, m=world_matrix)
|
[
"maya.cmds.listRelatives",
"maya.cmds.addAttr",
"maya.cmds.deleteAttr",
"cmt.shortcuts.get_dag_path2",
"maya.cmds.xform"
] |
[((2238, 2286), 'maya.cmds.listRelatives', 'cmds.listRelatives', (['node'], {'parent': '(True)', 'path': '(True)'}), '(node, parent=True, path=True)\n', (2256, 2286), True, 'import maya.cmds as cmds\n'), ((3215, 3304), 'maya.cmds.addAttr', 'cmds.addAttr', (['node'], {'ln': 'attribute', 'at': '"""bool"""', 'defaultValue': 'default_value', 'keyable': '(True)'}), "(node, ln=attribute, at='bool', defaultValue=default_value,\n keyable=True)\n", (3227, 3304), True, 'import maya.cmds as cmds\n'), ((4520, 4561), 'maya.cmds.xform', 'cmds.xform', (['node'], {'q': '(True)', 'ws': '(True)', 'm': '(True)'}), '(node, q=True, ws=True, m=True)\n', (4530, 4561), True, 'import maya.cmds as cmds\n'), ((4623, 4653), 'maya.cmds.xform', 'cmds.xform', (['node'], {'ws': '(True)', 'm': 'm'}), '(node, ws=True, m=m)\n', (4633, 4653), True, 'import maya.cmds as cmds\n'), ((1369, 1411), 'maya.cmds.deleteAttr', 'cmds.deleteAttr', (['node'], {'at': 'switch_attribute'}), '(node, at=switch_attribute)\n', (1384, 1411), True, 'import maya.cmds as cmds\n'), ((3686, 3715), 'cmt.shortcuts.get_dag_path2', 'shortcuts.get_dag_path2', (['node'], {}), '(node)\n', (3709, 3715), True, 'import cmt.shortcuts as shortcuts\n')]
|
##########################################################################
#
# MTraceCheck
# Copyright 2017 The Regents of the University of Michigan
# <NAME> and <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
##########################################################################
import sys
class ExecutionState:
    """Lifecycle states an instruction moves through during simulation."""
    IDLE, WAIT, READY, PERFORMED = range(4)


class Instruction:
    # doowon, 2017/09/06, fences
    """One memory operation (load, store or fence) in a generated test.

    Attributes:
        instType:   -1 uninitialized, 0 load, 1 store, 2 fence
        address:    -1 uninitialized, otherwise 0 .. numMemLocs-1
        loadTarget: -1 uninitialized, otherwise 0 .. numHistRegs-1
        value:      value observed by a load (ignored for stores)
    """

    def __init__(self, paramType=-1, paramAddress=-1, paramLoadTarget=-1):
        self.instType = paramType
        self.address = paramAddress
        self.loadTarget = paramLoadTarget
        self.value = 0xffffffff
        self.intraDeps = []            # dependencies within the same thread
        self.interDeps = []            # dependencies across threads
        self.exeState = ExecutionState.IDLE
        self.sortEdges = set()         # sets drop redundant edges automatically
        self.sortReverseEdges = set()
        self.sortOrder = None

    def genInst(self, paramType, paramAddress, paramLoadTarget):
        """(Re)define this instruction's type, address and load target."""
        self.instType = paramType
        self.address = paramAddress
        self.loadTarget = paramLoadTarget

    def writeValue(self, paramValue):
        """Record the value observed by this (load) instruction."""
        self.value = paramValue

    def addIntraDep(self, memOp):
        self.intraDeps.append(memOp)

    def addInterDep(self, memOp):
        self.interDeps.append(memOp)

    def addSortEdge(self, memOp):
        self.sortEdges.add(memOp)

    def removeSortEdge(self, memOp):
        self.sortEdges.remove(memOp)

    def addReverseEdge(self, memOp):
        # No need to remove elements once they are added
        self.sortReverseEdges.add(memOp)

    def setState(self, paramExeState):
        self.exeState = paramExeState

    def resetInst(self):
        """Clear all per-run state; intra-thread dependencies are kept."""
        self.exeState = ExecutionState.IDLE
        self.value = 0xffffffff
        del self.interDeps[:]
        self.sortEdges.clear()
        self.sortReverseEdges.clear()
        self.sortOrder = None

    def printInst(self):
        """Dump a human-readable form of this instruction to stdout."""
        sys.stdout.write("Instruction: ")
        if self.instType == 0:
            sys.stdout.write("ld 0x%x,r%d,0x%x\n"
                             % (self.address, self.loadTarget, self.value))
        elif self.instType == 1:
            sys.stdout.write("st 0x%x\n" % (self.address))
        elif self.instType == 2:
            sys.stdout.write("fence")
            # TODO: Fine-grained fences
        else:
            print ("Error: Unrecognized instruction type %d" % self.instType)
            sys.exit(1)

    def getAssembly(self):
        """Return this instruction rendered in the tool's assembly syntax."""
        # NOTE: Whenever you change this assembly format, update the parsing
        # helpers (getInstType / getAddress) defined below as well.
        if self.instType == 0:
            return "ld 0x%x,r%d" % (self.address, self.loadTarget)
        if self.instType == 1:
            return "st 0x%x" % (self.address)
        if self.instType == 2:
            return "fence"
        print ("Error: Unrecognized instruction type %d" % self.instType)
        sys.exit(1)
def getThreadIndex(memOp):
    """Upper 16 bits of a packed memory-op identifier."""
    return (memOp >> 16) & 0xffff


def getInstIndex(memOp):
    """Lower 16 bits of a packed memory-op identifier."""
    return memOp & 0xffff


def getMemOp(threadIndex, instIndex):
    """Pack thread and instruction indices into a single 32-bit identifier."""
    return ((threadIndex & 0xffff) << 16) | (instIndex & 0xffff)


def getMemId(threadIndex, instIndex):
    """Human-readable node id for graph output, e.g. ``m2_7``."""
    return "m%d_%d" % (threadIndex, instIndex)
# Assembly parsing
def getInstType(asm):
    """Map the mnemonic of *asm* to its type code (0=load, 1=store, 2=fence)."""
    mnemonic = asm.split(" ")[0]
    type_codes = {"ld": 0, "st": 1, "fence": 2}
    if mnemonic not in type_codes:
        print("Error: unsupported instruction type when parsing assembly code %s" % asm)
        sys.exit(1)
    return type_codes[mnemonic]


def getAddress(asm):
    """Extract the memory-address operand from an assembly string.

    Loads look like ``ld 0xADDR,rN`` and stores like ``st 0xADDR``.
    Fences carry no address, so asking for one is a fatal error.
    """
    instType = getInstType(asm)
    if instType == 0:       # load: strip the ",rN" history-register suffix
        operand = asm.split(" ")[1]
        comma = operand.find(",")
        assert(comma != -1)
        return int(operand[:comma], 16)
    elif instType == 1:     # store: the operand is the address itself
        return int(asm.split(" ")[1], 16)
    else:
        # Fences have no address operand; callers must not request one.
        sys.exit(1)
|
[
"sys.stdout.write",
"sys.exit"
] |
[((2695, 2728), 'sys.stdout.write', 'sys.stdout.write', (['"""Instruction: """'], {}), "('Instruction: ')\n", (2711, 2728), False, 'import sys\n'), ((2774, 2797), 'sys.stdout.write', 'sys.stdout.write', (['"""ld """'], {}), "('ld ')\n", (2790, 2797), False, 'import sys\n'), ((2810, 2849), 'sys.stdout.write', 'sys.stdout.write', (["('0x%x' % self.address)"], {}), "('0x%x' % self.address)\n", (2826, 2849), False, 'import sys\n'), ((2864, 2906), 'sys.stdout.write', 'sys.stdout.write', (["(',r%d' % self.loadTarget)"], {}), "(',r%d' % self.loadTarget)\n", (2880, 2906), False, 'import sys\n'), ((2921, 2961), 'sys.stdout.write', 'sys.stdout.write', (["(',0x%x\\n' % self.value)"], {}), "(',0x%x\\n' % self.value)\n", (2937, 2961), False, 'import sys\n'), ((4941, 4952), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4949, 4952), False, 'import sys\n'), ((3011, 3034), 'sys.stdout.write', 'sys.stdout.write', (['"""st """'], {}), "('st ')\n", (3027, 3034), False, 'import sys\n'), ((3047, 3088), 'sys.stdout.write', 'sys.stdout.write', (["('0x%x\\n' % self.address)"], {}), "('0x%x\\n' % self.address)\n", (3063, 3088), False, 'import sys\n'), ((4593, 4604), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4601, 4604), False, 'import sys\n'), ((3138, 3163), 'sys.stdout.write', 'sys.stdout.write', (['"""fence"""'], {}), "('fence')\n", (3154, 3163), False, 'import sys\n'), ((3308, 3319), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3316, 3319), False, 'import sys\n'), ((3930, 3941), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3938, 3941), False, 'import sys\n')]
|
# -*- coding: utf-8 -*-
import math
"""
Created on Wed Oct 9 00:51:06 2019
@author: randel
"""
class Calculator(object):
    '''
    Calculator class specialised in the following mathematical operations:
        add - addition of two numbers;
        subtract - subtraction of two numbers;
        multiply - multiplication of two numbers;
        division - division of two numbers;
        module - absolute value of a number;
        square root, power, raise to the square and fibonacci.
    '''

    def __init__(self):
        '''
        Register every supported operation in menu order.

        New operations must always be registered before the ``exit`` entry,
        which acts as the menu terminator.
        '''
        self.functions = []
        self.order = 0
        self.add_function('add', self.add, 2)
        self.add_function('subtract', self.subtract, 2)
        self.add_function('multiply', self.multiply, 2)
        self.add_function('division', self.division, 2)
        self.add_function('module', self.module, 1)
        self.add_function('square root', self.square_root, 1)
        self.add_function('power', self.power, 2)
        self.add_function('raise to the square', self.raise_to_the_square, 1)
        self.add_function('fibonacci', self.fibonacci, 1)
        self.add_function('exit', False, 0)

    def add_function(self, name_function, function, number_params):
        '''
        Append an operation entry and assign it the next menu order number.
        '''
        self.order += 1
        self.functions.append({
            'name': name_function,
            'function': function,
            'order': self.order,
            'number_params': number_params,
        })

    def add(self, numbers):
        '''
        Return the sum of the first two numbers.
        '''
        first, second = numbers[0], numbers[1]
        return first + second

    def subtract(self, numbers):
        '''
        Return the difference of the first two numbers.
        '''
        first, second = numbers[0], numbers[1]
        return first - second

    def multiply(self, numbers):
        '''
        Return the product of the first two numbers.
        '''
        return numbers[0] * numbers[1]

    def division(self, numbers):
        '''
        Return the quotient of the first two numbers.

        Returns 0 when either operand is 0 (guards against division by zero).
        '''
        dividend, divisor = numbers[0], numbers[1]
        if 0 in (dividend, divisor):
            return 0
        return dividend / divisor

    def module(self, numbers):
        '''
        Return the absolute value of the first number.
        '''
        value = numbers[0]
        return -value if value < 0 else value

    def square_root(self, numbers):
        '''
        Return the square root of the first number.
        '''
        return math.sqrt(numbers[0])

    def power(self, numbers):
        '''
        Return the first number raised to the power of the second.
        '''
        return numbers[0] ** numbers[1]

    def raise_to_the_square(self, numbers):
        '''
        Return the square of the first number.
        '''
        return numbers[0] ** 2

    def fibonacci(self, numbers):
        '''
        Return the n-th value of the Fibonacci sequence (0 1 1 2 3 5 8 ...),
        where n is the first number.
        '''
        n = numbers[0]
        if n == 1:
            return n
        previous, following = 0, 1
        result = 0
        for _ in range(n - 1):
            result = previous + following
            previous, following = following, result
        return result
|
[
"math.sqrt"
] |
[((2692, 2713), 'math.sqrt', 'math.sqrt', (['numbers[0]'], {}), '(numbers[0])\n', (2701, 2713), False, 'import math\n')]
|
#!/usr/bin/env python3
from migrator import run_migrations
# Entry-point script: apply the migrations for the "shared_functions" set.
run_migrations("shared_functions")
|
[
"migrator.run_migrations"
] |
[((61, 95), 'migrator.run_migrations', 'run_migrations', (['"""shared_functions"""'], {}), "('shared_functions')\n", (75, 95), False, 'from migrator import run_migrations\n')]
|
import unittest
from lymph.core.monitoring.global_metrics import ProcessMetrics
class GlobalMetricsTests(unittest.TestCase):
def setUp(self):
self.process_metrics = ProcessMetrics()
def test_process_metrics(self):
metric_names = [m[0] for m in self.process_metrics]
self.assertIn('proc.files.count', metric_names)
self.assertIn('proc.threads.count', metric_names)
self.assertIn('proc.mem.rss', metric_names)
self.assertIn('proc.cpu.system', metric_names)
|
[
"lymph.core.monitoring.global_metrics.ProcessMetrics"
] |
[((180, 196), 'lymph.core.monitoring.global_metrics.ProcessMetrics', 'ProcessMetrics', ([], {}), '()\n', (194, 196), False, 'from lymph.core.monitoring.global_metrics import ProcessMetrics\n')]
|
import os
from dvc.exceptions import MoveNotDataSourceError, DvcException
def _expand_target_path(from_path, to_path):
if os.path.isdir(to_path) and not os.path.isdir(from_path):
return os.path.join(to_path, os.path.basename(from_path))
return to_path
def move(self, from_path, to_path):
    """
    Renames an output file and modifies the stage associated
    to reflect the change on the pipeline.
    If the output has the same name as its stage, it would
    also rename the corresponding stage file.
    E.g.
          Having: (hello, hello.dvc)
          $ dvc move hello greetings
          Result: (greeting, greeting.dvc)
    It only works with outputs generated by `add` or `import`,
    also known as data sources.

    :param from_path: Path of the output to rename.
    :param to_path: New path, or an existing directory to move into.
    :raises MoveNotDataSourceError: If the output's stage is not a data source.
    """
    import dvc.output as Output
    from dvc.stage import Stage
    # Build a throw-away output just to normalise/resolve from_path.
    from_out = Output.loads_from(Stage(self, cwd=os.curdir), [from_path])[0]
    # Moving into an existing directory keeps the original basename.
    to_path = _expand_target_path(from_path, to_path)
    outs = self.find_outs_by_path(from_out.path)
    assert len(outs) == 1
    out = outs[0]
    stage = out.stage
    # Only outputs of `add`/`import` (data sources) can be moved safely.
    if not stage.is_data_source:
        raise MoveNotDataSourceError(stage.relpath)
    stage_name = os.path.splitext(os.path.basename(stage.path))[0]
    from_name = os.path.basename(from_out.path)
    if stage_name == from_name:
        # The stage file mirrors the output's name: rename it as well.
        os.unlink(stage.path)
        stage.path = os.path.join(
            os.path.dirname(to_path),
            os.path.basename(to_path) + Stage.STAGE_FILE_SUFFIX,
        )
        stage.cwd = os.path.abspath(
            os.path.join(os.curdir, os.path.dirname(to_path))
        )
    to_out = Output.loads_from(
        stage, [os.path.basename(to_path)], out.cache, out.metric
    )[0]
    # Move the data under the state lock, then persist the updated stage.
    with self.state:
        out.move(to_out)
        stage.dump()
    self.remind_to_git_add()
|
[
"os.unlink",
"os.path.basename",
"os.path.isdir",
"os.path.dirname",
"dvc.exceptions.MoveNotDataSourceError",
"dvc.stage.Stage"
] |
[((1243, 1274), 'os.path.basename', 'os.path.basename', (['from_out.path'], {}), '(from_out.path)\n', (1259, 1274), False, 'import os\n'), ((129, 151), 'os.path.isdir', 'os.path.isdir', (['to_path'], {}), '(to_path)\n', (142, 151), False, 'import os\n'), ((1121, 1158), 'dvc.exceptions.MoveNotDataSourceError', 'MoveNotDataSourceError', (['stage.relpath'], {}), '(stage.relpath)\n', (1143, 1158), False, 'from dvc.exceptions import MoveNotDataSourceError, DvcException\n'), ((1315, 1336), 'os.unlink', 'os.unlink', (['stage.path'], {}), '(stage.path)\n', (1324, 1336), False, 'import os\n'), ((160, 184), 'os.path.isdir', 'os.path.isdir', (['from_path'], {}), '(from_path)\n', (173, 184), False, 'import os\n'), ((223, 250), 'os.path.basename', 'os.path.basename', (['from_path'], {}), '(from_path)\n', (239, 250), False, 'import os\n'), ((858, 884), 'dvc.stage.Stage', 'Stage', (['self'], {'cwd': 'os.curdir'}), '(self, cwd=os.curdir)\n', (863, 884), False, 'from dvc.stage import Stage\n'), ((1194, 1222), 'os.path.basename', 'os.path.basename', (['stage.path'], {}), '(stage.path)\n', (1210, 1222), False, 'import os\n'), ((1385, 1409), 'os.path.dirname', 'os.path.dirname', (['to_path'], {}), '(to_path)\n', (1400, 1409), False, 'import os\n'), ((1423, 1448), 'os.path.basename', 'os.path.basename', (['to_path'], {}), '(to_path)\n', (1439, 1448), False, 'import os\n'), ((1560, 1584), 'os.path.dirname', 'os.path.dirname', (['to_path'], {}), '(to_path)\n', (1575, 1584), False, 'import os\n'), ((1645, 1670), 'os.path.basename', 'os.path.basename', (['to_path'], {}), '(to_path)\n', (1661, 1670), False, 'import os\n')]
|
__author__ = 'kdsouza'
from Spec import *
from collections import namedtuple
#=========== mock Receiver =============
## This opens a UI, which we do not need to do
# receiver = load_data(filename='/Users/kdsouza/Desktop/Projects/pandas_play/weather_year.csv')
# NOTE(review): hard-coded absolute path — this module only runs on the
# author's machine; parameterise before reuse.
weather_data = pd.read_csv("/Users/kdsouza/Desktop/Projects/pandas_play/weather_year.csv")
# Lightweight stand-in for the selection structure the real Receiver exposes.
DataFrameSelection = namedtuple('DataFrameSelection', 'rows, cols, items')
class Receiver(tr.HasTraits):
    """Mock Receiver class to avert side effects"""
    # Currently selected rows/columns/items of the data frame.
    selection = tr.Instance(DataFrameSelection)
# Select only the date ('EDT') and mean-temperature columns.
test_receiver = Receiver(
    selection=DataFrameSelection([],
                                [weather_data['EDT'],
                                 weather_data['Mean TemperatureF']],
                                []))
x, y = get_x_y(test_receiver)
from pandas.util.testing import assert_series_equal
# get_x_y is expected to return the first selected column as x.
assert_series_equal(x, weather_data['EDT'])
#============= Spec demo ===============
# Nested sample data for the Spec UI. Plain values are shown as-is; VEdit
# entries presumably become editable widgets (checkbox/enum) — TODO confirm
# against the Spec/VEdit implementation.
sample_dict = {
    'a': 10,
    'b': VEdit(True, BooleanEditor, {}),
    'c': VEdit(7, EnumEditor, {'values': [7, 8, 9]}),
    'd': {
        'sub_a': VEdit(False, BooleanEditor, {}),
        'sub_b': 'sub_b_val',
        'sub_c': 10,
        'sub_dict': {'sub_sub_a': .05,
                     'sub_sub_b': VEdit('a', EnumEditor, {'values':['a', 'b', 'c']})
                     }
    }
}
def generate_sample_spec(d):
    """Wrap the nested dict *d* in a Spec model tree."""
    return Spec(d)
def view_sample_spec(d):
    """Build a Spec from *d* and open its Traits configuration UI."""
    # The trailing `pass` that followed this call was dead code; removed.
    generate_sample_spec(d).configure_traits()
# generate_sample_spec(sample_dict)
# view_sample_spec(sample_dict)
#============== Layout demo ============
def generate_sample_layout(d):
    """Build a PlotLayout whose spec tree mirrors the nested dict *d*."""
    return PlotLayout(spec_nodes=Spec(d))
def view_sample_layout(d):
    """Create the sample layout, add one subplot and open the Traits UI."""
    layout = generate_sample_layout(d)
    layout.figure.add_subplot(111)
    layout.configure_traits()
# Module-level demo call: opens the layout UI whenever this module runs.
view_sample_layout(sample_dict)
|
[
"pandas.util.testing.assert_series_equal",
"collections.namedtuple"
] |
[((378, 431), 'collections.namedtuple', 'namedtuple', (['"""DataFrameSelection"""', '"""rows, cols, items"""'], {}), "('DataFrameSelection', 'rows, cols, items')\n", (388, 431), False, 'from collections import namedtuple\n'), ((875, 918), 'pandas.util.testing.assert_series_equal', 'assert_series_equal', (['x', "weather_data['EDT']"], {}), "(x, weather_data['EDT'])\n", (894, 918), False, 'from pandas.util.testing import assert_series_equal\n')]
|
from django.conf.urls import url
from django.contrib import admin
import web.views as web_views
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # Document manager served at the site root.
    url(r'^$', web_views.DocumentManagerView.as_view(), name='web')
]
|
[
"web.views.DocumentManagerView.as_view",
"django.conf.urls.url"
] |
[((118, 149), 'django.conf.urls.url', 'url', (['"""^admin/"""', 'admin.site.urls'], {}), "('^admin/', admin.site.urls)\n", (121, 149), False, 'from django.conf.urls import url\n'), ((167, 206), 'web.views.DocumentManagerView.as_view', 'web_views.DocumentManagerView.as_view', ([], {}), '()\n', (204, 206), True, 'import web.views as web_views\n')]
|
import os, json
from flask import render_template, url_for, flash
# from ..models import EditableHTML
from . import main
from app import basedir
@main.route('/')
def index():
    """Render the landing page."""
    return render_template('main/index.html')
@main.route('/resume')
def resume():
    """Render the resume page from the bundled JSON data file."""
    # resume_data_fname = basedir + url_for('main.static', filename='data/resume/resume-data.json')
    # NOTE(review): naive concatenation of basedir with the static URL path;
    # assumes the static URL mirrors the filesystem layout — confirm.
    resume_data_fname = basedir + url_for('static', filename='data/resume/resume-data.json')
    with open(resume_data_fname, 'r') as f:
        resume_data = json.load(f)
    return render_template('main/resume.html', resume_data=resume_data)
# @main.route('/about')
# def about():
# editable_html_obj = EditableHTML.get_editable_html('about')
# return render_template('main/about.html',
# editable_html_obj=editable_html_obj)
|
[
"flask.url_for",
"json.load",
"flask.render_template"
] |
[((189, 223), 'flask.render_template', 'render_template', (['"""main/index.html"""'], {}), "('main/index.html')\n", (204, 223), False, 'from flask import render_template, url_for, flash\n'), ((545, 605), 'flask.render_template', 'render_template', (['"""main/resume.html"""'], {'resume_data': 'resume_data'}), "('main/resume.html', resume_data=resume_data)\n", (560, 605), False, 'from flask import render_template, url_for, flash\n'), ((396, 454), 'flask.url_for', 'url_for', (['"""static"""'], {'filename': '"""data/resume/resume-data.json"""'}), "('static', filename='data/resume/resume-data.json')\n", (403, 454), False, 'from flask import render_template, url_for, flash\n'), ((521, 533), 'json.load', 'json.load', (['f'], {}), '(f)\n', (530, 533), False, 'import os, json\n')]
|
import numpy as np
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from fsmpy.datasets import load_patients_diagnoses
from fsmpy.sets import IntuitionisticFuzzySet
from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu
from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue
from fsmpy.similarities import liu, chen_2
from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu
from fsmpy import LIANG_SHI_SIMILARITY_1, LIANG_SHI_SIMILARITY_2, LIANG_SHI_SIMILARITY_3
from fsmpy import HUNG_YANG_3_SIMILARITY_1, HUNG_YANG_3_SIMILARITY_2, HUNG_YANG_3_SIMILARITY_3, \
HUNG_YANG_3_SIMILARITY_4, HUNG_YANG_3_SIMILARITY_5, HUNG_YANG_3_SIMILARITY_6, HUNG_YANG_3_SIMILARITY_7
from fsmpy import DENG_JIANG_FU_MONOTONIC_TYPE_1_1, DENG_JIANG_FU_MONOTONIC_TYPE_1_2, \
DENG_JIANG_FU_MONOTONIC_TYPE_1_3, DENG_JIANG_FU_MONOTONIC_TYPE_1_4, DENG_JIANG_FU_MONOTONIC_TYPE_2_1, \
DENG_JIANG_FU_MONOTONIC_TYPE_2_2, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, DENG_JIANG_FU_MONOTONIC_TYPE_2_4, \
DENG_JIANG_FU_MONOTONIC_TYPE_3_1, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, DENG_JIANG_FU_MONOTONIC_TYPE_3_3
from fsmpy import IANCU_SIMILARITY_1, IANCU_SIMILARITY_2, IANCU_SIMILARITY_3, IANCU_SIMILARITY_4, \
IANCU_SIMILARITY_5, IANCU_SIMILARITY_6, IANCU_SIMILARITY_7, IANCU_SIMILARITY_8, IANCU_SIMILARITY_9, \
IANCU_SIMILARITY_10, IANCU_SIMILARITY_11, IANCU_SIMILARITY_12, IANCU_SIMILARITY_13, IANCU_SIMILARITY_14,\
IANCU_SIMILARITY_15, IANCU_SIMILARITY_16, IANCU_SIMILARITY_17, IANCU_SIMILARITY_18, IANCU_SIMILARITY_19, \
IANCU_SIMILARITY_20
def test_dengfeng_chuntian():
    """Check dengfeng_chuntian for p=1/p=2 and optional weights.

    Commented-out asserts marked '# fails' document known mismatches.
    """
    # Example 1
    A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    A2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    A3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    B = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    assert_almost_equal(dengfeng_chuntian(A1, B, p=1, weights=None), 0.78, decimal=2)
    # assert_almost_equal(dengfeng_chuntian(A2, B, p=1, weights=None), 0.80, decimal=2) # fails
    assert_almost_equal(dengfeng_chuntian(A3, B, p=1, weights=None), 0.85, decimal=2)
    assert_almost_equal(dengfeng_chuntian(A1, B, p=2, weights=None), 0.74, decimal=2)
    assert_almost_equal(dengfeng_chuntian(A2, B, p=2, weights=None), 0.78, decimal=2)
    assert_almost_equal(dengfeng_chuntian(A3, B, p=2, weights=None), 0.84, decimal=2)
    assert_almost_equal(dengfeng_chuntian(A1, B, p=2, weights=[0.5, 0.3, 0.2]), 0.696, decimal=3)
    # assert_almost_equal(dengfeng_chuntian(A2, B, p=2, weights=[0.5, 0.3, 0.2]), 0.779, decimal=3)
    assert_almost_equal(dengfeng_chuntian(A3, B, p=2, weights=[0.5, 0.3, 0.2]), 0.853, decimal=3)
def test_liang_shi():
    """Check the three liang_shi similarity variants against precomputed values."""
    A1 = IntuitionisticFuzzySet([0.1, 0.5, 0.1], [0.9, 0.9, 0.1])
    A2 = IntuitionisticFuzzySet([0.5, 0.7, 0.0], [0.5, 0.7, 0.2])
    A3 = IntuitionisticFuzzySet([0.7, 0.1, 0.4], [0.8, 0.2, 0.6])
    B = IntuitionisticFuzzySet([0.4, 0.6, 0.0], [0.6, 0.8, 0.2])
    # Example 1
    assert_almost_equal(liang_shi(A1, B, similarity_type=LIANG_SHI_SIMILARITY_1, p=1), 0.83, decimal=2)
    assert_almost_equal(liang_shi(A2, B, similarity_type=LIANG_SHI_SIMILARITY_1, p=1), 0.93, decimal=2)
    assert_almost_equal(liang_shi(A3, B, similarity_type=LIANG_SHI_SIMILARITY_1, p=1), 0.60, decimal=2)
    # Example 2 # fails
    assert_almost_equal(liang_shi(A1, B, similarity_type=LIANG_SHI_SIMILARITY_2, p=1), 0.92, decimal=2)
    assert_almost_equal(liang_shi(A2, B, similarity_type=LIANG_SHI_SIMILARITY_2, p=1), 0.97, decimal=2)
    assert_almost_equal(liang_shi(A3, B, similarity_type=LIANG_SHI_SIMILARITY_2, p=1), 0.77, decimal=2)
    # Example 3
    assert_almost_equal(liang_shi(A1, B, similarity_type=LIANG_SHI_SIMILARITY_3, p=1, omegas=[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]), 0.89, decimal=2)
    assert_almost_equal(liang_shi(A2, B, similarity_type=LIANG_SHI_SIMILARITY_3, p=1, omegas=[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]), 0.95, decimal=2)
    assert_almost_equal(liang_shi(A3, B, similarity_type=LIANG_SHI_SIMILARITY_3, p=1, omegas=[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]), 0.72, decimal=2)
def test_park_kwun_lin():
    """Check park_kwun_lim similarities against precomputed values."""
    B = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.7, 0.8, 0.7])
    cases = [
        (IntuitionisticFuzzySet([0.2, 0.1, 0.0], [0.6, 0.7, 0.6]), 0.800),
        (IntuitionisticFuzzySet([0.2, 0.0, 0.2], [0.6, 0.6, 0.8]), 0.733),
        (IntuitionisticFuzzySet([0.1, 0.2, 0.2], [0.5, 0.7, 0.8]), 0.767),
    ]
    for pattern, expected in cases:
        assert_almost_equal(park_kwun_lim(pattern, B), expected, decimal=3)
def test_mitchell():
    """Placeholder: fixtures are built but no assertions are made yet."""
    A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
    A2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
    A3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
    B = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
def test_julian_hung_lin():
    """Placeholder: no test cases implemented for julian_hung_lin yet."""
    pass
def test_hung_yang_1():
    """Check hung_yang_1 (default, 'c' and 'e' similarity types) on three examples."""
    # Example 1
    A1 = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])
    A2 = IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])
    A3 = IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])
    B = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])
    assert_almost_equal(hung_yang_1(A1, B), 1.00, decimal=2)
    assert_almost_equal(hung_yang_1(A2, B), 0.933, decimal=3)
    assert_almost_equal(hung_yang_1(A3, B), 0.800, decimal=3)
    assert_almost_equal(hung_yang_1(A1, B, similarity_type='c'), 1.00, decimal=2)
    assert_almost_equal(hung_yang_1(A2, B, similarity_type='c'), 0.875, decimal=3)
    assert_almost_equal(hung_yang_1(A3, B, similarity_type='c'), 0.667, decimal=3)
    assert_almost_equal(hung_yang_1(A1, B, similarity_type='e'), 1.00, decimal=2)
    assert_almost_equal(hung_yang_1(A2, B, similarity_type='e'), 0.898, decimal=3)
    assert_almost_equal(hung_yang_1(A3, B, similarity_type='e'), 0.713, decimal=3)
    # Example 2
    A1 = IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.3, 0.3, 0.3])
    A2 = IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])
    B = IntuitionisticFuzzySet([0.3, 0.3, 0.1], [0.3, 0.3, 0.3])
    assert_almost_equal(hung_yang_1(A1, B), 0.900, decimal=3)
    assert_almost_equal(hung_yang_1(A2, B), 0.833, decimal=3)
    assert_almost_equal(hung_yang_1(A1, B, similarity_type='c'), 0.818, decimal=3)
    assert_almost_equal(hung_yang_1(A2, B, similarity_type='c'), 0.714, decimal=3)
    assert_almost_equal(hung_yang_1(A1, B, similarity_type='e'), 0.849, decimal=3)
    assert_almost_equal(hung_yang_1(A2, B, similarity_type='e'), 0.757, decimal=3)
    # Example 3
    A1 = IntuitionisticFuzzySet([0.1, 0.5, 0.1], [0.9, 0.9, 0.1])
    A2 = IntuitionisticFuzzySet([0.5, 0.7, 0.0], [0.5, 0.7, 0.2])
    A3 = IntuitionisticFuzzySet([0.7, 0.1, 0.4], [0.8, 0.2, 0.6])
    B = IntuitionisticFuzzySet([0.4, 0.6, 0.0], [0.6, 0.8, 0.2])
    assert_almost_equal(hung_yang_1(A1, B), 0.833, decimal=3)
    assert_almost_equal(hung_yang_1(A2, B), 0.933, decimal=3)
    assert_almost_equal(hung_yang_1(A3, B), 0.567, decimal=3)
    assert_almost_equal(hung_yang_1(A1, B, similarity_type='c'), 0.714, decimal=2)
    assert_almost_equal(hung_yang_1(A2, B, similarity_type='c'), 0.875, decimal=3)
    assert_almost_equal(hung_yang_1(A3, B, similarity_type='c'), 0.395, decimal=3)
    assert_almost_equal(hung_yang_1(A1, B, similarity_type='e'), 0.757, decimal=3)
    assert_almost_equal(hung_yang_1(A2, B, similarity_type='e'), 0.898, decimal=3)
    assert_almost_equal(hung_yang_1(A3, B, similarity_type='e'), 0.444, decimal=3)
def test_ye():
    """Ye cosine similarity: pattern-recognition and medical-diagnosis examples."""
    # Example 1: three known patterns compared against one query set.
    patterns = (
        IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1]),
        IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0]),
        IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0]),
    )
    query = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
    for pattern, expected in zip(patterns, (0.9353, 0.9519, 0.9724)):
        assert_almost_equal(ye(pattern, query), expected, decimal=4)
    # Same comparison with attribute weights applied.
    w = [0.5, 0.3, 0.2]
    for pattern, expected in zip(patterns, (0.9133, 0.9404, 0.9712)):
        assert_almost_equal(ye(pattern, query, weights=w), expected, decimal=4)
    # Medical-diagnosis example: one patient's symptoms against five diagnoses.
    diagnoses = (
        IntuitionisticFuzzySet([0.4, 0.3, 0.1, 0.4, 0.1], [0.0, 0.5, 0.7, 0.3, 0.7]),  # viral fever
        IntuitionisticFuzzySet([0.7, 0.2, 0.0, 0.7, 0.1], [0.0, 0.6, 0.9, 0.0, 0.8]),  # malaria
        IntuitionisticFuzzySet([0.3, 0.6, 0.2, 0.2, 0.1], [0.3, 0.1, 0.7, 0.6, 0.9]),  # typhoid
        IntuitionisticFuzzySet([0.1, 0.2, 0.8, 0.2, 0.2], [0.7, 0.4, 0.0, 0.7, 0.7]),  # stomach problem
        IntuitionisticFuzzySet([0.1, 0.0, 0.2, 0.2, 0.8], [0.8, 0.8, 0.8, 0.8, 0.1]),  # chest problem
    )
    patient = IntuitionisticFuzzySet([0.8, 0.6, 0.2, 0.6, 0.1], [0.1, 0.1, 0.8, 0.1, 0.6])
    # NOTE(review): the malaria and chest-problem expectations were marked
    # "fails" in the original suite.
    for diagnosis, expected in zip(diagnoses, (0.9046, 0.8602, 0.8510, 0.5033, 0.4542)):
        assert_almost_equal(ye(patient, diagnosis), expected, decimal=4)
def test_hwang_yang():
    """Hwang-Yang similarity on the six single-element pairs of Example 1."""
    # Each row: (mu_A, nu_A, mu_B, nu_B, expected similarity at 3 decimals).
    # NOTE(review): the first row was marked "fails" in the original suite.
    cases = (
        ([0.3], [0.3], [0.4], [0.4], 0.997),
        ([0.3], [0.4], [0.4], [0.3], 0.859),
        ([1.0], [0.0], [0.0], [0.0], 0.902),
        ([0.5], [0.5], [0.0], [0.0], 0.902),
        ([0.4], [0.2], [0.5], [0.3], 0.995),
        ([0.4], [0.2], [0.5], [0.2], 0.997),
    )
    for mu_a, nu_a, mu_b, nu_b, expected in cases:
        lhs = IntuitionisticFuzzySet(mu_a, nu_a)
        rhs = IntuitionisticFuzzySet(mu_b, nu_b)
        assert_almost_equal(hwang_yang(lhs, rhs), expected, decimal=3)
def test_hung_yang_2():
    """Hung-Yang (2nd) similarity over three worked examples and its variants."""
    # Example 1: the reference set B coincides with A1, so A1 scores 1.0
    # under every variant.
    a1 = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1], [0.4, 0.6, 0.8])
    a2 = IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2], [0.6, 0.6, 0.6])
    a3 = IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4], [0.2, 0.2, 0.2])
    b = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1], [0.4, 0.6, 0.8])
    # Each row: (left operand, extra kwargs, expected value, decimal places).
    checks = (
        (a1, {}, 1.00, 2),
        (a2, {}, 0.979, 3),
        (a3, {}, 0.854, 3),
        (a1, {'similarity_type': 'c'}, 1.00, 2),
        (a2, {'similarity_type': 'c'}, 0.964, 3),
        (a3, {'similarity_type': 'c'}, 0.776, 3),
        (a1, {'similarity_type': 'e'}, 1.00, 2),
        (a2, {'similarity_type': 'e'}, 0.971, 3),
        (a3, {'similarity_type': 'e'}, 0.808, 3),
    )
    for lhs, kwargs, expected, places in checks:
        assert_almost_equal(hung_yang_2(lhs, b, **kwargs), expected, decimal=places)
    # Example 2: also exercises the a=1.5 parameter, which does not change
    # the default-variant result.
    a1 = IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2], [0.6, 0.6, 0.6])
    a2 = IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4], [0.2, 0.2, 0.2])
    b = IntuitionisticFuzzySet([0.3, 0.3, 0.1], [0.3, 0.3, 0.3], [0.4, 0.4, 0.6])
    checks = (
        (a1, {}, 0.974, 3),
        (a2, {}, 0.928, 3),
        (a1, {'a': 1.5}, 0.974, 3),
        (a2, {'a': 1.5}, 0.928, 3),
        (a1, {'similarity_type': 'c'}, 0.957, 3),
        (a2, {'similarity_type': 'c'}, 0.882, 3),
        (a1, {'similarity_type': 'e'}, 0.964, 3),
        (a2, {'similarity_type': 'e'}, 0.901, 3),
    )
    for lhs, kwargs, expected, places in checks:
        assert_almost_equal(hung_yang_2(lhs, b, **kwargs), expected, decimal=places)
    # Example 3: sets chosen to hit a division-with-zero edge case.
    a1 = IntuitionisticFuzzySet([0.1, 0.5, 0.1], [0.1, 0.1, 0.9], [0.8, 0.4, 0.0])
    a2 = IntuitionisticFuzzySet([0.5, 0.7, 0.0], [0.5, 0.3, 0.8], [0.0, 0.0, 0.2])
    a3 = IntuitionisticFuzzySet([0.7, 0.1, 0.4], [0.2, 0.8, 0.4], [0.1, 0.1, 0.2])
    b = IntuitionisticFuzzySet([0.4, 0.6, 0.0], [0.4, 0.2, 0.8], [0.2, 0.2, 0.2])
    checks = (
        (a1, {}, 0.843, 3),
        (a2, {}, 0.927, 3),
        (a3, {}, 0.797, 3),
        (a1, {'similarity_type': 'c'}, 0.761, 2),
        (a2, {'similarity_type': 'c'}, 0.883, 3),
        (a3, {'similarity_type': 'c'}, 0.698, 3),
        (a1, {'similarity_type': 'e'}, 0.794, 3),
        (a2, {'similarity_type': 'e'}, 0.902, 3),
        (a3, {'similarity_type': 'e'}, 0.737, 3),
    )
    for lhs, kwargs, expected, places in checks:
        assert_almost_equal(hung_yang_2(lhs, b, **kwargs), expected, decimal=places)
def test_zhang_fu():
    """Zhang-Fu similarity: one query set against four candidate patterns."""
    candidates = (
        IntuitionisticFuzzySet([0.4, 0.3, 0.5, 0.5, 0.6], [0.4, 0.3, 0.1, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.2, 0.3, 0.2, 0.7, 0.8], [0.6, 0.5, 0.3, 0.1, 0.0]),
        IntuitionisticFuzzySet([0.1, 0.0, 0.2, 0.1, 0.2], [0.9, 1.0, 0.7, 0.8, 0.8]),
        IntuitionisticFuzzySet([0.8, 0.9, 1.0, 0.7, 0.6], [0.2, 0.0, 0.0, 0.2, 0.4]),
    )
    query = IntuitionisticFuzzySet([0.3, 0.4, 0.6, 0.5, 0.9], [0.5, 0.4, 0.2, 0.1, 0.0])
    for candidate, expected in zip(candidates, (0.884, 0.870, 0.449, 0.671)):
        assert_almost_equal(zhang_fu(query, candidate), expected, decimal=3)
def test_hung_yang_3():
    """Hung-Yang (3rd) similarity: Example 1 across all seven sub-similarity types."""
    # B coincides with A1, so the first expectation of every triple is 1.0.
    a1 = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])
    a2 = IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])
    a3 = IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])
    b = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])
    # Expected (A1, A2, A3) results per variant.  NOTE(review): the A2/A3
    # rows of SIMILARITY_4 were marked "fails" in the original suite.
    table = (
        (HUNG_YANG_3_SIMILARITY_1, (1.000, 0.722, 0.500)),
        (HUNG_YANG_3_SIMILARITY_2, (1.000, 0.900, 0.700)),
        (HUNG_YANG_3_SIMILARITY_3, (1.000, 0.714, 0.500)),
        (HUNG_YANG_3_SIMILARITY_4, (1.000, 0.714, 0.500)),
        (HUNG_YANG_3_SIMILARITY_5, (1.000, 0.833, 0.667)),
        (HUNG_YANG_3_SIMILARITY_6, (1.000, 0.809, 0.525)),
        (HUNG_YANG_3_SIMILARITY_7, (1.000, 0.783, 0.533)),
    )
    for variant, expectations in table:
        for lhs, expected in zip((a1, a2, a3), expectations):
            assert_almost_equal(hung_yang_3(lhs, b, similarity_type=variant), expected, decimal=3)
def test_chen_1():
    """Chen (1st) weighted similarity on a single five-element pair."""
    lhs = IntuitionisticFuzzySet([0.1, 0.2, 0.4, 0.6, 0.8], [0.3, 0.6, 0.8, 0.8, 1.0])
    rhs = IntuitionisticFuzzySet([0.2, 0.3, 0.5, 0.7, 0.9], [0.5, 0.7, 0.8, 0.9, 1.0])
    attribute_weights = [0.5, 0.8, 1.0, 0.7, 1.0]
    assert_almost_equal(chen_1(lhs, rhs, weights=attribute_weights), 0.90625, decimal=5)
def test_hung_yang_4():
    """Hung-Yang (4th) similarity with p=2 across three published examples."""

    def verify(candidates, reference, table):
        # ``table`` rows pair a kwargs dict (variant selector) with the
        # expected values for ``candidates`` in order; all at 3 decimals.
        for kwargs, expectations in table:
            for lhs, expected in zip(candidates, expectations):
                assert_almost_equal(hung_yang_4(lhs, reference, p=2, **kwargs), expected, decimal=3)

    # Example 1: B coincides with the first candidate.
    sets_1 = (
        IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1]),
        IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4]),
    )
    b_1 = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])
    verify(sets_1, b_1, (
        ({}, (1.000, 0.933, 0.800)),
        ({'similarity_type': 'c'}, (1.000, 0.853, 0.624)),
        ({'similarity_type': 'e'}, (1.000, 0.881, 0.675)),
    ))
    # Example 2.
    sets_2 = (
        IntuitionisticFuzzySet([0.1, 0.5, 0.1], [0.1, 0.1, 0.9]),
        IntuitionisticFuzzySet([0.5, 0.7, 0.0], [0.5, 0.3, 0.8]),
        IntuitionisticFuzzySet([0.7, 0.1, 0.4], [0.2, 0.8, 0.4]),
    )
    b_2 = IntuitionisticFuzzySet([0.4, 0.6, 0.0], [0.4, 0.2, 0.8])
    verify(sets_2, b_2, (
        ({}, (0.833, 0.933, 0.598)),
        ({'similarity_type': 'c'}, (0.674, 0.853, 0.381)),
        ({'similarity_type': 'e'}, (0.723, 0.881, 0.427)),
    ))
    # Example 3: only two candidate sets.
    sets_3 = (
        IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4]),
    )
    b_3 = IntuitionisticFuzzySet([0.3, 0.3, 0.1], [0.3, 0.3, 0.3])
    verify(sets_3, b_3, (
        ({}, (0.900, 0.859)),
        ({'similarity_type': 'c'}, (0.788, 0.716)),
        ({'similarity_type': 'e'}, (0.826, 0.761)),
    ))
def test_hong_kim():
    """Hong-Kim similarity, unweighted, on the Example 1 pair."""
    first = IntuitionisticFuzzySet([0.8, 0.3, 0.4], [0.9, 0.5, 0.6])
    second = IntuitionisticFuzzySet([0.9, 0.0, 0.8], [0.9, 0.0, 0.9])
    assert_almost_equal(hong_kim(first, second, weights=None), 0.7333, decimal=4)
def test_chen_2():
    """Chen (2nd) similarity, unweighted, on the same pair as the Hong-Kim test."""
    first = IntuitionisticFuzzySet([0.8, 0.3, 0.4], [0.9, 0.5, 0.6])
    second = IntuitionisticFuzzySet([0.9, 0.0, 0.8], [0.9, 0.0, 0.9])
    assert_almost_equal(chen_2(first, second, weights=None), 0.7333, decimal=4)
def test_liu():
    """Liu similarity with p=2: three patterns against one sample set."""
    patterns = (
        IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0], [0.1, 0.0, 0.1]),
        IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0], [0.2, 0.2, 0.0]),
    )
    sample = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1], [0.2, 0.2, 0.1])
    for pattern, expected in zip(patterns, (0.72, 0.74, 0.84)):
        assert_almost_equal(liu(pattern, sample, p=2), expected, decimal=2)
def test_iancu():
    """Iancu similarity family: Example 1 plus a second two-set comparison."""
    # Example 1: B coincides with A1.  Variants 1 and 2 give 1.0 for every
    # candidate here.  NOTE(review): the variant-9 and variant-10 rows were
    # marked "fail" in the original suite.
    triple = (
        IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1]),
        IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4]),
    )
    reference = IntuitionisticFuzzySet([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])
    table = (
        (IANCU_SIMILARITY_1, (1.000, 1.000, 1.000)),
        (IANCU_SIMILARITY_2, (1.000, 1.000, 1.000)),
        (IANCU_SIMILARITY_9, (1.000, 0.938, 0.833)),
        (IANCU_SIMILARITY_10, (1.000, 0.938, 0.833)),
    )
    for variant, expectations in table:
        for lhs, expected in zip(triple, expectations):
            assert_almost_equal(iancu(lhs, reference, similarity_type=variant), expected, decimal=3)
    # Second comparison: default similarity, then variant 7 (first variant-7
    # row was marked "fails" in the original suite).
    pair = (
        IntuitionisticFuzzySet([0.2, 0.2, 0.2], [0.2, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.4, 0.4, 0.4], [0.4, 0.4, 0.4]),
    )
    reference = IntuitionisticFuzzySet([0.3, 0.3, 0.1], [0.3, 0.3, 0.3])
    for lhs in pair:
        assert_almost_equal(iancu(lhs, reference), 0.933, decimal=3)
    for lhs in pair:
        assert_almost_equal(iancu(lhs, reference, similarity_type=IANCU_SIMILARITY_7), 0.938, decimal=3)
def test_song_wang_lei_xue():
    """Song-Wang-Lei-Xue similarity, unweighted and with weights [0.5, 0.3, 0.2]."""
    patterns = (
        IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2]),
        IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0], [0.1, 0.0, 0.1]),
        IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0], [0.2, 0.2, 0.0]),
    )
    sample = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1], [0.2, 0.2, 0.1])
    for pattern, expected in zip(patterns, (0.887, 0.913, 0.936)):
        assert_almost_equal(song_wang_lei_xue(pattern, sample), expected, decimal=3)
    w = [0.5, 0.3, 0.2]
    for pattern, expected in zip(patterns, (0.853, 0.919, 0.949)):
        assert_almost_equal(song_wang_lei_xue(pattern, sample, weights=w), expected, decimal=3)
def test_deng_jiang_fu():
A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])
A2 = IntuitionisticFuzzySet([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])
A3 = IntuitionisticFuzzySet([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])
B = IntuitionisticFuzzySet([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])
# Example 2
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1), 0.489, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1), 0.458, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1), 0.546, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2), 0.454, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2), 0.444, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2), 0.541, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.625, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.615, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.702, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_1), 0.681, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_1), 0.668, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_1), 0.745, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_2), 0.658, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_2), 0.658, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_2), 0.743, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.783, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.783, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.850, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_4), 0.644, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_4), 0.644, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_4), 0.739, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.593, decimal=3) # fails
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.593, decimal=3) # fails
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.700, decimal=3) # fails
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.928, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.941, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.975, decimal=3)
assert_almost_equal(deng_jiang_fu(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.667, decimal=3)
assert_almost_equal(deng_jiang_fu(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.667, decimal=3)
assert_almost_equal(deng_jiang_fu(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.766, decimal=3)
# Example 3
diagnoses, patients = load_patients_diagnoses()
viral_fever, malaria, typhoid, stomach_problem, chest_problem = diagnoses
al, bob, joe, ted = patients
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.467,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.437,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.608,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.698,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.683,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.81,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.681,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.634, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.947,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.706,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.517,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.489,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.657,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.709,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.69, decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.82,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.695,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.65, decimal=3) # fails
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5),
0.946, decimal=3)
assert_almost_equal(deng_jiang_fu(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.721,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.544,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.474,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.643,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.698,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.661,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.8,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.667,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.619, decimal=3) # fails
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5),
0.92, decimal=3)
assert_almost_equal(deng_jiang_fu(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.691,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.216,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.186,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1),
0.313, decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.393,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.361,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.54,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.37,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.304, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.736,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1),
0.339, decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.26,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.184,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.311,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.375,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.324,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.5,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.333,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.269, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.678,
decimal=3)
assert_almost_equal(deng_jiang_fu(al, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.293,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.348,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.28,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.437,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.518,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.476,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.67,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.504,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.441, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.831,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.508,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.3, decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.21,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.348,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.419,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.352,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.54,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.37,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.304, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.694,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.34,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.415,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.366,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.536,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.594,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.567,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.74,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.587,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.531, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.898,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.605,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.641,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.635,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1),
0.777, decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.826,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.825,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.9,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.818,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.79, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.986,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1),
0.844, decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.371,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.309,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.472,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.509,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.463,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.64,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.471,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.406, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.802,
decimal=3)
assert_almost_equal(deng_jiang_fu(bob, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.464,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.363,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.348,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.516,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.618,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.603,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.75,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.6,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.545, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.915,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.617,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.344,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.308,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.471,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.533,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.492,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.68,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.515,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.453, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.844,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.52,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.498,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.47,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.639,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.712,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.7, decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.82,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.695,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.65, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.944,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.721,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.32,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.241,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1),
0.388, decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.512,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.452,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.6,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.429,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.363, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.762,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1),
0.415, decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.277,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.214,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.353,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.449,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.387,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.54,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.37,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.304, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.7,
decimal=3)
assert_almost_equal(deng_jiang_fu(joe, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.34,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.407,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.403,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.574,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.672,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.672,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.8,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.667,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.619, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.954,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.691,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.421,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.401,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.572,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.624,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.61,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.77,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.626,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.574, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.927,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.648,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.318,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.31,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.474,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.541,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.532,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.71,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.55,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.491, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.897,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.561,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.264,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.243,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1),
0.391, decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.481,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.464,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1),
0.63, decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.46,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.395, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.829,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, stomach_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1),
0.451, decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1, ), 0.198,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2, ), 0.189,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1), 0.319,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1, ), 0.376,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2, ), 0.366,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1), 0.55,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4, ), 0.379,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1), 0.314, decimal=3) # fails
assert_almost_equal(
deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5), 0.773,
decimal=3)
assert_almost_equal(deng_jiang_fu(ted, chest_problem, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1), 0.351,
decimal=3)
def test_nguyen():
    """Check the ``nguyen`` similarity measure against the paper's examples.

    Examples 1 and 2 use single-element sets (membership, non-membership,
    hesitancy); Examples 3 and 4 use three-element sets compared against a
    common set ``B``.
    """
    # Example 1
    M = IntuitionisticFuzzySet([1.0], [0.0], [0.0])
    # NOTE(review): the original had this N assignment duplicated on two
    # consecutive lines; the redundant copy was removed (no behavior change).
    N = IntuitionisticFuzzySet([0.0], [1.0], [0.0])
    F = IntuitionisticFuzzySet([0.0], [0.0], [1.0])
    assert_equal(nguyen(M, N), -1)
    assert_equal(nguyen(M, F), 0.0)
    R = IntuitionisticFuzzySet([0.5], [0.3], [0.2])
    S = IntuitionisticFuzzySet([0.5], [0.2], [0.3])
    assert_almost_equal(nguyen(M, R), 0.7, decimal=1)
    assert_almost_equal(nguyen(M, S), 0.625, decimal=3)
    # Example 2
    A = IntuitionisticFuzzySet([0.3], [0.3], [0.4])
    B = IntuitionisticFuzzySet([0.4], [0.4], [0.2])
    assert_almost_equal(nguyen(A, B), 0.827, decimal=3)
    A = IntuitionisticFuzzySet([0.3], [0.4], [0.3])
    B = IntuitionisticFuzzySet([0.4], [0.3], [0.3])
    assert_equal(nguyen(A, B), -1)
    A = IntuitionisticFuzzySet([1.0], [0.0], [0.0])
    B = IntuitionisticFuzzySet([0.0], [0.0], [1.0])
    assert_equal(nguyen(A, B), 0.0)
    A = IntuitionisticFuzzySet([0.5], [0.5], [0.0])
    B = IntuitionisticFuzzySet([0.0], [0.0], [1.0])
    assert_almost_equal(nguyen(A, B), 0.134, decimal=3)
    A = IntuitionisticFuzzySet([0.4], [0.2], [0.4])
    B = IntuitionisticFuzzySet([0.5], [0.3], [0.2])
    assert_almost_equal(nguyen(A, B), 0.829, decimal=3)
    A = IntuitionisticFuzzySet([0.4], [0.2], [0.4])
    B = IntuitionisticFuzzySet([0.5], [0.2], [0.3])
    assert_almost_equal(nguyen(A, B), 0.904, decimal=3)
    A = IntuitionisticFuzzySet([0.0], [0.87], [0.13])
    B = IntuitionisticFuzzySet([0.28], [0.55], [0.17])
    assert_almost_equal(nguyen(A, B), 0.861, decimal=3)
    # NOTE(review): negative hesitancy below (-0.4) looks like a deliberate
    # degenerate input from the paper — confirm against the source.
    A = IntuitionisticFuzzySet([0.6], [0.87], [-0.4])
    B = IntuitionisticFuzzySet([0.28], [0.55], [0.17])
    assert_almost_equal(nguyen(A, B), 0.960, decimal=3)  # fails
    # Example 3
    A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2])
    A2 = IntuitionisticFuzzySet([0.35, 0.45, 0.55], [0.15, 0.25, 0.35], [0.5, 0.3, 0.1])
    A3 = IntuitionisticFuzzySet([0.25, 0.35, 0.45], [0.25, 0.35, 0.45], [0.5, 0.3, 0.1])
    B = IntuitionisticFuzzySet([0.3, 0.4, 0.5], [0.2, 0.3, 0.4], [0.5, 0.3, 0.1])
    assert_almost_equal(nguyen(A1, B), 0.757, decimal=3)
    assert_almost_equal(nguyen(A2, B), 0.994, decimal=3)
    assert_almost_equal(nguyen(A3, B), 0.998, decimal=3)
    # Example 4
    A1 = IntuitionisticFuzzySet([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2])
    A2 = IntuitionisticFuzzySet([0.3, 0.4, 0.2], [0.5, 0.4, 0.6], [0.2, 0.2, 0.2])
    A3 = IntuitionisticFuzzySet([0.4, 0.3, 0.2], [0.4, 0.5, 0.6], [0.2, 0.2, 0.2])
    B = IntuitionisticFuzzySet([0.3, 0.4, 0.5], [0.3, 0.4, 0.5], [0.4, 0.2, 0.0])
    assert_almost_equal(nguyen(A1, B), 0.841, decimal=3)
    assert_almost_equal(nguyen(A2, B), -0.988, decimal=3)
    assert_almost_equal(nguyen(A3, B), -0.988, decimal=3)
def test_chen_cheng_lan():
    """Reproduce the published reference values for ``chen_cheng_lan``.

    Covers the single-element comparison cases, Table 2, and the
    multi-element Examples 7.1-7.3 of the source paper.
    """
    # (memberships A, non-memberships A, memberships B, non-memberships B,
    #  expected similarity, decimal places)
    pairwise_cases = [
        ([0.3], [0.3], [0.4], [0.4], 0.9667, 4),
        ([0.3], [0.4], [0.4], [0.3], 0.9000, 4),
        ([1.0], [0.0], [0.0], [0.0], 0.5000, 4),
        ([0.5], [0.5], [0.0], [0.0], 0.8333, 4),
        ([0.4], [0.2], [0.5], [0.3], 0.9667, 4),
        ([0.4], [0.2], [0.5], [0.2], 0.9450, 4),
        # Table 2
        ([0.5], [0.5], [0.0], [0.0], 0.8333, 4),
        ([0.6], [0.4], [0.0], [0.0], 0.8330, 3),
        ([0.0], [0.87], [0.28], [0.55], 0.7047, 4),
        ([0.6], [0.27], [0.28], [0.55], 0.6953, 4),
    ]
    for mu_a, nu_a, mu_b, nu_b, expected, places in pairwise_cases:
        first = IntuitionisticFuzzySet(mu_a, nu_a)
        second = IntuitionisticFuzzySet(mu_b, nu_b)
        assert_almost_equal(chen_cheng_lan(first, second), expected, decimal=places)

    # The examples below fail, most likely due to the rounding process of the authors.
    # Each entry: the three pattern sets P1-P3, the query set Q, and the
    # three expected similarities (all checked to 4 decimal places).
    examples = [
        # Example 7.1
        ([([1.0, 0.8, 0.7], [0.0, 0.0, 0.1]),
          ([0.8, 1.0, 0.9], [0.1, 0.0, 0.0]),
          ([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])],
         ([0.5, 0.6, 0.8], [0.3, 0.2, 0.1]),
         [0.7100, 0.7133, 0.8117]),
        # Example 7.2
        ([([0.1, 0.5, 0.1], [0.1, 0.1, 0.9]),
          ([0.5, 0.7, 0.0], [0.5, 0.3, 0.8]),
          ([0.7, 0.1, 0.4], [0.2, 0.8, 0.4])],
         ([0.4, 0.6, 0.0], [0.4, 0.2, 0.8]),
         [0.8544, 0.9356, 0.5333]),
        # Example 7.3
        ([([0.5, 0.7, 0.4, 0.7], [0.3, 0.0, 0.5, 0.3]),
          ([0.5, 0.6, 0.2, 0.7], [0.2, 0.1, 0.7, 0.3]),
          ([0.5, 0.7, 0.4, 0.7], [0.4, 0.1, 0.6, 0.2])],
         ([0.4, 0.7, 0.3, 0.7], [0.3, 0.1, 0.6, 0.3]),
         [0.9413, 0.9150, 0.9504]),
    ]
    for patterns, (mu_q, nu_q), expectations in examples:
        query = IntuitionisticFuzzySet(mu_q, nu_q)
        for (mu_p, nu_p), expected in zip(patterns, expectations):
            pattern = IntuitionisticFuzzySet(mu_p, nu_p)
            assert_almost_equal(chen_cheng_lan(pattern, query), expected, decimal=4)
def test_muthukumar_krishnanb():
    """Reproduce the published reference values for ``muthukumar_krishnanb``."""
    # all tests fail
    F = IntuitionisticFuzzySet(
        [0.3, 0.5, 0.6, 0.5, 0.7, 0.9, 0.7, 0.8, 0.6, 0.7, 0.7, 0.3],
        [0.0, 0.1, 0.3, 0.0, 0.1, 0.0, 0.1, 0.2, 0.2, 0.0, 0.2, 0.0])
    G = IntuitionisticFuzzySet(
        [0.8, 0.7, 0.5, 0.4, 0.9, 0.9, 0.8, 0.7, 0.5, 0.9, 0.6, 0.8],
        [0.1, 0.2, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.1, 0.1])
    assert_almost_equal(muthukumar_krishnanb(F, G), 0.81448, decimal=5)

    F = IntuitionisticFuzzySet([0.6, 0.4, 0.8, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9],
                               [0.2, 0.5, 0.1, 0.3, 0.1, 0.3, 0.2, 0.0, 0.0])
    G = IntuitionisticFuzzySet([0.5, 0.7, 0.6, 0.6, 0.4, 0.5, 0.9, 0.5, 0.8],
                               [0.3, 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.1, 0.0])
    H = IntuitionisticFuzzySet([0.4, 0.6, 0.5, 0.3, 0.7, 0.5, 0.2, 0.5, 0.1],
                               [0.4, 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8])
    for pair, expected in (((F, G), 0.8029), ((G, H), 0.4907), ((F, H), 0.4843)):
        assert_almost_equal(muthukumar_krishnanb(*pair), expected, decimal=4)

    # Fifteen candidate patients P1-P15, each compared against the model M.
    M = IntuitionisticFuzzySet([0.6, 0.4, 0.8, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9],
                               [0.2, 0.5, 0.1, 0.3, 0.1, 0.3, 0.2, 0.0, 0.0])
    # (memberships, non-memberships, expected similarity to M at 4 decimals)
    patient_cases = [
        ([0.5, 0.7, 0.6, 0.6, 0.4, 0.5, 0.9, 0.5, 0.8], [0.3, 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.1, 0.0], 0.8092),
        ([0.2, 0.6, 0.5, 0.3, 0.7, 0.4, 0.2, 0.5, 0.1], [0.4, 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8], 0.4733),
        ([0.5, 0.5, 0.3, 0.1, 0.3, 0.6, 0.3, 0.0, 0.2], [0.4, 0.0, 0.6, 0.8, 0.0, 0.2, 0.5, 0.2, 0.4], 0.3906),
        ([0.3, 0.6, 0.2, 0.4, 0.2, 0.5, 0.3, 0.4, 0.2], [0.5, 0.0, 0.6, 0.5, 0.4, 0.0, 0.1, 0.0, 0.6], 0.4047),
        ([0.5, 0.4, 0.6, 0.0, 0.3, 0.4, 0.1, 0.2, 0.4], [0.0, 0.0, 0.2, 0.2, 0.0, 0.0, 0.5, 0.0, 0.4], 0.4232),
        ([0.4, 0.6, 0.5, 0.3, 0.7, 0.5, 0.2, 0.5, 0.1], [0.4, 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8], 0.5064),
        ([0.3, 0.7, 0.6, 0.5, 0.9, 0.7, 0.6, 0.7, 0.7], [0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.3, 0.1, 0.2], 0.7305),
        ([0.8, 0.9, 0.5, 0.7, 0.9, 0.9, 0.5, 0.8, 0.6], [0.1, 0.0, 0.3, 0.2, 0.0, 0.1, 0.2, 0.0, 0.1], 0.7279),
        ([0.5, 0.8, 0.3, 0.4, 0.7, 0.8, 0.0, 0.4, 0.0], [0.0, 0.2, 0.0, 0.1, 0.0, 0.1, 0.8, 0.3, 0.7], 0.4497),
        ([0.7, 0.4, 0.6, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9], [0.2, 0.5, 0.1, 0.3, 0.1, 0.0, 0.2, 0.0, 0.0], 0.9323),
        ([0.4, 0.7, 0.6, 0.6, 0.4, 0.5, 0.7, 0.5, 0.8], [0.3, 0.0, 0.3, 0.2, 0.0, 0.1, 0.2, 0.1, 0.0], 0.8000),
        ([0.6, 0.5, 0.5, 0.3, 0.5, 0.4, 0.2, 0.5, 0.1], [0.4, 0.0, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8], 0.4738),
        ([0.5, 0.6, 0.4, 0.5, 0.3, 0.2, 0.5, 0.4, 0.2], [0.3, 0.0, 0.3, 0.4, 0.2, 0.1, 0.0, 0.0, 0.5], 0.5112),
        ([0.0, 0.4, 0.5, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3], [0.5, 0.3, 0.2, 0.1, 0.2, 0.1, 0.1, 0.3, 0.5], 0.4755),
        ([0.4, 0.2, 0.0, 0.0, 0.5, 0.4, 0.5, 0.2, 0.4], [0.0, 0.3, 0.2, 0.3, 0.2, 0.3, 0.3, 0.3, 0.4], 0.4625),
    ]
    for memberships, non_memberships, expected in patient_cases:
        patient = IntuitionisticFuzzySet(memberships, non_memberships)
        assert_almost_equal(muthukumar_krishnanb(patient, M), expected, decimal=4)
|
[
"fsmpy.similarities.hung_yang_2",
"fsmpy.similarities.hung_yang_3",
"fsmpy.similarities.liu",
"fsmpy.similarities.nguyen",
"fsmpy.similarities.song_wang_lei_xue",
"fsmpy.similarities.hung_yang_4",
"fsmpy.similarities.ye",
"fsmpy.similarities.zhang_fu",
"fsmpy.similarities.dengfeng_chuntian",
"fsmpy.similarities.liang_shi",
"fsmpy.similarities.park_kwun_lim",
"fsmpy.datasets.load_patients_diagnoses",
"fsmpy.similarities.hung_yang_1",
"fsmpy.similarities.muthukumar_krishnanb",
"fsmpy.similarities.chen_2",
"fsmpy.similarities.hwang_yang",
"fsmpy.similarities.iancu",
"fsmpy.similarities.hong_kim",
"fsmpy.similarities.chen_cheng_lan",
"fsmpy.similarities.deng_jiang_fu",
"fsmpy.sets.IntuitionisticFuzzySet",
"fsmpy.similarities.chen_1"
] |
[((1849, 1905), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])\n', (1871, 1905), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((1915, 1971), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])\n', (1937, 1971), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((1981, 2037), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])\n', (2003, 2037), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((2046, 2102), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])\n', (2068, 2102), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((2961, 3017), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.5, 0.1]', '[0.9, 0.9, 0.1]'], {}), '([0.1, 0.5, 0.1], [0.9, 0.9, 0.1])\n', (2983, 3017), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((3027, 3083), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.0]', '[0.5, 0.7, 0.2]'], {}), '([0.5, 0.7, 0.0], [0.5, 0.7, 0.2])\n', (3049, 3083), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((3093, 3149), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.7, 0.1, 0.4]', '[0.8, 0.2, 0.6]'], {}), '([0.7, 0.1, 0.4], [0.8, 0.2, 0.6])\n', (3115, 3149), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((3158, 3214), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.0]', '[0.6, 0.8, 0.2]'], {}), '([0.4, 0.6, 0.0], [0.6, 0.8, 0.2])\n', (3180, 3214), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4373, 4429), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.1, 0.0]', 
'[0.6, 0.7, 0.6]'], {}), '([0.2, 0.1, 0.0], [0.6, 0.7, 0.6])\n', (4395, 4429), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4439, 4495), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.0, 0.2]', '[0.6, 0.6, 0.8]'], {}), '([0.2, 0.0, 0.2], [0.6, 0.6, 0.8])\n', (4461, 4495), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4505, 4561), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.2, 0.2]', '[0.5, 0.7, 0.8]'], {}), '([0.1, 0.2, 0.2], [0.5, 0.7, 0.8])\n', (4527, 4561), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4570, 4626), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.7, 0.8, 0.7]'], {}), '([0.3, 0.2, 0.1], [0.7, 0.8, 0.7])\n', (4592, 4626), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4852, 4908), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])\n', (4874, 4908), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4918, 4974), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])\n', (4940, 4974), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((4984, 5040), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])\n', (5006, 5040), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((5049, 5105), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])\n', (5071, 5105), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((5196, 5252), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (5218, 5252), False, 'from fsmpy.sets 
import IntuitionisticFuzzySet\n'), ((5262, 5318), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])\n', (5284, 5318), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((5328, 5384), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (5350, 5384), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((5393, 5449), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (5415, 5449), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((6160, 6216), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.3, 0.3, 0.3]'], {}), '([0.2, 0.2, 0.2], [0.3, 0.3, 0.3])\n', (6182, 6216), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((6226, 6282), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (6248, 6282), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((6291, 6347), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.3, 0.1]', '[0.3, 0.3, 0.3]'], {}), '([0.3, 0.3, 0.1], [0.3, 0.3, 0.3])\n', (6313, 6347), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((6833, 6889), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.5, 0.1]', '[0.9, 0.9, 0.1]'], {}), '([0.1, 0.5, 0.1], [0.9, 0.9, 0.1])\n', (6855, 6889), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((6899, 6955), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.0]', '[0.5, 0.7, 0.2]'], {}), '([0.5, 0.7, 0.0], [0.5, 0.7, 0.2])\n', (6921, 6955), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((6965, 7021), 'fsmpy.sets.IntuitionisticFuzzySet', 
'IntuitionisticFuzzySet', (['[0.7, 0.1, 0.4]', '[0.8, 0.2, 0.6]'], {}), '([0.7, 0.1, 0.4], [0.8, 0.2, 0.6])\n', (6987, 7021), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((7030, 7086), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.0]', '[0.6, 0.8, 0.2]'], {}), '([0.4, 0.6, 0.0], [0.6, 0.8, 0.2])\n', (7052, 7086), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((7816, 7872), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])\n', (7838, 7872), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((7882, 7938), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])\n', (7904, 7938), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((7948, 8004), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])\n', (7970, 8004), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((8013, 8069), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])\n', (8035, 8069), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((8499, 8575), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.3, 0.1, 0.4, 0.1]', '[0.0, 0.5, 0.7, 0.3, 0.7]'], {}), '([0.4, 0.3, 0.1, 0.4, 0.1], [0.0, 0.5, 0.7, 0.3, 0.7])\n', (8521, 8575), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((8617, 8693), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.7, 0.2, 0.0, 0.7, 0.1]', '[0.0, 0.6, 0.9, 0.0, 0.8]'], {}), '([0.7, 0.2, 0.0, 0.7, 0.1], [0.0, 0.6, 0.9, 0.0, 0.8])\n', (8639, 8693), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((8708, 8784), 'fsmpy.sets.IntuitionisticFuzzySet', 
'IntuitionisticFuzzySet', (['[0.3, 0.6, 0.2, 0.2, 0.1]', '[0.3, 0.1, 0.7, 0.6, 0.9]'], {}), '([0.3, 0.6, 0.2, 0.2, 0.1], [0.3, 0.1, 0.7, 0.6, 0.9])\n', (8730, 8784), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((8807, 8883), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.2, 0.8, 0.2, 0.2]', '[0.7, 0.4, 0.0, 0.7, 0.7]'], {}), '([0.1, 0.2, 0.8, 0.2, 0.2], [0.7, 0.4, 0.0, 0.7, 0.7])\n', (8829, 8883), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((8913, 8989), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.0, 0.2, 0.2, 0.8]', '[0.8, 0.8, 0.8, 0.8, 0.1]'], {}), '([0.1, 0.0, 0.2, 0.2, 0.8], [0.8, 0.8, 0.8, 0.8, 0.1])\n', (8935, 8989), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9018, 9094), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 0.6, 0.2, 0.6, 0.1]', '[0.1, 0.1, 0.8, 0.1, 0.6]'], {}), '([0.8, 0.6, 0.2, 0.6, 0.1], [0.1, 0.1, 0.8, 0.1, 0.6])\n', (9040, 9094), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9506, 9542), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3]', '[0.3]'], {}), '([0.3], [0.3])\n', (9528, 9542), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9553, 9589), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.4]'], {}), '([0.4], [0.4])\n', (9575, 9589), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9673, 9709), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3]', '[0.4]'], {}), '([0.3], [0.4])\n', (9695, 9709), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9720, 9756), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.3]'], {}), '([0.4], [0.3])\n', (9742, 9756), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9832, 9868), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0]', '[0.0]'], {}), '([1.0], [0.0])\n', (9854, 9868), False, 'from 
fsmpy.sets import IntuitionisticFuzzySet\n'), ((9879, 9915), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]'], {}), '([0.0], [0.0])\n', (9901, 9915), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((9991, 10027), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (10013, 10027), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10038, 10074), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]'], {}), '([0.0], [0.0])\n', (10060, 10074), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10150, 10186), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.2]'], {}), '([0.4], [0.2])\n', (10172, 10186), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10197, 10233), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.3]'], {}), '([0.5], [0.3])\n', (10219, 10233), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10309, 10345), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.2]'], {}), '([0.4], [0.2])\n', (10331, 10345), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10356, 10392), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.2]'], {}), '([0.5], [0.2])\n', (10378, 10392), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10508, 10581), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]', '[0.4, 0.6, 0.8]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1], [0.4, 0.6, 0.8])\n', (10530, 10581), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10591, 10664), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]', '[0.6, 0.6, 0.6]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2], [0.6, 0.6, 0.6])\n', (10613, 10664), False, 'from fsmpy.sets import 
IntuitionisticFuzzySet\n'), ((10674, 10747), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]', '[0.2, 0.2, 0.2]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4], [0.2, 0.2, 0.2])\n', (10696, 10747), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((10756, 10829), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]', '[0.4, 0.6, 0.8]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1], [0.4, 0.6, 0.8])\n', (10778, 10829), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((11540, 11613), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]', '[0.6, 0.6, 0.6]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2], [0.6, 0.6, 0.6])\n', (11562, 11613), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((11623, 11696), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]', '[0.2, 0.2, 0.2]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4], [0.2, 0.2, 0.2])\n', (11645, 11696), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((11705, 11778), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.3, 0.1]', '[0.3, 0.3, 0.3]', '[0.4, 0.4, 0.6]'], {}), '([0.3, 0.3, 0.1], [0.3, 0.3, 0.3], [0.4, 0.4, 0.6])\n', (11727, 11778), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((12426, 12499), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.5, 0.1]', '[0.1, 0.1, 0.9]', '[0.8, 0.4, 0.0]'], {}), '([0.1, 0.5, 0.1], [0.1, 0.1, 0.9], [0.8, 0.4, 0.0])\n', (12448, 12499), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((12509, 12582), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.0]', '[0.5, 0.3, 0.8]', '[0.0, 0.0, 0.2]'], {}), '([0.5, 0.7, 0.0], [0.5, 0.3, 0.8], [0.0, 0.0, 0.2])\n', (12531, 12582), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((12592, 12665), 
'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.7, 0.1, 0.4]', '[0.2, 0.8, 0.4]', '[0.1, 0.1, 0.2]'], {}), '([0.7, 0.1, 0.4], [0.2, 0.8, 0.4], [0.1, 0.1, 0.2])\n', (12614, 12665), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((12674, 12747), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.0]', '[0.4, 0.2, 0.8]', '[0.2, 0.2, 0.2]'], {}), '([0.4, 0.6, 0.0], [0.4, 0.2, 0.8], [0.2, 0.2, 0.2])\n', (12696, 12747), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((13483, 13559), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.3, 0.5, 0.5, 0.6]', '[0.4, 0.3, 0.1, 0.2, 0.2]'], {}), '([0.4, 0.3, 0.5, 0.5, 0.6], [0.4, 0.3, 0.1, 0.2, 0.2])\n', (13505, 13559), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((13569, 13645), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.3, 0.2, 0.7, 0.8]', '[0.6, 0.5, 0.3, 0.1, 0.0]'], {}), '([0.2, 0.3, 0.2, 0.7, 0.8], [0.6, 0.5, 0.3, 0.1, 0.0])\n', (13591, 13645), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((13655, 13731), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.0, 0.2, 0.1, 0.2]', '[0.9, 1.0, 0.7, 0.8, 0.8]'], {}), '([0.1, 0.0, 0.2, 0.1, 0.2], [0.9, 1.0, 0.7, 0.8, 0.8])\n', (13677, 13731), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((13741, 13817), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 0.9, 1.0, 0.7, 0.6]', '[0.2, 0.0, 0.0, 0.2, 0.4]'], {}), '([0.8, 0.9, 1.0, 0.7, 0.6], [0.2, 0.0, 0.0, 0.2, 0.4])\n', (13763, 13817), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((13826, 13902), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.4, 0.6, 0.5, 0.9]', '[0.5, 0.4, 0.2, 0.1, 0.0]'], {}), '([0.3, 0.4, 0.6, 0.5, 0.9], [0.5, 0.4, 0.2, 0.1, 0.0])\n', (13848, 13902), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((14191, 14247), 'fsmpy.sets.IntuitionisticFuzzySet', 
'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (14213, 14247), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((14257, 14313), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])\n', (14279, 14313), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((14323, 14379), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (14345, 14379), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((14388, 14444), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (14410, 14444), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((16717, 16793), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.2, 0.4, 0.6, 0.8]', '[0.3, 0.6, 0.8, 0.8, 1.0]'], {}), '([0.1, 0.2, 0.4, 0.6, 0.8], [0.3, 0.6, 0.8, 0.8, 1.0])\n', (16739, 16793), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((16802, 16878), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.3, 0.5, 0.7, 0.9]', '[0.5, 0.7, 0.8, 0.9, 1.0]'], {}), '([0.2, 0.3, 0.5, 0.7, 0.9], [0.5, 0.7, 0.8, 0.9, 1.0])\n', (16824, 16878), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((17024, 17080), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (17046, 17080), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((17090, 17146), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])\n', (17112, 17146), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((17156, 17212), 
'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (17178, 17212), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((17221, 17277), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (17243, 17277), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((18037, 18093), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.5, 0.1]', '[0.1, 0.1, 0.9]'], {}), '([0.1, 0.5, 0.1], [0.1, 0.1, 0.9])\n', (18059, 18093), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((18103, 18159), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.0]', '[0.5, 0.3, 0.8]'], {}), '([0.5, 0.7, 0.0], [0.5, 0.3, 0.8])\n', (18125, 18159), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((18169, 18225), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.7, 0.1, 0.4]', '[0.2, 0.8, 0.4]'], {}), '([0.7, 0.1, 0.4], [0.2, 0.8, 0.4])\n', (18191, 18225), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((18234, 18290), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.0]', '[0.4, 0.2, 0.8]'], {}), '([0.4, 0.6, 0.0], [0.4, 0.2, 0.8])\n', (18256, 18290), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((19049, 19105), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])\n', (19071, 19105), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((19115, 19171), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (19137, 19171), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((19180, 19236), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 
0.3, 0.1]', '[0.3, 0.3, 0.3]'], {}), '([0.3, 0.3, 0.1], [0.3, 0.3, 0.3])\n', (19202, 19236), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((19772, 19828), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 0.3, 0.4]', '[0.9, 0.5, 0.6]'], {}), '([0.8, 0.3, 0.4], [0.9, 0.5, 0.6])\n', (19794, 19828), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((19837, 19893), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.9, 0.0, 0.8]', '[0.9, 0.0, 0.9]'], {}), '([0.9, 0.0, 0.8], [0.9, 0.0, 0.9])\n', (19859, 19893), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20013, 20069), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 0.3, 0.4]', '[0.9, 0.5, 0.6]'], {}), '([0.8, 0.3, 0.4], [0.9, 0.5, 0.6])\n', (20035, 20069), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20078, 20134), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.9, 0.0, 0.8]', '[0.9, 0.0, 0.9]'], {}), '([0.9, 0.0, 0.8], [0.9, 0.0, 0.9])\n', (20100, 20134), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20234, 20307), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]', '[0.0, 0.2, 0.2]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2])\n', (20256, 20307), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20317, 20390), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]', '[0.1, 0.0, 0.1]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0], [0.1, 0.0, 0.1])\n', (20339, 20390), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20400, 20473), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]', '[0.2, 0.2, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0], [0.2, 0.2, 0.0])\n', (20422, 20473), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20482, 20555), 
'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]', '[0.2, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1], [0.2, 0.2, 0.1])\n', (20504, 20555), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20775, 20831), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (20797, 20831), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20841, 20897), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])\n', (20863, 20897), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20907, 20963), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (20929, 20963), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((20972, 21028), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.2, 0.1]', '[0.3, 0.2, 0.1]'], {}), '([0.3, 0.2, 0.1], [0.3, 0.2, 0.1])\n', (20994, 21028), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22172, 22228), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.2, 0.2]', '[0.2, 0.2, 0.2]'], {}), '([0.2, 0.2, 0.2], [0.2, 0.2, 0.2])\n', (22194, 22228), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22238, 22294), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.4, 0.4]', '[0.4, 0.4, 0.4]'], {}), '([0.4, 0.4, 0.4], [0.4, 0.4, 0.4])\n', (22260, 22294), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22303, 22359), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.3, 0.1]', '[0.3, 0.3, 0.3]'], {}), '([0.3, 0.3, 0.1], [0.3, 0.3, 0.3])\n', (22325, 22359), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22723, 22796), 
'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]', '[0.0, 0.2, 0.2]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2])\n', (22745, 22796), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22806, 22879), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]', '[0.1, 0.0, 0.1]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0], [0.1, 0.0, 0.1])\n', (22828, 22879), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22889, 22962), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]', '[0.2, 0.2, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0], [0.2, 0.2, 0.0])\n', (22911, 22962), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((22971, 23044), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]', '[0.2, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1], [0.2, 0.2, 0.1])\n', (22993, 23044), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((23567, 23623), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])\n', (23589, 23623), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((23633, 23689), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])\n', (23655, 23689), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((23699, 23755), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])\n', (23721, 23755), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((23764, 23820), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])\n', (23786, 23820), 
False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((26971, 26996), 'fsmpy.datasets.load_patients_diagnoses', 'load_patients_diagnoses', ([], {}), '()\n', (26994, 26996), False, 'from fsmpy.datasets import load_patients_diagnoses\n'), ((57233, 57276), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0]', '[0.0]', '[0.0]'], {}), '([1.0], [0.0], [0.0])\n', (57255, 57276), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57285, 57328), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[1.0]', '[0.0]'], {}), '([0.0], [1.0], [0.0])\n', (57307, 57328), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57337, 57380), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[1.0]', '[0.0]'], {}), '([0.0], [1.0], [0.0])\n', (57359, 57380), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57389, 57432), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]', '[1.0]'], {}), '([0.0], [0.0], [1.0])\n', (57411, 57432), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57513, 57556), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.3]', '[0.2]'], {}), '([0.5], [0.3], [0.2])\n', (57535, 57556), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57565, 57608), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.2]', '[0.3]'], {}), '([0.5], [0.2], [0.3])\n', (57587, 57608), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57744, 57787), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3]', '[0.3]', '[0.4]'], {}), '([0.3], [0.3], [0.4])\n', (57766, 57787), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57796, 57839), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.4]', '[0.2]'], {}), '([0.4], [0.4], [0.2])\n', (57818, 57839), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57905, 57948), 
'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3]', '[0.4]', '[0.3]'], {}), '([0.3], [0.4], [0.3])\n', (57927, 57948), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((57957, 58000), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.3]', '[0.3]'], {}), '([0.4], [0.3], [0.3])\n', (57979, 58000), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58045, 58088), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0]', '[0.0]', '[0.0]'], {}), '([1.0], [0.0], [0.0])\n', (58067, 58088), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58097, 58140), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]', '[1.0]'], {}), '([0.0], [0.0], [1.0])\n', (58119, 58140), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58186, 58229), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.5]', '[0.0]'], {}), '([0.5], [0.5], [0.0])\n', (58208, 58229), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58238, 58281), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]', '[1.0]'], {}), '([0.0], [0.0], [1.0])\n', (58260, 58281), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58347, 58390), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.2]', '[0.4]'], {}), '([0.4], [0.2], [0.4])\n', (58369, 58390), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58399, 58442), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.3]', '[0.2]'], {}), '([0.5], [0.3], [0.2])\n', (58421, 58442), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58508, 58551), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.2]', '[0.4]'], {}), '([0.4], [0.2], [0.4])\n', (58530, 58551), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58560, 58603), 'fsmpy.sets.IntuitionisticFuzzySet', 
'IntuitionisticFuzzySet', (['[0.5]', '[0.2]', '[0.3]'], {}), '([0.5], [0.2], [0.3])\n', (58582, 58603), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58669, 58714), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.87]', '[0.13]'], {}), '([0.0], [0.87], [0.13])\n', (58691, 58714), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58723, 58769), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.28]', '[0.55]', '[0.17]'], {}), '([0.28], [0.55], [0.17])\n', (58745, 58769), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58835, 58880), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6]', '[0.87]', '[-0.4]'], {}), '([0.6], [0.87], [-0.4])\n', (58857, 58880), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((58889, 58935), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.28]', '[0.55]', '[0.17]'], {}), '([0.28], [0.55], [0.17])\n', (58911, 58935), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59026, 59099), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]', '[0.0, 0.2, 0.2]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2])\n', (59048, 59099), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59109, 59188), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.35, 0.45, 0.55]', '[0.15, 0.25, 0.35]', '[0.5, 0.3, 0.1]'], {}), '([0.35, 0.45, 0.55], [0.15, 0.25, 0.35], [0.5, 0.3, 0.1])\n', (59131, 59188), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59198, 59277), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.25, 0.35, 0.45]', '[0.25, 0.35, 0.45]', '[0.5, 0.3, 0.1]'], {}), '([0.25, 0.35, 0.45], [0.25, 0.35, 0.45], [0.5, 0.3, 0.1])\n', (59220, 59277), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59286, 59359), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.4, 
0.5]', '[0.2, 0.3, 0.4]', '[0.5, 0.3, 0.1]'], {}), '([0.3, 0.4, 0.5], [0.2, 0.3, 0.4], [0.5, 0.3, 0.1])\n', (59308, 59359), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59557, 59630), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]', '[0.0, 0.2, 0.2]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1], [0.0, 0.2, 0.2])\n', (59579, 59630), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59640, 59713), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.4, 0.2]', '[0.5, 0.4, 0.6]', '[0.2, 0.2, 0.2]'], {}), '([0.3, 0.4, 0.2], [0.5, 0.4, 0.6], [0.2, 0.2, 0.2])\n', (59662, 59713), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59723, 59796), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.3, 0.2]', '[0.4, 0.5, 0.6]', '[0.2, 0.2, 0.2]'], {}), '([0.4, 0.3, 0.2], [0.4, 0.5, 0.6], [0.2, 0.2, 0.2])\n', (59745, 59796), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((59805, 59878), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.4, 0.5]', '[0.3, 0.4, 0.5]', '[0.4, 0.2, 0.0]'], {}), '([0.3, 0.4, 0.5], [0.3, 0.4, 0.5], [0.4, 0.2, 0.0])\n', (59827, 59878), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60089, 60125), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3]', '[0.3]'], {}), '([0.3], [0.3])\n', (60111, 60125), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60134, 60170), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.4]'], {}), '([0.4], [0.4])\n', (60156, 60170), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60249, 60285), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3]', '[0.4]'], {}), '([0.3], [0.4])\n', (60271, 60285), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60294, 60330), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.3]'], {}), 
'([0.4], [0.3])\n', (60316, 60330), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60409, 60445), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0]', '[0.0]'], {}), '([1.0], [0.0])\n', (60431, 60445), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60454, 60490), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]'], {}), '([0.0], [0.0])\n', (60476, 60490), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60569, 60605), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (60591, 60605), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60614, 60650), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]'], {}), '([0.0], [0.0])\n', (60636, 60650), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60729, 60765), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.2]'], {}), '([0.4], [0.2])\n', (60751, 60765), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60774, 60810), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.3]'], {}), '([0.5], [0.3])\n', (60796, 60810), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60889, 60925), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4]', '[0.2]'], {}), '([0.4], [0.2])\n', (60911, 60925), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((60934, 60970), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.2]'], {}), '([0.5], [0.2])\n', (60956, 60970), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61059, 61095), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (61081, 61095), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61104, 61140), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', 
'[0.0]'], {}), '([0.0], [0.0])\n', (61126, 61140), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61219, 61255), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6]', '[0.4]'], {}), '([0.6], [0.4])\n', (61241, 61255), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61264, 61300), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.0]'], {}), '([0.0], [0.0])\n', (61286, 61300), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61379, 61416), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0]', '[0.87]'], {}), '([0.0], [0.87])\n', (61401, 61416), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61425, 61463), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.28]', '[0.55]'], {}), '([0.28], [0.55])\n', (61447, 61463), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61542, 61579), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6]', '[0.27]'], {}), '([0.6], [0.27])\n', (61564, 61579), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61588, 61626), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.28]', '[0.55]'], {}), '([0.28], [0.55])\n', (61610, 61626), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61811, 61867), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[1.0, 0.8, 0.7]', '[0.0, 0.0, 0.1]'], {}), '([1.0, 0.8, 0.7], [0.0, 0.0, 0.1])\n', (61833, 61867), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61877, 61933), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 1.0, 0.9]', '[0.1, 0.0, 0.0]'], {}), '([0.8, 1.0, 0.9], [0.1, 0.0, 0.0])\n', (61899, 61933), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((61943, 61999), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.8, 1.0]', '[0.2, 0.0, 0.0]'], {}), '([0.6, 0.8, 1.0], [0.2, 0.0, 0.0])\n', (61965, 61999), False, 
'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62008, 62064), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.8]', '[0.3, 0.2, 0.1]'], {}), '([0.5, 0.6, 0.8], [0.3, 0.2, 0.1])\n', (62030, 62064), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62289, 62345), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.1, 0.5, 0.1]', '[0.1, 0.1, 0.9]'], {}), '([0.1, 0.5, 0.1], [0.1, 0.1, 0.9])\n', (62311, 62345), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62355, 62411), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.0]', '[0.5, 0.3, 0.8]'], {}), '([0.5, 0.7, 0.0], [0.5, 0.3, 0.8])\n', (62377, 62411), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62421, 62477), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.7, 0.1, 0.4]', '[0.2, 0.8, 0.4]'], {}), '([0.7, 0.1, 0.4], [0.2, 0.8, 0.4])\n', (62443, 62477), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62486, 62542), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.0]', '[0.4, 0.2, 0.8]'], {}), '([0.4, 0.6, 0.0], [0.4, 0.2, 0.8])\n', (62508, 62542), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62767, 62833), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.4, 0.7]', '[0.3, 0.0, 0.5, 0.3]'], {}), '([0.5, 0.7, 0.4, 0.7], [0.3, 0.0, 0.5, 0.3])\n', (62789, 62833), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62843, 62909), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.2, 0.7]', '[0.2, 0.1, 0.7, 0.3]'], {}), '([0.5, 0.6, 0.2, 0.7], [0.2, 0.1, 0.7, 0.3])\n', (62865, 62909), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((62919, 62985), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.4, 0.7]', '[0.4, 0.1, 0.6, 0.2]'], {}), '([0.5, 0.7, 0.4, 0.7], [0.4, 0.1, 0.6, 0.2])\n', (62941, 62985), False, 'from 
fsmpy.sets import IntuitionisticFuzzySet\n'), ((62994, 63060), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.7, 0.3, 0.7]', '[0.3, 0.1, 0.6, 0.3]'], {}), '([0.4, 0.7, 0.3, 0.7], [0.3, 0.1, 0.6, 0.3])\n', (63016, 63060), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((63322, 63473), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.5, 0.6, 0.5, 0.7, 0.9, 0.7, 0.8, 0.6, 0.7, 0.7, 0.3]', '[0.0, 0.1, 0.3, 0.0, 0.1, 0.0, 0.1, 0.2, 0.2, 0.0, 0.2, 0.0]'], {}), '([0.3, 0.5, 0.6, 0.5, 0.7, 0.9, 0.7, 0.8, 0.6, 0.7, \n 0.7, 0.3], [0.0, 0.1, 0.3, 0.0, 0.1, 0.0, 0.1, 0.2, 0.2, 0.0, 0.2, 0.0])\n', (63344, 63473), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((63477, 63628), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 0.7, 0.5, 0.4, 0.9, 0.9, 0.8, 0.7, 0.5, 0.9, 0.6, 0.8]', '[0.1, 0.2, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.1, 0.1]'], {}), '([0.8, 0.7, 0.5, 0.4, 0.9, 0.9, 0.8, 0.7, 0.5, 0.9, \n 0.6, 0.8], [0.1, 0.2, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0, 0.3, 0.1, 0.1, 0.1])\n', (63499, 63628), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((63705, 63825), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.4, 0.8, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9]', '[0.2, 0.5, 0.1, 0.3, 0.1, 0.3, 0.2, 0.0, 0.0]'], {}), '([0.6, 0.4, 0.8, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9], [0.2,\n 0.5, 0.1, 0.3, 0.1, 0.3, 0.2, 0.0, 0.0])\n', (63727, 63825), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((63830, 63950), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.6, 0.6, 0.4, 0.5, 0.9, 0.5, 0.8]', '[0.3, 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.1, 0.0]'], {}), '([0.5, 0.7, 0.6, 0.6, 0.4, 0.5, 0.9, 0.5, 0.8], [0.3,\n 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.1, 0.0])\n', (63852, 63950), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((63955, 64075), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.5, 0.3, 0.7, 0.5, 0.2, 
0.5, 0.1]', '[0.4, 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8]'], {}), '([0.4, 0.6, 0.5, 0.3, 0.7, 0.5, 0.2, 0.5, 0.1], [0.4,\n 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8])\n', (63977, 64075), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((64294, 64414), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.4, 0.8, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9]', '[0.2, 0.5, 0.1, 0.3, 0.1, 0.3, 0.2, 0.0, 0.0]'], {}), '([0.6, 0.4, 0.8, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9], [0.2,\n 0.5, 0.1, 0.3, 0.1, 0.3, 0.2, 0.0, 0.0])\n', (64316, 64414), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((64420, 64540), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.7, 0.6, 0.6, 0.4, 0.5, 0.9, 0.5, 0.8]', '[0.3, 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.1, 0.0]'], {}), '([0.5, 0.7, 0.6, 0.6, 0.4, 0.5, 0.9, 0.5, 0.8], [0.3,\n 0.0, 0.3, 0.2, 0.0, 0.1, 0.0, 0.1, 0.0])\n', (64442, 64540), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((64546, 64666), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.2, 0.6, 0.5, 0.3, 0.7, 0.4, 0.2, 0.5, 0.1]', '[0.4, 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8]'], {}), '([0.2, 0.6, 0.5, 0.3, 0.7, 0.4, 0.2, 0.5, 0.1], [0.4,\n 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8])\n', (64568, 64666), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((64672, 64792), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.5, 0.3, 0.1, 0.3, 0.6, 0.3, 0.0, 0.2]', '[0.4, 0.0, 0.6, 0.8, 0.0, 0.2, 0.5, 0.2, 0.4]'], {}), '([0.5, 0.5, 0.3, 0.1, 0.3, 0.6, 0.3, 0.0, 0.2], [0.4,\n 0.0, 0.6, 0.8, 0.0, 0.2, 0.5, 0.2, 0.4])\n', (64694, 64792), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((64798, 64918), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.6, 0.2, 0.4, 0.2, 0.5, 0.3, 0.4, 0.2]', '[0.5, 0.0, 0.6, 0.5, 0.4, 0.0, 0.1, 0.0, 0.6]'], {}), '([0.3, 0.6, 0.2, 0.4, 0.2, 0.5, 0.3, 0.4, 0.2], [0.5,\n 0.0, 0.6, 0.5, 0.4, 0.0, 0.1, 0.0, 0.6])\n', (64820, 64918), 
False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((64924, 65044), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.4, 0.6, 0.0, 0.3, 0.4, 0.1, 0.2, 0.4]', '[0.0, 0.0, 0.2, 0.2, 0.0, 0.0, 0.5, 0.0, 0.4]'], {}), '([0.5, 0.4, 0.6, 0.0, 0.3, 0.4, 0.1, 0.2, 0.4], [0.0,\n 0.0, 0.2, 0.2, 0.0, 0.0, 0.5, 0.0, 0.4])\n', (64946, 65044), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65050, 65170), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.6, 0.5, 0.3, 0.7, 0.5, 0.2, 0.5, 0.1]', '[0.4, 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8]'], {}), '([0.4, 0.6, 0.5, 0.3, 0.7, 0.5, 0.2, 0.5, 0.1], [0.4,\n 0.2, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8])\n', (65072, 65170), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65176, 65296), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.3, 0.7, 0.6, 0.5, 0.9, 0.7, 0.6, 0.7, 0.7]', '[0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.3, 0.1, 0.2]'], {}), '([0.3, 0.7, 0.6, 0.5, 0.9, 0.7, 0.6, 0.7, 0.7], [0.0,\n 0.1, 0.2, 0.1, 0.0, 0.0, 0.3, 0.1, 0.2])\n', (65198, 65296), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65302, 65422), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.8, 0.9, 0.5, 0.7, 0.9, 0.9, 0.5, 0.8, 0.6]', '[0.1, 0.0, 0.3, 0.2, 0.0, 0.1, 0.2, 0.0, 0.1]'], {}), '([0.8, 0.9, 0.5, 0.7, 0.9, 0.9, 0.5, 0.8, 0.6], [0.1,\n 0.0, 0.3, 0.2, 0.0, 0.1, 0.2, 0.0, 0.1])\n', (65324, 65422), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65428, 65548), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.8, 0.3, 0.4, 0.7, 0.8, 0.0, 0.4, 0.0]', '[0.0, 0.2, 0.0, 0.1, 0.0, 0.1, 0.8, 0.3, 0.7]'], {}), '([0.5, 0.8, 0.3, 0.4, 0.7, 0.8, 0.0, 0.4, 0.0], [0.0,\n 0.2, 0.0, 0.1, 0.0, 0.1, 0.8, 0.3, 0.7])\n', (65450, 65548), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65555, 65675), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.7, 0.4, 0.6, 0.5, 0.7, 0.6, 0.8, 0.6, 
0.9]', '[0.2, 0.5, 0.1, 0.3, 0.1, 0.0, 0.2, 0.0, 0.0]'], {}), '([0.7, 0.4, 0.6, 0.5, 0.7, 0.6, 0.8, 0.6, 0.9], [0.2,\n 0.5, 0.1, 0.3, 0.1, 0.0, 0.2, 0.0, 0.0])\n', (65577, 65675), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65682, 65802), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.7, 0.6, 0.6, 0.4, 0.5, 0.7, 0.5, 0.8]', '[0.3, 0.0, 0.3, 0.2, 0.0, 0.1, 0.2, 0.1, 0.0]'], {}), '([0.4, 0.7, 0.6, 0.6, 0.4, 0.5, 0.7, 0.5, 0.8], [0.3,\n 0.0, 0.3, 0.2, 0.0, 0.1, 0.2, 0.1, 0.0])\n', (65704, 65802), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65809, 65929), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.6, 0.5, 0.5, 0.3, 0.5, 0.4, 0.2, 0.5, 0.1]', '[0.4, 0.0, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8]'], {}), '([0.6, 0.5, 0.5, 0.3, 0.5, 0.4, 0.2, 0.5, 0.1], [0.4,\n 0.0, 0.1, 0.2, 0.1, 0.4, 0.0, 0.0, 0.8])\n', (65831, 65929), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((65936, 66056), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.5, 0.6, 0.4, 0.5, 0.3, 0.2, 0.5, 0.4, 0.2]', '[0.3, 0.0, 0.3, 0.4, 0.2, 0.1, 0.0, 0.0, 0.5]'], {}), '([0.5, 0.6, 0.4, 0.5, 0.3, 0.2, 0.5, 0.4, 0.2], [0.3,\n 0.0, 0.3, 0.4, 0.2, 0.1, 0.0, 0.0, 0.5])\n', (65958, 66056), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((66063, 66183), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.0, 0.4, 0.5, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3]', '[0.5, 0.3, 0.2, 0.1, 0.2, 0.1, 0.1, 0.3, 0.5]'], {}), '([0.0, 0.4, 0.5, 0.4, 0.3, 0.4, 0.3, 0.4, 0.3], [0.5,\n 0.3, 0.2, 0.1, 0.2, 0.1, 0.1, 0.3, 0.5])\n', (66085, 66183), False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((66190, 66310), 'fsmpy.sets.IntuitionisticFuzzySet', 'IntuitionisticFuzzySet', (['[0.4, 0.2, 0.0, 0.0, 0.5, 0.4, 0.5, 0.2, 0.4]', '[0.0, 0.3, 0.2, 0.3, 0.2, 0.3, 0.3, 0.3, 0.4]'], {}), '([0.4, 0.2, 0.0, 0.0, 0.5, 0.4, 0.5, 0.2, 0.4], [0.0,\n 0.3, 0.2, 0.3, 0.2, 0.3, 0.3, 0.3, 0.4])\n', (66212, 66310), 
False, 'from fsmpy.sets import IntuitionisticFuzzySet\n'), ((2128, 2171), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', (['A1', 'B'], {'p': '(1)', 'weights': 'None'}), '(A1, B, p=1, weights=None)\n', (2145, 2171), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((2310, 2353), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', (['A3', 'B'], {'p': '(1)', 'weights': 'None'}), '(A3, B, p=1, weights=None)\n', (2327, 2353), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((2397, 2440), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', (['A1', 'B'], {'p': '(2)', 'weights': 'None'}), '(A1, B, p=2, weights=None)\n', (2414, 2440), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((2483, 2526), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', (['A2', 'B'], {'p': '(2)', 'weights': 'None'}), '(A2, B, p=2, weights=None)\n', (2500, 2526), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((2569, 2612), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', (['A3', 'B'], {'p': '(2)', 'weights': 'None'}), '(A3, B, p=2, weights=None)\n', (2586, 2612), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((2656, 2710), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', (['A1', 'B'], {'p': '(2)', 'weights': '[0.5, 0.3, 0.2]'}), '(A1, B, p=2, weights=[0.5, 0.3, 0.2])\n', (2673, 2710), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((2854, 2908), 'fsmpy.similarities.dengfeng_chuntian', 'dengfeng_chuntian', 
(['A3', 'B'], {'p': '(2)', 'weights': '[0.5, 0.3, 0.2]'}), '(A3, B, p=2, weights=[0.5, 0.3, 0.2])\n', (2871, 2908), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((3256, 3317), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A1', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_1', 'p': '(1)'}), '(A1, B, similarity_type=LIANG_SHI_SIMILARITY_1, p=1)\n', (3265, 3317), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((3360, 3421), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A2', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_1', 'p': '(1)'}), '(A2, B, similarity_type=LIANG_SHI_SIMILARITY_1, p=1)\n', (3369, 3421), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((3464, 3525), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A3', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_1', 'p': '(1)'}), '(A3, B, similarity_type=LIANG_SHI_SIMILARITY_1, p=1)\n', (3473, 3525), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((3593, 3654), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A1', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_2', 'p': '(1)'}), '(A1, B, similarity_type=LIANG_SHI_SIMILARITY_2, p=1)\n', (3602, 3654), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((3697, 3758), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A2', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_2', 'p': '(1)'}), '(A2, B, similarity_type=LIANG_SHI_SIMILARITY_2, p=1)\n', (3706, 3758), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((3801, 3862), 
'fsmpy.similarities.liang_shi', 'liang_shi', (['A3', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_2', 'p': '(1)'}), '(A3, B, similarity_type=LIANG_SHI_SIMILARITY_2, p=1)\n', (3810, 3862), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((3922, 4029), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A1', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_3', 'p': '(1)', 'omegas': '[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]'}), '(A1, B, similarity_type=LIANG_SHI_SIMILARITY_3, p=1, omegas=[1.0 /\n 3.0, 1.0 / 3.0, 1.0 / 3.0])\n', (3931, 4029), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((4068, 4175), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A2', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_3', 'p': '(1)', 'omegas': '[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]'}), '(A2, B, similarity_type=LIANG_SHI_SIMILARITY_3, p=1, omegas=[1.0 /\n 3.0, 1.0 / 3.0, 1.0 / 3.0])\n', (4077, 4175), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((4214, 4321), 'fsmpy.similarities.liang_shi', 'liang_shi', (['A3', 'B'], {'similarity_type': 'LIANG_SHI_SIMILARITY_3', 'p': '(1)', 'omegas': '[1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]'}), '(A3, B, similarity_type=LIANG_SHI_SIMILARITY_3, p=1, omegas=[1.0 /\n 3.0, 1.0 / 3.0, 1.0 / 3.0])\n', (4223, 4321), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((4652, 4672), 'fsmpy.similarities.park_kwun_lim', 'park_kwun_lim', (['A1', 'B'], {}), '(A1, B)\n', (4665, 4672), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((4716, 4736), 'fsmpy.similarities.park_kwun_lim', 'park_kwun_lim', (['A2', 'B'], {}), '(A2, B)\n', 
(4729, 4736), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((4780, 4800), 'fsmpy.similarities.park_kwun_lim', 'park_kwun_lim', (['A3', 'B'], {}), '(A3, B)\n', (4793, 4800), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5475, 5493), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {}), '(A1, B)\n', (5486, 5493), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5536, 5554), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {}), '(A2, B)\n', (5547, 5554), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5598, 5616), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A3', 'B'], {}), '(A3, B)\n', (5609, 5616), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5661, 5700), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {'similarity_type': '"""c"""'}), "(A1, B, similarity_type='c')\n", (5672, 5700), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5743, 5782), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {'similarity_type': '"""c"""'}), "(A2, B, similarity_type='c')\n", (5754, 5782), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5826, 5865), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A3', 'B'], 
{'similarity_type': '"""c"""'}), "(A3, B, similarity_type='c')\n", (5837, 5865), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5910, 5949), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {'similarity_type': '"""e"""'}), "(A1, B, similarity_type='e')\n", (5921, 5949), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((5992, 6031), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {'similarity_type': '"""e"""'}), "(A2, B, similarity_type='e')\n", (6003, 6031), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6075, 6114), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A3', 'B'], {'similarity_type': '"""e"""'}), "(A3, B, similarity_type='e')\n", (6086, 6114), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6373, 6391), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {}), '(A1, B)\n', (6384, 6391), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6435, 6453), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {}), '(A2, B)\n', (6446, 6453), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6498, 6537), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {'similarity_type': '"""c"""'}), "(A1, B, similarity_type='c')\n", (6509, 6537), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, 
hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6581, 6620), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {'similarity_type': '"""c"""'}), "(A2, B, similarity_type='c')\n", (6592, 6620), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6665, 6704), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {'similarity_type': '"""e"""'}), "(A1, B, similarity_type='e')\n", (6676, 6704), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((6748, 6787), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {'similarity_type': '"""e"""'}), "(A2, B, similarity_type='e')\n", (6759, 6787), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7112, 7130), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {}), '(A1, B)\n', (7123, 7130), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7174, 7192), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {}), '(A2, B)\n', (7185, 7192), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7236, 7254), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A3', 'B'], {}), '(A3, B)\n', (7247, 7254), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7299, 7338), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {'similarity_type': '"""c"""'}), "(A1, B, similarity_type='c')\n", (7310, 
7338), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7382, 7421), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {'similarity_type': '"""c"""'}), "(A2, B, similarity_type='c')\n", (7393, 7421), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7465, 7504), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A3', 'B'], {'similarity_type': '"""c"""'}), "(A3, B, similarity_type='c')\n", (7476, 7504), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7549, 7588), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A1', 'B'], {'similarity_type': '"""e"""'}), "(A1, B, similarity_type='e')\n", (7560, 7588), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7632, 7671), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A2', 'B'], {'similarity_type': '"""e"""'}), "(A2, B, similarity_type='e')\n", (7643, 7671), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((7715, 7754), 'fsmpy.similarities.hung_yang_1', 'hung_yang_1', (['A3', 'B'], {'similarity_type': '"""e"""'}), "(A3, B, similarity_type='e')\n", (7726, 7754), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((8095, 8104), 'fsmpy.similarities.ye', 'ye', (['C1', 'Q'], {}), '(C1, Q)\n', (8097, 8104), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, 
julian_hung_lin, zhang_fu\n'), ((8149, 8158), 'fsmpy.similarities.ye', 'ye', (['C2', 'Q'], {}), '(C2, Q)\n', (8151, 8158), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((8203, 8212), 'fsmpy.similarities.ye', 'ye', (['C3', 'Q'], {}), '(C3, Q)\n', (8205, 8212), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((8291, 8317), 'fsmpy.similarities.ye', 'ye', (['C1', 'Q'], {'weights': 'weights'}), '(C1, Q, weights=weights)\n', (8293, 8317), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((8362, 8388), 'fsmpy.similarities.ye', 'ye', (['C2', 'Q'], {'weights': 'weights'}), '(C2, Q, weights=weights)\n', (8364, 8388), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((8433, 8459), 'fsmpy.similarities.ye', 'ye', (['C3', 'Q'], {'weights': 'weights'}), '(C3, Q, weights=weights)\n', (8435, 8459), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9120, 9144), 'fsmpy.similarities.ye', 'ye', (['patient', 'viral_fever'], {}), '(patient, viral_fever)\n', (9122, 9144), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9189, 9209), 'fsmpy.similarities.ye', 'ye', (['patient', 'malaria'], {}), '(patient, malaria)\n', (9191, 9209), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9262, 9282), 
'fsmpy.similarities.ye', 'ye', (['patient', 'typhoid'], {}), '(patient, typhoid)\n', (9264, 9282), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9327, 9355), 'fsmpy.similarities.ye', 'ye', (['patient', 'stomach_problem'], {}), '(patient, stomach_problem)\n', (9329, 9355), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9400, 9426), 'fsmpy.similarities.ye', 'ye', (['patient', 'chest_problem'], {}), '(patient, chest_problem)\n', (9402, 9426), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9614, 9634), 'fsmpy.similarities.hwang_yang', 'hwang_yang', (['X1A', 'X1B'], {}), '(X1A, X1B)\n', (9624, 9634), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9781, 9801), 'fsmpy.similarities.hwang_yang', 'hwang_yang', (['X2A', 'X2B'], {}), '(X2A, X2B)\n', (9791, 9801), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((9940, 9960), 'fsmpy.similarities.hwang_yang', 'hwang_yang', (['X3A', 'X3B'], {}), '(X3A, X3B)\n', (9950, 9960), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((10099, 10119), 'fsmpy.similarities.hwang_yang', 'hwang_yang', (['X4A', 'X4B'], {}), '(X4A, X4B)\n', (10109, 10119), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((10258, 10278), 'fsmpy.similarities.hwang_yang', 
'hwang_yang', (['X5A', 'X5B'], {}), '(X5A, X5B)\n', (10268, 10278), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((10417, 10437), 'fsmpy.similarities.hwang_yang', 'hwang_yang', (['X6A', 'X6B'], {}), '(X6A, X6B)\n', (10427, 10437), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((10855, 10873), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {}), '(A1, B)\n', (10866, 10873), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((10916, 10934), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {}), '(A2, B)\n', (10927, 10934), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((10978, 10996), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A3', 'B'], {}), '(A3, B)\n', (10989, 10996), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11041, 11080), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'similarity_type': '"""c"""'}), "(A1, B, similarity_type='c')\n", (11052, 11080), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11123, 11162), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'similarity_type': '"""c"""'}), "(A2, B, similarity_type='c')\n", (11134, 11162), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11206, 11245), 
'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A3', 'B'], {'similarity_type': '"""c"""'}), "(A3, B, similarity_type='c')\n", (11217, 11245), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11290, 11329), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'similarity_type': '"""e"""'}), "(A1, B, similarity_type='e')\n", (11301, 11329), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11372, 11411), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'similarity_type': '"""e"""'}), "(A2, B, similarity_type='e')\n", (11383, 11411), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11455, 11494), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A3', 'B'], {'similarity_type': '"""e"""'}), "(A3, B, similarity_type='e')\n", (11466, 11494), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11804, 11822), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {}), '(A1, B)\n', (11815, 11822), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11866, 11884), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {}), '(A2, B)\n', (11877, 11884), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11928, 11953), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'a': '(1.5)'}), '(A1, B, a=1.5)\n', (11939, 11953), False, 'from fsmpy.similarities 
import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((11997, 12022), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'a': '(1.5)'}), '(A2, B, a=1.5)\n', (12008, 12022), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12067, 12106), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'similarity_type': '"""c"""'}), "(A1, B, similarity_type='c')\n", (12078, 12106), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12150, 12189), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'similarity_type': '"""c"""'}), "(A2, B, similarity_type='c')\n", (12161, 12189), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12234, 12273), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'similarity_type': '"""e"""'}), "(A1, B, similarity_type='e')\n", (12245, 12273), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12317, 12356), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'similarity_type': '"""e"""'}), "(A2, B, similarity_type='e')\n", (12328, 12356), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12773, 12791), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {}), '(A1, B)\n', (12784, 12791), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), 
((12835, 12853), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {}), '(A2, B)\n', (12846, 12853), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12897, 12915), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A3', 'B'], {}), '(A3, B)\n', (12908, 12915), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((12960, 12999), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'similarity_type': '"""c"""'}), "(A1, B, similarity_type='c')\n", (12971, 12999), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13043, 13082), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'similarity_type': '"""c"""'}), "(A2, B, similarity_type='c')\n", (13054, 13082), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13126, 13165), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A3', 'B'], {'similarity_type': '"""c"""'}), "(A3, B, similarity_type='c')\n", (13137, 13165), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13210, 13249), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A1', 'B'], {'similarity_type': '"""e"""'}), "(A1, B, similarity_type='e')\n", (13221, 13249), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13293, 13332), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A2', 'B'], {'similarity_type': '"""e"""'}), "(A2, B, similarity_type='e')\n", 
(13304, 13332), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13376, 13415), 'fsmpy.similarities.hung_yang_2', 'hung_yang_2', (['A3', 'B'], {'similarity_type': '"""e"""'}), "(A3, B, similarity_type='e')\n", (13387, 13415), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13928, 13943), 'fsmpy.similarities.zhang_fu', 'zhang_fu', (['A', 'A1'], {}), '(A, A1)\n', (13936, 13943), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((13987, 14002), 'fsmpy.similarities.zhang_fu', 'zhang_fu', (['A', 'A2'], {}), '(A, A2)\n', (13995, 14002), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14046, 14061), 'fsmpy.similarities.zhang_fu', 'zhang_fu', (['A', 'A3'], {}), '(A, A3)\n', (14054, 14061), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14105, 14120), 'fsmpy.similarities.zhang_fu', 'zhang_fu', (['A', 'A4'], {}), '(A, A4)\n', (14113, 14120), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14470, 14530), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_1'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_1)\n', (14481, 14530), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14580, 14640), 'fsmpy.similarities.hung_yang_3', 
'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_1'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_1)\n', (14591, 14640), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14684, 14744), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_1'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_1)\n', (14695, 14744), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14789, 14849), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_2'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_2)\n', (14800, 14849), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((14899, 14959), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_2'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_2)\n', (14910, 14959), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15003, 15063), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_2'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_2)\n', (15014, 15063), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15108, 15168), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_3'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_3)\n', (15119, 15168), False, 'from fsmpy.similarities import 
chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15218, 15278), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_3'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_3)\n', (15229, 15278), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15322, 15382), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_3'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_3)\n', (15333, 15382), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15427, 15487), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_4'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_4)\n', (15438, 15487), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15531, 15591), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_4'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_4)\n', (15542, 15591), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15643, 15703), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_4'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_4)\n', (15654, 15703), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15756, 15816), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', 
(['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_5'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_5)\n', (15767, 15816), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15866, 15926), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_5'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_5)\n', (15877, 15926), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((15970, 16030), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_5'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_5)\n', (15981, 16030), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16075, 16135), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_6'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_6)\n', (16086, 16135), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16185, 16245), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_6'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_6)\n', (16196, 16245), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16289, 16349), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_6'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_6)\n', (16300, 16349), False, 'from fsmpy.similarities import chen_1, 
hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16394, 16454), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A1', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_7'}), '(A1, B, similarity_type=HUNG_YANG_3_SIMILARITY_7)\n', (16405, 16454), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16504, 16564), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A2', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_7'}), '(A2, B, similarity_type=HUNG_YANG_3_SIMILARITY_7)\n', (16515, 16564), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16608, 16668), 'fsmpy.similarities.hung_yang_3', 'hung_yang_3', (['A3', 'B'], {'similarity_type': 'HUNG_YANG_3_SIMILARITY_7'}), '(A3, B, similarity_type=HUNG_YANG_3_SIMILARITY_7)\n', (16619, 16668), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((16904, 16951), 'fsmpy.similarities.chen_1', 'chen_1', (['A', 'B'], {'weights': '[0.5, 0.8, 1.0, 0.7, 1.0]'}), '(A, B, weights=[0.5, 0.8, 1.0, 0.7, 1.0])\n', (16910, 16951), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17303, 17326), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'p': '(2)'}), '(A1, B, p=2)\n', (17314, 17326), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17370, 17393), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'p': '(2)'}), '(A2, B, p=2)\n', (17381, 17393), False, 'from fsmpy.similarities import 
chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17437, 17460), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A3', 'B'], {'p': '(2)'}), '(A3, B, p=2)\n', (17448, 17460), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17505, 17549), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A1, B, similarity_type='c', p=2)\n", (17516, 17549), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17593, 17637), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A2, B, similarity_type='c', p=2)\n", (17604, 17637), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17681, 17725), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A3', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A3, B, similarity_type='c', p=2)\n", (17692, 17725), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17770, 17814), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A1, B, similarity_type='e', p=2)\n", (17781, 17814), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17858, 17902), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A2, B, similarity_type='e', p=2)\n", (17869, 17902), False, 'from fsmpy.similarities import 
chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((17946, 17990), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A3', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A3, B, similarity_type='e', p=2)\n", (17957, 17990), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18316, 18339), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'p': '(2)'}), '(A1, B, p=2)\n', (18327, 18339), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18383, 18406), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'p': '(2)'}), '(A2, B, p=2)\n', (18394, 18406), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18450, 18473), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A3', 'B'], {'p': '(2)'}), '(A3, B, p=2)\n', (18461, 18473), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18518, 18562), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A1, B, similarity_type='c', p=2)\n", (18529, 18562), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18606, 18650), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A2, B, similarity_type='c', p=2)\n", (18617, 18650), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, 
julian_hung_lin, zhang_fu\n'), ((18694, 18738), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A3', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A3, B, similarity_type='c', p=2)\n", (18705, 18738), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18783, 18827), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A1, B, similarity_type='e', p=2)\n", (18794, 18827), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18871, 18915), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A2, B, similarity_type='e', p=2)\n", (18882, 18915), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((18959, 19003), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A3', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A3, B, similarity_type='e', p=2)\n", (18970, 19003), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19262, 19285), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'p': '(2)'}), '(A1, B, p=2)\n', (19273, 19285), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19329, 19352), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'p': '(2)'}), '(A2, B, p=2)\n', (19340, 19352), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19397, 19441), 
'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A1, B, similarity_type='c', p=2)\n", (19408, 19441), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19485, 19529), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'similarity_type': '"""c"""', 'p': '(2)'}), "(A2, B, similarity_type='c', p=2)\n", (19496, 19529), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19574, 19618), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A1', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A1, B, similarity_type='e', p=2)\n", (19585, 19618), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19662, 19706), 'fsmpy.similarities.hung_yang_4', 'hung_yang_4', (['A2', 'B'], {'similarity_type': '"""e"""', 'p': '(2)'}), "(A2, B, similarity_type='e', p=2)\n", (19673, 19706), False, 'from fsmpy.similarities import chen_1, hung_yang_4, hung_yang_3, hung_yang_2, hwang_yang, park_kwun_lim, ye, hung_yang_1, julian_hung_lin, zhang_fu\n'), ((19919, 19947), 'fsmpy.similarities.hong_kim', 'hong_kim', (['A', 'B'], {'weights': 'None'}), '(A, B, weights=None)\n', (19927, 19947), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((20160, 20186), 'fsmpy.similarities.chen_2', 'chen_2', (['A', 'B'], {'weights': 'None'}), '(A, B, weights=None)\n', (20166, 20186), False, 'from fsmpy.similarities import liu, chen_2\n'), ((20581, 20596), 'fsmpy.similarities.liu', 'liu', (['A1', 'B'], {'p': '(2)'}), '(A1, B, p=2)\n', (20584, 20596), False, 'from fsmpy.similarities import liu, chen_2\n'), ((20639, 20654), 
'fsmpy.similarities.liu', 'liu', (['A2', 'B'], {'p': '(2)'}), '(A2, B, p=2)\n', (20642, 20654), False, 'from fsmpy.similarities import liu, chen_2\n'), ((20697, 20712), 'fsmpy.similarities.liu', 'liu', (['A3', 'B'], {'p': '(2)'}), '(A3, B, p=2)\n', (20700, 20712), False, 'from fsmpy.similarities import liu, chen_2\n'), ((21054, 21102), 'fsmpy.similarities.iancu', 'iancu', (['A1', 'B'], {'similarity_type': 'IANCU_SIMILARITY_1'}), '(A1, B, similarity_type=IANCU_SIMILARITY_1)\n', (21059, 21102), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21146, 21194), 'fsmpy.similarities.iancu', 'iancu', (['A2', 'B'], {'similarity_type': 'IANCU_SIMILARITY_1'}), '(A2, B, similarity_type=IANCU_SIMILARITY_1)\n', (21151, 21194), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21238, 21286), 'fsmpy.similarities.iancu', 'iancu', (['A3', 'B'], {'similarity_type': 'IANCU_SIMILARITY_1'}), '(A3, B, similarity_type=IANCU_SIMILARITY_1)\n', (21243, 21286), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21331, 21379), 'fsmpy.similarities.iancu', 'iancu', (['A1', 'B'], {'similarity_type': 'IANCU_SIMILARITY_2'}), '(A1, B, similarity_type=IANCU_SIMILARITY_2)\n', (21336, 21379), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21423, 21471), 'fsmpy.similarities.iancu', 'iancu', (['A2', 'B'], {'similarity_type': 'IANCU_SIMILARITY_2'}), '(A2, B, similarity_type=IANCU_SIMILARITY_2)\n', (21428, 21471), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21515, 21563), 'fsmpy.similarities.iancu', 'iancu', (['A3', 'B'], {'similarity_type': 'IANCU_SIMILARITY_2'}), '(A3, 
B, similarity_type=IANCU_SIMILARITY_2)\n', (21520, 21563), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21619, 21667), 'fsmpy.similarities.iancu', 'iancu', (['A1', 'B'], {'similarity_type': 'IANCU_SIMILARITY_9'}), '(A1, B, similarity_type=IANCU_SIMILARITY_9)\n', (21624, 21667), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21711, 21759), 'fsmpy.similarities.iancu', 'iancu', (['A2', 'B'], {'similarity_type': 'IANCU_SIMILARITY_9'}), '(A2, B, similarity_type=IANCU_SIMILARITY_9)\n', (21716, 21759), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21803, 21851), 'fsmpy.similarities.iancu', 'iancu', (['A3', 'B'], {'similarity_type': 'IANCU_SIMILARITY_9'}), '(A3, B, similarity_type=IANCU_SIMILARITY_9)\n', (21808, 21851), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((21907, 21956), 'fsmpy.similarities.iancu', 'iancu', (['A1', 'B'], {'similarity_type': 'IANCU_SIMILARITY_10'}), '(A1, B, similarity_type=IANCU_SIMILARITY_10)\n', (21912, 21956), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((22000, 22049), 'fsmpy.similarities.iancu', 'iancu', (['A2', 'B'], {'similarity_type': 'IANCU_SIMILARITY_10'}), '(A2, B, similarity_type=IANCU_SIMILARITY_10)\n', (22005, 22049), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((22093, 22142), 'fsmpy.similarities.iancu', 'iancu', (['A3', 'B'], {'similarity_type': 'IANCU_SIMILARITY_10'}), '(A3, B, similarity_type=IANCU_SIMILARITY_10)\n', (22098, 22142), False, 'from fsmpy.similarities import mitchell, iancu, 
liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((22385, 22397), 'fsmpy.similarities.iancu', 'iancu', (['A1', 'B'], {}), '(A1, B)\n', (22390, 22397), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((22441, 22453), 'fsmpy.similarities.iancu', 'iancu', (['A2', 'B'], {}), '(A2, B)\n', (22446, 22453), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((22498, 22546), 'fsmpy.similarities.iancu', 'iancu', (['A1', 'B'], {'similarity_type': 'IANCU_SIMILARITY_7'}), '(A1, B, similarity_type=IANCU_SIMILARITY_7)\n', (22503, 22546), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((22598, 22646), 'fsmpy.similarities.iancu', 'iancu', (['A2', 'B'], {'similarity_type': 'IANCU_SIMILARITY_7'}), '(A2, B, similarity_type=IANCU_SIMILARITY_7)\n', (22603, 22646), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((23070, 23094), 'fsmpy.similarities.song_wang_lei_xue', 'song_wang_lei_xue', (['A1', 'B'], {}), '(A1, B)\n', (23087, 23094), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23138, 23162), 'fsmpy.similarities.song_wang_lei_xue', 'song_wang_lei_xue', (['A2', 'B'], {}), '(A2, B)\n', (23155, 23162), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23206, 23230), 'fsmpy.similarities.song_wang_lei_xue', 'song_wang_lei_xue', (['A3', 'B'], {}), '(A3, B)\n', (23223, 23230), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23275, 23324), 
'fsmpy.similarities.song_wang_lei_xue', 'song_wang_lei_xue', (['A1', 'B'], {'weights': '[0.5, 0.3, 0.2]'}), '(A1, B, weights=[0.5, 0.3, 0.2])\n', (23292, 23324), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23368, 23417), 'fsmpy.similarities.song_wang_lei_xue', 'song_wang_lei_xue', (['A2', 'B'], {'weights': '[0.5, 0.3, 0.2]'}), '(A2, B, weights=[0.5, 0.3, 0.2])\n', (23385, 23417), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23461, 23510), 'fsmpy.similarities.song_wang_lei_xue', 'song_wang_lei_xue', (['A3', 'B'], {'weights': '[0.5, 0.3, 0.2]'}), '(A3, B, weights=[0.5, 0.3, 0.2])\n', (23478, 23510), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23862, 23916), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'], {}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (23875, 23916), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((23960, 24014), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'], {}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (23973, 24014), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24058, 24112), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'], {}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (24071, 24112), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24157, 24211), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 
'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'], {}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (24170, 24211), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24255, 24309), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'], {}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (24268, 24309), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24353, 24407), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'], {}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (24366, 24407), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24452, 24511), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3'], {'p': '(1)'}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (24465, 24511), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24555, 24614), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3'], {'p': '(1)'}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (24568, 24614), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24658, 24717), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3'], {'p': '(1)'}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (24671, 24717), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24762, 24816), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 
'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'], {}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (24775, 24816), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24860, 24914), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'], {}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (24873, 24914), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((24958, 25012), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'], {}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (24971, 25012), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25057, 25111), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'], {}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (25070, 25111), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25155, 25209), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'], {}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (25168, 25209), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25253, 25307), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'], {}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (25266, 25307), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25352, 25411), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3'], {'p': '(1)'}), 
'(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (25365, 25411), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25455, 25514), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3'], {'p': '(1)'}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (25468, 25514), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25558, 25617), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3'], {'p': '(1)'}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (25571, 25617), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25662, 25716), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'], {}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (25675, 25716), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25760, 25814), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'], {}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (25773, 25814), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25858, 25912), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'], {}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (25871, 25912), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((25957, 26016), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1'], {'p': '(1)'}), '(A1, B, 
DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (25970, 26016), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26068, 26127), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1'], {'p': '(1)'}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (26081, 26127), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26179, 26238), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1'], {'p': '(1)'}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (26192, 26238), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26291, 26364), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2'], {'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (26304, 26364), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26408, 26481), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2'], {'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (26421, 26481), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26525, 26598), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2'], {'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (26538, 26598), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, 
deng_jiang_fu\n'), ((26643, 26702), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A1', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3'], {'p': '(1)'}), '(A1, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (26656, 26702), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26746, 26805), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A2', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3'], {'p': '(1)'}), '(A2, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (26759, 26805), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((26849, 26908), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['A3', 'B', 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3'], {'p': '(1)'}), '(A3, B, DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (26862, 26908), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((27133, 27218), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1\n )\n', (27146, 27218), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((27283, 27368), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2\n )\n', (27296, 27368), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((27433, 27523), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(al, viral_fever, similarity_type=\n 
DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (27446, 27523), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((27586, 27671), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1\n )\n', (27599, 27671), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((27736, 27821), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2\n )\n', (27749, 27821), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((27886, 27976), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(al, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (27899, 27976), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28038, 28123), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(al, viral_fever, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4\n )\n', (28051, 28123), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28188, 28278), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(al, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (28201, 28278), False, 
'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28334, 28438), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(al, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (28347, 28438), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28485, 28575), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(al, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (28498, 28575), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28638, 28714), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (28651, 28714), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28784, 28860), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (28797, 28860), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((28930, 29015), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3,\n p=1)\n', (28943, 29015), False, 'from fsmpy.similarities import 
dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29079, 29155), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (29092, 29155), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29225, 29301), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (29238, 29301), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29346, 29431), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3,\n p=1)\n', (29359, 29431), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29494, 29570), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (29507, 29570), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29640, 29725), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1,\n p=1)\n', (29653, 29725), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29772, 29871), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2,\n p=2, u=0.5, v=0.5)\n', (29785, 29871), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((29935, 30020), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(al, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3,\n p=1)\n', (29948, 30020), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30084, 30160), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (30097, 30160), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30230, 30306), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (30243, 30306), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30376, 30461), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_3,\n p=1)\n', (30389, 30461), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30525, 30601), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', 
(['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (30538, 30601), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30671, 30747), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (30684, 30747), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30817, 30902), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_3,\n p=1)\n', (30830, 30902), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((30964, 31040), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (30977, 31040), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((31110, 31195), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_1,\n p=1)\n', (31123, 31195), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((31243, 31342), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': 
'(0.5)'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_2,\n p=2, u=0.5, v=0.5)\n', (31256, 31342), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((31405, 31490), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(al, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_3_3,\n p=1)\n', (31418, 31490), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((31554, 31643), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (31567, 31643), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((31708, 31797), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (31721, 31797), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((31862, 31956), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (31875, 31956), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32019, 32108), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(al, 
stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (32032, 32108), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32173, 32262), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (32186, 32262), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32327, 32421), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (32340, 32421), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32483, 32572), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (32496, 32572), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32636, 32730), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (32649, 32730), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32786, 32894), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': 
'(0.5)'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (32799, 32894), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((32941, 33035), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(al, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (32954, 33035), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((33098, 33185), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (33111, 33185), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((33249, 33336), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (33262, 33336), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((33401, 33493), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (33414, 33493), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((33556, 33643), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(al, 
chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (33569, 33643), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((33708, 33795), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (33721, 33795), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((33860, 33952), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (33873, 33952), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34013, 34100), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (34026, 34100), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34165, 34257), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (34178, 34257), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34313, 34419), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(al, 
chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (34326, 34419), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34466, 34558), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['al', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(al, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (34479, 34558), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34621, 34707), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (34634, 34707), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34772, 34858), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (34785, 34858), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((34922, 35013), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (34935, 35013), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35076, 35162), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(bob, viral_fever, similarity_type=\n 
DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (35089, 35162), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35227, 35313), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (35240, 35313), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35378, 35469), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (35391, 35469), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35531, 35617), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (35544, 35617), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35682, 35773), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (35695, 35773), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35829, 35934), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(bob, viral_fever, similarity_type=\n 
DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (35842, 35934), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((35981, 36072), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(bob, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (35994, 36072), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36135, 36212), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (36148, 36212), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36256, 36333), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (36269, 36333), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36402, 36489), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(bob, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (36415, 36489), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36552, 36629), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (36565, 36629), False, 'from 
fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36699, 36776), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (36712, 36776), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36846, 36933), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(bob, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (36859, 36933), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((36995, 37072), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(bob, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (37008, 37072), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((37141, 37228), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(bob, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (37154, 37228), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((37284, 37385), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(bob, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (37297, 37385), False, 'from fsmpy.similarities import 
dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((37432, 37519), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(bob, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (37445, 37519), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((37581, 37658), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (37594, 37658), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((37728, 37805), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (37741, 37805), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((37875, 37962), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(bob, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (37888, 37962), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((38025, 38102), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (38038, 38102), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), 
((38172, 38249), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (38185, 38249), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((38319, 38406), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(bob, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (38332, 38406), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((38468, 38545), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(bob, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (38481, 38545), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((38615, 38702), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(bob, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (38628, 38702), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((38758, 38859), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(bob, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (38771, 38859), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((38906, 38993), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(bob, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (38919, 38993), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39056, 39146), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (39069, 39146), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39211, 39301), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (39224, 39301), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39366, 39461), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (39379, 39461), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39524, 39614), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (39537, 39614), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39679, 39769), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (39692, 39769), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39834, 39929), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (39847, 39929), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((39990, 40080), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (40003, 40080), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((40145, 40240), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (40158, 40240), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((40295, 40404), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (40308, 40404), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, 
nguyen, deng_jiang_fu\n'), ((40451, 40546), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(bob, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (40464, 40546), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((40609, 40697), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (40622, 40697), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((40762, 40850), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (40775, 40850), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((40915, 41008), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (40928, 41008), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((41071, 41159), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (41084, 41159), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), 
((41224, 41312), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (41237, 41312), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((41377, 41470), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (41390, 41470), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((41532, 41620), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (41545, 41620), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((41685, 41778), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (41698, 41778), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((41834, 41941), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (41847, 41941), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, 
nguyen, deng_jiang_fu\n'), ((41988, 42081), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['bob', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(bob, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (42001, 42081), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((42144, 42230), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (42157, 42230), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((42295, 42381), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (42308, 42381), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((42446, 42537), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (42459, 42537), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((42600, 42686), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (42613, 42686), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((42751, 42837), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (42764, 42837), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((42902, 42993), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (42915, 42993), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43055, 43141), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (43068, 43141), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43204, 43295), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (43217, 43295), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43351, 43456), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (43364, 43456), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43503, 
43594), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(joe, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (43516, 43594), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43657, 43734), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (43670, 43734), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43804, 43881), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (43817, 43881), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((43951, 44038), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(joe, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (43964, 44038), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44101, 44178), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (44114, 44178), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44248, 44325), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], 
{'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (44261, 44325), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44395, 44482), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(joe, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (44408, 44482), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44544, 44621), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(joe, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (44557, 44621), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44691, 44778), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(joe, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (44704, 44778), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44834, 44935), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(joe, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (44847, 44935), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((44982, 45069), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'malaria'], {'similarity_type': 
'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(joe, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (44995, 45069), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45131, 45208), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (45144, 45208), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45278, 45355), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (45291, 45355), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45424, 45511), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(joe, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (45437, 45511), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45574, 45651), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (45587, 45651), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45721, 45798), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(joe, typhoid, 
similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (45734, 45798), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45842, 45929), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(joe, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (45855, 45929), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((45991, 46068), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(joe, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (46004, 46068), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((46138, 46225), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(joe, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (46151, 46225), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((46280, 46381), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(joe, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (46293, 46381), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((46428, 46515), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(joe, typhoid, similarity_type=\n 
DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (46441, 46515), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((46578, 46668), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (46591, 46668), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((46732, 46822), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (46745, 46822), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((46887, 46982), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (46900, 46982), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47045, 47135), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (47058, 47135), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47200, 47290), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(joe, stomach_problem, similarity_type=\n 
DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (47213, 47290), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47355, 47450), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (47368, 47450), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47511, 47601), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (47524, 47601), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47666, 47761), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (47679, 47761), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47817, 47926), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (47830, 47926), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((47973, 48068), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 
'p': '(1)'}), '(joe, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (47986, 48068), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((48131, 48219), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (48144, 48219), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((48284, 48372), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (48297, 48372), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((48437, 48530), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (48450, 48530), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((48593, 48681), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (48606, 48681), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((48746, 48834), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(joe, chest_problem, 
similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (48759, 48834), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((48899, 48992), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (48912, 48992), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49054, 49142), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (49067, 49142), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49206, 49299), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (49219, 49299), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49355, 49462), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (49368, 49462), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49507, 49600), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['joe', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 
'p': '(1)'}), '(joe, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (49520, 49600), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49662, 49748), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (49675, 49748), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49813, 49899), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (49826, 49899), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((49964, 50055), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (49977, 50055), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((50118, 50204), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (50131, 50204), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((50269, 50355), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(ted, viral_fever, similarity_type=\n 
DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (50282, 50355), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((50420, 50511), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (50433, 50511), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((50572, 50658), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (50585, 50658), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((50723, 50814), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (50736, 50814), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((50870, 50975), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(ted, viral_fever, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (50883, 50975), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51022, 51113), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'viral_fever'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(ted, viral_fever, 
similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (51035, 51113), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51176, 51253), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (51189, 51253), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51323, 51400), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (51336, 51400), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51470, 51557), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(ted, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (51483, 51557), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51620, 51697), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (51633, 51697), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51767, 51844), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (51780, 51844), False, 'from fsmpy.similarities import 
dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((51913, 52000), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(ted, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (51926, 52000), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52062, 52139), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(ted, malaria, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (52075, 52139), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52209, 52296), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(ted, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (52222, 52296), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52352, 52453), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(ted, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (52365, 52453), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52500, 52587), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'malaria'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(ted, malaria, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (52513, 52587), False, 'from fsmpy.similarities import dengfeng_chuntian, 
hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52650, 52727), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (52663, 52727), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52797, 52874), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (52810, 52874), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((52943, 53030), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(ted, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (52956, 53030), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53093, 53170), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (53106, 53170), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53240, 53317), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (53253, 53317), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53387, 53474), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(ted, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (53400, 53474), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53536, 53613), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(ted, typhoid, similarity_type=DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (53549, 53613), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53682, 53769), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(ted, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (53695, 53769), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53825, 53926), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(ted, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (53838, 53926), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((53973, 54060), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'typhoid'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(ted, typhoid, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (53986, 54060), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((54123, 54213), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (54136, 54213), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((54278, 54368), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (54291, 54368), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((54433, 54528), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (54446, 54528), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((54591, 54681), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (54604, 54681), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((54746, 54836), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (54759, 54836), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((54901, 54996), 
'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (54914, 54996), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55058, 55148), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (55071, 55148), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55212, 55307), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (55225, 55307), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55363, 55472), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (55376, 55472), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55519, 55614), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'stomach_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(ted, stomach_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (55532, 55614), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, 
muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55677, 55765), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_1'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_1)\n', (55690, 55765), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55830, 55918), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_2'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_2)\n', (55843, 55918), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((55983, 56076), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_1_3', 'p': '(1)'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_1_3, p=1)\n', (55996, 56076), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((56139, 56227), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_1'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_1)\n', (56152, 56227), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((56292, 56380), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_2'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_2)\n', (56305, 56380), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), 
((56445, 56538), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_3', 'p': '(1)'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_3, p=1)\n', (56458, 56538), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((56600, 56688), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_2_4'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_2_4)\n', (56613, 56688), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((56753, 56846), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_1', 'p': '(1)'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_1, p=1)\n', (56766, 56846), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((56902, 57009), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_2', 'p': '(2)', 'u': '(0.5)', 'v': '(0.5)'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_2, p=2, u=0.5, v=0.5)\n', (56915, 57009), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((57056, 57149), 'fsmpy.similarities.deng_jiang_fu', 'deng_jiang_fu', (['ted', 'chest_problem'], {'similarity_type': 'DENG_JIANG_FU_MONOTONIC_TYPE_3_3', 'p': '(1)'}), '(ted, chest_problem, similarity_type=\n DENG_JIANG_FU_MONOTONIC_TYPE_3_3, p=1)\n', (57069, 57149), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, 
muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((57450, 57462), 'fsmpy.similarities.nguyen', 'nguyen', (['M', 'N'], {}), '(M, N)\n', (57456, 57462), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((57485, 57497), 'fsmpy.similarities.nguyen', 'nguyen', (['M', 'F'], {}), '(M, F)\n', (57491, 57497), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((57633, 57645), 'fsmpy.similarities.nguyen', 'nguyen', (['M', 'R'], {}), '(M, R)\n', (57639, 57645), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((57687, 57699), 'fsmpy.similarities.nguyen', 'nguyen', (['M', 'S'], {}), '(M, S)\n', (57693, 57699), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((57864, 57876), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (57870, 57876), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58018, 58030), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (58024, 58030), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58158, 58170), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (58164, 58170), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58306, 58318), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (58312, 58318), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58467, 58479), 'fsmpy.similarities.nguyen', 
'nguyen', (['A', 'B'], {}), '(A, B)\n', (58473, 58479), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58628, 58640), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (58634, 58640), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58794, 58806), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (58800, 58806), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((58960, 58972), 'fsmpy.similarities.nguyen', 'nguyen', (['A', 'B'], {}), '(A, B)\n', (58966, 58972), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((59384, 59397), 'fsmpy.similarities.nguyen', 'nguyen', (['A1', 'B'], {}), '(A1, B)\n', (59390, 59397), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((59441, 59454), 'fsmpy.similarities.nguyen', 'nguyen', (['A2', 'B'], {}), '(A2, B)\n', (59447, 59454), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((59498, 59511), 'fsmpy.similarities.nguyen', 'nguyen', (['A3', 'B'], {}), '(A3, B)\n', (59504, 59511), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((59903, 59916), 'fsmpy.similarities.nguyen', 'nguyen', (['A1', 'B'], {}), '(A1, B)\n', (59909, 59916), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((59960, 59973), 'fsmpy.similarities.nguyen', 'nguyen', (['A2', 'B'], {}), '(A2, B)\n', (59966, 59973), False, 'from 
fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((60018, 60031), 'fsmpy.similarities.nguyen', 'nguyen', (['A3', 'B'], {}), '(A3, B)\n', (60024, 60031), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((60195, 60215), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (60209, 60215), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((60355, 60375), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (60369, 60375), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((60515, 60535), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (60529, 60535), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((60675, 60695), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (60689, 60695), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((60835, 60855), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (60849, 60855), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((60995, 61015), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (61009, 61015), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((61165, 61185), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (61179, 61185), 
False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((61325, 61345), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (61339, 61345), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((61488, 61508), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (61502, 61508), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((61651, 61671), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['A', 'B'], {}), '(A, B)\n', (61665, 61671), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((62090, 62111), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P1', 'Q'], {}), '(P1, Q)\n', (62104, 62111), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((62155, 62176), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P2', 'Q'], {}), '(P2, Q)\n', (62169, 62176), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((62220, 62241), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P3', 'Q'], {}), '(P3, Q)\n', (62234, 62241), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((62568, 62589), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P1', 'Q'], {}), '(P1, Q)\n', (62582, 62589), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((62633, 62654), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P2', 'Q'], 
{}), '(P2, Q)\n', (62647, 62654), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((62698, 62719), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P3', 'Q'], {}), '(P3, Q)\n', (62712, 62719), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((63086, 63107), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P1', 'Q'], {}), '(P1, Q)\n', (63100, 63107), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((63151, 63172), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P2', 'Q'], {}), '(P2, Q)\n', (63165, 63172), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((63216, 63237), 'fsmpy.similarities.chen_cheng_lan', 'chen_cheng_lan', (['P3', 'Q'], {}), '(P3, Q)\n', (63230, 63237), False, 'from fsmpy.similarities import mitchell, iancu, liang_shi, dengfeng_chuntian, hong_kim, chen_cheng_lan, song_wang_lei_xue\n'), ((63648, 63674), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['F', 'G'], {}), '(F, G)\n', (63668, 63674), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((64096, 64122), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['F', 'G'], {}), '(F, G)\n', (64116, 64122), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((64167, 64193), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['G', 'H'], {}), '(G, H)\n', (64187, 64193), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((64238, 
64264), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['F', 'H'], {}), '(F, H)\n', (64258, 64264), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66336, 66363), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P1', 'M'], {}), '(P1, M)\n', (66356, 66363), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66408, 66435), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P2', 'M'], {}), '(P2, M)\n', (66428, 66435), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66480, 66507), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P3', 'M'], {}), '(P3, M)\n', (66500, 66507), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66552, 66579), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P4', 'M'], {}), '(P4, M)\n', (66572, 66579), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66624, 66651), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P5', 'M'], {}), '(P5, M)\n', (66644, 66651), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66696, 66723), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P6', 'M'], {}), '(P6, M)\n', (66716, 66723), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66768, 66795), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P7', 'M'], {}), '(P7, M)\n', (66788, 66795), 
False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66840, 66867), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P8', 'M'], {}), '(P8, M)\n', (66860, 66867), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66912, 66939), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P9', 'M'], {}), '(P9, M)\n', (66932, 66939), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((66984, 67012), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P10', 'M'], {}), '(P10, M)\n', (67004, 67012), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((67057, 67085), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P11', 'M'], {}), '(P11, M)\n', (67077, 67085), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((67130, 67158), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P12', 'M'], {}), '(P12, M)\n', (67150, 67158), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((67203, 67231), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P13', 'M'], {}), '(P13, M)\n', (67223, 67231), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n'), ((67276, 67304), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P14', 'M'], {}), '(P14, M)\n', (67296, 67304), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, 
nguyen, deng_jiang_fu\n'), ((67349, 67377), 'fsmpy.similarities.muthukumar_krishnanb', 'muthukumar_krishnanb', (['P15', 'M'], {}), '(P15, M)\n', (67369, 67377), False, 'from fsmpy.similarities import dengfeng_chuntian, hong_kim, song_wang_lei_xue, muthukumar_krishnanb, nguyen, deng_jiang_fu\n')]
|
"""
this is a test handler for the unit test - test_ham_run.
"""
from datacoco_core import Logger
LOG = Logger()
class SqlCompare(object):
    """Stub comparison test used by the ``test_ham_run`` unit test.

    Mimics the interface of a real SQL-comparison test (construct with a
    test config, call ``setup``, then ``run``) while always reporting a
    failure with fixed sample details, so the harness's failure path can
    be exercised deterministically.
    """

    def __init__(self, test_conf):
        """
        :param test_conf: test configuration; stored as-is and echoed
            back in the ``detail`` dict returned by :meth:`run`.
        """
        self.test_conf = test_conf

    def setup(self, CONF):
        """
        Perform (stub) setup.

        :param CONF: global configuration; accepted for interface
            compatibility but not used by this stub.
        :return: ``self``, so calls can be chained (``setup(c).run()``).
        """
        print("Setting up...")
        return self

    def run(self):
        """
        Execute the stub test.

        :return: tuple ``(status, detail)`` where ``status`` is always
            ``"failure"`` and ``detail`` is a dict describing the
            (fixed) test outcome, including the original ``test_conf``.
        """
        status = "failure"
        detail = {
            "status": status,
            "test": "sample test",
            "result_a": "any result a",
            "result_b": "any result b",
            "diff": 1,
            "test_conf": self.test_conf,
        }
        return status, detail
|
[
"datacoco_core.Logger"
] |
[((106, 114), 'datacoco_core.Logger', 'Logger', ([], {}), '()\n', (112, 114), False, 'from datacoco_core import Logger\n')]
|
'''
Copyright (C) 2020 Golagola
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# 외장모듈
import os, sys # 시스템 모듈
import serial # 직렬 통신 모듈
# 경로설정
CURRENT_PATH = os.path.dirname(os.path.realpath(__file__))
sys.path.append(f'{CURRENT_PATH}/module')
# 내장모듈
from module.config import *
from module.Http import Http
from module.DataManager import DataManager
from module.Speak import Gspeak
from module.Serial import Serial
# 메인 함수
def main():
try:
# 아두이노와 시리얼 통신할 인스턴스 생성
port = serial.Serial(
port = PORT,
baudrate = 9600
)
print(f'아두이노로부터 데이터를 수신합니다.\n현 메세지 출력 후 동작이 멈췄다면 아두이노와 라즈베리파이의 시리얼 포트가 일치하는지 config.py를 확인해주십시오.\n현재 포트 : {PORT}')
# 캐시 비우기
port.flushInput()
except:
print(f'오류 : {PORT} -> 잘못된 신호입니다.\n시리얼 모니터를 꺼주시거나 연결된 포트가 올바른지 config.py를 확인해주십시오.')
exit(1)
# 아두이노 센싱 데이터 한 줄 단위로 수신 -> 현 시리얼 포트가 통신 가능한지 테스트용으로 먼저 수신해보는 것.
receive = Serial.get_receive_data(port)
# 인스턴스 생성
data_manager = DataManager() # 음료 데이터 관리용 인스턴스
speak = Gspeak(25100) # gTTS를 사용하여 Hango 음성 출력을 제공하는 인스턴스 => 인자는 음성 출력 속도
# 음료수 정보 요청
response = Http.request_drinks(SERIAL_NUMBER)
data_manager.refresh_drinks(response)
# 초기 사운드 메세지 설정
drinks = data_manager.get_drinks()
speak.refresh_message(drinks)
# 설정된 메세지 오브젝트 불러오기
sound_msgs = speak.get_sound_msgs()
# 음료수 이름을 파일명으로하는 사운드 만들고 저장
for file_path in sound_msgs.keys() :
try :
file_name_items = os.listdir(f"{CURRENT_PATH}/sounds/{file_path}")
except :
print(f"'main.py'와 같은 경로에 'sounds' 폴더가 존재하는지 확인해주십시오.\n'sounds' 폴더 안에는 'basic', 'position', 'duplicate', 'sold', 'soldout' 폴더가 필수로 존재해야 합니다.")
exit()
for file_name, message in sound_msgs[file_path].items() :
# 이미 만들어진 음성 파일이 아닐 경우에만 새로 만든다
if file_name+'.mp3' not in file_name_items :
print(f"음성 파일 생성\nFILE_PATH : {file_path}\nFILE_NAME : {file_name}\nMESSAGE : {message}")
speak.save_sound(file_path, file_name, message)
# 무한 반복
while True:
# 아두이노 센싱 데이터 한 줄 단위로 수신
receive = Serial.get_receive_data(port)
# 이용 가능한 데이터인지 검사
if Serial.is_available(receive) :
# 아두이노 수신 데이터 저장
Serial.save_received_data(receive)
received_keys = Serial.get_received_keys()
# 아두이노 센싱 데이터 불러오기
sensings = Serial.get_sensings()
# 라즈베리파이가 가공할 데이터를 모두 수신 했다면 실행
if BASIC_KEYS.difference(received_keys) == set() :
# 아두이노에서 센싱된 데이터가 있으면 실행
if sensings["success"] :
# 출력
print("센싱 데이터 수신 성공")
# 캐시 비우기
port.flushInput()
# 판매된 음료수가 있을 경우에 실행
if sensings["sold_position"] != -1 :
# 감지 정보가 새로운 감지 정보와 다르면 실행 => 같은 말을 반복하지 않기 위함
if Serial.current_sensing_data != sensings["sold_position"] :
# 새로 감지된 정보 저장 => 같은 말을 반복하지 않기 위함
Serial.current_sensing_data = f"sold_position {sensings['sold_position']}"
drink = {
'name' : drinks["name"][sensings["sold_position"]],
'price' : drinks["price"][sensings["sold_position"]],
'sold_position' : sensings["sold_position"]
}
# 판매된 음료수 정보 차감 요청
print("판매된 음료 차감 데이터를 요청하고 스피커 출력을 실행합니다.")
response = Http.update_sold_drink(USER_ID, SERIAL_NUMBER, drink)
data_manager.check_drink_update(response)
# 스피커 출력
speak.stop()
print("스피커 출력을 실행합니다.")
# 해당 음료가 품절일 경우 실행
if drinks["count"][sensings["sold_position"]] <= 0 :
# 스피커 출력
speak.say("sold_out", drinks["name"][sensings["sold_position"]])
else :
# 스피커 출력
speak.say("sold", drinks["name"][sensings["sold_position"]])
elif sensings["duplicate"] :
# 감지 정보가 새로운 감지 정보와 다르면 실행 => 같은 말을 반복하지 않기 위함
if Serial.current_sensing_data != sensings["duplicate"] :
# 새로 감지된 정보 저장 => 같은 말을 반복하지 않기 위함
Serial.current_sensing_data = f"duplicate {sensings['duplicate']}"
speak.stop()
print("물체가 감지되어 스피커 출력을 실행합니다.")
# 스피커 출력
speak.say("duplicate", "duplicate")
# 손이 음료 버튼에 위치했을 경우에 실행
elif sensings["sensed_position"] != -1 :
# 감지 정보가 새로운 감지 정보와 다르면 실행 => 같은 말을 반복하지 않기 위함
if Serial.current_sensing_data != sensings["sensed_position"] :
# 새로 감지된 정보 저장 => 같은 말을 반복하지 않기 위함
Serial.current_sensing_data = f"sensed_position {sensings['sensed_position']}"
speak.stop()
print("물체가 감지되어 스피커 출력을 실행합니다.")
# 해당 음료가 품절일 경우 실행
if drinks["count"][sensings["sensed_position"]] <= 0 :
# 스피커 출력
speak.say("sold_out", drinks["name"][sensings["sensed_position"]])
else :
# 스피커 출력
speak.say("position", drinks["name"][sensings["sensed_position"]])
# 수신한 변수명 집합 비우기 => 다음 센싱 때에도 정상 수신하는지 검사하기 위함
received_keys.clear()
# 음성 출력이 가능하면 실행 => 이미 음성이 출력 중일 땐 실행되지 않는다.
if "success" in sensings and speak.is_available():
# 음료수 정보 요청
print("센싱 데이터가 없습니다.\n서버로부터 음료 정보를 불러옵니다...")
response = Http.request_drinks(SERIAL_NUMBER)
data_manager.refresh_drinks(response)
# 수정된 음료수가 있다면 사운드 파일 업데이트
drinks = data_manager.get_drinks()
speak.update_message(drinks)
# 스피커 출력
print("스피커 출력을 실행합니다.\n:인사말 ")
speak.stop()
speak.say("basic")
else :
print("수신 가능한 센싱 데이터가 아닙니다.")
# 파일이 직접 실행됐다면 (모듈로써 사용된게 아니라면) 실행
if __name__ == "__main__":
main()
|
[
"sys.path.append",
"serial.Serial",
"module.Serial.Serial.save_received_data",
"module.Serial.Serial.get_received_keys",
"module.Serial.Serial.get_sensings",
"os.path.realpath",
"module.Http.Http.request_drinks",
"module.Serial.Serial.get_receive_data",
"module.Speak.Gspeak",
"module.Serial.Serial.is_available",
"module.Http.Http.update_sold_drink",
"os.listdir",
"module.DataManager.DataManager"
] |
[((761, 802), 'sys.path.append', 'sys.path.append', (['f"""{CURRENT_PATH}/module"""'], {}), "(f'{CURRENT_PATH}/module')\n", (776, 802), False, 'import os, sys\n'), ((733, 759), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (749, 759), False, 'import os, sys\n'), ((1507, 1536), 'module.Serial.Serial.get_receive_data', 'Serial.get_receive_data', (['port'], {}), '(port)\n', (1530, 1536), False, 'from module.Serial import Serial\n'), ((1571, 1584), 'module.DataManager.DataManager', 'DataManager', ([], {}), '()\n', (1582, 1584), False, 'from module.DataManager import DataManager\n'), ((1618, 1631), 'module.Speak.Gspeak', 'Gspeak', (['(25100)'], {}), '(25100)\n', (1624, 1631), False, 'from module.Speak import Gspeak\n'), ((1726, 1760), 'module.Http.Http.request_drinks', 'Http.request_drinks', (['SERIAL_NUMBER'], {}), '(SERIAL_NUMBER)\n', (1745, 1760), False, 'from module.Http import Http\n'), ((1058, 1097), 'serial.Serial', 'serial.Serial', ([], {'port': 'PORT', 'baudrate': '(9600)'}), '(port=PORT, baudrate=9600)\n', (1071, 1097), False, 'import serial\n'), ((2742, 2771), 'module.Serial.Serial.get_receive_data', 'Serial.get_receive_data', (['port'], {}), '(port)\n', (2765, 2771), False, 'from module.Serial import Serial\n'), ((2810, 2838), 'module.Serial.Serial.is_available', 'Serial.is_available', (['receive'], {}), '(receive)\n', (2829, 2838), False, 'from module.Serial import Serial\n'), ((2085, 2133), 'os.listdir', 'os.listdir', (['f"""{CURRENT_PATH}/sounds/{file_path}"""'], {}), "(f'{CURRENT_PATH}/sounds/{file_path}')\n", (2095, 2133), False, 'import os, sys\n'), ((2882, 2916), 'module.Serial.Serial.save_received_data', 'Serial.save_received_data', (['receive'], {}), '(receive)\n', (2907, 2916), False, 'from module.Serial import Serial\n'), ((2945, 2971), 'module.Serial.Serial.get_received_keys', 'Serial.get_received_keys', ([], {}), '()\n', (2969, 2971), False, 'from module.Serial import Serial\n'), ((3027, 3048), 
'module.Serial.Serial.get_sensings', 'Serial.get_sensings', ([], {}), '()\n', (3046, 3048), False, 'from module.Serial import Serial\n'), ((6964, 6998), 'module.Http.Http.request_drinks', 'Http.request_drinks', (['SERIAL_NUMBER'], {}), '(SERIAL_NUMBER)\n', (6983, 6998), False, 'from module.Http import Http\n'), ((4305, 4358), 'module.Http.Http.update_sold_drink', 'Http.update_sold_drink', (['USER_ID', 'SERIAL_NUMBER', 'drink'], {}), '(USER_ID, SERIAL_NUMBER, drink)\n', (4327, 4358), False, 'from module.Http import Http\n')]
|
import os
import unittest
import tblib # ensure installed
import bigflow.testing.isolate
from . import nonpure
class TestIsolateMixinTestCase(unittest.TestCase):
class SubTest(unittest.TestCase):
failed = False
@classmethod
def setUpClass(cls):
cls.pid = os.getpid()
def test_fail(self):
type(self).failed = True
self.fail('fail-ok')
def test_ok(self):
self.assertNotEqual(self.pid, os.getpid())
def test_error(self):
raise RuntimeError('fail-error')
@unittest.skip('test-skip')
def test_skip(self):
self.fail()
@unittest.expectedFailure
def test_expected_failure_but(self):
pass
@unittest.expectedFailure
def test_expected_failure(self):
self.fail("expected")
class ForkTest(bigflow.testing.ForkIsolateMixin, SubTest):
pass
class SpawnTest(bigflow.testing.SpawnIsolateMixin, SubTest):
pass
def check_test_mixin_subtest(self, test_class):
# when
result = self._run_test(test_class)
# then
self.assertEqual(6, result.testsRun, "All tests should run")
# then
self.assertEqual(1, len(result.failures))
self.assertRegex(result.failures[0][0].id(), r".*\.test_fail", "Test 'test_fail' sould fail")
self.assertIsNotNone(result.failures[0][1], "Traceback is attached")
self.assertFalse(test_class.failed, "State is not propogated")
# then]
self.assertEqual(1, len(result.errors))
self.assertRegex(result.errors[0][0].id(), ".*\.test_error", "Test 'test_error' sould raise exception")
self.assertIsNotNone(result.errors[0][1], "Traceback is attached")
# then
self.assertEqual(1, len(result.skipped), "Single skipped test")
self.assertEqual(1, len(result.unexpectedSuccesses), "Single unexpected success")
self.assertEqual(1, len(result.expectedFailures), "Single expected failure")
def _run_test(self, test_class):
devnull = open(os.devnull, 'wt')
self.addCleanup(devnull.close)
runner = unittest.TextTestRunner(stream=devnull)
suite = unittest.defaultTestLoader.loadTestsFromTestCase(test_class)
return runner.run(suite)
@unittest.skip
def test_fork_mixin(self):
self.check_test_mixin_subtest(self.ForkTest)
def test_spawn_mixin(self):
self.check_test_mixin_subtest(self.SpawnTest)
class ForkReloadModulesTest(
bigflow.testing.ForkIsolateMixin,
unittest.TestCase,
):
def setUpParent(self):
super().setUpParent()
self.const = nonpure.CONST
def test_test(self):
from . import nonpure
self.assertEqual(self.const, nonpure.CONST, "Module should not be reloaded")
class SpawnReloadModulesTest(
bigflow.testing.SpawnIsolateMixin,
unittest.TestCase,
):
def setUpParent(self):
super().setUpParent()
self.const = nonpure.CONST
def test_test(self):
from . import nonpure
self.assertNotEqual(self.const, nonpure.CONST, "Module should be reloaded")
class _IsolateDisabledBase(
bigflow.testing.isolate._IsolatedProcessMixin,
unittest.TestCase,
):
isolate = False
_state = None
@classmethod
def _change_state(cls, from_, to_):
if cls._state != from_:
raise AssertionError(f"invalid state, expected {from_} but got {cls._state}")
cls._state = to_
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._change_state(None, 'setupClass')
def setUpParent(self):
super().setUpParent()
self._change_state('setupClass', 'setupParent')
def setUp(self):
super().setUp()
self._change_state('setupParent', 'setup')
def test_test(self):
#self.assertEqual(self.thepid, os.getpid())
self._change_state('setup', 'run')
def tearDown(self):
self._change_state('run', 'teardown')
super().tearDown()
def tearDownParent(self):
self._change_state('teardown', 'teardownParent')
super().tearDownParent()
@classmethod
def tearDownClass(cls):
cls._change_state('teardownParent', None)
super().tearDownClass()
class ForkDisabledTest(
_IsolateDisabledBase,
bigflow.testing.ForkIsolateMixin,
unittest.TestCase,
):
pass
class SpawnDisabledTest(
_IsolateDisabledBase,
bigflow.testing.SpawnIsolateMixin,
unittest.TestCase,
):
pass
|
[
"unittest.defaultTestLoader.loadTestsFromTestCase",
"unittest.skip",
"os.getpid",
"unittest.TextTestRunner"
] |
[((584, 610), 'unittest.skip', 'unittest.skip', (['"""test-skip"""'], {}), "('test-skip')\n", (597, 610), False, 'import unittest\n'), ((2190, 2229), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'stream': 'devnull'}), '(stream=devnull)\n', (2213, 2229), False, 'import unittest\n'), ((2246, 2306), 'unittest.defaultTestLoader.loadTestsFromTestCase', 'unittest.defaultTestLoader.loadTestsFromTestCase', (['test_class'], {}), '(test_class)\n', (2294, 2306), False, 'import unittest\n'), ((303, 314), 'os.getpid', 'os.getpid', ([], {}), '()\n', (312, 314), False, 'import os\n'), ((485, 496), 'os.getpid', 'os.getpid', ([], {}), '()\n', (494, 496), False, 'import os\n')]
|
import pytest
from citrination_client.models.columns import *
from citrination_client.base.errors import *
class TestVectorColumn(object):
@classmethod
def setup_class(self):
self.name = "Property Band gap"
self.role = "Input"
self.group_by_key = False
self.units = "eV"
def test_vector_column_validates_length_castable_to_int(self):
"""
Tests that the length option passed into the RealColumn constructor
is castable to int
"""
# Non intable length
length = "asdf"
with pytest.raises(CitrinationClientError):
VectorColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, length=length)
def test_vector_column_serializes_length_correctly(self):
"""
Tests that the VectorColumn class expresses the length option correctly
in its dictionary form.
"""
length = 10.0
column = VectorColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, units=self.units, length=length)
c_dict = column.to_dict()
assert c_dict["name"] == self.name
assert c_dict["role"] == self.role
assert c_dict["group_by_key"] == self.group_by_key
assert c_dict["units"] == self.units
assert c_dict["type"] == VectorColumn.TYPE
assert c_dict["options"]["length"] == 10
|
[
"pytest.raises"
] |
[((576, 613), 'pytest.raises', 'pytest.raises', (['CitrinationClientError'], {}), '(CitrinationClientError)\n', (589, 613), False, 'import pytest\n')]
|
# Copyright 2014 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
def _make_credentials():
import google.auth.credentials
return mock.Mock(spec=google.auth.credentials.Credentials)
def _make_entity_pb(project, kind, integer_id, name=None, str_val=None):
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore.helpers import _new_value_pb
entity_pb = entity_pb2.Entity()
entity_pb.key.partition_id.project_id = project
path_element = entity_pb.key.path.add()
path_element.kind = kind
path_element.id = integer_id
if name is not None and str_val is not None:
value_pb = _new_value_pb(entity_pb, name)
value_pb.string_value = str_val
return entity_pb
class Test__get_gcd_project(unittest.TestCase):
def _call_fut(self):
from google.cloud.datastore.client import _get_gcd_project
return _get_gcd_project()
def test_no_value(self):
environ = {}
with mock.patch('os.getenv', new=environ.get):
project = self._call_fut()
self.assertIsNone(project)
def test_value_set(self):
from google.cloud.datastore.client import GCD_DATASET
MOCK_PROJECT = object()
environ = {GCD_DATASET: MOCK_PROJECT}
with mock.patch('os.getenv', new=environ.get):
project = self._call_fut()
self.assertEqual(project, MOCK_PROJECT)
class Test__determine_default_project(unittest.TestCase):
def _call_fut(self, project=None):
from google.cloud.datastore.client import (
_determine_default_project)
return _determine_default_project(project=project)
def _determine_default_helper(self, gcd=None, fallback=None,
project_called=None):
_callers = []
def gcd_mock():
_callers.append('gcd_mock')
return gcd
def fallback_mock(project=None):
_callers.append(('fallback_mock', project))
return fallback
patch = mock.patch.multiple(
'google.cloud.datastore.client',
_get_gcd_project=gcd_mock,
_base_default_project=fallback_mock)
with patch:
returned_project = self._call_fut(project_called)
return returned_project, _callers
def test_no_value(self):
project, callers = self._determine_default_helper()
self.assertIsNone(project)
self.assertEqual(callers, ['gcd_mock', ('fallback_mock', None)])
def test_explicit(self):
PROJECT = object()
project, callers = self._determine_default_helper(
project_called=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, [])
def test_gcd(self):
PROJECT = object()
project, callers = self._determine_default_helper(gcd=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ['gcd_mock'])
def test_fallback(self):
PROJECT = object()
project, callers = self._determine_default_helper(fallback=PROJECT)
self.assertEqual(project, PROJECT)
self.assertEqual(callers, ['gcd_mock', ('fallback_mock', None)])
class TestClient(unittest.TestCase):
PROJECT = 'PROJECT'
@staticmethod
def _get_target_class():
from google.cloud.datastore.client import Client
return Client
def _make_one(self, project=PROJECT, namespace=None,
credentials=None, _http=None, _use_grpc=None):
return self._get_target_class()(project=project,
namespace=namespace,
credentials=credentials,
_http=_http,
_use_grpc=_use_grpc)
def test_constructor_w_project_no_environ(self):
# Some environments (e.g. AppVeyor CI) run in GCE, so
# this test would fail artificially.
patch = mock.patch(
'google.cloud.datastore.client._base_default_project',
return_value=None)
with patch:
self.assertRaises(EnvironmentError, self._make_one, None)
def test_constructor_w_implicit_inputs(self):
from google.cloud.datastore.client import _DATASTORE_BASE_URL
other = 'other'
creds = _make_credentials()
klass = self._get_target_class()
patch1 = mock.patch(
'google.cloud.datastore.client._determine_default_project',
return_value=other)
patch2 = mock.patch(
'google.auth.default', return_value=(creds, None))
with patch1 as _determine_default_project:
with patch2 as default:
client = klass()
self.assertEqual(client.project, other)
self.assertIsNone(client.namespace)
self.assertIs(client._credentials, creds)
self.assertIsNone(client._http_internal)
self.assertEqual(client._base_url, _DATASTORE_BASE_URL)
self.assertIsNone(client.current_batch)
self.assertIsNone(client.current_transaction)
default.assert_called_once_with()
_determine_default_project.assert_called_once_with(None)
def test_constructor_w_explicit_inputs(self):
from google.cloud.datastore.client import _DATASTORE_BASE_URL
other = 'other'
namespace = 'namespace'
creds = _make_credentials()
http = object()
client = self._make_one(project=other,
namespace=namespace,
credentials=creds,
_http=http)
self.assertEqual(client.project, other)
self.assertEqual(client.namespace, namespace)
self.assertIs(client._credentials, creds)
self.assertIs(client._http_internal, http)
self.assertIsNone(client.current_batch)
self.assertEqual(list(client._batch_stack), [])
self.assertEqual(client._base_url, _DATASTORE_BASE_URL)
def test_constructor_use_grpc_default(self):
import google.cloud.datastore.client as MUT
project = 'PROJECT'
creds = _make_credentials()
http = object()
with mock.patch.object(MUT, '_USE_GRPC', new=True):
client1 = self._make_one(
project=project, credentials=creds, _http=http)
self.assertTrue(client1._use_grpc)
# Explicitly over-ride the environment.
client2 = self._make_one(
project=project, credentials=creds, _http=http,
_use_grpc=False)
self.assertFalse(client2._use_grpc)
with mock.patch.object(MUT, '_USE_GRPC', new=False):
client3 = self._make_one(
project=project, credentials=creds, _http=http)
self.assertFalse(client3._use_grpc)
# Explicitly over-ride the environment.
client4 = self._make_one(
project=project, credentials=creds, _http=http,
_use_grpc=True)
self.assertTrue(client4._use_grpc)
def test_constructor_gcd_host(self):
from google.cloud.environment_vars import GCD_HOST
host = 'localhost:1234'
fake_environ = {GCD_HOST: host}
project = 'PROJECT'
creds = _make_credentials()
http = object()
with mock.patch('os.environ', new=fake_environ):
client = self._make_one(
project=project, credentials=creds, _http=http)
self.assertEqual(client._base_url, 'http://' + host)
def test__datastore_api_property_gapic(self):
client = self._make_one(
project='prahj-ekt', credentials=_make_credentials(),
_http=object(), _use_grpc=True)
self.assertIsNone(client._datastore_api_internal)
patch = mock.patch(
'google.cloud.datastore.client.make_datastore_api',
return_value=mock.sentinel.ds_api)
with patch as make_api:
ds_api = client._datastore_api
self.assertIs(ds_api, mock.sentinel.ds_api)
make_api.assert_called_once_with(client)
self.assertIs(
client._datastore_api_internal, mock.sentinel.ds_api)
# Make sure the cached value is used.
self.assertEqual(make_api.call_count, 1)
self.assertIs(
client._datastore_api, mock.sentinel.ds_api)
self.assertEqual(make_api.call_count, 1)
def test__datastore_api_property_http(self):
from google.cloud.datastore._http import HTTPDatastoreAPI
client = self._make_one(
project='prahj-ekt', credentials=_make_credentials(),
_http=object(), _use_grpc=False)
self.assertIsNone(client._datastore_api_internal)
ds_api = client._datastore_api
self.assertIsInstance(ds_api, HTTPDatastoreAPI)
self.assertIs(ds_api.client, client)
# Make sure the cached value is used.
self.assertIs(client._datastore_api_internal, ds_api)
self.assertIs(client._datastore_api, ds_api)
def test__push_batch_and__pop_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
batch = client.batch()
xact = client.transaction()
client._push_batch(batch)
self.assertEqual(list(client._batch_stack), [batch])
self.assertIs(client.current_batch, batch)
self.assertIsNone(client.current_transaction)
client._push_batch(xact)
self.assertIs(client.current_batch, xact)
self.assertIs(client.current_transaction, xact)
# list(_LocalStack) returns in reverse order.
self.assertEqual(list(client._batch_stack), [xact, batch])
self.assertIs(client._pop_batch(), xact)
self.assertEqual(list(client._batch_stack), [batch])
self.assertIs(client._pop_batch(), batch)
self.assertEqual(list(client._batch_stack), [])
def test_get_miss(self):
_called_with = []
def _get_multi(*args, **kw):
_called_with.append((args, kw))
return []
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.get_multi = _get_multi
key = object()
self.assertIsNone(client.get(key))
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['keys'], [key])
self.assertIsNone(_called_with[0][1]['missing'])
self.assertIsNone(_called_with[0][1]['deferred'])
self.assertIsNone(_called_with[0][1]['transaction'])
def test_get_hit(self):
TXN_ID = '123'
_called_with = []
_entity = object()
def _get_multi(*args, **kw):
_called_with.append((args, kw))
return [_entity]
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.get_multi = _get_multi
key, missing, deferred = object(), [], []
self.assertIs(client.get(key, missing, deferred, TXN_ID), _entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['keys'], [key])
self.assertIs(_called_with[0][1]['missing'], missing)
self.assertIs(_called_with[0][1]['deferred'], deferred)
self.assertEqual(_called_with[0][1]['transaction'], TXN_ID)
def test_get_multi_no_keys(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
results = client.get_multi([])
self.assertEqual(results, [])
def test_get_multi_miss(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
ds_api = _make_datastore_api()
client._datastore_api_internal = ds_api
key = Key('Kind', 1234, project=self.PROJECT)
results = client.get_multi([key])
self.assertEqual(results, [])
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key.to_protobuf()],
read_options=read_options,
)
def test_get_multi_miss_w_missing(self):
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
KIND = 'Kind'
ID = 1234
# Make a missing entity pb to be returned from mock backend.
missed = entity_pb2.Entity()
missed.key.partition_id.project_id = self.PROJECT
path_element = missed.key.path.add()
path_element.kind = KIND
path_element.id = ID
creds = _make_credentials()
client = self._make_one(credentials=creds)
# Set missing entity on mock connection.
lookup_response = _make_lookup_response(missing=[missed])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(KIND, ID, project=self.PROJECT)
missing = []
entities = client.get_multi([key], missing=missing)
self.assertEqual(entities, [])
key_pb = key.to_protobuf()
self.assertEqual(
[missed.key.to_protobuf() for missed in missing], [key_pb])
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key_pb],
read_options=read_options,
)
def test_get_multi_w_missing_non_empty(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = Key('Kind', 1234, project=self.PROJECT)
missing = ['this', 'list', 'is', 'not', 'empty']
self.assertRaises(ValueError, client.get_multi,
[key], missing=missing)
def test_get_multi_w_deferred_non_empty(self):
from google.cloud.datastore.key import Key
creds = _make_credentials()
client = self._make_one(credentials=creds)
key = Key('Kind', 1234, project=self.PROJECT)
deferred = ['this', 'list', 'is', 'not', 'empty']
self.assertRaises(ValueError, client.get_multi,
[key], deferred=deferred)
def test_get_multi_miss_w_deferred(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
key = Key('Kind', 1234, project=self.PROJECT)
key_pb = key.to_protobuf()
# Set deferred entity on mock connection.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(deferred=[key_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
deferred = []
entities = client.get_multi([key], deferred=deferred)
self.assertEqual(entities, [])
self.assertEqual(
[def_key.to_protobuf() for def_key in deferred], [key_pb])
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key_pb],
read_options=read_options,
)
def test_get_multi_w_deferred_from_backend_but_not_passed(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore_v1.proto import entity_pb2
from google.cloud.datastore.entity import Entity
from google.cloud.datastore.key import Key
key1 = Key('Kind', project=self.PROJECT)
key1_pb = key1.to_protobuf()
key2 = Key('Kind', 2345, project=self.PROJECT)
key2_pb = key2.to_protobuf()
entity1_pb = entity_pb2.Entity()
entity1_pb.key.CopyFrom(key1_pb)
entity2_pb = entity_pb2.Entity()
entity2_pb.key.CopyFrom(key2_pb)
creds = _make_credentials()
client = self._make_one(credentials=creds)
# Mock up two separate requests. Using an iterable as side_effect
# allows multiple return values.
lookup_response1 = _make_lookup_response(
results=[entity1_pb], deferred=[key2_pb])
lookup_response2 = _make_lookup_response(results=[entity2_pb])
ds_api = _make_datastore_api()
ds_api.lookup = mock.Mock(
side_effect=[lookup_response1, lookup_response2], spec=[])
client._datastore_api_internal = ds_api
missing = []
found = client.get_multi([key1, key2], missing=missing)
self.assertEqual(len(found), 2)
self.assertEqual(len(missing), 0)
# Check the actual contents on the response.
self.assertIsInstance(found[0], Entity)
self.assertEqual(found[0].key.path, key1.path)
self.assertEqual(found[0].key.project, key1.project)
self.assertIsInstance(found[1], Entity)
self.assertEqual(found[1].key.path, key2.path)
self.assertEqual(found[1].key.project, key2.project)
self.assertEqual(ds_api.lookup.call_count, 2)
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_any_call(
self.PROJECT,
[key2_pb],
read_options=read_options,
)
ds_api.lookup.assert_any_call(
self.PROJECT,
[key1_pb, key2_pb],
read_options=read_options,
)
def test_get_multi_hit(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
kind = 'Kind'
id_ = 1234
path = [{'kind': kind, 'id': id_}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, kind, id_, 'foo', 'Foo')
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(kind, id_, project=self.PROJECT)
result, = client.get_multi([key])
new_key = result.key
# Check the returned value is as expected.
self.assertIsNot(new_key, key)
self.assertEqual(new_key.project, self.PROJECT)
self.assertEqual(new_key.path, path)
self.assertEqual(list(result), ['foo'])
self.assertEqual(result['foo'], 'Foo')
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key.to_protobuf()],
read_options=read_options,
)
def test_get_multi_hit_w_transaction(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
txn_id = b'123'
kind = 'Kind'
id_ = 1234
path = [{'kind': kind, 'id': id_}]
# Make a found entity pb to be returned from mock backend.
entity_pb = _make_entity_pb(self.PROJECT, kind, id_, 'foo', 'Foo')
# Make a connection to return the entity pb.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(results=[entity_pb])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key = Key(kind, id_, project=self.PROJECT)
txn = client.transaction()
txn._id = txn_id
result, = client.get_multi([key], transaction=txn)
new_key = result.key
# Check the returned value is as expected.
self.assertIsNot(new_key, key)
self.assertEqual(new_key.project, self.PROJECT)
self.assertEqual(new_key.path, path)
self.assertEqual(list(result), ['foo'])
self.assertEqual(result['foo'], 'Foo')
read_options = datastore_pb2.ReadOptions(transaction=txn_id)
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key.to_protobuf()],
read_options=read_options,
)
def test_get_multi_hit_multiple_keys_same_project(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.key import Key
kind = 'Kind'
id1 = 1234
id2 = 2345
# Make a found entity pb to be returned from mock backend.
entity_pb1 = _make_entity_pb(self.PROJECT, kind, id1)
entity_pb2 = _make_entity_pb(self.PROJECT, kind, id2)
# Make a connection to return the entity pbs.
creds = _make_credentials()
client = self._make_one(credentials=creds)
lookup_response = _make_lookup_response(
results=[entity_pb1, entity_pb2])
ds_api = _make_datastore_api(lookup_response=lookup_response)
client._datastore_api_internal = ds_api
key1 = Key(kind, id1, project=self.PROJECT)
key2 = Key(kind, id2, project=self.PROJECT)
retrieved1, retrieved2 = client.get_multi([key1, key2])
# Check values match.
self.assertEqual(retrieved1.key.path, key1.path)
self.assertEqual(dict(retrieved1), {})
self.assertEqual(retrieved2.key.path, key2.path)
self.assertEqual(dict(retrieved2), {})
read_options = datastore_pb2.ReadOptions()
ds_api.lookup.assert_called_once_with(
self.PROJECT,
[key1.to_protobuf(), key2.to_protobuf()],
read_options=read_options,
)
def test_get_multi_hit_multiple_keys_different_project(self):
from google.cloud.datastore.key import Key
PROJECT1 = 'PROJECT'
PROJECT2 = 'PROJECT-ALT'
# Make sure our IDs are actually different.
self.assertNotEqual(PROJECT1, PROJECT2)
key1 = Key('KIND', 1234, project=PROJECT1)
key2 = Key('KIND', 1234, project=PROJECT2)
creds = _make_credentials()
client = self._make_one(credentials=creds)
with self.assertRaises(ValueError):
client.get_multi([key1, key2])
    def test_get_multi_max_loops(self):
        """get_multi gives up once _MAX_LOOPS is exhausted.

        With ``_MAX_LOOPS`` patched to -1 the lookup loop never runs, so no
        results, misses or deferred keys are produced and the datastore API
        is never called, even though the stub backend has a hit prepared.
        """
        from google.cloud.datastore.key import Key
        kind = 'Kind'
        id_ = 1234
        # Make a found entity pb to be returned from mock backend.
        entity_pb = _make_entity_pb(self.PROJECT, kind, id_, 'foo', 'Foo')
        # Make a connection to return the entity pb.
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        lookup_response = _make_lookup_response(results=[entity_pb])
        ds_api = _make_datastore_api(lookup_response=lookup_response)
        client._datastore_api_internal = ds_api
        key = Key(kind, id_, project=self.PROJECT)
        deferred = []
        missing = []
        patch = mock.patch(
            'google.cloud.datastore.client._MAX_LOOPS', new=-1)
        with patch:
            result = client.get_multi([key], missing=missing,
                                      deferred=deferred)
        # Make sure we have no results, even though the connection has been
        # set up as in `test_hit` to return a single result.
        self.assertEqual(result, [])
        self.assertEqual(missing, [])
        self.assertEqual(deferred, [])
        ds_api.lookup.assert_not_called()
def test_put(self):
_called_with = []
def _put_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.put_multi = _put_multi
entity = object()
client.put(entity)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['entities'], [entity])
def test_put_multi_no_entities(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertIsNone(client.put_multi([]))
def test_put_multi_w_single_empty_entity(self):
# https://github.com/GoogleCloudPlatform/google-cloud-python/issues/649
from google.cloud.datastore.entity import Entity
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(ValueError, client.put_multi, Entity())
def test_put_multi_no_batch_w_partial_key(self):
from google.cloud.datastore_v1.proto import datastore_pb2
from google.cloud.datastore.helpers import _property_tuples
entity = _Entity(foo=u'bar')
key = entity.key = _Key(self.PROJECT)
key._id = None
creds = _make_credentials()
client = self._make_one(credentials=creds)
key_pb = _make_key(234)
ds_api = _make_datastore_api(key_pb)
client._datastore_api_internal = ds_api
result = client.put_multi([entity])
self.assertIsNone(result)
self.assertEqual(ds_api.commit.call_count, 1)
_, positional, keyword = ds_api.commit.mock_calls[0]
self.assertEqual(keyword, {'transaction': None})
self.assertEqual(len(positional), 3)
self.assertEqual(positional[0], self.PROJECT)
self.assertEqual(
positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL)
mutations = positional[2]
mutated_entity = _mutated_pb(self, mutations, 'insert')
self.assertEqual(mutated_entity.key, key.to_protobuf())
prop_list = list(_property_tuples(mutated_entity))
self.assertTrue(len(prop_list), 1)
name, value_pb = prop_list[0]
self.assertEqual(name, 'foo')
self.assertEqual(value_pb.string_value, u'bar')
def test_put_multi_existing_batch_w_completed_key(self):
from google.cloud.datastore.helpers import _property_tuples
creds = _make_credentials()
client = self._make_one(credentials=creds)
entity = _Entity(foo=u'bar')
key = entity.key = _Key(self.PROJECT)
with _NoCommitBatch(client) as CURR_BATCH:
result = client.put_multi([entity])
self.assertIsNone(result)
mutated_entity = _mutated_pb(self, CURR_BATCH.mutations, 'upsert')
self.assertEqual(mutated_entity.key, key.to_protobuf())
prop_list = list(_property_tuples(mutated_entity))
self.assertTrue(len(prop_list), 1)
name, value_pb = prop_list[0]
self.assertEqual(name, 'foo')
self.assertEqual(value_pb.string_value, u'bar')
def test_delete(self):
_called_with = []
def _delete_multi(*args, **kw):
_called_with.append((args, kw))
creds = _make_credentials()
client = self._make_one(credentials=creds)
client.delete_multi = _delete_multi
key = object()
client.delete(key)
self.assertEqual(_called_with[0][0], ())
self.assertEqual(_called_with[0][1]['keys'], [key])
def test_delete_multi_no_keys(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
client._datastore_api_internal = _make_datastore_api()
result = client.delete_multi([])
self.assertIsNone(result)
client._datastore_api_internal.commit.assert_not_called()
    def test_delete_multi_no_batch(self):
        """delete_multi outside a batch commits a 'delete' mutation at once."""
        from google.cloud.datastore_v1.proto import datastore_pb2
        key = _Key(self.PROJECT)
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        ds_api = _make_datastore_api()
        client._datastore_api_internal = ds_api
        result = client.delete_multi([key])
        self.assertIsNone(result)
        # Exactly one non-transactional commit against the right project.
        self.assertEqual(ds_api.commit.call_count, 1)
        _, positional, keyword = ds_api.commit.mock_calls[0]
        self.assertEqual(keyword, {'transaction': None})
        self.assertEqual(len(positional), 3)
        self.assertEqual(positional[0], self.PROJECT)
        self.assertEqual(
            positional[1], datastore_pb2.CommitRequest.NON_TRANSACTIONAL)
        mutations = positional[2]
        # The sole mutation must be a 'delete' carrying the key's protobuf.
        mutated_key = _mutated_pb(self, mutations, 'delete')
        self.assertEqual(mutated_key, key.to_protobuf())
    def test_delete_multi_w_existing_batch(self):
        """Inside an open batch the delete is queued, not committed."""
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        client._datastore_api_internal = _make_datastore_api()
        key = _Key(self.PROJECT)
        with _NoCommitBatch(client) as CURR_BATCH:
            result = client.delete_multi([key])
        self.assertIsNone(result)
        # The mutation lands on the batch.  _Key.to_protobuf caches the pb
        # on ``_key``, so comparing against ``key._key`` matches the pb used.
        mutated_key = _mutated_pb(self, CURR_BATCH.mutations, 'delete')
        self.assertEqual(mutated_key, key._key)
        client._datastore_api_internal.commit.assert_not_called()
    def test_delete_multi_w_existing_transaction(self):
        """Inside an open transaction the delete is queued, not committed."""
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        client._datastore_api_internal = _make_datastore_api()
        key = _Key(self.PROJECT)
        with _NoCommitTransaction(client) as CURR_XACT:
            result = client.delete_multi([key])
        self.assertIsNone(result)
        # The mutation is queued on the transaction; no commit is issued.
        mutated_key = _mutated_pb(self, CURR_XACT.mutations, 'delete')
        self.assertEqual(mutated_key, key._key)
        client._datastore_api_internal.commit.assert_not_called()
    def test_allocate_ids_w_partial_key(self):
        """allocate_ids completes a partial key once per requested id."""
        num_ids = 2
        incomplete_key = _Key(self.PROJECT)
        incomplete_key._id = None
        creds = _make_credentials()
        client = self._make_one(credentials=creds, _use_grpc=False)
        # Stub backend: allocate_ids returns ``num_ids`` key protobufs whose
        # path ids are 0..num_ids-1.
        allocated = mock.Mock(
            keys=[_KeyPB(i) for i in range(num_ids)], spec=['keys'])
        alloc_ids = mock.Mock(return_value=allocated, spec=[])
        ds_api = mock.Mock(allocate_ids=alloc_ids, spec=['allocate_ids'])
        client._datastore_api_internal = ds_api
        result = client.allocate_ids(incomplete_key, num_ids)
        # Check the IDs returned.
        self.assertEqual([key._id for key in result], list(range(num_ids)))
def test_allocate_ids_with_completed_key(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
COMPLETE_KEY = _Key(self.PROJECT)
self.assertRaises(ValueError, client.allocate_ids, COMPLETE_KEY, 2)
def test_key_w_project(self):
KIND = 'KIND'
ID = 1234
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(TypeError,
client.key, KIND, ID, project=self.PROJECT)
def test_key_wo_project(self):
kind = 'KIND'
id_ = 1234
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Key', spec=['__call__'])
with patch as mock_klass:
key = client.key(kind, id_)
self.assertIs(key, mock_klass.return_value)
mock_klass.assert_called_once_with(
kind, id_, project=self.PROJECT, namespace=None)
def test_key_w_namespace(self):
kind = 'KIND'
id_ = 1234
namespace = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Key', spec=['__call__'])
with patch as mock_klass:
key = client.key(kind, id_)
self.assertIs(key, mock_klass.return_value)
mock_klass.assert_called_once_with(
kind, id_, project=self.PROJECT, namespace=namespace)
def test_key_w_namespace_collision(self):
kind = 'KIND'
id_ = 1234
namespace1 = object()
namespace2 = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace1, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Key', spec=['__call__'])
with patch as mock_klass:
key = client.key(kind, id_, namespace=namespace2)
self.assertIs(key, mock_klass.return_value)
mock_klass.assert_called_once_with(
kind, id_, project=self.PROJECT, namespace=namespace2)
def test_batch(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Batch', spec=['__call__'])
with patch as mock_klass:
batch = client.batch()
self.assertIs(batch, mock_klass.return_value)
mock_klass.assert_called_once_with(client)
def test_transaction_defaults(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Transaction', spec=['__call__'])
with patch as mock_klass:
xact = client.transaction()
self.assertIs(xact, mock_klass.return_value)
mock_klass.assert_called_once_with(client)
def test_read_only_transaction_defaults(self):
from google.cloud.datastore_v1.types import TransactionOptions
creds = _make_credentials()
client = self._make_one(credentials=creds)
xact = client.transaction(read_only=True)
self.assertEqual(
xact._options,
TransactionOptions(
read_only=TransactionOptions.ReadOnly()
)
)
self.assertFalse(xact._options.HasField("read_write"))
self.assertTrue(xact._options.HasField("read_only"))
self.assertEqual(xact._options.read_only,
TransactionOptions.ReadOnly())
def test_query_w_client(self):
KIND = 'KIND'
creds = _make_credentials()
client = self._make_one(credentials=creds)
other = self._make_one(credentials=_make_credentials())
self.assertRaises(TypeError, client.query, kind=KIND, client=other)
def test_query_w_project(self):
KIND = 'KIND'
creds = _make_credentials()
client = self._make_one(credentials=creds)
self.assertRaises(TypeError,
client.query, kind=KIND, project=self.PROJECT)
def test_query_w_defaults(self):
creds = _make_credentials()
client = self._make_one(credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', spec=['__call__'])
with patch as mock_klass:
query = client.query()
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client, project=self.PROJECT, namespace=None)
    def test_query_explicit(self):
        """Every query() keyword is forwarded verbatim to the Query class."""
        kind = 'KIND'
        namespace = 'NAMESPACE'
        ancestor = object()
        filters = [('PROPERTY', '==', 'VALUE')]
        projection = ['__key__']
        order = ['PROPERTY']
        distinct_on = ['DISTINCT_ON']
        creds = _make_credentials()
        client = self._make_one(credentials=creds)
        patch = mock.patch(
            'google.cloud.datastore.client.Query', spec=['__call__'])
        with patch as mock_klass:
            query = client.query(
                kind=kind,
                namespace=namespace,
                ancestor=ancestor,
                filters=filters,
                projection=projection,
                order=order,
                distinct_on=distinct_on,
            )
            # The Query constructor must receive the client, the client's
            # project, and each keyword exactly as it was passed in.
            self.assertIs(query, mock_klass.return_value)
            mock_klass.assert_called_once_with(
                client,
                project=self.PROJECT,
                kind=kind,
                namespace=namespace,
                ancestor=ancestor,
                filters=filters,
                projection=projection,
                order=order,
                distinct_on=distinct_on,
            )
def test_query_w_namespace(self):
kind = 'KIND'
namespace = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', spec=['__call__'])
with patch as mock_klass:
query = client.query(kind=kind)
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client, project=self.PROJECT, namespace=namespace, kind=kind)
def test_query_w_namespace_collision(self):
kind = 'KIND'
namespace1 = object()
namespace2 = object()
creds = _make_credentials()
client = self._make_one(namespace=namespace1, credentials=creds)
patch = mock.patch(
'google.cloud.datastore.client.Query', spec=['__call__'])
with patch as mock_klass:
query = client.query(kind=kind, namespace=namespace2)
self.assertIs(query, mock_klass.return_value)
mock_klass.assert_called_once_with(
client, project=self.PROJECT, namespace=namespace2, kind=kind)
class _NoCommitBatch(object):
    """Context manager pushing a Batch that is begun but never committed."""

    def __init__(self, client):
        from google.cloud.datastore.batch import Batch
        self._client = client
        batch = Batch(client)
        batch.begin()
        self._batch = batch

    def __enter__(self):
        # Make the batch current for the client; yield it to the test body.
        self._client._push_batch(self._batch)
        return self._batch

    def __exit__(self, *exc_info):
        # Pop without committing, regardless of exceptions.
        self._client._pop_batch()
class _NoCommitTransaction(object):
    """Context manager pushing a Transaction that is never committed."""

    def __init__(self, client, transaction_id='TRANSACTION'):
        from google.cloud.datastore.batch import Batch
        from google.cloud.datastore.transaction import Transaction
        self._client = client
        transaction = Transaction(client)
        transaction._id = transaction_id
        # Call Batch.begin directly to skip Transaction.begin's API call.
        Batch.begin(transaction)
        self._transaction = transaction

    def __enter__(self):
        self._client._push_batch(self._transaction)
        return self._transaction

    def __exit__(self, *exc_info):
        self._client._pop_batch()
class _Entity(dict):
    """Minimal stand-in for a datastore Entity: a dict plus key metadata."""
    # Attributes mirror the real Entity's interface used by the client code.
    key = None
    exclude_from_indexes = ()
    _meanings = {}
class _Key(object):
_MARKER = object()
_kind = 'KIND'
_key = 'KEY'
_path = None
_id = 1234
_stored = None
def __init__(self, project):
self.project = project
@property
def is_partial(self):
return self._id is None
def to_protobuf(self):
from google.cloud.datastore_v1.proto import entity_pb2
key = self._key = entity_pb2.Key()
# Don't assign it, because it will just get ripped out
# key.partition_id.project_id = self.project
element = key.path.add()
element.kind = self._kind
if self._id is not None:
element.id = self._id
return key
def completed_key(self, new_id):
assert self.is_partial
new_key = self.__class__(self.project)
new_key._id = new_id
return new_key
class _PathElementPB(object):
def __init__(self, id_):
self.id = id_
class _KeyPB(object):
    """Fake key protobuf whose path holds a single _PathElementPB."""

    def __init__(self, id_):
        self.path = [_PathElementPB(id_)]
def _assert_num_mutations(test_case, mutation_pb_list, num_mutations):
test_case.assertEqual(len(mutation_pb_list), num_mutations)
def _mutated_pb(test_case, mutation_pb_list, mutation_type):
    """Return the payload of the single expected mutation.

    Asserts (via ``test_case``) that exactly one mutation is present and
    that its populated oneof branch is ``mutation_type``.
    """
    _assert_num_mutations(test_case, mutation_pb_list, 1)
    only_mutation = mutation_pb_list[0]
    # The populated oneof branch identifies the operation of this mutation.
    test_case.assertEqual(only_mutation.WhichOneof('operation'), mutation_type)
    return getattr(only_mutation, mutation_type)
def _make_key(id_):
    """Build an ``entity_pb2.Key`` with a single path element of ``id_``."""
    from google.cloud.datastore_v1.proto import entity_pb2
    key_pb = entity_pb2.Key()
    element = key_pb.path.add()
    element.id = id_
    return key_pb
def _make_commit_response(*keys):
    """Build a ``CommitResponse`` with one ``MutationResult`` per key."""
    from google.cloud.datastore_v1.proto import datastore_pb2
    results = [datastore_pb2.MutationResult(key=key) for key in keys]
    return datastore_pb2.CommitResponse(mutation_results=results)
def _make_lookup_response(results=(), missing=(), deferred=()):
entity_results_found = [
mock.Mock(entity=result, spec=['entity']) for result in results]
entity_results_missing = [
mock.Mock(entity=missing_entity, spec=['entity'])
for missing_entity in missing]
return mock.Mock(
found=entity_results_found,
missing=entity_results_missing,
deferred=deferred,
spec=['found', 'missing', 'deferred'])
def _make_datastore_api(*keys, **kwargs):
    """Build a mock datastore API exposing ``commit`` and ``lookup``.

    ``commit`` returns a commit response echoing ``keys``; ``lookup``
    returns ``kwargs['lookup_response']`` (default: an empty response).
    """
    commit_method = mock.Mock(
        return_value=_make_commit_response(*keys), spec=[])
    lookup_response = kwargs.pop('lookup_response', _make_lookup_response())
    lookup_method = mock.Mock(return_value=lookup_response, spec=[])
    return mock.Mock(
        commit=commit_method, lookup=lookup_method,
        spec=['commit', 'lookup'])
|
[
"google.cloud.datastore_v1.proto.entity_pb2.Entity",
"google.cloud.datastore.batch.Batch",
"google.cloud.datastore.helpers._new_value_pb",
"mock.patch.multiple",
"google.cloud.datastore.batch.Batch.begin",
"google.cloud.datastore.entity.Entity",
"google.cloud.datastore.client._determine_default_project",
"google.cloud.datastore_v1.proto.entity_pb2.Key",
"google.cloud.datastore.client._determine_default_project.assert_called_once_with",
"google.cloud.datastore_v1.proto.datastore_pb2.MutationResult",
"mock.patch.object",
"google.cloud.datastore_v1.types.TransactionOptions.ReadOnly",
"mock.patch",
"google.cloud.datastore.transaction.Transaction",
"google.cloud.datastore.client._get_gcd_project",
"google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions",
"google.cloud.datastore.key.Key",
"google.cloud.datastore.helpers._property_tuples",
"google.cloud.datastore_v1.proto.datastore_pb2.CommitResponse",
"mock.Mock"
] |
[((678, 729), 'mock.Mock', 'mock.Mock', ([], {'spec': 'google.auth.credentials.Credentials'}), '(spec=google.auth.credentials.Credentials)\n', (687, 729), False, 'import mock\n'), ((942, 961), 'google.cloud.datastore_v1.proto.entity_pb2.Entity', 'entity_pb2.Entity', ([], {}), '()\n', (959, 961), False, 'from google.cloud.datastore_v1.proto import entity_pb2\n'), ((40609, 40625), 'google.cloud.datastore_v1.proto.entity_pb2.Key', 'entity_pb2.Key', ([], {}), '()\n', (40623, 40625), False, 'from google.cloud.datastore_v1.proto import entity_pb2\n'), ((40883, 40946), 'google.cloud.datastore_v1.proto.datastore_pb2.CommitResponse', 'datastore_pb2.CommitResponse', ([], {'mutation_results': 'mutation_results'}), '(mutation_results=mutation_results)\n', (40911, 40946), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((41254, 41385), 'mock.Mock', 'mock.Mock', ([], {'found': 'entity_results_found', 'missing': 'entity_results_missing', 'deferred': 'deferred', 'spec': "['found', 'missing', 'deferred']"}), "(found=entity_results_found, missing=entity_results_missing,\n deferred=deferred, spec=['found', 'missing', 'deferred'])\n", (41263, 41385), False, 'import mock\n'), ((41656, 41704), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'lookup_response', 'spec': '[]'}), '(return_value=lookup_response, spec=[])\n', (41665, 41704), False, 'import mock\n'), ((41725, 41810), 'mock.Mock', 'mock.Mock', ([], {'commit': 'commit_method', 'lookup': 'lookup_method', 'spec': "['commit', 'lookup']"}), "(commit=commit_method, lookup=lookup_method, spec=['commit', 'lookup']\n )\n", (41734, 41810), False, 'import mock\n'), ((1188, 1218), 'google.cloud.datastore.helpers._new_value_pb', '_new_value_pb', (['entity_pb', 'name'], {}), '(entity_pb, name)\n', (1201, 1218), False, 'from google.cloud.datastore.helpers import _new_value_pb\n'), ((1440, 1458), 'google.cloud.datastore.client._get_gcd_project', '_get_gcd_project', ([], {}), '()\n', (1456, 1458), False, 'from 
google.cloud.datastore.client import _get_gcd_project\n'), ((2169, 2212), 'google.cloud.datastore.client._determine_default_project', '_determine_default_project', ([], {'project': 'project'}), '(project=project)\n', (2195, 2212), False, 'from google.cloud.datastore.client import _determine_default_project\n'), ((2588, 2709), 'mock.patch.multiple', 'mock.patch.multiple', (['"""google.cloud.datastore.client"""'], {'_get_gcd_project': 'gcd_mock', '_base_default_project': 'fallback_mock'}), "('google.cloud.datastore.client', _get_gcd_project=\n gcd_mock, _base_default_project=fallback_mock)\n", (2607, 2709), False, 'import mock\n'), ((4550, 4638), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client._base_default_project"""'], {'return_value': 'None'}), "('google.cloud.datastore.client._base_default_project',\n return_value=None)\n", (4560, 4638), False, 'import mock\n'), ((4991, 5085), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client._determine_default_project"""'], {'return_value': 'other'}), "('google.cloud.datastore.client._determine_default_project',\n return_value=other)\n", (5001, 5085), False, 'import mock\n'), ((5124, 5185), 'mock.patch', 'mock.patch', (['"""google.auth.default"""'], {'return_value': '(creds, None)'}), "('google.auth.default', return_value=(creds, None))\n", (5134, 5185), False, 'import mock\n'), ((5730, 5786), 'google.cloud.datastore.client._determine_default_project.assert_called_once_with', '_determine_default_project.assert_called_once_with', (['None'], {}), '(None)\n', (5780, 5786), False, 'from google.cloud.datastore.client import _determine_default_project\n'), ((8427, 8529), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.make_datastore_api"""'], {'return_value': 'mock.sentinel.ds_api'}), "('google.cloud.datastore.client.make_datastore_api', return_value\n =mock.sentinel.ds_api)\n", (8437, 8529), False, 'import mock\n'), ((12533, 12572), 'google.cloud.datastore.key.Key', 'Key', (['"""Kind"""', 
'(1234)'], {'project': 'self.PROJECT'}), "('Kind', 1234, project=self.PROJECT)\n", (12536, 12572), False, 'from google.cloud.datastore.key import Key\n'), ((12677, 12704), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {}), '()\n', (12702, 12704), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((13214, 13233), 'google.cloud.datastore_v1.proto.entity_pb2.Entity', 'entity_pb2.Entity', ([], {}), '()\n', (13231, 13233), False, 'from google.cloud.datastore_v1.proto import entity_pb2\n'), ((13735, 13770), 'google.cloud.datastore.key.Key', 'Key', (['KIND', 'ID'], {'project': 'self.PROJECT'}), '(KIND, ID, project=self.PROJECT)\n', (13738, 13770), False, 'from google.cloud.datastore.key import Key\n'), ((14048, 14075), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {}), '()\n', (14073, 14075), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((14424, 14463), 'google.cloud.datastore.key.Key', 'Key', (['"""Kind"""', '(1234)'], {'project': 'self.PROJECT'}), "('Kind', 1234, project=self.PROJECT)\n", (14427, 14463), False, 'from google.cloud.datastore.key import Key\n'), ((14833, 14872), 'google.cloud.datastore.key.Key', 'Key', (['"""Kind"""', '(1234)'], {'project': 'self.PROJECT'}), "('Kind', 1234, project=self.PROJECT)\n", (14836, 14872), False, 'from google.cloud.datastore.key import Key\n'), ((15219, 15258), 'google.cloud.datastore.key.Key', 'Key', (['"""Kind"""', '(1234)'], {'project': 'self.PROJECT'}), "('Kind', 1234, project=self.PROJECT)\n", (15222, 15258), False, 'from google.cloud.datastore.key import Key\n'), ((15862, 15889), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {}), '()\n', (15887, 15889), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((16357, 16390), 'google.cloud.datastore.key.Key', 'Key', (['"""Kind"""'], {'project': 'self.PROJECT'}), 
"('Kind', project=self.PROJECT)\n", (16360, 16390), False, 'from google.cloud.datastore.key import Key\n'), ((16443, 16482), 'google.cloud.datastore.key.Key', 'Key', (['"""Kind"""', '(2345)'], {'project': 'self.PROJECT'}), "('Kind', 2345, project=self.PROJECT)\n", (16446, 16482), False, 'from google.cloud.datastore.key import Key\n'), ((16542, 16561), 'google.cloud.datastore_v1.proto.entity_pb2.Entity', 'entity_pb2.Entity', ([], {}), '()\n', (16559, 16561), False, 'from google.cloud.datastore_v1.proto import entity_pb2\n'), ((16624, 16643), 'google.cloud.datastore_v1.proto.entity_pb2.Entity', 'entity_pb2.Entity', ([], {}), '()\n', (16641, 16643), False, 'from google.cloud.datastore_v1.proto import entity_pb2\n'), ((17126, 17194), 'mock.Mock', 'mock.Mock', ([], {'side_effect': '[lookup_response1, lookup_response2]', 'spec': '[]'}), '(side_effect=[lookup_response1, lookup_response2], spec=[])\n', (17135, 17194), False, 'import mock\n'), ((17885, 17912), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {}), '()\n', (17910, 17912), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((18919, 18955), 'google.cloud.datastore.key.Key', 'Key', (['kind', 'id_'], {'project': 'self.PROJECT'}), '(kind, id_, project=self.PROJECT)\n', (18922, 18955), False, 'from google.cloud.datastore.key import Key\n'), ((19338, 19365), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {}), '()\n', (19363, 19365), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((20282, 20318), 'google.cloud.datastore.key.Key', 'Key', (['kind', 'id_'], {'project': 'self.PROJECT'}), '(kind, id_, project=self.PROJECT)\n', (20285, 20318), False, 'from google.cloud.datastore.key import Key\n'), ((20778, 20823), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {'transaction': 'txn_id'}), '(transaction=txn_id)\n', (20803, 20823), False, 
'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((21782, 21818), 'google.cloud.datastore.key.Key', 'Key', (['kind', 'id1'], {'project': 'self.PROJECT'}), '(kind, id1, project=self.PROJECT)\n', (21785, 21818), False, 'from google.cloud.datastore.key import Key\n'), ((21834, 21870), 'google.cloud.datastore.key.Key', 'Key', (['kind', 'id2'], {'project': 'self.PROJECT'}), '(kind, id2, project=self.PROJECT)\n', (21837, 21870), False, 'from google.cloud.datastore.key import Key\n'), ((22198, 22225), 'google.cloud.datastore_v1.proto.datastore_pb2.ReadOptions', 'datastore_pb2.ReadOptions', ([], {}), '()\n', (22223, 22225), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((22700, 22735), 'google.cloud.datastore.key.Key', 'Key', (['"""KIND"""', '(1234)'], {'project': 'PROJECT1'}), "('KIND', 1234, project=PROJECT1)\n", (22703, 22735), False, 'from google.cloud.datastore.key import Key\n'), ((22751, 22786), 'google.cloud.datastore.key.Key', 'Key', (['"""KIND"""', '(1234)'], {'project': 'PROJECT2'}), "('KIND', 1234, project=PROJECT2)\n", (22754, 22786), False, 'from google.cloud.datastore.key import Key\n'), ((23583, 23619), 'google.cloud.datastore.key.Key', 'Key', (['kind', 'id_'], {'project': 'self.PROJECT'}), '(kind, id_, project=self.PROJECT)\n', (23586, 23619), False, 'from google.cloud.datastore.key import Key\n'), ((23680, 23742), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client._MAX_LOOPS"""'], {'new': '(-1)'}), "('google.cloud.datastore.client._MAX_LOOPS', new=-1)\n", (23690, 23742), False, 'import mock\n'), ((30492, 30534), 'mock.Mock', 'mock.Mock', ([], {'return_value': 'allocated', 'spec': '[]'}), '(return_value=allocated, spec=[])\n', (30501, 30534), False, 'import mock\n'), ((30552, 30608), 'mock.Mock', 'mock.Mock', ([], {'allocate_ids': 'alloc_ids', 'spec': "['allocate_ids']"}), "(allocate_ids=alloc_ids, spec=['allocate_ids'])\n", (30561, 30608), False, 'import mock\n'), ((31543, 31609), 'mock.patch', 
'mock.patch', (['"""google.cloud.datastore.client.Key"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Key', spec=['__call__'])\n", (31553, 31609), False, 'import mock\n'), ((32099, 32165), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Key"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Key', spec=['__call__'])\n", (32109, 32165), False, 'import mock\n'), ((32702, 32768), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Key"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Key', spec=['__call__'])\n", (32712, 32768), False, 'import mock\n'), ((33184, 33252), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Batch"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Batch', spec=['__call__'])\n", (33194, 33252), False, 'import mock\n'), ((33594, 33668), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Transaction"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Transaction', spec=['__call__'])\n", (33604, 33668), False, 'import mock\n'), ((35210, 35278), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Query"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Query', spec=['__call__'])\n", (35220, 35278), False, 'import mock\n'), ((35900, 35968), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Query"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Query', spec=['__call__'])\n", (35910, 35968), False, 'import mock\n'), ((36944, 37012), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Query"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Query', spec=['__call__'])\n", (36954, 37012), False, 'import mock\n'), ((37546, 37614), 'mock.patch', 'mock.patch', (['"""google.cloud.datastore.client.Query"""'], {'spec': "['__call__']"}), "('google.cloud.datastore.client.Query', spec=['__call__'])\n", (37556, 37614), False, 'import mock\n'), 
((38086, 38099), 'google.cloud.datastore.batch.Batch', 'Batch', (['client'], {}), '(client)\n', (38091, 38099), False, 'from google.cloud.datastore.batch import Batch\n'), ((38582, 38601), 'google.cloud.datastore.transaction.Transaction', 'Transaction', (['client'], {}), '(client)\n', (38593, 38601), False, 'from google.cloud.datastore.transaction import Transaction\n'), ((38644, 38661), 'google.cloud.datastore.batch.Batch.begin', 'Batch.begin', (['xact'], {}), '(xact)\n', (38655, 38661), False, 'from google.cloud.datastore.batch import Batch\n'), ((39314, 39330), 'google.cloud.datastore_v1.proto.entity_pb2.Key', 'entity_pb2.Key', ([], {}), '()\n', (39328, 39330), False, 'from google.cloud.datastore_v1.proto import entity_pb2\n'), ((40817, 40854), 'google.cloud.datastore_v1.proto.datastore_pb2.MutationResult', 'datastore_pb2.MutationResult', ([], {'key': 'key'}), '(key=key)\n', (40845, 40854), False, 'from google.cloud.datastore_v1.proto import datastore_pb2\n'), ((41050, 41091), 'mock.Mock', 'mock.Mock', ([], {'entity': 'result', 'spec': "['entity']"}), "(entity=result, spec=['entity'])\n", (41059, 41091), False, 'import mock\n'), ((41154, 41203), 'mock.Mock', 'mock.Mock', ([], {'entity': 'missing_entity', 'spec': "['entity']"}), "(entity=missing_entity, spec=['entity'])\n", (41163, 41203), False, 'import mock\n'), ((1523, 1563), 'mock.patch', 'mock.patch', (['"""os.getenv"""'], {'new': 'environ.get'}), "('os.getenv', new=environ.get)\n", (1533, 1563), False, 'import mock\n'), ((1828, 1868), 'mock.patch', 'mock.patch', (['"""os.getenv"""'], {'new': 'environ.get'}), "('os.getenv', new=environ.get)\n", (1838, 1868), False, 'import mock\n'), ((6796, 6841), 'mock.patch.object', 'mock.patch.object', (['MUT', '"""_USE_GRPC"""'], {'new': '(True)'}), "(MUT, '_USE_GRPC', new=True)\n", (6813, 6841), False, 'import mock\n'), ((7241, 7287), 'mock.patch.object', 'mock.patch.object', (['MUT', '"""_USE_GRPC"""'], {'new': '(False)'}), "(MUT, '_USE_GRPC', new=False)\n", (7258, 
7287), False, 'import mock\n'), ((7948, 7990), 'mock.patch', 'mock.patch', (['"""os.environ"""'], {'new': 'fake_environ'}), "('os.environ', new=fake_environ)\n", (7958, 7990), False, 'import mock\n'), ((25131, 25139), 'google.cloud.datastore.entity.Entity', 'Entity', ([], {}), '()\n', (25137, 25139), False, 'from google.cloud.datastore.entity import Entity\n'), ((26290, 26322), 'google.cloud.datastore.helpers._property_tuples', '_property_tuples', (['mutated_entity'], {}), '(mutated_entity)\n', (26306, 26322), False, 'from google.cloud.datastore.helpers import _property_tuples\n'), ((27100, 27132), 'google.cloud.datastore.helpers._property_tuples', '_property_tuples', (['mutated_entity'], {}), '(mutated_entity)\n', (27116, 27132), False, 'from google.cloud.datastore.helpers import _property_tuples\n'), ((34492, 34521), 'google.cloud.datastore_v1.types.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {}), '()\n', (34519, 34521), False, 'from google.cloud.datastore_v1.types import TransactionOptions\n'), ((34239, 34268), 'google.cloud.datastore_v1.types.TransactionOptions.ReadOnly', 'TransactionOptions.ReadOnly', ([], {}), '()\n', (34266, 34268), False, 'from google.cloud.datastore_v1.types import TransactionOptions\n')]
|
# -*- coding: utf-8 -*-
"""The CUPS IPP Control Files Parser.
CUPS IPP version 1.0:
* http://tools.ietf.org/html/rfc2565
* http://tools.ietf.org/html/rfc2566
* http://tools.ietf.org/html/rfc2567
* http://tools.ietf.org/html/rfc2568
* http://tools.ietf.org/html/rfc2569
* http://tools.ietf.org/html/rfc2639
CUPS IPP version 1.1:
* http://tools.ietf.org/html/rfc2910
* http://tools.ietf.org/html/rfc2911
* http://tools.ietf.org/html/rfc3196
* http://tools.ietf.org/html/rfc3510
CUPS IPP version 2.0:
* N/A
Also see:
* https://github.com/libyal/assorted/blob/master/documentation/
CUPS%20Internet%20Printing%20Protocol%20format.asciidoc
"""
from __future__ import unicode_literals
import logging
import construct
from dfdatetime import posix_time as dfdatetime_posix_time
from dfdatetime import rfc2579_date_time as dfdatetime_rfc2579_date_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import errors
from plaso.lib import definitions
from plaso.parsers import interface
from plaso.parsers import manager
# TODO: RFC Pendings types: resolution, dateTime, rangeOfInteger.
# "dateTime" is not used by Mac OS, instead it uses integer types.
# TODO: Only tested against CUPS IPP MacOS.
class CupsIppEventData(events.EventData):
"""CUPS IPP event data.
Attributes:
application (str): application that prints the document.
data_dict (dict[str, object]): parsed data coming from the file.
computer_name (str): name of the computer.
copies (int): number of copies.
doc_type (str): type of document.
job_id (str): job identifier.
job_name (str): job name.
owner (str): real name of the user.
printer_id (str): identification name of the print.
uri (str): URL of the CUPS service.
user (str): system user name.
"""
DATA_TYPE = 'cups:ipp:event'
def __init__(self):
"""Initializes event data."""
super(CupsIppEventData, self).__init__(data_type=self.DATA_TYPE)
self.application = None
# TODO: remove data_dict.
self.data_dict = None
self.computer_name = None
self.copies = None
self.data_dict = None
self.doc_type = None
self.job_id = None
self.job_name = None
self.owner = None
self.printer_id = None
self.uri = None
self.user = None
class CupsIppParser(interface.FileObjectParser):
"""Parser for CUPS IPP files. """
NAME = 'cups_ipp'
DESCRIPTION = 'Parser for CUPS IPP files.'
# INFO:
# For each file, we have only one document with three different timestamps:
# Created, process and finished.
# Format:
# [HEADER: MAGIC + KNOWN_TYPE][GROUP A]...[GROUP Z][GROUP_END: 0x03]
# GROUP: [GROUP ID][PAIR A]...[PAIR Z] where [PAIR: NAME + VALUE]
# GROUP ID: [1byte ID]
# PAIR: [TagID][\x00][Name][Value])
# TagID: 1 byte integer with the type of "Value".
# Name: [Length][Text][\00]
# Name can be empty when the name has more than one value.
# Example: family name "<NAME>" with more than one surname.
# Type_Text + [0x06, family, 0x00] + [0x05, lopez, 0x00] +
# Type_Text + [0x00, 0x00] + [0x04, mata, 0x00]
# Value: can be integer, boolean, or text provided by TagID.
# If boolean, Value: [\x01][0x00(False)] or [\x01(True)]
# If integer, Value: [\x04][Integer]
# If text, Value: [Length text][Text][\00]
# Magic number that identify the CUPS IPP supported version.
IPP_MAJOR_VERSION = 2
IPP_MINOR_VERSION = 0
# Supported Operation ID.
IPP_OP_ID = 5
# CUPS IPP File header.
CUPS_IPP_HEADER = construct.Struct(
'cups_ipp_header_struct',
construct.UBInt8('major_version'),
construct.UBInt8('minor_version'),
construct.UBInt16('operation_id'),
construct.UBInt32('request_id'))
# Group ID that indicates the end of the IPP Control file.
GROUP_END = 3
# Identification Groups.
GROUP_LIST = [1, 2, 4, 5, 6, 7]
# Type ID, per cups source file ipp-support.c.
TYPE_GENERAL_INTEGER = 0x20
TYPE_INTEGER = 0x21
TYPE_BOOL = 0x22
TYPE_ENUMERATION = 0x23
TYPE_DATETIME = 0x31
# Type of values that can be extracted.
INTEGER_8 = construct.UBInt8('integer')
INTEGER_32 = construct.UBInt32('integer')
TEXT = construct.PascalString(
'text',
length_field=construct.UBInt8('length'))
BOOLEAN = construct.Struct(
'boolean_value',
construct.Padding(1),
INTEGER_8)
INTEGER = construct.Struct(
'integer_value',
construct.Padding(1),
INTEGER_32)
# This is an RFC2579 datetime.
DATETIME = construct.Struct(
'datetime',
construct.Padding(1),
construct.UBInt16('year'),
construct.UBInt8('month'),
construct.UBInt8('day'),
construct.UBInt8('hour'),
construct.UBInt8('minutes'),
construct.UBInt8('seconds'),
construct.UBInt8('deciseconds'),
construct.String('direction_from_utc', length=1, encoding='ascii'),
construct.UBInt8('hours_from_utc'),
construct.UBInt8('minutes_from_utc'),
)
# Name of the pair.
PAIR_NAME = construct.Struct(
'pair_name',
TEXT,
construct.Padding(1))
# Specific CUPS IPP to generic name.
_NAME_PAIR_TRANSLATION = {
'com.apple.print.JobInfo.PMApplicationName': 'application',
'com.apple.print.JobInfo.PMJobOwner': 'owner',
'DestinationPrinterID': 'printer_id',
'document-format': 'doc_type',
'job-name': 'job_name',
'job-originating-host-name': 'computer_name',
'job-originating-user-name': 'user',
'job-uuid': 'job_id',
'printer-uri': 'uri'}
_DATE_TIME_VALUES = {
'date-time-at-creation': definitions.TIME_DESCRIPTION_CREATION,
'date-time-at-processing': definitions.TIME_DESCRIPTION_START,
'date-time-at-completed': definitions.TIME_DESCRIPTION_END}
_POSIX_TIME_VALUES = {
'time-at-creation': definitions.TIME_DESCRIPTION_CREATION,
'time-at-processing': definitions.TIME_DESCRIPTION_START,
'time-at-completed': definitions.TIME_DESCRIPTION_END}
_DATE_TIME_VALUE_NAMES = list(_DATE_TIME_VALUES.keys())
_DATE_TIME_VALUE_NAMES.extend(list(_POSIX_TIME_VALUES.keys()))
def _GetStringValue(self, data_dict, name, default_value=None):
"""Retrieves a specific string value from the data dict.
Args:
data_dict (dict[str, list[str]): values per name.
name (str): name of the value to retrieve.
Returns:
str: value represented as a string.
"""
values = data_dict.get(name, None)
if not values:
return default_value
for index, value in enumerate(values):
if ',' in value:
values[index] = '"{0:s}"'.format(value)
return ', '.join(values)
def _ReadPair(self, parser_mediator, file_object):
"""Reads an attribute name and value pair from a CUPS IPP event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Returns:
tuple: contains:
str: name or None.
str: value or None.
"""
# Pair = Type ID + Name + Value.
try:
# Can be:
# Group ID + IDtag = Group ID (1byte) + Tag ID (1byte) + '0x00'.
# IDtag = Tag ID (1byte) + '0x00'.
type_id = self.INTEGER_8.parse_stream(file_object)
if type_id == self.GROUP_END:
return None, None
elif type_id in self.GROUP_LIST:
# If it is a group ID we must read the next byte that contains
# the first TagID.
type_id = self.INTEGER_8.parse_stream(file_object)
# 0x00 separator character.
self.INTEGER_8.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
parser_mediator.ProduceExtractionError(
'unable to parse pair identifier with error: {0!s}'.format(
exception))
return None, None
# Name = Length name + name + 0x00
try:
pair_name = self.PAIR_NAME.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
parser_mediator.ProduceExtractionError(
'unable to parse pair name with error: {0!s}'.format(exception))
return None, None
try:
name = pair_name.text.decode('utf-8')
except UnicodeDecodeError as exception:
parser_mediator.ProduceExtractionError(
'unable to decode pair name with error: {0!s}'.format(exception))
return None, None
# Value: can be integer, boolean or text select by Type ID.
if type_id in (
self.TYPE_GENERAL_INTEGER, self.TYPE_INTEGER, self.TYPE_ENUMERATION):
value_structure = self.INTEGER
elif type_id == self.TYPE_BOOL:
value_structure = self.BOOLEAN
elif type_id == self.TYPE_DATETIME:
value_structure = self.DATETIME
else:
value_structure = self.TEXT
try:
value = value_structure.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
parser_mediator.ProduceExtractionError(
'unable to parse value with error: {0!s}'.format(exception))
return None, None
if type_id in (
self.TYPE_GENERAL_INTEGER, self.TYPE_INTEGER, self.TYPE_ENUMERATION):
value = value.integer
elif type_id == self.TYPE_BOOL:
value = bool(value.integer)
elif type_id == self.TYPE_DATETIME:
rfc2579_date_time_tuple = (
value.year, value.month, value.day, value.hour,
value.minutes, value.seconds, value.deciseconds,
value.direction_from_utc, value.hours_from_utc,
value.minutes_from_utc)
value = dfdatetime_rfc2579_date_time.RFC2579DateTime(
rfc2579_date_time_tuple=rfc2579_date_time_tuple)
else:
try:
value = value.decode('utf-8')
except UnicodeDecodeError as exception:
parser_mediator.ProduceExtractionError(
'unable to decode value with error: {0!s}'.format(exception))
return None, None
return name, value
def _ReadPairs(self, parser_mediator, file_object):
"""Reads the attribute name and value pairs from a CUPS IPP event.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Returns:
dict[str, list[str]]: values per name.
"""
data_dict = {}
name, value = self._ReadPair(parser_mediator, file_object)
while name or value:
# Translate the known "name" CUPS IPP to a generic name value.
pretty_name = self._NAME_PAIR_TRANSLATION.get(name, name)
data_dict.setdefault(pretty_name, []).append(value)
name, value = self._ReadPair(parser_mediator, file_object)
return data_dict
def ParseFileObject(self, parser_mediator, file_object, **kwargs):
"""Parses a CUPS IPP file-like object.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
file_object (dfvfs.FileIO): file-like object.
Raises:
UnableToParseFile: when the file cannot be parsed.
"""
try:
header = self.CUPS_IPP_HEADER.parse_stream(file_object)
except (IOError, construct.FieldError) as exception:
raise errors.UnableToParseFile(
'Unable to parse CUPS IPP Header with error: {0!s}'.format(
exception))
if (header.major_version != self.IPP_MAJOR_VERSION or
header.minor_version != self.IPP_MINOR_VERSION):
raise errors.UnableToParseFile(
'[{0:s}] Unsupported version number.'.format(self.NAME))
if header.operation_id != self.IPP_OP_ID:
# Warn if the operation ID differs from the standard one. We should be
# able to parse the file nonetheless.
logging.debug(
'[{0:s}] Unsupported operation identifier in file: {1:s}.'.format(
self.NAME, parser_mediator.GetDisplayName()))
data_dict = self._ReadPairs(parser_mediator, file_object)
time_dict = {}
for name in self._DATE_TIME_VALUE_NAMES:
value = data_dict.get(name, None)
if value is not None:
time_dict[name] = value
del data_dict[name]
event_data = CupsIppEventData()
event_data.application = self._GetStringValue(data_dict, 'application')
event_data.computer_name = self._GetStringValue(data_dict, 'computer_name')
event_data.copies = data_dict.get('copies', [0])[0]
event_data.data_dict = data_dict
event_data.doc_type = self._GetStringValue(data_dict, 'doc_type')
event_data.job_id = self._GetStringValue(data_dict, 'job_id')
event_data.job_name = self._GetStringValue(data_dict, 'job_name')
event_data.user = self._GetStringValue(data_dict, 'user')
event_data.owner = self._GetStringValue(data_dict, 'owner')
event_data.printer_id = self._GetStringValue(data_dict, 'printer_id')
event_data.uri = self._GetStringValue(data_dict, 'uri')
for name, usage in iter(self._DATE_TIME_VALUES.items()):
time_values = time_dict.get(name, [])
for date_time in time_values:
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
for name, usage in iter(self._POSIX_TIME_VALUES.items()):
time_values = time_dict.get(name, [])
for time_value in time_values:
date_time = dfdatetime_posix_time.PosixTime(timestamp=time_value)
event = time_events.DateTimeValuesEvent(date_time, usage)
parser_mediator.ProduceEventWithEventData(event, event_data)
manager.ParsersManager.RegisterParser(CupsIppParser)
|
[
"plaso.containers.time_events.DateTimeValuesEvent",
"construct.String",
"dfdatetime.rfc2579_date_time.RFC2579DateTime",
"construct.UBInt32",
"plaso.parsers.manager.ParsersManager.RegisterParser",
"construct.UBInt8",
"dfdatetime.posix_time.PosixTime",
"construct.UBInt16",
"construct.Padding"
] |
[((13647, 13699), 'plaso.parsers.manager.ParsersManager.RegisterParser', 'manager.ParsersManager.RegisterParser', (['CupsIppParser'], {}), '(CupsIppParser)\n', (13684, 13699), False, 'from plaso.parsers import manager\n'), ((4163, 4190), 'construct.UBInt8', 'construct.UBInt8', (['"""integer"""'], {}), "('integer')\n", (4179, 4190), False, 'import construct\n'), ((4206, 4234), 'construct.UBInt32', 'construct.UBInt32', (['"""integer"""'], {}), "('integer')\n", (4223, 4234), False, 'import construct\n'), ((3641, 3674), 'construct.UBInt8', 'construct.UBInt8', (['"""major_version"""'], {}), "('major_version')\n", (3657, 3674), False, 'import construct\n'), ((3682, 3715), 'construct.UBInt8', 'construct.UBInt8', (['"""minor_version"""'], {}), "('minor_version')\n", (3698, 3715), False, 'import construct\n'), ((3723, 3756), 'construct.UBInt16', 'construct.UBInt16', (['"""operation_id"""'], {}), "('operation_id')\n", (3740, 3756), False, 'import construct\n'), ((3764, 3795), 'construct.UBInt32', 'construct.UBInt32', (['"""request_id"""'], {}), "('request_id')\n", (3781, 3795), False, 'import construct\n'), ((4388, 4408), 'construct.Padding', 'construct.Padding', (['(1)'], {}), '(1)\n', (4405, 4408), False, 'import construct\n'), ((4486, 4506), 'construct.Padding', 'construct.Padding', (['(1)'], {}), '(1)\n', (4503, 4506), False, 'import construct\n'), ((4615, 4635), 'construct.Padding', 'construct.Padding', (['(1)'], {}), '(1)\n', (4632, 4635), False, 'import construct\n'), ((4643, 4668), 'construct.UBInt16', 'construct.UBInt16', (['"""year"""'], {}), "('year')\n", (4660, 4668), False, 'import construct\n'), ((4676, 4701), 'construct.UBInt8', 'construct.UBInt8', (['"""month"""'], {}), "('month')\n", (4692, 4701), False, 'import construct\n'), ((4709, 4732), 'construct.UBInt8', 'construct.UBInt8', (['"""day"""'], {}), "('day')\n", (4725, 4732), False, 'import construct\n'), ((4740, 4764), 'construct.UBInt8', 'construct.UBInt8', (['"""hour"""'], {}), "('hour')\n", (4756, 
4764), False, 'import construct\n'), ((4772, 4799), 'construct.UBInt8', 'construct.UBInt8', (['"""minutes"""'], {}), "('minutes')\n", (4788, 4799), False, 'import construct\n'), ((4807, 4834), 'construct.UBInt8', 'construct.UBInt8', (['"""seconds"""'], {}), "('seconds')\n", (4823, 4834), False, 'import construct\n'), ((4842, 4873), 'construct.UBInt8', 'construct.UBInt8', (['"""deciseconds"""'], {}), "('deciseconds')\n", (4858, 4873), False, 'import construct\n'), ((4881, 4947), 'construct.String', 'construct.String', (['"""direction_from_utc"""'], {'length': '(1)', 'encoding': '"""ascii"""'}), "('direction_from_utc', length=1, encoding='ascii')\n", (4897, 4947), False, 'import construct\n'), ((4955, 4989), 'construct.UBInt8', 'construct.UBInt8', (['"""hours_from_utc"""'], {}), "('hours_from_utc')\n", (4971, 4989), False, 'import construct\n'), ((4997, 5033), 'construct.UBInt8', 'construct.UBInt8', (['"""minutes_from_utc"""'], {}), "('minutes_from_utc')\n", (5013, 5033), False, 'import construct\n'), ((5131, 5151), 'construct.Padding', 'construct.Padding', (['(1)'], {}), '(1)\n', (5148, 5151), False, 'import construct\n'), ((4301, 4327), 'construct.UBInt8', 'construct.UBInt8', (['"""length"""'], {}), "('length')\n", (4317, 4327), False, 'import construct\n'), ((13173, 13222), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', (['date_time', 'usage'], {}), '(date_time, usage)\n', (13204, 13222), False, 'from plaso.containers import time_events\n'), ((13456, 13509), 'dfdatetime.posix_time.PosixTime', 'dfdatetime_posix_time.PosixTime', ([], {'timestamp': 'time_value'}), '(timestamp=time_value)\n', (13487, 13509), True, 'from dfdatetime import posix_time as dfdatetime_posix_time\n'), ((13526, 13575), 'plaso.containers.time_events.DateTimeValuesEvent', 'time_events.DateTimeValuesEvent', (['date_time', 'usage'], {}), '(date_time, usage)\n', (13557, 13575), False, 'from plaso.containers import time_events\n'), ((9644, 9742), 
'dfdatetime.rfc2579_date_time.RFC2579DateTime', 'dfdatetime_rfc2579_date_time.RFC2579DateTime', ([], {'rfc2579_date_time_tuple': 'rfc2579_date_time_tuple'}), '(rfc2579_date_time_tuple=\n rfc2579_date_time_tuple)\n', (9688, 9742), True, 'from dfdatetime import rfc2579_date_time as dfdatetime_rfc2579_date_time\n')]
|
from django_visipedia import client, PersistentStorage
from django_visipedia.models import VisipediaUser
from django.contrib.auth import authenticate, login, logout
class VisipediaMiddleware(object):
@staticmethod
def process_request(request):
assert hasattr(request,
'session'), "The Visipedia authentication middleware requires session middleware to be installed. Edit your MIDDLEWARE_CLASSES setting to insert 'django.contrib.sessions.middleware.SessionMiddleware'."
if request.path.endswith('.jpg'):
return
client.persistent_storage = PersistentStorage(request.session)
cookie = request.COOKIES.get('visipedia_session', None)
if cookie is None:
# user is not logged in to Visipedia, log her out
# but only if she has the Visipedia account
if request.user.is_authenticated():
try:
request.user.visipedia_user
logout(request)
except VisipediaUser.DoesNotExist:
pass
else:
# somebody is logged in to Visipedia, but not the same user is logged in here
if request.user.is_authenticated() and cookie != request.session.get('visipedia_session', None):
logout(request)
# the visipedia_cookie exists - somebody should be authenticated
if not request.user.is_authenticated():
user = authenticate(visipedia_session=cookie)
# ignoring unsucessful authentication (bad visipedia cookie, etc.)
# TODO: is it really the best solution? (will send us to sign-in screen even when signed in)
if user is not None:
login(request, user)
request.session['visipedia_session'] = cookie
|
[
"django.contrib.auth.login",
"django.contrib.auth.logout",
"django_visipedia.PersistentStorage",
"django.contrib.auth.authenticate"
] |
[((626, 660), 'django_visipedia.PersistentStorage', 'PersistentStorage', (['request.session'], {}), '(request.session)\n', (643, 660), False, 'from django_visipedia import client, PersistentStorage\n'), ((1344, 1359), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (1350, 1359), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1517, 1555), 'django.contrib.auth.authenticate', 'authenticate', ([], {'visipedia_session': 'cookie'}), '(visipedia_session=cookie)\n', (1529, 1555), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1017, 1032), 'django.contrib.auth.logout', 'logout', (['request'], {}), '(request)\n', (1023, 1032), False, 'from django.contrib.auth import authenticate, login, logout\n'), ((1811, 1831), 'django.contrib.auth.login', 'login', (['request', 'user'], {}), '(request, user)\n', (1816, 1831), False, 'from django.contrib.auth import authenticate, login, logout\n')]
|
'''
selenium
软件测试工具, 功能测试, 单元测试, 性能测试, 压力测试, 安全性测试
1) 安装 selenium
pip install selenium
2) 浏览器的驱动
下载驱动程序, 版本
查看Chrome浏览器的版本
3) 将驱动程序拷贝到项目目录下
注意:
驱动一般直接放到项目目录下,
如果不行, 放到Anaconda下的Scripts文件夹下
'''
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import time
# 打开 浏览器, 获取到浏览器对象
browser = webdriver.Chrome()
# webdriver.Firefox()
# webdriver.Opera()
# 请求URL
url = 'https://www.jd.com/'
browser.get(url)
# 找到页面中的输入框
input = browser.find_element_by_id('key')
# 向输入框中添加字符串
input.send_keys("手机")
# 模拟键盘输入回车
input.send_keys('\n')
# 等待页面
'''
WebDriverWait()
第一个参数: 等待的浏览器对象
第二个参数: 等待的最长时间
'''
wait = WebDriverWait(browser, 10)
# 等待一个操作, 操作类型
# presence_of_element_located( 对象 ) 等待操作
# 等待对象加载完成
# 等待对象的加载, 直到找到对象, 或者超出设置的等待时间
goods_list = wait.until(EC.presence_of_element_located((By.ID, 'J_goodsList')))
print(goods_list)
# 获取手机页面信息
#print(browser.page_source)
# 选择节点的方式
# browser.find_element_by_id()
# browser.find_element_by_class_name()
# browser.find_element_by_css_selector()
div = browser.find_element_by_class_name('gl-i-wrap')
print(div.text) # 类似于bs4 get_text()
print(div.get_attribute('class'))
ls_li = goods_list.find_elements_by_class_name('gl-item')
print(len(ls_li))
# 在浏览器执行一段JS代码, 让页面滚动到指定位置
browser.execute_script('arguments[0].scrollIntoView();', ls_li[len(ls_li)-1])
# 等待 后面的元素加载完成
time.sleep(1)
ls_li = goods_list.find_elements_by_class_name('gl-item')
print(len(ls_li))
# 遍历60节点, 找到每一个商品的信息
for li in ls_li:
price = li.find_element_by_css_selector('.p-price i')
print(price.text)
name = li.find_element_by_css_selector('.p-name em')
print(name.text)
time.sleep(100)
# 关闭浏览器对象
browser.close()
|
[
"selenium.webdriver.support.expected_conditions.presence_of_element_located",
"time.sleep",
"selenium.webdriver.Chrome",
"selenium.webdriver.support.wait.WebDriverWait"
] |
[((457, 475), 'selenium.webdriver.Chrome', 'webdriver.Chrome', ([], {}), '()\n', (473, 475), False, 'from selenium import webdriver\n'), ((767, 793), 'selenium.webdriver.support.wait.WebDriverWait', 'WebDriverWait', (['browser', '(10)'], {}), '(browser, 10)\n', (780, 793), False, 'from selenium.webdriver.support.wait import WebDriverWait\n'), ((1494, 1507), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1504, 1507), False, 'import time\n'), ((1783, 1798), 'time.sleep', 'time.sleep', (['(100)'], {}), '(100)\n', (1793, 1798), False, 'import time\n'), ((930, 984), 'selenium.webdriver.support.expected_conditions.presence_of_element_located', 'EC.presence_of_element_located', (["(By.ID, 'J_goodsList')"], {}), "((By.ID, 'J_goodsList'))\n", (960, 984), True, 'from selenium.webdriver.support import expected_conditions as EC\n')]
|
# Enable HSM shapes (unsetup meas_extensions_shapeHSM to disable)
# 'config' is a SourceMeasurementConfig.
import os.path
from lsst.utils import getPackageDir
try:
config.load(os.path.join(getPackageDir("meas_extensions_shapeHSM"), "config", "enable.py"))
config.plugins["ext_shapeHSM_HsmShapeRegauss"].deblendNChild = "deblend_nChild"
except Exception as e:
print("Cannot enable shapeHSM (%s): disabling HSM shape measurements" % (e,))
|
[
"lsst.utils.getPackageDir"
] |
[((194, 235), 'lsst.utils.getPackageDir', 'getPackageDir', (['"""meas_extensions_shapeHSM"""'], {}), "('meas_extensions_shapeHSM')\n", (207, 235), False, 'from lsst.utils import getPackageDir\n')]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import logging
import pickle
import numpy as np
import sklearn
from data import FaceImageIter
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
sys.path.append(os.path.join(os.path.dirname(__file__), 'common'))
#import face_image
import fresnet
import fmobilenet
logger = logging.getLogger()
logger.setLevel(logging.INFO)
AGE=100
args = None
class AccMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(AccMetric, self).__init__(
'acc', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
self.count = 0
def update(self, labels, preds):
self.count+=1
label = labels[0].asnumpy()[:,0:1]
pred_label = preds[-1].asnumpy()[:,0:2]
pred_label = np.argmax(pred_label, axis=self.axis)
pred_label = pred_label.astype('int32').flatten()
label = label.astype('int32').flatten()
assert label.shape==pred_label.shape
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
class LossValueMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(LossValueMetric, self).__init__(
'lossvalue', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
def update(self, labels, preds):
loss = preds[-1].asnumpy()[0]
self.sum_metric += loss
self.num_inst += 1.0
gt_label = preds[-2].asnumpy()
#print(gt_label)
class MAEMetric(mx.metric.EvalMetric):
def __init__(self):
self.axis = 1
super(MAEMetric, self).__init__(
'MAE', axis=self.axis,
output_names=None, label_names=None)
self.losses = []
self.count = 0
def update(self, labels, preds):
self.count+=1
label = labels[0].asnumpy()
label_age = np.count_nonzero(label[:,1:], axis=1)
pred_age = np.zeros( label_age.shape, dtype=np.int)
#pred_age = np.zeros( label_age.shape, dtype=np.float32)
pred = preds[-1].asnumpy()
for i in range(AGE):
_pred = pred[:,2+i*2:4+i*2]
_pred = np.argmax(_pred, axis=1)
#pred = pred[:,1]
pred_age += _pred
#pred_age = pred_age.astype(np.int)
mae = np.mean(np.abs(label_age - pred_age))
self.sum_metric += mae
self.num_inst += 1.0
class CUMMetric(mx.metric.EvalMetric):
def __init__(self, n=5):
self.axis = 1
self.n = n
super(CUMMetric, self).__init__(
'CUM_%d'%n, axis=self.axis,
output_names=None, label_names=None)
self.losses = []
self.count = 0
def update(self, labels, preds):
self.count+=1
label = labels[0].asnumpy()
label_age = np.count_nonzero(label[:,1:], axis=1)
pred_age = np.zeros( label_age.shape, dtype=np.int)
pred = preds[-1].asnumpy()
for i in range(AGE):
_pred = pred[:,2+i*2:4+i*2]
_pred = np.argmax(_pred, axis=1)
#pred = pred[:,1]
pred_age += _pred
diff = np.abs(label_age - pred_age)
cum = np.sum( (diff<self.n) )
self.sum_metric += cum
self.num_inst += len(label_age)
def parse_args():
parser = argparse.ArgumentParser(description='Train face network')
# general
parser.add_argument('--data-dir', default='', help='training set directory')
parser.add_argument('--prefix', default='../model/model', help='directory to save model.')
parser.add_argument('--pretrained', default='', help='pretrained model to load')
parser.add_argument('--ckpt', type=int, default=1, help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
parser.add_argument('--loss-type', type=int, default=4, help='loss type')
parser.add_argument('--verbose', type=int, default=2000, help='do verification testing and model saving every verbose batches')
parser.add_argument('--max-steps', type=int, default=0, help='max training batches')
parser.add_argument('--end-epoch', type=int, default=100000, help='training epoch size.')
parser.add_argument('--network', default='r50', help='specify network')
parser.add_argument('--image-size', default='112,112', help='specify input image height and width')
parser.add_argument('--version-input', type=int, default=1, help='network input config')
parser.add_argument('--version-output', type=str, default='GAP', help='network embedding output config')
parser.add_argument('--version-act', type=str, default='prelu', help='network activation config')
parser.add_argument('--multiplier', type=float, default=1.0, help='')
parser.add_argument('--lr', type=float, default=0.1, help='start learning rate')
parser.add_argument('--lr-steps', type=str, default='', help='steps of lr changing')
parser.add_argument('--wd', type=float, default=0.0005, help='weight decay')
parser.add_argument('--bn-mom', type=float, default=0.9, help='bn mom')
parser.add_argument('--mom', type=float, default=0.9, help='momentum')
parser.add_argument('--per-batch-size', type=int, default=128, help='batch size in each context')
parser.add_argument('--rand-mirror', type=int, default=1, help='if do random mirror in training')
parser.add_argument('--cutoff', type=int, default=0, help='cut off aug')
parser.add_argument('--color', type=int, default=0, help='color jittering aug')
parser.add_argument('--ce-loss', default=False, action='store_true', help='if output ce loss')
args = parser.parse_args()
return args
def get_symbol(args, arg_params, aux_params):
data_shape = (args.image_channel,args.image_h,args.image_w)
image_shape = ",".join([str(x) for x in data_shape])
margin_symbols = []
if args.network[0]=='m':
fc1 = fmobilenet.get_symbol(AGE*2+2,
multiplier = args.multiplier,
version_input=args.version_input,
version_output=args.version_output)
else:
fc1 = fresnet.get_symbol(AGE*2+2, args.num_layers,
version_input=args.version_input,
version_output=args.version_output)
label = mx.symbol.Variable('softmax_label')
gender_label = mx.symbol.slice_axis(data = label, axis=1, begin=0, end=1)
gender_label = mx.symbol.reshape(gender_label, shape=(args.per_batch_size,))
gender_fc1 = mx.symbol.slice_axis(data = fc1, axis=1, begin=0, end=2)
#gender_fc7 = mx.sym.FullyConnected(data=gender_fc1, num_hidden=2, name='gender_fc7')
gender_softmax = mx.symbol.SoftmaxOutput(data=gender_fc1, label = gender_label, name='gender_softmax', normalization='valid', use_ignore=True, ignore_label = 9999)
outs = [gender_softmax]
for i in range(AGE):
age_label = mx.symbol.slice_axis(data = label, axis=1, begin=i+1, end=i+2)
age_label = mx.symbol.reshape(age_label, shape=(args.per_batch_size,))
age_fc1 = mx.symbol.slice_axis(data = fc1, axis=1, begin=2+i*2, end=4+i*2)
#age_fc7 = mx.sym.FullyConnected(data=age_fc1, num_hidden=2, name='age_fc7_%i'%i)
age_softmax = mx.symbol.SoftmaxOutput(data=age_fc1, label = age_label, name='age_softmax_%d'%i, normalization='valid', grad_scale=1)
outs.append(age_softmax)
outs.append(mx.sym.BlockGrad(fc1))
out = mx.symbol.Group(outs)
return (out, arg_params, aux_params)
def train_net(args):
ctx = []
cvd = os.environ['CUDA_VISIBLE_DEVICES'].strip()
if len(cvd)>0:
for i in range(len(cvd.split(','))):
ctx.append(mx.gpu(i))
if len(ctx)==0:
ctx = [mx.cpu()]
print('use cpu')
else:
print('gpu num:', len(ctx))
prefix = args.prefix
prefix_dir = os.path.dirname(prefix)
if not os.path.exists(prefix_dir):
os.makedirs(prefix_dir)
end_epoch = args.end_epoch
args.ctx_num = len(ctx)
args.num_layers = int(args.network[1:])
print('num_layers', args.num_layers)
if args.per_batch_size==0:
args.per_batch_size = 128
args.batch_size = args.per_batch_size*args.ctx_num
args.rescale_threshold = 0
args.image_channel = 3
data_dir_list = args.data_dir.split(',')
assert len(data_dir_list)==1
data_dir = data_dir_list[0]
path_imgrec = None
path_imglist = None
image_size = [int(x) for x in args.image_size.split(',')]
assert len(image_size)==2
assert image_size[0]==image_size[1]
args.image_h = image_size[0]
args.image_w = image_size[1]
print('image_size', image_size)
path_imgrec = os.path.join(data_dir, "train.rec")
path_imgrec_val = os.path.join(data_dir, "val.rec")
print('Called with argument:', args)
data_shape = (args.image_channel,image_size[0],image_size[1])
mean = None
begin_epoch = 0
base_lr = args.lr
base_wd = args.wd
base_mom = args.mom
if len(args.pretrained)==0:
arg_params = None
aux_params = None
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
else:
vec = args.pretrained.split(',')
print('loading', vec)
_, arg_params, aux_params = mx.model.load_checkpoint(vec[0], int(vec[1]))
sym, arg_params, aux_params = get_symbol(args, arg_params, aux_params)
#label_name = 'softmax_label'
#label_shape = (args.batch_size,)
model = mx.mod.Module(
context = ctx,
symbol = sym,
)
val_dataiter = None
train_dataiter = FaceImageIter(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec,
shuffle = True,
rand_mirror = args.rand_mirror,
mean = mean,
cutoff = args.cutoff,
color_jittering = args.color,
)
val_dataiter = FaceImageIter(
batch_size = args.batch_size,
data_shape = data_shape,
path_imgrec = path_imgrec_val,
shuffle = False,
rand_mirror = False,
mean = mean,
)
metric = mx.metric.CompositeEvalMetric([AccMetric(), MAEMetric(), CUMMetric()])
if args.network[0]=='r' or args.network[0]=='y':
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2) #resnet style
elif args.network[0]=='i' or args.network[0]=='x':
initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="in", magnitude=2) #inception
else:
initializer = mx.init.Xavier(rnd_type='uniform', factor_type="in", magnitude=2)
_rescale = 1.0/args.ctx_num
opt = optimizer.SGD(learning_rate=base_lr, momentum=base_mom, wd=base_wd, rescale_grad=_rescale)
#opt = optimizer.Nadam(learning_rate=base_lr, wd=base_wd, rescale_grad=_rescale)
som = 20
_cb = mx.callback.Speedometer(args.batch_size, som)
lr_steps = [int(x) for x in args.lr_steps.split(',')]
global_step = [0]
def _batch_callback(param):
_cb(param)
global_step[0]+=1
mbatch = global_step[0]
for _lr in lr_steps:
if mbatch==_lr:
opt.lr *= 0.1
print('lr change to', opt.lr)
break
if mbatch%1000==0:
print('lr-batch-epoch:',opt.lr,param.nbatch,param.epoch)
if mbatch==lr_steps[-1]:
arg, aux = model.get_params()
all_layers = model.symbol.get_internals()
_sym = all_layers['fc1_output']
mx.model.save_checkpoint(args.prefix, 0, _sym, arg, aux)
sys.exit(0)
epoch_cb = None
train_dataiter = mx.io.PrefetchingIter(train_dataiter)
print('start fitting')
model.fit(train_dataiter,
begin_epoch = begin_epoch,
num_epoch = end_epoch,
eval_data = val_dataiter,
eval_metric = metric,
kvstore = 'device',
optimizer = opt,
#optimizer_params = optimizer_params,
initializer = initializer,
arg_params = arg_params,
aux_params = aux_params,
allow_missing = True,
batch_end_callback = _batch_callback,
epoch_end_callback = epoch_cb )
def main():
  """Entry point: parse command-line options into the module-level ``args``
  and start training.

  ``args`` is declared global because train_net() and its callbacks read it
  at module scope rather than receiving every option as a parameter.
  """
  #time.sleep(3600*6.5)
  global args
  args = parse_args()
  train_net(args)
# Script entry point.
if __name__ == '__main__':
    main()
|
[
"numpy.abs",
"argparse.ArgumentParser",
"numpy.sum",
"numpy.argmax",
"fresnet.get_symbol",
"fmobilenet.get_symbol",
"mxnet.io.PrefetchingIter",
"mxnet.optimizer.SGD",
"os.path.join",
"mxnet.sym.BlockGrad",
"mxnet.callback.Speedometer",
"os.path.dirname",
"os.path.exists",
"mxnet.gpu",
"mxnet.symbol.SoftmaxOutput",
"mxnet.init.Xavier",
"mxnet.symbol.Group",
"mxnet.cpu",
"sys.exit",
"numpy.count_nonzero",
"os.makedirs",
"mxnet.model.save_checkpoint",
"mxnet.mod.Module",
"data.FaceImageIter",
"numpy.zeros",
"mxnet.symbol.Variable",
"mxnet.symbol.reshape",
"mxnet.symbol.slice_axis",
"logging.getLogger"
] |
[((484, 503), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (501, 503), False, 'import logging\n'), ((3241, 3298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Train face network"""'}), "(description='Train face network')\n", (3264, 3298), False, 'import argparse\n'), ((6073, 6108), 'mxnet.symbol.Variable', 'mx.symbol.Variable', (['"""softmax_label"""'], {}), "('softmax_label')\n", (6091, 6108), True, 'import mxnet as mx\n'), ((6126, 6182), 'mxnet.symbol.slice_axis', 'mx.symbol.slice_axis', ([], {'data': 'label', 'axis': '(1)', 'begin': '(0)', 'end': '(1)'}), '(data=label, axis=1, begin=0, end=1)\n', (6146, 6182), True, 'import mxnet as mx\n'), ((6202, 6263), 'mxnet.symbol.reshape', 'mx.symbol.reshape', (['gender_label'], {'shape': '(args.per_batch_size,)'}), '(gender_label, shape=(args.per_batch_size,))\n', (6219, 6263), True, 'import mxnet as mx\n'), ((6279, 6333), 'mxnet.symbol.slice_axis', 'mx.symbol.slice_axis', ([], {'data': 'fc1', 'axis': '(1)', 'begin': '(0)', 'end': '(2)'}), '(data=fc1, axis=1, begin=0, end=2)\n', (6299, 6333), True, 'import mxnet as mx\n'), ((6443, 6595), 'mxnet.symbol.SoftmaxOutput', 'mx.symbol.SoftmaxOutput', ([], {'data': 'gender_fc1', 'label': 'gender_label', 'name': '"""gender_softmax"""', 'normalization': '"""valid"""', 'use_ignore': '(True)', 'ignore_label': '(9999)'}), "(data=gender_fc1, label=gender_label, name=\n 'gender_softmax', normalization='valid', use_ignore=True, ignore_label=9999\n )\n", (6466, 6595), True, 'import mxnet as mx\n'), ((7170, 7191), 'mxnet.symbol.Group', 'mx.symbol.Group', (['outs'], {}), '(outs)\n', (7185, 7191), True, 'import mxnet as mx\n'), ((7563, 7586), 'os.path.dirname', 'os.path.dirname', (['prefix'], {}), '(prefix)\n', (7578, 7586), False, 'import os\n'), ((8386, 8421), 'os.path.join', 'os.path.join', (['data_dir', '"""train.rec"""'], {}), "(data_dir, 'train.rec')\n", (8398, 8421), False, 'import os\n'), ((8444, 8477), 'os.path.join', 'os.path.join', 
(['data_dir', '"""val.rec"""'], {}), "(data_dir, 'val.rec')\n", (8456, 8477), False, 'import os\n'), ((9168, 9206), 'mxnet.mod.Module', 'mx.mod.Module', ([], {'context': 'ctx', 'symbol': 'sym'}), '(context=ctx, symbol=sym)\n', (9181, 9206), True, 'import mxnet as mx\n'), ((9293, 9493), 'data.FaceImageIter', 'FaceImageIter', ([], {'batch_size': 'args.batch_size', 'data_shape': 'data_shape', 'path_imgrec': 'path_imgrec', 'shuffle': '(True)', 'rand_mirror': 'args.rand_mirror', 'mean': 'mean', 'cutoff': 'args.cutoff', 'color_jittering': 'args.color'}), '(batch_size=args.batch_size, data_shape=data_shape,\n path_imgrec=path_imgrec, shuffle=True, rand_mirror=args.rand_mirror,\n mean=mean, cutoff=args.cutoff, color_jittering=args.color)\n', (9306, 9493), False, 'from data import FaceImageIter\n'), ((9678, 9820), 'data.FaceImageIter', 'FaceImageIter', ([], {'batch_size': 'args.batch_size', 'data_shape': 'data_shape', 'path_imgrec': 'path_imgrec_val', 'shuffle': '(False)', 'rand_mirror': '(False)', 'mean': 'mean'}), '(batch_size=args.batch_size, data_shape=data_shape,\n path_imgrec=path_imgrec_val, shuffle=False, rand_mirror=False, mean=mean)\n', (9691, 9820), False, 'from data import FaceImageIter\n'), ((10483, 10577), 'mxnet.optimizer.SGD', 'optimizer.SGD', ([], {'learning_rate': 'base_lr', 'momentum': 'base_mom', 'wd': 'base_wd', 'rescale_grad': '_rescale'}), '(learning_rate=base_lr, momentum=base_mom, wd=base_wd,\n rescale_grad=_rescale)\n', (10496, 10577), True, 'import mxnet.optimizer as optimizer\n'), ((10682, 10727), 'mxnet.callback.Speedometer', 'mx.callback.Speedometer', (['args.batch_size', 'som'], {}), '(args.batch_size, som)\n', (10705, 10727), True, 'import mxnet as mx\n'), ((11419, 11456), 'mxnet.io.PrefetchingIter', 'mx.io.PrefetchingIter', (['train_dataiter'], {}), '(train_dataiter)\n', (11440, 11456), True, 'import mxnet as mx\n'), ((383, 408), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (398, 408), False, 'import os\n'), 
((945, 982), 'numpy.argmax', 'np.argmax', (['pred_label'], {'axis': 'self.axis'}), '(pred_label, axis=self.axis)\n', (954, 982), True, 'import numpy as np\n'), ((1971, 2009), 'numpy.count_nonzero', 'np.count_nonzero', (['label[:, 1:]'], {'axis': '(1)'}), '(label[:, 1:], axis=1)\n', (1987, 2009), True, 'import numpy as np\n'), ((2024, 2063), 'numpy.zeros', 'np.zeros', (['label_age.shape'], {'dtype': 'np.int'}), '(label_age.shape, dtype=np.int)\n', (2032, 2063), True, 'import numpy as np\n'), ((2803, 2841), 'numpy.count_nonzero', 'np.count_nonzero', (['label[:, 1:]'], {'axis': '(1)'}), '(label[:, 1:], axis=1)\n', (2819, 2841), True, 'import numpy as np\n'), ((2856, 2895), 'numpy.zeros', 'np.zeros', (['label_age.shape'], {'dtype': 'np.int'}), '(label_age.shape, dtype=np.int)\n', (2864, 2895), True, 'import numpy as np\n'), ((3085, 3113), 'numpy.abs', 'np.abs', (['(label_age - pred_age)'], {}), '(label_age - pred_age)\n', (3091, 3113), True, 'import numpy as np\n'), ((3124, 3145), 'numpy.sum', 'np.sum', (['(diff < self.n)'], {}), '(diff < self.n)\n', (3130, 3145), True, 'import numpy as np\n'), ((5759, 5895), 'fmobilenet.get_symbol', 'fmobilenet.get_symbol', (['(AGE * 2 + 2)'], {'multiplier': 'args.multiplier', 'version_input': 'args.version_input', 'version_output': 'args.version_output'}), '(AGE * 2 + 2, multiplier=args.multiplier,\n version_input=args.version_input, version_output=args.version_output)\n', (5780, 5895), False, 'import fmobilenet\n'), ((5932, 6055), 'fresnet.get_symbol', 'fresnet.get_symbol', (['(AGE * 2 + 2)', 'args.num_layers'], {'version_input': 'args.version_input', 'version_output': 'args.version_output'}), '(AGE * 2 + 2, args.num_layers, version_input=args.\n version_input, version_output=args.version_output)\n', (5950, 6055), False, 'import fresnet\n'), ((6655, 6719), 'mxnet.symbol.slice_axis', 'mx.symbol.slice_axis', ([], {'data': 'label', 'axis': '(1)', 'begin': '(i + 1)', 'end': '(i + 2)'}), '(data=label, axis=1, begin=i + 1, end=i + 2)\n', 
(6675, 6719), True, 'import mxnet as mx\n'), ((6734, 6792), 'mxnet.symbol.reshape', 'mx.symbol.reshape', (['age_label'], {'shape': '(args.per_batch_size,)'}), '(age_label, shape=(args.per_batch_size,))\n', (6751, 6792), True, 'import mxnet as mx\n'), ((6807, 6877), 'mxnet.symbol.slice_axis', 'mx.symbol.slice_axis', ([], {'data': 'fc1', 'axis': '(1)', 'begin': '(2 + i * 2)', 'end': '(4 + i * 2)'}), '(data=fc1, axis=1, begin=2 + i * 2, end=4 + i * 2)\n', (6827, 6877), True, 'import mxnet as mx\n'), ((6976, 7099), 'mxnet.symbol.SoftmaxOutput', 'mx.symbol.SoftmaxOutput', ([], {'data': 'age_fc1', 'label': 'age_label', 'name': "('age_softmax_%d' % i)", 'normalization': '"""valid"""', 'grad_scale': '(1)'}), "(data=age_fc1, label=age_label, name=\n 'age_softmax_%d' % i, normalization='valid', grad_scale=1)\n", (6999, 7099), True, 'import mxnet as mx\n'), ((7138, 7159), 'mxnet.sym.BlockGrad', 'mx.sym.BlockGrad', (['fc1'], {}), '(fc1)\n', (7154, 7159), True, 'import mxnet as mx\n'), ((7598, 7624), 'os.path.exists', 'os.path.exists', (['prefix_dir'], {}), '(prefix_dir)\n', (7612, 7624), False, 'import os\n'), ((7632, 7655), 'os.makedirs', 'os.makedirs', (['prefix_dir'], {}), '(prefix_dir)\n', (7643, 7655), False, 'import os\n'), ((10110, 10177), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'rnd_type': '"""gaussian"""', 'factor_type': '"""out"""', 'magnitude': '(2)'}), "(rnd_type='gaussian', factor_type='out', magnitude=2)\n", (10124, 10177), True, 'import mxnet as mx\n'), ((2230, 2254), 'numpy.argmax', 'np.argmax', (['_pred'], {'axis': '(1)'}), '(_pred, axis=1)\n', (2239, 2254), True, 'import numpy as np\n'), ((2361, 2389), 'numpy.abs', 'np.abs', (['(label_age - pred_age)'], {}), '(label_age - pred_age)\n', (2367, 2389), True, 'import numpy as np\n'), ((3001, 3025), 'numpy.argmax', 'np.argmax', (['_pred'], {'axis': '(1)'}), '(_pred, axis=1)\n', (3010, 3025), True, 'import numpy as np\n'), ((7444, 7452), 'mxnet.cpu', 'mx.cpu', ([], {}), '()\n', (7450, 7452), True, 'import 
mxnet as mx\n'), ((10267, 10333), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'rnd_type': '"""gaussian"""', 'factor_type': '"""in"""', 'magnitude': '(2)'}), "(rnd_type='gaussian', factor_type='in', magnitude=2)\n", (10281, 10333), True, 'import mxnet as mx\n'), ((10375, 10440), 'mxnet.init.Xavier', 'mx.init.Xavier', ([], {'rnd_type': '"""uniform"""', 'factor_type': '"""in"""', 'magnitude': '(2)'}), "(rnd_type='uniform', factor_type='in', magnitude=2)\n", (10389, 10440), True, 'import mxnet as mx\n'), ((11300, 11356), 'mxnet.model.save_checkpoint', 'mx.model.save_checkpoint', (['args.prefix', '(0)', '_sym', 'arg', 'aux'], {}), '(args.prefix, 0, _sym, arg, aux)\n', (11324, 11356), True, 'import mxnet as mx\n'), ((11365, 11376), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (11373, 11376), False, 'import sys\n'), ((7400, 7409), 'mxnet.gpu', 'mx.gpu', (['i'], {}), '(i)\n', (7406, 7409), True, 'import mxnet as mx\n')]
|
from validator import Validator, DatetimeField, create_validator
class V(Validator):
    """Minimal validator schema: a single ``create_at`` datetime field with
    default parameters, shared by all tests in this module."""
    create_at = DatetimeField()
def test_ok():
    """Integer timestamps, numeric strings, and formatted strings all validate."""
    for raw in (1532339910, '1532339910', '2018/07/01 17:01:20'):
        checker = V({'create_at': raw})
        assert checker.is_valid()
def test_wrong_value():
    """A string that is not a datetime in any accepted form must fail."""
    checker = V({'create_at': 'abc'})
    assert not checker.is_valid(), checker.str_errors
def test_mock_data():
    """mock_data() yields a ``create_at`` entry that the schema itself accepts."""
    mocked = V.mock_data()
    assert 'create_at' in mocked
    assert V(mocked).is_valid()
def test_to_dict():
    """Serialized schema lists every field parameter plus type and format info."""
    schema = V.to_dict()
    assert 'create_at' in schema
    info = schema['create_at']
    for param in DatetimeField.PARAMS:
        assert param in info
    assert info['type'] == DatetimeField.FIELD_TYPE_NAME
    assert info['dt_format'] == DatetimeField.DEFAULT_FORMAT
def test_create_valiadtor():
    """create_validator builds a working Validator subclass from a dict schema.

    NOTE(review): the function name misspells "validator"; kept as-is because
    renaming would change the collected test id.
    """
    schema = {
        'create_at': {
            'type': 'datetime',
            'dt_format': '%Y/%m/%d %H:%M:%S',
            'tzinfo': 'Asia/Shanghai'
        }
    }
    Generated = create_validator(schema)
    assert issubclass(Generated, Validator)
    checker = Generated({'create_at': '2018/07/01 17:01:20'})
    assert checker.is_valid(), checker.str_errors
|
[
"validator.create_validator",
"validator.DatetimeField"
] |
[((107, 122), 'validator.DatetimeField', 'DatetimeField', ([], {}), '()\n', (120, 122), False, 'from validator import Validator, DatetimeField, create_validator\n'), ((1140, 1162), 'validator.create_validator', 'create_validator', (['data'], {}), '(data)\n', (1156, 1162), False, 'from validator import Validator, DatetimeField, create_validator\n')]
|
from dcos_migrate.system import StorableList, Backup
def create_example_list(dir: str) -> tuple:
    """Create a StorableList with one Backup entry, persist it, and return it.

    Bug fixed: the original annotation claimed ``-> StorableList`` but the
    function actually returns a 4-tuple; the annotation now says ``tuple``.

    Args:
        dir: directory path the StorableList writes into.

    Returns:
        (backup_list, plugin_name, backup_name, payload) — the stored list and
        the values used to build its single Backup entry, for assertions.
    """
    backup_list = StorableList(str(dir))
    plugin_name = "testPlugin"
    backup_name = "foobar"
    payload = {"foo": "bar"}
    backup_list.append(Backup(pluginName=plugin_name,
                              backupName=backup_name, data=payload))
    backup_list.store()
    return backup_list, plugin_name, backup_name, payload
def test_store(tmpdir):
    """Storing the list writes exactly one JSON backup under the plugin dir."""
    base = tmpdir.mkdir("test")
    _, plugin, backup, _ = create_example_list(str(base))
    assert len(base.listdir()) == 1
    assert base.dirpath("test/{}/{}.Backup.json".format(plugin, backup)).check()
def test_load(tmpdir):
    """Reloading from disk yields a distinct but equivalent list."""
    base = tmpdir.mkdir("test")
    stored, plugin, backup, payload = create_example_list(str(base))
    reloaded = StorableList(str(base)).load()
    # Distinct objects...
    assert stored != reloaded
    # ...but the same number of entries...
    assert len(stored) == len(reloaded)
    # ...and identical backup data.
    assert stored[0].data == reloaded[0].data
|
[
"dcos_migrate.system.Backup"
] |
[((218, 260), 'dcos_migrate.system.Backup', 'Backup', ([], {'pluginName': 'p', 'backupName': 'b', 'data': 'd'}), '(pluginName=p, backupName=b, data=d)\n', (224, 260), False, 'from dcos_migrate.system import StorableList, Backup\n')]
|
from amaranth.hdl.mem import *
from amaranth.hdl.mem import __all__
import warnings
# Compatibility shim: importing this module works, but warn users to migrate.
warnings.warn(
    "instead of nmigen.hdl.mem, use amaranth.hdl.mem",
    DeprecationWarning,
    stacklevel=2,
)
|
[
"warnings.warn"
] |
[((86, 188), 'warnings.warn', 'warnings.warn', (['"""instead of nmigen.hdl.mem, use amaranth.hdl.mem"""', 'DeprecationWarning'], {'stacklevel': '(2)'}), "('instead of nmigen.hdl.mem, use amaranth.hdl.mem',\n DeprecationWarning, stacklevel=2)\n", (99, 188), False, 'import warnings\n')]
|
#!/usr/bin/env python
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
import logging
import os
import sys
from git.git_repository import GitRepository
from manifests.build_manifest import BuildManifest
from manifests.bundle_manifest import BundleManifest
from manifests.test_manifest import TestManifest
from system import console
from system.temporary_directory import TemporaryDirectory
from test_workflow.dependency_installer import DependencyInstaller
from test_workflow.integ_test.integ_test_suite import IntegTestSuite
from test_workflow.test_args import TestArgs
from test_workflow.test_recorder.test_recorder import TestRecorder
from test_workflow.test_result.test_suite_results import TestSuiteResults
def pull_build_repo(work_dir):
    """Check out the opensearch-build repository (main branch) under work_dir."""
    logging.info("Pulling opensearch-build")
    checkout_dir = os.path.join(work_dir, "opensearch-build")
    with GitRepository("https://github.com/opensearch-project/opensearch-build.git", "main", checkout_dir) as repo:
        logging.info(f"Checked out opensearch-build into {repo.dir}")
def main():
    """Run integration tests for every bundle component with an integ-test config.

    Downloads the bundle/build manifests from S3 into a temporary work dir,
    installs Maven dependencies, executes each component's IntegTestSuite, and
    exits non-zero if any suite failed.
    """
    args = TestArgs()
    console.configure(level=args.logging_level)
    manifest_path = os.path.join(os.path.dirname(__file__), "test_workflow", "config", "test_manifest.yml")
    test_manifest = TestManifest.from_path(manifest_path)
    with TemporaryDirectory(keep=args.keep, chdir=True) as work_dir:
        logging.info(f"Switching to temporary work_dir: {work_dir.name}")
        recorder = TestRecorder(args.test_run_id, "integ-test", work_dir.name)
        bundle_manifest = BundleManifest.from_s3(args.s3_bucket, args.build_id, args.opensearch_version, args.platform, args.architecture, work_dir.name)
        build_manifest = BuildManifest.from_s3(args.s3_bucket, args.build_id, args.opensearch_version, args.platform, args.architecture, work_dir.name)
        pull_build_repo(work_dir.name)
        DependencyInstaller(build_manifest.build).install_all_maven_dependencies()
        all_results = TestSuiteResults()
        for component in bundle_manifest.components.values():
            # Components without a test config are silently skipped.
            if component.name not in test_manifest.components:
                continue
            config = test_manifest.components[component.name]
            if not config.integ_test:
                logging.info("Skipping tests for %s, as it is currently not supported" % component.name)
                continue
            suite = IntegTestSuite(
                component, config, bundle_manifest, build_manifest, work_dir.name, args.s3_bucket, recorder
            )
            all_results.append(component.name, suite.execute())
        all_results.log()
        if all_results.failed():
            sys.exit(1)
# Script entry point: propagate main()'s exit status to the shell.
if __name__ == "__main__":
    sys.exit(main())
|
[
"os.path.join",
"test_workflow.test_result.test_suite_results.TestSuiteResults",
"test_workflow.dependency_installer.DependencyInstaller",
"os.path.dirname",
"test_workflow.integ_test.integ_test_suite.IntegTestSuite",
"test_workflow.test_recorder.test_recorder.TestRecorder",
"logging.info",
"test_workflow.test_args.TestArgs",
"manifests.test_manifest.TestManifest.from_path",
"system.temporary_directory.TemporaryDirectory",
"manifests.build_manifest.BuildManifest.from_s3",
"system.console.configure",
"manifests.bundle_manifest.BundleManifest.from_s3",
"sys.exit"
] |
[((894, 934), 'logging.info', 'logging.info', (['"""Pulling opensearch-build"""'], {}), "('Pulling opensearch-build')\n", (906, 934), False, 'import logging\n'), ((1176, 1186), 'test_workflow.test_args.TestArgs', 'TestArgs', ([], {}), '()\n', (1184, 1186), False, 'from test_workflow.test_args import TestArgs\n'), ((1191, 1234), 'system.console.configure', 'console.configure', ([], {'level': 'args.logging_level'}), '(level=args.logging_level)\n', (1208, 1234), False, 'from system import console\n'), ((1369, 1411), 'manifests.test_manifest.TestManifest.from_path', 'TestManifest.from_path', (['test_manifest_path'], {}), '(test_manifest_path)\n', (1391, 1411), False, 'from manifests.test_manifest import TestManifest\n'), ((1089, 1150), 'logging.info', 'logging.info', (['f"""Checked out opensearch-build into {repo.dir}"""'], {}), "(f'Checked out opensearch-build into {repo.dir}')\n", (1101, 1150), False, 'import logging\n'), ((1274, 1299), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1289, 1299), False, 'import os\n'), ((1421, 1467), 'system.temporary_directory.TemporaryDirectory', 'TemporaryDirectory', ([], {'keep': 'args.keep', 'chdir': '(True)'}), '(keep=args.keep, chdir=True)\n', (1439, 1467), False, 'from system.temporary_directory import TemporaryDirectory\n'), ((1489, 1554), 'logging.info', 'logging.info', (['f"""Switching to temporary work_dir: {work_dir.name}"""'], {}), "(f'Switching to temporary work_dir: {work_dir.name}')\n", (1501, 1554), False, 'import logging\n'), ((1579, 1638), 'test_workflow.test_recorder.test_recorder.TestRecorder', 'TestRecorder', (['args.test_run_id', '"""integ-test"""', 'work_dir.name'], {}), "(args.test_run_id, 'integ-test', work_dir.name)\n", (1591, 1638), False, 'from test_workflow.test_recorder.test_recorder import TestRecorder\n'), ((1665, 1797), 'manifests.bundle_manifest.BundleManifest.from_s3', 'BundleManifest.from_s3', (['args.s3_bucket', 'args.build_id', 'args.opensearch_version', 
'args.platform', 'args.architecture', 'work_dir.name'], {}), '(args.s3_bucket, args.build_id, args.\n opensearch_version, args.platform, args.architecture, work_dir.name)\n', (1687, 1797), False, 'from manifests.bundle_manifest import BundleManifest\n'), ((1818, 1949), 'manifests.build_manifest.BuildManifest.from_s3', 'BuildManifest.from_s3', (['args.s3_bucket', 'args.build_id', 'args.opensearch_version', 'args.platform', 'args.architecture', 'work_dir.name'], {}), '(args.s3_bucket, args.build_id, args.\n opensearch_version, args.platform, args.architecture, work_dir.name)\n', (1839, 1949), False, 'from manifests.build_manifest import BuildManifest\n'), ((2089, 2107), 'test_workflow.test_result.test_suite_results.TestSuiteResults', 'TestSuiteResults', ([], {}), '()\n', (2105, 2107), False, 'from test_workflow.test_result.test_suite_results import TestSuiteResults\n'), ((1028, 1070), 'os.path.join', 'os.path.join', (['work_dir', '"""opensearch-build"""'], {}), "(work_dir, 'opensearch-build')\n", (1040, 1070), False, 'import os\n'), ((2861, 2872), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2869, 2872), False, 'import sys\n'), ((1992, 2033), 'test_workflow.dependency_installer.DependencyInstaller', 'DependencyInstaller', (['build_manifest.build'], {}), '(build_manifest.build)\n', (2011, 2033), False, 'from test_workflow.dependency_installer import DependencyInstaller\n'), ((2699, 2791), 'logging.info', 'logging.info', (["('Skipping tests for %s, as it is currently not supported' % component.name)"], {}), "('Skipping tests for %s, as it is currently not supported' %\n component.name)\n", (2711, 2791), False, 'import logging\n'), ((2376, 2497), 'test_workflow.integ_test.integ_test_suite.IntegTestSuite', 'IntegTestSuite', (['component', 'test_config', 'bundle_manifest', 'build_manifest', 'work_dir.name', 'args.s3_bucket', 'test_recorder'], {}), '(component, test_config, bundle_manifest, build_manifest,\n work_dir.name, args.s3_bucket, test_recorder)\n', (2390, 
2497), False, 'from test_workflow.integ_test.integ_test_suite import IntegTestSuite\n')]
|
#
from __future__ import division
import timeit
import time
from math import sqrt
from numpy import concatenate
import matplotlib.pyplot as plt
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn.metrics import mean_squared_error
from sklearn import preprocessing
import numpy as np
import pandas as pd
import multiprocessing
import matplotlib
from IOHMM import UnSupervisedIOHMM
from IOHMM import OLS, DiscreteMNL, CrossEntropyMNL
from IOHMM import forward_backward
from scipy.special import logsumexp
import pickle
from copy import deepcopy
import random
from sklearn.decomposition import PCA
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import os
import keras
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, LSTM
from keras.utils import to_categorical
from keras import backend as K
from tensorflow.keras import regularizers
# with open ('data/individual_ID_list', 'rb') as fp:
# individual_ID_list = pickle.load(fp)
# num_of_test_samples=500
# individual_ID_list_test = [ day_list[i] for i in sorted(random.sample(range(len(day_list)), num_of_test_samples)) ]
# Collector for per-individual duration accuracies; not referenced in the
# visible portion of this script.
Accurate_duration = []
# filename1='data/activity_index_test.txt'
# file1=open(filename1,'r')
# activity_index_test=eval(file1.read())
# Activity-index lookup; file loading above is disabled, so it starts empty.
activity_index_test = {}
def process_data(data, test_proportion, Card_ID, test_last):
    """Min-max scale one card's covariates and split the days into train/test.

    Defect fixed: the covariate list construction and MinMaxScaler fit were
    duplicated verbatim (the first fit's output was unused except for its
    printed shape); the duplicate has been removed and the shape print kept.

    Args:
        data: DataFrame of activity records; must contain the covariate
            columns referenced below plus 'seq_ID' (day index) and
            'duration_last'.
        test_proportion: fraction of days held out for testing.
        Card_ID: seeds the random day sampler for a reproducible per-card split.
        test_last: if True, hold out the last days chronologically; otherwise
            sample test days at random.

    Returns:
        (min_max_scaler, data, data_train, data_test, Ut_list): the fitted
        scaler, the scaled full frame, the two splits, and the covariate names.
    """
    # First activity of a day carries duration_last == -1 as a sentinel; use 0.
    data.loc[data['duration_last'] == -1, 'duration_last'] = 0
    # Collect the one-hot location/hour dummy columns generated upstream.
    location_list = []
    hour_list = []
    for col in list(data.columns.values):
        if 'location' in col:
            location_list.append(col)
        if 'hour' in col:
            hour_list.append(col)
    # Drop the raw (non-dummy) columns from the covariate lists.
    location_list.remove('location_o')
    location_list.remove('location')
    hour_list.remove('hour')
    # Covariates fed to the model.
    weather_list = ['rain', 'heavy_rain', 'sun', 'cloud', 'Avrg_Temp', 'fengli']
    Weekday_list = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday']
    holiday_list = ['National_holiday', 'Observance']
    last_activity = ['duration_last', 'duration_trip']
    previous_trips = ['Last_trip_time_yesterday', 'N_days_withtrip_past20',
                      'N_consec_days_no_trips', 'N_trips_yesterday']
    Ut_list = weather_list + hour_list + Weekday_list + location_list + holiday_list + last_activity + previous_trips
    # Scale all covariates in place with a single scaler fit.
    data_array = np.array(data.loc[:, Ut_list])
    min_max_scaler = preprocessing.MinMaxScaler()
    array_minmax = min_max_scaler.fit_transform(data_array)
    print(array_minmax.shape)
    data.loc[:, Ut_list] = array_minmax
    total_days = data['seq_ID'].max()
    train_days = int(total_days - round(total_days * test_proportion))
    if test_last:
        # Hold out the last `total_days - train_days` days.
        data_train = data.loc[data['seq_ID'] <= train_days]
        data_test = data.loc[data['seq_ID'] > train_days]
    else:
        # Reproducible random split per card.
        random.seed(Card_ID)
        test_seq = random.sample(list(range(1, total_days + 1)), total_days - train_days)
        data_train = data.loc[~data['seq_ID'].isin(test_seq)]
        data_test = data.loc[data['seq_ID'].isin(test_seq)]
    return min_max_scaler, data, data_train, data_test, Ut_list
def gen_sequence(id_df, seq_length, seq_cols):
    """Yield one fixed-length window per row of id_df[seq_cols].

    For each row position there is a window ending at that row (inclusive) of
    exactly ``seq_length`` rows; windows that would start before the first row
    are left-padded with zeros, so the number of yielded windows equals the
    number of rows.
    """
    values = id_df[seq_cols].values
    n_rows = values.shape[0]
    for stop in range(1, n_rows + 1):
        start = stop - seq_length
        if start >= 0:
            # Full window available: plain slice.
            yield values[start:stop, :]
        else:
            # Not enough history yet: pad the missing rows with zeros.
            pad = np.zeros([-start, values.shape[1]])
            yield np.vstack([pad, values[0:stop, :]])
def gen_labels(id_df, seq_length, label):
    """Return the full label matrix for one id as a 2-D array.

    ``seq_length`` is accepted for interface symmetry with gen_sequence; since
    that generator pads short windows instead of dropping them, no leading
    labels are discarded here.
    """
    labels = id_df[label].values
    n_rows = labels.shape[0]
    return labels[0:n_rows, :]
def pre_process_to_LSTM(data_train, data_test, Ut_list, depend_var, sequence_length):
    """Convert train/test frames into LSTM input windows and one-hot labels.

    Input sequences are generated per day (grouped by 'seq_ID') with zero
    left-padding (see gen_sequence). The dependent variable is mapped to
    contiguous class indices via a sorted unique-value dictionary, then
    one-hot encoded with to_categorical.

    Returns:
        (seq_array_train, seq_array_test, label_array_train, label_array_test,
        dict_label) where dict_label[i] is the original label of class i.
    """
    # test = list(gen_sequence(data_train[data_train['seq_ID'] == 59], sequence_length, Ut_list))
    seq_gen_train = (list(gen_sequence(data_train[data_train['seq_ID'] == idx], sequence_length, Ut_list))
                     for idx in data_train['seq_ID'].unique())
    seq_gen_test = (list(gen_sequence(data_test[data_test['seq_ID'] == idx], sequence_length, Ut_list))
                    for idx in data_test['seq_ID'].unique())
    seq_array_train = np.concatenate(list(seq_gen_train)).astype(np.float32)
    seq_array_test = np.concatenate(list(seq_gen_test)).astype(np.float32)
    # Labels are built on the concatenated frame; the len(data_train) slicing
    # below assumes all train rows precede test rows, which holds because the
    # two splits partition seq_ID and append() preserves row order.
    # NOTE(review): DataFrame.append is deprecated in recent pandas versions;
    # pd.concat([data_train, data_test]) is the modern equivalent — confirm
    # before upgrading pandas.
    data = data_train.append(data_test)
    # Map each distinct label value to a contiguous class index.
    dict_label = sorted(list(pd.unique(data.loc[:,depend_var[0]])))
    dict_label2 = {}
    idx = 0
    for key in dict_label:
        dict_label2[key] = idx
        idx += 1
    data['new_dep'] = data[depend_var[0]].apply(lambda x: dict_label2[x])
    label_gen = [gen_labels(data[data['seq_ID']==idx], sequence_length, ['new_dep'])
                 for idx in data['seq_ID'].unique()]
    label_gen = np.concatenate(label_gen).astype(np.int32)
    # One-hot encode the class indices.
    label_gen = to_categorical(label_gen, num_classes=len(dict_label))
    label_array_train = label_gen[0:len(data_train),:]
    label_array_test = label_gen[len(data_train):,:]
    return seq_array_train, seq_array_test, label_array_train, label_array_test, dict_label
def Model(Card_ID, RE_RUN):
    """Train and evaluate a per-card LSTM predicting the next tap-in station.

    Reads the card's sample CSV, scales/splits it via process_data, fits a
    single-layer LSTM classifier with early stopping, then writes top-20
    ranked station predictions for both the test and train splits to CSVs
    under data_path + 'results/'.

    Args:
        Card_ID: identifier of the smart-card holder being modeled.
        RE_RUN: if False and a test-results file already exists, skip the card.

    Returns:
        Test accuracy (float), or None when the card was skipped.
    """
    file_name_test_results = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test.csv'
    model_path = 'output/LSTM/' + 'model_Location_' + str(Card_ID) + '.h5'
    # Skip cards that already have results unless a re-run is forced.
    if not RE_RUN:
        if os.path.exists(file_name_test_results):
            print('Finish model', Card_ID)
            return
    # if Card_ID in activity_index_test:
    #     print ('Running model', Card_ID)
    #     return
    file_name_train = data_path + 'samples/sample_' + str(Card_ID) + '_201407_201408_all.csv'
    data = pd.read_csv(file_name_train)
    data = data.loc[data['if_last']==0,:] # drop the last one, it will distract the training (because it is manually added)
    test_proportion = 0.2
    #========================= #data_preprocessing
    test_last = False
    scaler, data, data_train, data_test, Ut_list = process_data(data, test_proportion,Card_ID, test_last)
    # NOTE(review): duration_hour is derived from the full `data` frame and
    # assigned onto the train/test subsets via pandas index alignment — confirm
    # this (and the SettingWithCopy implications) is intended.
    data_train['duration_hour'] = round(data['duration'] / 3600).astype('int') # classification
    data_test['duration_hour'] = round(data['duration'] / 3600).astype('int') # classification
    depend_var = ['Next_tapin_station']
    sequence_length = 2 # look back period, use 2 because most people only has 2 trips.
    seq_array_train, seq_array_test, label_array_train, label_array_test, dict_label = pre_process_to_LSTM(data_train, data_test, Ut_list, depend_var, sequence_length)
    # print(seq_array_train.shape, seq_array_test.shape, label_array_train.shape, label_array_test.shape)
    nb_features = seq_array_train.shape[2]
    nb_out = label_array_train.shape[1]
    #===========================
    # Design network: single 50-unit LSTM -> dropout -> sigmoid output layer.
    model = Sequential()
    model.add(LSTM(
         input_shape=(sequence_length, nb_features),
         units=50,
         return_sequences=False,
         )) #
    model.add(Dropout(0.05))
    # model.add(Dense(units=50, activation='relu'))
    # model.add(LSTM(
    #     units=50,
    #     return_sequences=False))
    # model.add(Dropout(0.05))
    # opt = keras.optimizers.SGD(lr=1e-2)
    model.add(Dense(units=nb_out, activation='sigmoid',name='output_rank'))
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    # print(model.summary())
    # Fit with early stopping on validation accuracy; the best model by
    # val_acc is checkpointed to model_path.
    history = model.fit(seq_array_train, label_array_train, epochs=200, batch_size=30, verbose=0,
                        validation_data=(seq_array_test, label_array_test),
                        callbacks=[
                            keras.callbacks.EarlyStopping(monitor='val_acc', min_delta=0, patience=50, verbose=0,
                                                          mode='max'),
                            keras.callbacks.ModelCheckpoint(model_path, monitor='val_acc', save_best_only=True,
                                                            mode='max', verbose=0)]
                        )
    # (Commented-out plotting of the training/validation accuracy and loss
    # curves was removed here; recover it from version control if needed.)
    #########################
    # Test: reload the checkpointed best model if it was written.
    # NOTE(review): if the checkpoint file is missing, `estimator` below is
    # undefined and the K.function call raises NameError — confirm model_path
    # is always created by the ModelCheckpoint callback.
    if os.path.isfile(model_path):
        estimator = load_model(model_path)
    get_last_layer_output = K.function([estimator.layers[0].input],
                                       [estimator.get_layer('output_rank').output])
    layer_output = get_last_layer_output([seq_array_test])[0]
    # Rank at most 20 candidate stations (fewer if there are fewer classes).
    top_N = np.min([20, nb_out])
    idx_top_N = np.argsort(-layer_output, axis = 1) # use negative because from small to large
    idx_top_N = idx_top_N[:,0:top_N]
    results = data_test.loc[:,['ID',depend_var[0],'act_ID']]
    results['Card_ID'] = Card_ID
    results = results.reset_index(drop=True)
    # Map class indices back to original station labels for the top-N ranking.
    predict_topN = [np.array(dict_label)[row_index.astype('int')] for row_index in idx_top_N]
    pred_col = ['Predict' + str(i + 1) for i in range(top_N)]
    results_predict = pd.DataFrame(predict_topN, columns= pred_col)
    results = pd.concat([results, results_predict],axis=1)
    results = results.rename(columns = {depend_var[0]:'Ground_truth','act_ID':'activity_index'})
    results['Correct'] = 0
    results.loc[results['Predict1'] == results['Ground_truth'],'Correct'] = 1
    test_acc = sum(results['Correct'])/len(results)
    # Pad prediction columns up to Predict20 so the CSV schema is fixed.
    if top_N < 20:
        for k in range(top_N+1,20+1):
            results['Predict'+str(k)] = -1
    file_name_test_results = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test.csv'
    results.to_csv(file_name_test_results, columns=['ID','Card_ID'] + ['Predict' + str(i + 1) for i in range(20)] + ['Ground_truth', 'Correct', 'activity_index'],index=False)
    # Train: repeat the ranking/export on the training split, using the
    # in-memory model (not the reloaded checkpoint).
    get_last_layer_output = K.function([model.layers[0].input],
                                       [model.get_layer('output_rank').output])
    layer_output = get_last_layer_output([seq_array_train])[0]
    idx_top_N = np.argsort(-layer_output, axis = 1) # use negative because from small to large
    idx_top_N = idx_top_N[:,0:top_N]
    results = data_train.loc[:,['ID',depend_var[0],'act_ID']]
    results['Card_ID'] = Card_ID
    results = results.reset_index(drop=True)
    predict_topN = [np.array(dict_label)[row_index.astype('int')] for row_index in idx_top_N]
    pred_col = ['Predict' + str(i + 1) for i in range(top_N)]
    results_predict = pd.DataFrame(predict_topN, columns= pred_col)
    results = pd.concat([results, results_predict],axis=1)
    results = results.rename(columns = {depend_var[0]:'Ground_truth','act_ID':'activity_index'})
    results['Correct'] = 0
    results.loc[results['Predict1'] == results['Ground_truth'],'Correct'] = 1
    train_acc = sum(results['Correct']) / len(results)
    print('Train accuracy', train_acc)
    if top_N < 20:
        for k in range(top_N+1,20+1):
            results['Predict'+str(k)] = -1
    file_name_train_results = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'train.csv'
    results.to_csv(file_name_train_results,columns=['ID','Card_ID'] + ['Predict' + str(i + 1) for i in range(20)] + ['Ground_truth', 'Correct', 'activity_index'],index=False)
    return test_acc
def calculate_accuracy(result_df):
    """Compute hit rates over a prediction-result frame.

    ``result_df`` must contain a binary ``Correct`` column (1 = correct
    prediction) and an ``activity_index`` column where 0 marks the first
    activity of the day.

    Returns a 6-tuple:
    (accuracy_first, accuracy_middle, accuracy_all, n_first, n_middle, n_all)
    """
    correct = result_df['Correct']
    is_first = result_df['activity_index'] == 0
    is_hit = result_df['Correct'] == 1

    n_first = correct.loc[is_first].count()
    accuracy_first = correct.loc[is_hit & is_first].count() / n_first

    n_middle = correct.loc[~is_first].count()
    accuracy_middle = correct.loc[is_hit & ~is_first].count() / n_middle

    n_all = correct.count()
    accuracy_all = correct.loc[is_hit].count() / n_all

    return accuracy_first, accuracy_middle, accuracy_all, n_first, n_middle, n_all
if __name__ == '__main__':
    # Batch driver: (re-)train/evaluate the LSTM location model for a sample
    # of card holders and print per-person accuracies.
    # card_ID = 954394568
    # individual_ID_list_test = [958999238]
    data_path = '../data/'
    # with open(data_path + 'individual_ID_list_test', 'rb') as fp:
    #    individual_ID_list_test = pickle.load(fp)
    SHOW_BASELINE = False  # when True, also report the MC / IOHMM baselines
    SKIP_RUNNED_MODEL = True  # skip card IDs whose test results already exist
    num_ind = 1000
    with open(data_path + 'individual_ID_list_test_' + str(num_ind) + '.pickle', 'rb') as fp:
        individual_ID_list_test = pickle.load(fp)
    # Only evaluate the first 500 individuals of the sample.
    individual_ID_list_test = individual_ID_list_test[0:500]
    count = 0
    RE_RUN = True
    tic = time.time()
    for Card_ID in individual_ID_list_test:
        count+=1
        print('Current Card ID',Card_ID,'count',count, 'total',len(individual_ID_list_test))
        file_name_test_ = data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test.csv'
        if SKIP_RUNNED_MODEL:
            if os.path.exists(file_name_test_):
                print ('Finish model', Card_ID)
                continue
        # Model() is defined earlier in this file; it may return None when the
        # model was loaded instead of trained, in which case the accuracy is
        # recomputed from the saved test CSV.
        test_acc = Model(Card_ID,RE_RUN)
        if test_acc is None:
            result_df = pd.read_csv(data_path + 'results/result_Location_LSTM' + str(Card_ID) + 'test.csv')
            _, _, test_acc, _, _, _ = calculate_accuracy(result_df)
        if SHOW_BASELINE:
            # Markov-chain and IOHMM baseline results saved by other scripts.
            result_df_MC = pd.read_csv(data_path + 'results/result_Location_MC' + str(Card_ID) + '.csv')
            _, _, Accuracy_MC, _, _, _ = calculate_accuracy(result_df_MC)
            result_df_IOHMM = pd.read_csv(data_path + 'results/result_Location_' + str(Card_ID) + 'test.csv')
            _, _, Accuracy_IOHMM, _, _, _ = calculate_accuracy(result_df_IOHMM)
        else:
            # Sentinel value meaning "baseline not evaluated".
            Accuracy_MC = -1
            Accuracy_IOHMM = -1
        print ('Num_people_processed', count)
        print(Card_ID, 'Total Testing Accuracy:', test_acc)
        print(Card_ID, 'Base Total Testing Accuracy:', Accuracy_MC)
        print(Card_ID, 'IOHMM Total Testing Accuracy:', Accuracy_IOHMM)
        print('Elapsed time', time.time() - tic)
        print('------****------')
    # pool = multiprocessing.Pool(processes=3)
    print('Total time', time.time() - tic)
    # pool.map(Model, individual_ID_list_test)
    # pool.close()
# print ('Accurate_duration',sum(Accurate_duration)/len(Accurate_duration))
# filename1='data/activity_index_test.txt'
# file1=open(filename1,'r')
# activity_index_test=eval(file1.read())
|
[
"keras.models.load_model",
"pandas.read_csv",
"sklearn.preprocessing.MinMaxScaler",
"numpy.argsort",
"os.path.isfile",
"pickle.load",
"pandas.DataFrame",
"os.path.exists",
"random.seed",
"pandas.concat",
"keras.callbacks.ModelCheckpoint",
"keras.layers.Dropout",
"numpy.min",
"numpy.concatenate",
"numpy.vstack",
"keras.layers.LSTM",
"numpy.zeros",
"pandas.unique",
"time.time",
"keras.layers.Dense",
"numpy.array",
"keras.callbacks.EarlyStopping",
"keras.models.Sequential"
] |
[((2622, 2652), 'numpy.array', 'np.array', (['data.loc[:, Ut_list]'], {}), '(data.loc[:, Ut_list])\n', (2630, 2652), True, 'import numpy as np\n'), ((2673, 2701), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (2699, 2701), False, 'from sklearn import preprocessing\n'), ((3394, 3424), 'numpy.array', 'np.array', (['data.loc[:, Ut_list]'], {}), '(data.loc[:, Ut_list])\n', (3402, 3424), True, 'import numpy as np\n'), ((3446, 3474), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (3472, 3474), False, 'from sklearn import preprocessing\n'), ((8264, 8292), 'pandas.read_csv', 'pd.read_csv', (['file_name_train'], {}), '(file_name_train)\n', (8275, 8292), True, 'import pandas as pd\n'), ((9373, 9385), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (9383, 9385), False, 'from keras.models import Sequential, load_model\n'), ((11517, 11543), 'os.path.isfile', 'os.path.isfile', (['model_path'], {}), '(model_path)\n', (11531, 11543), False, 'import os\n'), ((11813, 11833), 'numpy.min', 'np.min', (['[20, nb_out]'], {}), '([20, nb_out])\n', (11819, 11833), True, 'import numpy as np\n'), ((11851, 11884), 'numpy.argsort', 'np.argsort', (['(-layer_output)'], {'axis': '(1)'}), '(-layer_output, axis=1)\n', (11861, 11884), True, 'import numpy as np\n'), ((12284, 12328), 'pandas.DataFrame', 'pd.DataFrame', (['predict_topN'], {'columns': 'pred_col'}), '(predict_topN, columns=pred_col)\n', (12296, 12328), True, 'import pandas as pd\n'), ((12344, 12389), 'pandas.concat', 'pd.concat', (['[results, results_predict]'], {'axis': '(1)'}), '([results, results_predict], axis=1)\n', (12353, 12389), True, 'import pandas as pd\n'), ((13256, 13289), 'numpy.argsort', 'np.argsort', (['(-layer_output)'], {'axis': '(1)'}), '(-layer_output, axis=1)\n', (13266, 13289), True, 'import numpy as np\n'), ((13690, 13734), 'pandas.DataFrame', 'pd.DataFrame', (['predict_topN'], {'columns': 'pred_col'}), '(predict_topN, 
columns=pred_col)\n', (13702, 13734), True, 'import pandas as pd\n'), ((13750, 13795), 'pandas.concat', 'pd.concat', (['[results, results_predict]'], {'axis': '(1)'}), '([results, results_predict], axis=1)\n', (13759, 13795), True, 'import pandas as pd\n'), ((15743, 15754), 'time.time', 'time.time', ([], {}), '()\n', (15752, 15754), False, 'import time\n'), ((3913, 3933), 'random.seed', 'random.seed', (['Card_ID'], {}), '(Card_ID)\n', (3924, 3933), False, 'import random\n'), ((7964, 8002), 'os.path.exists', 'os.path.exists', (['file_name_test_results'], {}), '(file_name_test_results)\n', (7978, 8002), False, 'import os\n'), ((9400, 9487), 'keras.layers.LSTM', 'LSTM', ([], {'input_shape': '(sequence_length, nb_features)', 'units': '(50)', 'return_sequences': '(False)'}), '(input_shape=(sequence_length, nb_features), units=50, return_sequences\n =False)\n', (9404, 9487), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((9535, 9548), 'keras.layers.Dropout', 'Dropout', (['(0.05)'], {}), '(0.05)\n', (9542, 9548), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((9766, 9827), 'keras.layers.Dense', 'Dense', ([], {'units': 'nb_out', 'activation': '"""sigmoid"""', 'name': '"""output_rank"""'}), "(units=nb_out, activation='sigmoid', name='output_rank')\n", (9771, 9827), False, 'from keras.layers import Dense, Dropout, LSTM\n'), ((11565, 11587), 'keras.models.load_model', 'load_model', (['model_path'], {}), '(model_path)\n', (11575, 11587), False, 'from keras.models import Sequential, load_model\n'), ((15623, 15638), 'pickle.load', 'pickle.load', (['fp'], {}), '(fp)\n', (15634, 15638), False, 'import pickle\n'), ((4923, 4963), 'numpy.zeros', 'np.zeros', (['[-start, data_matrix.shape[1]]'], {}), '([-start, data_matrix.shape[1]])\n', (4931, 4963), True, 'import numpy as np\n'), ((6886, 6923), 'pandas.unique', 'pd.unique', (['data.loc[:, depend_var[0]]'], {}), '(data.loc[:, depend_var[0]])\n', (6895, 6923), True, 'import pandas as pd\n'), ((7261, 7286), 
'numpy.concatenate', 'np.concatenate', (['label_gen'], {}), '(label_gen)\n', (7275, 7286), True, 'import numpy as np\n'), ((12126, 12146), 'numpy.array', 'np.array', (['dict_label'], {}), '(dict_label)\n', (12134, 12146), True, 'import numpy as np\n'), ((13532, 13552), 'numpy.array', 'np.array', (['dict_label'], {}), '(dict_label)\n', (13540, 13552), True, 'import numpy as np\n'), ((16051, 16082), 'os.path.exists', 'os.path.exists', (['file_name_test_'], {}), '(file_name_test_)\n', (16065, 16082), False, 'import os\n'), ((17279, 17290), 'time.time', 'time.time', ([], {}), '()\n', (17288, 17290), False, 'import time\n'), ((5029, 5060), 'numpy.vstack', 'np.vstack', (['[padding, used_data]'], {}), '([padding, used_data])\n', (5038, 5060), True, 'import numpy as np\n'), ((10209, 10310), 'keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_acc"""', 'min_delta': '(0)', 'patience': '(50)', 'verbose': '(0)', 'mode': '"""max"""'}), "(monitor='val_acc', min_delta=0, patience=50,\n verbose=0, mode='max')\n", (10238, 10310), False, 'import keras\n'), ((10394, 10504), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['model_path'], {'monitor': '"""val_acc"""', 'save_best_only': '(True)', 'mode': '"""max"""', 'verbose': '(0)'}), "(model_path, monitor='val_acc',\n save_best_only=True, mode='max', verbose=0)\n", (10425, 10504), False, 'import keras\n'), ((17151, 17162), 'time.time', 'time.time', ([], {}), '()\n', (17160, 17162), False, 'import time\n')]
|
# This code is written at BigVision LLC. It is based on the OpenCV project. It is subject to the license terms in the LICENSE file found in this distribution and at http://opencv.org/license.html
# Usage example: python3 object_detection_yolo.py --video=run.mp4 --device 'cpu'
# python3 object_detection_yolo.py --video=run.mp4 --device 'gpu'
# python3 object_detection_yolo.py --image=office.jpg --device 'cpu'
# python3 object_detection_yolo.py --image=bird.jpg --device 'gpu'
import cv2 as cv
import argparse
import sys
import numpy as np
import os.path
import telebot
import datetime
import random
import bisect
import threading
import requests
import requests
import sqlite3
import sys
import MySQLdb
import psycopg2
import pyautogui
import win32gui
import time
from ftplib import FTP
import os
import calendar
print(calendar.day_abbr[datetime.date(2019, 2, 2).weekday()])
# Maps a weekday abbreviation to the FTP directory (named by date) that holds
# that day's captured images.
d_day_to_cat = {'Fri':'20211112', 'Sat':'20211113', 'Sun':'20211114', 'Mon': '20211115', 'Tue': '20211116', 'Wed': '20211117', 'Thu': '20211118'}
# Initialize the parameters
not_worked_time = 0     # accumulated idle minutes since the last daily reset
time0 = '0:0'           # timestamp when the last idle period started
time1 = '0:0'           # timestamp when work was last observed
time0_flag = 0
time1_flag = 0
worked_time_v2 = 0      # accumulated working minutes (2-minute increments)
fl_newtime = 0          # guards against double-counting within one interval
# SECURITY: bot token, API token and FTP credentials are hard-coded below --
# they should be moved to environment variables or a config file.
bot = telebot.TeleBot('2002045567:AAFBWxp3Fpxf9OdhRFm8HxCUMvAhuZVLwq4')
ch_id = ''
api_token = '2002045567:AAFBWxp3Fpxf9OdhRFm8HxCUMv<PASSWORD>'
flag_of_sent_msg = 0    # ensures the daily report is sent only once
# NOTE: `global` at module level is a no-op; `ft` is never defined or used.
global ft
time_a = 0              # minutes during which exactly one person was seen
time_t = 0              # minutes during which two or more people were seen
ftp = FTP('172.16.58.3', user='taxiuser', passwd='<PASSWORD>')
def send_telegram(text, tpe):
    """Post a worked-time summary to the Telegram channel.

    text: a (hours, minutes) pair (any str()-able values).
    tpe:  message template selector -- 1 = total worked time,
          2 = one person working, 3 = more than one person working.
    Raises Exception when Telegram does not answer HTTP 200.
    """
    token = '<KEY>'
    url = "https://api.telegram.org/bot"
    channel_id = "-1001668840613"
    url += token
    method = url + "/sendMessage"
    hours, minutes = str(text[0]), str(text[1])
    if tpe == 1:
        tx = 'Рабочее время: ' + hours + ' часов, ' + minutes + ' минут.'
    elif tpe == 2:
        # Bug fix: the original glued the number straight onto the label
        # ("Работал 1 человек5 часов, ..."); add the missing ": " separator.
        tx = 'Работал 1 человек: ' + hours + ' часов, ' + minutes + ' минут.'
    else:
        tx = 'Работало более 1 человека: ' + hours + ' часов, ' + minutes + ' минут.'
    r = requests.post(method, data={
        "chat_id": channel_id,
        "text": tx
    })
    if r.status_code != 200:
        raise Exception("post_text error")
def screenshot(window_title='InternetExplorer'):
    """Grab a screenshot of the named window's client area.

    With a falsy *window_title* the whole screen is captured.  When the
    window cannot be found a message is printed and None is returned.
    """
    if not window_title:
        # No title requested -> capture the entire screen.
        return pyautogui.screenshot()
    hwnd = win32gui.FindWindow(None, window_title)
    if not hwnd:
        print('Window not found!')
        return None
    win32gui.SetForegroundWindow(hwnd)
    # Client rect is window-relative; convert its origin to screen
    # coordinates, then derive width/height relative to that origin.
    left, top, right, bottom = win32gui.GetClientRect(hwnd)
    left, top = win32gui.ClientToScreen(hwnd, (left, top))
    width, height = win32gui.ClientToScreen(hwnd, (right - left, bottom - top))
    return pyautogui.screenshot(region=(left, top, width, height))
def gettime1(time):
    """Record *time* as the moment work was last observed.

    Side effects on module globals: stores the timestamp in ``time1``,
    flips the flag pair so the next idle observation is captured by
    ``gettime0``, and credits 2 minutes to ``worked_time_v2`` (the main
    loop samples every other minute).
    """
    global time1
    global time1_flag
    global time0_flag
    global worked_time_v2
    time1 = time
    time1_flag = 1
    time0_flag = 0
    worked_time_v2 += 2
    print(worked_time_v2)
def gettime0(time):
    """Record *time* as the start of an idle period and accumulate idle minutes.

    Only the first idle observation after a working period is stored
    (guarded by ``time0_flag``); the gap between the last working
    timestamp ``time1`` and this idle timestamp is added to the global
    ``not_worked_time`` counter, in minutes.
    """
    global time0
    global time1
    global time1_flag
    global time0_flag
    global not_worked_time
    if time0_flag == 0:
        time0 = time
        time0_flag = 1
        time1_flag = 0
        print(time0, time1)
        # Keep only hours and minutes; seconds are ignored.
        time_ms_1 = [int(i) for i in str(time1).split(':')[:2]]
        time_ms_0 = [int(i) for i in str(time0).split(':')[:2]]
        sum_time1 = (time_ms_1[0] * 60) + time_ms_1[1]
        sum_time0 = (time_ms_0[0] * 60) + time_ms_0[1]
        # Minutes since work was last seen (assumes both timestamps fall on
        # the same day -- TODO confirm; a midnight rollover would go negative).
        not_worked_time += sum_time0 - sum_time1
        print(str(time0).split(':'), str(time1).split(':'), not_worked_time)
def new_time_counter():
    """Credit 2 minutes of work to ``worked_time_v2``, at most once per cycle.

    ``fl_newtime`` is reset by the main loop on odd minutes, so the
    credit can only be applied once per even-minute capture cycle.
    """
    global fl_newtime
    global worked_time_v2
    if fl_newtime == 0:
        worked_time_v2 += 2
        fl_newtime = 1
def time_alone():
    """Add one 2-minute interval to ``time_a`` (exactly one person seen)."""
    global time_a
    time_a += 2
def time_two():
    """Add one 2-minute interval to ``time_t`` (two or more people seen)."""
    global time_t
    time_t += 2
def image_analis():
    """Run YOLOv3 person detection on the local file ``office.jpg``.

    Returns 1 when exactly one person is detected, 2 when two or more
    are detected, and 0 when no person is found.  Side effects: writes
    the annotated image to ``off_yolo_out_py.jpg`` and shows it in an
    OpenCV window.  Requires ``coco.names``, ``yolov3.cfg`` and
    ``yolov3.weights`` in the working directory.
    """
    confThreshold = 0.5  #Confidence threshold
    nmsThreshold = 0.4  #Non-maximum suppression threshold
    inpWidth = 416  #Width of network's input image
    inpHeight = 416  #Height of network's input image
    # Class names of every box drawn in this call; used at the end to
    # count how many 'person' detections there were.
    ms_label_list = []
    parser = argparse.ArgumentParser(description='Object Detection using YOLO in OPENCV')
    parser.add_argument('--device', default='cpu', help="Device to perform inference on 'cpu' or 'gpu'.")
    parser.add_argument('--image', help='office.jpg')
    parser.add_argument('--video', help='Path to video file.')
    args = parser.parse_args()
    # Load names of classes
    classesFile = "coco.names"
    classes = None
    with open(classesFile, 'rt') as f:
        classes = f.read().rstrip('\n').split('\n')
    # Give the configuration and weight files for the model and load the network using them.
    modelConfiguration = "yolov3.cfg"
    modelWeights = "yolov3.weights"
    net = cv.dnn.readNetFromDarknet(modelConfiguration, modelWeights)
    if(args.device == 'cpu'):
        net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
        net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)
        print('Using CPU device.')
    elif(args.device == 'gpu'):
        net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)
        net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)
        print('Using GPU device.')
    # Get the names of the output layers
    def getOutputsNames(net):
        # Get the names of all the layers in the network
        layersNames = net.getLayerNames()
        # Get the names of the output layers, i.e. the layers with unconnected outputs
        return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    # Draw the predicted bounding box
    def drawPred(classId, conf, left, top, right, bottom):
        # NOTE: reads `frame` from the enclosing loop below (closure).
        # Draw a bounding box.
        cv.rectangle(frame, (left, top), (right, bottom), (255, 178, 50), 3)
        label = '%.2f' % conf
        # Get the label for the class name and its confidence
        if classes:
            assert(classId < len(classes))
            label = '%s:%s' % (classes[classId], label)
            ms_label_list.append(label.split(':')[0])
        #Display the label at the top of the bounding box
        labelSize, baseLine = cv.getTextSize(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        top = max(top, labelSize[1])
        cv.rectangle(frame, (left, top - round(1.5*labelSize[1])), (left + round(1.5*labelSize[0]), top + baseLine), (255, 255, 255), cv.FILLED)
        cv.putText(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0,0,0), 1)
    # Remove the bounding boxes with low confidence using non-maxima suppression
    def postprocess(frame, outs):
        frameHeight = frame.shape[0]
        frameWidth = frame.shape[1]
        # Scan through all the bounding boxes output from the network and keep only the
        # ones with high confidence scores. Assign the box's class label as the class with the highest score.
        classIds = []
        confidences = []
        boxes = []
        for out in outs:
            for detection in out:
                scores = detection[5:]
                classId = np.argmax(scores)
                confidence = scores[classId]
                if confidence > confThreshold:
                    # YOLO emits center/size relative to the frame; convert
                    # to absolute top-left corner + width/height.
                    center_x = int(detection[0] * frameWidth)
                    center_y = int(detection[1] * frameHeight)
                    width = int(detection[2] * frameWidth)
                    height = int(detection[3] * frameHeight)
                    left = int(center_x - width / 2)
                    top = int(center_y - height / 2)
                    classIds.append(classId)
                    confidences.append(float(confidence))
                    boxes.append([left, top, width, height])
        # Perform non maximum suppression to eliminate redundant overlapping boxes with
        # lower confidences.
        indices = cv.dnn.NMSBoxes(boxes, confidences, confThreshold, nmsThreshold)
        for i in indices:
            # NOTE: `i[0]` assumes the pre-4.5.4 OpenCV NMSBoxes return
            # shape; newer versions return a flat array -- TODO confirm.
            i = i[0]
            box = boxes[i]
            left = box[0]
            top = box[1]
            width = box[2]
            height = box[3]
            drawPred(classIds[i], confidences[i], left, top, left + width, top + height)
    # Process inputs
    winName = 'Deep learning object detection in OpenCV'
    cv.namedWindow(winName, cv.WINDOW_NORMAL)
    outputFile = "yolo_out_py.avi"
    if (True):
        # Open the image file
        if not os.path.isfile('office.jpg'):
            print("Input image file ", args.image, " doesn't exist")
            sys.exit(1)
        cap = cv.VideoCapture('office.jpg')
        outputFile = 'off' + '_yolo_out_py.jpg'
    """elif (args.video):
        # Open the video file
        if not os.path.isfile(args.video):
            print("Input video file ", args.video, " doesn't exist")
            sys.exit(1)
        cap = cv.VideoCapture(args.video)
        outputFile = args.video[:-4]+'_yolo_out_py.avi'
    else:
        # Webcam input
        cap = cv.VideoCapture(0)"""
    # Get the video writer initialized to save the output video
    #if (not args.image):
    #    vid_writer = cv.VideoWriter(outputFile, cv.VideoWriter_fourcc('M','J','P','G'), 30, (round(cap.get(cv.CAP_PROP_FRAME_WIDTH)),round(cap.get(cv.CAP_PROP_FRAME_HEIGHT))))
    while cv.waitKey(1) < 0:
        # get frame from the video
        hasFrame, frame = cap.read()
        # Stop the program if reached end of video
        if not hasFrame:
            print("Done processing !!!")
            print("Output file is stored as ", outputFile)
            cv.waitKey(3000)
            # Release device
            cap.release()
            break
        # Create a 4D blob from a frame.
        blob = cv.dnn.blobFromImage(frame, 1/255, (inpWidth, inpHeight), [0,0,0], 1, crop=False)
        # Sets the input to the network
        net.setInput(blob)
        # Runs the forward pass to get output of the output layers
        outs = net.forward(getOutputsNames(net))
        # Remove the bounding boxes with low confidence
        postprocess(frame, outs)
        # Put efficiency information. The function getPerfProfile returns the overall time for inference(t) and the timings for each of the layers(in layersTimes)
        t, _ = net.getPerfProfile()
        label = 'Inference time: %.2f ms' % (t * 1000.0 / cv.getTickFrequency())
        cv.putText(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))
        # Write the frame with the detection boxes
        if (True):
            cv.imwrite(outputFile, frame.astype(np.uint8))
        else:
            vid_writer.write(frame.astype(np.uint8))
        cv.imshow(winName, frame)
    print(ms_label_list)
    # Count 'person' labels accumulated by drawPred across all frames.
    is_two = (ms_label_list.count('person') >= 2)
    is_one = (1 == ms_label_list.count('person'))
    if is_one:
        return 1
    elif is_two:
        return 2
    return 0
# Warm-up run, then poll the FTP server forever: every even minute download
# the newest office snapshot, count people on it, and accumulate work time;
# at 08:00 send the daily summary to Telegram and reset the counters.
image_analis()
b_time_sender = []
lst = ftp.nlst('20211114')
print('llll', len(lst))
while True:
    dt_now = datetime.datetime.now()
    # ['YYYY', 'MM', 'DD'] of the current date.
    m_d = str(dt_now).split(' ')[0].split('-')
    if ''.join(str(datetime.datetime.now().time()).split(':')[:2]) == '0801':
        # One minute after the report, re-arm the send flag for tomorrow.
        flag_of_sent_msg = 0
    if ''.join(str(datetime.datetime.now().time()).split(':')[:2]) == '0800':
        print('вход')
        worked_time = 1440 - not_worked_time
        actual_time_hours = str(worked_time_v2 // 60)
        actual_time_minutes = str(worked_time_v2 % 60)
        time_a_hours = str(time_a // 60)
        time_a_minutes = str(time_a % 60)
        time_t_hours = str(time_t // 60)
        time_t_minutes = str(time_t % 60)
        b_time_sender = [actual_time_hours, actual_time_minutes]
        b_time_sender_a = [time_a_hours, time_a_minutes]
        b_time_sender_t = [time_t_hours, time_t_minutes]
        b_dop_time_sender = []
        if flag_of_sent_msg == 0:
            # Daily report: total, one-person and multi-person work time.
            send_telegram(b_time_sender, 1)
            send_telegram(b_time_sender_a, 2)
            send_telegram(b_time_sender_t, 3)
            flag_of_sent_msg = 1
        # Reset the daily counters (flags are re-initialized with the same
        # '0:0' strings used for the timestamps).
        not_worked_time = 0
        time0 = '0:0'
        time1 = '0:0'
        time0_flag = '0:0'
        time1_flag = '0:0'
        worked_time_v2 = 0
    if int(''.join(str(datetime.datetime.now().time()).split(':')[1])) % 2 == 1:
        # Odd minute: allow new_time_counter() to credit the next interval.
        fl_newtime = 0
    if int(''.join(str(datetime.datetime.now().time()).split(':')[1])) % 2 == 0:
        # Even minute: fetch the newest snapshot from today's FTP folder
        # and analyse it.
        print(worked_time_v2)
        out = 'office.jpg'
        print("получил изображение")
        lst = ftp.nlst(d_day_to_cat[calendar.day_abbr[datetime.date(int(m_d[0]), int(m_d[1]), int(m_d[2])).weekday()]])
        print(lst[-1], d_day_to_cat[calendar.day_abbr[datetime.date(int(m_d[0]), int(m_d[1]), int(m_d[2])).weekday()]])
        with open(out, 'wb') as f:
            ftp.retrbinary('RETR ' + f'{lst[-1]}', f.write)
        result = image_analis()
        if result != 0:
            if result == 1:
                time_alone()
            if result == 2:
                time_two()
            print("попало в 1")
            new_time_counter()
        elif result == 0:
            print('прошло в 0')
            gettime0(datetime.datetime.now().time())
|
[
"win32gui.ClientToScreen",
"argparse.ArgumentParser",
"cv2.dnn.NMSBoxes",
"numpy.argmax",
"pyautogui.screenshot",
"os.path.isfile",
"cv2.rectangle",
"win32gui.SetForegroundWindow",
"requests.post",
"cv2.imshow",
"telebot.TeleBot",
"cv2.getTickFrequency",
"cv2.dnn.blobFromImage",
"datetime.datetime.now",
"cv2.waitKey",
"datetime.date",
"cv2.dnn.readNetFromDarknet",
"sys.exit",
"cv2.putText",
"cv2.getTextSize",
"win32gui.GetClientRect",
"win32gui.FindWindow",
"cv2.VideoCapture",
"ftplib.FTP",
"cv2.namedWindow"
] |
[((1220, 1285), 'telebot.TeleBot', 'telebot.TeleBot', (['"""2002045567:AAFBWxp3Fpxf9OdhRFm8HxCUMvAhuZVLwq4"""'], {}), "('2002045567:AAFBWxp3Fpxf9OdhRFm8HxCUMvAhuZVLwq4')\n", (1235, 1285), False, 'import telebot\n'), ((1418, 1474), 'ftplib.FTP', 'FTP', (['"""172.16.58.3"""'], {'user': '"""taxiuser"""', 'passwd': '"""<PASSWORD>"""'}), "('172.16.58.3', user='taxiuser', passwd='<PASSWORD>')\n", (1421, 1474), False, 'from ftplib import FTP\n'), ((1977, 2040), 'requests.post', 'requests.post', (['method'], {'data': "{'chat_id': channel_id, 'text': tx}"}), "(method, data={'chat_id': channel_id, 'text': tx})\n", (1990, 2040), False, 'import requests\n'), ((4072, 4148), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Object Detection using YOLO in OPENCV"""'}), "(description='Object Detection using YOLO in OPENCV')\n", (4095, 4148), False, 'import argparse\n'), ((4751, 4810), 'cv2.dnn.readNetFromDarknet', 'cv.dnn.readNetFromDarknet', (['modelConfiguration', 'modelWeights'], {}), '(modelConfiguration, modelWeights)\n', (4776, 4810), True, 'import cv2 as cv\n'), ((8150, 8191), 'cv2.namedWindow', 'cv.namedWindow', (['winName', 'cv.WINDOW_NORMAL'], {}), '(winName, cv.WINDOW_NORMAL)\n', (8164, 8191), True, 'import cv2 as cv\n'), ((10839, 10862), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10860, 10862), False, 'import datetime\n'), ((2231, 2270), 'win32gui.FindWindow', 'win32gui.FindWindow', (['None', 'window_title'], {}), '(None, window_title)\n', (2250, 2270), False, 'import win32gui\n'), ((2676, 2698), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {}), '()\n', (2696, 2698), False, 'import pyautogui\n'), ((5646, 5714), 'cv2.rectangle', 'cv.rectangle', (['frame', '(left, top)', '(right, bottom)', '(255, 178, 50)', '(3)'], {}), '(frame, (left, top), (right, bottom), (255, 178, 50), 3)\n', (5658, 5714), True, 'import cv2 as cv\n'), ((6068, 6122), 'cv2.getTextSize', 'cv.getTextSize', (['label', 
'cv.FONT_HERSHEY_SIMPLEX', '(0.5)', '(1)'], {}), '(label, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n', (6082, 6122), True, 'import cv2 as cv\n'), ((6313, 6399), 'cv2.putText', 'cv.putText', (['frame', 'label', '(left, top)', 'cv.FONT_HERSHEY_SIMPLEX', '(0.75)', '(0, 0, 0)', '(1)'], {}), '(frame, label, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0,\n 0), 1)\n', (6323, 6399), True, 'import cv2 as cv\n'), ((7733, 7797), 'cv2.dnn.NMSBoxes', 'cv.dnn.NMSBoxes', (['boxes', 'confidences', 'confThreshold', 'nmsThreshold'], {}), '(boxes, confidences, confThreshold, nmsThreshold)\n', (7748, 7797), True, 'import cv2 as cv\n'), ((8425, 8454), 'cv2.VideoCapture', 'cv.VideoCapture', (['"""office.jpg"""'], {}), "('office.jpg')\n", (8440, 8454), True, 'import cv2 as cv\n'), ((9141, 9154), 'cv2.waitKey', 'cv.waitKey', (['(1)'], {}), '(1)\n', (9151, 9154), True, 'import cv2 as cv\n'), ((9569, 9658), 'cv2.dnn.blobFromImage', 'cv.dnn.blobFromImage', (['frame', '(1 / 255)', '(inpWidth, inpHeight)', '[0, 0, 0]', '(1)'], {'crop': '(False)'}), '(frame, 1 / 255, (inpWidth, inpHeight), [0, 0, 0], 1,\n crop=False)\n', (9589, 9658), True, 'import cv2 as cv\n'), ((10215, 10291), 'cv2.putText', 'cv.putText', (['frame', 'label', '(0, 15)', 'cv.FONT_HERSHEY_SIMPLEX', '(0.5)', '(0, 0, 255)'], {}), '(frame, label, (0, 15), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255))\n', (10225, 10291), True, 'import cv2 as cv\n'), ((10498, 10523), 'cv2.imshow', 'cv.imshow', (['winName', 'frame'], {}), '(winName, frame)\n', (10507, 10523), True, 'import cv2 as cv\n'), ((2300, 2334), 'win32gui.SetForegroundWindow', 'win32gui.SetForegroundWindow', (['hwnd'], {}), '(hwnd)\n', (2328, 2334), False, 'import win32gui\n'), ((2362, 2390), 'win32gui.GetClientRect', 'win32gui.GetClientRect', (['hwnd'], {}), '(hwnd)\n', (2384, 2390), False, 'import win32gui\n'), ((2410, 2447), 'win32gui.ClientToScreen', 'win32gui.ClientToScreen', (['hwnd', '(x, y)'], {}), '(hwnd, (x, y))\n', (2433, 2447), False, 'import win32gui\n'), ((2469, 2516), 
'win32gui.ClientToScreen', 'win32gui.ClientToScreen', (['hwnd', '(x1 - x, y1 - y)'], {}), '(hwnd, (x1 - x, y1 - y))\n', (2492, 2516), False, 'import win32gui\n'), ((2534, 2577), 'pyautogui.screenshot', 'pyautogui.screenshot', ([], {'region': '(x, y, x1, y1)'}), '(region=(x, y, x1, y1))\n', (2554, 2577), False, 'import pyautogui\n'), ((8288, 8316), 'os.path.isfile', 'os.path.isfile', (['"""office.jpg"""'], {}), "('office.jpg')\n", (8302, 8316), False, 'import os\n'), ((8399, 8410), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (8407, 8410), False, 'import sys\n'), ((9422, 9438), 'cv2.waitKey', 'cv.waitKey', (['(3000)'], {}), '(3000)\n', (9432, 9438), True, 'import cv2 as cv\n'), ((890, 915), 'datetime.date', 'datetime.date', (['(2019)', '(2)', '(2)'], {}), '(2019, 2, 2)\n', (903, 915), False, 'import datetime\n'), ((6972, 6989), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (6981, 6989), True, 'import numpy as np\n'), ((10184, 10205), 'cv2.getTickFrequency', 'cv.getTickFrequency', ([], {}), '()\n', (10203, 10205), True, 'import cv2 as cv\n'), ((12923, 12946), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12944, 12946), False, 'import datetime\n'), ((10929, 10952), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10950, 10952), False, 'import datetime\n'), ((11036, 11059), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11057, 11059), False, 'import datetime\n'), ((12022, 12045), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12043, 12045), False, 'import datetime\n'), ((12126, 12149), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (12147, 12149), False, 'import datetime\n')]
|
## Libraries imports
import os
import discord
from discord.ext import commands
from dotenv import load_dotenv
from config import *
## App imports
from app.settings import Settings
from app.utils import guild_to_audiocontroller, guild_to_settings
from app.audio_controller import AudioController
from app.static import COMMANDS
## Command imports
from app.commands.general import General
from app.commands.music import Music
load_dotenv()
## Discord API config
DESCRIPTION = 'Toss a coin to your witcher'
INTENTS = discord.Intents.all()
PREFIX = BOT_PREFIX
TOKEN = os.getenv("DISCORD_TOKEN")
# Bug fix: the keyword must be lowercase `intents` -- discord.py's Bot/Client
# does not recognise `Intents=`, so the configured intents were never applied.
bot = commands.Bot(
    command_prefix=commands.when_mentioned_or(PREFIX),
    description=DESCRIPTION,
    intents=INTENTS,
    help_command=None,
    case_insensitive=True
)
## Events
@bot.event
async def on_ready():
    """Startup hook: set the bot's presence and register every joined guild."""
    print(STARTUP_MESSAGE)
    # Show "listening to <prefix>help" as the bot's activity.
    await bot.change_presence(activity = discord.Activity(
        type=discord.ActivityType.listening,
        name=f'{PREFIX}help'
    ))
    for guild in bot.guilds:
        await register(guild)
        print(f'Uniu-se a: {guild.name}')
    print(STARTUP_COMPLETE_MESSAGE)
    print(f'{bot.user.name} is online! ID: {bot.user.id}')
@bot.event
async def on_guild_join(guild):
    """Register settings/audio state for a guild the bot has just joined."""
    print(f'Uniu-se a: {guild.name}')
    await register(guild)
async def register(guild):
    """Create per-guild settings and audio controller; optionally auto-join a voice channel.

    Auto-join only happens when the global kill-switch is off and the
    guild's 'vc_timeout' setting is disabled: either the configured
    'start_voice_channel' or, when none is set, the guild's first voice
    channel.
    """
    guild_to_settings[guild] = Settings(guild)
    guild_to_audiocontroller[guild] = AudioController(bot, guild)
    sett = guild_to_settings[guild]
    await guild.me.edit(nick=sett.get('default_nickname'))
    if GLOBAL_DISABLE_AUTOJOIN_VC == True:
        return
    voice_channels = guild.voice_channels
    if sett.get('vc_timeout') == False:
        if sett.get('start_voice_channel') == None:
            try:
                await guild_to_audiocontroller[guild].connect_to_voice_channel(guild.voice_channels[0])
            except Exception as e:
                # Best-effort: joining may fail (no channel / missing perms).
                print(e)
        else:
            for vc in voice_channels:
                if vc.id == sett.get('start_voice_channel'):
                    try:
                        await guild_to_audiocontroller[guild].connect_to_voice_channel(voice_channels[voice_channels.index(vc)])
                    except Exception as e:
                        print(e)
## General commands
@bot.command('help')
async def _help(ctx):
    """List of commands"""
    # Build one embed listing every command from the static COMMANDS table.
    embed = discord.Embed(
        title = 'Comandos do Jaskier:',
        color = EMBED_COLOR
    )
    embed.set_footer(text='<> opcional | [] obrigatório')
    for cmd, msg in COMMANDS.items():
        embed.add_field(name=f'{PREFIX}{cmd}', value=f'{msg}', inline=True)
    await ctx.send(embed=embed)
@bot.event
async def on_command_error(ctx, error):
    """Report command errors back to the invoking channel.

    Known user-facing errors are answered and swallowed; anything else
    is reported and then re-raised so it still shows up in the logs.
    """
    if isinstance(error, commands.CommandNotFound):
        await ctx.send(f"{str(error)}, use `{PREFIX} help` para mostrar os comandos disponíveis")
        return
    if isinstance(error, commands.MissingRequiredArgument):
        await ctx.send("MissingRequiredArgument")
        return
    # These disjoint error types all just echo their own message.
    if isinstance(error, (commands.ChannelNotFound,
                          commands.CommandInvokeError,
                          commands.NoPrivateMessage)):
        await ctx.send(str(error))
        return
    # Unknown error: report it and propagate.
    await ctx.send(str(error))
    raise error
## Add cogs and run bot
# Attach the command groups, then block forever serving Discord events.
bot.add_cog(General(bot, PREFIX))
bot.add_cog(Music(bot, PREFIX))
bot.run(TOKEN)
|
[
"discord.Activity",
"app.audio_controller.AudioController",
"discord.ext.commands.when_mentioned_or",
"discord.Embed",
"app.commands.music.Music",
"dotenv.load_dotenv",
"app.settings.Settings",
"app.commands.general.General",
"app.static.COMMANDS.items",
"os.getenv",
"discord.Intents.all"
] |
[((427, 440), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (438, 440), False, 'from dotenv import load_dotenv\n'), ((519, 540), 'discord.Intents.all', 'discord.Intents.all', ([], {}), '()\n', (538, 540), False, 'import discord\n'), ((569, 595), 'os.getenv', 'os.getenv', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (578, 595), False, 'import os\n'), ((1370, 1385), 'app.settings.Settings', 'Settings', (['guild'], {}), '(guild)\n', (1378, 1385), False, 'from app.settings import Settings\n'), ((1424, 1451), 'app.audio_controller.AudioController', 'AudioController', (['bot', 'guild'], {}), '(bot, guild)\n', (1439, 1451), False, 'from app.audio_controller import AudioController\n'), ((2381, 2443), 'discord.Embed', 'discord.Embed', ([], {'title': '"""Comandos do Jaskier:"""', 'color': 'EMBED_COLOR'}), "(title='Comandos do Jaskier:', color=EMBED_COLOR)\n", (2394, 2443), False, 'import discord\n'), ((2549, 2565), 'app.static.COMMANDS.items', 'COMMANDS.items', ([], {}), '()\n', (2563, 2565), False, 'from app.static import COMMANDS\n'), ((3426, 3446), 'app.commands.general.General', 'General', (['bot', 'PREFIX'], {}), '(bot, PREFIX)\n', (3433, 3446), False, 'from app.commands.general import General\n'), ((3460, 3478), 'app.commands.music.Music', 'Music', (['bot', 'PREFIX'], {}), '(bot, PREFIX)\n', (3465, 3478), False, 'from app.commands.music import Music\n'), ((639, 673), 'discord.ext.commands.when_mentioned_or', 'commands.when_mentioned_or', (['PREFIX'], {}), '(PREFIX)\n', (665, 673), False, 'from discord.ext import commands\n'), ((906, 981), 'discord.Activity', 'discord.Activity', ([], {'type': 'discord.ActivityType.listening', 'name': 'f"""{PREFIX}help"""'}), "(type=discord.ActivityType.listening, name=f'{PREFIX}help')\n", (922, 981), False, 'import discord\n')]
|
from django.shortcuts import get_object_or_404
from rest_framework import permissions
from boards.models import Board
class IsAuthorOrParticipantOrAdminForCreateList(permissions.BasePermission):
    """Allow the request only for the target board's author, a participant, or staff.

    The board is taken from ``request.data['board']``; a missing board id
    results in a 404 via ``get_object_or_404``.
    """

    def has_permission(self, request, view):
        # Bug fix: the original only returned a value for authenticated
        # users and implicitly returned None otherwise; deny explicitly and
        # skip the database lookup for anonymous requests.
        if not request.user.is_authenticated:
            return False
        board = get_object_or_404(Board, id=request.data['board'])
        return (request.user == board.author or
                board.participants.filter(id=request.user.id).exists()
                or request.user.is_staff)
class IsAuthor(permissions.BasePermission):
    """Object-level check: the requester authored the board this object belongs to."""

    def has_object_permission(self, request, view, obj):
        return request.user == obj.board.author
class IsParticipant(permissions.BasePermission):
    """Object-level check: the requester participates in the object's board."""

    def has_object_permission(self, request, view, obj):
        participants = obj.board.participants
        return participants.filter(id=request.user.id).exists()
class IsStaff(permissions.BasePermission):
    """Object-level check that passes only for staff users."""

    def has_object_permission(self, request, view, obj):
        return request.user.is_staff
|
[
"django.shortcuts.get_object_or_404"
] |
[((260, 310), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Board'], {'id': "request.data['board']"}), "(Board, id=request.data['board'])\n", (277, 310), False, 'from django.shortcuts import get_object_or_404\n')]
|
from django.db import models
from django.forms.models import ModelForm
from django import forms
from django.contrib import admin
from django.contrib.auth.models import User,Group
import string, datetime
from django.template.defaultfilters import slugify
class List(models.Model):
    """A named to-do list owned by an auth ``Group``."""
    name = models.CharField(max_length=60)
    # The slug is derived from ``name`` on first save and is not editable.
    slug = models.SlugField(max_length=60,editable=False)
    # slug = models.SlugField(max_length=60)
    group = models.ForeignKey(Group)
    def save(self, *args, **kwargs):
        # Only slugify on creation (no primary key yet); renaming a list
        # afterwards does not regenerate the slug.
        if not self.id:
            self.slug = slugify(self.name)
        super(List, self).save(*args, **kwargs)
    def __unicode__(self):
        return self.name
    # Custom manager lets us do things like Item.completed_tasks.all()
    objects = models.Manager()
    def incomplete_tasks(self):
        # Count all incomplete tasks on the current list instance
        return Item.objects.filter(list=self,completed=0)
    class Meta:
        ordering = ["name"]
        verbose_name_plural = "Lists"
        # Prevents (at the database level) creation of two lists with the same name in the same group
        unique_together = ("group", "slug")
class Item(models.Model):
    """A single to-do task belonging to a ``List``."""
    title = models.CharField(max_length=140)
    list = models.ForeignKey(List)
    created_date = models.DateField(auto_now_add=True)
    due_date = models.DateField(blank=True,null=True,)
    completed = models.BooleanField()
    completed_date = models.DateField(blank=True,null=True)
    created_by = models.ForeignKey(User, related_name='created_by')
    assigned_to = models.ForeignKey(User, related_name='todo_assigned_to')
    note = models.TextField(blank=True,null=True)
    priority = models.PositiveIntegerField()
    # Model method: Has due date for an instance of this object passed?
    def overdue_status(self):
        "Returns whether the item's due date has passed or not."
        # Guard against items with no due date: previously a null due_date
        # could be compared against today's date, misreporting the item.
        if self.due_date and datetime.date.today() > self.due_date:
            return 1
    def __unicode__(self):
        return self.title
    # Auto-set the item completion date
    def save(self, *args, **kwargs):
        # Accept and forward Django's save() options (force_insert, using,
        # update_fields, ...); the previous zero-argument signature broke
        # any caller that passed them.
        # If Item is being marked complete, set the completed_date
        if self.completed :
            self.completed_date = datetime.datetime.now()
        super(Item, self).save(*args, **kwargs)
    class Meta:
        ordering = ["priority"]
class Comment(models.Model):
    """
    Not using Django's built-in comments because we want to be able to save
    a comment and change task details at the same time. Rolling our own since it's easy.
    """
    author = models.ForeignKey(User)
    task = models.ForeignKey(Item)
    # ``datetime.datetime.now`` is passed as a callable so it is evaluated
    # at row-creation time, not once at module import.
    date = models.DateTimeField(default=datetime.datetime.now)
    body = models.TextField(blank=True)
    def __unicode__(self):
        # e.g. "alice - 2017-11-24 16:02:00"
        return '%s - %s' % (
                self.author,
                self.date,
                )
|
[
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.PositiveIntegerField",
"django.db.models.Manager",
"django.db.models.SlugField",
"django.db.models.BooleanField",
"datetime.date.today",
"django.template.defaultfilters.slugify",
"django.db.models.DateField",
"django.db.models.DateTimeField",
"datetime.datetime.now"
] |
[((293, 324), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(60)'}), '(max_length=60)\n', (309, 324), False, 'from django.db import models\n'), ((336, 383), 'django.db.models.SlugField', 'models.SlugField', ([], {'max_length': '(60)', 'editable': '(False)'}), '(max_length=60, editable=False)\n', (352, 383), False, 'from django.db import models\n'), ((444, 468), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Group'], {}), '(Group)\n', (461, 468), False, 'from django.db import models\n'), ((780, 796), 'django.db.models.Manager', 'models.Manager', ([], {}), '()\n', (794, 796), False, 'from django.db import models\n'), ((1279, 1311), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)'}), '(max_length=140)\n', (1295, 1311), False, 'from django.db import models\n'), ((1323, 1346), 'django.db.models.ForeignKey', 'models.ForeignKey', (['List'], {}), '(List)\n', (1340, 1346), False, 'from django.db import models\n'), ((1366, 1401), 'django.db.models.DateField', 'models.DateField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (1382, 1401), False, 'from django.db import models\n'), ((1417, 1456), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1433, 1456), False, 'from django.db import models\n'), ((1473, 1494), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (1492, 1494), False, 'from django.db import models\n'), ((1516, 1555), 'django.db.models.DateField', 'models.DateField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1532, 1555), False, 'from django.db import models\n'), ((1572, 1622), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""created_by"""'}), "(User, related_name='created_by')\n", (1589, 1622), False, 'from django.db import models\n'), ((1641, 1697), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], 
{'related_name': '"""todo_assigned_to"""'}), "(User, related_name='todo_assigned_to')\n", (1658, 1697), False, 'from django.db import models\n'), ((1709, 1748), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)', 'null': '(True)'}), '(blank=True, null=True)\n', (1725, 1748), False, 'from django.db import models\n'), ((1763, 1792), 'django.db.models.PositiveIntegerField', 'models.PositiveIntegerField', ([], {}), '()\n', (1790, 1792), False, 'from django.db import models\n'), ((2651, 2674), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {}), '(User)\n', (2668, 2674), False, 'from django.db import models\n'), ((2686, 2709), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Item'], {}), '(Item)\n', (2703, 2709), False, 'from django.db import models\n'), ((2721, 2772), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'datetime.datetime.now'}), '(default=datetime.datetime.now)\n', (2741, 2772), False, 'from django.db import models\n'), ((2784, 2812), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(True)'}), '(blank=True)\n', (2800, 2812), False, 'from django.db import models\n'), ((559, 577), 'django.template.defaultfilters.slugify', 'slugify', (['self.name'], {}), '(self.name)\n', (566, 577), False, 'from django.template.defaultfilters import slugify\n'), ((1976, 1997), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (1995, 1997), False, 'import string, datetime\n'), ((2299, 2322), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2320, 2322), False, 'import string, datetime\n')]
|
from django.http import JsonResponse
from rest_framework.views import APIView
from rest_framework.response import Response
class TestAPIView(APIView):
    """Fixture endpoint: GET echoes query params with canned data, POST echoes body."""

    def get(self, request, *args, **kwargs):
        payload = {
            'data': [
                {'id': 1, 'some_data': 'foo'},
                {'id': 2, 'some_data': 'bar'},
                {'id': 3, 'some_data': 'baz'},
            ],
            'page': 1,
            'get': dict((k, v[0]) for k, v in request.GET.items()),
        }
        return self.finalize_response(request, Response(payload))

    def post(self, request, *args, **kwargs):
        body = {'data': request.data.get('data')}
        return self.finalize_response(request, Response(body))
def test_fbv(request):
    """Function-based fixture view: echo POST data, or return two fixed fields."""
    if request.method != 'POST':
        return JsonResponse({'field1': 'field1_value', 'field2': 'field2_value'})
    return JsonResponse(request.POST)
class TestFilesAPIView(APIView):
    """Report the name and size of every uploaded file."""

    def post(self, request, *args, **kwargs):
        summary = {}
        for field_name, attachment in request.FILES.items():
            summary[field_name] = {
                'name': attachment.name,
                'size': attachment.size,
            }
        return self.finalize_response(request, Response({'files': summary}))
|
[
"django.http.JsonResponse"
] |
[((743, 769), 'django.http.JsonResponse', 'JsonResponse', (['request.POST'], {}), '(request.POST)\n', (755, 769), False, 'from django.http import JsonResponse\n'), ((795, 861), 'django.http.JsonResponse', 'JsonResponse', (["{'field1': 'field1_value', 'field2': 'field2_value'}"], {}), "({'field1': 'field1_value', 'field2': 'field2_value'})\n", (807, 861), False, 'from django.http import JsonResponse\n')]
|
import os
import sys
import subprocess
from io import StringIO
from typing import List
from caos._internal.types import ExitCode
from caos._internal.utils.yaml import get_virtual_environment_from_yaml
from caos._internal.utils.working_directory import get_current_dir
from caos._internal.utils.os import is_posix_os, is_win_os
from caos._internal.constants import (
CAOS_YAML_FILE_NAME, DEFAULT_VIRTUAL_ENVIRONMENT_NAME,
PIP_PATH_VENV_WIN, PIP_PATH_VENV_POSIX
)
from caos._cli_commands.raise_exceptions import (
raise_missing_yaml_exception,
raise_missing_virtual_environment_exception,
raise_missing_pip_binary_exception
)
def main(args: List[str]) -> ExitCode:
    """Run ``pip`` from the project's virtual environment with *args*.

    Verifies that the caos YAML file, the virtual environment directory and
    the pip binary all exist before delegating to ``subprocess``, and
    returns pip's exit status.
    """
    project_dir: str = get_current_dir()

    yaml_path = os.path.abspath(project_dir + "/" + CAOS_YAML_FILE_NAME)
    if not os.path.isfile(yaml_path):
        raise_missing_yaml_exception()

    venv_name: str = get_virtual_environment_from_yaml()
    venv_path = os.path.abspath(project_dir + "/" + venv_name)
    if not os.path.isdir(venv_path):
        raise_missing_virtual_environment_exception(env_name=venv_name)

    if is_win_os():
        pip_path: str = PIP_PATH_VENV_WIN.replace(DEFAULT_VIRTUAL_ENVIRONMENT_NAME, venv_name)
    elif is_posix_os():
        pip_path: str = PIP_PATH_VENV_POSIX.replace(DEFAULT_VIRTUAL_ENVIRONMENT_NAME, venv_name)

    if not os.path.isfile(pip_path):
        raise_missing_pip_binary_exception(env_name=venv_name)

    # The unit tests for this command redirect stdout to a StringIO buffer,
    # which subprocess cannot write into; pipe the child's output in that
    # case so the tests can still capture it.
    running_under_unittest: bool = isinstance(sys.stdout, StringIO)
    completed: subprocess.CompletedProcess = subprocess.run(
        [pip_path] + args,
        stdout=subprocess.PIPE if running_under_unittest else sys.stdout,
        stderr=subprocess.STDOUT,
        stdin=sys.stdin,
        universal_newlines=True
    )
    if running_under_unittest and completed.stdout:
        print(completed.stdout)
    return ExitCode(completed.returncode)
|
[
"subprocess.run",
"os.path.abspath",
"caos._cli_commands.raise_exceptions.raise_missing_yaml_exception",
"caos._internal.constants.PIP_PATH_VENV_POSIX.replace",
"caos._cli_commands.raise_exceptions.raise_missing_virtual_environment_exception",
"caos._internal.utils.yaml.get_virtual_environment_from_yaml",
"caos._internal.utils.os.is_win_os",
"caos._internal.utils.os.is_posix_os",
"os.path.isfile",
"caos._cli_commands.raise_exceptions.raise_missing_pip_binary_exception",
"caos._internal.types.ExitCode",
"caos._internal.constants.PIP_PATH_VENV_WIN.replace",
"caos._internal.utils.working_directory.get_current_dir"
] |
[((708, 725), 'caos._internal.utils.working_directory.get_current_dir', 'get_current_dir', ([], {}), '()\n', (723, 725), False, 'from caos._internal.utils.working_directory import get_current_dir\n'), ((872, 907), 'caos._internal.utils.yaml.get_virtual_environment_from_yaml', 'get_virtual_environment_from_yaml', ([], {}), '()\n', (905, 907), False, 'from caos._internal.utils.yaml import get_virtual_environment_from_yaml\n'), ((1063, 1074), 'caos._internal.utils.os.is_win_os', 'is_win_os', ([], {}), '()\n', (1072, 1074), False, 'from caos._internal.utils.os import is_posix_os, is_win_os\n'), ((1774, 1938), 'subprocess.run', 'subprocess.run', (['([pip_path] + args)'], {'stdout': '(subprocess.PIPE if is_unittest else sys.stdout)', 'stderr': 'subprocess.STDOUT', 'stdin': 'sys.stdin', 'universal_newlines': '(True)'}), '([pip_path] + args, stdout=subprocess.PIPE if is_unittest else\n sys.stdout, stderr=subprocess.STDOUT, stdin=sys.stdin,\n universal_newlines=True)\n', (1788, 1938), False, 'import subprocess\n'), ((2067, 2099), 'caos._internal.types.ExitCode', 'ExitCode', (['pip_process.returncode'], {}), '(pip_process.returncode)\n', (2075, 2099), False, 'from caos._internal.types import ExitCode\n'), ((819, 849), 'caos._cli_commands.raise_exceptions.raise_missing_yaml_exception', 'raise_missing_yaml_exception', ([], {}), '()\n', (847, 849), False, 'from caos._cli_commands.raise_exceptions import raise_missing_yaml_exception, raise_missing_virtual_environment_exception, raise_missing_pip_binary_exception\n'), ((991, 1054), 'caos._cli_commands.raise_exceptions.raise_missing_virtual_environment_exception', 'raise_missing_virtual_environment_exception', ([], {'env_name': 'venv_name'}), '(env_name=venv_name)\n', (1034, 1054), False, 'from caos._cli_commands.raise_exceptions import raise_missing_yaml_exception, raise_missing_virtual_environment_exception, raise_missing_pip_binary_exception\n'), ((1100, 1170), 'caos._internal.constants.PIP_PATH_VENV_WIN.replace', 
'PIP_PATH_VENV_WIN.replace', (['DEFAULT_VIRTUAL_ENVIRONMENT_NAME', 'venv_name'], {}), '(DEFAULT_VIRTUAL_ENVIRONMENT_NAME, venv_name)\n', (1125, 1170), False, 'from caos._internal.constants import CAOS_YAML_FILE_NAME, DEFAULT_VIRTUAL_ENVIRONMENT_NAME, PIP_PATH_VENV_WIN, PIP_PATH_VENV_POSIX\n'), ((1180, 1193), 'caos._internal.utils.os.is_posix_os', 'is_posix_os', ([], {}), '()\n', (1191, 1193), False, 'from caos._internal.utils.os import is_posix_os, is_win_os\n'), ((1304, 1328), 'os.path.isfile', 'os.path.isfile', (['pip_path'], {}), '(pip_path)\n', (1318, 1328), False, 'import os\n'), ((1338, 1392), 'caos._cli_commands.raise_exceptions.raise_missing_pip_binary_exception', 'raise_missing_pip_binary_exception', ([], {'env_name': 'venv_name'}), '(env_name=venv_name)\n', (1372, 1392), False, 'from caos._cli_commands.raise_exceptions import raise_missing_yaml_exception, raise_missing_virtual_environment_exception, raise_missing_pip_binary_exception\n'), ((752, 808), 'os.path.abspath', 'os.path.abspath', (["(current_dir + '/' + CAOS_YAML_FILE_NAME)"], {}), "(current_dir + '/' + CAOS_YAML_FILE_NAME)\n", (767, 808), False, 'import os\n'), ((934, 980), 'os.path.abspath', 'os.path.abspath', (["(current_dir + '/' + venv_name)"], {}), "(current_dir + '/' + venv_name)\n", (949, 980), False, 'import os\n'), ((1219, 1291), 'caos._internal.constants.PIP_PATH_VENV_POSIX.replace', 'PIP_PATH_VENV_POSIX.replace', (['DEFAULT_VIRTUAL_ENVIRONMENT_NAME', 'venv_name'], {}), '(DEFAULT_VIRTUAL_ENVIRONMENT_NAME, venv_name)\n', (1246, 1291), False, 'from caos._internal.constants import CAOS_YAML_FILE_NAME, DEFAULT_VIRTUAL_ENVIRONMENT_NAME, PIP_PATH_VENV_WIN, PIP_PATH_VENV_POSIX\n')]
|
import serial
import time
import paho.mqtt.client as mqtt
import sys
host = "192.168.1.11"  # MQTT broker address
puerto = 1883  # MQTT broker port
arduino = serial.Serial('/dev/ttyACM0', 115200)
time.sleep(2)  # give the Arduino time to reset after the serial port opens
Connected = False  # becomes True once an "on" order has been received
def on_connect(client, userdata, flags, rc):
    """paho-mqtt connect callback: subscribe to the order topic on success."""
    if rc != 0:
        print("Connection failed")
        return
    print("Connected to broker")
    client.subscribe("Control/orden")
def on_message(client, userdata, msg):
    """paho-mqtt message callback: relay an "on" order to the Arduino."""
    global Connected
    mensaje = msg.payload.decode()
    print('%s %s' % (msg.topic, mensaje))
    if mensaje == "on":
        arduino.write(str.encode('ON'))
        Connected = True
def on_publish(client, userdata, result):
    """paho-mqtt publish callback; nothing to do on delivery."""
    return None
client = mqtt.Client()
client.on_connect = on_connect
client.on_message = on_message
client.on_publish = on_publish
client.connect(host, puerto)
client.loop_start()  # service MQTT traffic on a background thread
# Main loop: once an "on" order has enabled the relay, forward each
# distance line read from the serial port to the "Control/distancia" topic.
while True:
    if(Connected == True):
        dist = arduino.readline()
        if(dist.decode() != ''):
            print(dist.decode())
            client.publish("Control/distancia", dist.decode())
|
[
"serial.Serial",
"paho.mqtt.client.Client",
"time.sleep"
] |
[((116, 153), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyACM0"""', '(115200)'], {}), "('/dev/ttyACM0', 115200)\n", (129, 153), False, 'import serial\n'), ((154, 167), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (164, 167), False, 'import time\n'), ((783, 796), 'paho.mqtt.client.Client', 'mqtt.Client', ([], {}), '()\n', (794, 796), True, 'import paho.mqtt.client as mqtt\n')]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.7 on 2017-11-24 16:02
from __future__ import unicode_literals
from django.db import migrations
def sublists_to_children(apps, schema):
    """Forward data migration: convert each plain m2m ``sublists`` entry into
    an ordered ``Sublist`` through row, appended after the list's tests."""
    TestList = apps.get_model("qa", "TestList")
    Sublist = apps.get_model("qa", "Sublist")
    lists_with_subs = TestList.objects.filter(sublists__isnull=False)
    for test_list in lists_with_subs:
        base_order = test_list.tests.count()
        ordered_subs = test_list.sublists.order_by("name")
        for offset, sub in enumerate(ordered_subs):
            Sublist.objects.create(parent=test_list, child=sub, order=base_order + offset)
            test_list.sublists.remove(sub)
def children_to_sublists(apps, schema):
    """Reverse data migration: restore the plain m2m ``sublists`` relation.

    The forward migration moved every ``sublists`` member into a ``Sublist``
    through row and removed it from the m2m, so the original reverse code —
    which iterated ``tl.sublists.all()`` and re-added its own members — was
    a no-op on an already-emptied relation.  Instead, walk the ``Sublist``
    rows, put each child back on its parent's ``sublists`` m2m, and delete
    the through row.
    """
    Sublist = apps.get_model("qa", "Sublist")
    for link in Sublist.objects.select_related("parent", "child"):
        link.parent.sublists.add(link.child)
        link.delete()
class Migration(migrations.Migration):
    # Data migration: replaces the plain ``sublists`` m2m with ordered
    # ``Sublist`` through rows on apply, and restores it on reversal.
    dependencies = [
        ('qa', '0007_auto_20171124_1102'),
    ]
    operations = [
        migrations.RunPython(sublists_to_children, children_to_sublists)
    ]
|
[
"django.db.migrations.RunPython"
] |
[((938, 1002), 'django.db.migrations.RunPython', 'migrations.RunPython', (['sublists_to_children', 'children_to_sublists'], {}), '(sublists_to_children, children_to_sublists)\n', (958, 1002), False, 'from django.db import migrations\n')]
|
"""
Code for Parsing-Reading-Predict Networks (PRPN; Shen et al., 2018)
This file is a version of a class from https://github.com/yikangshen/PRPN, modified to integrate with jiant.
We modified the forward function of original PRPN code.
"""
import torch
import torch.nn as nn
from .ParsingNetwork import ParsingNetwork
from .PredictNetwork import PredictNetwork
from .ReadingNetwork import ReadingNetwork
class PRPN(nn.Module):
    """Container module with an encoder, a recurrent module, and a decoder."""
    def __init__(
        self,
        ninp,
        nhid,
        nlayers,
        nslots=15,
        nlookback=5,
        resolution=0.1,
        embedder=None,
        dropout=0.7,
        idropout=0.5,
        rdropout=0.5,
        phrase_layer=None,
        tie_weights=True,
        hard=True,
        res=0,
        batch_size=20,
    ):
        # ninp: embedding size; nhid: hidden size; nlayers: number of
        # ReadingNetwork layers; nslots: memory slots; nlookback: parser
        # look-back window; resolution: parser gate resolution.
        # NOTE(review): tie_weights and batch_size are accepted but unused
        # here — the weight-tying decoder code below is commented out.
        super(PRPN, self).__init__()
        self.nhid = nhid
        self.ninp = ninp
        self.nlayers = nlayers
        self.nslots = nslots
        self.nlookback = nlookback
        # Three separate dropouts: output (drop), internal (idrop), and
        # recurrent (rdrop, applied to a per-layer mask reused over time).
        self.drop = nn.Dropout(dropout)
        self.idrop = nn.Dropout(idropout)
        self.rdrop = nn.Dropout(rdropout)
        # Feedforward layers
        self.embedder = embedder
        # Vocabulary size is taken from the provided embedder's word
        # embedding weight; only dim[0] is used below.
        dim = self.embedder.token_embedder_words.weight.shape
        self._phrase_layer = phrase_layer
        self.ninp = ninp
        self.emb = nn.Embedding(dim[0], self.ninp)
        self.parser = ParsingNetwork(self.ninp, nhid, nslots, nlookback, resolution, idropout, hard)
        # First reader maps ninp -> nhid; subsequent layers are nhid -> nhid.
        self.reader = nn.ModuleList(
            [ReadingNetwork(self.ninp, nhid, nslots, dropout=dropout, idropout=idropout)]
            + [
                ReadingNetwork(nhid, nhid, nslots, dropout=idropout, idropout=idropout)
                for i in range(nlayers - 1)
            ]
        )
        self.predictor = PredictNetwork(nhid, self.ninp, nslots, idropout, res)
        # self.decoder = nn.Linear(ninp, ntoken)
        # if tie_weights:
        #     self.decoder.weight = self.encoder.weight
        # Populated by forward_actual() for inspection after a forward pass.
        self.attentions = None
        self.gates = None
        self.init_weights()
    def get_input_dim(self):
        # Input and output dims match the embedding size.
        return self.ninp
    def get_output_dim(self):
        return self.ninp
    def init_weights(self):
        # Small uniform init for the embedding table only.
        initrange = 0.01
        self.emb.weight.data.uniform_(-initrange, initrange)
        # self.decoder.bias.data.fill_(0)
        # self.decoder.weight.data.uniform_(-initrange, initrange)
    def clip_grad_norm(self, clip):
        # Clip gradients of each reader's memory RNN only.
        for model in self.reader:
            torch.nn.utils.clip_grad_norm(model.memory_rnn.parameters(), clip)
    def forward(self, input, task=None):
        # input: (timesteps, batch) token-id tensor — TODO confirm against
        # callers; `task` is accepted for interface compatibility but unused.
        batch_size = input.size()[1]
        hidden = self.init_hidden(batch_size)
        return self.forward_actual(input, hidden)
    def forward_actual(self, input, hidden_states):
        """Run the parse/read/predict recurrence over all timesteps.

        Returns (output, mask) where output has shape (timesteps, bsz, -1)
        and mask marks non-zero (non-padding) input positions.
        """
        abs_inp = input
        ntimestep = input.size(0)
        bsz = input.size(1)
        emb = self.emb(input)  # timesteps, bsz, ninp
        output_h = []
        output_memory = []
        attentions = []
        reader_state, parser_state, predictor_state = hidden_states  # memory_h: bsz, nslots, nhid
        # Parser produces per-timestep memory gates and the syntactic gates.
        (memory_gate, memory_gate_next), gate, parser_state = self.parser(emb, parser_state)
        # One recurrent-dropout mask per layer, fixed across timesteps.
        rmask = torch.autograd.Variable(torch.ones(self.nlayers, self.nhid))
        if input.is_cuda:
            rmask = rmask.cuda()
        rmask = self.rdrop(rmask)
        for i in range(input.size(0)):
            emb_i = emb[i]  # emb_i: bsz, nhid
            attention = []
            attention.append(memory_gate[i])
            # summarize layer
            h_i = emb_i
            for j in range(self.nlayers):
                hidden = reader_state[j]
                h_i, new_memory, attention0 = self.reader[j](h_i, hidden, memory_gate[i], rmask[j])
                # updata states
                attention.append(attention0)
                reader_state[j] = new_memory
            # predict layer
            selected_memory_h, predictor_state, attention1 = self.predictor.attention(
                h_i, predictor_state, gate_time=memory_gate_next[i]
            )
            output_h.append(h_i)
            output_memory.append(selected_memory_h)
            attention.append(memory_gate_next[i])
            attention.append(attention1)
            attentions.append(torch.stack(attention, dim=1))
        # Stash attention maps and gates for post-hoc inspection.
        self.attentions = torch.stack(attentions, dim=0)
        self.gates = gate
        output_h = torch.stack(output_h, dim=0)
        output_memory = torch.stack(output_memory, dim=0)
        output = self.predictor(output_h.view(-1, self.nhid), output_memory.view(-1, self.nhid))
        output = self.drop(output).view(ntimestep, bsz, -1)
        # decoded = self.decoder(output)
        # return decoded.view(ntimestep, bsz, -1), (reader_state, parser_state, predictor_state)
        # Token id 0 is treated as padding for the mask.
        mask = abs_inp != 0
        return output, mask
    def init_hidden(self, bsz):
        # Fresh zero/initial states for readers, parser, and predictor.
        return (
            [self.reader[i].init_hidden(bsz) for i in range(self.nlayers)],
            self.parser.init_hidden(bsz),
            self.predictor.init_hidden(bsz),
        )
|
[
"torch.nn.Dropout",
"torch.ones",
"torch.nn.Embedding",
"torch.stack"
] |
[((1061, 1080), 'torch.nn.Dropout', 'nn.Dropout', (['dropout'], {}), '(dropout)\n', (1071, 1080), True, 'import torch.nn as nn\n'), ((1102, 1122), 'torch.nn.Dropout', 'nn.Dropout', (['idropout'], {}), '(idropout)\n', (1112, 1122), True, 'import torch.nn as nn\n'), ((1144, 1164), 'torch.nn.Dropout', 'nn.Dropout', (['rdropout'], {}), '(rdropout)\n', (1154, 1164), True, 'import torch.nn as nn\n'), ((1379, 1410), 'torch.nn.Embedding', 'nn.Embedding', (['dim[0]', 'self.ninp'], {}), '(dim[0], self.ninp)\n', (1391, 1410), True, 'import torch.nn as nn\n'), ((4384, 4414), 'torch.stack', 'torch.stack', (['attentions'], {'dim': '(0)'}), '(attentions, dim=0)\n', (4395, 4414), False, 'import torch\n'), ((4461, 4489), 'torch.stack', 'torch.stack', (['output_h'], {'dim': '(0)'}), '(output_h, dim=0)\n', (4472, 4489), False, 'import torch\n'), ((4514, 4547), 'torch.stack', 'torch.stack', (['output_memory'], {'dim': '(0)'}), '(output_memory, dim=0)\n', (4525, 4547), False, 'import torch\n'), ((3270, 3305), 'torch.ones', 'torch.ones', (['self.nlayers', 'self.nhid'], {}), '(self.nlayers, self.nhid)\n', (3280, 3305), False, 'import torch\n'), ((4326, 4355), 'torch.stack', 'torch.stack', (['attention'], {'dim': '(1)'}), '(attention, dim=1)\n', (4337, 4355), False, 'import torch\n')]
|
import netCDF4
import bisect
import warnings
from collections import OrderedDict
import numpy as np
from kid_readout.roach.tools import ntone_power_correction
import kid_readout.analysis.timeseries.fftfilt
from kid_readout.measurement.io.data_block import lpf
import kid_readout.roach.tools
class TimestreamGroup(object):
    """Wraps a netCDF timestream (``datablocks``) group and derives per-tone
    measurement frequencies and sample rates from its variables."""
    def __init__(self,ncgroup, parent=None):
        self.parent = parent
        keys = ncgroup.variables.keys()
        keys.remove('data')
        keys.remove('dt')
        keys.remove('fs')
        keys.remove('tone')
        keys.remove('nsamp')
        # Copy every remaining netCDF variable onto the instance verbatim
        # (this is where e.g. self.epoch, self.nfft and self.lo come from).
        for key in keys:
            setattr(self,key,ncgroup.variables[key][:])
        # Older files may lack these; default to None so callers can test.
        required_keys = ['wavenorm', 'sweep_index']
        for key in required_keys:
            if key not in keys:
                setattr(self,key,None)
        # self.epoch = ncgroup.variables['epoch'][:]
        self.tonebin = ncgroup.variables['tone'][:]
        self.tone_nsamp = ncgroup.variables['nsamp'][:]
        # self.fftbin = ncgroup.variables['fftbin'][:]
        # self.nfft = ncgroup.variables['nfft'][:]
        # self.dt = ncgroup.variables['dt'][:]  # the dt property is actually misleading at this point, so leaving it out
        self.adc_sampling_freq = ncgroup.variables['fs'][:]
        self.baseband_measurement_freq = self.adc_sampling_freq*self.tonebin/(1.0*self.tone_nsamp)
        if self.parent.heterodyne:
            # Fold baseband frequencies above Nyquist down to negative
            # frequencies, then offset by the LO.
            self.baseband_measurement_freq = np.where(self.baseband_measurement_freq>=self.adc_sampling_freq/2,
                                                      self.baseband_measurement_freq-self.adc_sampling_freq,
                                                      self.baseband_measurement_freq)
            self.measurement_freq = self.lo + self.baseband_measurement_freq
            # fs appears to be stored in MHz (hence the 1e6) — TODO confirm.
            self.sample_rate = self.adc_sampling_freq*1e6/(self.nfft)
        else:
            self.measurement_freq = self.baseband_measurement_freq
            # Baseband readout has half the effective sample rate.
            self.sample_rate = self.adc_sampling_freq*1e6/(2*self.nfft)
        # if ncgroup.variables.has_key('wavenorm'):
        #     self.wavenorm = ncgroup.variables['wavenorm'][:]
        # else:
        #     self.wavenorm = None
        # if ncgroup.variables.has_key('sweep_index'):
        #     self.sweep_index = ncgroup.variables['sweep_index'][:]
        # else:
        #     self.sweep_index = None
        if self.parent is not None:
            # Reconstruct the source-modulation settings in effect at each
            # block's epoch from the parent file's hardware-state log.
            self.modulation_duty_cycle = np.zeros_like(self.epoch)
            self.modulation_phase = np.zeros_like(self.epoch)
            self.modulation_freq = np.zeros_like(self.epoch)
            self.modulation_period_samples = np.zeros_like(self.epoch)
            for index in range(len(self.epoch)):
                out, rate = self.parent.get_modulation_state_at(self.epoch[index])
                if out == 2:
                    # Output state 2 means actively modulated at fs/2**rate.
                    self.modulation_duty_cycle[index] = 0.5
                    self.modulation_freq[index] = self.sample_rate[index]/2.**rate
                    self.modulation_period_samples[index] = 2.**rate
                else:
                    # Otherwise the output is held constant (0 or 1).
                    self.modulation_duty_cycle[index] = out
                    self.modulation_freq[index] = 0.0
                    self.modulation_period_samples[index] = 0.0
        # Keep a handle on the raw variable; decode lazily via .data.
        self._data = ncgroup.variables['data']
        self.num_data_samples = self._data.shape[1]
        self.data_len_seconds = self.num_data_samples/self.sample_rate
        self._datacache = None
    @property
    def data(self):
        # Lazily decode the full complex timestream, scaled by wavenorm,
        # and cache the result.
        if self._datacache is None:
            if self.wavenorm is None:
                wavenorm = 1.0
                warnings.warn("wave normalization not found, time series will not match sweep")
            else:
                wavenorm = self.wavenorm[:,None]
            self._datacache = self._data[:].view(self._data.datatype.name)*wavenorm
        return self._datacache
    def get_data_index(self,index):
        # Decode a single channel, using the cache when it already exists.
        if self._datacache is None:
            if self.wavenorm is None:
                wavenorm = 1.0
                warnings.warn("wave normalization not found, time series will not match sweep")
            else:
                wavenorm = self.wavenorm[index]
            return self._data[index].view(self._data.datatype.name)*wavenorm
        else:
            return self._datacache[index]
class SweepGroup(object):
    """Wraps a netCDF sweep group: swept frequencies, complex S21 values,
    per-point error estimates, and the underlying timestream blocks."""
    def __init__(self,ncgroup, parent=None):
        self.parent = parent
        self.frequency = ncgroup.variables['frequency'][:]
        self.s21 = ncgroup.variables['s21'][:].view(ncgroup.variables['s21'].datatype.name)
        # Maps each sweep point to the resonator/sweep it belongs to.
        self.index = ncgroup.variables['index'][:]
        self.timestream_group = TimestreamGroup(ncgroup.groups['datablocks'], parent=parent)
        self.start_epoch = self.timestream_group.epoch.min()
        self.end_epoch = self.timestream_group.epoch.max()
    @property
    def errors(self):
        """Complex error-on-the-mean estimates for every sweep point."""
        return self._get_errors()
    def _get_errors(self,mask = None):
        """Estimate complex errors for the points selected by ``mask``
        (boolean array), or for all points when ``mask`` is None.

        Note: the original version also computed a local ``wavenorm`` here
        that was never used; it has been removed (the timestream data is
        already wavenorm-scaled by TimestreamGroup.data).
        """
        if mask is None:
            indexes_to_calculate = np.arange(self.timestream_group.data.shape[0],dtype='int')
        else:
            indexes_to_calculate = np.flatnonzero(mask)
        errors = np.zeros(indexes_to_calculate.shape[0], dtype='complex')
        for output_index,input_index in enumerate(indexes_to_calculate):
            # Low-pass filter the timestream and drop the filter transient.
            filtered = kid_readout.analysis.timeseries.fftfilt.fftfilt(lpf, self.timestream_group.data[input_index,
                                                             :])[len(lpf):]
            # the standard deviation is scaled by the number of independent samples
            # to compute the error on the mean.
            error_scaling = np.sqrt(float(len(filtered))/len(lpf))
            real_error = filtered.real.std()/error_scaling
            imag_error = filtered.imag.std()/error_scaling
            errors[output_index] = real_error + 1j*imag_error
        return errors
    def select_by_index(self,index):
        """Return (frequency, s21, errors) for one sweep index, frequency-sorted."""
        mask = self.index == index
        freq,s21,errors = self.frequency[mask], self.s21[mask], self._get_errors(mask)
        order = freq.argsort()
        return freq[order], s21[order], errors[order]
    def select_by_frequency(self,freq):
        """Return the sweep whose points lie closest to ``freq``."""
        findex = np.argmin(abs(self.frequency - freq))
        index = self.index[findex]
        return self.select_by_index(index)
class ReadoutNetCDF(object):
    """Read-only wrapper around a kid_readout netCDF data file, exposing the
    hardware-state log and all sweep and timestream groups as attributes."""
    def __init__(self,filename):
        self.filename = filename
        self.ncroot = netCDF4.Dataset(filename,mode='r')
        hwgroup = self.ncroot.groups['hw_state']
        self.hardware_state_epoch = hwgroup.variables['epoch'][:]
        self.adc_atten = hwgroup.variables['adc_atten'][:]
        self.dac_atten = hwgroup.variables['dac_atten'][:]
        # Several attributes/variables are optional in older files; fall
        # back to sensible defaults when absent.
        try:
            self.heterodyne = bool(self.ncroot.heterodyne)
        except AttributeError:
            self.heterodyne = False
        if 'ntones' in hwgroup.variables:
            self.num_tones = hwgroup.variables['ntones'][:]
        else:
            self.num_tones = None
        for key in ['modulation_rate', 'modulation_output']:
            if key in hwgroup.variables:
                self.__setattr__(key,hwgroup.variables[key][:])
            else:
                self.__setattr__(key,None)
        try:
            self.gitinfo = self.ncroot.gitinfo
        except AttributeError:
            self.gitinfo = ''
        try:
            self.boffile = self.ncroot.boffile
        except AttributeError:
            self.boffile = ''
        try:
            self.mmw_atten_turns = self.ncroot.mmw_atten_turns
        except AttributeError:
            self.mmw_atten_turns = (np.nan,np.nan)
        # Wrap every sweep and timestream group, exposing each both in an
        # ordered dict and as a same-named attribute on this object.
        self.sweeps_dict = OrderedDict()
        self.timestreams_dict = OrderedDict()
        for name,group in self.ncroot.groups['sweeps'].groups.items():
            self.sweeps_dict[name] = SweepGroup(group, parent=self)
            self.__setattr__(name,self.sweeps_dict[name])
        self.sweeps = self.sweeps_dict.values()
        for name,group in self.ncroot.groups['timestreams'].groups.items():
            self.timestreams_dict[name] = TimestreamGroup(group, parent=self)
            self.__setattr__(name,self.timestreams_dict[name])
        self.timestreams = self.timestreams_dict.values()
    def close(self):
        # Close the underlying netCDF dataset; the wrapper is unusable after.
        self.ncroot.close()
    def get_delay_estimate(self):
        # Prefer the boffile name when recorded; otherwise fall back to
        # estimating from the first sweep's FFT size.
        if self.boffile == '':
            try:
                nfft = self.sweeps[0].timestream_group.nfft[0]
            except IndexError:
                raise Exception("could not find any means to estimate the delay for %s" % self.filename)
            return kid_readout.roach.tools.get_delay_estimate_for_nfft(nfft)
        else:
            return kid_readout.roach.tools.get_delay_estimate_for_boffile(self.boffile)
    def _get_hwstate_index_at(self,epoch):
        """
        Find the index of the hardware state arrays corresponding to the hardware state at a given epoch
        :param epoch: unix timestamp
        :return: integer index into the hardware-state arrays (clamped to 0)
        """
        index = bisect.bisect_left(self.hardware_state_epoch, epoch) # find the index of the epoch immediately preceding the desired epoch
        index = index - 1
        if index < 0:
            index = 0
        return index
    def get_effective_dac_atten_at(self,epoch):
        """
        Get the dac attenuator value and total signal attenuation at a given time
        :param epoch: unix timestamp
        :return: dac attenuator in dB, total attenuation in dB
        """
        index = self._get_hwstate_index_at(epoch)
        dac_atten = self.dac_atten[index]
        if self.num_tones is not None:
            ntones = self.num_tones[index]
        else:
            # Without a tone count we cannot correct for multi-tone power
            # sharing; assume a single tone and warn.
            ntones = 1
            warnings.warn("ntones parameter not found in data file %s, assuming 1. The effective power level may be wrong" % self.filename)
        total = dac_atten + ntone_power_correction(ntones)
        return dac_atten, total
    def get_modulation_state_at(self,epoch):
        """
        Get the source modulation TTL output state at a given time
        :param epoch: unix timestamp
        :return: modulation output state: 0 -> low, 1 -> high, 2 -> modulated
                modulation rate parameter: FIXME
        """
        if self.modulation_rate is None:
            return 0,0
        index = self._get_hwstate_index_at(epoch)
        modulation_rate = self.modulation_rate[index]
        modulation_output = self.modulation_output[index]
        return modulation_output, modulation_rate
|
[
"netCDF4.Dataset",
"numpy.zeros_like",
"numpy.flatnonzero",
"numpy.zeros",
"kid_readout.roach.tools.ntone_power_correction",
"numpy.where",
"numpy.arange",
"collections.OrderedDict",
"warnings.warn",
"bisect.bisect_left"
] |
[((5280, 5336), 'numpy.zeros', 'np.zeros', (['indexes_to_calculate.shape[0]'], {'dtype': '"""complex"""'}), "(indexes_to_calculate.shape[0], dtype='complex')\n", (5288, 5336), True, 'import numpy as np\n'), ((6564, 6599), 'netCDF4.Dataset', 'netCDF4.Dataset', (['filename'], {'mode': '"""r"""'}), "(filename, mode='r')\n", (6579, 6599), False, 'import netCDF4\n'), ((7792, 7805), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7803, 7805), False, 'from collections import OrderedDict\n'), ((7838, 7851), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7849, 7851), False, 'from collections import OrderedDict\n'), ((9125, 9177), 'bisect.bisect_left', 'bisect.bisect_left', (['self.hardware_state_epoch', 'epoch'], {}), '(self.hardware_state_epoch, epoch)\n', (9143, 9177), False, 'import bisect\n'), ((1438, 1607), 'numpy.where', 'np.where', (['(self.baseband_measurement_freq >= self.adc_sampling_freq / 2)', '(self.baseband_measurement_freq - self.adc_sampling_freq)', 'self.baseband_measurement_freq'], {}), '(self.baseband_measurement_freq >= self.adc_sampling_freq / 2, self\n .baseband_measurement_freq - self.adc_sampling_freq, self.\n baseband_measurement_freq)\n', (1446, 1607), True, 'import numpy as np\n'), ((2414, 2439), 'numpy.zeros_like', 'np.zeros_like', (['self.epoch'], {}), '(self.epoch)\n', (2427, 2439), True, 'import numpy as np\n'), ((2476, 2501), 'numpy.zeros_like', 'np.zeros_like', (['self.epoch'], {}), '(self.epoch)\n', (2489, 2501), True, 'import numpy as np\n'), ((2537, 2562), 'numpy.zeros_like', 'np.zeros_like', (['self.epoch'], {}), '(self.epoch)\n', (2550, 2562), True, 'import numpy as np\n'), ((2608, 2633), 'numpy.zeros_like', 'np.zeros_like', (['self.epoch'], {}), '(self.epoch)\n', (2621, 2633), True, 'import numpy as np\n'), ((5134, 5193), 'numpy.arange', 'np.arange', (['self.timestream_group.data.shape[0]'], {'dtype': '"""int"""'}), "(self.timestream_group.data.shape[0], dtype='int')\n", (5143, 5193), True, 'import numpy as 
np\n'), ((5242, 5262), 'numpy.flatnonzero', 'np.flatnonzero', (['mask'], {}), '(mask)\n', (5256, 5262), True, 'import numpy as np\n'), ((9817, 9954), 'warnings.warn', 'warnings.warn', (["('ntones parameter not found in data file %s, assuming 1. The effective power level may be wrong'\n % self.filename)"], {}), "(\n 'ntones parameter not found in data file %s, assuming 1. The effective power level may be wrong'\n % self.filename)\n", (9830, 9954), False, 'import warnings\n'), ((9973, 10003), 'kid_readout.roach.tools.ntone_power_correction', 'ntone_power_correction', (['ntones'], {}), '(ntones)\n', (9995, 10003), False, 'from kid_readout.roach.tools import ntone_power_correction\n'), ((3573, 3652), 'warnings.warn', 'warnings.warn', (['"""wave normalization not found, time series will not match sweep"""'], {}), "('wave normalization not found, time series will not match sweep')\n", (3586, 3652), False, 'import warnings\n'), ((3997, 4076), 'warnings.warn', 'warnings.warn', (['"""wave normalization not found, time series will not match sweep"""'], {}), "('wave normalization not found, time series will not match sweep')\n", (4010, 4076), False, 'import warnings\n')]
|
"""
File: bouncing_ball.py
Name: Minny
-------------------------
TODO: This program simulates a bouncing ball at (START_X, START_Y) that
has VX as x velocity and 0 as y velocity.
"""
from campy.graphics.gobjects import GOval
from campy.graphics.gwindow import GWindow
from campy.gui.events.timer import pause
from campy.gui.events.mouse import onmouseclicked
VX = 3
DELAY = 10
GRAVITY = 1
SIZE = 20
REDUCE = 0.9
START_X = 30
START_Y = 40
is_in_a_run = True
run = 0
ball = GOval(SIZE, SIZE, x=START_X, y=START_Y)
ball.filled = True
ball.fill_color = 'Black'
window = GWindow(800, 500, title='bouncing_ball.py')
window.add(ball)
def main():
"""
This program simulates a bouncing ball at (START_X, START_Y)
that has VX as x velocity and 0 as y velocity. Each bounce reduces
y velocity to REDUCE of itself.
"""
onmouseclicked(draw)
def draw(click):
global is_in_a_run, run
# 分辨是否為同一次點擊內執行與第幾次點擊
if is_in_a_run and run < 3:
# 將開關關閉
is_in_a_run = False
# 計次為第幾次執行
run += 1
count = 0
while True:
global GRAVITY, ball
ball.move(VX, GRAVITY * count)
if GRAVITY > 0:
count += 1
else:
count -= 1
# 判斷是否撞擊地面
if ball.y >= window.height-START_Y:
GRAVITY = -GRAVITY
count = count * REDUCE
# 判斷是否超出邊界
if ball.x >= window.width:
window.add(ball, START_X, START_Y)
GRAVITY = 1
is_in_a_run = True
break
pause(DELAY)
if __name__ == "__main__":
main()
|
[
"campy.gui.events.mouse.onmouseclicked",
"campy.graphics.gobjects.GOval",
"campy.graphics.gwindow.GWindow",
"campy.gui.events.timer.pause"
] |
[((506, 545), 'campy.graphics.gobjects.GOval', 'GOval', (['SIZE', 'SIZE'], {'x': 'START_X', 'y': 'START_Y'}), '(SIZE, SIZE, x=START_X, y=START_Y)\n', (511, 545), False, 'from campy.graphics.gobjects import GOval\n'), ((605, 648), 'campy.graphics.gwindow.GWindow', 'GWindow', (['(800)', '(500)'], {'title': '"""bouncing_ball.py"""'}), "(800, 500, title='bouncing_ball.py')\n", (612, 648), False, 'from campy.graphics.gwindow import GWindow\n'), ((882, 902), 'campy.gui.events.mouse.onmouseclicked', 'onmouseclicked', (['draw'], {}), '(draw)\n', (896, 902), False, 'from campy.gui.events.mouse import onmouseclicked\n'), ((1686, 1698), 'campy.gui.events.timer.pause', 'pause', (['DELAY'], {}), '(DELAY)\n', (1691, 1698), False, 'from campy.gui.events.timer import pause\n')]
|
from django.db import models
from constants import APPLICATION_LABEL
from project import Project
class ProjectImpact(models.Model):
project = models.ForeignKey(Project, blank=False, related_name='impacts')
title = models.CharField(max_length=200, help_text='Title for this impact.', blank=False, null=False, default='')
description = models.TextField(blank=False, null=False, verbose_name='Project Impact', help_text='Describe a major impact of this project in its field.')
# IMPORTANT: NEVER USE A DEFAULT WITH A FORMSET, OTHERWISE has_changed=True for empty forms!
order = models.IntegerField(blank=True)
def __unicode__(self):
return self.description
class Meta:
app_label= APPLICATION_LABEL
|
[
"django.db.models.ForeignKey",
"django.db.models.TextField",
"django.db.models.IntegerField",
"django.db.models.CharField"
] |
[((152, 215), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Project'], {'blank': '(False)', 'related_name': '"""impacts"""'}), "(Project, blank=False, related_name='impacts')\n", (169, 215), False, 'from django.db import models\n'), ((228, 338), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)', 'help_text': '"""Title for this impact."""', 'blank': '(False)', 'null': '(False)', 'default': '""""""'}), "(max_length=200, help_text='Title for this impact.', blank=\n False, null=False, default='')\n", (244, 338), False, 'from django.db import models\n'), ((352, 495), 'django.db.models.TextField', 'models.TextField', ([], {'blank': '(False)', 'null': '(False)', 'verbose_name': '"""Project Impact"""', 'help_text': '"""Describe a major impact of this project in its field."""'}), "(blank=False, null=False, verbose_name='Project Impact',\n help_text='Describe a major impact of this project in its field.')\n", (368, 495), False, 'from django.db import models\n'), ((601, 632), 'django.db.models.IntegerField', 'models.IntegerField', ([], {'blank': '(True)'}), '(blank=True)\n', (620, 632), False, 'from django.db import models\n')]
|
import pandas as pd
import itertools as it
import numpy as np
global CACHE
CACHE = {}
COLS = ['FlightDate', 'CRSDepTime', 'Tail_Number', 'ArrDelay', 'DepDelay', 'AirTime', 'DistanceGroup', 'Distance', 'Origin', 'Dest']
def get_df(strng, source_path):
global CACHE
if strng not in CACHE:
df = pd.read_csv(source_path + strng + '.csv', usecols=COLS)
df['DateTime'] = df['FlightDate'] + '-' + df['CRSDepTime'].map('{:04}'.format)
df = df.sort_values(by=['DateTime'], ascending=False)
CACHE[strng] = df
if len(CACHE) > 3:
to_del = sorted(CACHE.keys())[0]
CACHE.pop(to_del)
return CACHE[strng]
def get_n_prior_flights(n, tail_num, year, month, day, time, source_path, last=False):
month_str = '{:02}'.format(month)
day_str = '{:02}'.format(day)
date_time = '-'.join(['20' + str(year), month_str, day_str, '{:04}'.format(time)])
df = get_df(str(year) + '_' + month_str, source_path)
flights = df[df['Tail_Number'] == tail_num]
flights = flights[flights['DateTime'] < date_time]
flights = flights.iloc[: n]
if flights.shape[0] < n and not last:
year = year - 1 if month == 1 else year
month = month - 1 if month != 1 else 12
day = 31
time = 2400
flights2 = get_n_prior_flights(n - flights.shape[0], tail_num, year, month, day, time, source_path, last=True)
flights = pd.concat([flights, flights2])
return flights
# if __name__ == '__main__':
# new_cols = list(it.chain.from_iterable([['ARR_DELAY_{}'.format(i), 'DEP_DELAY_{}'.format(i + 1)] for i in range(n)]))
# for col in new_cols:
# dep_df[col] = np.nan
# for i in range(dep_df.shape[0]):
# print(i)
# tail_num = dep_df.at[i, 'TAIL_NUM']
# month = dep_df.at[i, 'MONTH']
# year = dep_df.at[i, 'YEAR'] % 2000
# day = dep_df.at[i, 'DAY_OF_MONTH']
# time = dep_df.at[i, 'CRS_DEP_TIME']
# flights = get_n_prior_flights(n, tail_num, year, month, day, time)
# for j, (index, series) in enumerate(flights.iterrows()):
# dep_df.at[i, 'ARR_DELAY_{}'.format(j)] = flights.at[index, 'ARR_DELAY']
# if j < n:
# dep_df.at[i, 'DEP_DELAY_{}'.format(j + 1)] = flights.at[index, 'DEP_DELAY']
# dep_df.to_csv(out_file)
#
|
[
"pandas.read_csv",
"pandas.concat"
] |
[((312, 367), 'pandas.read_csv', 'pd.read_csv', (["(source_path + strng + '.csv')"], {'usecols': 'COLS'}), "(source_path + strng + '.csv', usecols=COLS)\n", (323, 367), True, 'import pandas as pd\n'), ((1410, 1440), 'pandas.concat', 'pd.concat', (['[flights, flights2]'], {}), '([flights, flights2])\n', (1419, 1440), True, 'import pandas as pd\n')]
|
from collections import Counter
from autonmt.bundle.utils import read_file_lines, write_file_lines, flatten
from autonmt.vocabularies.base_vocab import BaseVocabulary
class Vocabulary(BaseVocabulary):
def __init__(self,
unk_id=0, sos_id=1, eos_id=2, pad_id=3,
unk_piece="<unk>", sos_piece="<s>", eos_piece="</s>", pad_piece="<pad>", lang=None, max_tokens=None):
super().__init__(sos_id=sos_id, eos_id=eos_id, pad_id=pad_id,
sos_piece=sos_piece, eos_piece=eos_piece, pad_piece=pad_piece,
lang=lang, max_tokens=max_tokens)
# Set special tokens
self.unk_id = unk_id
self.unk_piece = unk_piece
self.special_tokens = [(self.unk_piece, self.unk_id), (self.sos_piece, self.sos_id),
(self.eos_piece, self.eos_id), (self.pad_piece, self.pad_id)]
# Build vocab
self.voc2idx = {}
self.idx2voc = {}
self.voc2freq = {}
def __len__(self):
return len(self.voc2idx)
def _assert_vocab(self):
assert self.idx2voc[self.unk_id] == self.unk_piece
assert self.idx2voc[self.sos_id] == self.sos_piece
assert self.idx2voc[self.eos_id] == self.eos_piece
assert self.idx2voc[self.pad_id] == self.pad_piece
def build_from_tokens(self, tokens):
# Tokens must include the special tokens
self.voc2idx = {tok: idx for idx, (tok, log_prob) in enumerate(tokens)}
self.idx2voc = {idx: tok for idx, (tok, log_prob) in enumerate(tokens)}
self.voc2freq = {tok: log_prob.strip() for idx, (tok, log_prob) in enumerate(tokens)}
self._assert_vocab()
return self
def build_from_vocab(self, filename, includes_special_tokes=True):
# Parse file. Special tokens must appear first in the file
tokens = [line.split('\t') for line in read_file_lines(filename, autoclean=False)]
special_tokens = [(tok, 0) for tok, tok_id in self.special_tokens] if not includes_special_tokes else []
tokens = special_tokens + tokens # Do not sort. It could lead to different idxs
self.build_from_tokens(tokens)
self._assert_vocab()
return self
def build_from_dataset(self, filename):
tokens = Counter(flatten([line.strip().split(' ') for line in read_file_lines(filename, autoclean=True)]))
special_tokens = [(tok, 0) for tok, tok_id in self.special_tokens]
tokens = special_tokens + tokens.most_common()
self.build_from_tokens(tokens)
self._assert_vocab()
return self
def build_from_ds(self, ds, lang):
self.lang = lang
vocab_path = ds.get_vocab_path(lang) + ".vocab"
self.build_from_vocab(vocab_path)
self._assert_vocab()
return self
def get_tokens(self):
# Tokens must be returned in their correct order
return [self.idx2voc[i] for i in range(len(self.idx2voc))]
def encode(self, text, add_special_tokens=True):
tokens = text.strip().split(' ')
idxs = [self.voc2idx.get(tok, self.unk_id) for tok in tokens]
idxs = idxs[:self.max_tokens-2*int(add_special_tokens)] if self.max_tokens else idxs # count <sos> and <eos>
idxs = [self.sos_id] + idxs + [self.eos_id] if add_special_tokens else idxs
return idxs
def decode(self, idxs, remove_special_tokens=True):
# Remove special tokens
if remove_special_tokens:
try:
# Remove <sos>
sos_pos = idxs.index(self.sos_id) # Get first sos (important!)
idxs = idxs[sos_pos+1:]
except ValueError:
pass
try:
# Remove <eos>
eos_pos = idxs.index(self.eos_id) # Get first eos (important!)
idxs = idxs[:eos_pos]
except ValueError:
pass
# Decode sentences
tokens = [self.idx2voc.get(idx, self.unk_piece) for idx in idxs]
s = ' '.join(tokens)
return s
def save(self, filename, include_special_tokens=True):
lines = []
# Add special tokens
if include_special_tokens:
lines.append((self.unk_piece, 0))
lines.append((self.sos_piece, 0))
lines.append((self.eos_piece, 0))
lines.append((self.pad_piece, 0))
# Add tokens
for voc, idx in self.voc2idx.items():
lines.append(f"{voc}\t{self.voc2freq.get(voc, 0)}")
# Save file
write_file_lines(lines=lines, filename=filename, insert_break_line=True)
|
[
"autonmt.bundle.utils.read_file_lines",
"autonmt.bundle.utils.write_file_lines"
] |
[((4566, 4638), 'autonmt.bundle.utils.write_file_lines', 'write_file_lines', ([], {'lines': 'lines', 'filename': 'filename', 'insert_break_line': '(True)'}), '(lines=lines, filename=filename, insert_break_line=True)\n', (4582, 4638), False, 'from autonmt.bundle.utils import read_file_lines, write_file_lines, flatten\n'), ((1906, 1948), 'autonmt.bundle.utils.read_file_lines', 'read_file_lines', (['filename'], {'autoclean': '(False)'}), '(filename, autoclean=False)\n', (1921, 1948), False, 'from autonmt.bundle.utils import read_file_lines, write_file_lines, flatten\n'), ((2355, 2396), 'autonmt.bundle.utils.read_file_lines', 'read_file_lines', (['filename'], {'autoclean': '(True)'}), '(filename, autoclean=True)\n', (2370, 2396), False, 'from autonmt.bundle.utils import read_file_lines, write_file_lines, flatten\n')]
|
import os
import csv
from PIL import Image
import numpy as np
import torch
import torch.utils.data as data
from torchvision import datasets, transforms
import params
class COVID19_Dataset(Dataset):
"""
COVID-19 image data collection
Dataset: https://github.com/ieee8023/covid-chestxray-dataset
Paper: https://arxiv.org/abs/2003.11597
"""
def __init__(self,
imgpath=os.path.join(thispath, "covid-chestxray-dataset", "images"),
csvpath=os.path.join(thispath, "covid-chestxray-dataset", "metadata.csv"),
views=["PA", "AP"],
transform=None,
data_aug=None,
nrows=None,
seed=0,
pure_labels=False,
unique_patients=True):
super(COVID19_Dataset, self).__init__()
np.random.seed(seed) # Reset the seed so all runs are the same.
self.imgpath = imgpath
self.transform = transform
self.data_aug = data_aug
self.views = views
# defined here to make the code easier to read
pneumonias = ["COVID-19", "SARS", "MERS", "ARDS", "Streptococcus", "Pneumocystis", "Klebsiella", "Chlamydophila", "Legionella", "Influenza", "Mycoplasma", "Varicella", "Viral", "Bacterial", "Fungal", "Lipoid","E.Coli"]
self.pathologies = ["Pneumonia","No Finding"] + pneumonias
self.pathologies = sorted(self.pathologies)
mapping = dict()
mapping["Pneumonia"] = pneumonias
mapping["Viral"] = ["COVID-19", "SARS", "MERS", "Influenza", "Varicella"]
mapping["Bacterial"] = ["Streptococcus", "Klebsiella", "Chlamydophila", "Legionella", "Mycoplasma","E.Coli"]
mapping["Fungal"] = ["Pneumocystis"]
# Load data
self.csvpath = csvpath
self.csv = pd.read_csv(self.csvpath, nrows=nrows)
self.MAXVAL = 255 # Range [0 255]
# Keep only the frontal views.
#idx_pa = self.csv["view"].isin(["PA", "AP", "AP Supine"])
idx_pa = self.csv["view"].isin(self.views)
self.csv = self.csv[idx_pa]
self.labels = []
for pathology in self.pathologies:
mask = self.csv["finding"].str.contains(pathology)
if pathology in mapping:
for syn in mapping[pathology]:
#print("mapping", syn)
mask |= self.csv["finding"].str.contains(syn)
self.labels.append(mask.values)
self.labels = np.asarray(self.labels).T
self.labels = self.labels.astype(np.float32)
def __repr__(self):
pprint.pprint(self.totals())
return self.__class__.__name__ + " num_samples={} views={}".format(len(self), self.views)
def __len__(self):
return len(self.labels)
def __getitem__(self, idx):
imgid = self.csv['filename'].iloc[idx]
img_path = os.path.join(self.imgpath, imgid)
#print(img_path)
img = imread(img_path)
img = normalize(img, self.MAXVAL)
# Check that images are 2D arrays
if len(img.shape) > 2:
img = img[:, :, 0]
if len(img.shape) < 2:
print("error, dimension lower than 2 for image")
# Add color channel
img = img[None, :, :]
if self.transform is not None:
img = self.transform(img)
if self.data_aug is not None:
img = self.data_aug(img)
return {"img":img, "lab":self.labels[idx], "idx":idx}
|
[
"numpy.random.seed",
"numpy.asarray",
"os.path.join"
] |
[((422, 481), 'os.path.join', 'os.path.join', (['thispath', '"""covid-chestxray-dataset"""', '"""images"""'], {}), "(thispath, 'covid-chestxray-dataset', 'images')\n", (434, 481), False, 'import os\n'), ((509, 574), 'os.path.join', 'os.path.join', (['thispath', '"""covid-chestxray-dataset"""', '"""metadata.csv"""'], {}), "(thispath, 'covid-chestxray-dataset', 'metadata.csv')\n", (521, 574), False, 'import os\n'), ((870, 890), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (884, 890), True, 'import numpy as np\n'), ((2944, 2977), 'os.path.join', 'os.path.join', (['self.imgpath', 'imgid'], {}), '(self.imgpath, imgid)\n', (2956, 2977), False, 'import os\n'), ((2546, 2569), 'numpy.asarray', 'np.asarray', (['self.labels'], {}), '(self.labels)\n', (2556, 2569), True, 'import numpy as np\n')]
|
"""Combine for flow artifact mitigated average (composite) image."""
import os
import numpy as np
import nibabel as nb
NII_NAMES = [
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/09_average/sub-23_ses-T2s_dir-Mx_part-mag_MEGRE_crop_ups2X_prepped_avg.nii.gz',
'/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/09_average/sub-23_ses-T2s_dir-My_part-mag_MEGRE_crop_ups2X_prepped_avg.nii.gz'
]
OUTDIR = "/home/faruk/data/DATA_MRI_NIFTI/derived/sub-23/T2s/10_composite"
OUT_NAME = "sub-23_ses-T2s_part-mag_MEGRE_crop_ups2X_prepped_avg_composite"
# =============================================================================
print("Step_10: Composite.")
# Output directory
if not os.path.exists(OUTDIR):
os.makedirs(OUTDIR)
print(" Output directory: {}".format(OUTDIR))
# Load data
nii1 = nb.load(NII_NAMES[0])
nii2 = nb.load(NII_NAMES[1])
data1 = np.squeeze(nii1.get_fdata())
data2 = np.squeeze(nii2.get_fdata())
# -----------------------------------------------------------------------------
# Compositing
diff = data1 - data2
idx_neg = diff < 0
idx_pos = diff > 0
data1[idx_pos] -= diff[idx_pos]
data2[idx_neg] += diff[idx_neg]
# Average
data1 += data2
data1 /= 2.
# -----------------------------------------------------------------------------
# Save
out_name = nii1.get_filename().split(os.extsep, 1)[0]
img = nb.Nifti1Image(data1, affine=nii1.affine)
nb.save(img, os.path.join(OUTDIR, "{}.nii.gz".format(OUT_NAME)))
print('Finished.')
|
[
"nibabel.Nifti1Image",
"os.path.exists",
"os.makedirs",
"nibabel.load"
] |
[((816, 837), 'nibabel.load', 'nb.load', (['NII_NAMES[0]'], {}), '(NII_NAMES[0])\n', (823, 837), True, 'import nibabel as nb\n'), ((845, 866), 'nibabel.load', 'nb.load', (['NII_NAMES[1]'], {}), '(NII_NAMES[1])\n', (852, 866), True, 'import nibabel as nb\n'), ((1347, 1388), 'nibabel.Nifti1Image', 'nb.Nifti1Image', (['data1'], {'affine': 'nii1.affine'}), '(data1, affine=nii1.affine)\n', (1361, 1388), True, 'import nibabel as nb\n'), ((701, 723), 'os.path.exists', 'os.path.exists', (['OUTDIR'], {}), '(OUTDIR)\n', (715, 723), False, 'import os\n'), ((729, 748), 'os.makedirs', 'os.makedirs', (['OUTDIR'], {}), '(OUTDIR)\n', (740, 748), False, 'import os\n')]
|
#!/usr/bin/env python
'''
parse a MAVLink protocol XML file and generate an Objective-C implementation
Copyright <NAME> 2013
Released under GNU GPL version 3 or later
'''
from __future__ import print_function
import os
from . import mavparse, mavtemplate
t = mavtemplate.MAVTemplate()
def generate_mavlink(directory, xml):
'''generate MVMavlink header and implementation'''
f = open(os.path.join(directory, "MVMavlink.h"), mode='w')
t.write(f,'''
//
// MVMavlink.h
// MAVLink communications protocol built from ${basename}.xml
//
// Created on ${parse_time} by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage.h"
${{message_definition_files:#import "MV${name_camel_case}Messages.h"
}}
@class MVMavlink;
@protocol MVMessage;
@protocol MVMavlinkDelegate <NSObject>
/*!
Method called on the delegate when a full message has been received. Note that this may be called multiple times when parseData: is called, if the data passed to parseData: contains multiple messages.
@param mavlink The MVMavlink object calling this method
@param message The id<MVMessage> class containing the parsed message
*/
- (void)mavlink:(MVMavlink *)mavlink didGetMessage:(id<MVMessage>)message;
/*!
Method called on the delegate when data should be sent.
@param mavlink The MVMavlink object calling this method
@param data NSData object containing the bytes to be sent
*/
- (BOOL)mavlink:(MVMavlink *)mavlink shouldWriteData:(NSData *)data;
@end
/*!
Class for parsing and sending instances of id<MVMessage>
@discussion MVMavlink receives a stream of bytes via the parseData: method and calls the delegate method mavlink:didGetMessage: each time a message is fully parsed. Users of MVMavlink can call parseData: anytime they get new data, even if that data does not contain a complete message.
*/
@interface MVMavlink : NSObject
@property (weak, nonatomic) id<MVMavlinkDelegate> delegate;
/*!
Parse byte data received from a MAVLink byte stream.
@param data NSData containing the received bytes
*/
- (void)parseData:(NSData *)data;
/*!
Compile MVMessage object into a bytes and pass to the delegate for sending.
@param message Object conforming to the MVMessage protocol that represents the data to be sent
@return YES if message sending was successful
*/
- (BOOL)sendMessage:(id<MVMessage>)message;
@end
''', xml)
f.close()
f = open(os.path.join(directory, "MVMavlink.m"), mode='w')
t.write(f,'''
//
// MVMavlink.m
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMavlink.h"
@implementation MVMavlink
- (void)parseData:(NSData *)data {
mavlink_message_t msg;
mavlink_status_t status;
char *bytes = (char *)[data bytes];
for (NSInteger i = 0; i < [data length]; ++i) {
if (mavlink_parse_char(MAVLINK_COMM_0, bytes[i], &msg, &status)) {
// Packet received
id<MVMessage> message = [MVMessage messageWithCMessage:msg];
[_delegate mavlink:self didGetMessage:message];
}
}
}
- (BOOL)sendMessage:(id<MVMessage>)message {
return [_delegate mavlink:self shouldWriteData:[message data]];
}
@end
''', xml)
f.close()
def generate_base_message(directory, xml):
'''Generate base MVMessage header and implementation'''
f = open(os.path.join(directory, 'MVMessage.h'), mode='w')
t.write(f, '''
//
// MVMessage.h
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "mavlink.h"
@protocol MVMessage <NSObject>
- (id)initWithCMessage:(mavlink_message_t)message;
- (NSData *)data;
@property (readonly, nonatomic) mavlink_message_t message;
@end
@interface MVMessage : NSObject <MVMessage> {
mavlink_message_t _message;
}
/*!
Create an MVMessage subclass from a mavlink_message_t.
@param message Struct containing the details of the message
@result MVMessage or subclass representing the message
*/
+ (id<MVMessage>)messageWithCMessage:(mavlink_message_t)message;
//! System ID of the sender of the message.
- (uint8_t)systemId;
//! Component ID of the sender of the message.
- (uint8_t)componentId;
//! Message ID of this message.
- (uint8_t)messageId;
@end
''', xml)
f.close()
f = open(os.path.join(directory, 'MVMessage.m'), mode='w')
t.write(f, '''
//
// MVMessage.m
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage.h"
${{message_definition_files:#import "MV${name_camel_case}Messages.h"
}}
@implementation MVMessage
@synthesize message=_message;
+ (id)messageWithCMessage:(mavlink_message_t)message {
static NSDictionary *messageIdToClass = nil;
if (!messageIdToClass) {
messageIdToClass = @{
${{message: @${id} : [MVMessage${name_camel_case} class],
}}
};
}
Class messageClass = messageIdToClass[@(message.msgid)];
// Store unknown messages to MVMessage
if (!messageClass) {
messageClass = [MVMessage class];
}
return [[messageClass alloc] initWithCMessage:message];
}
- (id)initWithCMessage:(mavlink_message_t)message {
if ((self = [super init])) {
self->_message = message;
}
return self;
}
- (NSData *)data {
uint8_t buffer[MAVLINK_MAX_PACKET_LEN];
NSInteger length = mavlink_msg_to_send_buffer(buffer, &self->_message);
return [NSData dataWithBytes:buffer length:length];
}
- (uint8_t)systemId {
return self->_message.sysid;
}
- (uint8_t)componentId {
return self->_message.compid;
}
- (uint8_t)messageId {
return self->_message.msgid;
}
- (NSString *)description {
return [NSString stringWithFormat:@"%@, systemId=%d, componentId=%d", [self class], self.systemId, self.componentId];
}
@end
''', xml)
f.close()
def generate_message_definitions_h(directory, xml):
'''generate headerfile containing includes for all messages'''
f = open(os.path.join(directory, "MV" + camel_case_from_underscores(xml.basename) + "Messages.h"), mode='w')
t.write(f, '''
//
// MV${basename_camel_case}Messages.h
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
${{message:#import "MVMessage${name_camel_case}.h"
}}
''', xml)
f.close()
def generate_message(directory, m):
'''generate per-message header and implementation file'''
f = open(os.path.join(directory, 'MVMessage%s.h' % m.name_camel_case), mode='w')
t.write(f, '''
//
// MVMessage${name_camel_case}.h
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage.h"
/*!
Class that represents a ${name} Mavlink message.
@discussion ${description}
*/
@interface MVMessage${name_camel_case} : MVMessage
- (id)initWithSystemId:(uint8_t)systemId componentId:(uint8_t)componentId${{arg_fields: ${name_lower_camel_case}:(${arg_type}${array_prefix})${name_lower_camel_case}}};
${{fields://! ${description}
- (${return_type})${name_lower_camel_case}${get_arg_objc};
}}
@end
''', m)
f.close()
f = open(os.path.join(directory, 'MVMessage%s.m' % m.name_camel_case), mode='w')
t.write(f, '''
//
// MVMessage${name_camel_case}.m
// MAVLink communications protocol built from ${basename}.xml
//
// Created by mavgen_objc.py
// http://qgroundcontrol.org/mavlink
//
#import "MVMessage${name_camel_case}.h"
@implementation MVMessage${name_camel_case}
- (id)initWithSystemId:(uint8_t)systemId componentId:(uint8_t)componentId${{arg_fields: ${name_lower_camel_case}:(${arg_type}${array_prefix})${name_lower_camel_case}}} {
if ((self = [super init])) {
mavlink_msg_${name_lower}_pack(systemId, componentId, &(self->_message)${{arg_fields:, ${name_lower_camel_case}}});
}
return self;
}
${{fields:- (${return_type})${name_lower_camel_case}${get_arg_objc} {
${return_method_implementation}
}
}}
- (NSString *)description {
return [NSString stringWithFormat:@"%@${{fields:, ${name_lower_camel_case}=${print_format}}}", [super description]${{fields:, ${get_message}}}];
}
@end
''', m)
f.close()
def camel_case_from_underscores(string):
"""generate a CamelCase string from an underscore_string."""
components = string.split('_')
string = ''
for component in components:
string += component[0].upper() + component[1:]
return string
def lower_camel_case_from_underscores(string):
"""generate a lower-cased camelCase string from an underscore_string.
For example: my_variable_name -> myVariableName"""
components = string.split('_')
string = components[0]
for component in components[1:]:
string += component[0].upper() + component[1:]
return string
def generate_shared(basename, xml_list):
# Create a dictionary to hold all the values we want to use in the templates
template_dict = {}
template_dict['parse_time'] = xml_list[0].parse_time
template_dict['message'] = []
template_dict['message_definition_files'] = []
print("Generating Objective-C implementation in directory %s" % basename)
mavparse.mkdir_p(basename)
for xml in xml_list:
template_dict['message'].extend(xml.message)
basename_camel_case = camel_case_from_underscores(xml.basename)
template_dict['message_definition_files'].append({'name_camel_case': basename_camel_case})
if not template_dict.get('basename', None):
template_dict['basename'] = xml.basename
else:
template_dict['basename'] = template_dict['basename'] + ', ' + xml.basename
# Sort messages by ID
template_dict['message'] = sorted(template_dict['message'], key = lambda message : message.id)
# Add name_camel_case to each message object
for message in template_dict['message']:
message.name_camel_case = camel_case_from_underscores(message.name_lower)
generate_mavlink(basename, template_dict)
generate_base_message(basename, template_dict)
def generate_message_definitions(basename, xml):
'''generate files for one XML file'''
directory = os.path.join(basename, xml.basename)
print("Generating Objective-C implementation in directory %s" % directory)
mavparse.mkdir_p(directory)
xml.basename_camel_case = camel_case_from_underscores(xml.basename)
# Add some extra field attributes for convenience
for m in xml.message:
m.basename = xml.basename
m.parse_time = xml.parse_time
m.name_camel_case = camel_case_from_underscores(m.name_lower)
for f in m.fields:
f.name_lower_camel_case = lower_camel_case_from_underscores(f.name);
f.get_message = "[self %s]" % f.name_lower_camel_case
f.return_method_implementation = ''
f.array_prefix = ''
f.array_return_arg = ''
f.get_arg = ''
f.get_arg_objc = ''
if f.enum:
f.return_type = f.enum
f.arg_type = f.enum
else:
f.return_type = f.type
f.arg_type = f.type
if f.print_format is None:
if f.array_length != 0:
f.print_format = "%@"
elif f.type.startswith('uint64_t'):
f.print_format = "%lld"
elif f.type.startswith('uint') or f.type.startswith('int'):
f.print_format = "%d"
elif f.type.startswith('float'):
f.print_format = "%f"
elif f.type.startswith('char'):
f.print_format = "%c"
else:
print("print_format unsupported for type %s" % f.type)
if f.array_length != 0:
f.get_message = '@"[array of %s[%d]]"' % (f.type, f.array_length)
f.array_prefix = ' *'
f.array_return_arg = '%s, %u, ' % (f.name, f.array_length)
f.return_type = 'uint16_t'
f.get_arg = ', %s' % (f.name)
f.get_arg_objc = ':(%s *)%s' % (f.type, f.name)
if f.type == 'char':
# Special handling for strings (assumes all char arrays are strings)
f.return_type = 'NSString *'
f.get_arg_objc = ''
f.get_message = "[self %s]" % f.name_lower_camel_case
f.return_method_implementation = \
"""char string[%(array_length)d];
mavlink_msg_%(message_name_lower)s_get_%(name)s(&(self->_message), (char *)&string);
return [[NSString alloc] initWithBytes:string length:%(array_length)d encoding:NSASCIIStringEncoding];""" % {'array_length': f.array_length, 'message_name_lower': m.name_lower, 'name': f.name}
if not f.return_method_implementation:
f.return_method_implementation = \
"""return mavlink_msg_%(message_name_lower)s_get_%(name)s(&(self->_message)%(get_arg)s);""" % {'message_name_lower': m.name_lower, 'name': f.name, 'get_arg': f.get_arg}
for m in xml.message:
m.arg_fields = []
for f in m.fields:
if not f.omit_arg:
m.arg_fields.append(f)
generate_message_definitions_h(directory, xml)
for m in xml.message:
generate_message(directory, m)
def generate(basename, xml_list):
'''generate complete MAVLink Objective-C implemenation'''
generate_shared(basename, xml_list)
for xml in xml_list:
generate_message_definitions(basename, xml)
|
[
"os.path.join"
] |
[((10172, 10208), 'os.path.join', 'os.path.join', (['basename', 'xml.basename'], {}), '(basename, xml.basename)\n', (10184, 10208), False, 'import os\n'), ((395, 433), 'os.path.join', 'os.path.join', (['directory', '"""MVMavlink.h"""'], {}), "(directory, 'MVMavlink.h')\n", (407, 433), False, 'import os\n'), ((2395, 2433), 'os.path.join', 'os.path.join', (['directory', '"""MVMavlink.m"""'], {}), "(directory, 'MVMavlink.m')\n", (2407, 2433), False, 'import os\n'), ((3336, 3374), 'os.path.join', 'os.path.join', (['directory', '"""MVMessage.h"""'], {}), "(directory, 'MVMessage.h')\n", (3348, 3374), False, 'import os\n'), ((4310, 4348), 'os.path.join', 'os.path.join', (['directory', '"""MVMessage.m"""'], {}), "(directory, 'MVMessage.m')\n", (4322, 4348), False, 'import os\n'), ((6453, 6513), 'os.path.join', 'os.path.join', (['directory', "('MVMessage%s.h' % m.name_camel_case)"], {}), "(directory, 'MVMessage%s.h' % m.name_camel_case)\n", (6465, 6513), False, 'import os\n'), ((7183, 7243), 'os.path.join', 'os.path.join', (['directory', "('MVMessage%s.m' % m.name_camel_case)"], {}), "(directory, 'MVMessage%s.m' % m.name_camel_case)\n", (7195, 7243), False, 'import os\n')]
|
from matplotlib import pyplot as plt
import sys
m = 10000000000
'''
for name in sys.argv[1:]:
with open(name, "r") as f:
r = f.read()
m = min(m,len( r.split("\n")))
'''
for name in sys.argv[1:]:
with open(name, "r") as f:
r = f.read()
l = [a for a in r.split("\n") if a != ""]
l = [float(a) for a in l[:m]]
l = [a for a in l if a < 100]
s = sorted(l)
rank = [i/len(l) for i in range(len(l))]
plt.scatter(x = sorted(l), y = rank, label = name)
plt.legend()
plt.savefig("band_comparison.png")
|
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.savefig"
] |
[((525, 537), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (535, 537), True, 'from matplotlib import pyplot as plt\n'), ((538, 572), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""band_comparison.png"""'], {}), "('band_comparison.png')\n", (549, 572), True, 'from matplotlib import pyplot as plt\n')]
|
'''
Created on Mar 13, 2012
.. codeauthor:: jhkwakkel <j.h.kwakkel (at) tudelft (dot) nl>
'''
import unittest
import numpy as np
import pandas as pd
from ema_workbench.analysis import prim
from ema_workbench.analysis.prim import PrimBox
from test import utilities
from ema_workbench.analysis.scenario_discovery_util import RuleInductionType
def flu_classify(data):
#get the output for deceased population
result = data['deceased population region 1']
#make an empty array of length equal to number of cases
classes = np.zeros(result.shape[0])
#if deceased population is higher then 1.000.000 people, classify as 1
classes[result[:, -1] > 1000000] = 1
return classes
def scarcity_classify(outcomes):
    """Binary classification function for the scarcity case.

    A run is labelled 1 when its 'relative market price' series contains a
    large step between consecutive time points (absolute change > 0.6).

    NOTE(review): ``change`` holds absolute values, so the
    ``neg_change > -0.6`` test can never fail; presumably the original
    intent was to compare signed changes -- confirm before relying on it.
    """
    price = outcomes['relative market price']
    change = np.abs(price[:, 1:] - price[:, :-1])

    smallest_step = np.min(change, axis=1)
    largest_step = np.max(change, axis=1)
    flagged = (smallest_step > -0.6) & (largest_step > 0.6)

    labels = np.zeros(price.shape[0])
    labels[flagged] = 1
    return labels
class PrimBoxTestCase(unittest.TestCase):
    """Unit tests for ``ema_workbench.analysis.prim.PrimBox``.

    Fix applied throughout: ``dtype=np.int`` replaced with the builtin
    ``int`` -- the ``np.int`` alias was deprecated in NumPy 1.20 and removed
    in NumPy 1.24, so the old spelling raises AttributeError on modern NumPy.
    """

    def test_init(self):
        """A freshly created box has a one-row peeling trajectory."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = {'y': np.array([0, 1, 2])}
        results = (x, y)

        prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        self.assertEqual(box.peeling_trajectory.shape, (1, 6))

    def test_select(self):
        """select(0) restores the initial, all-inclusive box."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = {'y': np.array([1, 1, 0])}
        results = (x, y)

        prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        new_box_lim = pd.DataFrame([(0, 1, 1),
                                    (2, 5, 6)],
                                   columns=['a', 'b', 'c'])
        indices = np.array([0, 1], dtype=int)  # was np.int (removed in NumPy 1.24)
        box.update(new_box_lim, indices)

        box.select(0)
        self.assertTrue(np.all(box.yi == prim_obj.yi))

    def test_inspect(self):
        """inspect() accepts table/graph styles and rejects unknown ones."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = np.array([1, 1, 0])

        prim_obj = prim.Prim(x, y, threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        new_box_lim = pd.DataFrame([(0, 1, 1),
                                    (2, 5, 6)],
                                   columns=['a', 'b', 'c'])
        indices = np.array([0, 1], dtype=int)  # was np.int (removed in NumPy 1.24)
        box.update(new_box_lim, indices)

        box.inspect(1)
        box.inspect()
        box.inspect(style='graph')

        with self.assertRaises(ValueError):
            box.inspect(style='some unknown style')

    def test_show_ppt(self):
        """show_ppt() runs on a synthetic peeling trajectory."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = np.array([1, 1, 0])

        prim_obj = prim.Prim(x, y, threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        # fabricate a trajectory with random statistics per peeling step
        cols = ['mean', 'mass', 'coverage', 'density', 'res_dim']
        data = np.zeros((100, 5))
        data[:, 0:4] = np.random.rand(100, 4)
        data[:, 4] = np.random.randint(0, 5, size=(100,))
        box.peeling_trajectory = pd.DataFrame(data, columns=cols)

        box.show_ppt()

    def test_show_tradeoff(self):
        """show_tradeoff() runs on a synthetic peeling trajectory."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = np.array([1, 1, 0])

        prim_obj = prim.Prim(x, y, threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        # fabricate a trajectory with random statistics per peeling step
        cols = ['mean', 'mass', 'coverage', 'density', 'res_dim']
        data = np.zeros((100, 5))
        data[:, 0:4] = np.random.rand(100, 4)
        data[:, 4] = np.random.randint(0, 5, size=(100,))
        box.peeling_trajectory = pd.DataFrame(data, columns=cols)

        box.show_tradeoff()

    def test_update(self):
        """update() appends the correct statistics to the trajectory."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = {'y': np.array([1, 1, 0])}
        results = (x, y)

        prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        new_box_lim = pd.DataFrame([(0, 1, 1),
                                    (2, 5, 6)],
                                   columns=['a', 'b', 'c'])
        indices = np.array([0, 1], dtype=int)  # was np.int (removed in NumPy 1.24)
        box.update(new_box_lim, indices)

        self.assertEqual(box.peeling_trajectory['mean'][1], 1)
        self.assertEqual(box.peeling_trajectory['coverage'][1], 1)
        self.assertEqual(box.peeling_trajectory['density'][1], 1)
        self.assertEqual(box.peeling_trajectory['res_dim'][1], 1)
        self.assertEqual(box.peeling_trajectory['mass'][1], 2 / 3)

    def test_drop_restriction(self):
        """drop_restriction('b') widens b back to its initial limits."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = {'y': np.array([1, 1, 0])}
        results = (x, y)

        prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
        box = PrimBox(prim_obj, prim_obj.box_init, prim_obj.yi)

        new_box_lim = pd.DataFrame([(0, 1, 1),
                                    (2, 2, 6)],
                                   columns=['a', 'b', 'c'])
        indices = np.array([0, 1], dtype=int)  # was np.int (removed in NumPy 1.24)
        box.update(new_box_lim, indices)

        box.drop_restriction('b')

        correct_box_lims = pd.DataFrame([(0, 1, 1),
                                        (2, 5, 6)],
                                       columns=['a', 'b', 'c'])
        box_lims = box.box_lims[-1]
        for entry in box_lims.columns:
            lim_correct = correct_box_lims[entry]
            lim_box = box_lims[entry]
            # compare limits element-wise per column
            for expected, actual in zip(lim_correct, lim_box):
                self.assertEqual(expected, actual)

        self.assertEqual(box.peeling_trajectory['mean'][2], 1)
        self.assertEqual(box.peeling_trajectory['coverage'][2], 1)
        self.assertEqual(box.peeling_trajectory['density'][2], 1)
        self.assertEqual(box.peeling_trajectory['res_dim'][2], 1)
        self.assertEqual(box.peeling_trajectory['mass'][2], 2 / 3)

    def test_calculate_quasi_p(self):
        # TODO: not yet implemented
        pass
class PrimTestCase(unittest.TestCase):
    """Unit tests for the module-level API of ``ema_workbench.analysis.prim``.

    Fixes applied throughout:
    * ``np.int`` replaced with the builtin ``int`` -- the alias was
      deprecated in NumPy 1.20 and removed in NumPy 1.24.
    * chained-assignment writes (``x.a[mask] = v``) replaced with the
      ``x.loc[mask, 'a'] = v`` form recommended by pandas; the chained form
      is unreliable under copy-on-write semantics.
    """

    def test_setup_prim(self):
        """t_coi counts the cases of interest for ABOVE and BELOW thresholds."""
        self.results = utilities.load_flu_data()
        self.classify = flu_classify

        experiments, outcomes = self.results

        # searching for results equal to or higher than the threshold
        outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
        results = experiments, outcomes
        threshold = 10000
        prim_obj = prim.setup_prim(results, classify='death toll',
                                   threshold_type=prim.ABOVE, threshold=threshold)

        value = np.ones((experiments.shape[0],))
        value = value[outcomes['death toll'] >= threshold].shape[0]
        self.assertTrue(prim_obj.t_coi == value)

        # searching for results equal to or lower than the threshold
        threshold = 1000
        prim_obj = prim.setup_prim(results, classify='death toll',
                                   threshold_type=prim.BELOW,
                                   threshold=threshold)

        value = np.ones((experiments.shape[0],))
        value = value[outcomes['death toll'] <= threshold].shape[0]
        self.assertTrue(prim_obj.t_coi == value)

        # NOTE(review): passing prim.ABOVE as ``threshold`` looks like it was
        # meant to be ``threshold_type`` -- confirm against the prim API.
        prim.setup_prim(self.results, self.classify, threshold=prim.ABOVE)

    def test_boxes(self):
        """The boxes property returns one box for trivial and real data."""
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 1)],
                      columns=['a', 'b', 'c'])
        y = {'y': np.array([0, 1, 2])}
        results = (x, y)

        prim_obj = prim.setup_prim(results, 'y', threshold=0.8)
        boxes = prim_obj.boxes
        self.assertEqual(len(boxes), 1, 'box length not correct')

        # real data test case
        prim_obj = prim.setup_prim(utilities.load_flu_data(), flu_classify,
                                   threshold=0.8)
        prim_obj.find_box()
        boxes = prim_obj.boxes
        self.assertEqual(len(boxes), 1, 'box length not correct')

    def test_prim_init_select(self):
        """Same t_coi checks as test_setup_prim, with incl_unc supplied."""
        self.results = utilities.load_flu_data()
        self.classify = flu_classify

        experiments, outcomes = self.results
        unc = experiments.columns.values.tolist()

        # searching for results equal to or higher than the threshold
        outcomes['death toll'] = outcomes['deceased population region 1'][:, -1]
        results = experiments, outcomes
        threshold = 10000
        prim_obj = prim.setup_prim(results, classify='death toll',
                                   threshold_type=prim.ABOVE, threshold=threshold,
                                   incl_unc=unc)

        value = np.ones((experiments.shape[0],))
        value = value[outcomes['death toll'] >= threshold].shape[0]
        self.assertTrue(prim_obj.t_coi == value)

        # searching for results equal to or lower than the threshold
        threshold = 1000
        prim_obj = prim.setup_prim(results, classify='death toll',
                                   threshold_type=prim.BELOW,
                                   threshold=threshold)

        value = np.ones((experiments.shape[0],))
        value = value[outcomes['death toll'] <= threshold].shape[0]
        self.assertTrue(prim_obj.t_coi == value)

        # NOTE(review): threshold=prim.ABOVE -- see test_setup_prim.
        prim.setup_prim(self.results, self.classify, threshold=prim.ABOVE)

    def test_quantile(self):
        """get_quantile on monotone, constant, and duplicated data."""
        data = pd.Series(np.arange(10))
        self.assertTrue(prim.get_quantile(data, 0.9) == 8.5)
        self.assertTrue(prim.get_quantile(data, 0.95) == 8.5)
        self.assertTrue(prim.get_quantile(data, 0.1) == 0.5)
        self.assertTrue(prim.get_quantile(data, 0.05) == 0.5)

        data = pd.Series(1)
        self.assertTrue(prim.get_quantile(data, 0.9) == 1)
        self.assertTrue(prim.get_quantile(data, 0.95) == 1)
        self.assertTrue(prim.get_quantile(data, 0.1) == 1)
        self.assertTrue(prim.get_quantile(data, 0.05) == 1)

        data = pd.Series([1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])
        self.assertTrue(prim.get_quantile(data, 0.9) == 8.5)
        self.assertTrue(prim.get_quantile(data, 0.95) == 8.5)
        self.assertTrue(prim.get_quantile(data, 0.1) == 1.5)
        self.assertTrue(prim.get_quantile(data, 0.05) == 1.5)

    def test_box_init(self):
        """box_init spans column min/max; categoricals get the category set."""
        # test init box without NaNs, homogeneous numeric data
        x = pd.DataFrame([(0, 1, 2),
                          (2, 5, 6),
                          (3, 2, 7)],
                      columns=['a', 'b', 'c'])
        y = np.array([0, 1, 2])

        prim_obj = prim.Prim(x, y, threshold=0.5,
                             mode=RuleInductionType.REGRESSION)
        box_init = prim_obj.box_init

        # limits are the column-wise minimum (row 0) and maximum (row 1)
        self.assertTrue(box_init.loc[0, 'a'] == 0)
        self.assertTrue(box_init.loc[1, 'a'] == 3)
        self.assertTrue(box_init.loc[0, 'b'] == 1)
        self.assertTrue(box_init.loc[1, 'b'] == 5)
        self.assertTrue(box_init.loc[0, 'c'] == 2)
        self.assertTrue(box_init.loc[1, 'c'] == 7)

        # heterogeneous columns without NaNs
        x = pd.DataFrame([[0.1, 0, 'a'],
                          [0.2, 1, 'b'],
                          [0.3, 2, 'a'],
                          [0.4, 3, 'b'],
                          [0.5, 4, 'a'],
                          [0.6, 5, 'a'],
                          [0.7, 6, 'b'],
                          [0.8, 7, 'a'],
                          [0.9, 8, 'b'],
                          [1.0, 9, 'a']],
                      columns=['a', 'b', 'c'])
        y = np.arange(0, x.shape[0])

        prim_obj = prim.Prim(x, y, threshold=0.5,
                             mode=RuleInductionType.REGRESSION)
        box_init = prim_obj.box_init

        # categorical column 'c' gets the full category set as both limits
        self.assertTrue(box_init['a'][0] == 0.1)
        self.assertTrue(box_init['a'][1] == 1.0)
        self.assertTrue(box_init['b'][0] == 0)
        self.assertTrue(box_init['b'][1] == 9)
        self.assertTrue(box_init['c'][0] == set(['a', 'b']))
        self.assertTrue(box_init['c'][1] == set(['a', 'b']))

    def test_prim_exceptions(self):
        """REGRESSION mode rejects a 2-D outcome array as y."""
        results = utilities.load_flu_data()
        x, outcomes = results
        y = outcomes['deceased population region 1']

        self.assertRaises(prim.PrimException, prim.Prim,
                          x, y, threshold=0.8,
                          mode=RuleInductionType.REGRESSION)

    def test_find_box(self):
        """Found boxes plus yi_remaining always partition all of y."""
        results = utilities.load_flu_data()
        classify = flu_classify

        prim_obj = prim.setup_prim(results, classify,
                                   threshold=0.8)
        box_1 = prim_obj.find_box()
        prim_obj._update_yi_remaining(prim_obj)

        after_find = box_1.yi.shape[0] + prim_obj.yi_remaining.shape[0]
        self.assertEqual(after_find, prim_obj.y.shape[0])

        box_2 = prim_obj.find_box()
        prim_obj._update_yi_remaining(prim_obj)

        after_find = (box_1.yi.shape[0]
                      + box_2.yi.shape[0]
                      + prim_obj.yi_remaining.shape[0])
        self.assertEqual(after_find, prim_obj.y.shape[0])

    def test_discrete_peel(self):
        """_discrete_peel always yields two (indices, box) candidate peels."""
        x = pd.DataFrame(np.random.randint(0, 10, size=(100,), dtype=int),
                         columns=['a'])  # dtype=int: np.int removed in NumPy 1.24
        y = np.zeros(100)
        y[x.a > 5] = 1

        primalg = prim.Prim(x, y, threshold=0.8)
        boxlims = primalg.box_init
        box = prim.PrimBox(primalg, boxlims, primalg.yi)

        peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
        self.assertEqual(len(peels), 2)
        for peel in peels:
            self.assertEqual(len(peel), 2)
            indices, tempbox = peel

            self.assertTrue(isinstance(indices, np.ndarray))
            self.assertTrue(isinstance(tempbox, pd.DataFrame))

        # have modified boxlims as starting point
        primalg = prim.Prim(x, y, threshold=0.8)
        boxlims = primalg.box_init
        boxlims.a = [1, 8]
        box = prim.PrimBox(primalg, boxlims, primalg.yi)

        peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
        self.assertEqual(len(peels), 2)
        for peel in peels:
            self.assertEqual(len(peel), 2)
            indices, tempbox = peel

            self.assertTrue(isinstance(indices, np.ndarray))
            self.assertTrue(isinstance(tempbox, pd.DataFrame))

        # have modified boxlims as starting point; .loc avoids the
        # chained-assignment form x.a[x.a > 5] = 5
        x.loc[x.a > 5, 'a'] = 5
        primalg = prim.Prim(x, y, threshold=0.8)
        boxlims = primalg.box_init
        boxlims.a = [5, 8]
        box = prim.PrimBox(primalg, boxlims, primalg.yi)

        peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
        self.assertEqual(len(peels), 2)

        x.loc[x.a < 5, 'a'] = 5
        primalg = prim.Prim(x, y, threshold=0.8)
        boxlims = primalg.box_init
        boxlims.a = [5, 8]
        box = prim.PrimBox(primalg, boxlims, primalg.yi)

        peels = primalg._discrete_peel(box, 'a', 0, primalg.x_int)
        self.assertEqual(len(peels), 2)

    def test_categorical_peel(self):
        """_categorical_peel removes one category per candidate peel."""
        x = pd.DataFrame(list(zip(np.random.rand(10,),
                               ['a', 'b', 'a', 'b', 'a', 'a', 'b', 'a', 'b', 'a'])),
                         columns=['a', 'b'])

        y = np.random.randint(0, 2, (10,))
        y = y.astype(int)  # was np.int (removed in NumPy 1.24)
        y = {'y': y}
        results = x, y
        classify = 'y'

        prim_obj = prim.setup_prim(results, classify, threshold=0.8)
        box_lims = pd.DataFrame([(0, set(['a', 'b'])),
                                  (1, set(['a', 'b']))],
                                 columns=['a', 'b'])
        box = prim.PrimBox(prim_obj, box_lims, prim_obj.yi)

        u = 'b'
        x = x.select_dtypes(exclude=np.number).values
        j = 0
        peels = prim_obj._categorical_peel(box, u, j, x)

        self.assertEqual(len(peels), 2)

        for peel in peels:
            pl = peel[1][u]
            self.assertEqual(len(pl[0]), 1)
            self.assertEqual(len(pl[1]), 1)

        # categories that are tuples rather than strings
        a = ('a',)
        b = ('b',)

        x = pd.DataFrame(list(zip(np.random.rand(10,),
                               [a, b, a, b, a,
                                a, b, a, b, a])),
                         columns=['a', 'b'])

        y = np.random.randint(0, 2, (10,))
        y = y.astype(int)  # was np.int (removed in NumPy 1.24)
        y = {'y': y}
        results = x, y
        classify = 'y'

        prim_obj = prim.setup_prim(results, classify, threshold=0.8)
        box_lims = prim_obj.box_init
        box = prim.PrimBox(prim_obj, box_lims, prim_obj.yi)

        u = 'b'
        x = x.select_dtypes(exclude=np.number).values
        j = 0
        peels = prim_obj._categorical_peel(box, u, j, x)

        self.assertEqual(len(peels), 2)

        for peel in peels:
            pl = peel[1][u]
            self.assertEqual(len(pl[0]), 1)
            self.assertEqual(len(pl[1]), 1)

    def test_categorical_paste(self):
        """_categorical_paste re-adds the removed category to the box."""
        a = np.random.rand(10,)
        b = ['a', 'b', 'a', 'b', 'a', 'a', 'b', 'a', 'b', 'a']

        x = pd.DataFrame(list(zip(a, b)), columns=['a', 'b'])
        x['b'] = x['b'].astype('category')

        y = np.random.randint(0, 2, (10,))
        y = y.astype(int)  # was np.int (removed in NumPy 1.24)
        y = {'y': y}
        results = x, y
        classify = 'y'

        prim_obj = prim.setup_prim(results, classify, threshold=0.8)
        # start from a box restricted to category 'a' only
        box_lims = pd.DataFrame([(0, set(['a'])),
                                  (1, set(['a']))], columns=x.columns)

        yi = np.where(x.loc[:, 'b'] == 'a')

        box = prim.PrimBox(prim_obj, box_lims, yi)

        u = 'b'
        pastes = prim_obj._categorical_paste(box, u, x, ['b'])

        self.assertEqual(len(pastes), 1)

        for paste in pastes:
            indices, box_lims = paste

            self.assertEqual(indices.shape[0], 10)
            self.assertEqual(box_lims[u][0], set(['a', 'b']))
# Entry point: run all tests in this module when executed as a script.
if __name__ == '__main__':
    # ema_logging.log_to_stderr(ema_logging.INFO)
    unittest.main()
#     suite = unittest.TestSuite()
#     suite.addTest(PrimTestCase("test_write_boxes_to_stdout"))
#     unittest.TextTestRunner().run(suite)
|
[
"test.utilities.load_flu_data",
"numpy.abs",
"numpy.ones",
"ema_workbench.analysis.prim.PrimBox",
"numpy.random.randint",
"numpy.arange",
"unittest.main",
"pandas.DataFrame",
"ema_workbench.analysis.prim.Prim",
"numpy.max",
"ema_workbench.analysis.prim.setup_prim",
"numpy.min",
"pandas.Series",
"numpy.all",
"numpy.zeros",
"numpy.where",
"numpy.array",
"ema_workbench.analysis.prim.get_quantile",
"numpy.random.rand"
] |
[((546, 571), 'numpy.zeros', 'np.zeros', (['result.shape[0]'], {}), '(result.shape[0])\n', (554, 571), True, 'import numpy as np\n'), ((813, 854), 'numpy.abs', 'np.abs', (['(outcome[:, 1:] - outcome[:, 0:-1])'], {}), '(outcome[:, 1:] - outcome[:, 0:-1])\n', (819, 854), True, 'import numpy as np\n'), ((876, 898), 'numpy.min', 'np.min', (['change'], {'axis': '(1)'}), '(change, axis=1)\n', (882, 898), True, 'import numpy as np\n'), ((916, 938), 'numpy.max', 'np.max', (['change'], {'axis': '(1)'}), '(change, axis=1)\n', (922, 938), True, 'import numpy as np\n'), ((1018, 1044), 'numpy.zeros', 'np.zeros', (['outcome.shape[0]'], {}), '(outcome.shape[0])\n', (1026, 1044), True, 'import numpy as np\n'), ((19557, 19572), 'unittest.main', 'unittest.main', ([], {}), '()\n', (19570, 19572), False, 'import unittest\n'), ((1175, 1247), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (1187, 1247), True, 'import pandas as pd\n'), ((1409, 1453), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', '"""y"""'], {'threshold': '(0.8)'}), "(results, 'y', threshold=0.8)\n", (1424, 1453), False, 'from ema_workbench.analysis import prim\n'), ((1468, 1517), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (1475, 1517), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((1625, 1697), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (1637, 1697), True, 'import pandas as pd\n'), ((1859, 1903), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', '"""y"""'], {'threshold': '(0.8)'}), "(results, 'y', threshold=0.8)\n", (1874, 1903), False, 'from ema_workbench.analysis import prim\n'), ((1918, 
1967), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (1925, 1967), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((1991, 2052), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 1), (2, 5, 6)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 1), (2, 5, 6)], columns=['a', 'b', 'c'])\n", (2003, 2052), True, 'import pandas as pd\n'), ((2140, 2170), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int'}), '([0, 1], dtype=np.int)\n', (2148, 2170), True, 'import numpy as np\n'), ((2340, 2412), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (2352, 2412), True, 'import pandas as pd\n'), ((2498, 2517), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (2506, 2517), True, 'import numpy as np\n'), ((2544, 2574), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (2553, 2574), False, 'from ema_workbench.analysis import prim\n'), ((2589, 2638), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (2596, 2638), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((2662, 2723), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 1), (2, 5, 6)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 1), (2, 5, 6)], columns=['a', 'b', 'c'])\n", (2674, 2723), True, 'import pandas as pd\n'), ((2811, 2841), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int'}), '([0, 1], dtype=np.int)\n', (2819, 2841), True, 'import numpy as np\n'), ((3122, 3194), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (3134, 3194), True, 'import pandas as 
pd\n'), ((3280, 3299), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (3288, 3299), True, 'import numpy as np\n'), ((3326, 3356), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (3335, 3356), False, 'from ema_workbench.analysis import prim\n'), ((3371, 3420), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (3378, 3420), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((3520, 3538), 'numpy.zeros', 'np.zeros', (['(100, 5)'], {}), '((100, 5))\n', (3528, 3538), True, 'import numpy as np\n'), ((3562, 3584), 'numpy.random.rand', 'np.random.rand', (['(100)', '(4)'], {}), '(100, 4)\n', (3576, 3584), True, 'import numpy as np\n'), ((3606, 3642), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': '(100,)'}), '(0, 5, size=(100,))\n', (3623, 3642), True, 'import numpy as np\n'), ((3677, 3709), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (3689, 3709), True, 'import pandas as pd\n'), ((3801, 3873), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (3813, 3873), True, 'import pandas as pd\n'), ((3959, 3978), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (3967, 3978), True, 'import numpy as np\n'), ((4005, 4035), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (4014, 4035), False, 'from ema_workbench.analysis import prim\n'), ((4050, 4099), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (4057, 4099), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((4199, 4217), 'numpy.zeros', 'np.zeros', 
(['(100, 5)'], {}), '((100, 5))\n', (4207, 4217), True, 'import numpy as np\n'), ((4241, 4263), 'numpy.random.rand', 'np.random.rand', (['(100)', '(4)'], {}), '(100, 4)\n', (4255, 4263), True, 'import numpy as np\n'), ((4285, 4321), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)'], {'size': '(100,)'}), '(0, 5, size=(100,))\n', (4302, 4321), True, 'import numpy as np\n'), ((4356, 4388), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols'}), '(data, columns=cols)\n', (4368, 4388), True, 'import pandas as pd\n'), ((4474, 4546), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (4486, 4546), True, 'import pandas as pd\n'), ((4708, 4752), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', '"""y"""'], {'threshold': '(0.8)'}), "(results, 'y', threshold=0.8)\n", (4723, 4752), False, 'from ema_workbench.analysis import prim\n'), ((4767, 4816), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (4774, 4816), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((4840, 4901), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 1), (2, 5, 6)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 1), (2, 5, 6)], columns=['a', 'b', 'c'])\n", (4852, 4901), True, 'import pandas as pd\n'), ((4989, 5019), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int'}), '([0, 1], dtype=np.int)\n', (4997, 5019), True, 'import numpy as np\n'), ((5450, 5522), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (5462, 5522), True, 'import pandas as pd\n'), ((5684, 5728), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', '"""y"""'], {'threshold': '(0.8)'}), "(results, 'y', 
threshold=0.8)\n", (5699, 5728), False, 'from ema_workbench.analysis import prim\n'), ((5743, 5792), 'ema_workbench.analysis.prim.PrimBox', 'PrimBox', (['prim_obj', 'prim_obj.box_init', 'prim_obj.yi'], {}), '(prim_obj, prim_obj.box_init, prim_obj.yi)\n', (5750, 5792), False, 'from ema_workbench.analysis.prim import PrimBox\n'), ((5816, 5877), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 1), (2, 2, 6)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 1), (2, 2, 6)], columns=['a', 'b', 'c'])\n", (5828, 5877), True, 'import pandas as pd\n'), ((5965, 5995), 'numpy.array', 'np.array', (['[0, 1]'], {'dtype': 'np.int'}), '([0, 1], dtype=np.int)\n', (5973, 5995), True, 'import numpy as np\n'), ((6115, 6176), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 1), (2, 5, 6)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 1), (2, 5, 6)], columns=['a', 'b', 'c'])\n", (6127, 6176), True, 'import pandas as pd\n'), ((7052, 7077), 'test.utilities.load_flu_data', 'utilities.load_flu_data', ([], {}), '()\n', (7075, 7077), False, 'from test import utilities\n'), ((7492, 7591), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results'], {'classify': '"""death toll"""', 'threshold_type': 'prim.ABOVE', 'threshold': 'threshold'}), "(results, classify='death toll', threshold_type=prim.ABOVE,\n threshold=threshold)\n", (7507, 7591), False, 'from ema_workbench.analysis import prim\n'), ((7643, 7675), 'numpy.ones', 'np.ones', (['(experiments.shape[0],)'], {}), '((experiments.shape[0],))\n', (7650, 7675), True, 'import numpy as np\n'), ((7992, 8091), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results'], {'classify': '"""death toll"""', 'threshold_type': 'prim.BELOW', 'threshold': 'threshold'}), "(results, classify='death toll', threshold_type=prim.BELOW,\n threshold=threshold)\n", (8007, 8091), False, 'from ema_workbench.analysis import prim\n'), ((8173, 8205), 'numpy.ones', 'np.ones', (['(experiments.shape[0],)'], {}), '((experiments.shape[0],))\n', (8180, 
8205), True, 'import numpy as np\n'), ((8338, 8404), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['self.results', 'self.classify'], {'threshold': 'prim.ABOVE'}), '(self.results, self.classify, threshold=prim.ABOVE)\n', (8353, 8404), False, 'from ema_workbench.analysis import prim\n'), ((8448, 8520), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 1)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 1)], columns=['a', 'b', 'c'])\n", (8460, 8520), True, 'import pandas as pd\n'), ((8681, 8725), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', '"""y"""'], {'threshold': '(0.8)'}), "(results, 'y', threshold=0.8)\n", (8696, 8725), False, 'from ema_workbench.analysis import prim\n'), ((9193, 9218), 'test.utilities.load_flu_data', 'utilities.load_flu_data', ([], {}), '()\n', (9216, 9218), False, 'from test import utilities\n'), ((9692, 9805), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results'], {'classify': '"""death toll"""', 'threshold_type': 'prim.ABOVE', 'threshold': 'threshold', 'incl_unc': 'unc'}), "(results, classify='death toll', threshold_type=prim.ABOVE,\n threshold=threshold, incl_unc=unc)\n", (9707, 9805), False, 'from ema_workbench.analysis import prim\n'), ((9886, 9918), 'numpy.ones', 'np.ones', (['(experiments.shape[0],)'], {}), '((experiments.shape[0],))\n', (9893, 9918), True, 'import numpy as np\n'), ((10235, 10334), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results'], {'classify': '"""death toll"""', 'threshold_type': 'prim.BELOW', 'threshold': 'threshold'}), "(results, classify='death toll', threshold_type=prim.BELOW,\n threshold=threshold)\n", (10250, 10334), False, 'from ema_workbench.analysis import prim\n'), ((10416, 10448), 'numpy.ones', 'np.ones', (['(experiments.shape[0],)'], {}), '((experiments.shape[0],))\n', (10423, 10448), True, 'import numpy as np\n'), ((10581, 10647), 'ema_workbench.analysis.prim.setup_prim', 
'prim.setup_prim', (['self.results', 'self.classify'], {'threshold': 'prim.ABOVE'}), '(self.results, self.classify, threshold=prim.ABOVE)\n', (10596, 10647), False, 'from ema_workbench.analysis import prim\n'), ((10980, 10992), 'pandas.Series', 'pd.Series', (['(1)'], {}), '(1)\n', (10989, 10992), True, 'import pandas as pd\n'), ((11247, 11291), 'pandas.Series', 'pd.Series', (['[1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9]'], {}), '([1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9])\n', (11256, 11291), True, 'import pandas as pd\n'), ((11627, 11699), 'pandas.DataFrame', 'pd.DataFrame', (['[(0, 1, 2), (2, 5, 6), (3, 2, 7)]'], {'columns': "['a', 'b', 'c']"}), "([(0, 1, 2), (2, 5, 6), (3, 2, 7)], columns=['a', 'b', 'c'])\n", (11639, 11699), True, 'import pandas as pd\n'), ((11784, 11803), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (11792, 11803), True, 'import numpy as np\n'), ((11830, 11895), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.5)', 'mode': 'RuleInductionType.REGRESSION'}), '(x, y, threshold=0.5, mode=RuleInductionType.REGRESSION)\n', (11839, 11895), False, 'from ema_workbench.analysis import prim\n'), ((12346, 12545), 'pandas.DataFrame', 'pd.DataFrame', (["[[0.1, 0, 'a'], [0.2, 1, 'b'], [0.3, 2, 'a'], [0.4, 3, 'b'], [0.5, 4, 'a'],\n [0.6, 5, 'a'], [0.7, 6, 'b'], [0.8, 7, 'a'], [0.9, 8, 'b'], [1.0, 9, 'a']]"], {'columns': "['a', 'b', 'c']"}), "([[0.1, 0, 'a'], [0.2, 1, 'b'], [0.3, 2, 'a'], [0.4, 3, 'b'], [\n 0.5, 4, 'a'], [0.6, 5, 'a'], [0.7, 6, 'b'], [0.8, 7, 'a'], [0.9, 8, 'b'\n ], [1.0, 9, 'a']], columns=['a', 'b', 'c'])\n", (12358, 12545), True, 'import pandas as pd\n'), ((12809, 12833), 'numpy.arange', 'np.arange', (['(0)', 'x.shape[0]'], {}), '(0, x.shape[0])\n', (12818, 12833), True, 'import numpy as np\n'), ((12854, 12919), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.5)', 'mode': 'RuleInductionType.REGRESSION'}), '(x, y, threshold=0.5, mode=RuleInductionType.REGRESSION)\n', (12863, 
12919), False, 'from ema_workbench.analysis import prim\n'), ((13385, 13410), 'test.utilities.load_flu_data', 'utilities.load_flu_data', ([], {}), '()\n', (13408, 13410), False, 'from test import utilities\n'), ((13716, 13741), 'test.utilities.load_flu_data', 'utilities.load_flu_data', ([], {}), '()\n', (13739, 13741), False, 'from test import utilities\n'), ((13802, 13851), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', 'classify'], {'threshold': '(0.8)'}), '(results, classify, threshold=0.8)\n', (13817, 13851), False, 'from ema_workbench.analysis import prim\n'), ((14581, 14594), 'numpy.zeros', 'np.zeros', (['(100)'], {}), '(100)\n', (14589, 14594), True, 'import numpy as np\n'), ((14646, 14676), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (14655, 14676), False, 'from ema_workbench.analysis import prim\n'), ((14726, 14768), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['primalg', 'boxlims', 'primalg.yi'], {}), '(primalg, boxlims, primalg.yi)\n', (14738, 14768), False, 'from ema_workbench.analysis import prim\n'), ((15236, 15266), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (15245, 15266), False, 'from ema_workbench.analysis import prim\n'), ((15342, 15384), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['primalg', 'boxlims', 'primalg.yi'], {}), '(primalg, boxlims, primalg.yi)\n', (15354, 15384), False, 'from ema_workbench.analysis import prim\n'), ((15875, 15905), 'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (15884, 15905), False, 'from ema_workbench.analysis import prim\n'), ((15981, 16023), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['primalg', 'boxlims', 'primalg.yi'], {}), '(primalg, boxlims, primalg.yi)\n', (15993, 16023), False, 'from ema_workbench.analysis import prim\n'), ((16187, 16217), 
'ema_workbench.analysis.prim.Prim', 'prim.Prim', (['x', 'y'], {'threshold': '(0.8)'}), '(x, y, threshold=0.8)\n', (16196, 16217), False, 'from ema_workbench.analysis import prim\n'), ((16293, 16335), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['primalg', 'boxlims', 'primalg.yi'], {}), '(primalg, boxlims, primalg.yi)\n', (16305, 16335), False, 'from ema_workbench.analysis import prim\n'), ((16715, 16745), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(10,)'], {}), '(0, 2, (10,))\n', (16732, 16745), True, 'import numpy as np\n'), ((16869, 16918), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', 'classify'], {'threshold': '(0.8)'}), '(results, classify, threshold=0.8)\n', (16884, 16918), False, 'from ema_workbench.analysis import prim\n'), ((17096, 17141), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['prim_obj', 'box_lims', 'prim_obj.yi'], {}), '(prim_obj, box_lims, prim_obj.yi)\n', (17108, 17141), False, 'from ema_workbench.analysis import prim\n'), ((17782, 17812), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(10,)'], {}), '(0, 2, (10,))\n', (17799, 17812), True, 'import numpy as np\n'), ((17936, 17985), 'ema_workbench.analysis.prim.setup_prim', 'prim.setup_prim', (['results', 'classify'], {'threshold': '(0.8)'}), '(results, classify, threshold=0.8)\n', (17951, 17985), False, 'from ema_workbench.analysis import prim\n'), ((18037, 18082), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['prim_obj', 'box_lims', 'prim_obj.yi'], {}), '(prim_obj, box_lims, prim_obj.yi)\n', (18049, 18082), False, 'from ema_workbench.analysis import prim\n'), ((18495, 18513), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (18509, 18513), True, 'import numpy as np\n'), ((18696, 18726), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(10,)'], {}), '(0, 2, (10,))\n', (18713, 18726), True, 'import numpy as np\n'), ((18849, 18898), 'ema_workbench.analysis.prim.setup_prim', 
'prim.setup_prim', (['results', 'classify'], {'threshold': '(0.8)'}), '(results, classify, threshold=0.8)\n', (18864, 18898), False, 'from ema_workbench.analysis import prim\n'), ((19043, 19073), 'numpy.where', 'np.where', (["(x.loc[:, 'b'] == 'a')"], {}), "(x.loc[:, 'b'] == 'a')\n", (19051, 19073), True, 'import numpy as np\n'), ((19094, 19130), 'ema_workbench.analysis.prim.PrimBox', 'prim.PrimBox', (['prim_obj', 'box_lims', 'yi'], {}), '(prim_obj, box_lims, yi)\n', (19106, 19130), False, 'from ema_workbench.analysis import prim\n'), ((1338, 1357), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (1346, 1357), True, 'import numpy as np\n'), ((1788, 1807), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (1796, 1807), True, 'import numpy as np\n'), ((2266, 2295), 'numpy.all', 'np.all', (['(box.yi == prim_obj.yi)'], {}), '(box.yi == prim_obj.yi)\n', (2272, 2295), True, 'import numpy as np\n'), ((4637, 4656), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (4645, 4656), True, 'import numpy as np\n'), ((5613, 5632), 'numpy.array', 'np.array', (['[1, 1, 0]'], {}), '([1, 1, 0])\n', (5621, 5632), True, 'import numpy as np\n'), ((8610, 8629), 'numpy.array', 'np.array', (['[0, 1, 2]'], {}), '([0, 1, 2])\n', (8618, 8629), True, 'import numpy as np\n'), ((8914, 8939), 'test.utilities.load_flu_data', 'utilities.load_flu_data', ([], {}), '()\n', (8937, 8939), False, 'from test import utilities\n'), ((10703, 10716), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (10712, 10716), True, 'import numpy as np\n'), ((14475, 14526), 'numpy.random.randint', 'np.random.randint', (['(0)', '(10)'], {'size': '(100,)', 'dtype': 'np.int'}), '(0, 10, size=(100,), dtype=np.int)\n', (14492, 14526), True, 'import numpy as np\n'), ((10742, 10770), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.9)'], {}), '(data, 0.9)\n', (10759, 10770), False, 'from ema_workbench.analysis import prim\n'), ((10801, 10830), 
'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.95)'], {}), '(data, 0.95)\n', (10818, 10830), False, 'from ema_workbench.analysis import prim\n'), ((10861, 10889), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.1)'], {}), '(data, 0.1)\n', (10878, 10889), False, 'from ema_workbench.analysis import prim\n'), ((10920, 10949), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.05)'], {}), '(data, 0.05)\n', (10937, 10949), False, 'from ema_workbench.analysis import prim\n'), ((11017, 11045), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.9)'], {}), '(data, 0.9)\n', (11034, 11045), False, 'from ema_workbench.analysis import prim\n'), ((11074, 11103), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.95)'], {}), '(data, 0.95)\n', (11091, 11103), False, 'from ema_workbench.analysis import prim\n'), ((11132, 11160), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.1)'], {}), '(data, 0.1)\n', (11149, 11160), False, 'from ema_workbench.analysis import prim\n'), ((11189, 11218), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.05)'], {}), '(data, 0.05)\n', (11206, 11218), False, 'from ema_workbench.analysis import prim\n'), ((11306, 11334), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.9)'], {}), '(data, 0.9)\n', (11323, 11334), False, 'from ema_workbench.analysis import prim\n'), ((11365, 11394), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.95)'], {}), '(data, 0.95)\n', (11382, 11394), False, 'from ema_workbench.analysis import prim\n'), ((11425, 11453), 'ema_workbench.analysis.prim.get_quantile', 'prim.get_quantile', (['data', '(0.1)'], {}), '(data, 0.1)\n', (11442, 11453), False, 'from ema_workbench.analysis import prim\n'), ((11484, 11513), 'ema_workbench.analysis.prim.get_quantile', 
'prim.get_quantile', (['data', '(0.05)'], {}), '(data, 0.05)\n', (11501, 11513), False, 'from ema_workbench.analysis import prim\n'), ((16546, 16564), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (16560, 16564), True, 'import numpy as np\n'), ((17592, 17610), 'numpy.random.rand', 'np.random.rand', (['(10)'], {}), '(10)\n', (17606, 17610), True, 'import numpy as np\n')]
|
from pathlib import Path
import pytest
import pandas as pd
import json
import os
import urllib
import re
from nidm.experiment.Utils import map_variables_to_terms
import tempfile
from os.path import join
from nidm.core import Constants
from uuid import UUID
@pytest.fixture(scope="module", autouse="True")
def setup():
global DATA, REPROSCHEMA_JSON_MAP, BIDS_SIDECAR
temp = { 'participant_id': ['100', '101', '102', '103', '104', '105', '106', '107', '108', '109'],
'age': [18, 25, 30,19 ,35 ,20 ,27 ,29 ,38 ,27],
'sex': ['m', 'm', 'f', 'm', 'f', 'f', 'f', 'f', 'm','m'] }
DATA = pd.DataFrame(temp)
REPROSCHEMA_JSON_MAP = json.loads(
'''
{
"DD(source='participants.tsv', variable='participant_id')": {
"label": "participant_id",
"description": "subject/participant identifier",
"source_variable": "participant_id",
"responseOptions": {
"valueType": "http://www.w3.org/2001/XMLSchema#string"
},
"isAbout": [
{
"@id": "https://ndar.nih.gov/api/datadictionary/v2/dataelement/src_subject_id",
"label": "src_subject_id"
}
]
},
"DD(source='participants.tsv', variable='age')": {
"responseOptions": {
"unitCode": "years",
"minValue": "0",
"maxValue": "100",
"valueType": "http://www.w3.org/2001/XMLSchema#integer"
},
"label": "age",
"description": "age of participant",
"source_variable": "age",
"associatedWith": "NIDM",
"isAbout": [
{
"@id": "http://uri.interlex.org/ilx_0100400",
"label": "Age"
}
]
},
"DD(source='participants.tsv', variable='sex')": {
"responseOptions": {
"minValue": "NA",
"maxValue": "NA",
"unitCode": "NA",
"valueType": "http://www.w3.org/2001/XMLSchema#complexType",
"choices": {
"Male": "m",
"Female": "f"
}
},
"label": "sex",
"description": "biological sex of participant",
"source_variable": "sex",
"associatedWith": "NIDM",
"isAbout": [
{
"@id": "http://uri.interlex.org/ilx_0738439",
"label": "SEX"
}
]
}
}''')
BIDS_SIDECAR = json.loads(
'''
{
"age": {
"label": "age",
"description": "age of participant",
"source_variable": "age",
"associatedWith": "NIDM",
"isAbout": [
{
"@id": "http://uri.interlex.org/ilx_0100400",
"label": "Age"
}
],
"valueType": "http://www.w3.org/2001/XMLSchema#integer",
"minValue": "10",
"maxValue": "100"
},
"sex": {
"minValue": "NA",
"maxValue": "NA",
"unitCode": "NA",
"valueType": "http://www.w3.org/2001/XMLSchema#complexType",
"levels": {
"Male": "m",
"Female": "f"
},
"label": "sex",
"description": "biological sex of participant",
"source_variable": "sex",
"associatedWith": "NIDM",
"isAbout": [
{
"@id": "http://uri.interlex.org/ilx_0738439",
"label": "SEX"
}
]
}
}
''')
def test_map_vars_to_terms_BIDS():
'''
This function will test the Utils.py "map_vars_to_terms" function with a BIDS-formatted
JSON sidecar file
'''
global DATA, BIDS_SIDECAR
column_to_terms, cde = map_variables_to_terms(df=DATA,json_source=BIDS_SIDECAR,
directory=tempfile.gettempdir(),assessment_name="test",bids=True)
# check whether JSON mapping structure returned from map_variables_to_terms matches the
# reproshema structure
assert "DD(source='test', variable='age')" in column_to_terms.keys()
assert "DD(source='test', variable='sex')" in column_to_terms.keys()
assert "isAbout" in column_to_terms["DD(source='test', variable='age')"].keys()
assert "http://uri.interlex.org/ilx_0100400" == column_to_terms["DD(source='test', variable='age')"] \
['isAbout'][0]['@id']
assert "http://uri.interlex.org/ilx_0738439" == column_to_terms["DD(source='test', variable='sex')"] \
['isAbout'][0]['@id']
assert "responseOptions" in column_to_terms["DD(source='test', variable='sex')"].keys()
assert "choices" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions'].keys()
assert "Male" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices'].keys()
assert "m" == column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices']['Male']
assert "Male" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices'].keys()
assert "m" == column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices']['Male']
# now check the JSON sidecar file created by map_variables_to_terms which should match BIDS format
with open(join(tempfile.gettempdir(),"nidm_annotations.json")) as fp:
bids_sidecar = json.load(fp)
assert "age" in bids_sidecar.keys()
assert "sex" in bids_sidecar.keys()
assert "isAbout" in bids_sidecar["age"].keys()
assert "http://uri.interlex.org/ilx_0100400" == bids_sidecar["age"] \
['isAbout'][0]['@id']
assert "http://uri.interlex.org/ilx_0738439" == bids_sidecar["sex"] \
['isAbout'][0]['@id']
assert "levels" in bids_sidecar["sex"].keys()
assert "Male" in bids_sidecar["sex"]['levels'].keys()
assert "m" == bids_sidecar["sex"]['levels']['Male']
assert "Male" in bids_sidecar["sex"]['levels'].keys()
assert "m" == bids_sidecar["sex"]['levels']['Male']
# check the CDE dataelement graph for correct information
query = '''
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select distinct ?uuid ?DataElements ?property ?value
where {
?uuid a/rdfs:subClassOf* nidm:DataElement ;
?property ?value .
}'''
qres=cde.query(query)
results=[]
for row in qres:
results.append(list(row))
assert len(results) == 20
def test_map_vars_to_terms_reproschema():
'''
This function will test the Utils.py "map_vars_to_terms" function with a reproschema-formatted
JSON sidecar file
'''
global DATA, REPROSCHEMA_JSON_MAP
column_to_terms, cde = map_variables_to_terms(df=DATA, json_source=REPROSCHEMA_JSON_MAP,
directory=tempfile.gettempdir(), assessment_name="test")
# check whether JSON mapping structure returned from map_variables_to_terms matches the
# reproshema structure
assert "DD(source='test', variable='age')" in column_to_terms.keys()
assert "DD(source='test', variable='sex')" in column_to_terms.keys()
assert "isAbout" in column_to_terms["DD(source='test', variable='age')"].keys()
assert "http://uri.interlex.org/ilx_0100400" == column_to_terms["DD(source='test', variable='age')"] \
['isAbout'][0]['@id']
assert "http://uri.interlex.org/ilx_0738439" == column_to_terms["DD(source='test', variable='sex')"] \
['isAbout'][0]['@id']
assert "responseOptions" in column_to_terms["DD(source='test', variable='sex')"].keys()
assert "choices" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions'].keys()
assert "Male" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices'].keys()
assert "m" == column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices']['Male']
assert "Male" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices'].keys()
assert "m" == column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices']['Male']
# now check the JSON mapping file created by map_variables_to_terms which should match Reproschema format
with open(join(tempfile.gettempdir(), "nidm_annotations.json")) as fp:
reproschema_json = json.load(fp)
assert "DD(source='test', variable='age')" in column_to_terms.keys()
assert "DD(source='test', variable='sex')" in column_to_terms.keys()
assert "isAbout" in column_to_terms["DD(source='test', variable='age')"].keys()
assert "http://uri.interlex.org/ilx_0100400" == column_to_terms["DD(source='test', variable='age')"] \
['isAbout'][0]['@id']
assert "http://uri.interlex.org/ilx_0738439" == column_to_terms["DD(source='test', variable='sex')"] \
['isAbout'][0]['@id']
assert "responseOptions" in column_to_terms["DD(source='test', variable='sex')"].keys()
assert "choices" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions'].keys()
assert "Male" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices'].keys()
assert "m" == column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices']['Male']
assert "Male" in column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices'].keys()
assert "m" == column_to_terms["DD(source='test', variable='sex')"]['responseOptions']['choices']['Male']
# check the CDE dataelement graph for correct information
query = '''
prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
select distinct ?uuid ?DataElements ?property ?value
where {
?uuid a/rdfs:subClassOf* nidm:DataElement ;
?property ?value .
}'''
qres = cde.query(query)
results = []
for row in qres:
results.append(list(row))
assert len(results) == 20
|
[
"pandas.DataFrame",
"json.load",
"json.loads",
"tempfile.gettempdir",
"pytest.fixture"
] |
[((265, 311), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'autouse': '"""True"""'}), "(scope='module', autouse='True')\n", (279, 311), False, 'import pytest\n'), ((626, 644), 'pandas.DataFrame', 'pd.DataFrame', (['temp'], {}), '(temp)\n', (638, 644), True, 'import pandas as pd\n'), ((673, 2881), 'json.loads', 'json.loads', (['"""\n {\n "DD(source=\'participants.tsv\', variable=\'participant_id\')": {\n "label": "participant_id",\n "description": "subject/participant identifier",\n "source_variable": "participant_id",\n "responseOptions": {\n "valueType": "http://www.w3.org/2001/XMLSchema#string"\n },\n "isAbout": [\n {\n "@id": "https://ndar.nih.gov/api/datadictionary/v2/dataelement/src_subject_id",\n "label": "src_subject_id"\n }\n ]\n },\n "DD(source=\'participants.tsv\', variable=\'age\')": {\n "responseOptions": {\n "unitCode": "years",\n "minValue": "0",\n "maxValue": "100",\n "valueType": "http://www.w3.org/2001/XMLSchema#integer"\n },\n "label": "age",\n "description": "age of participant",\n "source_variable": "age",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0100400",\n "label": "Age"\n }\n ]\n },\n "DD(source=\'participants.tsv\', variable=\'sex\')": {\n "responseOptions": {\n "minValue": "NA",\n "maxValue": "NA",\n "unitCode": "NA",\n "valueType": "http://www.w3.org/2001/XMLSchema#complexType",\n "choices": {\n "Male": "m",\n "Female": "f"\n }\n },\n "label": "sex",\n "description": "biological sex of participant",\n "source_variable": "sex",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0738439",\n "label": "SEX"\n }\n ]\n }\n }"""'], {}), '(\n """\n {\n "DD(source=\'participants.tsv\', variable=\'participant_id\')": {\n "label": "participant_id",\n "description": "subject/participant identifier",\n "source_variable": "participant_id",\n "responseOptions": {\n "valueType": "http://www.w3.org/2001/XMLSchema#string"\n },\n "isAbout": [\n {\n "@id": 
"https://ndar.nih.gov/api/datadictionary/v2/dataelement/src_subject_id",\n "label": "src_subject_id"\n }\n ]\n },\n "DD(source=\'participants.tsv\', variable=\'age\')": {\n "responseOptions": {\n "unitCode": "years",\n "minValue": "0",\n "maxValue": "100",\n "valueType": "http://www.w3.org/2001/XMLSchema#integer"\n },\n "label": "age",\n "description": "age of participant",\n "source_variable": "age",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0100400",\n "label": "Age"\n }\n ]\n },\n "DD(source=\'participants.tsv\', variable=\'sex\')": {\n "responseOptions": {\n "minValue": "NA",\n "maxValue": "NA",\n "unitCode": "NA",\n "valueType": "http://www.w3.org/2001/XMLSchema#complexType",\n "choices": {\n "Male": "m",\n "Female": "f"\n }\n },\n "label": "sex",\n "description": "biological sex of participant",\n "source_variable": "sex",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0738439",\n "label": "SEX"\n }\n ]\n }\n }"""\n )\n', (683, 2881), False, 'import json\n'), ((2901, 4222), 'json.loads', 'json.loads', (['"""\n {\n "age": {\n "label": "age",\n "description": "age of participant",\n "source_variable": "age",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0100400",\n "label": "Age"\n }\n ],\n "valueType": "http://www.w3.org/2001/XMLSchema#integer",\n "minValue": "10",\n "maxValue": "100"\n },\n "sex": {\n "minValue": "NA",\n "maxValue": "NA",\n "unitCode": "NA",\n "valueType": "http://www.w3.org/2001/XMLSchema#complexType",\n "levels": {\n "Male": "m",\n "Female": "f"\n },\n "label": "sex",\n "description": "biological sex of participant",\n "source_variable": "sex",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0738439",\n "label": "SEX"\n }\n ]\n }\n }\n \n """'], {}), '(\n """\n {\n "age": {\n "label": "age",\n "description": "age of participant",\n "source_variable": "age",\n "associatedWith": "NIDM",\n 
"isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0100400",\n "label": "Age"\n }\n ],\n "valueType": "http://www.w3.org/2001/XMLSchema#integer",\n "minValue": "10",\n "maxValue": "100"\n },\n "sex": {\n "minValue": "NA",\n "maxValue": "NA",\n "unitCode": "NA",\n "valueType": "http://www.w3.org/2001/XMLSchema#complexType",\n "levels": {\n "Male": "m",\n "Female": "f"\n },\n "label": "sex",\n "description": "biological sex of participant",\n "source_variable": "sex",\n "associatedWith": "NIDM",\n "isAbout": [\n {\n "@id": "http://uri.interlex.org/ilx_0738439",\n "label": "SEX"\n }\n ]\n }\n }\n \n """\n )\n', (2911, 4222), False, 'import json\n'), ((6060, 6073), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (6069, 6073), False, 'import json\n'), ((9057, 9070), 'json.load', 'json.load', (['fp'], {}), '(fp)\n', (9066, 9070), False, 'import json\n'), ((4532, 4553), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (4551, 4553), False, 'import tempfile\n'), ((7538, 7559), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (7557, 7559), False, 'import tempfile\n'), ((5982, 6003), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (6001, 6003), False, 'import tempfile\n'), ((8974, 8995), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (8993, 8995), False, 'import tempfile\n')]
|
# -*- coding: utf-8 -*-
"""
Test Household model module.
"""
from django.test import TestCase
from mspray.apps.main.models.household import Household
from mspray.apps.main.models.location import Location
from mspray.apps.main.tests.utils import data_setup, load_spray_data
class TestHousehold(TestCase):
"""Test Household model class"""
def test_sprayable(self):
"""Test sprayable."""
data_setup()
load_spray_data()
self.assertEqual(Household.objects.filter(sprayable=False).count(), 4)
akros_2 = Location.objects.get(name="Akros_2", level="ta")
self.assertEqual(akros_2.not_sprayable, 4)
|
[
"mspray.apps.main.models.household.Household.objects.filter",
"mspray.apps.main.tests.utils.load_spray_data",
"mspray.apps.main.models.location.Location.objects.get",
"mspray.apps.main.tests.utils.data_setup"
] |
[((413, 425), 'mspray.apps.main.tests.utils.data_setup', 'data_setup', ([], {}), '()\n', (423, 425), False, 'from mspray.apps.main.tests.utils import data_setup, load_spray_data\n'), ((434, 451), 'mspray.apps.main.tests.utils.load_spray_data', 'load_spray_data', ([], {}), '()\n', (449, 451), False, 'from mspray.apps.main.tests.utils import data_setup, load_spray_data\n'), ((550, 598), 'mspray.apps.main.models.location.Location.objects.get', 'Location.objects.get', ([], {'name': '"""Akros_2"""', 'level': '"""ta"""'}), "(name='Akros_2', level='ta')\n", (570, 598), False, 'from mspray.apps.main.models.location import Location\n'), ((477, 518), 'mspray.apps.main.models.household.Household.objects.filter', 'Household.objects.filter', ([], {'sprayable': '(False)'}), '(sprayable=False)\n', (501, 518), False, 'from mspray.apps.main.models.household import Household\n')]
|
"""Network helpers."""
from ipaddress import ip_address
from typing import cast
import yarl
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.loader import bind_hass
from homeassistant.util.network import (
is_ip_address,
is_local,
is_loopback,
is_private,
normalize_url,
)
TYPE_URL_INTERNAL = "internal_url"
TYPE_URL_EXTERNAL = "external_url"
class NoURLAvailableError(HomeAssistantError):
"""An URL to the Home Assistant instance is not available."""
@bind_hass
@callback
def async_get_url(
hass: HomeAssistant,
*,
require_ssl: bool = False,
require_standard_port: bool = False,
allow_internal: bool = True,
allow_external: bool = True,
allow_cloud: bool = True,
allow_ip: bool = True,
prefer_external: bool = False,
prefer_cloud: bool = False,
) -> str:
"""Get a URL to this instance."""
order = [TYPE_URL_INTERNAL, TYPE_URL_EXTERNAL]
if prefer_external:
order.reverse()
# Try finding an URL in the order specified
for url_type in order:
if allow_internal and url_type == TYPE_URL_INTERNAL:
try:
return _async_get_internal_url(
hass,
allow_ip=allow_ip,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
if allow_external and url_type == TYPE_URL_EXTERNAL:
try:
return _async_get_external_url(
hass,
allow_cloud=allow_cloud,
allow_ip=allow_ip,
prefer_cloud=prefer_cloud,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
# We have to be honest now, we have no viable option available
raise NoURLAvailableError
@bind_hass
@callback
def _async_get_internal_url(
hass: HomeAssistant,
*,
allow_ip: bool = True,
require_ssl: bool = False,
require_standard_port: bool = False,
) -> str:
"""Get internal URL of this instance."""
if hass.config.internal_url:
internal_url = yarl.URL(hass.config.internal_url)
if (
(not require_ssl or internal_url.scheme == "https")
and (not require_standard_port or internal_url.is_default_port())
and (allow_ip or not is_ip_address(str(internal_url.host)))
):
return normalize_url(str(internal_url))
# Fallback to old base_url
try:
return _async_get_deprecated_base_url(
hass,
internal=True,
allow_ip=allow_ip,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
# Fallback to detected local IP
if allow_ip and not (
require_ssl or hass.config.api is None or hass.config.api.use_ssl
):
ip_url = yarl.URL.build(
scheme="http", host=hass.config.api.local_ip, port=hass.config.api.port
)
if not is_loopback(ip_address(ip_url.host)) and (
not require_standard_port or ip_url.is_default_port()
):
return normalize_url(str(ip_url))
raise NoURLAvailableError
@bind_hass
@callback
def _async_get_external_url(
hass: HomeAssistant,
*,
allow_cloud: bool = True,
allow_ip: bool = True,
prefer_cloud: bool = False,
require_ssl: bool = False,
require_standard_port: bool = False,
) -> str:
"""Get external URL of this instance."""
if prefer_cloud and allow_cloud:
try:
return _async_get_cloud_url(hass)
except NoURLAvailableError:
pass
if hass.config.external_url:
external_url = yarl.URL(hass.config.external_url)
if (
(allow_ip or not is_ip_address(str(external_url.host)))
and (not require_standard_port or external_url.is_default_port())
and (
not require_ssl
or (
external_url.scheme == "https"
and not is_ip_address(str(external_url.host))
)
)
):
return normalize_url(str(external_url))
try:
return _async_get_deprecated_base_url(
hass,
allow_ip=allow_ip,
require_ssl=require_ssl,
require_standard_port=require_standard_port,
)
except NoURLAvailableError:
pass
if allow_cloud:
try:
return _async_get_cloud_url(hass)
except NoURLAvailableError:
pass
raise NoURLAvailableError
@bind_hass
@callback
def _async_get_cloud_url(hass: HomeAssistant) -> str:
"""Get external Home Assistant Cloud URL of this instance."""
if "cloud" in hass.config.components:
try:
return cast(str, hass.components.cloud.async_remote_ui_url())
except hass.components.cloud.CloudNotAvailable:
pass
raise NoURLAvailableError
@bind_hass
@callback
def _async_get_deprecated_base_url(
hass: HomeAssistant,
*,
internal: bool = False,
allow_ip: bool = True,
require_ssl: bool = False,
require_standard_port: bool = False,
) -> str:
"""Work with the deprecated `base_url`, used as fallback."""
if hass.config.api is None or not hass.config.api.deprecated_base_url:
raise NoURLAvailableError
base_url = yarl.URL(hass.config.api.deprecated_base_url)
# Rules that apply to both internal and external
if (
(allow_ip or not is_ip_address(str(base_url.host)))
and (not require_ssl or base_url.scheme == "https")
and (not require_standard_port or base_url.is_default_port())
):
# Check to ensure an internal URL
if internal and (
str(base_url.host).endswith(".local")
or (
is_ip_address(str(base_url.host))
and not is_loopback(ip_address(base_url.host))
and is_private(ip_address(base_url.host))
)
):
return normalize_url(str(base_url))
# Check to ensure an external URL (a little)
if (
not internal
and not str(base_url.host).endswith(".local")
and not (
is_ip_address(str(base_url.host))
and is_local(ip_address(str(base_url.host)))
)
):
return normalize_url(str(base_url))
raise NoURLAvailableError
|
[
"yarl.URL.build",
"ipaddress.ip_address",
"yarl.URL"
] |
[((5684, 5729), 'yarl.URL', 'yarl.URL', (['hass.config.api.deprecated_base_url'], {}), '(hass.config.api.deprecated_base_url)\n', (5692, 5729), False, 'import yarl\n'), ((2367, 2401), 'yarl.URL', 'yarl.URL', (['hass.config.internal_url'], {}), '(hass.config.internal_url)\n', (2375, 2401), False, 'import yarl\n'), ((3166, 3258), 'yarl.URL.build', 'yarl.URL.build', ([], {'scheme': '"""http"""', 'host': 'hass.config.api.local_ip', 'port': 'hass.config.api.port'}), "(scheme='http', host=hass.config.api.local_ip, port=hass.\n config.api.port)\n", (3180, 3258), False, 'import yarl\n'), ((3994, 4028), 'yarl.URL', 'yarl.URL', (['hass.config.external_url'], {}), '(hass.config.external_url)\n', (4002, 4028), False, 'import yarl\n'), ((3303, 3326), 'ipaddress.ip_address', 'ip_address', (['ip_url.host'], {}), '(ip_url.host)\n', (3313, 3326), False, 'from ipaddress import ip_address\n'), ((6268, 6293), 'ipaddress.ip_address', 'ip_address', (['base_url.host'], {}), '(base_url.host)\n', (6278, 6293), False, 'from ipaddress import ip_address\n'), ((6210, 6235), 'ipaddress.ip_address', 'ip_address', (['base_url.host'], {}), '(base_url.host)\n', (6220, 6235), False, 'from ipaddress import ip_address\n')]
|
from souschef.jinja_expression import (
get_global_jinja_var,
is_jinja_expression,
set_global_jinja_var,
)
def test_add_jinja_var(pure_yaml_with_comments):
set_global_jinja_var(pure_yaml_with_comments, "version", "10.9.8")
assert get_global_jinja_var(pure_yaml_with_comments, "version") == "10.9.8"
def test_is_jinja_expression(simple_full_recipe):
assert is_jinja_expression(simple_full_recipe[0])
assert not is_jinja_expression(simple_full_recipe[-1])
def test_get_global_jinja_var(simple_full_recipe):
assert get_global_jinja_var(simple_full_recipe, "name") == "mat_discover"
assert get_global_jinja_var(simple_full_recipe, "version") == "1.2.1"
def test_get_global_jinja_var_multiple_one_line(multiple_jinja_var_same_line):
assert get_global_jinja_var(multiple_jinja_var_same_line, "name") == "mat_discover"
assert get_global_jinja_var(multiple_jinja_var_same_line, "version") == "1.2.1"
def test_set_global_jinja_var(simple_full_recipe):
set_global_jinja_var(simple_full_recipe, "name", "NEW_NAME")
assert get_global_jinja_var(simple_full_recipe, "name") == "NEW_NAME"
set_global_jinja_var(simple_full_recipe, "version", "3.2.1")
assert get_global_jinja_var(simple_full_recipe, "version") == "3.2.1"
def test_set_global_jinja_var_multiple_one_line(multiple_jinja_var_same_line):
set_global_jinja_var(multiple_jinja_var_same_line, "name", "NEW_NAME")
assert get_global_jinja_var(multiple_jinja_var_same_line, "name") == "NEW_NAME"
set_global_jinja_var(multiple_jinja_var_same_line, "version", "3.2.1")
assert get_global_jinja_var(multiple_jinja_var_same_line, "version") == "3.2.1"
|
[
"souschef.jinja_expression.get_global_jinja_var",
"souschef.jinja_expression.set_global_jinja_var",
"souschef.jinja_expression.is_jinja_expression"
] |
[((174, 240), 'souschef.jinja_expression.set_global_jinja_var', 'set_global_jinja_var', (['pure_yaml_with_comments', '"""version"""', '"""10.9.8"""'], {}), "(pure_yaml_with_comments, 'version', '10.9.8')\n", (194, 240), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((384, 426), 'souschef.jinja_expression.is_jinja_expression', 'is_jinja_expression', (['simple_full_recipe[0]'], {}), '(simple_full_recipe[0])\n', (403, 426), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1001, 1061), 'souschef.jinja_expression.set_global_jinja_var', 'set_global_jinja_var', (['simple_full_recipe', '"""name"""', '"""NEW_NAME"""'], {}), "(simple_full_recipe, 'name', 'NEW_NAME')\n", (1021, 1061), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1141, 1201), 'souschef.jinja_expression.set_global_jinja_var', 'set_global_jinja_var', (['simple_full_recipe', '"""version"""', '"""3.2.1"""'], {}), "(simple_full_recipe, 'version', '3.2.1')\n", (1161, 1201), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1361, 1431), 'souschef.jinja_expression.set_global_jinja_var', 'set_global_jinja_var', (['multiple_jinja_var_same_line', '"""name"""', '"""NEW_NAME"""'], {}), "(multiple_jinja_var_same_line, 'name', 'NEW_NAME')\n", (1381, 1431), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1521, 1591), 'souschef.jinja_expression.set_global_jinja_var', 'set_global_jinja_var', (['multiple_jinja_var_same_line', '"""version"""', '"""3.2.1"""'], {}), "(multiple_jinja_var_same_line, 'version', '3.2.1')\n", (1541, 1591), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((252, 308), 
'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['pure_yaml_with_comments', '"""version"""'], {}), "(pure_yaml_with_comments, 'version')\n", (272, 308), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((442, 485), 'souschef.jinja_expression.is_jinja_expression', 'is_jinja_expression', (['simple_full_recipe[-1]'], {}), '(simple_full_recipe[-1])\n', (461, 485), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((550, 598), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['simple_full_recipe', '"""name"""'], {}), "(simple_full_recipe, 'name')\n", (570, 598), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((628, 679), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['simple_full_recipe', '"""version"""'], {}), "(simple_full_recipe, 'version')\n", (648, 679), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((783, 841), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['multiple_jinja_var_same_line', '"""name"""'], {}), "(multiple_jinja_var_same_line, 'name')\n", (803, 841), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((871, 932), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['multiple_jinja_var_same_line', '"""version"""'], {}), "(multiple_jinja_var_same_line, 'version')\n", (891, 932), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1073, 1121), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['simple_full_recipe', '"""name"""'], {}), "(simple_full_recipe, 'name')\n", (1093, 1121), False, 'from 
souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1213, 1264), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['simple_full_recipe', '"""version"""'], {}), "(simple_full_recipe, 'version')\n", (1233, 1264), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1443, 1501), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['multiple_jinja_var_same_line', '"""name"""'], {}), "(multiple_jinja_var_same_line, 'name')\n", (1463, 1501), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n'), ((1603, 1664), 'souschef.jinja_expression.get_global_jinja_var', 'get_global_jinja_var', (['multiple_jinja_var_same_line', '"""version"""'], {}), "(multiple_jinja_var_same_line, 'version')\n", (1623, 1664), False, 'from souschef.jinja_expression import get_global_jinja_var, is_jinja_expression, set_global_jinja_var\n')]
|
import sys, os, glob
import pandas as pd, numpy as np
import ujson
import datetime
from ast import literal_eval
from get_workflow_info import get_workflow_info, get_class_cols, translate_non_alphanumerics, get_short_slug
################################################################################
# Jailbreak question annotations from their JSON
# (will partially jailbreak markings etc, but not fully)
################################################################################
def breakout_anno_q(row, workflow_info):
    """
    Flatten one classification's question annotations into a pandas Series.

    Calling-convention note: if you iterate a DataFrame yourself via
    iterrows(), each item is an (index, row) pair so you must pass row[1];
    a .apply(lambda row: ...) hands the row in directly.

    :param row: classification record exposing an 'anno_json' entry
        (the parsed list of task annotations)
    :param workflow_info: workflow lookup dict mapping '<task>_shorttext'
        to the short column label for that task
    :return: pd.Series keyed by each task's short-text label
    """
    flattened = {}
    for task in row['anno_json']:
        label = workflow_info[task['task'] + '_shorttext']
        try:
            flattened[label] = task['value']
        except:
            # fall back to the stringified value if direct storage fails
            flattened[label] = str(task['value'])
    return pd.Series(flattened)
################################################################################
# Jailbreak survey annotations from their JSON
################################################################################
def breakout_anno_survey(row, workflow_info, fp, classcols, thecols):
    """
    Flatten one classification's survey annotations and write one CSV row
    per mark to the open file handle.

    :param row: classification record exposing 'anno_json' plus the
        metadata columns named in classcols
    :param workflow_info: workflow definition dict keyed by task name
    :param fp: open, writable file object the rows are appended to
    :param classcols: metadata column names copied straight from the row
    :param thecols: annotation column names, reset to '' for each mark
    :return: the number of marks written for this classification
    """
    annotations = row['anno_json']
    #classcols = "classification_id created_at user_name user_id user_ip".split()
    printcols = classcols + thecols
    n_marks = 0
    theclass = {}
    # fill the row with the basic classification information
    # theclass['classification_id'] = row.index
    # for col in "created_at user_name user_id user_ip".split():
    #     theclass[col] = row[col]
    # actually, let's assume we haven't set classification_id to be the index
    for col in classcols:
        theclass[col] = row[col]
    # create all the other relevant columns
    for col in thecols:
        theclass[col] = ''
    #print(workflow_info)
    for task in annotations:
        taskname = task['task']
        tasktype = workflow_info[taskname]['type']
        # for a survey we expect a survey task and a "shortcut" for e.g.
        # "Nothing Here", and they require different approaches
        # either way we'll write 1 row per mark to the file
        if tasktype == "survey":
            marks = task['value']
            for mark in marks:
                n_marks += 1
                # empty the dict of marks
                for col in thecols:
                    theclass[col] = ''
                # fill in the dict
                theclass[taskname.lower()+'_choice'] = mark['choice']
                for ans in mark['answers'].keys():
                    # column label comes from the workflow's question slug
                    thelabel = workflow_info[taskname]['questions'][ans]['label_slug']
                    #thelabel = get_short_slug(ans)
                    theclass[taskname.lower()+'_'+thelabel] = mark['answers'][ans]
                # not currently doing anything with "filters"
                # print the mark
                write_class_row(fp, theclass, printcols)
        elif tasktype == "shortcut":
            n_marks += 1
            # empty the dict of marks
            for col in thecols:
                theclass[col] = ''
            # populate a default value for all the relevant columns
            for ans in workflow_info[taskname]['answers']:
                theclass[ans['label_slug']] = False
            # now populate the ones we have actual info for
            for ans_orig in task['value']:
                # get the index in the workflow answer map so we can fetch
                # the correct column label
                i_a = workflow_info[taskname]['answer_map'][ans_orig]
                ans = workflow_info[taskname]['answers'][i_a]['label_slug']
                #ans = get_short_slug(ans_orig.lower())
                theclass[ans] = True
            # now write the row to the file
            write_class_row(fp, theclass, printcols)
    return n_marks
################################################################################
# Write a dictionary to a csv using columns and order in thecols
################################################################################
def write_class_row(fp, theclass, thecols):
    """
    Write one comma-separated row to fp, taking values from theclass
    in the column order given by thecols.

    Lists and tuples are double-quoted so embedded commas stay inside
    one CSV cell; a trailing newline ends the row.
    """
    for idx, colname in enumerate(thecols):
        value = theclass[colname]
        if idx > 0:
            fp.write(",")
        try:
            if isinstance(value, (list, tuple)):
                fp.write('"%s"' % str(value))
            else:
                fp.write(str(value))
        except:
            # best-effort: skip any value that fails to stringify/write
            pass
    fp.write("\n")
    return
################################################################################
# Compute a vote fraction
################################################################################
def getfrac(row, colname, colcount):
    """
    Return the vote fraction row[colname] / row[colcount].

    :param row: mapping/Series holding vote counts
    :param colname: column with the numerator (votes for one answer)
    :param colcount: column with the denominator (total votes)
    :return: the fraction as a float, or 0.0 when the denominator is zero,
        a column is missing, or a value cannot be coerced to float
    """
    try:
        return float(row[colname])/float(row[colcount])
    # narrowed from a bare except (which also swallowed KeyboardInterrupt /
    # SystemExit) to the failure modes this lookup/division can actually hit
    except (KeyError, IndexError, TypeError, ValueError, ZeroDivisionError):
        return 0.0
################################################################################
# Aggregate question vote fractions based on a dictionary of tasks
################################################################################
def aggregate_questions(classifications, theqdict, verbose=True):
    """
    Aggregate per-subject answer counts and vote fractions for question tasks.

    :param classifications: DataFrame of flattened classifications with
        'subject_ids' and 'count' columns plus one column per question
    :param theqdict: mapping of task name (e.g. 'T1') -> question column name
    :param verbose: if True, print a progress line per task
    :return: DataFrame indexed by subject_ids with n_class_total plus
        per-answer *_count and *_frac columns for every question
    """
    by_subj = classifications.groupby(['subject_ids'])
    subj_ans = by_subj['count'].aggregate('sum')
    subj_ans.name = 'n_class_total'
    # this should set up with the index==subject_ids and the column name we've just specified
    class_counts = pd.DataFrame(subj_ans)
    # .items() works on both Python 2 and 3; the original .iteritems() is
    # Python-2-only and raises AttributeError on a py3 dict
    for t, q in theqdict.items():
        if verbose:
            print("Aggregating task %s (%s)... %s" % (t, q, datetime.datetime.now().strftime('%H:%M:%S')))
        colstem = t.lower()+'_'+q+'_'
        by_q_subj = classifications.groupby(['subject_ids', q])
        q_subj_ans = by_q_subj['count'].aggregate('sum')
        subj_anscounts_df = pd.DataFrame(q_subj_ans).unstack().fillna(0.0)
        # the unstack leaves multi-level column names, so flatten them
        newcolnames = []
        fraccolnames = []
        for namepair in subj_anscounts_df.columns:
            # namepair[0] should be 'count' because that's the column we summed on;
            # namepair[1] is the text of each answer - make it label-friendly
            thisans = (translate_non_alphanumerics(namepair[1], translate_to=u'')).replace('\n', '_').replace(' ', '_').replace('__', '_').replace('__', '_').lower()
            # e.g. 't1_spiral_arms_attached_yes_count'
            thisnewcol = colstem + thisans + '_count'
            thisnewfrac = colstem + thisans + '_frac'
            newcolnames.append(thisnewcol)
            fraccolnames.append(thisnewfrac)
            class_counts[thisnewcol] = np.zeros_like(class_counts.n_class_total)
        subj_anscounts_df.columns = newcolnames
        class_counts[newcolnames] = subj_anscounts_df
        # per-task total answer count, then per-answer vote fractions
        class_counts[colstem+'count'] = class_counts[newcolnames].apply(lambda row: sum(row), axis=1)
        for i, thecol in enumerate(newcolnames):
            thefraccol = fraccolnames[i]
            class_counts[thefraccol] = class_counts.apply(lambda row: getfrac(row, thecol, colstem+'count'), axis=1)
    # just some cleanup (replace NaNs with 0.0)
    class_counts.fillna(0.0, inplace=True)
    return class_counts
################################################################################
# Aggregate survey classifications based on a workflow definition dict
################################################################################
def aggregate_survey(grp, workflow_info):
    """
    Aggregate one subject's survey classifications into per-choice and
    per-answer counts.

    :param grp: a groupby group (one subject's jailbroken classifications);
        must expose a 'classification_id' column plus the per-task columns
        written by the annotation breakout step
    :param workflow_info: workflow definition dict keyed by task name,
        with 'tasknames', per-task 'type', 'choices', 'questions', etc.
    :return: pd.Series of counts/fractions keyed by slugged column names
    """
    #workflow_info = wf_info
    # groupby() --> df because indexing etc is slightly different
    subj = pd.DataFrame(grp)
    # get the columns we'll be using based on the workflow info
    class_cols = get_class_cols(workflow_info)
    # initialize the dict that will hold the counts
    theclass = {}
    for col in class_cols:
        theclass[col] = 0.0
    # count the number of classifications for this subject
    theclass['class_count'] = len(subj.classification_id.unique())
    # now loop through tasks
    for task in workflow_info['tasknames']:
        # we will do something slightly different for the survey itself
        # versus the "unlinked" task(s) e.g. "Nothing Here"
        task_low = task.lower()
        if workflow_info[task]['type'] == "survey":
            # only deal with the choices we actually need for this subject
            choicecol = "%s_choice" % task_low
            choices = (subj[choicecol].unique()).tolist()
            # ignore if there are empties, which read here as NaN
            try:
                choices.remove(np.nan)
            except ValueError:
                # if there aren't any NaNs in the list, carry on
                pass
            # make sure this task isn't empty
            if (len(choices) > 0):
                # get the questions we're working with
                qcol = []
                qmult = []
                for i_q in range(len(workflow_info[task]['questionsOrder'])):
                    q = workflow_info[task]['questionsOrder'][i_q]
                    #qcol[i_q] = "%s_%s" % (task_low, workflow_info[task]['questions'][q]['label_slug'])
                    qcol.append(workflow_info[task]['questions'][q]['label_slug'])
                    qmult.append(workflow_info[task]['questions'][q]['multiple'])
                for choice in choices:
                    # choice_slug will have the taskname prepended
                    choice_slug = workflow_info[task]['choices'][choice]['label_slug']
                    # only deal with the annotations that indicated this choice
                    this_choice = subj[subj[choicecol] == choice]
                    # count 'em up
                    choice_count = float(len(this_choice))
                    theclass["%s_count" % choice_slug] = choice_count
                    # now deal with the questions for each choice
                    for i_q in range(len(qcol)):
                        q = workflow_info[task]['questionsOrder'][i_q]
                        # the column we're saving to
                        class_slug = "%s_%s" % (choice_slug, qcol[i_q])
                        # the column we're reading from
                        col_slug = "%s_%s" % (task_low, workflow_info[task]['questions'][q]['label_slug'])
                        # if this question requires a single answer, this is relatively easy
                        if not qmult[i_q]:
                            theclass["%s_count" % class_slug] = float(len(this_choice[col_slug]))
                            by_ans = this_choice.groupby(col_slug)
                            theans = this_choice[col_slug].unique()
                            ans_count = by_ans['count'].aggregate('sum')
                            for a in ans_count.index:
                                a_str = a
                                # NOTE(review): 'basestring' is Python-2-only;
                                # confirm this module runs under Python 2
                                if not isinstance(a, basestring):
                                    a_str = str(int(a))
                                a_slug = workflow_info[task]['questions'][q]['answers'][a_str]['label_slug']
                                colname = "%s_%s_count" % (choice_slug, a_slug)
                                theclass[colname] = ans_count[a]
                        else:
                            # we need to deal with questions that can have multiple answers
                            # we stored them as a list, but stringified
                            try:
                                ans_list = [literal_eval(t) for t in this_choice[col_slug].values]
                                list_all = [item for sublist in ans_list for item in sublist]
                            except:
                                # values weren't stringified lists; use them as-is
                                ans_list = [t for t in this_choice[col_slug].values]
                                list_all = ans_list
                            # this will flatten the list of lists
                            adf = pd.DataFrame(list_all)
                            adf.columns = ['ans']
                            adf['count'] = np.ones_like(list_all, dtype=int)
                            by_ans = adf.groupby('ans')
                            ans_count = by_ans['count'].aggregate('sum')
                            for a in ans_count.index:
                                a_str = a
                                if not isinstance(a, basestring):
                                    a_str = str(int(a))
                                a_slug = workflow_info[task]['questions'][q]['answers'][a_str]['label_slug']
                                colname = "%s_%s_count" % (choice_slug, a_slug)
                                theclass[colname] = ans_count[a]
        elif workflow_info[task]['type'] == "shortcut":
            # what columns and possible answers are we working with here?
            #answers = []
            #anno_cols = []
            for q in workflow_info[task]['answers']:
                # the actual answer text
                #answers.append(q['label'])
                # the column name in the jailbroken annotations file
                #anno_cols.append(q['label_slug'])
                thecol = q['label_slug']
                # the True values are already in there
                x = subj[thecol].fillna(False)
                thecount = float(sum(x))
                theclass["%s_count" % thecol] = thecount
                theclass["%s_frac" % thecol] = thecount/theclass['class_count']
    return pd.Series(theclass)
#end
|
[
"pandas.DataFrame",
"numpy.zeros_like",
"get_workflow_info.get_class_cols",
"numpy.ones_like",
"get_workflow_info.translate_non_alphanumerics",
"pandas.Series",
"ast.literal_eval",
"datetime.datetime.now"
] |
[((2197, 2216), 'pandas.Series', 'pd.Series', (['theclass'], {}), '(theclass)\n', (2206, 2216), True, 'import pandas as pd, numpy as np\n'), ((6722, 6744), 'pandas.DataFrame', 'pd.DataFrame', (['subj_ans'], {}), '(subj_ans)\n', (6734, 6744), True, 'import pandas as pd, numpy as np\n'), ((9013, 9030), 'pandas.DataFrame', 'pd.DataFrame', (['grp'], {}), '(grp)\n', (9025, 9030), True, 'import pandas as pd, numpy as np\n'), ((9113, 9142), 'get_workflow_info.get_class_cols', 'get_class_cols', (['workflow_info'], {}), '(workflow_info)\n', (9127, 9142), False, 'from get_workflow_info import get_workflow_info, get_class_cols, translate_non_alphanumerics, get_short_slug\n'), ((14838, 14857), 'pandas.Series', 'pd.Series', (['theclass'], {}), '(theclass)\n', (14847, 14857), True, 'import pandas as pd, numpy as np\n'), ((8049, 8090), 'numpy.zeros_like', 'np.zeros_like', (['class_counts.n_class_total'], {}), '(class_counts.n_class_total)\n', (8062, 8090), True, 'import pandas as pd, numpy as np\n'), ((7199, 7223), 'pandas.DataFrame', 'pd.DataFrame', (['q_subj_ans'], {}), '(q_subj_ans)\n', (7211, 7223), True, 'import pandas as pd, numpy as np\n'), ((13304, 13326), 'pandas.DataFrame', 'pd.DataFrame', (['list_all'], {}), '(list_all)\n', (13316, 13326), True, 'import pandas as pd, numpy as np\n'), ((13420, 13453), 'numpy.ones_like', 'np.ones_like', (['list_all'], {'dtype': 'int'}), '(list_all, dtype=int)\n', (13432, 13453), True, 'import pandas as pd, numpy as np\n'), ((6917, 6940), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (6938, 6940), False, 'import datetime\n'), ((12881, 12896), 'ast.literal_eval', 'literal_eval', (['t'], {}), '(t)\n', (12893, 12896), False, 'from ast import literal_eval\n'), ((7613, 7671), 'get_workflow_info.translate_non_alphanumerics', 'translate_non_alphanumerics', (['namepair[1]'], {'translate_to': 'u""""""'}), "(namepair[1], translate_to=u'')\n", (7640, 7671), False, 'from get_workflow_info import get_workflow_info, 
get_class_cols, translate_non_alphanumerics, get_short_slug\n')]
|
import json
from flask import Flask, render_template, request
from app.index import Index
# Flask application instance; the routes below are registered against it.
app = Flask(__name__)
@app.route('/', methods=['GET'])
def index():
    """Render the landing page."""
    return render_template('index.html')
@app.route('/search/', methods=['POST'])
def search():
    """
    Look up a company name posted in the 'company_name' form field and
    return a JSON string with a status and the result (or error message).
    """
    company_name = request.form['company_name']
    try:
        result = Index.check_comapny_name(company_name)
        payload = {"status": "Success", "message": result}
    except Exception as exc:
        # surface the failure to the client instead of a 500 page
        payload = {"status": "Error!", "message": str(exc)}
    return json.dumps(payload)
if __name__ == "__main__":
    # Start Flask's built-in development server (not suitable for production).
    app.run()
|
[
"flask.Flask",
"app.index.Index.check_comapny_name",
"flask.render_template",
"json.dumps"
] |
[((97, 112), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (102, 112), False, 'from flask import Flask, render_template, request\n'), ((205, 235), 'flask.render_template', 'render_template', (['template_name'], {}), '(template_name)\n', (220, 235), False, 'from flask import Flask, render_template, request\n'), ((539, 555), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (549, 555), False, 'import json\n'), ((359, 389), 'app.index.Index.check_comapny_name', 'Index.check_comapny_name', (['word'], {}), '(word)\n', (383, 389), False, 'from app.index import Index\n')]
|
"""
Provide tests for atomic swap handler initialization method implementation.
"""
import datetime
import time
import pytest
from sawtooth_sdk.processor.exceptions import InvalidTransaction
from sawtooth_sdk.protobuf.processor_pb2 import TpProcessRequest
from sawtooth_sdk.protobuf.setting_pb2 import Setting
from sawtooth_sdk.protobuf.transaction_pb2 import (
Transaction,
TransactionHeader,
)
from testing.conftest import create_signer
from testing.mocks.stub import StubContext
from testing.utils.client import proto_error_msg
from remme.clients.block_info import (
CONFIG_ADDRESS,
BlockInfoClient,
)
from remme.protos.account_pb2 import Account
from remme.protos.atomic_swap_pb2 import (
AtomicSwapInfo,
AtomicSwapInitPayload,
AtomicSwapMethod,
)
from remme.protos.block_info_pb2 import BlockInfo, BlockInfoConfig
from remme.protos.transaction_pb2 import TransactionPayload
from remme.shared.utils import hash512
from remme.settings import (
SETTINGS_KEY_ZERO_ADDRESS_OWNERS,
SETTINGS_SWAP_COMMISSION,
ZERO_ADDRESS,
)
from remme.settings.helper import _make_settings_key
from remme.tp.atomic_swap import AtomicSwapHandler
from remme.tp.basic import BasicHandler
# --- swap parameters shared by every test below ---
TOKENS_AMOUNT_TO_SWAP = 200
SWAP_COMMISSION_AMOUNT = 100
# --- bot (swap initiator) identity ---
BOT_ETHEREUM_ADDRESS = '0xe6ca0e7c974f06471759e9a05d18b538c5ced11e'
BOT_PRIVATE_KEY = '<KEY>'
BOT_PUBLIC_KEY = '03ecc5cb4094eb05319be6c7a63ebf17133d4ffaea48cdcfd1d5fc79dac7db7b6b'
BOT_ADDRESS = '112007b9433e1da5c624ff926477141abedfd57585a36590b0a8edc4104ef28093ee30'
# --- Alice (swap receiver) identity ---
ALICE_ETHEREUM_ADDRESS = '0x8dfe0f55a1cf9b22b8c85a9ff7a85a28a3879f71'
ALICE_ADDRESS = '112007db8a00c010402e2e3a7d03491323e761e0ea612481c518605648ceeb5ed454f7'
ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR = '0x6f4d5666332f5a575a714d4245624455612f2b4345424f704b4256704f5'
# empty secret lock marks the bot as the swap initiator
BOT_IT_IS_INITIATOR_MARK = ''
SWAP_ID = '033102e41346242476b15a3a7966eb5249271025fc7fb0b37ed3fdb4bcce3884'
# --- derived ledger addresses used as transaction inputs/outputs ---
ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY = _make_settings_key(SETTINGS_SWAP_COMMISSION)
ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY = _make_settings_key(SETTINGS_KEY_ZERO_ADDRESS_OWNERS)
ADDRESS_TO_STORE_SWAP_INFO_BY = BasicHandler(
    name=AtomicSwapHandler().family_name, versions=AtomicSwapHandler()._family_versions[0]
).make_address_from_data(data=SWAP_ID)
TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS = {
    'family_name': AtomicSwapHandler().family_name,
    'family_version': AtomicSwapHandler()._family_versions[0],
}
RANDOM_NODE_PUBLIC_KEY = '<KEY>'
RANDOM_PUBLIC_KEY = '<KEY>'
# timestamp captured once at import so payloads and block info agree
CURRENT_TIMESTAMP = int(datetime.datetime.now().timestamp())
# --- pre-serialized block info state for the mock context ---
BLOCK_INFO_CONFIG_ADDRESS = CONFIG_ADDRESS
BLOCK_INFO_ADDRESS = BlockInfoClient.create_block_address(1000)
block_info_config = BlockInfoConfig()
block_info_config.latest_block = 1000
SERIALIZED_BLOCK_INFO_CONFIG = block_info_config.SerializeToString()
block_info = BlockInfo()
block_info.timestamp = CURRENT_TIMESTAMP
SERIALIZED_BLOCK_INFO = block_info.SerializeToString()
# default transaction input/output address sets
INPUTS = [
    ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY,
    BLOCK_INFO_CONFIG_ADDRESS,
    BLOCK_INFO_ADDRESS,
    BOT_ADDRESS,
    ZERO_ADDRESS,
    ADDRESS_TO_STORE_SWAP_INFO_BY,
]
OUTPUTS = [
    ADDRESS_TO_STORE_SWAP_INFO_BY,
    ZERO_ADDRESS,
    BOT_ADDRESS,
]
def test_atomic_swap_init_with_empty_proto():
    """
    Case: send empty proto for init.
    Expect: invalid transaction error listing every missing required field.
    """
    # local inputs/outputs additionally include the genesis members setting address
    inputs = outputs = [
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY,
        BLOCK_INFO_CONFIG_ADDRESS,
        BLOCK_INFO_ADDRESS,
        BOT_ADDRESS,
        ZERO_ADDRESS,
        ADDRESS_TO_STORE_SWAP_INFO_BY,
        ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY,
    ]
    # payload left intentionally empty so every required field fails validation
    atomic_swap_init_payload = AtomicSwapInitPayload()
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    # wrap the empty payload in an otherwise well-formed signed transaction
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=inputs,
        outputs=outputs,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    mock_context = StubContext(inputs=inputs, outputs=outputs, initial_state={})
    # handler must reject the transaction and report every missing field
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert proto_error_msg(
        AtomicSwapInitPayload,
        {
            'receiver_address': ['Missed address'],
            'sender_address_non_local': ['This field is required.'],
            'amount': ['This field is required.'],
            'swap_id': ['Missed swap_id'],
            'created_at': ['This field is required.'],
        }
    ) == str(error.value)
def test_atomic_swap_init():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens.
    Expect: bot sends commission to the zero account address, swap amount is decreased from bot account.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # initial ledger state: funded bot account and an empty zero account
    bot_account = Account()
    bot_account.balance = 5000
    serialized_bot_account = bot_account.SerializeToString()
    zero_account = Account()
    zero_account.balance = 0
    serialized_zero_account = zero_account.SerializeToString()
    # on-chain settings: swap commission amount and genesis members list
    swap_commission_setting = Setting()
    swap_commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))
    serialized_swap_commission_setting = swap_commission_setting.SerializeToString()
    genesis_members_setting = Setting()
    genesis_members_setting.entries.add(key=SETTINGS_KEY_ZERO_ADDRESS_OWNERS, value=f'{BOT_PUBLIC_KEY},')
    serialized_genesis_members_setting = genesis_members_setting.SerializeToString()
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        BOT_ADDRESS: serialized_bot_account,
        ZERO_ADDRESS: serialized_zero_account,
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: serialized_swap_commission_setting,
        ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY: serialized_genesis_members_setting,
    })
    # expected stored swap record after a successful INIT
    swap_info = AtomicSwapInfo()
    swap_info.swap_id = SWAP_ID
    swap_info.state = AtomicSwapInfo.OPENED
    swap_info.amount = TOKENS_AMOUNT_TO_SWAP
    swap_info.created_at = CURRENT_TIMESTAMP
    swap_info.email_address_encrypted_optional = ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR
    swap_info.sender_address = BOT_ADDRESS
    swap_info.sender_address_non_local = BOT_ETHEREUM_ADDRESS
    swap_info.receiver_address = ALICE_ADDRESS
    swap_info.is_initiator = True
    serialized_swap_info = swap_info.SerializeToString()
    # bot pays the swapped amount plus the commission; zero account collects it
    expected_bot_account = Account()
    expected_bot_account.balance = 5000 - TOKENS_AMOUNT_TO_SWAP - SWAP_COMMISSION_AMOUNT
    serialized_expected_bot_account = expected_bot_account.SerializeToString()
    expected_zero_account = Account()
    expected_zero_account.balance = SWAP_COMMISSION_AMOUNT
    serialized_expected_zero_account = expected_zero_account.SerializeToString()
    expected_state = {
        BOT_ADDRESS: serialized_expected_bot_account,
        ZERO_ADDRESS: serialized_expected_zero_account,
        ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info,
    }
    AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    state_as_list = mock_context.get_state(addresses=[
        ADDRESS_TO_STORE_SWAP_INFO_BY, BOT_ADDRESS, ZERO_ADDRESS,
    ])
    state_as_dict = {entry.address: entry.data for entry in state_as_list}
    assert expected_state == state_as_dict
def test_atomic_swap_init_already_taken_id():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with already existing swap id.
    Expect: invalid transaction error is raised with atomic swap id has already been taken error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # pre-populate state with a swap already stored under the same SWAP_ID
    swap_info = AtomicSwapInfo()
    swap_info.swap_id = SWAP_ID
    swap_info.state = AtomicSwapInfo.OPENED
    swap_info.amount = TOKENS_AMOUNT_TO_SWAP
    swap_info.created_at = CURRENT_TIMESTAMP
    swap_info.email_address_encrypted_optional = ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR
    swap_info.sender_address = BOT_ADDRESS
    swap_info.sender_address_non_local = BOT_ETHEREUM_ADDRESS
    swap_info.receiver_address = ALICE_ADDRESS
    serialized_swap_info = swap_info.SerializeToString()
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert 'Atomic swap ID has already been taken, please use a different one.' == str(error.value)
def test_atomic_swap_init_swap_no_block_config_info():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when no block config settings.
    Expect: invalid transaction error is raised with block config not found error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # state deliberately left empty: no block config entry is present
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={})
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert 'Block config not found.' == str(error.value)
def test_atomic_swap_init_swap_no_block_info():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens when no needed block information.
    Expect: invalid transaction error is raised with block not found error message.
    """
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    # block config is present but the block it points at is missing from state
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert f'Block {block_info_config.latest_block + 1} not found.' == str(error.value)
def test_atomic_swap_init_swap_receiver_address_invalid_type():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with invalid Alice node address.
    Expect: invalid transaction error is raised with address is not of a blockchain token type error message.
    """
    # malformed address: wrong characters for a blockchain token address
    invalid_receiver_address = '112934y*(J#QJ3UH*PD(:9B&TYDB*I0b0a8edc4104ef28093ee30'
    atomic_swap_init_payload = AtomicSwapInitPayload(
        receiver_address=invalid_receiver_address,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )
    transaction_payload = TransactionPayload()
    transaction_payload.method = AtomicSwapMethod.INIT
    transaction_payload.data = atomic_swap_init_payload.SerializeToString()
    serialized_transaction_payload = transaction_payload.SerializeToString()
    transaction_header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=serialized_transaction_payload),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )
    serialized_header = transaction_header.SerializeToString()
    transaction_request = TpProcessRequest(
        header=transaction_header,
        payload=serialized_transaction_payload,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(serialized_header),
    )
    mock_context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
    })
    with pytest.raises(InvalidTransaction) as error:
        AtomicSwapHandler().apply(transaction=transaction_request, context=mock_context)
    assert proto_error_msg(
        AtomicSwapInitPayload,
        {'receiver_address': ['Address is not of a blockchain token type.']}
    ) == str(error.value)
def test_atomic_swap_init_swap_wrong_commission_address():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with wrong commission settings.
    Expect: invalid transaction error is raised with wrong commission address error message.
    """
    # Commission setting carrying a negative value — the handler must reject it.
    commission_setting = Setting()
    commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value='-1')

    init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )

    # Wrap the swap payload into a generic transaction payload.
    payload = TransactionPayload()
    payload.method = AtomicSwapMethod.INIT
    payload.data = init_payload.SerializeToString()
    payload_bytes = payload.SerializeToString()

    header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=payload_bytes),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )

    request = TpProcessRequest(
        header=header,
        payload=payload_bytes,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(header.SerializeToString()),
    )

    context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: commission_setting.SerializeToString(),
    })

    with pytest.raises(InvalidTransaction) as error_info:
        AtomicSwapHandler().apply(transaction=request, context=context)

    assert 'Wrong commission address.' == str(error_info.value)
def test_atomic_swap_init_swap_no_account_in_state():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens from non-existent bot address.
    Expect: invalid transaction error is raised with not enough balance error message.
    """
    # Valid commission setting; the failure here comes from the missing bot account.
    commission_setting = Setting()
    commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))

    init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )

    payload = TransactionPayload()
    payload.method = AtomicSwapMethod.INIT
    payload.data = init_payload.SerializeToString()
    payload_bytes = payload.SerializeToString()

    header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=payload_bytes),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )

    request = TpProcessRequest(
        header=header,
        payload=payload_bytes,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(header.SerializeToString()),
    )

    # Note: BOT_ADDRESS is deliberately absent from the initial state.
    context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: commission_setting.SerializeToString(),
    })

    with pytest.raises(InvalidTransaction) as error_info:
        AtomicSwapHandler().apply(transaction=request, context=context)

    expected_amount = TOKENS_AMOUNT_TO_SWAP + SWAP_COMMISSION_AMOUNT
    expected_message = (
        f'Not enough balance to perform the transaction in the amount (with a commission) {expected_amount}.'
    )
    assert expected_message == str(error_info.value)
def test_atomic_swap_init_swap_not_enough_balance():
    """
    Case: initialize swap of bot's Remme node tokens to Alice's ERC20 Remme tokens with not enough bot address balance.
    Expect: invalid transaction error is raised with not enough balance error message.
    """
    # Bot account exists in state, but its balance (zero) cannot cover amount + commission.
    empty_bot_account = Account()
    empty_bot_account.balance = 0

    commission_setting = Setting()
    commission_setting.entries.add(key=SETTINGS_SWAP_COMMISSION, value=str(SWAP_COMMISSION_AMOUNT))

    init_payload = AtomicSwapInitPayload(
        receiver_address=ALICE_ADDRESS,
        sender_address_non_local=BOT_ETHEREUM_ADDRESS,
        amount=TOKENS_AMOUNT_TO_SWAP,
        swap_id=SWAP_ID,
        secret_lock_by_solicitor=BOT_IT_IS_INITIATOR_MARK,
        email_address_encrypted_by_initiator=ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR,
        created_at=CURRENT_TIMESTAMP,
    )

    payload = TransactionPayload()
    payload.method = AtomicSwapMethod.INIT
    payload.data = init_payload.SerializeToString()
    payload_bytes = payload.SerializeToString()

    header = TransactionHeader(
        signer_public_key=BOT_PUBLIC_KEY,
        family_name=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_name'),
        family_version=TRANSACTION_REQUEST_ACCOUNT_HANDLER_PARAMS.get('family_version'),
        inputs=INPUTS,
        outputs=OUTPUTS,
        dependencies=[],
        payload_sha512=hash512(data=payload_bytes),
        batcher_public_key=RANDOM_NODE_PUBLIC_KEY,
        nonce=time.time().hex().encode(),
    )

    request = TpProcessRequest(
        header=header,
        payload=payload_bytes,
        signature=create_signer(private_key=BOT_PRIVATE_KEY).sign(header.SerializeToString()),
    )

    context = StubContext(inputs=INPUTS, outputs=OUTPUTS, initial_state={
        BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,
        BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,
        BOT_ADDRESS: empty_bot_account.SerializeToString(),
        ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY: commission_setting.SerializeToString(),
    })

    with pytest.raises(InvalidTransaction) as error_info:
        AtomicSwapHandler().apply(transaction=request, context=context)

    expected_amount = TOKENS_AMOUNT_TO_SWAP + SWAP_COMMISSION_AMOUNT
    expected_message = (
        f'Not enough balance to perform the transaction in the amount (with a commission) {expected_amount}.'
    )
    assert expected_message == str(error_info.value)
|
[
"remme.protos.block_info_pb2.BlockInfoConfig",
"remme.tp.atomic_swap.AtomicSwapHandler",
"testing.mocks.stub.StubContext",
"remme.protos.block_info_pb2.BlockInfo",
"remme.shared.utils.hash512",
"sawtooth_sdk.protobuf.setting_pb2.Setting",
"datetime.datetime.now",
"time.time",
"remme.protos.atomic_swap_pb2.AtomicSwapInfo",
"pytest.raises",
"remme.settings.helper._make_settings_key",
"remme.clients.block_info.BlockInfoClient.create_block_address",
"remme.protos.account_pb2.Account",
"remme.protos.atomic_swap_pb2.AtomicSwapInitPayload",
"testing.utils.client.proto_error_msg",
"remme.protos.transaction_pb2.TransactionPayload",
"testing.conftest.create_signer"
] |
[((1956, 2000), 'remme.settings.helper._make_settings_key', '_make_settings_key', (['SETTINGS_SWAP_COMMISSION'], {}), '(SETTINGS_SWAP_COMMISSION)\n', (1974, 2000), False, 'from remme.settings.helper import _make_settings_key\n'), ((2047, 2099), 'remme.settings.helper._make_settings_key', '_make_settings_key', (['SETTINGS_KEY_ZERO_ADDRESS_OWNERS'], {}), '(SETTINGS_KEY_ZERO_ADDRESS_OWNERS)\n', (2065, 2099), False, 'from remme.settings.helper import _make_settings_key\n'), ((2631, 2673), 'remme.clients.block_info.BlockInfoClient.create_block_address', 'BlockInfoClient.create_block_address', (['(1000)'], {}), '(1000)\n', (2667, 2673), False, 'from remme.clients.block_info import CONFIG_ADDRESS, BlockInfoClient\n'), ((2695, 2712), 'remme.protos.block_info_pb2.BlockInfoConfig', 'BlockInfoConfig', ([], {}), '()\n', (2710, 2712), False, 'from remme.protos.block_info_pb2 import BlockInfo, BlockInfoConfig\n'), ((2834, 2845), 'remme.protos.block_info_pb2.BlockInfo', 'BlockInfo', ([], {}), '()\n', (2843, 2845), False, 'from remme.protos.block_info_pb2 import BlockInfo, BlockInfoConfig\n'), ((3661, 3684), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {}), '()\n', (3682, 3684), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((3712, 3732), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (3730, 3732), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((4746, 4807), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'inputs', 'outputs': 'outputs', 'initial_state': '{}'}), '(inputs=inputs, outputs=outputs, initial_state={})\n', (4757, 4807), False, 'from testing.mocks.stub import StubContext\n'), ((5595, 5923), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 
'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (5616, 5923), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((5995, 6015), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (6013, 6015), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((7028, 7037), 'remme.protos.account_pb2.Account', 'Account', ([], {}), '()\n', (7035, 7037), False, 'from remme.protos.account_pb2 import Account\n'), ((7150, 7159), 'remme.protos.account_pb2.Account', 'Account', ([], {}), '()\n', (7157, 7159), False, 'from remme.protos.account_pb2 import Account\n'), ((7283, 7292), 'sawtooth_sdk.protobuf.setting_pb2.Setting', 'Setting', ([], {}), '()\n', (7290, 7292), False, 'from sawtooth_sdk.protobuf.setting_pb2 import Setting\n'), ((7514, 7523), 'sawtooth_sdk.protobuf.setting_pb2.Setting', 'Setting', ([], {}), '()\n', (7521, 7523), False, 'from sawtooth_sdk.protobuf.setting_pb2 import Setting\n'), ((7735, 8158), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO, BOT_ADDRESS:\n serialized_bot_account, ZERO_ADDRESS: serialized_zero_account,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting,\n ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY:\n serialized_genesis_members_setting}'}), '(inputs=INPUTS, 
outputs=OUTPUTS, initial_state={\n BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO, BOT_ADDRESS:\n serialized_bot_account, ZERO_ADDRESS: serialized_zero_account,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting,\n ADDRESS_TO_GET_GENESIS_MEMBERS_AS_STRING_BY:\n serialized_genesis_members_setting})\n', (7746, 8158), False, 'from testing.mocks.stub import StubContext\n'), ((8202, 8218), 'remme.protos.atomic_swap_pb2.AtomicSwapInfo', 'AtomicSwapInfo', ([], {}), '()\n', (8216, 8218), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((8748, 8757), 'remme.protos.account_pb2.Account', 'Account', ([], {}), '()\n', (8755, 8757), False, 'from remme.protos.account_pb2 import Account\n'), ((8955, 8964), 'remme.protos.account_pb2.Account', 'Account', ([], {}), '()\n', (8962, 8964), False, 'from remme.protos.account_pb2 import Account\n'), ((9956, 10284), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (9977, 10284), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((10356, 10376), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (10374, 10376), False, 'from 
remme.protos.transaction_pb2 import TransactionPayload\n'), ((11387, 11403), 'remme.protos.atomic_swap_pb2.AtomicSwapInfo', 'AtomicSwapInfo', ([], {}), '()\n', (11401, 11403), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((11891, 12008), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={\n ADDRESS_TO_STORE_SWAP_INFO_BY: serialized_swap_info})\n', (11902, 12008), False, 'from testing.mocks.stub import StubContext\n'), ((12573, 12901), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (12594, 12901), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((12973, 12993), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (12991, 12993), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((14007, 14068), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={})\n', (14018, 14068), False, 'from testing.mocks.stub import StubContext\n'), ((14576, 14904), 
'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (14597, 14904), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((14976, 14996), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (14994, 14996), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((16010, 16131), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={\n BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG})\n', (16021, 16131), False, 'from testing.mocks.stub import StubContext\n'), ((16797, 17136), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'invalid_receiver_address', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=invalid_receiver_address,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, 
secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (16818, 17136), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((17208, 17228), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (17226, 17228), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((18242, 18410), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={\n BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO})\n', (18253, 18410), False, 'from testing.mocks.stub import StubContext\n'), ((19048, 19376), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (19069, 19376), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((19448, 19468), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (19466, 19468), False, 'from remme.protos.transaction_pb2 import 
TransactionPayload\n'), ((20493, 20502), 'sawtooth_sdk.protobuf.setting_pb2.Setting', 'Setting', ([], {}), '()\n', (20500, 20502), False, 'from sawtooth_sdk.protobuf.setting_pb2 import Setting\n'), ((20690, 20944), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={\n BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting})\n', (20701, 20944), False, 'from testing.mocks.stub import StubContext\n'), ((21466, 21794), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (21487, 21794), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((21866, 21886), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (21884, 21886), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((22911, 22920), 'sawtooth_sdk.protobuf.setting_pb2.Setting', 'Setting', ([], {}), '()\n', (22918, 22920), 
False, 'from sawtooth_sdk.protobuf.setting_pb2 import Setting\n'), ((23131, 23385), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={\n BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting})\n', (23142, 23385), False, 'from testing.mocks.stub import StubContext\n'), ((24064, 24392), 'remme.protos.atomic_swap_pb2.AtomicSwapInitPayload', 'AtomicSwapInitPayload', ([], {'receiver_address': 'ALICE_ADDRESS', 'sender_address_non_local': 'BOT_ETHEREUM_ADDRESS', 'amount': 'TOKENS_AMOUNT_TO_SWAP', 'swap_id': 'SWAP_ID', 'secret_lock_by_solicitor': 'BOT_IT_IS_INITIATOR_MARK', 'email_address_encrypted_by_initiator': 'ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR', 'created_at': 'CURRENT_TIMESTAMP'}), '(receiver_address=ALICE_ADDRESS,\n sender_address_non_local=BOT_ETHEREUM_ADDRESS, amount=\n TOKENS_AMOUNT_TO_SWAP, swap_id=SWAP_ID, secret_lock_by_solicitor=\n BOT_IT_IS_INITIATOR_MARK, email_address_encrypted_by_initiator=\n ALICE_EMAIL_ADDRESS_ENCRYPTED_BY_INITIATOR, created_at=CURRENT_TIMESTAMP)\n', (24085, 24392), False, 'from remme.protos.atomic_swap_pb2 import AtomicSwapInfo, AtomicSwapInitPayload, AtomicSwapMethod\n'), ((24464, 24484), 'remme.protos.transaction_pb2.TransactionPayload', 'TransactionPayload', ([], {}), '()\n', (24482, 24484), False, 'from remme.protos.transaction_pb2 import TransactionPayload\n'), ((25497, 25506), 'remme.protos.account_pb2.Account', 'Account', ([], {}), '()\n', (25504, 25506), False, 'from remme.protos.account_pb2 import Account\n'), ((25635, 25644), 'sawtooth_sdk.protobuf.setting_pb2.Setting', 'Setting', ([], {}), 
'()\n', (25642, 25644), False, 'from sawtooth_sdk.protobuf.setting_pb2 import Setting\n'), ((25855, 26158), 'testing.mocks.stub.StubContext', 'StubContext', ([], {'inputs': 'INPUTS', 'outputs': 'OUTPUTS', 'initial_state': '{BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO, BOT_ADDRESS:\n serialized_bot_account_balance,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting}'}), '(inputs=INPUTS, outputs=OUTPUTS, initial_state={\n BLOCK_INFO_CONFIG_ADDRESS: SERIALIZED_BLOCK_INFO_CONFIG,\n BLOCK_INFO_ADDRESS: SERIALIZED_BLOCK_INFO, BOT_ADDRESS:\n serialized_bot_account_balance,\n ADDRESS_TO_GET_SWAP_COMMISSION_AMOUNT_BY:\n serialized_swap_commission_setting})\n', (25866, 26158), False, 'from testing.mocks.stub import StubContext\n'), ((2344, 2363), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (2361, 2363), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((4818, 4851), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (4831, 4851), False, 'import pytest\n'), ((4963, 5227), 'testing.utils.client.proto_error_msg', 'proto_error_msg', (['AtomicSwapInitPayload', "{'receiver_address': ['Missed address'], 'sender_address_non_local': [\n 'This field is required.'], 'amount': ['This field is required.'],\n 'swap_id': ['Missed swap_id'], 'created_at': ['This field is required.']}"], {}), "(AtomicSwapInitPayload, {'receiver_address': [\n 'Missed address'], 'sender_address_non_local': [\n 'This field is required.'], 'amount': ['This field is required.'],\n 'swap_id': ['Missed swap_id'], 'created_at': ['This field is required.']})\n", (4978, 5227), False, 'from testing.utils.client import proto_error_msg\n'), ((12029, 12062), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (12042, 12062), False, 'import pytest\n'), ((14079, 14112), 'pytest.raises', 'pytest.raises', 
(['InvalidTransaction'], {}), '(InvalidTransaction)\n', (14092, 14112), False, 'import pytest\n'), ((16152, 16185), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (16165, 16185), False, 'import pytest\n'), ((18435, 18468), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (18448, 18468), False, 'import pytest\n'), ((18580, 18693), 'testing.utils.client.proto_error_msg', 'proto_error_msg', (['AtomicSwapInitPayload', "{'receiver_address': ['Address is not of a blockchain token type.']}"], {}), "(AtomicSwapInitPayload, {'receiver_address': [\n 'Address is not of a blockchain token type.']})\n", (18595, 18693), False, 'from testing.utils.client import proto_error_msg\n'), ((20969, 21002), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (20982, 21002), False, 'import pytest\n'), ((23410, 23443), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (23423, 23443), False, 'import pytest\n'), ((26187, 26220), 'pytest.raises', 'pytest.raises', (['InvalidTransaction'], {}), '(InvalidTransaction)\n', (26200, 26220), False, 'import pytest\n'), ((2399, 2418), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (2416, 2418), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((2529, 2552), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2550, 2552), False, 'import datetime\n'), ((4297, 4341), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (4304, 4341), False, 'from remme.shared.utils import hash512\n'), ((6580, 6624), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (6587, 6624), False, 'from remme.shared.utils import hash512\n'), ((9311, 9330), 'remme.tp.atomic_swap.AtomicSwapHandler', 
'AtomicSwapHandler', ([], {}), '()\n', (9328, 9330), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((10941, 10985), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (10948, 10985), False, 'from remme.shared.utils import hash512\n'), ((13558, 13602), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (13565, 13602), False, 'from remme.shared.utils import hash512\n'), ((15561, 15605), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (15568, 15605), False, 'from remme.shared.utils import hash512\n'), ((17793, 17837), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (17800, 17837), False, 'from remme.shared.utils import hash512\n'), ((20033, 20077), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (20040, 20077), False, 'from remme.shared.utils import hash512\n'), ((22451, 22495), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (22458, 22495), False, 'from remme.shared.utils import hash512\n'), ((25049, 25093), 'remme.shared.utils.hash512', 'hash512', ([], {'data': 'serialized_transaction_payload'}), '(data=serialized_transaction_payload)\n', (25056, 25093), False, 'from remme.shared.utils import hash512\n'), ((4870, 4889), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (4887, 4889), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((12081, 12100), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (12098, 12100), False, 'from remme.tp.atomic_swap import 
AtomicSwapHandler\n'), ((14131, 14150), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (14148, 14150), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((16204, 16223), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (16221, 16223), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((18487, 18506), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (18504, 18506), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((21021, 21040), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (21038, 21040), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((23462, 23481), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (23479, 23481), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((26239, 26258), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (26256, 26258), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((2155, 2174), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (2172, 2174), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((4652, 4694), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (4665, 4694), False, 'from testing.conftest import create_signer\n'), ((6935, 6977), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (6948, 6977), False, 'from testing.conftest import create_signer\n'), ((11296, 11338), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (11309, 11338), False, 'from testing.conftest import create_signer\n'), ((13913, 13955), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 
'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (13926, 13955), False, 'from testing.conftest import create_signer\n'), ((15916, 15958), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (15929, 15958), False, 'from testing.conftest import create_signer\n'), ((18148, 18190), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (18161, 18190), False, 'from testing.conftest import create_signer\n'), ((20388, 20430), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (20401, 20430), False, 'from testing.conftest import create_signer\n'), ((22806, 22848), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (22819, 22848), False, 'from testing.conftest import create_signer\n'), ((25404, 25446), 'testing.conftest.create_signer', 'create_signer', ([], {'private_key': 'BOT_PRIVATE_KEY'}), '(private_key=BOT_PRIVATE_KEY)\n', (25417, 25446), False, 'from testing.conftest import create_signer\n'), ((2197, 2216), 'remme.tp.atomic_swap.AtomicSwapHandler', 'AtomicSwapHandler', ([], {}), '()\n', (2214, 2216), False, 'from remme.tp.atomic_swap import AtomicSwapHandler\n'), ((4408, 4419), 'time.time', 'time.time', ([], {}), '()\n', (4417, 4419), False, 'import time\n'), ((6691, 6702), 'time.time', 'time.time', ([], {}), '()\n', (6700, 6702), False, 'import time\n'), ((11052, 11063), 'time.time', 'time.time', ([], {}), '()\n', (11061, 11063), False, 'import time\n'), ((13669, 13680), 'time.time', 'time.time', ([], {}), '()\n', (13678, 13680), False, 'import time\n'), ((15672, 15683), 'time.time', 'time.time', ([], {}), '()\n', (15681, 15683), False, 'import time\n'), ((17904, 17915), 'time.time', 'time.time', ([], {}), '()\n', (17913, 17915), False, 'import time\n'), ((20144, 
20155), 'time.time', 'time.time', ([], {}), '()\n', (20153, 20155), False, 'import time\n'), ((22562, 22573), 'time.time', 'time.time', ([], {}), '()\n', (22571, 22573), False, 'import time\n'), ((25160, 25171), 'time.time', 'time.time', ([], {}), '()\n', (25169, 25171), False, 'import time\n')]
|
import logging
from nmigen.compat import *
from nmigen.compat.genlib.cdc import MultiReg
from nmigen.compat.genlib.fifo import _FIFOInterface
from ..gateware.analyzer import *
__all__ = ["GlasgowAnalyzer"]
class GlasgowAnalyzer(Module):
    """Gateware logic analyzer that aggregates event sources (FIFO traffic,
    pin changes, arbitrary signals) registered by applets and streams the
    captured events to the host through the multiplexer's IN FIFO."""
    logger = logging.getLogger(__name__)
    def __init__(self, registers, multiplexer, event_depth=None):
        multiplexer.set_analyzer(self)
        # Claim our own host interface; with_analyzer=False so the analyzer
        # does not try to observe its own FIFO traffic.
        self.mux_interface = multiplexer.claim_interface(self, args=None, with_analyzer=False)
        self.event_analyzer = self.mux_interface.add_subtarget(
            EventAnalyzer(output_fifo=self.mux_interface.get_in_fifo(auto_flush=False),
                          event_depth=event_depth))
        self.event_sources = self.event_analyzer.event_sources
        self.throttle = self.event_analyzer.throttle
        # 1-bit host-writable register; the host sets it to finish the capture.
        self.done, self.addr_done = registers.add_rw(1)
        self.logger.debug("adding done register at address %#04x", self.addr_done)
        self.comb += self.event_analyzer.done.eq(self.done)
        # Pin triples collected via add_pin_event(); they are merged into
        # event sources later by _finalize_pin_events().
        self._pins = []
    def _name(self, applet, event):
        # Historically events were prefixed with the applet name; currently
        # the bare event name is used.
        # return "{}-{}".format(applet.name, event)
        return event
    def add_generic_event(self, applet, name, signal):
        """Capture every change of an arbitrary signal as a "change" event."""
        event_source = self.event_analyzer.add_event_source(
            name=self._name(applet, name), kind="change", width=signal.nbits)
        signal_r = Signal.like(signal)
        event_source.sync += [
            signal_r.eq(signal),
        ]
        event_source.comb += [
            event_source.data.eq(signal),
            # Trigger whenever the value differs from the previous cycle's.
            event_source.trigger.eq(signal != signal_r),
        ]
    def add_in_fifo_event(self, applet, fifo):
        """Record every byte written into an IN (device-to-host) FIFO."""
        event_source = self.event_analyzer.add_event_source(
            name=self._name(applet, "fifo-in"), kind="strobe", width=8)
        event_source.sync += [
            event_source.trigger.eq(fifo.writable & fifo.we),
            event_source.data.eq(fifo.din)
        ]
    def add_out_fifo_event(self, applet, fifo):
        """Record every byte read out of an OUT (host-to-device) FIFO."""
        event_source = self.event_analyzer.add_event_source(
            name=self._name(applet, "fifo-out"), kind="strobe", width=8)
        event_source.comb += [
            event_source.trigger.eq(fifo.readable & fifo.re),
            event_source.data.eq(fifo.dout)
        ]
    def add_pin_event(self, applet, name, triple):
        """Register an I/O pin triple to be observed; actual event sources are
        created in _finalize_pin_events()."""
        self._pins.append((self._name(applet, name), triple))
    def _finalize_pin_events(self):
        # Build two combined "change" event sources (oe and io) covering all
        # registered pins, so a single event captures the whole pin state.
        if not self._pins:
            return
        reg_reset = Signal()
        self.sync += reg_reset.eq(self.event_analyzer.reset)
        pin_oes = []
        pin_ios = []
        for (name, triple) in self._pins:
            # Synchronize the asynchronous pin input into our clock domain.
            sync_i  = Signal.like(triple.i)
            self.specials += MultiReg(triple.i, sync_i)
            pin_oes.append((name, triple.oe))
            # Observed level: driven output when enabled, else synced input.
            pin_ios.append((name, Mux(triple.oe, triple.o, sync_i)))
        sig_oes = Cat(oe for n, oe in pin_oes)
        reg_oes = Signal.like(sig_oes)
        sig_ios = Cat(io for n, io in pin_ios)
        reg_ios = Signal.like(sig_ios)
        # Registered copies of the concatenated values, for change detection.
        self.sync += [
            reg_oes.eq(sig_oes),
            reg_ios.eq(sig_ios),
        ]
        oe_event_source = self.event_analyzer.add_event_source(
            name="oe", kind="change", width=value_bits_sign(sig_oes)[0],
            fields=[(name, value_bits_sign(oe)[0]) for name, oe in pin_oes])
        io_event_source = self.event_analyzer.add_event_source(
            name="io", kind="change", width=value_bits_sign(sig_ios)[0],
            fields=[(name, value_bits_sign(io)[0]) for name, io in pin_ios])
        # Also trigger right after an analyzer reset so the initial pin state
        # is always recorded.
        self.comb += [
            oe_event_source.trigger.eq(reg_reset | (sig_oes != reg_oes)),
            oe_event_source.data.eq(sig_oes),
            io_event_source.trigger.eq(reg_reset | (sig_ios != reg_ios)),
            io_event_source.data.eq(sig_ios),
        ]
|
[
"logging.getLogger",
"nmigen.compat.genlib.cdc.MultiReg"
] |
[((255, 282), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (272, 282), False, 'import logging\n'), ((2695, 2721), 'nmigen.compat.genlib.cdc.MultiReg', 'MultiReg', (['triple.i', 'sync_i'], {}), '(triple.i, sync_i)\n', (2703, 2721), False, 'from nmigen.compat.genlib.cdc import MultiReg\n')]
|
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unitests for automatic variable tracing."""
import unittest
import numpy as np
import jax.numpy as jn
import objax
from objax.zoo.dnnet import DNNet
# Module-level fixtures used by the tests to verify tracing of global names.
global_w = objax.TrainVar(jn.zeros(5))
global_b = objax.TrainVar(jn.zeros(1))
global_m = objax.nn.Sequential([objax.nn.Conv2D(2, 4, 3), objax.nn.BatchNorm2D(4)])
class TestTracing(unittest.TestCase):
    """Unit tests for automatic variable tracing via objax.util.find_used_variables.

    NOTE: local variable names in these tests are load-bearing — the traced
    dictionaries are keyed by the source-level names of the variables."""
    def test_function_global_vars(self):
        # Globals referenced inside a def are discovered under their names.
        def loss(x, y):
            pred = jn.dot(x, global_w.value) + global_b.value
            return 0.5 * ((y - pred) ** 2).mean()
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, {'global_w': global_w, 'global_b': global_b})
    def test_function_global_module(self):
        # A global module's variables are reported with its name as scope.
        def loss(x):
            return jn.sum(global_m(x, training=True))
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, global_m.vars(scope='global_m.'))
    def test_function_closure_vars(self):
        # Closure variables are discovered under their local names.
        w = objax.TrainVar(jn.zeros(5))
        b = objax.TrainVar(jn.zeros(1))
        def loss(x, y):
            pred = jn.dot(x, w.value) + b.value
            return 0.5 * ((y - pred) ** 2).mean()
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, {'w': w, 'b': b})
    def test_function_closure_module(self):
        m = objax.nn.Sequential([objax.nn.Conv2D(1, 2, 3), objax.nn.BatchNorm2D(2)])
        def loss(x):
            return jn.sum(m(x, training=True))
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, m.vars(scope='m.'))
    def test_lambda_with_closure_vars(self):
        # Tracing also works for lambdas, not just named functions.
        w = objax.TrainVar(jn.zeros(5))
        b = objax.TrainVar(jn.zeros(1))
        loss = lambda x, y: 0.5 * ((y - jn.dot(x, w.value) + b.value) ** 2).mean()
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, {'w': w, 'b': b})
    def test_multiline_lambda_with_closure_vars(self):
        # A lambda whose body spans several source lines is traced the same way.
        w = objax.TrainVar(jn.zeros(5))
        b = objax.TrainVar(jn.zeros(1))
        loss = lambda x, y: (
            0.5 * ((y - jn.dot(x, w.value) + b.value) ** 2).mean()
        )
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, {'w': w, 'b': b})
    def test_closure_overrides_global_vars(self):
        # Make sure that global variables are what we expect them to be
        np.testing.assert_allclose(global_w.value, np.zeros(5))
        np.testing.assert_allclose(global_b.value, np.zeros(1))
        def _do_test():
            # define local variable with the same name as existing global
            global_w = objax.TrainVar(jn.ones(10))
            # verify that global_w and global_b are what we expect them to be
            np.testing.assert_allclose(global_w.value, np.ones(10))
            np.testing.assert_allclose(global_b.value, np.zeros(1))
            # loss function which mixes closure vars, global vars and closure var hides global var
            def loss(x, y):
                pred = jn.dot(x, global_w.value) + global_b.value
                return 0.5 * ((y - pred) ** 2).mean()
            vc = objax.util.find_used_variables(loss)
            self.assertDictEqual(vc, {'global_w': global_w, 'global_b': global_b})
        _do_test()
        # Make sure that global variables didn't change, in other words
        # that _do_test operated on local variables
        np.testing.assert_allclose(global_w.value, np.zeros(5))
        np.testing.assert_allclose(global_b.value, np.zeros(1))
    def test_typical_training_loop(self):
        # Define model and optimizer
        model = DNNet((32, 10), objax.functional.leaky_relu)
        opt = objax.optimizer.Momentum(model.vars(), nesterov=True)
        # Predict op
        predict_op = lambda x: objax.functional.softmax(model(x, training=False))
        self.assertDictEqual(objax.util.find_used_variables(predict_op),
                             model.vars(scope='model.'))
        # Loss function
        def loss(x, label):
            logit = model(x, training=True)
            xe_loss = objax.functional.loss.cross_entropy_logits_sparse(logit, label).mean()
            return xe_loss
        self.assertDictEqual(objax.util.find_used_variables(loss),
                             model.vars(scope='model.'))
        # Gradients and loss function
        loss_gv = objax.GradValues(loss, objax.util.find_used_variables(loss))
        def train_op(x, y, learning_rate):
            grads, loss = loss_gv(x, y)
            opt(learning_rate, grads)
            return loss
        self.assertDictEqual(objax.util.find_used_variables(train_op),
                             {**model.vars(scope='loss_gv.model.'), **opt.vars(scope='opt.')})
    def test_lambda_inside_function(self):
        # Variables used only inside a nested lambda are still attributed
        # to the enclosing function.
        m = objax.nn.Sequential([objax.nn.Conv2D(1, 2, 3), objax.nn.BatchNorm2D(2)])
        def loss(x):
            get_logits = lambda inp: m(inp, training=True)
            return jn.sum(get_logits(x))
        vc = objax.util.find_used_variables(loss)
        self.assertDictEqual(vc, m.vars(scope='m.'))
# Run the test suite when executed as a script.
if __name__ == '__main__':
    unittest.main()
|
[
"unittest.main",
"jax.numpy.dot",
"objax.zoo.dnnet.DNNet",
"numpy.zeros",
"numpy.ones",
"objax.nn.Conv2D",
"objax.nn.BatchNorm2D",
"jax.numpy.ones",
"jax.numpy.zeros",
"objax.functional.loss.cross_entropy_logits_sparse",
"objax.util.find_used_variables"
] |
[((758, 769), 'jax.numpy.zeros', 'jn.zeros', (['(5)'], {}), '(5)\n', (766, 769), True, 'import jax.numpy as jn\n'), ((797, 808), 'jax.numpy.zeros', 'jn.zeros', (['(1)'], {}), '(1)\n', (805, 808), True, 'import jax.numpy as jn\n'), ((5718, 5733), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5731, 5733), False, 'import unittest\n'), ((843, 867), 'objax.nn.Conv2D', 'objax.nn.Conv2D', (['(2)', '(4)', '(3)'], {}), '(2, 4, 3)\n', (858, 867), False, 'import objax\n'), ((869, 892), 'objax.nn.BatchNorm2D', 'objax.nn.BatchNorm2D', (['(4)'], {}), '(4)\n', (889, 892), False, 'import objax\n'), ((1176, 1212), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (1206, 1212), False, 'import objax\n'), ((1425, 1461), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (1455, 1461), False, 'import objax\n'), ((1789, 1825), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (1819, 1825), False, 'import objax\n'), ((2090, 2126), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (2120, 2126), False, 'import objax\n'), ((2404, 2440), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (2434, 2440), False, 'import objax\n'), ((2750, 2786), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (2780, 2786), False, 'import objax\n'), ((4209, 4253), 'objax.zoo.dnnet.DNNet', 'DNNet', (['(32, 10)', 'objax.functional.leaky_relu'], {}), '((32, 10), objax.functional.leaky_relu)\n', (4214, 4253), False, 'from objax.zoo.dnnet import DNNet\n'), ((5595, 5631), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (5625, 5631), False, 'import objax\n'), ((1599, 1610), 'jax.numpy.zeros', 'jn.zeros', (['(5)'], {}), '(5)\n', (1607, 1610), True, 'import jax.numpy as jn\n'), ((1639, 1650), 
'jax.numpy.zeros', 'jn.zeros', (['(1)'], {}), '(1)\n', (1647, 1650), True, 'import jax.numpy as jn\n'), ((2253, 2264), 'jax.numpy.zeros', 'jn.zeros', (['(5)'], {}), '(5)\n', (2261, 2264), True, 'import jax.numpy as jn\n'), ((2293, 2304), 'jax.numpy.zeros', 'jn.zeros', (['(1)'], {}), '(1)\n', (2301, 2304), True, 'import jax.numpy as jn\n'), ((2575, 2586), 'jax.numpy.zeros', 'jn.zeros', (['(5)'], {}), '(5)\n', (2583, 2586), True, 'import jax.numpy as jn\n'), ((2615, 2626), 'jax.numpy.zeros', 'jn.zeros', (['(1)'], {}), '(1)\n', (2623, 2626), True, 'import jax.numpy as jn\n'), ((3012, 3023), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (3020, 3023), True, 'import numpy as np\n'), ((3076, 3087), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3084, 3087), True, 'import numpy as np\n'), ((3720, 3756), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (3750, 3756), False, 'import objax\n'), ((4036, 4047), 'numpy.zeros', 'np.zeros', (['(5)'], {}), '(5)\n', (4044, 4047), True, 'import numpy as np\n'), ((4100, 4111), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (4108, 4111), True, 'import numpy as np\n'), ((4456, 4498), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['predict_op'], {}), '(predict_op)\n', (4486, 4498), False, 'import objax\n'), ((4804, 4840), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (4834, 4840), False, 'import objax\n'), ((4979, 5015), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['loss'], {}), '(loss)\n', (5009, 5015), False, 'import objax\n'), ((5193, 5233), 'objax.util.find_used_variables', 'objax.util.find_used_variables', (['train_op'], {}), '(train_op)\n', (5223, 5233), False, 'import objax\n'), ((1069, 1094), 'jax.numpy.dot', 'jn.dot', (['x', 'global_w.value'], {}), '(x, global_w.value)\n', (1075, 1094), True, 'import jax.numpy as jn\n'), ((1696, 1714), 'jax.numpy.dot', 'jn.dot', (['x', 
'w.value'], {}), '(x, w.value)\n', (1702, 1714), True, 'import jax.numpy as jn\n'), ((1955, 1979), 'objax.nn.Conv2D', 'objax.nn.Conv2D', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (1970, 1979), False, 'import objax\n'), ((1981, 2004), 'objax.nn.BatchNorm2D', 'objax.nn.BatchNorm2D', (['(2)'], {}), '(2)\n', (2001, 2004), False, 'import objax\n'), ((3226, 3237), 'jax.numpy.ones', 'jn.ones', (['(10)'], {}), '(10)\n', (3233, 3237), True, 'import jax.numpy as jn\n'), ((3373, 3384), 'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (3380, 3384), True, 'import numpy as np\n'), ((3441, 3452), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (3449, 3452), True, 'import numpy as np\n'), ((5407, 5431), 'objax.nn.Conv2D', 'objax.nn.Conv2D', (['(1)', '(2)', '(3)'], {}), '(1, 2, 3)\n', (5422, 5431), False, 'import objax\n'), ((5433, 5456), 'objax.nn.BatchNorm2D', 'objax.nn.BatchNorm2D', (['(2)'], {}), '(2)\n', (5453, 5456), False, 'import objax\n'), ((3605, 3630), 'jax.numpy.dot', 'jn.dot', (['x', 'global_w.value'], {}), '(x, global_w.value)\n', (3611, 3630), True, 'import jax.numpy as jn\n'), ((4676, 4739), 'objax.functional.loss.cross_entropy_logits_sparse', 'objax.functional.loss.cross_entropy_logits_sparse', (['logit', 'label'], {}), '(logit, label)\n', (4725, 4739), False, 'import objax\n'), ((2347, 2365), 'jax.numpy.dot', 'jn.dot', (['x', 'w.value'], {}), '(x, w.value)\n', (2353, 2365), True, 'import jax.numpy as jn\n'), ((2683, 2701), 'jax.numpy.dot', 'jn.dot', (['x', 'w.value'], {}), '(x, w.value)\n', (2689, 2701), True, 'import jax.numpy as jn\n')]
|
import binascii
import os
import struct
# https://github.com/Yelp/py_zipkin/blob/
# 7937ca859f8ae1f1009ab69fd1ddcd8fc33f1dad/py_zipkin/util.py#L1-L54
def generate_random_64bit_string() -> str:
    """Return a random 64-bit value as 16 lowercase hex characters.

    Used as a Zipkin span/trace identifier.

    :returns: random 16-character hex string
    """
    # bytes.hex() (Python 3.5+) replaces the old
    # binascii.hexlify(...).decode('utf-8') dance; the str() cast and the
    # Python-2 unicode caveat in the original docstring are obsolete.
    return os.urandom(8).hex()
def generate_random_128bit_string() -> str:
    """Return a random 128-bit value as 32 lowercase hex characters.

    Follows the same conventions as generate_random_64bit_string().

    :returns: random 32-character hex string
    """
    # bytes.hex() (Python 3.5+) replaces the binascii.hexlify/decode idiom.
    return os.urandom(16).hex()
def unsigned_hex_to_signed_int(hex_string: str) -> int:
    """Convert a 64-bit hex string to a signed int value.

    This is due to the fact that Apache Thrift only has signed values.

    Examples:
        '17133d482ba4f605' => 1662740067609015813
        'b6dbb1c2b362bf51' => -5270423489115668655

    :param hex_string: the string representation of a zipkin ID
    :returns: signed int representation
    """
    # Reinterpret the unsigned 64-bit value as a signed one by round-tripping
    # the raw bytes through struct: pack unsigned ('Q'), unpack signed ('q').
    unsigned_value = int(hex_string, 16)
    raw_bytes = struct.pack('Q', unsigned_value)
    (signed_value,) = struct.unpack('q', raw_bytes)
    return signed_value
def signed_int_to_unsigned_hex(signed_int: int) -> str:
    """Convert a signed int value to a 64-bit hex string.

    Examples:
        1662740067609015813 => '17133d482ba4f605'
        -5270423489115668655 => 'b6dbb1c2b362bf51'

    :param signed_int: an int to convert (must fit in a signed 64-bit range,
        otherwise struct raises struct.error)
    :returns: unsigned hex string, without leading zeros
    """
    # pack('q') validates the 64-bit range; unpack('Q') reinterprets the same
    # bytes as unsigned.  format(..., 'x') is equivalent to hex(...)[2:].
    # The previous code also stripped a trailing 'L', which only Python 2's
    # hex() of a long could produce — that branch was dead on Python 3.
    (unsigned,) = struct.unpack('Q', struct.pack('q', signed_int))
    return format(unsigned, 'x')
|
[
"os.urandom",
"struct.pack"
] |
[((512, 525), 'os.urandom', 'os.urandom', (['(8)'], {}), '(8)\n', (522, 525), False, 'import os\n'), ((787, 801), 'os.urandom', 'os.urandom', (['(16)'], {}), '(16)\n', (797, 801), False, 'import os\n'), ((1668, 1696), 'struct.pack', 'struct.pack', (['"""q"""', 'signed_int'], {}), "('q', signed_int)\n", (1679, 1696), False, 'import struct\n')]
|
# This file is part of OctoBot (https://github.com/Drakkar-Software/OctoBot)
# Copyright (c) 2021 Drakkar-Software, All rights reserved.
#
# OctoBot is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either
# version 3.0 of the License, or (at your option) any later version.
#
# OctoBot is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with OctoBot. If not, see <https://www.gnu.org/licenses/>.
import octobot_backtesting.api as backtesting_api
import octobot_commons.enums as common_enums
import octobot_services.api as service_api
import octobot_services.octobot_channel_consumer as service_channel_consumer
import octobot_tentacles_manager.api as tentacles_manager_api
import octobot.channels as octobot_channels
import octobot.constants as constants
class ServiceFeedProducer(octobot_channels.OctoBotChannelProducer):
    """ServiceFeedProducer class:
    - Creates and starts service feeds by publishing messages on the OctoBot channel
    """
    def __init__(self, channel, octobot):
        super().__init__(channel)
        self.octobot = octobot
        # True once start_feeds() has been called.
        self.started = False
        # Feed instances registered back by the channel consumer.
        self.service_feeds = []
    async def start(self):
        """Create every available service feed whose tentacle is activated."""
        in_backtesting = backtesting_api.is_backtesting_enabled(self.octobot.config)
        service_feed_factory = service_api.create_service_feed_factory(self.octobot.config,
                                                                      self.octobot.async_loop,
                                                                      self.octobot.bot_id)
        for feed in service_feed_factory.get_available_service_feeds(in_backtesting):
            # Only create feeds whose tentacle is enabled in the current setup.
            if tentacles_manager_api.is_tentacle_activated_in_tentacles_setup_config(
                    self.octobot.tentacles_setup_config, feed.get_name()):
                await self.create_feed(service_feed_factory, feed, in_backtesting)
    async def start_feeds(self):
        """Ask the consumer (via channel UPDATE messages) to start each created feed."""
        self.started = True
        for feed in self.service_feeds:
            await self.send(bot_id=self.octobot.bot_id,
                            subject=common_enums.OctoBotChannelSubjects.UPDATE.value,
                            action=service_channel_consumer.OctoBotChannelServiceActions.START_SERVICE_FEED.value,
                            data={
                                service_channel_consumer.OctoBotChannelServiceDataKeys.INSTANCE.value: feed,
                                service_channel_consumer.OctoBotChannelServiceDataKeys.EDITED_CONFIG.value:
                                    self.octobot.get_edited_config(constants.CONFIG_KEY, dict_only=False)
                            })
    async def create_feed(self, service_feed_factory, feed, in_backtesting):
        """Publish a CREATION message so the consumer instantiates the feed."""
        await self.send(bot_id=self.octobot.bot_id,
                        subject=common_enums.OctoBotChannelSubjects.CREATION.value,
                        action=service_channel_consumer.OctoBotChannelServiceActions.SERVICE_FEED.value,
                        data={
                            service_channel_consumer.OctoBotChannelServiceDataKeys.EDITED_CONFIG.value:
                                self.octobot.get_edited_config(constants.CONFIG_KEY, dict_only=False),
                            service_channel_consumer.OctoBotChannelServiceDataKeys.BACKTESTING_ENABLED.value:
                                in_backtesting,
                            service_channel_consumer.OctoBotChannelServiceDataKeys.CLASS.value: feed,
                            service_channel_consumer.OctoBotChannelServiceDataKeys.FACTORY.value: service_feed_factory
                        })
    async def register_service_feed(self, instance):
        # Called back by the channel consumer once a feed has been created.
        self.service_feeds.append(instance)
    async def stop(self):
        """Stop every registered service feed."""
        for service_feed in self.service_feeds:
            await service_api.stop_service_feed(service_feed)
|
[
"octobot_services.api.create_service_feed_factory",
"octobot_backtesting.api.is_backtesting_enabled",
"octobot_services.api.stop_service_feed"
] |
[((1494, 1553), 'octobot_backtesting.api.is_backtesting_enabled', 'backtesting_api.is_backtesting_enabled', (['self.octobot.config'], {}), '(self.octobot.config)\n', (1532, 1553), True, 'import octobot_backtesting.api as backtesting_api\n'), ((1585, 1696), 'octobot_services.api.create_service_feed_factory', 'service_api.create_service_feed_factory', (['self.octobot.config', 'self.octobot.async_loop', 'self.octobot.bot_id'], {}), '(self.octobot.config, self.octobot.\n async_loop, self.octobot.bot_id)\n', (1624, 1696), True, 'import octobot_services.api as service_api\n'), ((4066, 4109), 'octobot_services.api.stop_service_feed', 'service_api.stop_service_feed', (['service_feed'], {}), '(service_feed)\n', (4095, 4109), True, 'import octobot_services.api as service_api\n')]
|
# -*- coding: utf-8 -*-
from BaseOperator import BaseOperator
import yaml
import os
class BaseFPGAOperator(BaseOperator):
    """Base class for FPGA-backed operators that load a kernel configuration
    from ``$MYSTR_HOME/conf/<kernel_conf_file>`` if that file exists."""
    def __init__(self, name, kernel_conf_file):
        """Initialize the operator.

        :param name: operator name, forwarded to BaseOperator
        :param kernel_conf_file: YAML file name (relative to $MYSTR_HOME/conf/)
        """
        super().__init__(name)
        self.kernel_interfaces = {}
        home = os.environ['MYSTR_HOME']
        conf_file_path = os.path.join(home, 'conf', kernel_conf_file)
        if os.path.exists(conf_file_path):
            # Use a context manager so the handle is always closed, reuse the
            # path computed above instead of rebuilding it, and use safe_load:
            # yaml.load without an explicit Loader is deprecated and allows
            # arbitrary object construction.
            with open(conf_file_path, 'r') as f:
                self.kernel_conf = yaml.safe_load(f)
|
[
"yaml.load",
"os.path.exists"
] |
[((353, 383), 'os.path.exists', 'os.path.exists', (['conf_file_path'], {}), '(conf_file_path)\n', (367, 383), False, 'import os\n'), ((478, 490), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (487, 490), False, 'import yaml\n')]
|
import os
import random
import shlex
import signal
import string
import sys
from subprocess import call, Popen, PIPE
class PersistentSSHConnection(object):
    '''This class wraps a master ssh process and allows repeated command
    execution on the same ssh connection without reentering a passphrase.
    The master connection is performed using the -M and -o ControlPath ssh
    arguments. All ssh child processes are killed and the socket file object
    is deleted upon *close()* or garbage collection.'''
    def __init__(self, username, hostname, ssh_exec='/usr/bin/ssh', scp_exec='/usr/bin/scp'):
        # string.letters was removed in Python 3; ascii_letters exists on 2 and 3.
        noise = ''.join(random.sample(string.ascii_letters, 4))
        self._socket_fn = '/tmp/master-%s@%s:22-%s' % (username, hostname, noise)
        self._master_ssh_opts = '-f -MN -o ControlPath=%s -o ControlMaster=no -o ServerAliveInterval=900' % self._socket_fn
        self._slave_ssh_opts = '-S %s' % self._socket_fn
        self._slave_scp_opts = '-r -o ControlPath=%s' % self._socket_fn
        self._ssh_exec = ssh_exec
        self._scp_exec = scp_exec
        cmd = [ssh_exec] + shlex.split(self._master_ssh_opts) + ['%s@%s' % (username, hostname)]
        self._master_ssh_cmd = ' '.join(cmd)
        self._master_proc = Popen(cmd, stdin=PIPE)
        self._master_proc.communicate()
        self.username = username
        self.hostname = hostname
    def needs_socket(fn):
        # Decorator: refuse to run the wrapped method once the control socket
        # file has disappeared (i.e. after close()).
        def f(self, *args):
            if not os.path.exists(self._socket_fn):
                raise Exception('SSH socket file cannot be found, will not send command')
            return fn(self, *args)
        return f
    @needs_socket
    def send_cmd(self, cmd):
        '''Send an ssh command over the persistent connection and return the
        stdout and stderr of the command execution.'''
        ssh_cmd = [self._ssh_exec] + shlex.split(self._slave_ssh_opts) + ['%s@%s' % (self.username, self.hostname), cmd]
        # universal_newlines=True makes communicate() return str on Python 3,
        # which fetch_file below compares against str literals.
        p = Popen(ssh_cmd, stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = p.communicate()
        return stdout, stderr
    @needs_socket
    def fetch_file(self, path, lpath=None, recurse=False):
        '''Call scp and copy path from remote host to local filesystem with the
        same name. If *lpath* is specified the local file will be named *lpath*.
        If the remote path is a directory, mkdir the directory locally if it does
        not already exist. If *recurse* is True, directories will be downloaded
        as well as created.'''
        scp_local = lpath or path
        # Assume failure until an operation succeeds; previously this could
        # raise NameError when mkdir failed and no copy was attempted.
        r = 1
        # check if remote path is directory
        stdout, stderr = self.send_cmd('test -d %s && echo directory || echo file' % path)
        if stdout.strip() == 'directory':
            try:
                os.mkdir(scp_local)
                r = 0
            except OSError as e:
                print(e)
        if stdout.strip() == 'file' or recurse:
            scp_remote = '%s@%s:%s' % (self.username, self.hostname, path)
            scp_cmd = [self._scp_exec] + shlex.split(self._slave_scp_opts) + [scp_remote, scp_local]
            r = call(scp_cmd)
        return r
    def __del__(self):
        try:
            self.close()
        except Exception:
            pass
    def close(self):
        '''Close the connection and clean up'''
        # find and kill the forked ssh process
        p = Popen('pgrep -f "^%s$"' % self._master_ssh_cmd, shell=True,
                  stdout=PIPE, stderr=PIPE, universal_newlines=True)
        stdout, stderr = p.communicate()
        if stdout is not None and stdout != '':
            for pid in stdout.strip().split():
                try:
                    # signal is now imported at module level; the original
                    # referenced it without importing it at all.
                    os.kill(int(pid), signal.SIGKILL)
                except Exception as e:
                    sys.stderr.write('killing forked process %s didnt work: %s' % (pid, e))
        # remove the socket file
        try:
            os.remove(self._socket_fn)
        except OSError:  # eh, whatever
            pass
if __name__ == '__main__':
    # input() replaces Python 2's raw_input(), which no longer exists.
    username = input('enter username: ')
    hostname = input('enter hostname: ')
    conn = PersistentSSHConnection(username, hostname)
    print('connected to %s, send ssh commands and watch yourself' % hostname)
    while True:
        cmd = input('%s@%s ] ' % (username, hostname))
        if cmd.strip() == 'exit':
            break
        stdout, stderr = conn.send_cmd(cmd)
        print('stdout:')
        print(stdout)
        print('stderr:')
        print(stderr)
|
[
"os.mkdir",
"subprocess.Popen",
"os.remove",
"random.sample",
"shlex.split",
"os.path.exists",
"subprocess.call",
"sys.stderr.write"
] |
[((1209, 1231), 'subprocess.Popen', 'Popen', (['cmd'], {'stdin': 'PIPE'}), '(cmd, stdin=PIPE)\n', (1214, 1231), False, 'from subprocess import call, Popen, PIPE\n'), ((1892, 1932), 'subprocess.Popen', 'Popen', (['ssh_cmd'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(ssh_cmd, stdout=PIPE, stderr=PIPE)\n', (1897, 1932), False, 'from subprocess import call, Popen, PIPE\n'), ((3278, 3367), 'subprocess.Popen', 'Popen', (['(\'pgrep -f "^%s$"\' % self._master_ssh_cmd)'], {'shell': '(True)', 'stdout': 'PIPE', 'stderr': 'PIPE'}), '(\'pgrep -f "^%s$"\' % self._master_ssh_cmd, shell=True, stdout=PIPE,\n stderr=PIPE)\n', (3283, 3367), False, 'from subprocess import call, Popen, PIPE\n'), ((618, 650), 'random.sample', 'random.sample', (['string.letters', '(4)'], {}), '(string.letters, 4)\n', (631, 650), False, 'import random\n'), ((3017, 3030), 'subprocess.call', 'call', (['scp_cmd'], {}), '(scp_cmd)\n', (3021, 3030), False, 'from subprocess import call, Popen, PIPE\n'), ((3760, 3786), 'os.remove', 'os.remove', (['self._socket_fn'], {}), '(self._socket_fn)\n', (3769, 3786), False, 'import os\n'), ((1070, 1104), 'shlex.split', 'shlex.split', (['self._master_ssh_opts'], {}), '(self._master_ssh_opts)\n', (1081, 1104), False, 'import shlex\n'), ((1412, 1443), 'os.path.exists', 'os.path.exists', (['self._socket_fn'], {}), '(self._socket_fn)\n', (1426, 1443), False, 'import os\n'), ((1802, 1835), 'shlex.split', 'shlex.split', (['self._slave_ssh_opts'], {}), '(self._slave_ssh_opts)\n', (1813, 1835), False, 'import shlex\n'), ((2680, 2699), 'os.mkdir', 'os.mkdir', (['scp_local'], {}), '(scp_local)\n', (2688, 2699), False, 'import os\n'), ((2944, 2977), 'shlex.split', 'shlex.split', (['self._slave_scp_opts'], {}), '(self._slave_scp_opts)\n', (2955, 2977), False, 'import shlex\n'), ((3631, 3702), 'sys.stderr.write', 'sys.stderr.write', (["('killing forked process %s didnt work: %s' % (pid, e))"], {}), "('killing forked process %s didnt work: %s' % (pid, e))\n", (3647, 3702), False, 'import 
sys\n')]
|
import json
import sys
import os
from tqdm import tqdm
from mdf_refinery.parsers.tab_parser import parse_tab
from mdf_refinery.validator import Validator
# VERSION 0.3.0
# This is the converter for: Dataset for "Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest"
# Arguments:
# input_path (string): The file or directory where the data resides.
# NOTE: Do not hard-code the path to the data in the converter (the filename can be hard-coded, though). The converter should be portable.
# metadata (string or dict): The path to the JSON dataset metadata file, a dict or json.dumps string containing the dataset metadata, or None to specify the metadata here. Default None.
# verbose (bool): Should the script print status messages to standard output? Default False.
# NOTE: The converter should have NO output if verbose is False, unless there is an error.
def convert(input_path, metadata=None, verbose=False):
    """Convert the carbonyl sulfide fluxes dataset into MDF feedstock.

    Arguments:
    input_path (string): The file or directory where the data resides.
    metadata (string or dict): The path to the JSON dataset metadata file,
        a dict or json.dumps string containing the dataset metadata, or
        None to use the metadata defined inline below. Default None.
    verbose (bool): Should the script print status messages to standard
        output? Default False.
    """
    if verbose:
        print("Begin converting")
    # Collect the metadata
    # NOTE: For fields that represent people (e.g. mdf-data_contact), other IDs can be added (ex. "github": "jgaff").
    # It is recommended that all people listed in mdf-data_contributor have a github username listed.
    #
    # If there are other useful fields not covered here, another block (dictionary at the same level as "mdf") can be created for those fields.
    # The block must be called the same thing as the source_name for the dataset.
    if not metadata:
        ## Metadata:dataset
        dataset_metadata = {
            "mdf": {
                "title": "Dataset for \"Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest\"",
                "acl": ["public"],
                "source_name": "carbonyl_sulfide_fluxes",
                "data_contact": {
                    "given_name": "Huilin",
                    "family_name": "Chen",
                    "email": "<EMAIL>",
                    "institution": "University of Groningen, University of Colorado"
                },
                "data_contributor": [{
                    "given_name": "Evan",
                    "family_name": "Pike",
                    "email": "<EMAIL>",
                    "institution": "The University of Chicago",
                    "github": "dep78",
                }],
                "citation": ["<NAME>. Kooijmans, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, … <NAME>. (2017). Dataset for \"Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest\" [Data set]. Zenodo. http://doi.org/10.5281/zenodo.580303"],
                "author": [{
                    "given_name": "<NAME>.",
                    "family_name": "Kooijmans",
                    "institution": "University of Groningen",
                },
                {
                    "given_name": "Kadmiel",
                    "family_name": "Maseyk",
                    "institution": "The Open University",
                },
                {
                    "given_name": "Ulli",
                    "family_name": "Seibt",
                    "institution": "University of California",
                },
                {
                    "given_name": "Wu",
                    "family_name": "Sun",
                    "institution": "University of California",
                },
                {
                    "given_name": "Timo",
                    "family_name": "Vesala",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Ivan",
                    "family_name": "Mammarella",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Pasi",
                    "family_name": "Kolari",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Juho",
                    "family_name": "Aalto",
                    "institution": "University of Helsinki",
                },
                {
                    "given_name": "Alessandro",
                    "family_name": "Franchin",
                    "institution": "University of Helsinki, University of Colorado",
                },
                {
                    "given_name": "Roberta",
                    "family_name": "Vecchi",
                    "institution": "University of Milan",
                },
                {
                    "given_name": "Gianluigi",
                    "family_name": "Valli",
                    "institution": "University of Milan",
                },
                {
                    "given_name": "Huilin",
                    "family_name": "Chen",
                    "email": "<EMAIL>",
                    "institution": "University of Groningen, University of Colorado",
                }],
                "license": "https://creativecommons.org/licenses/by/4.0/",
                "collection": "Carbonyl Sulfide Fluxes",
                #"tags": [""],
                "description": "Nighttime averaged ecosystem fluxes of COS and CO2 obtained through the radon-tracer and eddy-covariance method as presented in \"Canopy uptake dominates nighttime carbonyl sulfide fluxes in a boreal forest\" submitted to Atmospheric Chemistry and Physics.",
                "year": 2017,
                "links": {
                    "landing_page": "https://doi.org/10.5281/zenodo.580303",
                    "publication": ["https://www.atmos-chem-phys-discuss.net/acp-2017-407/"],
                    #"data_doi": "",
                    #"related_id": "",
                    "txt": {
                        #"globus_endpoint": ,
                        "http_host": "https://zenodo.org",
                        "path": "/record/580303/files/Kooijmans_et_al_2017_ACPD_20170516.txt",
                        },
                    },
                },
            #"mrr": {
                #},
            #"dc": {
                #},
            }
        ## End metadata
    elif type(metadata) is str:
        try:
            dataset_metadata = json.loads(metadata)
        except Exception:
            try:
                with open(metadata, 'r') as metadata_file:
                    dataset_metadata = json.load(metadata_file)
            except Exception as e:
                sys.exit("Error: Unable to read metadata: " + repr(e))
    elif type(metadata) is dict:
        dataset_metadata = metadata
    else:
        sys.exit("Error: Invalid metadata parameter")
    # Make a Validator to help write the feedstock
    # You must pass the metadata to the constructor
    # Each Validator instance can only be used for a single dataset
    # If the metadata is incorrect, the constructor will throw an exception and the program will exit
    dataset_validator = Validator(dataset_metadata)
    # Get the data
    # Each record should be exactly one dictionary
    # You must write your records using the Validator one at a time
    # It is recommended that you use a parser to help with this process if one is available for your datatype
    # Each record also needs its own metadata
    with open(os.path.join(input_path, "Kooijmans_et_al_2017_ACPD_20170516.txt"), "r") as raw_in:
        data = raw_in.read()
    # The second double-newline-separated section of the file is a free-text
    # description that is reused verbatim in each record's metadata.
    description = "".join(data.split("\n\n")[1:2])
    # Records start after the last run of '#' separator characters.
    start = "##########################################\n"
    for line in tqdm(parse_tab(data.split(start)[-1], sep=","), desc="Processing Data", disable=not verbose):
        ## Metadata:record
        record_metadata = {
            "mdf": {
                "title": "Carbonyl Sulfide Fluxes doy: " + line["doy"],
                "acl": ["public"],
                #"composition": ,
                #"tags": ,
                "description": description,
                "raw": json.dumps(line),
                "links": {
                    #"landing_page": ,
                    #"publication": ,
                    #"data_doi": ,
                    #"related_id": ,
                    "txt": {
                        "globus_endpoint": "82f1b5c6-6e9b-11e5-ba47-22000b92c6ec",
                        "http_host": "https://data.materialsdatafacility.org",
                        "path": "/collections/carbonyl_sulfide_fluxes/Kooijmans_et_al_2017_ACPD_20170516.txt",
                        },
                    },
                #"citation": ,
                #"data_contact": {
                    #"given_name": ,
                    #"family_name": ,
                    #"email": ,
                    #"institution": ,
                    #},
                #"author": [{
                    #"given_name": ,
                    #"family_name": ,
                    #"email": ,
                    #"institution": ,
                    #}],
                #"year": ,
                },
            #"dc": {
                #},
            }
        ## End metadata
        # Pass each individual record to the Validator
        result = dataset_validator.write_record(record_metadata)
        # Check if the Validator accepted the record, and stop processing if it didn't
        # If the Validator returns "success" == True, the record was written successfully
        if not result["success"]:
            if not dataset_validator.cancel_validation()["success"]:
                print("Error cancelling validation. The partial feedstock may not be removed.")
            raise ValueError(result["message"] + "\n" + result.get("details", ""))
    # You're done!
    if verbose:
        print("Finished converting")
|
[
"mdf_refinery.validator.Validator",
"json.load",
"json.loads",
"json.dumps",
"os.path.join",
"sys.exit"
] |
[((6955, 6982), 'mdf_refinery.validator.Validator', 'Validator', (['dataset_metadata'], {}), '(dataset_metadata)\n', (6964, 6982), False, 'from mdf_refinery.validator import Validator\n'), ((7305, 7371), 'os.path.join', 'os.path.join', (['input_path', '"""Kooijmans_et_al_2017_ACPD_20170516.txt"""'], {}), "(input_path, 'Kooijmans_et_al_2017_ACPD_20170516.txt')\n", (7317, 7371), False, 'import os\n'), ((6229, 6249), 'json.loads', 'json.loads', (['metadata'], {}), '(metadata)\n', (6239, 6249), False, 'import json\n'), ((6609, 6654), 'sys.exit', 'sys.exit', (['"""Error: Invalid metadata parameter"""'], {}), "('Error: Invalid metadata parameter')\n", (6617, 6654), False, 'import sys\n'), ((7951, 7967), 'json.dumps', 'json.dumps', (['line'], {}), '(line)\n', (7961, 7967), False, 'import json\n'), ((6391, 6415), 'json.load', 'json.load', (['metadata_file'], {}), '(metadata_file)\n', (6400, 6415), False, 'import json\n')]
|
import numpy as np
from ..testing_utils import DummyConverter, DummyLoad, DummyNoise, DummyOdeSolver, DummyVoltageSupply, DummyElectricMotor,\
mock_instantiate, instantiate_dict
from gym_electric_motor.physical_systems import physical_systems as ps, converters as cv, electric_motors as em,\
mechanical_loads as ml, voltage_supplies as vs, solvers as sv
from gym.spaces import Box
import pytest
class TestSCMLSystem:
"""
Base Class to test all PhysicalSystems that derive from SCMLSystem
"""
class_to_test = ps.SCMLSystem
def mock_build_state(self, motor_state, torque, u_in, u_sup):
"""Function to mock an arbitrary build_state function to test the SCMLSystem
"""
self.motor_state = motor_state
self.torque = torque
self.u_in = u_in
self.u_sup = u_sup
return np.concatenate((
self.motor_state[:len(DummyLoad.state_names)], [torque],
self.motor_state[len(DummyLoad.state_names):], [u_sup]
))
@pytest.fixture
def scml_system(self, monkeypatch):
"""
Returns an instantiated SCMLSystem with Dummy Components and mocked abstract functions
"""
monkeypatch.setattr(
self.class_to_test,
'_build_state_names',
lambda _:
DummyLoad.state_names + ['torque'] + DummyElectricMotor.CURRENTS + DummyElectricMotor.VOLTAGES + ['u_sup']
)
monkeypatch.setattr(
self.class_to_test,
'_build_state_space',
lambda _, state_names: Box(
low=np.zeros_like(state_names, dtype=float),
high=np.zeros_like(state_names, dtype=float)
)
)
return self.class_to_test(
converter=DummyConverter(),
motor=DummyElectricMotor(),
load=DummyLoad(),
supply=DummyVoltageSupply(),
ode_solver=DummyOdeSolver(),
noise_generator=DummyNoise()
)
def test_reset(self, scml_system):
"""Test the reset function in the physical system"""
scml_system._t = 12
scml_system._k = 33
state_space = scml_system.state_space
state_positions = scml_system.state_positions
initial_state = scml_system.reset()
target = (np.array([0, 0, 0, 0, 0, 0, 560]) + scml_system._noise_generator.reset()) / scml_system.limits
assert np.all(initial_state == target), 'Initial states of the system are incorrect'
assert scml_system._t == 0, 'Time of the system was not set to zero after reset'
assert scml_system._k == 0, 'Episode step of the system was not set to zero after reset'
assert scml_system.converter.reset_counter == scml_system.electrical_motor.reset_counter \
== scml_system.mechanical_load.reset_counter == scml_system.supply.reset_counter,\
'The reset was not passed to all components of the SCMLSystem'
assert scml_system._ode_solver.t == 0, 'The ode solver was not reset correctly'
assert all(scml_system._ode_solver.y == np.zeros_like(
scml_system.mechanical_load.state_names + scml_system.electrical_motor.CURRENTS, dtype=float
)), ' The ode solver was not reset correctly'
def test_system_equation(self, scml_system):
"""Tests the system equation function"""
state = np.random.rand(4)
currents = state[[2, 3]]
torque = scml_system.electrical_motor.torque(currents)
u_in = np.random.rand(2)
t = np.random.rand()
derivative = scml_system._system_equation(t, state, u_in)
assert all(
derivative == np.array([torque, -torque, currents[0] - u_in[0], currents[1] - u_in[1]])
), 'The system equation return differs from the expected'
assert scml_system.mechanical_load.t == t, 'The time t was not passed through to the mech. load equation'
assert np.all(scml_system.mechanical_load.mechanical_state == state[:2]),\
'The mech. state was not returned correctly'
def test_simulate(self, scml_system):
"""Test the simulation function of the SCMLSystem"""
# Reset the system and take a random action
scml_system.reset()
action = scml_system.action_space.sample()
# Set a defined intitial state
ode_state = np.array([3, 4, 5, 6])
scml_system._ode_solver.set_initial_value(ode_state)
# Perform the action on the system
next_state = scml_system.simulate(action)
solver_state_me = scml_system._ode_solver.y[:len(DummyLoad.state_names)]
solver_state_el = scml_system._ode_solver.y[len(DummyLoad.state_names):]
torque = [scml_system.electrical_motor.torque(solver_state_el)]
u_sup = [scml_system.supply.u_nominal]
u_in = [u * u_sup[0] for u in scml_system.converter.u_in]
# Calculate the next state
desired_next_state = (
np.concatenate((solver_state_me, torque, solver_state_el, u_in, u_sup))
+ scml_system._noise_generator.noise()
) / scml_system.limits
# Assertions for correct simulation
assert all(desired_next_state == next_state), 'The calculated next state differs from the expected one'
assert scml_system.converter.action == action, 'The action was not passed correctly to the converter'
assert scml_system.converter.action_set_time == 0, 'The action start time was passed incorrect to the converter'
assert scml_system.converter.last_i_out == scml_system.electrical_motor.i_in(scml_system._ode_solver.last_y[2:])
def test_system_jacobian(self, scml_system):
"""Tests for the system jacobian function"""
el_jac = np.arange(4).reshape(2, 2)
el_over_omega = np.arange(4, 6)
torque_over_el = np.arange(6, 8)
# Set the el. jacobian returns to specified values
scml_system.electrical_motor.electrical_jac_return = (el_jac, el_over_omega, torque_over_el)
me_jac = np.arange(8, 12).reshape(2, 2)
me_over_torque = np.arange(12, 14)
# Set the mech. jabobian returns to specified values
scml_system.mechanical_load.mechanical_jac_return = me_jac, me_over_torque
sys_jac = scml_system._system_jacobian(0, np.array([0, 1, 2, 3]), [0, -1])
#
assert np.all(sys_jac[-2:, -2:] == el_jac), 'The el. jacobian is false'
assert np.all(sys_jac[:2, :2] == me_jac), 'The mech. jacobian is false'
assert np.all(sys_jac[2:, 0] == el_over_omega), 'the derivative of the el.state over omega is false'
assert np.all(sys_jac[2:, 1] == np.zeros(2))
assert np.all(sys_jac[:-2, 2:] == np.array([[72, 84], [78, 91]])), 'The derivative of the mech.state ' \
'over the currents is false'
|
[
"numpy.zeros_like",
"numpy.concatenate",
"numpy.zeros",
"numpy.array",
"numpy.arange",
"numpy.random.rand",
"numpy.all"
] |
[((2433, 2464), 'numpy.all', 'np.all', (['(initial_state == target)'], {}), '(initial_state == target)\n', (2439, 2464), True, 'import numpy as np\n'), ((3391, 3408), 'numpy.random.rand', 'np.random.rand', (['(4)'], {}), '(4)\n', (3405, 3408), True, 'import numpy as np\n'), ((3520, 3537), 'numpy.random.rand', 'np.random.rand', (['(2)'], {}), '(2)\n', (3534, 3537), True, 'import numpy as np\n'), ((3550, 3566), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (3564, 3566), True, 'import numpy as np\n'), ((3948, 4013), 'numpy.all', 'np.all', (['(scml_system.mechanical_load.mechanical_state == state[:2])'], {}), '(scml_system.mechanical_load.mechanical_state == state[:2])\n', (3954, 4013), True, 'import numpy as np\n'), ((4368, 4390), 'numpy.array', 'np.array', (['[3, 4, 5, 6]'], {}), '([3, 4, 5, 6])\n', (4376, 4390), True, 'import numpy as np\n'), ((5805, 5820), 'numpy.arange', 'np.arange', (['(4)', '(6)'], {}), '(4, 6)\n', (5814, 5820), True, 'import numpy as np\n'), ((5846, 5861), 'numpy.arange', 'np.arange', (['(6)', '(8)'], {}), '(6, 8)\n', (5855, 5861), True, 'import numpy as np\n'), ((6095, 6112), 'numpy.arange', 'np.arange', (['(12)', '(14)'], {}), '(12, 14)\n', (6104, 6112), True, 'import numpy as np\n'), ((6366, 6401), 'numpy.all', 'np.all', (['(sys_jac[-2:, -2:] == el_jac)'], {}), '(sys_jac[-2:, -2:] == el_jac)\n', (6372, 6401), True, 'import numpy as np\n'), ((6446, 6479), 'numpy.all', 'np.all', (['(sys_jac[:2, :2] == me_jac)'], {}), '(sys_jac[:2, :2] == me_jac)\n', (6452, 6479), True, 'import numpy as np\n'), ((6526, 6565), 'numpy.all', 'np.all', (['(sys_jac[2:, 0] == el_over_omega)'], {}), '(sys_jac[2:, 0] == el_over_omega)\n', (6532, 6565), True, 'import numpy as np\n'), ((6307, 6329), 'numpy.array', 'np.array', (['[0, 1, 2, 3]'], {}), '([0, 1, 2, 3])\n', (6315, 6329), True, 'import numpy as np\n'), ((2323, 2356), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 560]'], {}), '([0, 0, 0, 0, 0, 0, 560])\n', (2331, 2356), True, 'import numpy as 
np\n'), ((3102, 3214), 'numpy.zeros_like', 'np.zeros_like', (['(scml_system.mechanical_load.state_names + scml_system.electrical_motor.\n CURRENTS)'], {'dtype': 'float'}), '(scml_system.mechanical_load.state_names + scml_system.\n electrical_motor.CURRENTS, dtype=float)\n', (3115, 3214), True, 'import numpy as np\n'), ((3679, 3752), 'numpy.array', 'np.array', (['[torque, -torque, currents[0] - u_in[0], currents[1] - u_in[1]]'], {}), '([torque, -torque, currents[0] - u_in[0], currents[1] - u_in[1]])\n', (3687, 3752), True, 'import numpy as np\n'), ((4970, 5041), 'numpy.concatenate', 'np.concatenate', (['(solver_state_me, torque, solver_state_el, u_in, u_sup)'], {}), '((solver_state_me, torque, solver_state_el, u_in, u_sup))\n', (4984, 5041), True, 'import numpy as np\n'), ((5754, 5766), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (5763, 5766), True, 'import numpy as np\n'), ((6039, 6055), 'numpy.arange', 'np.arange', (['(8)', '(12)'], {}), '(8, 12)\n', (6048, 6055), True, 'import numpy as np\n'), ((6660, 6671), 'numpy.zeros', 'np.zeros', (['(2)'], {}), '(2)\n', (6668, 6671), True, 'import numpy as np\n'), ((6715, 6745), 'numpy.array', 'np.array', (['[[72, 84], [78, 91]]'], {}), '([[72, 84], [78, 91]])\n', (6723, 6745), True, 'import numpy as np\n'), ((1600, 1639), 'numpy.zeros_like', 'np.zeros_like', (['state_names'], {'dtype': 'float'}), '(state_names, dtype=float)\n', (1613, 1639), True, 'import numpy as np\n'), ((1662, 1701), 'numpy.zeros_like', 'np.zeros_like', (['state_names'], {'dtype': 'float'}), '(state_names, dtype=float)\n', (1675, 1701), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
# Copyright (c) 2019, Frappe Technologies Pvt. Ltd. and Contributors
# MIT License. See license.txt
from __future__ import unicode_literals
import unittest
import sqlparse
import frappe
import frappe.recorder
from frappe.utils import set_request
from frappe.website.render import render_page
class TestRecorder(unittest.TestCase):
def setUp(self):
frappe.recorder.stop()
frappe.recorder.delete()
set_request()
frappe.recorder.start()
frappe.recorder.record()
def test_start(self):
frappe.recorder.dump()
requests = frappe.recorder.get()
self.assertEqual(len(requests), 1)
def test_do_not_record(self):
frappe.recorder.do_not_record(frappe.get_all)("DocType")
frappe.recorder.dump()
requests = frappe.recorder.get()
self.assertEqual(len(requests), 0)
def test_get(self):
frappe.recorder.dump()
requests = frappe.recorder.get()
self.assertEqual(len(requests), 1)
request = frappe.recorder.get(requests[0]["uuid"])
self.assertTrue(request)
def test_delete(self):
frappe.recorder.dump()
requests = frappe.recorder.get()
self.assertEqual(len(requests), 1)
frappe.recorder.delete()
requests = frappe.recorder.get()
self.assertEqual(len(requests), 0)
def test_record_without_sql_queries(self):
frappe.recorder.dump()
requests = frappe.recorder.get()
request = frappe.recorder.get(requests[0]["uuid"])
self.assertEqual(len(request["calls"]), 0)
def test_record_with_sql_queries(self):
frappe.get_all("DocType")
frappe.recorder.dump()
requests = frappe.recorder.get()
request = frappe.recorder.get(requests[0]["uuid"])
self.assertNotEqual(len(request["calls"]), 0)
def test_explain(self):
frappe.db.sql("SELECT * FROM tabDocType")
frappe.db.sql("COMMIT")
frappe.recorder.dump()
requests = frappe.recorder.get()
request = frappe.recorder.get(requests[0]["uuid"])
self.assertEqual(len(request["calls"][0]["explain_result"]), 1)
self.assertEqual(len(request["calls"][1]["explain_result"]), 0)
def test_multiple_queries(self):
queries = [
{"mariadb": "SELECT * FROM tabDocType", "postgres": 'SELECT * FROM "tabDocType"'},
{"mariadb": "SELECT COUNT(*) FROM tabDocType", "postgres": 'SELECT COUNT(*) FROM "tabDocType"'},
{"mariadb": "COMMIT", "postgres": "COMMIT"},
]
sql_dialect = frappe.db.db_type or "mariadb"
for query in queries:
frappe.db.sql(query[sql_dialect])
frappe.recorder.dump()
requests = frappe.recorder.get()
request = frappe.recorder.get(requests[0]["uuid"])
self.assertEqual(len(request["calls"]), len(queries))
for query, call in zip(queries, request["calls"]):
self.assertEqual(
call["query"], sqlparse.format(query[sql_dialect].strip(), keyword_case="upper", reindent=True)
)
def test_duplicate_queries(self):
queries = [
("SELECT * FROM tabDocType", 2),
("SELECT COUNT(*) FROM tabDocType", 1),
("select * from tabDocType", 2),
("COMMIT", 3),
("COMMIT", 3),
("COMMIT", 3),
]
for query in queries:
frappe.db.sql(query[0])
frappe.recorder.dump()
requests = frappe.recorder.get()
request = frappe.recorder.get(requests[0]["uuid"])
for query, call in zip(queries, request["calls"]):
self.assertEqual(call["exact_copies"], query[1])
def test_error_page_rendering(self):
content = render_page("error")
self.assertIn("Error", content)
|
[
"frappe.recorder.do_not_record",
"frappe.utils.set_request",
"frappe.recorder.start",
"frappe.recorder.record",
"frappe.db.sql",
"frappe.website.render.render_page",
"frappe.recorder.delete",
"frappe.recorder.get",
"frappe.recorder.stop",
"frappe.recorder.dump",
"frappe.get_all"
] |
[((383, 405), 'frappe.recorder.stop', 'frappe.recorder.stop', ([], {}), '()\n', (403, 405), False, 'import frappe\n'), ((408, 432), 'frappe.recorder.delete', 'frappe.recorder.delete', ([], {}), '()\n', (430, 432), False, 'import frappe\n'), ((435, 448), 'frappe.utils.set_request', 'set_request', ([], {}), '()\n', (446, 448), False, 'from frappe.utils import set_request\n'), ((451, 474), 'frappe.recorder.start', 'frappe.recorder.start', ([], {}), '()\n', (472, 474), False, 'import frappe\n'), ((477, 501), 'frappe.recorder.record', 'frappe.recorder.record', ([], {}), '()\n', (499, 501), False, 'import frappe\n'), ((528, 550), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (548, 550), False, 'import frappe\n'), ((564, 585), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (583, 585), False, 'import frappe\n'), ((716, 738), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (736, 738), False, 'import frappe\n'), ((752, 773), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (771, 773), False, 'import frappe\n'), ((835, 857), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (855, 857), False, 'import frappe\n'), ((872, 893), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (891, 893), False, 'import frappe\n'), ((944, 984), 'frappe.recorder.get', 'frappe.recorder.get', (["requests[0]['uuid']"], {}), "(requests[0]['uuid'])\n", (963, 984), False, 'import frappe\n'), ((1039, 1061), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (1059, 1061), False, 'import frappe\n'), ((1076, 1097), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (1095, 1097), False, 'import frappe\n'), ((1138, 1162), 'frappe.recorder.delete', 'frappe.recorder.delete', ([], {}), '()\n', (1160, 1162), False, 'import frappe\n'), ((1177, 1198), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (1196, 1198), False, 'import frappe\n'), ((1283, 1305), 
'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (1303, 1305), False, 'import frappe\n'), ((1320, 1341), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (1339, 1341), False, 'import frappe\n'), ((1354, 1394), 'frappe.recorder.get', 'frappe.recorder.get', (["requests[0]['uuid']"], {}), "(requests[0]['uuid'])\n", (1373, 1394), False, 'import frappe\n'), ((1485, 1510), 'frappe.get_all', 'frappe.get_all', (['"""DocType"""'], {}), "('DocType')\n", (1499, 1510), False, 'import frappe\n'), ((1513, 1535), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (1533, 1535), False, 'import frappe\n'), ((1550, 1571), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (1569, 1571), False, 'import frappe\n'), ((1584, 1624), 'frappe.recorder.get', 'frappe.recorder.get', (["requests[0]['uuid']"], {}), "(requests[0]['uuid'])\n", (1603, 1624), False, 'import frappe\n'), ((1702, 1743), 'frappe.db.sql', 'frappe.db.sql', (['"""SELECT * FROM tabDocType"""'], {}), "('SELECT * FROM tabDocType')\n", (1715, 1743), False, 'import frappe\n'), ((1746, 1769), 'frappe.db.sql', 'frappe.db.sql', (['"""COMMIT"""'], {}), "('COMMIT')\n", (1759, 1769), False, 'import frappe\n'), ((1772, 1794), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (1792, 1794), False, 'import frappe\n'), ((1809, 1830), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (1828, 1830), False, 'import frappe\n'), ((1843, 1883), 'frappe.recorder.get', 'frappe.recorder.get', (["requests[0]['uuid']"], {}), "(requests[0]['uuid'])\n", (1862, 1883), False, 'import frappe\n'), ((2416, 2438), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (2436, 2438), False, 'import frappe\n'), ((2453, 2474), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (2472, 2474), False, 'import frappe\n'), ((2487, 2527), 'frappe.recorder.get', 'frappe.recorder.get', (["requests[0]['uuid']"], {}), "(requests[0]['uuid'])\n", (2506, 2527), 
False, 'import frappe\n'), ((3042, 3064), 'frappe.recorder.dump', 'frappe.recorder.dump', ([], {}), '()\n', (3062, 3064), False, 'import frappe\n'), ((3079, 3100), 'frappe.recorder.get', 'frappe.recorder.get', ([], {}), '()\n', (3098, 3100), False, 'import frappe\n'), ((3113, 3153), 'frappe.recorder.get', 'frappe.recorder.get', (["requests[0]['uuid']"], {}), "(requests[0]['uuid'])\n", (3132, 3153), False, 'import frappe\n'), ((3311, 3331), 'frappe.website.render.render_page', 'render_page', (['"""error"""'], {}), "('error')\n", (3322, 3331), False, 'from frappe.website.render import render_page\n'), ((657, 702), 'frappe.recorder.do_not_record', 'frappe.recorder.do_not_record', (['frappe.get_all'], {}), '(frappe.get_all)\n', (686, 702), False, 'import frappe\n'), ((2379, 2412), 'frappe.db.sql', 'frappe.db.sql', (['query[sql_dialect]'], {}), '(query[sql_dialect])\n', (2392, 2412), False, 'import frappe\n'), ((3015, 3038), 'frappe.db.sql', 'frappe.db.sql', (['query[0]'], {}), '(query[0])\n', (3028, 3038), False, 'import frappe\n')]
|
import os.path as op
import random
import time
from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler
import tensorflow as tf
from tensorflow_addons.callbacks import TQDMProgressBar
from fastmri_recon.data.sequences.oasis_sequences import Masked2DSequence, KIKISequence
from fastmri_recon.models.functional_models.kiki_sep import kiki_sep_net
from fastmri_recon.models.utils.data_consistency import MultiplyScalar
from fastmri_recon.models.utils.non_linearities import lrelu
random.seed(0)
# paths
train_path = '/media/Zaccharie/UHRes/OASIS_data/'
n_train = 1000
n_val = 200
# generators
AF = 4
train_gen_last = Masked2DSequence(train_path, af=AF, inner_slices=32, rand=True, scale_factor=1e-2, seed=0, val_split=0.1)
val_gen_last = train_gen_last.val_sequence
train_gen_last.filenames = random.sample(train_gen_last.filenames, n_train)
val_gen_last.filenames = random.sample(val_gen_last.filenames, n_val)
random.seed(0)
train_gen_i = KIKISequence(train_path, af=AF, inner_slices=32, rand=True, scale_factor=1e-2, space='I', seed=0, val_split=0.1)
val_gen_i = train_gen_i.val_sequence
train_gen_i.filenames = random.sample(train_gen_i.filenames, n_train)
val_gen_i.filenames = random.sample(val_gen_i.filenames, n_val)
random.seed(0)
train_gen_k = KIKISequence(train_path, af=AF, inner_slices=32, rand=True, scale_factor=1e-2, space='K', seed=0, val_split=0.1)
val_gen_k = train_gen_k.val_sequence
train_gen_k.filenames = random.sample(train_gen_k.filenames, n_train)
val_gen_k.filenames = random.sample(val_gen_k.filenames, n_val)
random.seed(0)
run_params = {
'n_convs': 16,
'n_filters': 48,
'noiseless': True,
'lr': 1e-3,
'activation': lrelu,
'input_size': (None, None, 1),
}
multiply_scalar = MultiplyScalar()
n_epochs = 50
def learning_rate_from_epoch(epoch):
return 10**(-(epoch // (n_epochs/3)) - 3)
def train_model(model, space='K', n=1):
print(model.summary(line_length=150))
run_id = f'kikinet_sep_{space}{n}_af{AF}_oasis_{int(time.time())}'
chkpt_path = f'checkpoints/{run_id}' + '-{epoch:02d}.hdf5'
print(run_id)
chkpt_cback = ModelCheckpoint(chkpt_path, period=n_epochs//2)
log_dir = op.join('logs', run_id)
tboard_cback = TensorBoard(
profile_batch=0,
log_dir=log_dir,
histogram_freq=0,
write_graph=True,
write_images=False,
)
lrate_cback = LearningRateScheduler(learning_rate_from_epoch)
tqdm_cb = TQDMProgressBar()
if space == 'K':
train_gen = train_gen_k
val_gen = val_gen_k
elif space == 'I':
if n == 2:
train_gen = train_gen_last
val_gen = val_gen_last
elif n == 1:
train_gen = train_gen_i
val_gen = val_gen_i
model.fit_generator(
train_gen,
steps_per_epoch=n_train,
epochs=n_epochs,
validation_data=val_gen,
validation_steps=1,
verbose=0,
callbacks=[tqdm_cb, tboard_cback, chkpt_cback, lrate_cback,],
# max_queue_size=35,
use_multiprocessing=True,
workers=35,
shuffle=True,
)
return model
# first K net training
model = kiki_sep_net(None, multiply_scalar, to_add='K', last=False, **run_params)
train_model(model, space='K', n=1)
model = kiki_sep_net(model, multiply_scalar, to_add='I', last=False, **run_params)
train_model(model, space='I', n=1)
model = kiki_sep_net(model, multiply_scalar, to_add='K', last=False, **run_params)
train_model(model, space='K', n=2)
model = kiki_sep_net(model, multiply_scalar, to_add='I', last=True, fastmri=False, **run_params)
train_model(model, space='I', n=2)
|
[
"fastmri_recon.data.sequences.oasis_sequences.Masked2DSequence",
"fastmri_recon.models.functional_models.kiki_sep.kiki_sep_net",
"random.sample",
"keras.callbacks.ModelCheckpoint",
"time.time",
"keras.callbacks.TensorBoard",
"random.seed",
"tensorflow_addons.callbacks.TQDMProgressBar",
"fastmri_recon.models.utils.data_consistency.MultiplyScalar",
"keras.callbacks.LearningRateScheduler",
"fastmri_recon.data.sequences.oasis_sequences.KIKISequence",
"os.path.join"
] |
[((503, 517), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (514, 517), False, 'import random\n'), ((645, 754), 'fastmri_recon.data.sequences.oasis_sequences.Masked2DSequence', 'Masked2DSequence', (['train_path'], {'af': 'AF', 'inner_slices': '(32)', 'rand': '(True)', 'scale_factor': '(0.01)', 'seed': '(0)', 'val_split': '(0.1)'}), '(train_path, af=AF, inner_slices=32, rand=True,\n scale_factor=0.01, seed=0, val_split=0.1)\n', (661, 754), False, 'from fastmri_recon.data.sequences.oasis_sequences import Masked2DSequence, KIKISequence\n'), ((821, 869), 'random.sample', 'random.sample', (['train_gen_last.filenames', 'n_train'], {}), '(train_gen_last.filenames, n_train)\n', (834, 869), False, 'import random\n'), ((895, 939), 'random.sample', 'random.sample', (['val_gen_last.filenames', 'n_val'], {}), '(val_gen_last.filenames, n_val)\n', (908, 939), False, 'import random\n'), ((940, 954), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (951, 954), False, 'import random\n'), ((971, 1088), 'fastmri_recon.data.sequences.oasis_sequences.KIKISequence', 'KIKISequence', (['train_path'], {'af': 'AF', 'inner_slices': '(32)', 'rand': '(True)', 'scale_factor': '(0.01)', 'space': '"""I"""', 'seed': '(0)', 'val_split': '(0.1)'}), "(train_path, af=AF, inner_slices=32, rand=True, scale_factor=\n 0.01, space='I', seed=0, val_split=0.1)\n", (983, 1088), False, 'from fastmri_recon.data.sequences.oasis_sequences import Masked2DSequence, KIKISequence\n'), ((1145, 1190), 'random.sample', 'random.sample', (['train_gen_i.filenames', 'n_train'], {}), '(train_gen_i.filenames, n_train)\n', (1158, 1190), False, 'import random\n'), ((1213, 1254), 'random.sample', 'random.sample', (['val_gen_i.filenames', 'n_val'], {}), '(val_gen_i.filenames, n_val)\n', (1226, 1254), False, 'import random\n'), ((1255, 1269), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (1266, 1269), False, 'import random\n'), ((1285, 1402), 'fastmri_recon.data.sequences.oasis_sequences.KIKISequence', 
'KIKISequence', (['train_path'], {'af': 'AF', 'inner_slices': '(32)', 'rand': '(True)', 'scale_factor': '(0.01)', 'space': '"""K"""', 'seed': '(0)', 'val_split': '(0.1)'}), "(train_path, af=AF, inner_slices=32, rand=True, scale_factor=\n 0.01, space='K', seed=0, val_split=0.1)\n", (1297, 1402), False, 'from fastmri_recon.data.sequences.oasis_sequences import Masked2DSequence, KIKISequence\n'), ((1459, 1504), 'random.sample', 'random.sample', (['train_gen_k.filenames', 'n_train'], {}), '(train_gen_k.filenames, n_train)\n', (1472, 1504), False, 'import random\n'), ((1527, 1568), 'random.sample', 'random.sample', (['val_gen_k.filenames', 'n_val'], {}), '(val_gen_k.filenames, n_val)\n', (1540, 1568), False, 'import random\n'), ((1569, 1583), 'random.seed', 'random.seed', (['(0)'], {}), '(0)\n', (1580, 1583), False, 'import random\n'), ((1759, 1775), 'fastmri_recon.models.utils.data_consistency.MultiplyScalar', 'MultiplyScalar', ([], {}), '()\n', (1773, 1775), False, 'from fastmri_recon.models.utils.data_consistency import MultiplyScalar\n'), ((3180, 3253), 'fastmri_recon.models.functional_models.kiki_sep.kiki_sep_net', 'kiki_sep_net', (['None', 'multiply_scalar'], {'to_add': '"""K"""', 'last': '(False)'}), "(None, multiply_scalar, to_add='K', last=False, **run_params)\n", (3192, 3253), False, 'from fastmri_recon.models.functional_models.kiki_sep import kiki_sep_net\n'), ((3297, 3371), 'fastmri_recon.models.functional_models.kiki_sep.kiki_sep_net', 'kiki_sep_net', (['model', 'multiply_scalar'], {'to_add': '"""I"""', 'last': '(False)'}), "(model, multiply_scalar, to_add='I', last=False, **run_params)\n", (3309, 3371), False, 'from fastmri_recon.models.functional_models.kiki_sep import kiki_sep_net\n'), ((3415, 3489), 'fastmri_recon.models.functional_models.kiki_sep.kiki_sep_net', 'kiki_sep_net', (['model', 'multiply_scalar'], {'to_add': '"""K"""', 'last': '(False)'}), "(model, multiply_scalar, to_add='K', last=False, **run_params)\n", (3427, 3489), False, 'from 
fastmri_recon.models.functional_models.kiki_sep import kiki_sep_net\n'), ((3533, 3625), 'fastmri_recon.models.functional_models.kiki_sep.kiki_sep_net', 'kiki_sep_net', (['model', 'multiply_scalar'], {'to_add': '"""I"""', 'last': '(True)', 'fastmri': '(False)'}), "(model, multiply_scalar, to_add='I', last=True, fastmri=False,\n **run_params)\n", (3545, 3625), False, 'from fastmri_recon.models.functional_models.kiki_sep import kiki_sep_net\n'), ((2130, 2179), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['chkpt_path'], {'period': '(n_epochs // 2)'}), '(chkpt_path, period=n_epochs // 2)\n', (2145, 2179), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler\n'), ((2192, 2215), 'os.path.join', 'op.join', (['"""logs"""', 'run_id'], {}), "('logs', run_id)\n", (2199, 2215), True, 'import os.path as op\n'), ((2235, 2341), 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'profile_batch': '(0)', 'log_dir': 'log_dir', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(False)'}), '(profile_batch=0, log_dir=log_dir, histogram_freq=0, write_graph\n =True, write_images=False)\n', (2246, 2341), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler\n'), ((2402, 2449), 'keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['learning_rate_from_epoch'], {}), '(learning_rate_from_epoch)\n', (2423, 2449), False, 'from keras.callbacks import TensorBoard, ModelCheckpoint, LearningRateScheduler\n'), ((2464, 2481), 'tensorflow_addons.callbacks.TQDMProgressBar', 'TQDMProgressBar', ([], {}), '()\n', (2479, 2481), False, 'from tensorflow_addons.callbacks import TQDMProgressBar\n'), ((2015, 2026), 'time.time', 'time.time', ([], {}), '()\n', (2024, 2026), False, 'import time\n')]
|
# MIT License
#
# Copyright (c) 2017 Changsung
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
def daily(start, end):
return range(start, end+1)
def weekly(start, end):
return map(lambda x: x * 5, range(start, end+1))
def tech_key(data_key, period, tech_ind):
return str(data_key) + "-" + str(period) + "_" + str(tech_ind)
def calculate_quarter_count(start_date, end_date):
assert(start_date <= end_date)
quarter_num = 0
if start_date.month < 4:
quarter_num += 4
elif start_date.month < 5:
quarter_num += 3
elif start_date.month < 8:
quarter_num += 2
elif start_date.month < 11:
quarter_num += 1
else:
quarter_num += 0
if end_date.month < 4:
quarter_num -= 4
elif end_date.month < 5:
quarter_num -= 3
elif end_date.month < 8:
quarter_num -= 2
elif end_date.month < 11:
quarter_num -= 1
else:
quarter_num -= 0
year_delta = end_date.year - start_date.year
quarter_num += year_delta * 4
return quarter_num
from datetime import datetime
def quarterify(year, quarter_num):
""" quarter_num = 1, 2, 3, 4
"""
year = int(year)
assert(quarter_num in [1, 2, 3, 4])
if quarter_num == 1:
return datetime(year, 5, 1)
elif quarter_num == 2:
return datetime(year, 8, 1)
elif quarter_num == 3:
return datetime(year, 11, 1)
else:
return datetime(year+1, 4, 1)
def get_closest_quarter(cur_date, past=True):
if cur_date.month < 4:
if past:
return (cur_date.year-1), 3
#return cur_date.replace(year=(cur_date.year-1), month=11, day=1)
else:
return (cur_date.year-1), 4
#return cur_date.replace(year=(cur_date.year), month=4, day=1)
elif cur_date.month < 5:
if past:
return (cur_date.year-1), 4
#return cur_date.replace(year=(cur_date.year), month=4, day=1)
else:
return (cur_date.year), 1
#return cur_date.replace(year=(cur_date.year), month=5, day=1)
elif cur_date.month < 8:
if past:
return (cur_date.year), 1
#return cur_date.replace(year=(cur_date.year), month=5, day=1)
else:
return (cur_date.year), 2
#return cur_date.replace(year=(cur_date.year), month=8, day=1)
elif cur_date.month < 11:
if past:
return (cur_date.year), 2
#return cur_date.replace(year=(cur_date.year), month=8, day=1)
else:
return (cur_date.year), 3
#return cur_date.replace(year=(cur_date.year), month=11, day=1)
else:
if past:
return (cur_date.year), 3
#return cur_date.replace(year=(cur_date.year), month=11, day=1)
else:
return (cur_date.year), 4
#return cur_date.replace(year=(cur_date.year+1), month=4, day=1)
|
[
"datetime.datetime"
] |
[((2144, 2164), 'datetime.datetime', 'datetime', (['year', '(5)', '(1)'], {}), '(year, 5, 1)\n', (2152, 2164), False, 'from datetime import datetime\n'), ((2198, 2218), 'datetime.datetime', 'datetime', (['year', '(8)', '(1)'], {}), '(year, 8, 1)\n', (2206, 2218), False, 'from datetime import datetime\n'), ((2252, 2273), 'datetime.datetime', 'datetime', (['year', '(11)', '(1)'], {}), '(year, 11, 1)\n', (2260, 2273), False, 'from datetime import datetime\n'), ((2290, 2314), 'datetime.datetime', 'datetime', (['(year + 1)', '(4)', '(1)'], {}), '(year + 1, 4, 1)\n', (2298, 2314), False, 'from datetime import datetime\n')]
|
# Author: <NAME>
# An abstract type for resources types that are encapsulated by a d3m remote dataset
import logging
from ls_dataset.dataset_resource import DatasetResource
from ls_dataset.dsr_table import DSRTable
logger = logging.getLogger(__name__)
class DatasetResourceFactory(object):
"""
Factory class for producing the appropriate DatasetResource type given
some metadata
"""
@staticmethod
def get_resource(metadata):
"""
Initializes and returns an instance of a dataset resource with proper
child class given a metadata dictionary
"""
if metadata['resType'] not in DatasetResource.__resource_types__:
logger.warning("Invalid resource type encountered: %s" % str(metadata))
raise Exception("Invalid resource type encountered: %s" % str(metadata))
elif metadata['resType'] == 'table':
return DSRTable(metadata)
else:
return DatasetResource(metadata)
|
[
"ls_dataset.dataset_resource.DatasetResource",
"ls_dataset.dsr_table.DSRTable",
"logging.getLogger"
] |
[((229, 256), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (246, 256), False, 'import logging\n'), ((915, 933), 'ls_dataset.dsr_table.DSRTable', 'DSRTable', (['metadata'], {}), '(metadata)\n', (923, 933), False, 'from ls_dataset.dsr_table import DSRTable\n'), ((967, 992), 'ls_dataset.dataset_resource.DatasetResource', 'DatasetResource', (['metadata'], {}), '(metadata)\n', (982, 992), False, 'from ls_dataset.dataset_resource import DatasetResource\n')]
|
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from model_utils.models import TimeStampedModel
from semantic_version.django_fields import VersionField
from ..game_catalog.utils import AbstractUUIDModel
# Create your models here.
class ReleaseNotice(TimeStampedModel, AbstractUUIDModel, models.Model):
"""
Represents the latest release notice that a given user has seen.
"""
user = models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)
latest_version_shown = models.ForeignKey(
"ReleaseNote", on_delete=models.SET_NULL, null=True, blank=True
)
def __str__(self):
str_response = "{} saw notes for version {} at {}"
str_args = None
if self.latest_version_shown:
str_args = [
self.user.username,
self.latest_version_shown.version,
self.modified,
]
else:
str_args = [self.user.username, None, self.created]
return str_response.format(*str_args)
class ReleaseNote(TimeStampedModel, AbstractUUIDModel, models.Model):
"""
Represents parsed release note entries. We store the results in the DB rather than parsing the changelog everytime, which could be prone to errors.
"""
version = VersionField(unique=True, help_text=_("Semantic version for the update."))
release_date = models.DateField(
db_index=True, help_text=_("Official release date for this version.")
)
notes = models.TextField(
help_text=_("The notes for the release. This should be stored in Markdown.")
)
notes_rendered = models.TextField(
null=True,
blank=True,
help_text=_(
"HTML rendered version of the notes field. Precalculated at creation to reduce template processing."
),
)
def __str__(self):
return str(self.version)
class Meta:
ordering = ["-release_date"]
|
[
"django.db.models.ForeignKey",
"django.db.models.OneToOneField",
"django.utils.translation.ugettext_lazy"
] |
[((473, 545), 'django.db.models.OneToOneField', 'models.OneToOneField', (['settings.AUTH_USER_MODEL'], {'on_delete': 'models.CASCADE'}), '(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n', (493, 545), False, 'from django.db import models\n'), ((573, 659), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""ReleaseNote"""'], {'on_delete': 'models.SET_NULL', 'null': '(True)', 'blank': '(True)'}), "('ReleaseNote', on_delete=models.SET_NULL, null=True,\n blank=True)\n", (590, 659), False, 'from django.db import models\n'), ((1387, 1424), 'django.utils.translation.ugettext_lazy', '_', (['"""Semantic version for the update."""'], {}), "('Semantic version for the update.')\n", (1388, 1424), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1496, 1540), 'django.utils.translation.ugettext_lazy', '_', (['"""Official release date for this version."""'], {}), "('Official release date for this version.')\n", (1497, 1540), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1595, 1661), 'django.utils.translation.ugettext_lazy', '_', (['"""The notes for the release. This should be stored in Markdown."""'], {}), "('The notes for the release. This should be stored in Markdown.')\n", (1596, 1661), True, 'from django.utils.translation import ugettext_lazy as _\n'), ((1764, 1872), 'django.utils.translation.ugettext_lazy', '_', (['"""HTML rendered version of the notes field. Precalculated at creation to reduce template processing."""'], {}), "('HTML rendered version of the notes field. Precalculated at creation to reduce template processing.'\n )\n", (1765, 1872), True, 'from django.utils.translation import ugettext_lazy as _\n')]
|
import torch
from torch import nn
from torch.nn import functional as F
__all__ = ["FeatureNet", "PointSectorScatter"]
class Conv1x1(nn.Module):
def __init__(self, in_channels=160, out_channels=32):
super(Conv1x1, self).__init__()
self.conv = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.bn = nn.BatchNorm1d(out_channels)
self.relu = nn.ReLU(inplace=True)
def forward(self, inputs):
x = self.conv(inputs)
x = self.bn(x)
out = self.relu(x)
return out
class PN(nn.Module):
def __init__(self, in_channels, out_channels):
super().__init__()
self.linear = nn.Linear(in_channels, out_channels)
self.norm = nn.BatchNorm1d(out_channels)
def forward(self, inputs):
# inputs.shape : sector x points(100) x features
x = self.linear(inputs) # sector x points(100) x 32
x = self.norm(x.permute(0, 2, 1).contiguous()) # to bn 32 features. not points. (p x 32 x 100)
x = F.relu(x)
x_max = torch.max(x, dim=2, keepdim=True)[0] # (p x 32 x 100) -> (p x 32 x 1)
return x_max
class MMPN(nn.Module): # Multi Modal PointNet: local feature extractor
def __init__(self, in_channels, out_channels):
super().__init__()
self.conv1 = nn.Conv1d(in_channels, 64, kernel_size= 1)
self.conv2 = nn.Conv1d(64, 128, kernel_size=1)
self.conv3 = nn.Conv1d(128, 128, kernel_size=1)
self.bn1 = nn.BatchNorm1d(128)
self.linear = nn.Linear(128, out_channels)
self.bn2 = nn.BatchNorm1d(out_channels)
self.relu = nn.ReLU()
def forward(self, inputs):
x = inputs.permute(0, 2, 1).contiguous()
x = self.conv1(x)
x = self.conv2(x)
x = self.conv3(x)
x = self.bn1(x)
x = self.relu(x)
x = x.permute(0, 2, 1).contiguous()
x = self.linear(x)
x = self.bn2(x.permute(0, 2, 1).contiguous()).permute(0, 2, 1).contiguous()
x = self.relu(x)
x_max = torch.max(x, dim=1, keepdim=True)[0]
return x_max # s x 1 x 32
# def get_paddings_indicator(actual_num, max_num, axis=0):
# actual_num = torch.unsqueeze(actual_num, axis + 1)
# # tiled_actual_num: [N, M, 1]
# max_num_shape = [1] * len(actual_num.shape)
# max_num_shape[axis + 1] = -1
# max_num = torch.arange(
# max_num,
# dtype = torch.int,
# device = actual_num.device).view(max_num_shape)
# # tiled_actual_num: [[3,3,3,3,3], [4,4,4,4,4], [2,2,2,2,2]]
# # tiled_max_num: [[0,1,2,3,4], [0,1,2,3,4], [0,1,2,3,4]]
# paddings_indicator = actual_num.int() > max_num
# # paddings_indicator shape: [batch_size, max_num]
# return paddings_indicator
class FeatureNet(nn.Module):
def __init__(self):
"""
Base on Pillar Feature Net.
"""
super().__init__()
# # Create FeatureNet layers
# # All-in-one
self.fn = PN(9, 32)
# Multimod
# self.fn_xyz = PN(3, 32) # p x n x 3 -> p x 32 x 1
# self.fn_i = PN(1, 32) # p x n x 1 -> p x 32 x 1
# self.fn_d = PN(1, 32) # p x n x 1 -> p x 32 x 1
# self.fn_r = PN(1, 32) # p x n x 1 -> p x 32 x 1
# self.fn_cxyz = PN(3, 32) # p x n x 3 -> p x 32 x 1
# self.conv1x1= Conv1x1(32*5,32) # p x n x 3 -> p x 32 x 1
# # Backup
# self.pfn_layers = nn.ModuleList(pfn_layers)
# Need pillar (voxel) size and x/y offset in order to calculate pillar offset
# self.vx = voxel_size[0]
# self.vy = voxel_size[1]
# self.x_offset = self.vx / 2 + pc_range[0]
# self.y_offset = self.vy / 2 + pc_range[1]
def forward(self, features, coors, num_points):
# features: p x n x 6 (x,y,z,i,rad,dis)
# num_points: p x 1
# Find distance of x, y, and z from pillar center 这个不能用?可以用半径来算?
# not available in sector
# f_center = torch.zeros_like(features[:, :, :2]) # f_center: nx100x2 ; coors 前两列都是0
# f_center[:, :, 0] = features[:, :, 0] - (coors[:, 3].float().unsqueeze(1) * self.vx + self.x_offset)
# f_center[:, :, 1] = features[:, :, 1] - (coors[:, 2].float().unsqueeze(1) * self.vy + self.y_offset)
points_mean = features[:, :, :3].sum(dim=1, keepdim=True) / num_points.type_as(features).view(-1, 1, 1)
f_cluster = features[:, :, :3] - points_mean # f_cluster: px100x3
########################################
# # Multimod approach
#
# # input: x,y,z
# # output: distance of x, y, and z from cluster center
# features_xyz = features[:, :, :3] # pxnx3
# features_sector_core = f_cluster # pxnx3
# features_intensity = torch.unsqueeze(features[:,:,3],2) # pxnx1
# features_radius = torch.unsqueeze(features[:,:,4],2) # pxnx1
# features_distance = torch.unsqueeze(features[:,:,5],2) #pxnx1
#
# sector_xyz = self.fn_xyz(features_xyz)
# sector_core= self.fn_cxyz(features_sector_core)
# sector_intensity = self.fn_i(features_intensity)
# sector_distance = self.fn_d(features_distance)
# sector_radius= self.fn_r(features_radius)
# sector_cat= torch.cat([sector_xyz,sector_intensity,sector_radius,
# sector_distance,sector_core],dim=1)
# sector_all = self.conv1x1(sector_cat) # P x 160 x1 -> P x 32 x 1
# sector_all = sector_all.squeeze().transpose(0, 1)
# return sector_all # 32 x p
###################################################
# # All-in-one approach
feature_all = torch.cat((features, f_cluster), dim=2) #6+3
sector_all = self.fn(feature_all)
sector_all = sector_all.squeeze().transpose(0, 1)
return sector_all
# sector_cat= torch.transpose(torch.cat([sector_xyz, sector_intensity, sector_radius,
# sector_distance, sector_core], dim=1), 1, 2)
class PointSectorScatter(nn.Module):
def __init__(self):
"""
modified from Point Pillar's Scatter.
Converts learned features from dense tensor to sparse pseudo image. This replaces SECOND's
second.pytorch.voxelnet.SparseMiddleExtractor.
"""
super().__init__()
self.nh = 64 # 64
self.nw = 256 # 256
self.nchannels = 32 # output channels
def forward(self, features, coords, batch_size):
# batch_canvas will be the final output.
batch_canvas = []
for batch_id in range(batch_size):
# Create the canvas for this sample
canvas = torch.zeros(( self.nchannels, self.nh, self.nw),
dtype=features.dtype, device=features.device)
# Only include non-empty pillars
batch_mask = coords[:, 0] == batch_id #Px4(b,x,y,z)
this_coords = coords[batch_mask, :]
this_voxels = features[:, batch_mask ] # 32 x P
# Now scatter the blob back to the canvas.
canvas[:,this_coords[:,1].type(torch.long),this_coords[:,2].type(torch.long)] = this_voxels
# Append to a list for later stacking.
batch_canvas.append(canvas)
# Stack to 3-dim tensor (batch-size, nchannels, nrows*ncols)
batch_canvas = torch.stack(batch_canvas, 0)
return batch_canvas #bs ,32, 64, 256 (batchsize, channels, height, width)
|
[
"torch.nn.ReLU",
"torch.stack",
"torch.nn.BatchNorm1d",
"torch.nn.Conv1d",
"torch.cat",
"torch.nn.Linear",
"torch.max",
"torch.nn.functional.relu",
"torch.zeros"
] |
[((273, 324), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_channels', 'out_channels'], {'kernel_size': '(1)'}), '(in_channels, out_channels, kernel_size=1)\n', (282, 324), False, 'from torch import nn\n'), ((344, 372), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_channels'], {}), '(out_channels)\n', (358, 372), False, 'from torch import nn\n'), ((394, 415), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (401, 415), False, 'from torch import nn\n'), ((680, 716), 'torch.nn.Linear', 'nn.Linear', (['in_channels', 'out_channels'], {}), '(in_channels, out_channels)\n', (689, 716), False, 'from torch import nn\n'), ((738, 766), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_channels'], {}), '(out_channels)\n', (752, 766), False, 'from torch import nn\n'), ((1039, 1048), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1045, 1048), True, 'from torch.nn import functional as F\n'), ((1337, 1378), 'torch.nn.Conv1d', 'nn.Conv1d', (['in_channels', '(64)'], {'kernel_size': '(1)'}), '(in_channels, 64, kernel_size=1)\n', (1346, 1378), False, 'from torch import nn\n'), ((1402, 1435), 'torch.nn.Conv1d', 'nn.Conv1d', (['(64)', '(128)'], {'kernel_size': '(1)'}), '(64, 128, kernel_size=1)\n', (1411, 1435), False, 'from torch import nn\n'), ((1458, 1492), 'torch.nn.Conv1d', 'nn.Conv1d', (['(128)', '(128)'], {'kernel_size': '(1)'}), '(128, 128, kernel_size=1)\n', (1467, 1492), False, 'from torch import nn\n'), ((1513, 1532), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(128)'], {}), '(128)\n', (1527, 1532), False, 'from torch import nn\n'), ((1556, 1584), 'torch.nn.Linear', 'nn.Linear', (['(128)', 'out_channels'], {}), '(128, out_channels)\n', (1565, 1584), False, 'from torch import nn\n'), ((1605, 1633), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_channels'], {}), '(out_channels)\n', (1619, 1633), False, 'from torch import nn\n'), ((1655, 1664), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1662, 1664), False, 'from torch import nn\n'), 
((5804, 5843), 'torch.cat', 'torch.cat', (['(features, f_cluster)'], {'dim': '(2)'}), '((features, f_cluster), dim=2)\n', (5813, 5843), False, 'import torch\n'), ((7548, 7576), 'torch.stack', 'torch.stack', (['batch_canvas', '(0)'], {}), '(batch_canvas, 0)\n', (7559, 7576), False, 'import torch\n'), ((1066, 1099), 'torch.max', 'torch.max', (['x'], {'dim': '(2)', 'keepdim': '(True)'}), '(x, dim=2, keepdim=True)\n', (1075, 1099), False, 'import torch\n'), ((2082, 2115), 'torch.max', 'torch.max', (['x'], {'dim': '(1)', 'keepdim': '(True)'}), '(x, dim=1, keepdim=True)\n', (2091, 2115), False, 'import torch\n'), ((6846, 6943), 'torch.zeros', 'torch.zeros', (['(self.nchannels, self.nh, self.nw)'], {'dtype': 'features.dtype', 'device': 'features.device'}), '((self.nchannels, self.nh, self.nw), dtype=features.dtype,\n device=features.device)\n', (6857, 6943), False, 'import torch\n')]
|
# vim: sw=4:ts=4:et:cc=120
import base64
import datetime
import functools
import io
import json
import logging
import os, os.path
import re
import shutil
import sqlite3
import tempfile
import threading
import zipfile
import requests
from urllib.parse import urlparse
import saq
from saq.constants import *
from saq.collectors import Collector, Submission
from saq.error import report_exception
from saq.fireeye import *
from saq.util import local_time, format_iso8601
ARTIFACT_STATUS_READY = 1
ARTIFACT_STATUS_COMPLETE = 2
ARTIFACT_STATUS_ERROR = 3
# utility function to return the list of tags a given url should have
# some of the urls we get from fireeye are file:///, ehdr://, etc...
# no reason to tag them as malicious since they are meaningless
def _get_tags_for_url(url):
if url.lower().startswith('http'):
return [ 'malicious' ]
return []
class FireEyeCollector(Collector):
def __init__(self, *args, **kwargs):
super().__init__(service_config=saq.CONFIG['service_fireeye_collector'],
workload_type='fireeye',
delete_files=True,
*args, **kwargs)
self.fe_client = FireEyeAPIClient(saq.CONFIG['fireeye']['host'],
saq.CONFIG['fireeye']['user_name'],
saq.CONFIG['fireeye']['password'])
# we use a small sqlite database to keep track of what IDs we've already loaded
self.alert_uuid_cache_path = os.path.join(self.persistence_dir, 'fireeye_alert_uuid.db')
if not os.path.exists(self.alert_uuid_cache_path):
try:
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("""
CREATE TABLE uuid_tracking (
uuid TEXT,
insert_date INTEGER,
artifact_status INTEGER DEFAULT 1,
last_artifact_http_result INTEGER,
last_artifact_http_result_text TEXT,
last_artifact_attempt INTEGER,
error_message TEXT
)""")
c.execute("""
CREATE INDEX insert_date_index ON uuid_tracking(insert_date)
""")
c.execute("""
CREATE INDEX artifact_status_index ON uuid_tracking(artifact_status)
""")
db.commit()
except Exception as e:
logging.error(f"unable to create {self.alert_uuid_cache_path}: {e}")
report_exception()
else:
# if we already have it created then let's log how many we're tracking
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("SELECT COUNT(*) FROM uuid_tracking")
row = c.fetchone()
logging.debug(f"currently tracking {row[0]} alert uuids from fireeye")
# remember the last time we executed an API call for the alerts
self.last_api_call_path = os.path.join(self.persistence_dir, 'fireeye_last_api_call')
self._last_api_call = None
try:
if os.path.exists(self.last_api_call_path):
with open(self.last_api_call_path, 'r') as fp:
self._last_api_call = datetime.datetime.strptime(fp.read(), event_time_format_json_tz)
logging.debug(f"loaded {self._last_api_call} as last_api_call epoch time")
except Exception as e:
logging.error(f"unable to load {self.last_api_call_path}: {e}")
report_exception()
try:
os.remove(self.last_api_call_path)
except:
pass
# where we store fireeye artifacts we download
# these are later picked up by the FireEyeArtifactAnalyzer (in lib/saq/modules/fireeye.py)
self.artifact_storage_dir = os.path.join(saq.DATA_DIR, saq.CONFIG['fireeye']['artifact_storage_dir'])
if not os.path.isdir(self.artifact_storage_dir):
os.makedirs(self.artifact_storage_dir, exist_ok=True)
# primary collection threads
self.alert_collection_thread = None
self.artifact_collection_thread = None
@property
def generate_alerts(self):
"""Are we creating alerts?"""
return self.service_config.getboolean('generate_alerts')
def stop(self, *args, **kwargs):
super().stop(*args, **kwargs)
# make sure we release our fireeye token
self.fe_client.close()
@property
def last_api_call(self):
return self._last_api_call
@last_api_call.setter
def last_api_call(self, value):
assert isinstance(value, datetime.datetime)
self._last_api_call = value
try:
with open(self.last_api_call_path, 'w') as fp:
fp.write(value.strftime(event_time_format_json_tz))
except Exception as e:
logging.error(f"unable to save last_api_call value to {self.last_api_call_path}: {e}")
report_exception()
def get_duration(self):
"""Returns the duration to use based on the last time we made the api call."""
result = None
for hours in VALID_DURATIONS:
result = hours
if self.last_api_call + datetime.timedelta(hours=hours) >= local_time():
break
return hours
def get_alerts(self):
# if we don't have a last_api_call, then we default to 48 hours ago
if self.last_api_call is None:
self.last_api_call = local_time() - datetime.timedelta(hours=48)
logging.debug(f"last_api_call is empty so defaulting to 48 hours ago: {self.last_api_call}")
now = local_time()
#duration = self.get_duration()
duration = 48
start_time = format_iso8601(self.last_api_call)
try:
for alert in self.fe_client.get_alerts(self.last_api_call, duration):
yield alert
except requests.exceptions.HTTPError as e:
if e.response.status_code in [ 502, 503 ]:
logging.warning(f"fireeye returned {e.response.status_code} (unavailable)")
return
raise e
# the next time we make this call, we start at last_api_call + duration_in_hours
next_api_call = self.last_api_call + datetime.timedelta(hours=duration)
if next_api_call > now: # if our duration puts us past right now, then just use right now
self.last_api_call = now
else:
self.last_api_call = next_api_call
logging.debug(f"next fireeye api call will start at {self.last_api_call}")
def is_alert_processed(self, uuid):
"""Returns True if this alert has already been processed, False otherwise."""
try:
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("SELECT uuid FROM uuid_tracking WHERE uuid = ?", (uuid,))
row = c.fetchone()
if row is None:
return False
logging.debug(f"already processed alert {uuid}")
return True
except Exception as e:
logging.error(f"unable to check fireeye alert processed status {uuid}: {e}")
report_exception()
return False # default to accepting the alert
def mark_alert_processed(self, uuid):
"""Records the processing of a given alert uuid."""
try:
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("INSERT INTO uuid_tracking ( uuid, insert_date ) VALUES ( ?, ? )",
(uuid, datetime.datetime.now().timestamp()))
db.commit()
except Exception as e:
logging.error(f"unable to track fireeye alert uuid {uuid}: {e}")
report_exception()
def clear_old_records(self):
"""Clears records held in the alert cache older than 48 hours."""
try:
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("DELETE FROM uuid_tracking WHERE insert_date < ?",
((datetime.datetime.now() - datetime.timedelta(hours=48)).timestamp(),))
db.commit()
except Exception as e:
logging.error(f"unable to track fireeye alert uuid {uuid}: {e}")
report_exception()
def extended_collection(self):
self.alert_collection_thread = threading.Thread(target=self.execute_in_loop,
args=(self.collect_alerts,),
name="Alert Collection")
self.alert_collection_thread.start()
self.artifact_collection_thread = threading.Thread(target=self.execute_in_loop,
args=(self.collect_artifacts,),
name="Artifact Collection")
self.artifact_collection_thread.start()
# wait for these threads to finish
self.alert_collection_thread.join()
self.artifact_collection_thread.join()
def collect_alerts(self):
for alert in self.get_alerts():
if self.service_shutdown_event.is_set():
break
if self.is_alert_processed(alert['uuid']):
logging.debug(f"skipping alert {alert['uuid']} -- already processed")
continue
self.mark_alert_processed(alert['uuid'])
self.clear_old_records()
# are we generating ACE alerts for ths stuff we collect here?
if not self.generate_alerts:
continue
description = f"FireEye {alert[KEY_PRODUCT]} ({alert[KEY_ACTION]}) "
observables = []
if KEY_EXPLANATION in alert:
explanation = alert[KEY_EXPLANATION]
if KEY_MALWARE_DETECTED in explanation:
malware_detected = explanation[KEY_MALWARE_DETECTED]
if KEY_MALWARE in malware_detected:
malware = malware_detected[KEY_MALWARE]
if len(malware) > 0 and KEY_NAME in malware[0]:
description += malware[0][KEY_NAME] + " "
for malware_sample in malware:
if ((KEY_TYPE in malware_sample and malware_sample[KEY_TYPE] == 'link')
and KEY_URL in malware_sample):
url = observables.append({'type': F_URL, 'value': malware_sample[KEY_URL], 'tags': _get_tags_for_url(malware_sample[KEY_URL])})
# for email alerts these are hashes
if alert[KEY_PRODUCT] == 'EMAIL_MPS':
if KEY_MD5 in malware_sample:
observables.append({'type': F_MD5, 'value': malware_sample[KEY_MD5]})
if KEY_SHA256 in malware_sample:
observables.append({'type': F_SHA256, 'value': malware_sample[KEY_SHA256]})
# but for web alerts these are URLs lol
elif alert[KEY_PRODUCT] == 'WEB_MPS':
if KEY_MD5 in malware_sample:
url = observables.append({'type': F_URL, 'value': malware_sample[KEY_MD5], 'tags': _get_tags_for_url(malware_sample[KEY_MD5])}) # <-- that is correct
if KEY_SRC in alert:
if KEY_SMTP_MAIL_FROM in alert[KEY_SRC]:
#description += "From " + alert[KEY_SRC][KEY_SMTP_MAIL_FROM] + " "
observables.append({'type': F_EMAIL_ADDRESS, 'value': alert[KEY_SRC][KEY_SMTP_MAIL_FROM]})
if KEY_IP in alert[KEY_SRC]:
observables.append({'type': F_IPV4, 'value': alert[KEY_SRC][KEY_IP]})
if KEY_DST in alert:
if KEY_SMTP_TO in alert[KEY_DST]:
observables.append({'type': F_EMAIL_ADDRESS, 'value': alert[KEY_DST][KEY_SMTP_TO]})
if KEY_SRC in alert and KEY_SMTP_MAIL_FROM in alert[KEY_SRC]:
observables.append({'type': F_EMAIL_CONVERSATION, 'value': create_email_conversation(alert[KEY_SRC][KEY_SMTP_MAIL_FROM], alert[KEY_DST][KEY_SMTP_TO])})
if KEY_IP in alert[KEY_DST]:
observables.append({'type': F_IPV4, 'value': alert[KEY_DST][KEY_IP]})
if KEY_SRC in alert and KEY_IP in alert[KEY_SRC]:
ipv4_conversation = observables.append({'type': F_IPV4_CONVERSATION,
'value': create_ipv4_conversation(alert[KEY_SRC][KEY_IP],
alert[KEY_DST][KEY_IP])})
if ipv4_conversation is not None:
# if this was caught by the WEB MPS then let's grab the pcap
if alert[KEY_PRODUCT] == 'WEB_MPS':
ipv4_conversation.add_directive(DIRECTIVE_EXTRACT_PCAP)
if KEY_SMTP_MESSAGE in alert:
if KEY_SUBJECT in alert[KEY_SMTP_MESSAGE]:
description += "Subject " + alert[KEY_SMTP_MESSAGE][KEY_SUBJECT]
if KEY_UUID in alert:
observables.append({'type': F_FIREEYE_UUID, 'value': alert[KEY_UUID]})
# XXX for some reason the date time value for the occurred key says -0400 even
# XXX though it's actually UTC
alert[KEY_OCCURRED] = alert[KEY_OCCURRED][:-5] + '+0000'
submission = Submission(
description = description,
analysis_mode = ANALYSIS_MODE_CORRELATION,
tool = 'FireEye',
tool_instance = self.fe_client.fe_host,
type = ANALYSIS_TYPE_FIREEYE,
event_time = datetime.datetime.strptime(alert[KEY_OCCURRED], event_time_format_tz),
details = alert,
observables = observables,
tags = [],
files = [])
self.submission_list.put(submission)
return saq.CONFIG['fireeye'].getint('query_frequency', 60) # wait for N seconds before we look again
def get_next_artifact_uuid(self):
"""Returns the next uuid to collect artifacts for, or None if none are currently required."""
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
# get the next alert that needs to have artifact downloaded
c.execute("""
SELECT uuid
FROM uuid_tracking
WHERE artifact_status = ?
ORDER BY last_artifact_attempt ASC, insert_date ASC
LIMIT 1
""",
(ARTIFACT_STATUS_READY,))
row = c.fetchone()
if row is None:
return None
return row[0]
def update_artifact_last_attempt(self, uuid):
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("UPDATE uuid_tracking SET last_artifact_attempt = ? WHERE uuid = ?",
(datetime.datetime.now().timestamp(), uuid))
db.commit()
def update_artifact_status(self, uuid, status, http_result=None, http_result_text=None, error_message=None):
with sqlite3.connect(self.alert_uuid_cache_path) as db:
c = db.cursor()
c.execute("""
UPDATE uuid_tracking
SET artifact_status = ?,
last_artifact_http_result = ?,
last_artifact_http_result_text = ?,
error_message = ?
WHERE uuid = ?""",
(status, http_result, http_result_text, error_message, uuid))
db.commit()
def collect_artifacts(self):
uuid = self.get_next_artifact_uuid()
if uuid is None:
return 1
self.update_artifact_last_attempt(uuid)
artifact_http_result = None
artifact_http_result_text = None
# first check to see if we've already downloaded it
# you're typically only going to see this in the development environmnent
target_dir = os.path.join(self.artifact_storage_dir, f'{uuid}')
if os.path.exists(target_dir):
logging.info(f"already downloaded artifacts for {uuid}")
self.update_artifact_status(uuid, ARTIFACT_STATUS_COMPLETE)
# don't wait for next request
return 0
logging.info(f"attempting to download artifacts for {uuid}")
try:
with FireEyeAPIClient(saq.CONFIG['fireeye']['host'],
saq.CONFIG['fireeye']['user_name'],
saq.CONFIG['fireeye']['password']) as fe_client:
# store the artifacts in a temporary directory until they are completed downloading
output_dir = os.path.join(self.artifact_storage_dir, f'{uuid}_temp')
if os.path.exists(output_dir):
logging.warning(f"output dir {output_dir} already exists -- deleting")
shutil.rmtree(output_dir)
os.mkdir(output_dir)
try:
artifact_json = fe_client.get_artifacts_by_uuid(output_dir, uuid)
with open(os.path.join(output_dir, 'artifact.json'), 'w') as fp:
json.dump(artifact_json, fp)
for artifact_entry in artifact_json[KEY_ARTIFACTS_INFO_LIST]:
file_name = artifact_entry[KEY_ARTIFACT_NAME]
file_type = artifact_entry[KEY_ARTIFACT_TYPE]
if not os.path.exists(os.path.join(output_dir, file_name)):
logging.warning(f"artifact file {file_name} does not exist in {output_dir}")
continue
logging.info(f"recording artifact {file_name} for {uuid}")
# move the directory to where the fireeye artifact analysis module is expecting to see it
final_dir = os.path.join(self.artifact_storage_dir, uuid)
if os.path.exists(final_dir):
logging.warning(f"final output dir {final_dir} already exists -- deleting")
shutil.rmtree(final_dir)
shutil.move(output_dir, os.path.join(self.artifact_storage_dir, uuid))
self.update_artifact_status(uuid, ARTIFACT_STATUS_COMPLETE)
except requests.exceptions.HTTPError as e:
# in my testing I'm finding FireEye returning 404 then later returning the data for the same call
# the calls takes a LONG time to complete (60+ seconds)
# it must be downloading it from the cloud or something
# and then I think 500 level error codes are when the system is getting behind
if e.response.status_code == 404 or ( 500 <= e.response.status_code <= 599 ):
self.update_artifact_status(uuid, ARTIFACT_STATUS_READY,
http_result = e.response.status_code,
http_result_text = str(e.response))
except Exception as e:
logging.error(f"unable to download artifacts for uuid {uuid}: {e}")
report_exception()
self.update_artifact_status(uuid, ARTIFACT_STATUS_ERROR, error_message=str(e))
return 0 # don't wait to process the next one
|
[
"os.mkdir",
"os.remove",
"shutil.rmtree",
"os.path.join",
"logging.error",
"logging.warning",
"saq.util.local_time",
"os.path.exists",
"datetime.timedelta",
"datetime.datetime.now",
"threading.Thread",
"json.dump",
"datetime.datetime.strptime",
"sqlite3.connect",
"saq.error.report_exception",
"logging.debug",
"os.makedirs",
"os.path.isdir",
"logging.info",
"saq.util.format_iso8601"
] |
[((1522, 1581), 'os.path.join', 'os.path.join', (['self.persistence_dir', '"""fireeye_alert_uuid.db"""'], {}), "(self.persistence_dir, 'fireeye_alert_uuid.db')\n", (1534, 1581), False, 'import os, os.path\n'), ((2938, 2997), 'os.path.join', 'os.path.join', (['self.persistence_dir', '"""fireeye_last_api_call"""'], {}), "(self.persistence_dir, 'fireeye_last_api_call')\n", (2950, 2997), False, 'import os, os.path\n'), ((3805, 3878), 'os.path.join', 'os.path.join', (['saq.DATA_DIR', "saq.CONFIG['fireeye']['artifact_storage_dir']"], {}), "(saq.DATA_DIR, saq.CONFIG['fireeye']['artifact_storage_dir'])\n", (3817, 3878), False, 'import os, os.path\n'), ((5641, 5653), 'saq.util.local_time', 'local_time', ([], {}), '()\n', (5651, 5653), False, 'from saq.util import local_time, format_iso8601\n'), ((5737, 5771), 'saq.util.format_iso8601', 'format_iso8601', (['self.last_api_call'], {}), '(self.last_api_call)\n', (5751, 5771), False, 'from saq.util import local_time, format_iso8601\n'), ((6513, 6587), 'logging.debug', 'logging.debug', (['f"""next fireeye api call will start at {self.last_api_call}"""'], {}), "(f'next fireeye api call will start at {self.last_api_call}')\n", (6526, 6587), False, 'import logging\n'), ((8499, 8602), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.execute_in_loop', 'args': '(self.collect_alerts,)', 'name': '"""Alert Collection"""'}), "(target=self.execute_in_loop, args=(self.collect_alerts,),\n name='Alert Collection')\n", (8515, 8602), False, 'import threading\n'), ((8800, 8910), 'threading.Thread', 'threading.Thread', ([], {'target': 'self.execute_in_loop', 'args': '(self.collect_artifacts,)', 'name': '"""Artifact Collection"""'}), "(target=self.execute_in_loop, args=(self.collect_artifacts,\n ), name='Artifact Collection')\n", (8816, 8910), False, 'import threading\n'), ((16448, 16498), 'os.path.join', 'os.path.join', (['self.artifact_storage_dir', 'f"""{uuid}"""'], {}), "(self.artifact_storage_dir, f'{uuid}')\n", (16460, 16498), 
False, 'import os, os.path\n'), ((16510, 16536), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (16524, 16536), False, 'import os, os.path\n'), ((16751, 16811), 'logging.info', 'logging.info', (['f"""attempting to download artifacts for {uuid}"""'], {}), "(f'attempting to download artifacts for {uuid}')\n", (16763, 16811), False, 'import logging\n'), ((1597, 1639), 'os.path.exists', 'os.path.exists', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (1611, 1639), False, 'import os, os.path\n'), ((3061, 3100), 'os.path.exists', 'os.path.exists', (['self.last_api_call_path'], {}), '(self.last_api_call_path)\n', (3075, 3100), False, 'import os, os.path\n'), ((3894, 3934), 'os.path.isdir', 'os.path.isdir', (['self.artifact_storage_dir'], {}), '(self.artifact_storage_dir)\n', (3907, 3934), False, 'import os, os.path\n'), ((3948, 4001), 'os.makedirs', 'os.makedirs', (['self.artifact_storage_dir'], {'exist_ok': '(True)'}), '(self.artifact_storage_dir, exist_ok=True)\n', (3959, 4001), False, 'import os, os.path\n'), ((5533, 5635), 'logging.debug', 'logging.debug', (['f"""last_api_call is empty so defaulting to 48 hours ago: {self.last_api_call}"""'], {}), "(\n f'last_api_call is empty so defaulting to 48 hours ago: {self.last_api_call}'\n )\n", (5546, 5635), False, 'import logging\n'), ((6273, 6307), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'duration'}), '(hours=duration)\n', (6291, 6307), False, 'import datetime\n'), ((14760, 14803), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (14775, 14803), False, 'import sqlite3\n'), ((15277, 15320), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (15292, 15320), False, 'import sqlite3\n'), ((15665, 15708), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (15680, 15708), False, 
'import sqlite3\n'), ((16550, 16606), 'logging.info', 'logging.info', (['f"""already downloaded artifacts for {uuid}"""'], {}), "(f'already downloaded artifacts for {uuid}')\n", (16562, 16606), False, 'import logging\n'), ((2562, 2605), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (2577, 2605), False, 'import sqlite3\n'), ((2760, 2830), 'logging.debug', 'logging.debug', (['f"""currently tracking {row[0]} alert uuids from fireeye"""'], {}), "(f'currently tracking {row[0]} alert uuids from fireeye')\n", (2773, 2830), False, 'import logging\n'), ((3410, 3473), 'logging.error', 'logging.error', (['f"""unable to load {self.last_api_call_path}: {e}"""'], {}), "(f'unable to load {self.last_api_call_path}: {e}')\n", (3423, 3473), False, 'import logging\n'), ((3486, 3504), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (3502, 3504), False, 'from saq.error import report_exception\n'), ((4852, 4943), 'logging.error', 'logging.error', (['f"""unable to save last_api_call value to {self.last_api_call_path}: {e}"""'], {}), "(\n f'unable to save last_api_call value to {self.last_api_call_path}: {e}')\n", (4865, 4943), False, 'import logging\n'), ((4951, 4969), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (4967, 4969), False, 'from saq.error import report_exception\n'), ((5244, 5256), 'saq.util.local_time', 'local_time', ([], {}), '()\n', (5254, 5256), False, 'from saq.util import local_time, format_iso8601\n'), ((5477, 5489), 'saq.util.local_time', 'local_time', ([], {}), '()\n', (5487, 5489), False, 'from saq.util import local_time, format_iso8601\n'), ((5492, 5520), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(48)'}), '(hours=48)\n', (5510, 5520), False, 'import datetime\n'), ((6745, 6788), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (6760, 6788), False, 'import sqlite3\n'), ((7029, 7077), 
'logging.debug', 'logging.debug', (['f"""already processed alert {uuid}"""'], {}), "(f'already processed alert {uuid}')\n", (7042, 7077), False, 'import logging\n'), ((7149, 7225), 'logging.error', 'logging.error', (['f"""unable to check fireeye alert processed status {uuid}: {e}"""'], {}), "(f'unable to check fireeye alert processed status {uuid}: {e}')\n", (7162, 7225), False, 'import logging\n'), ((7238, 7256), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (7254, 7256), False, 'from saq.error import report_exception\n'), ((7448, 7491), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (7463, 7491), False, 'import sqlite3\n'), ((7765, 7829), 'logging.error', 'logging.error', (['f"""unable to track fireeye alert uuid {uuid}: {e}"""'], {}), "(f'unable to track fireeye alert uuid {uuid}: {e}')\n", (7778, 7829), False, 'import logging\n'), ((7842, 7860), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (7858, 7860), False, 'from saq.error import report_exception\n'), ((7999, 8042), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (8014, 8042), False, 'import sqlite3\n'), ((8328, 8392), 'logging.error', 'logging.error', (['f"""unable to track fireeye alert uuid {uuid}: {e}"""'], {}), "(f'unable to track fireeye alert uuid {uuid}: {e}')\n", (8341, 8392), False, 'import logging\n'), ((8405, 8423), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (8421, 8423), False, 'from saq.error import report_exception\n'), ((9429, 9498), 'logging.debug', 'logging.debug', (['f"""skipping alert {alert[\'uuid\']} -- already processed"""'], {}), '(f"skipping alert {alert[\'uuid\']} -- already processed")\n', (9442, 9498), False, 'import logging\n'), ((17173, 17228), 'os.path.join', 'os.path.join', (['self.artifact_storage_dir', 'f"""{uuid}_temp"""'], {}), "(self.artifact_storage_dir, f'{uuid}_temp')\n", 
(17185, 17228), False, 'import os, os.path\n'), ((17248, 17274), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (17262, 17274), False, 'import os, os.path\n'), ((17429, 17449), 'os.mkdir', 'os.mkdir', (['output_dir'], {}), '(output_dir)\n', (17437, 17449), False, 'import os, os.path\n'), ((19628, 19695), 'logging.error', 'logging.error', (['f"""unable to download artifacts for uuid {uuid}: {e}"""'], {}), "(f'unable to download artifacts for uuid {uuid}: {e}')\n", (19641, 19695), False, 'import logging\n'), ((19708, 19726), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (19724, 19726), False, 'from saq.error import report_exception\n'), ((1679, 1722), 'sqlite3.connect', 'sqlite3.connect', (['self.alert_uuid_cache_path'], {}), '(self.alert_uuid_cache_path)\n', (1694, 1722), False, 'import sqlite3\n'), ((2344, 2412), 'logging.error', 'logging.error', (['f"""unable to create {self.alert_uuid_cache_path}: {e}"""'], {}), "(f'unable to create {self.alert_uuid_cache_path}: {e}')\n", (2357, 2412), False, 'import logging\n'), ((2429, 2447), 'saq.error.report_exception', 'report_exception', ([], {}), '()\n', (2445, 2447), False, 'from saq.error import report_exception\n'), ((3292, 3366), 'logging.debug', 'logging.debug', (['f"""loaded {self._last_api_call} as last_api_call epoch time"""'], {}), "(f'loaded {self._last_api_call} as last_api_call epoch time')\n", (3305, 3366), False, 'import logging\n'), ((3538, 3572), 'os.remove', 'os.remove', (['self.last_api_call_path'], {}), '(self.last_api_call_path)\n', (3547, 3572), False, 'import os, os.path\n'), ((5209, 5240), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': 'hours'}), '(hours=hours)\n', (5227, 5240), False, 'import datetime\n'), ((6018, 6093), 'logging.warning', 'logging.warning', (['f"""fireeye returned {e.response.status_code} (unavailable)"""'], {}), "(f'fireeye returned {e.response.status_code} (unavailable)')\n", (6033, 6093), False, 'import logging\n'), 
((14244, 14313), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['alert[KEY_OCCURRED]', 'event_time_format_tz'], {}), '(alert[KEY_OCCURRED], event_time_format_tz)\n', (14270, 14313), False, 'import datetime\n'), ((17296, 17366), 'logging.warning', 'logging.warning', (['f"""output dir {output_dir} already exists -- deleting"""'], {}), "(f'output dir {output_dir} already exists -- deleting')\n", (17311, 17366), False, 'import logging\n'), ((17387, 17412), 'shutil.rmtree', 'shutil.rmtree', (['output_dir'], {}), '(output_dir)\n', (17400, 17412), False, 'import shutil\n'), ((18380, 18425), 'os.path.join', 'os.path.join', (['self.artifact_storage_dir', 'uuid'], {}), '(self.artifact_storage_dir, uuid)\n', (18392, 18425), False, 'import os, os.path\n'), ((18449, 18474), 'os.path.exists', 'os.path.exists', (['final_dir'], {}), '(final_dir)\n', (18463, 18474), False, 'import os, os.path\n'), ((17675, 17703), 'json.dump', 'json.dump', (['artifact_json', 'fp'], {}), '(artifact_json, fp)\n', (17684, 17703), False, 'import json\n'), ((18178, 18236), 'logging.info', 'logging.info', (['f"""recording artifact {file_name} for {uuid}"""'], {}), "(f'recording artifact {file_name} for {uuid}')\n", (18190, 18236), False, 'import logging\n'), ((18500, 18575), 'logging.warning', 'logging.warning', (['f"""final output dir {final_dir} already exists -- deleting"""'], {}), "(f'final output dir {final_dir} already exists -- deleting')\n", (18515, 18575), False, 'import logging\n'), ((18600, 18624), 'shutil.rmtree', 'shutil.rmtree', (['final_dir'], {}), '(final_dir)\n', (18613, 18624), False, 'import shutil\n'), ((18670, 18715), 'os.path.join', 'os.path.join', (['self.artifact_storage_dir', 'uuid'], {}), '(self.artifact_storage_dir, uuid)\n', (18682, 18715), False, 'import os, os.path\n'), ((15470, 15493), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15491, 15493), False, 'import datetime\n'), ((17596, 17637), 'os.path.join', 'os.path.join', 
(['output_dir', '"""artifact.json"""'], {}), "(output_dir, 'artifact.json')\n", (17608, 17637), False, 'import os, os.path\n'), ((18039, 18115), 'logging.warning', 'logging.warning', (['f"""artifact file {file_name} does not exist in {output_dir}"""'], {}), "(f'artifact file {file_name} does not exist in {output_dir}')\n", (18054, 18115), False, 'import logging\n'), ((7656, 7679), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7677, 7679), False, 'import datetime\n'), ((17973, 18008), 'os.path.join', 'os.path.join', (['output_dir', 'file_name'], {}), '(output_dir, file_name)\n', (17985, 18008), False, 'import os, os.path\n'), ((8186, 8209), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8207, 8209), False, 'import datetime\n'), ((8212, 8240), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(48)'}), '(hours=48)\n', (8230, 8240), False, 'import datetime\n')]
|
from cfgm_common.exceptions import NoIdError
from schema_transformer.resources._resource_base import ResourceBaseST
class VirtualPortGroupST(ResourceBaseST):
    """Schema-transformer state object for a virtual-port-group (VPG).

    Tracks the VMI refs of VPGs created internally for SR-IOV VMs
    (annotated with ``usage == 'sriov-vm'``) and garbage-collects them:
    once such a VPG no longer has any valid VMI, its fabric VMI refs and
    physical-interface refs are dropped and the VPG itself is deleted
    from the API server.
    """
    _dict = {}
    obj_type = 'virtual_port_group'
    ref_fields = ['virtual_machine_interface']
    prop_fields = ['annotations']

    @classmethod
    def reinit(cls):
        """Rebuild the in-memory cache from the API server after a restart.

        Only internally created SR-IOV VPGs are re-tracked and re-evaluated.
        """
        for obj in cls.list_vnc_obj():
            if not obj.annotations:
                continue
            for kvp in obj.annotations.key_value_pair:
                # Fix: check the same annotation value that evaluate()
                # looks for ('sriov-vm'); the previous '<KEY>' placeholder
                # could never match, so nothing was ever re-tracked.
                if kvp.key == 'usage' and kvp.value == 'sriov-vm':
                    st_obj = cls.locate(obj.get_fq_name_str(), obj)
                    st_obj.evaluate()
    # end reinit

    def __init__(self, name, obj=None):
        """Cache the vnc object and its VMI refs; remember the object uuid."""
        self.name = name
        self.uuid = None
        self.virtual_machine_interfaces = set()
        self.update(obj)
        self.uuid = self.obj.uuid
    # end __init__

    def update(self, obj=None):
        """Refresh cached fields from the vnc object.

        :returns: the set/list of changed field names reported by
            update_vnc_obj().
        """
        changed = self.update_vnc_obj(obj)
        if 'annotations' in changed:
            self.set_annotations()
        return changed
    # end update

    def delete_obj(self):
        """Drop all fabric VMI refs and physical-interface refs of this VPG."""
        for fabric_vmi_st_name in self.virtual_machine_interfaces:
            fabric_vmi_st = \
                ResourceBaseST.get_obj_type_map() \
                              .get('virtual_machine_interface') \
                              .get(fabric_vmi_st_name)
            self.delete_fabric_vmi_ref(fabric_vmi_st)
        self.delete_physical_interface_ref()
    # end delete_obj

    def evaluate(self, **kwargs):
        """Sanitize SR-IOV VPGs, then delete this VPG if it has no VMIs left."""
        if getattr(self, 'annotations', {}).get("usage", "") == "sriov-vm":
            self.sanitize_fabric_vmis()
        if not self.is_valid():
            self.delete_self_db_obj()
    # end evaluate

    def sanitize_fabric_vmis(self):
        """Remove fabric VMI refs that no longer back any valid VMI.

        A fabric VMI is kept only if its virtual network still contains at
        least one other VMI whose physical interface belongs to this VPG.
        """
        self._logger.debug("Starts sanitizing "
                           "vpg's (%s) fabric vmis" % self.name)
        for fabric_vmi_st_name in self.virtual_machine_interfaces:
            fabric_vmi_st = \
                ResourceBaseST.get_obj_type_map() \
                              .get('virtual_machine_interface') \
                              .get(fabric_vmi_st_name)
            if fabric_vmi_st is None:
                continue
            vn_st = \
                ResourceBaseST.get_obj_type_map() \
                              .get('virtual_network') \
                              .get(fabric_vmi_st.virtual_network)
            if vn_st is None:
                continue
            vn_st.virtual_port_groups.add(self.name)
            no_valid_vmi_under_fabric_vmi = True
            for vmi_st_name in vn_st.virtual_machine_interfaces:
                if vmi_st_name != fabric_vmi_st_name:
                    vmi_st = \
                        ResourceBaseST.get_obj_type_map() \
                                      .get('virtual_machine_interface') \
                                      .get(vmi_st_name)
                    if vmi_st is None:
                        continue
                    # The sibling VMI validates the fabric VMI only when its
                    # PI is one of the PIs referenced by this VPG.
                    if vmi_st.get_pi_uuid() in \
                            self.get_uuids(self.obj.get_physical_interface_refs()):
                        no_valid_vmi_under_fabric_vmi = False
                        break
            if no_valid_vmi_under_fabric_vmi:
                self.delete_fabric_vmi_ref(fabric_vmi_st)
        self._logger.debug("Finshed sanitizing "
                           "vpg's (%s) fabric vmis" % self.name)
    # end sanitize_fabric_vmis

    def is_valid(self):
        """Return True if, after a refresh, this VPG still has VMI refs."""
        self.update(None)
        if len(self.virtual_machine_interfaces) == 0:
            return False
        return True
    # end is_valid

    def delete_self_db_obj(self):
        """Delete this VPG (and its refs) from the API server."""
        self._logger.debug("Starts deleting vpg db object %s" % self.name)
        try:
            # no need to manually delete fabric VMI,
            # since delete_obj will run when VPG deletion event is caught
            self.delete_obj()
            self._vnc_lib.virtual_port_group_delete(id=self.uuid)
        except NoIdError:
            # Already gone -- deletion is idempotent.
            pass
        self._logger.debug("Finished deleting vpg db object %s" % self.name)
    # end delete_self_db_obj

    def delete_fabric_vmi_ref(self, fabric_vmi_st):
        """Dereference *fabric_vmi_st* from this VPG.

        Also deletes the fabric VMI from the API server when no other VPG
        still back-references it.
        """
        if fabric_vmi_st is not None:
            vn_st = \
                ResourceBaseST.get_obj_type_map() \
                              .get('virtual_network') \
                              .get(fabric_vmi_st.virtual_network)
            if vn_st is not None:
                if self.name in vn_st.virtual_port_groups:
                    vn_st.virtual_port_groups.remove(self.name)
            fabric_vmi_uuid = fabric_vmi_st.uuid
            if fabric_vmi_uuid is not None:
                try:
                    self._vnc_lib.ref_update(
                        'virtual-port-group', self.uuid,
                        'virtual-machine-interface', fabric_vmi_uuid,
                        None, 'DELETE')
                    fabric_vmi = self._vnc_lib \
                        .virtual_machine_interface_read(
                            id=fabric_vmi_uuid)
                    fabric_vmi_vpg_back_refs = \
                        fabric_vmi.get_virtual_port_group_back_refs()
                    if fabric_vmi_vpg_back_refs is None or \
                            len(fabric_vmi_vpg_back_refs) == 0:
                        self._vnc_lib \
                            .virtual_machine_interface_delete(
                                id=fabric_vmi_uuid)
                except NoIdError:
                    pass
                except Exception as e:
                    msg = ("Unexpected error during "
                           "dereferencing fabric vmi %s: %s"
                           % (fabric_vmi_st.name, str(e)))
                    self._logger.error(msg)
                    self.add_ignored_error(msg)
    # end delete_fabric_vmi_ref

    def delete_physical_interface_ref(self):
        """Dereference every physical interface from this VPG."""
        # Since internal created sriov vpg only refers one pi
        # We simply delete all pi refs
        for pi_uuid in self.get_uuids(self.obj.get_physical_interface_refs()):
            try:
                self._vnc_lib.ref_update(
                    'virtual-port-group', self.uuid,
                    'physical-interface', pi_uuid,
                    None, 'DELETE')
            except NoIdError:
                pass
            except Exception as e:
                msg = ("Unexpected error during "
                       "dereferencing "
                       "pyhsical interface %s: %s"
                       % (pi_uuid, str(e)))
                self._logger.error(msg)
                self.add_ignored_error(msg)
    # end delete_physical_interface_ref

    def get_uuids(self, items):
        """Return the uuids contained in *items* as a list.

        *items* may be None, a list of ref dicts, or a dict whose first
        value is a list of ref dicts (the shapes returned by vnc ref /
        back-ref getters). Always returns a list, possibly empty.
        """
        if items is None:
            return []
        if isinstance(items, list):
            return [item['uuid'] for item in items]
        if isinstance(items, dict) and len(items.keys()) > 0:
            return [item['uuid'] for item in
                    items.get(list(items.keys())[0], [])]
        # Fix: previously fell through and implicitly returned None for
        # empty dicts or unexpected types, breaking callers that iterate
        # over the result (e.g. delete_physical_interface_ref).
        return []
    # end get_uuids

    def set_annotations(self):
        """Cache the vnc object's annotations as a plain dict."""
        self.annotations = self.kvps_to_dict(self.obj.get_annotations())
        return
    # end set_bindings

    def kvps_to_dict(self, kvps):
        """Convert a KeyValuePairs object to a plain {key: value} dict."""
        dictionary = dict()
        if not kvps:
            return dictionary
        for kvp in kvps.get_key_value_pair():
            dictionary[kvp.get_key()] = kvp.get_value()
        return dictionary
    # end kvps_to_dict
# end class VirtualPortGroupST
|
[
"schema_transformer.resources._resource_base.ResourceBaseST.get_obj_type_map"
] |
[((1219, 1252), 'schema_transformer.resources._resource_base.ResourceBaseST.get_obj_type_map', 'ResourceBaseST.get_obj_type_map', ([], {}), '()\n', (1250, 1252), False, 'from schema_transformer.resources._resource_base import ResourceBaseST\n'), ((2007, 2040), 'schema_transformer.resources._resource_base.ResourceBaseST.get_obj_type_map', 'ResourceBaseST.get_obj_type_map', ([], {}), '()\n', (2038, 2040), False, 'from schema_transformer.resources._resource_base import ResourceBaseST\n'), ((2265, 2298), 'schema_transformer.resources._resource_base.ResourceBaseST.get_obj_type_map', 'ResourceBaseST.get_obj_type_map', ([], {}), '()\n', (2296, 2298), False, 'from schema_transformer.resources._resource_base import ResourceBaseST\n'), ((4254, 4287), 'schema_transformer.resources._resource_base.ResourceBaseST.get_obj_type_map', 'ResourceBaseST.get_obj_type_map', ([], {}), '()\n', (4285, 4287), False, 'from schema_transformer.resources._resource_base import ResourceBaseST\n'), ((2754, 2787), 'schema_transformer.resources._resource_base.ResourceBaseST.get_obj_type_map', 'ResourceBaseST.get_obj_type_map', ([], {}), '()\n', (2785, 2787), False, 'from schema_transformer.resources._resource_base import ResourceBaseST\n')]
|
import inspect
from collections import namedtuple
ValueFinder = namedtuple("ValueFinder", ["locator", "key", "value"])
def get_attribute_paths_containing_string(target_object, target_str, search_exact_word=False):
    """
    Search the attributes of *target_object* (via inspect.getmembers) for ones
    whose string form contains *target_str*.

    :param target_object: object to inspect during runtime
    :param target_str: :type str: string to search for
    :param search_exact_word: :type bool: if True, only values containing nothing but the find_value will be returned, else any value just containing the str will be returned.
    :return: a namedtuple(locator, value). Running exec or eval on the locator in the same namespace should return the object containing the target_str.
    """
    members = inspect.getmembers(target_object)
    if search_exact_word:
        # finer-grained search that matches by full word, wrap the string in single quotes i.e. "\'yourstring\'"
        # NOTE(review): the adjacent string literals in the locator below
        # concatenate, so the locator text embeds `" + target_str + "`
        # literally rather than interpolating the actual search value --
        # looks unintended; confirm before eval'ing these locators.
        matches = [ValueFinder("[x for x in members if ("'" + target_str + "'") in str(x)]", target_str, x)
                   for x in members if ("\'" + target_str + "\'") in str(x)]
    else:
        # coarse-grained search. check if target_str is anywhere in the attribute
        # NOTE(review): this locator is a bare generator expression without
        # parentheses; eval'ing it as-is would be a SyntaxError -- confirm.
        matches = [ValueFinder("x for x in inspect.getmembers(target_object) if target_str in str(x)", target_str, x)
                   for x in inspect.getmembers(target_object) if target_str in str(x)]
    return matches
def get_paths_containing_string_in_locals(target_str, locals_dict, locals_dict_ref_str="locals()"):
    """
    Scan a locals() namespace for public entries whose string form contains *target_str*.

    :param target_str: string to search for
    :param locals_dict: the locals() dictionary. In most cases, this should just be locals()
    :param locals_dict_ref_str: the string representation of the locals_dict argument, used to build the locator path of each match.
    :return: list of ValueFinder(locator, key, value). Running exec or eval on a locator in the same namespace should return the object containing the target_str.
    """
    found = []
    for name, value in locals_dict.items():
        try:
            # Match on the stringified value, then skip private (_-prefixed) names.
            if target_str in str(value) and name[0] != '_':
                locator = "{0}['{1}']".format(locals_dict_ref_str, str(name))
                found.append(ValueFinder(locator, name, str(value)))
        except AttributeError:
            # Expected now and then: we blindly stringify arbitrary values
            # while looking for simple substring matches.
            continue
    return found
def get_paths_containing_string_in_threadstack(target_str, stack_context=2):
    """
    Get paths to datastructures containing string in the threadstack. This excludes stackframes in modules external to the project
    :param target_str: string to search for
    :param stack_context: maximum outer scope of stacks to check
    :return: a namedtuple(locator, value). Running exec or eval on the locator in the same namespace should return the object containing the target_str.
    """
    frames = []
    # Walk the caller frames (skipping debugger "pydev" frames) and search
    # each frame's locals. The locals_dict_ref_str passed here is itself a
    # code string re-deriving the frame list, so that returned locators can
    # be re-evaluated later in the caller's context.
    # NOTE(review): the embedded locator hard-codes stack_context=2 instead
    # of the actual stack_context argument -- confirm this is intended.
    list_of_frames = [
        get_paths_containing_string_in_locals(target_str,
                                              val.frame.f_locals,
                                              locals_dict_ref_str="[get_paths_containing_string_in_locals(target_str, val.frame.f_locals) for idx, "
                                                                  "val in enumerate(inspect.getouterframes(inspect.currentframe(), "
                                                                  "2)) if \"pydev\" not in val.filename]")
        for idx, val in enumerate(inspect.getouterframes(inspect.currentframe(), stack_context))
        if "pydev" not in val.filename]
    # Flatten the per-frame match lists into a single result list.
    for x in list_of_frames:
        for y in x:
            frames.append(y)
    return frames
def get_all_paths_containing_string(target_str, locals_dict, other_objects_to_inspect=None):
    """
    Aggregate every locator for *target_str* across locals(), all project
    stackframes, and any extra objects supplied.

    :param target_str: the value you want to find
    :param locals_dict: a dictionary of locals(). Probably most cases, passing 'locals()'
    :param other_objects_to_inspect: :type list: (optional) any objects outside of locals or frames that you want to inspect for values
    :return: list of namedtuples(locator, key, find_value). An example of this is the ValueFinder tuple.
    :chains_into: __eval_all_locators
    """
    matches = list(get_paths_containing_string_in_locals(target_str, locals_dict))
    matches += get_paths_containing_string_in_threadstack(target_str)
    if other_objects_to_inspect:
        for candidate in other_objects_to_inspect:
            matches += get_attribute_paths_containing_string(candidate, target_str)
    return matches
def get_all_categorized_paths_containing_string(target_str, locals_dict, other_objects_to_inspect=None):
    """
    Exactly the same results as get_all_paths_containing_string, but grouped
    by where each match was found.

    :param target_str: the value you want to find
    :param locals_dict: a dictionary of locals(). Probably most cases, passing 'locals()'
    :param other_objects_to_inspect: :type list: (optional) any objects outside of locals or frames that you want to inspect for values
    :return: namedtuple(locals, frames, inspections), each field holding
        namedtuples(locator, key, find_value) such as ValueFinder.
    :chains_into: __eval_all_locators, e.g. a raw eval can be applied(result.locals[0].locator)
    """
    matches_found = namedtuple("matches_found", ["locals", "frames", "inspections"])
    from_locals = get_paths_containing_string_in_locals(target_str, locals_dict=locals_dict)
    from_frames = get_paths_containing_string_in_threadstack(target_str)
    from_inspections = []
    if other_objects_to_inspect:
        for candidate in other_objects_to_inspect:
            from_inspections += get_attribute_paths_containing_string(candidate, target_str)
    return matches_found(locals=from_locals, frames=from_frames,
                         inspections=from_inspections)
def __eval_all_locators(input_list, return_exec=False, return_exec_name="evaluated_locators"):
    """
    :param input_list: :type list of namedtuple(locator,key,value). An example of this is the ValueFinder tuple
    :param return_exec: :type boolean: flag for whether to return a code string that can be run through exec(*)
    :return: If return_executable is false, returns a list of all the locators run. This often returns the actual
    object that the string was found in. if return_executable is true, this function runs nothing and just returns a
    string of code that can be run as an arg to the exec function.
    After running the exec function on this arg, a variable called evaluated_locators will be referenceable through
    the locals dictionary using return_exec_name's actual value as the key i.e. by default, locals()['evaluated_locators']
    """
    # Build a small program that evals every locator in input_list and
    # collects the results in a list named return_exec_name.
    executable_code = return_exec_name + " = []\n" \
                                         "for x in " + repr(input_list) + ":\n" \
                                         "    " + return_exec_name + ".append(eval(x.locator))"
    try:
        if not return_exec:
            # Run the generated code here; the locators must be resolvable
            # from THIS function's scope for this to succeed.
            exec(executable_code)
            return locals()[return_exec_name]
    except KeyError:
        import traceback
        traceback.print_last()
        print("Key not found in this scope. "
              "Consider using this function with the return_exec flag instead to run the function in the proper scope.")
    else:
        # No exception and return_exec is truthy: hand back the code string
        # so the caller can exec() it in its own (proper) scope.
        return executable_code
def get_all_paths_containing_string_in_nested_objects(object_ut, target_str, _result, max_depth=2, _path_string="", _current_depth=0):
    """
    Search the attributes of an object for target_str and the attributes of those attributes up to max_depth.
    :param object_ut: object under test. The object to inspect for the target_str
    :param target_str: string to search for
    :param _result: Use result=[] unlesss you require advanced usage. This holds a running tally of results through
    recursive cycles. Setting this to [] in the signature would change behaviour
    due to how python handles variable-defaults defined in the signature during recursion.
    :param max_depth: max depth to recursively search attributes of attributes
    :param _path_string: Internal. This holds a running tally of the datastructure's path through recursive cycles.
    :param _current_depth: Internal. This holds a running tally of the investigation-depth through recursive cycles.
    :return: a namedtuple(locator, value) of all objects containing target_str. Running exec or eval on the locator in
    the same namespace should return the object containing the target_str.

    WARNING: zero-argument callables (other than the excluded destructive
    ones) are CALLED to obtain their string form -- side effects possible.
    """
    tuple_inspected = inspect.getmembers(object_ut)
    # Callables we must never invoke while probing (they mutate the object).
    destructive_callables = ["__clear__", "__setattr__", "__init__", "__init_subclass__", "__delattr__", "__call__"]
    path_value = namedtuple("path_value", ["locator", "value"])
    _current_depth += 1
    if _current_depth > max_depth:
        return _result
    if hasattr(tuple_inspected, "__iter__"):
        for v in tuple_inspected:
            try:
                # Re-resolve the attribute through the accumulated path so the
                # locator string stays consistent with how it will be eval'd.
                attr = getattr(eval("object_ut" + _path_string), v[0])
                if callable(attr) and (v[0] not in destructive_callables):
                    postfix = "()"
                    candidate_str = str(attr())
                else:
                    postfix = ""
                    candidate_str = str(attr)
                if target_str in candidate_str:
                    _result.append(path_value(_path_string + "." + str(v[0]) + postfix, str(v[1]) + postfix))
                if (_current_depth + 1) <= max_depth:
                    # Recurse into the attribute, extending the path string.
                    _result = get_all_paths_containing_string_in_nested_objects(attr, target_str, _result=_result,
                                                                               _path_string=_path_string + "." + v[0] + postfix,
                                                                               _current_depth=_current_depth)
            except:
                # Many exceptions can be expected here
                # as this evaluates almost all attributes of a given object without knowing much about them.
                pass
    return _result
|
[
"inspect.currentframe",
"traceback.print_last",
"collections.namedtuple",
"inspect.getmembers"
] |
[((65, 119), 'collections.namedtuple', 'namedtuple', (['"""ValueFinder"""', "['locator', 'key', 'value']"], {}), "('ValueFinder', ['locator', 'key', 'value'])\n", (75, 119), False, 'from collections import namedtuple\n'), ((691, 724), 'inspect.getmembers', 'inspect.getmembers', (['target_object'], {}), '(target_object)\n', (709, 724), False, 'import inspect\n'), ((5832, 5896), 'collections.namedtuple', 'namedtuple', (['"""matches_found"""', "['locals', 'frames', 'inspections']"], {}), "('matches_found', ['locals', 'frames', 'inspections'])\n", (5842, 5896), False, 'from collections import namedtuple\n'), ((9232, 9261), 'inspect.getmembers', 'inspect.getmembers', (['object_ut'], {}), '(object_ut)\n', (9250, 9261), False, 'import inspect\n'), ((9396, 9442), 'collections.namedtuple', 'namedtuple', (['"""path_value"""', "['locator', 'value']"], {}), "('path_value', ['locator', 'value'])\n", (9406, 9442), False, 'from collections import namedtuple\n'), ((7784, 7806), 'traceback.print_last', 'traceback.print_last', ([], {}), '()\n', (7804, 7806), False, 'import traceback\n'), ((1289, 1322), 'inspect.getmembers', 'inspect.getmembers', (['target_object'], {}), '(target_object)\n', (1307, 1322), False, 'import inspect\n'), ((3690, 3712), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (3710, 3712), False, 'import inspect\n')]
|
"""
"""
from dataclasses import dataclass, field
import itertools
from OpenGL.GL.shaders import compileProgram, compileShader
from OpenGL.raw.GL.VERSION.GL_1_0 import glColor3f, glPointSize
from OpenGL.raw.GL.VERSION.GL_1_1 import GL_POINTS, GL_LINES
from OpenGL.raw.GL.VERSION.GL_2_0 import (
GL_VERTEX_SHADER, GL_FRAGMENT_SHADER,
glUseProgram
)
import pathlib
import pyglet
import pymunk
from pymunk import Vec2d, Space
from warp_grid.flag import Flag3D
@dataclass
class WarpMapPyMunk(object):
    """Spring-mass "web" simulated with pymunk and drawn with pyglet/OpenGL.

    Builds a w x h lattice of bodies connected by damped springs, pins the
    border bodies to static attach points, and renders the deformed grid
    either as debug primitives or as a textured flag via a GLSL shader.
    """
    space: Space  # pymunk space the bodies/shapes/constraints are added to
    h: int = 13  # grid rows
    w: int = 11  # grid columns
    size: int = 32  # spacing between neighbouring grid bodies
    bs: list = field(default_factory=list)  # dynamic grid bodies, row-major order
    static_bs: list = field(default_factory=list)  # static bodies pinning the border
    def __post_init__(self):
        """Create bodies, springs, border pins and the rendering resources."""
        web_group = 1
        for y in range(self.h):
            for x in range(self.w):
                b = pymunk.Body(1, 1)
                b.position = Vec2d(x, y) * self.size
                b.velocity_func = self.constant_velocity
                s = pymunk.Circle(b, 15)
                # Same shape-filter group: web circles never collide with each other.
                s.filter = pymunk.ShapeFilter(group=web_group)
                s.ignore_draw = True
                self.space.add(b, s)
                self.bs.append(b)
        stiffness = 5000. * 0.1
        damping = 100 * 0.1
        def add_joint(a, b):
            # Rest length slightly below the current spacing keeps the web taut.
            rl = a.position.get_distance(b.position) * 0.9
            j = pymunk.DampedSpring(a, b, (0, 0), (0, 0),
                                rl, stiffness, damping)
            j.max_bias = 1000
            self.space.add(j)
        # Connect each interior body to its 4 orthogonal neighbours;
        # (x - xi) * (y - yi) == 0 excludes the diagonals.
        for y in range(1, self.h - 1):
            for x in range(1, self.w - 1):
                bs_xy = self.bs[x + y * self.w]
                for yi in range(y - 1, y + 2):
                    for xi in range(x - 1, x + 2):
                        if (xi, yi) != (x, y) and ((x - xi) * (y - yi) == 0):
                            add_joint(bs_xy, self.bs[xi + yi * self.w])
        print("len(self.space.constraints): {}".format(
            len(self.space.constraints)))
        # ATTACH POINTS
        def _static_point(b):
            # Pin body b in place with a pivot joint to a new static body.
            static_body = pymunk.Body(body_type=pymunk.Body.STATIC)
            static_body.position = b.position
            self.static_bs.append(static_body)
            j = pymunk.PivotJoint(static_body, b, static_body.position)
            j.damping = 100
            j.stiffness = 20000
            self.space.add(j)
        self.static_bs = []
        # first and last rows
        for x in range(self.w):
            _static_point(self.bs[x])
            _static_point(self.bs[x + (self.h - 1) * self.w])
        # first and last cols
        for y in range(self.h):
            _static_point(self.bs[y * self.w])
            _static_point(self.bs[(self.w - 1) + y * self.w])
        cooling_map_img = pyglet.image.load(
            pathlib.Path('datas/cooling_map.png'))
        # self.cooling_map_sprite = pyglet.sprite.Sprite(img=cooling_map_img)
        self.cooling_map_tex = cooling_map_img.get_texture()
        x = (self.w * 4) * 2.0
        y = (self.h * 4) * 2.0
        # Quad vertices; each vertex is 7 floats: x, y, then three 1.0s
        # (presumably r,g,b -- unused by the slices below), then u, v.
        vlist_arr = [
            0, 0, 1.0, 1.0, 1.0, 0, 0,
            x, 0, 1.0, 1.0, 1.0, 1, 0,
            0, y, 1.0, 1.0, 1.0, 0, 1,
            x, y, 1.0, 1.0, 1.0, 1, 1,
        ]
        # Strided slices pick positions ([0],[1] of each 7-tuple) and
        # texcoords ([5],[6]) and interleave them for pyglet.
        self.vlist = pyglet.graphics.vertex_list(
            4,
            ('v2f',
             list(itertools.chain(*zip(vlist_arr[::7], vlist_arr[1::7])))),
            ('t2f',
             list(itertools.chain(*zip(vlist_arr[5::7], vlist_arr[6::7]))))
        )
        # http://io7m.com/documents/fso-tta/
        self.vertex_shader_source = """
            #version 130

            out vec2 vTexCoord;

            void
            main() {
              gl_Position = gl_ModelViewProjectionMatrix * gl_Vertex;
              vTexCoord = vec2(gl_MultiTexCoord0);
            }
        """
        self.fragment_shader_source = """
            #version 130

            uniform sampler2D tex0;

            in vec2 vTexCoord;
            out vec4 fragColor;

            void main() {
                fragColor = texture(tex0, vTexCoord) * 8;
            }
        """
        self.shader = compileProgram(
            compileShader(self.vertex_shader_source, GL_VERTEX_SHADER),
            compileShader(self.fragment_shader_source, GL_FRAGMENT_SHADER),
        )
        self.flag = Flag3D()
    @staticmethod
    def constant_velocity(body: pymunk.Body, _gravity, _damping, _dt):
        """pymunk velocity callback: keep each body at constant speed 75."""
        body_velocity_normalized = body.velocity.normalized()
        # Re-scale the (normalized) velocity to a fixed magnitude.
        body.velocity = body_velocity_normalized * 75
    def get_web_crossings(self):
        """Return the [x, y] position of every grid body, row-major."""
        return [
            [b.position.x, b.position.y]
            for b in self.bs
        ]
    def draw_debug(self,
                   draw_flag=False,
                   draw_static_attach_points=False,
                   draw_web_crossings=True,
                   draw_web_constraints=False, ):
        """Render the web; each flag toggles one visualization layer."""
        if draw_flag:
            # self.vao._draw_frame()
            # Feed the current body positions to the flag mesh, then draw
            # it with the texture shader bound.
            self.flag.update(self.bs)
            glUseProgram(self.shader)
            self.flag.draw()
            glUseProgram(0)
        # static attach points
        if draw_static_attach_points:
            glColor3f(1, 0, 1)
            glPointSize(6)
            a = []
            for b in self.static_bs:
                a += [b.position.x, b.position.y]
            pyglet.graphics.draw(len(a) // 2, GL_POINTS, ('v2f', a))
        # web crossings / bodies
        if draw_web_crossings:
            glColor3f(.1, .8, .05)
            a = []
            for b in self.bs:
                a += [b.position.x, b.position.y]
            glPointSize(4)
            pyglet.graphics.draw(len(a) // 2, GL_POINTS, ('v2f', a))
        # web net / constraints
        if draw_web_constraints:
            a = []
            for j in self.space.constraints:
                # One line segment per constraint, endpoint per body.
                a += [j.a.position.x, j.a.position.y, j.b.position.x,
                      j.b.position.y]
            pyglet.graphics.draw(len(a) // 2, GL_LINES, ('v2f', a))
    def relax(self, dt):
        """Advance the physics simulation by dt seconds."""
        self.space.step(dt)
|
[
"pymunk.PivotJoint",
"OpenGL.raw.GL.VERSION.GL_1_0.glPointSize",
"warp_grid.flag.Flag3D",
"pymunk.DampedSpring",
"OpenGL.raw.GL.VERSION.GL_1_0.glColor3f",
"pymunk.Vec2d",
"dataclasses.field",
"pathlib.Path",
"pymunk.Circle",
"OpenGL.raw.GL.VERSION.GL_2_0.glUseProgram",
"OpenGL.GL.shaders.compileShader",
"pymunk.ShapeFilter",
"pymunk.Body"
] |
[((593, 620), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (598, 620), False, 'from dataclasses import dataclass, field\n'), ((643, 670), 'dataclasses.field', 'field', ([], {'default_factory': 'list'}), '(default_factory=list)\n', (648, 670), False, 'from dataclasses import dataclass, field\n'), ((4437, 4445), 'warp_grid.flag.Flag3D', 'Flag3D', ([], {}), '()\n', (4443, 4445), False, 'from warp_grid.flag import Flag3D\n'), ((1320, 1385), 'pymunk.DampedSpring', 'pymunk.DampedSpring', (['a', 'b', '(0, 0)', '(0, 0)', 'rl', 'stiffness', 'damping'], {}), '(a, b, (0, 0), (0, 0), rl, stiffness, damping)\n', (1339, 1385), False, 'import pymunk\n'), ((2041, 2082), 'pymunk.Body', 'pymunk.Body', ([], {'body_type': 'pymunk.Body.STATIC'}), '(body_type=pymunk.Body.STATIC)\n', (2052, 2082), False, 'import pymunk\n'), ((2193, 2248), 'pymunk.PivotJoint', 'pymunk.PivotJoint', (['static_body', 'b', 'static_body.position'], {}), '(static_body, b, static_body.position)\n', (2210, 2248), False, 'import pymunk\n'), ((2759, 2796), 'pathlib.Path', 'pathlib.Path', (['"""datas/cooling_map.png"""'], {}), "('datas/cooling_map.png')\n", (2771, 2796), False, 'import pathlib\n'), ((4270, 4328), 'OpenGL.GL.shaders.compileShader', 'compileShader', (['self.vertex_shader_source', 'GL_VERTEX_SHADER'], {}), '(self.vertex_shader_source, GL_VERTEX_SHADER)\n', (4283, 4328), False, 'from OpenGL.GL.shaders import compileProgram, compileShader\n'), ((4342, 4404), 'OpenGL.GL.shaders.compileShader', 'compileShader', (['self.fragment_shader_source', 'GL_FRAGMENT_SHADER'], {}), '(self.fragment_shader_source, GL_FRAGMENT_SHADER)\n', (4355, 4404), False, 'from OpenGL.GL.shaders import compileProgram, compileShader\n'), ((5110, 5135), 'OpenGL.raw.GL.VERSION.GL_2_0.glUseProgram', 'glUseProgram', (['self.shader'], {}), '(self.shader)\n', (5122, 5135), False, 'from OpenGL.raw.GL.VERSION.GL_2_0 import GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, glUseProgram\n'), ((5177, 5192), 
'OpenGL.raw.GL.VERSION.GL_2_0.glUseProgram', 'glUseProgram', (['(0)'], {}), '(0)\n', (5189, 5192), False, 'from OpenGL.raw.GL.VERSION.GL_2_0 import GL_VERTEX_SHADER, GL_FRAGMENT_SHADER, glUseProgram\n'), ((5275, 5293), 'OpenGL.raw.GL.VERSION.GL_1_0.glColor3f', 'glColor3f', (['(1)', '(0)', '(1)'], {}), '(1, 0, 1)\n', (5284, 5293), False, 'from OpenGL.raw.GL.VERSION.GL_1_0 import glColor3f, glPointSize\n'), ((5306, 5320), 'OpenGL.raw.GL.VERSION.GL_1_0.glPointSize', 'glPointSize', (['(6)'], {}), '(6)\n', (5317, 5320), False, 'from OpenGL.raw.GL.VERSION.GL_1_0 import glColor3f, glPointSize\n'), ((5578, 5603), 'OpenGL.raw.GL.VERSION.GL_1_0.glColor3f', 'glColor3f', (['(0.1)', '(0.8)', '(0.05)'], {}), '(0.1, 0.8, 0.05)\n', (5587, 5603), False, 'from OpenGL.raw.GL.VERSION.GL_1_0 import glColor3f, glPointSize\n'), ((5712, 5726), 'OpenGL.raw.GL.VERSION.GL_1_0.glPointSize', 'glPointSize', (['(4)'], {}), '(4)\n', (5723, 5726), False, 'from OpenGL.raw.GL.VERSION.GL_1_0 import glColor3f, glPointSize\n'), ((812, 829), 'pymunk.Body', 'pymunk.Body', (['(1)', '(1)'], {}), '(1, 1)\n', (823, 829), False, 'import pymunk\n'), ((961, 981), 'pymunk.Circle', 'pymunk.Circle', (['b', '(15)'], {}), '(b, 15)\n', (974, 981), False, 'import pymunk\n'), ((1009, 1044), 'pymunk.ShapeFilter', 'pymunk.ShapeFilter', ([], {'group': 'web_group'}), '(group=web_group)\n', (1027, 1044), False, 'import pymunk\n'), ((859, 870), 'pymunk.Vec2d', 'Vec2d', (['x', 'y'], {}), '(x, y)\n', (864, 870), False, 'from pymunk import Vec2d, Space\n')]
|
#
# Copyright (c) 2015 Autodesk Inc.
# All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
import logging
import ochopod
import pykka
import time
import uuid
from flask import Flask, request
from kazoo.exceptions import ConnectionClosedError, NodeExistsError
from kazoo.client import KazooClient, KazooState
from kazoo.recipe.lock import LockTimeout
from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM
from pykka import ThreadingFuture, Timeout
from threading import Event
#: Our ochopod logger
logger = logging.getLogger('ochopod')
#: Root zookeeper node path (under which we store the pod data for each cluster). This path will prefix any node
#: we read or write (including the lock).
ROOT = '/ochopod/clusters'
#: We use the same tick for all our state-machines (namely one second). This quantity can be scaled up or
#: down depending on the actor
SAMPLING = 1.0
class ZK(FSM):
    """
    Base layer dealing with zookeeper and in charge of writing the pod ephemeral node upon connection. The
    reset() state will by default loop back to initial() and properly de-allocate the kazoo driver. Once connected
    the machine will spin() until we raise something.
    Please note we support an explicit reset request which will trip the machine. This is used from the CLI to
    force a pod to completely disconnect/reconnect/reconfigure.
    """
    def __init__(self, brokers, scope, tag, breadcrumbs, hints):
        """
        :param brokers: list of zookeeper broker connection strings
        :param scope: cluster namespace (1st half of the zookeeper node prefix)
        :param tag: cluster identifier within the namespace (2nd half of the prefix)
        :param breadcrumbs: pod metadata serialized as JSON into our ephemeral node
        :param hints: shared dict updated in place (this actor maintains the 'state' key)
        """
        super(ZK, self).__init__()
        self.breadcrumbs = breadcrumbs
        # - connection flag, flipped from the kazoo listener thread via feedback()
        self.connected = 0
        self.brokers = brokers
        self.force_reset = 0
        self.hints = hints
        self.hints['state'] = 'follower'
        # - unique pod identifier used to name our ephemeral node under <prefix>/pods
        self.id = uuid.uuid4()
        self.prefix = '%s/%s.%s' % (ROOT, scope, tag)
        self.scope = scope
        # - zookeeper sequence counter, assigned once on first registration and kept across reconnects
        self.seq = None
        self.tag = tag
    def feedback(self, state):
        """Kazoo connection-state listener (invoked on the kazoo thread, not the actor thread)."""
        #
        # - forward the state change to the actor via a message
        # - the specialized() hook will process this safely
        #
        self.actor_ref.tell(
            {
                'request': 'state change',
                'state': state
            })
    def reset(self, data):
        """Tear the kazoo client down (if any) and loop back to initial() unless we are terminating."""
        self.connected = 0
        self.force_reset = 0
        self.hints['state'] = 'follower'
        logger.warning('%s : actor reset (%s)' % (self.path, data.cause))
        if hasattr(data, 'zk'):
            #
            # - gracefully shut our client down
            #
            data.zk.stop()
            logger.debug('%s : zk client stopped, releasing resources' % self.path)
            data.zk.close()
        if self.terminate:
            super(ZK, self).reset(data)
        return 'initial', data, 0
    def initial(self, data):
        """Allocate a fresh kazoo client and transition to wait_for_cnx()."""
        #
        # - setup a new kazoo client
        #
        cnx_string = ','.join(self.brokers)
        logger.debug('%s : connecting @ %s' % (self.path, cnx_string))
        data.zk = KazooClient(hosts=cnx_string, timeout=5.0, read_only=0, randomize_hosts=1)
        data.zk.add_listener(self.feedback)
        data.zk.start()
        data.n = 0
        return 'wait_for_cnx', data, 0
    def wait_for_cnx(self, data):
        """Spin until the driver reports CONNECTED, then write our ephemeral pod node and move to spin()."""
        if self.force_reset or self.terminate:
            raise Aborted('resetting')
        #
        # - loop back if we haven't received a CONNECTED event from the driver
        #
        if not self.connected:
            return 'wait_for_cnx', data, SAMPLING
        #
        # - the /pods node holds all our ephemeral per-container data (one container == one child node)
        # - the /hash node stores the last recorded md5 hash (local pods + dependencies), which we use to
        #   flag any change amongst the pods or their dependencies
        #
        data.zk.ensure_path('%s/pods' % self.prefix)
        data.zk.ensure_path('%s/hash' % self.prefix)
        try:
            #
            # - register ourselves by creating an ephemeral
            # - this is where we can store arbitrary information (e.g our breadcrumbs)
            # - we ask for a sequence counter as well which we then keep (e.g in case of connection loss or reset
            #   we guarantee the pod won't get assigned a new index)
            # - this is *critical* for some use-cases (e.g Kafka where the broker index must remain the same)
            #
            path = data.zk.create('%s/pods/%s.' % (self.prefix, self.id), ephemeral=True, sequence=True)
            tokens = path.split('.')
            if self.seq is None:
                self.seq = int(tokens[-1])
            self.breadcrumbs['seq'] = self.seq
            js = json.dumps(self.breadcrumbs)
            data.zk.set(path, js)
        except NodeExistsError:
            #
            # - if the node is already there we just recovered from a zookeeper connection loss
            #   and /snapshot has not been phased out yet .. this is not an issue, simply pause a bit
            #   to re-attempt later
            #
            logger.debug('%s : pod %s is already there (probably a zk reconnect)' % (self.path, self.id))
            return 'wait_for_cnx', data, 5.0 * SAMPLING
        logger.debug('%s : registered as %s (#%d)' % (self.path, self.id, self.seq))
        data.connected_at = time.time()
        return 'spin', data, 0
    def spin(self, data):
        """Main state once the session is up - to be implemented by derived actors."""
        raise NotImplementedError
    def specialized(self, msg):
        """Actor message hook handling zk state changes and explicit reset requests."""
        assert 'request' in msg, 'bogus message received ?'
        req = msg['request']
        if req == 'state change':
            #
            # - we got a zk state change
            # - we only use the switch to CONNECTED to go from wait_for_cnx() to spin()
            # - ZK disconnects (LOST or SUSPENDED) are simply flagged when exceptions are raised
            #
            state = msg['state']
            current = 'connected' if self.connected else 'disconnected'
            logger.debug('%s : zk state change -> "%s" (%s)' % (self.path, str(state), current))
            if self.connected and state != KazooState.CONNECTED:
                logger.warning('%s : lost connection (%s) / forcing a reset' % (self.path, str(state)))
                self.force_reset = 1
                self.connected = 0
            elif state == KazooState.CONNECTED:
                self.connected = 1
        elif req == 'reset':
            #
            # - we got a request to explicitly force a reset
            # - this is typically invoked from the CLI
            #
            self.force_reset = 1
        else:
            super(ZK, self).specialized(msg)
class Coordinator(ZK):
    """
    Leader lock implementation logic, based on :class:`ZK`. The spin() state will attempt to grab a lock (we
    simply use the Kazoo recipe). If we obtain the lock we boot the controller actor (e.g the clustering model)
    and then stay there by spin-locking on its latch. If the controller goes down for any reason (typically a
    zookeeper error or a shutdown request) we'll reset (and disconnect from zookeeper).
    """
    def __init__(self, brokers, scope, tag, port, breadcrumbs, model, hints):
        """
        :param brokers: list of zookeeper broker connection strings
        :param scope: cluster namespace
        :param tag: cluster identifier within the namespace
        :param port: port forwarded to the controller actor upon start
        :param breadcrumbs: pod metadata serialized into our ephemeral node
        :param model: controller (clustering model) actor class, started once we hold the lock
        :param hints: shared dict updated in place by the underlying :class:`ZK`
        """
        super(Coordinator, self).__init__(brokers, scope, tag, breadcrumbs, hints)
        self.model = model
        self.path = 'coordinator'
        self.port = port
    def reset(self, data):
        """Nuke the controller actor and drop the lock attribute before the base-class reset."""
        if hasattr(data, 'controller'):
            #
            # - don't forget to nuke our controller before resetting
            #
            shutdown(data.controller)
        if hasattr(data, 'lock'):
            #
            # - make sure to remove the lock attribute
            # - it's useless to release the lock as we'll release the client altogether
            #
            delattr(data, 'lock')
        return super(Coordinator, self).reset(data)
    def spin(self, data):
        """Attempt to grab the leader lock; on success move to start_controller()."""
        #
        # - if the termination trigger is set, abort immediately
        #
        if self.force_reset or self.terminate:
            raise Aborted('resetting')
        #
        # - attempt to fetch the lock
        # - allocate it if not already done
        # - it is *important* to just allocate one lock as there is a leak in kazoo
        #
        if not hasattr(data, 'lock'):
            data.lock = data.zk.Lock('%s/coordinator' % self.prefix)
        try:
            #
            # - attempt to lock within a 5 seconds timeout to avoid stalling in some cases
            #
            if data.lock.acquire(timeout=5.0 * SAMPLING):
                return 'start_controller', data, 0
        except LockTimeout:
            pass
        return 'spin', data, 0
    def start_controller(self, data):
        """We are the leader: boot the controller actor and move to lock()."""
        #
        # - if the termination trigger is set, abort immediately
        # - this is important as it is possible to somehow get the lock after a suspend (acquire() returns
        #   true in that case which is misleading)
        #
        if self.force_reset or self.terminate:
            raise Aborted('resetting')
        #
        # - we have the lock (e.g we are the leader)
        # - start the controller actor
        #
        data.latch = ThreadingFuture()
        logger.debug('%s : lock acquired @ %s, now leading' % (self.path, self.prefix))
        data.controller = self.model.start(data.zk, self.id, self.hints, self.scope, self.tag, self.port, data.latch)
        return 'lock', data, 0
    def lock(self, data):
        """Spin-lock on the controller latch while we lead; trip on any failure it reports."""
        #
        # - if the termination trigger is set, abort immediately
        #
        if self.force_reset or self.terminate:
            raise Aborted('resetting')
        #
        # - spin-lock on the controller latch
        # - any catastrophic plug failure will be trapped that way
        # - note: a stray no-op Event() instantiation was removed here (the object was
        #   constructed and immediately discarded - the latch get() below does the waiting)
        #
        try:
            out = data.latch.get(SAMPLING)
            if isinstance(out, Exception):
                raise out
        except Timeout:
            pass
        return 'lock', data, 0
|
[
"uuid.uuid4",
"ochopod.core.fsm.shutdown",
"kazoo.client.KazooClient",
"ochopod.core.fsm.Aborted",
"json.dumps",
"time.time",
"pykka.ThreadingFuture",
"threading.Event",
"logging.getLogger"
] |
[((1044, 1072), 'logging.getLogger', 'logging.getLogger', (['"""ochopod"""'], {}), "('ochopod')\n", (1061, 1072), False, 'import logging\n'), ((2208, 2220), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (2218, 2220), False, 'import uuid\n'), ((3430, 3504), 'kazoo.client.KazooClient', 'KazooClient', ([], {'hosts': 'cnx_string', 'timeout': '(5.0)', 'read_only': '(0)', 'randomize_hosts': '(1)'}), '(hosts=cnx_string, timeout=5.0, read_only=0, randomize_hosts=1)\n', (3441, 3504), False, 'from kazoo.client import KazooClient, KazooState\n'), ((5738, 5749), 'time.time', 'time.time', ([], {}), '()\n', (5747, 5749), False, 'import time\n'), ((9575, 9592), 'pykka.ThreadingFuture', 'ThreadingFuture', ([], {}), '()\n', (9590, 9592), False, 'from pykka import ThreadingFuture, Timeout\n'), ((3733, 3753), 'ochopod.core.fsm.Aborted', 'Aborted', (['"""resetting"""'], {}), "('resetting')\n", (3740, 3753), False, 'from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM\n'), ((5103, 5131), 'json.dumps', 'json.dumps', (['self.breadcrumbs'], {}), '(self.breadcrumbs)\n', (5113, 5131), False, 'import json\n'), ((7938, 7963), 'ochopod.core.fsm.shutdown', 'shutdown', (['data.controller'], {}), '(data.controller)\n', (7946, 7963), False, 'from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM\n'), ((8436, 8456), 'ochopod.core.fsm.Aborted', 'Aborted', (['"""resetting"""'], {}), "('resetting')\n", (8443, 8456), False, 'from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM\n'), ((9420, 9440), 'ochopod.core.fsm.Aborted', 'Aborted', (['"""resetting"""'], {}), "('resetting')\n", (9427, 9440), False, 'from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM\n'), ((10009, 10029), 'ochopod.core.fsm.Aborted', 'Aborted', (['"""resetting"""'], {}), "('resetting')\n", (10016, 10029), False, 'from ochopod.core.fsm import shutdown, spin_lock, Aborted, FSM\n'), ((10189, 10196), 'threading.Event', 'Event', ([], {}), '()\n', (10194, 10196), False, 'from threading import Event\n')]
|
#coding=utf-8
'''
Created on 2016年9月27日
@author: dengdan
'''
import numpy as np
import time
import random
# Module-level RNG seeded from the wall clock - results are NOT reproducible across runs
rng = np.random.RandomState(int(time.time()))
# Alias for np.random.rand (uniform samples over [0, 1))
rand = np.random.rand
"""
Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1)
"""
def normal(shape, mu = 0, sigma_square = 1, dtype = np.float32):
    """
    Draw gaussian samples with mean ``mu`` and variance ``sigma_square``.
    shape: output array shape
    mu: mean of the distribution
    sigma_square: variance (the standard deviation passed to the RNG is its square root)
    dtype: dtype of the returned array
    """
    std = np.sqrt(sigma_square)
    return np.array(rng.normal(mu, std, shape), dtype = dtype)
def randint(low = 2 ** 30, high = None, shape = None):
    """
    Draw random integers from the module-level RNG.
    low: the exclusive upper bound when high is None (range is [0, low));
         otherwise the inclusive lower bound.
    high: when not None, the exclusive upper bound; low must be smaller than it.
    shape: output shape; if not provided, a scalar will be returned.
    """
    return rng.randint(low = low, high = high, size = shape)
def shuffle(lst):
    """Shuffle lst in place using the stdlib random module; returns None."""
    random.shuffle(lst)
def sample(lst, n):
    """Return a new list of n unique elements chosen at random from lst."""
    return random.sample(lst, n)
def prob(allow_zero = True):
    """
    Generate a random value usable as a probability, drawn uniformly from [0, 1).
    allow_zero: when False, redraw until the value is non-zero.
    """
    return rand_val(0, 1.0, allow_zero)
def rand_val(low = 0, high = 1.0, allow_zero = True, range = None):
    """
    Draw a uniform random value in [low, high).
    range: optional 2-element override for (low, high); note the parameter name
           shadows the builtin ``range`` but is kept for backward compatibility.
    allow_zero: when False, keep redrawing while the value equals zero.
    """
    if range is not None:
        low = range[0]
        high = range[1]
    val = rng.uniform(low = low, high = high)
    # redraw only when zeros are disallowed and we actually drew one
    while not (allow_zero or val):
        val = rng.uniform(low = low, high = high)
    return val
|
[
"random.sample",
"random.shuffle",
"time.time",
"numpy.array",
"numpy.sqrt"
] |
[((433, 462), 'numpy.array', 'np.array', (['tensor'], {'dtype': 'dtype'}), '(tensor, dtype=dtype)\n', (441, 462), True, 'import numpy as np\n'), ((799, 818), 'random.shuffle', 'random.shuffle', (['lst'], {}), '(lst)\n', (813, 818), False, 'import random\n'), ((851, 872), 'random.sample', 'random.sample', (['lst', 'n'], {}), '(lst, n)\n', (864, 872), False, 'import random\n'), ((140, 151), 'time.time', 'time.time', ([], {}), '()\n', (149, 151), False, 'import time\n'), ((392, 413), 'numpy.sqrt', 'np.sqrt', (['sigma_square'], {}), '(sigma_square)\n', (399, 413), True, 'import numpy as np\n')]
|
# Import the libraries we need for this lab
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from torch.utils.data import Dataset, DataLoader
# Plot the data
def plot_decision_regions_2class(model, data_set):
    """
    Plot the model's 2D decision regions over a dense grid, together with the
    labelled samples from data_set (expects data_set.x of shape (N, 2) and
    data_set.y of shape (N, 1) as torch tensors).
    Note: the unused cmap_bold local present in the original was removed.
    """
    cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#00AAFF'])
    X = data_set.x.numpy()
    y = data_set.y.numpy()
    # grid step for the background mesh
    h = .02
    x_min, x_max = X[:, 0].min() - 0.1 , X[:, 0].max() + 0.1
    y_min, y_max = X[:, 1].min() - 0.1 , X[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
    XX = torch.Tensor(np.c_[xx.ravel(), yy.ravel()])
    # threshold the first output column at 0.5 to get the predicted class per grid point
    yhat = np.logical_not((model(XX)[:, 0] > 0.5).numpy()).reshape(xx.shape)
    plt.pcolormesh(xx, yy, yhat, cmap=cmap_light)
    plt.plot(X[y[:, 0] == 0, 0], X[y[:, 0] == 0, 1], 'o', label='y=0')
    plt.plot(X[y[:, 0] == 1, 0], X[y[:, 0] == 1, 1], 'ro', label='y=1')
    plt.title("decision region")
    plt.legend()
# Calculate the accuracy
def accuracy(model, data_set):
    """Fraction of samples whose thresholded prediction (> 0.5 on column 0) matches the label."""
    predicted = (model(data_set.x)[:, 0] > 0.5).numpy()
    actual = data_set.y.view(-1).numpy()
    return np.mean(actual == predicted)
# Define the class Net with one hidden layer
class Net(nn.Module):
    """Two-layer feed-forward network with a sigmoid on both the hidden and output layers."""

    def __init__(self, D_in, H, D_out):
        """D_in: input width, H: hidden width, D_out: output width."""
        super(Net, self).__init__()
        self.linear1 = nn.Linear(D_in, H)   # hidden layer
        self.linear2 = nn.Linear(H, D_out)  # output layer

    def forward(self, x):
        """Return sigmoid(linear2(sigmoid(linear1(x))))."""
        hidden = torch.sigmoid(self.linear1(x))
        return torch.sigmoid(self.linear2(hidden))
# Define the train model
def train(data_set, model, criterion, train_loader, optimizer, epochs=5):
    """
    Train model on train_loader for the given number of epochs, then plot the
    total loss and accuracy per epoch on twin y-axes.
    Returns the list of per-epoch cumulative losses.
    Fix: the original called optimizer.zero_grad() twice per batch; the
    redundant first call was removed (a single zero before backward suffices).
    """
    COST = []
    ACC = []
    for epoch in range(epochs):
        total = 0
        for x, y in train_loader:
            yhat = model(x)
            loss = criterion(yhat, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # cumulative loss
            total += loss.item()
        ACC.append(accuracy(model, data_set))
        COST.append(total)
    fig, ax1 = plt.subplots()
    color = 'tab:red'
    ax1.plot(COST, color=color)
    ax1.set_xlabel('epoch', color=color)
    ax1.set_ylabel('total loss', color=color)
    ax1.tick_params(axis='y', color=color)
    ax2 = ax1.twinx()
    color = 'tab:blue'
    ax2.set_ylabel('accuracy', color=color)  # we already handled the x-label with ax1
    ax2.plot(ACC, color=color)
    ax2.tick_params(axis='y', color=color)
    fig.tight_layout()  # otherwise the right y-label is slightly clipped
    plt.show()
    return COST
# Define the class XOR_Data
class XOR_Data(Dataset):
    """
    Synthetic XOR dataset: N_s // 4 samples at each corner of the unit square,
    labelled 0 for (0,0)/(1,1) and 1 for (0,1)/(1,0), with small gaussian
    jitter added to the features.
    """

    def __init__(self, N_s=100):
        """Build N_s samples; rows not covered when N_s % 4 != 0 stay at the zero default."""
        self.x = torch.zeros((N_s, 2))
        self.y = torch.zeros((N_s, 1))
        offsets = (0, N_s // 4, N_s // 2, 3 * N_s // 4)
        corners = ([0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0])
        labels = (0.0, 1.0, 1.0, 0.0)
        for i in range(N_s // 4):
            for off, corner, label in zip(offsets, corners, labels):
                self.x[i + off, :] = torch.Tensor(corner)
                self.y[i + off, 0] = torch.Tensor([label])
        # jitter the features so the corners become small clusters
        self.x = self.x + 0.01 * torch.randn((N_s, 2))
        self.len = N_s

    def __getitem__(self, index):
        """Return the (features, label) pair at position index."""
        return self.x[index], self.y[index]

    def __len__(self):
        """Number of samples."""
        return self.len

    def plot_stuff(self):
        """Scatter-plot the samples, coloured by class label."""
        plt.plot(self.x[self.y[:, 0] == 0, 0].numpy(), self.x[self.y[:, 0] == 0, 1].numpy(), 'o', label="y=0")
        plt.plot(self.x[self.y[:, 0] == 1, 0].numpy(), self.x[self.y[:, 0] == 1, 1].numpy(), 'ro', label="y=1")
        plt.legend()
# Create dataset object
data_set = XOR_Data()
data_set.plot_stuff()
# Train the model
# NOTE(review): this first section is inert - the optimizer/train calls are commented out
# and `model` is not defined yet at this point; it appears to be notebook scaffolding.
learning_rate = 0.001
criterion = nn.BCELoss()
#optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=1)
#LOSS12 = train(data_set, model, criterion, train_loader, optimizer, epochs=500)
#plot_decision_regions_2class(model, data_set)
# Train the model
# NOTE(review): second inert section, identical but with a higher learning rate
learning_rate = 0.1
criterion = nn.BCELoss()
#optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=1)
#LOSS12 = train(data_set, model, criterion, train_loader, optimizer, epochs=500)
#plot_decision_regions_2class(model, data_set)
# Practice: create a model with two neuron
model = Net(2, 4, 1)
# Type your code here
# Train the model - this is the only section that actually runs training
learning_rate = 0.1
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
train_loader = DataLoader(dataset=data_set, batch_size=1)
LOSS12 = train(data_set, model, criterion, train_loader, optimizer, epochs=500)
plot_decision_regions_2class(model, data_set)
|
[
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"torch.nn.BCELoss",
"matplotlib.pyplot.plot",
"torch.utils.data.DataLoader",
"matplotlib.pyplot.legend",
"torch.randn",
"matplotlib.pyplot.subplots",
"torch.Tensor",
"numpy.arange",
"matplotlib.pyplot.pcolormesh",
"torch.nn.Linear",
"torch.zeros",
"matplotlib.colors.ListedColormap"
] |
[((4111, 4123), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4121, 4123), True, 'import torch.nn as nn\n'), ((4206, 4248), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data_set', 'batch_size': '(1)'}), '(dataset=data_set, batch_size=1)\n', (4216, 4248), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4429, 4441), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4439, 4441), True, 'import torch.nn as nn\n'), ((4524, 4566), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data_set', 'batch_size': '(1)'}), '(dataset=data_set, batch_size=1)\n', (4534, 4566), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((4834, 4846), 'torch.nn.BCELoss', 'nn.BCELoss', ([], {}), '()\n', (4844, 4846), True, 'import torch.nn as nn\n'), ((4928, 4970), 'torch.utils.data.DataLoader', 'DataLoader', ([], {'dataset': 'data_set', 'batch_size': '(1)'}), '(dataset=data_set, batch_size=1)\n', (4938, 4970), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((344, 393), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FFAAAA', '#AAFFAA', '#00AAFF']"], {}), "(['#FFAAAA', '#AAFFAA', '#00AAFF'])\n", (358, 393), False, 'from matplotlib.colors import ListedColormap\n'), ((410, 459), 'matplotlib.colors.ListedColormap', 'ListedColormap', (["['#FF0000', '#00FF00', '#00AAFF']"], {}), "(['#FF0000', '#00FF00', '#00AAFF'])\n", (424, 459), False, 'from matplotlib.colors import ListedColormap\n'), ((863, 908), 'matplotlib.pyplot.pcolormesh', 'plt.pcolormesh', (['xx', 'yy', 'yhat'], {'cmap': 'cmap_light'}), '(xx, yy, yhat, cmap=cmap_light)\n', (877, 908), True, 'import matplotlib.pyplot as plt\n'), ((913, 979), 'matplotlib.pyplot.plot', 'plt.plot', (['X[y[:, 0] == 0, 0]', 'X[y[:, 0] == 0, 1]', '"""o"""'], {'label': '"""y=0"""'}), "(X[y[:, 0] == 0, 0], X[y[:, 0] == 0, 1], 'o', label='y=0')\n", (921, 979), True, 'import matplotlib.pyplot as plt\n'), ((984, 1051), 'matplotlib.pyplot.plot', 'plt.plot', (['X[y[:, 0] == 1, 
0]', 'X[y[:, 0] == 1, 1]', '"""ro"""'], {'label': '"""y=1"""'}), "(X[y[:, 0] == 1, 0], X[y[:, 0] == 1, 1], 'ro', label='y=1')\n", (992, 1051), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1084), 'matplotlib.pyplot.title', 'plt.title', (['"""decision region"""'], {}), "('decision region')\n", (1065, 1084), True, 'import matplotlib.pyplot as plt\n'), ((1089, 1101), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1099, 1101), True, 'import matplotlib.pyplot as plt\n'), ((2250, 2264), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2262, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2735, 2745), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2743, 2745), True, 'import matplotlib.pyplot as plt\n'), ((673, 699), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (682, 699), True, 'import numpy as np\n'), ((700, 726), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (709, 726), True, 'import numpy as np\n'), ((1462, 1480), 'torch.nn.Linear', 'nn.Linear', (['D_in', 'H'], {}), '(D_in, H)\n', (1471, 1480), True, 'import torch.nn as nn\n'), ((1527, 1546), 'torch.nn.Linear', 'nn.Linear', (['H', 'D_out'], {}), '(H, D_out)\n', (1536, 1546), True, 'import torch.nn as nn\n'), ((2888, 2909), 'torch.zeros', 'torch.zeros', (['(N_s, 2)'], {}), '((N_s, 2))\n', (2899, 2909), False, 'import torch\n'), ((2927, 2948), 'torch.zeros', 'torch.zeros', (['(N_s, 1)'], {}), '((N_s, 1))\n', (2938, 2948), False, 'import torch\n'), ((3974, 3986), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3984, 3986), True, 'import matplotlib.pyplot as plt\n'), ((3010, 3034), 'torch.Tensor', 'torch.Tensor', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3022, 3034), False, 'import torch\n'), ((3062, 3081), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (3074, 3081), False, 'import torch\n'), ((3121, 3145), 'torch.Tensor', 'torch.Tensor', (['[0.0, 1.0]'], {}), '([0.0, 
1.0])\n', (3133, 3145), False, 'import torch\n'), ((3184, 3203), 'torch.Tensor', 'torch.Tensor', (['[1.0]'], {}), '([1.0])\n', (3196, 3203), False, 'import torch\n'), ((3243, 3267), 'torch.Tensor', 'torch.Tensor', (['[1.0, 0.0]'], {}), '([1.0, 0.0])\n', (3255, 3267), False, 'import torch\n'), ((3306, 3325), 'torch.Tensor', 'torch.Tensor', (['[1.0]'], {}), '([1.0])\n', (3318, 3325), False, 'import torch\n'), ((3369, 3393), 'torch.Tensor', 'torch.Tensor', (['[1.0, 1.0]'], {}), '([1.0, 1.0])\n', (3381, 3393), False, 'import torch\n'), ((3436, 3455), 'torch.Tensor', 'torch.Tensor', (['[0.0]'], {}), '([0.0])\n', (3448, 3455), False, 'import torch\n'), ((3494, 3515), 'torch.randn', 'torch.randn', (['(N_s, 2)'], {}), '((N_s, 2))\n', (3505, 3515), False, 'import torch\n')]
|
import pandas as pd
from collections import namedtuple
from xbbg.core import timezone
from xbbg.io import files, logs, param
# Month -> futures contract letter code (standard exchange convention)
Futures = dict(
    Jan='F', Feb='G', Mar='H', Apr='J', May='K', Jun='M',
    Jul='N', Aug='Q', Sep='U', Oct='V', Nov='X', Dec='Z',
)
# FX pair descriptor: Bloomberg ticker plus the factor / power used for conversion
CurrencyPair = namedtuple('CurrencyPair', ['ticker', 'factor', 'power'])
ValidSessions = ['allday', 'day', 'am', 'pm', 'night', 'pre', 'post']
PKG_PATH = files.abspath(__file__, 0)
# Per-asset-class columns that may hold lists in the yaml config (see explode())
ASSET_INFO = {
    'Index': ['tickers'],
    'Comdty': ['tickers', 'key_month'],
    'Curncy': ['tickers'],
    'Equity': ['exch_codes'],
}
# Dividend request alias -> Bloomberg field name
# NOTE(review): the name 'DVD_TPYES' looks like a typo for 'DVD_TYPES' but is kept
# as-is since it is a public module constant that callers may reference.
DVD_TPYES = {
    'all': 'DVD_Hist_All',
    'dvd': 'DVD_Hist',
    'split': 'Eqy_DVD_Hist_Splits',
    'gross': 'Eqy_DVD_Hist_Gross',
    'adjust': 'Eqy_DVD_Adjust_Fact',
    'adj_fund': 'Eqy_DVD_Adj_Fund',
    'with_amt': 'DVD_Hist_All_with_Amt_Status',
    'dvd_amt': 'DVD_Hist_with_Amt_Status',
    'gross_amt': 'DVD_Hist_Gross_with_Amt_Stat',
    'projected': 'BDVD_Pr_Ex_Dts_DVD_Amts_w_Ann',
}
# Bloomberg dividend column header -> snake_case column name
DVD_COLS = {
    'Declared Date': 'dec_date',
    'Ex-Date': 'ex_date',
    'Record Date': 'rec_date',
    'Payable Date': 'pay_date',
    'Dividend Amount': 'dvd_amt',
    'Dividend Frequency': 'dvd_freq',
    'Dividend Type': 'dvd_type',
    'Amount Status': 'amt_status',
    'Adjustment Date': 'adj_date',
    'Adjustment Factor': 'adj_factor',
    'Adjustment Factor Operator Type': 'adj_op',
    'Adjustment Factor Flag': 'adj_flag',
    'Amount Per Share': 'amt_ps',
    'Projected/Confirmed': 'category',
}
# Real-time subscription field sets, grouped by purpose
LIVE_INFO = {
    # Common fields
    'MKTDATA_EVENT_TYPE', 'MKTDATA_EVENT_SUBTYPE', 'IS_DELAYED_STREAM',
    # Last Price
    'LAST_PRICE', 'RT_PX_CHG_PCT_1D', 'REALTIME_PERCENT_BID_ASK_SPREAD',
    'EVT_TRADE_DATE_RT', 'TRADE_UPDATE_STAMP_RT',
    'EQY_TURNOVER_REALTIME', 'VOLUME',
    # Bid
    'BID', 'BID_UPDATE_STAMP_RT',
    # Ask
    'ASK', 'ASK_UPDATE_STAMP_RT',
    # Common in bid / ask
    'SPREAD_BA', 'MID',
}
LIVE_CHG = {
    'RT_PX_CHG_PCT_1D', 'CHG_PCT_1M_RT', 'CHG_PCT_3M_RT',
    'CHG_PCT_MTD_RT', 'CHG_PCT_QTD_RT', 'CHG_PCT_YTD_RT',
    'REALTIME_2_DAY_CHANGE_PERCENT', 'REALTIME_5_DAY_CHANGE_PERCENT',
    'REALTIME_15_SEC_PRICE_PCT_CHG', 'REALTIME_ONE_MIN_PRICE_PCT_CHG',
    # Equities only
    'REALTIME_FIVE_MIN_PRICE_PCT_CHG', 'REALTIME_15_MIN_PRICE_PCT_CHG',
    'REALTIME_ONE_HOUR_PRICE_PCT_CHG',
}
LIVE_VOL = {
    'REALTIME_VOLUME_5_DAY_INTERVAL',
    # Real-time current volume as % change from N-day avg volume
    'DELTA_AVAT_1_DAY_INTERVAL', 'DELTA_AVAT_5_DAY_INTERVAL',
    'DELTA_AVAT_10_DAY_INTERVAL', 'DELTA_AVAT_20_DAY_INTERVAL',
    'DELTA_AVAT_30_DAY_INTERVAL', 'DELTA_AVAT_100_DAY_INTERVAL',
    'DELTA_AVAT_180_DAY_INTERVAL',
    # Real-time turnover as % change from N-day average turnover
    'DELTA_ATAT_1_DAY_INTERVAL', 'DELTA_ATAT_5_DAY_INTERVAL',
    'DELTA_ATAT_10_DAY_INTERVAL', 'DELTA_ATAT_20_DAY_INTERVAL',
    'DELTA_ATAT_30_DAY_INTERVAL', 'DELTA_ATAT_100_DAY_INTERVAL',
    'DELTA_ATAT_180_DAY_INTERVAL',
}
LIVE_RATIO = {
    'PRICE_EARNINGS_RATIO_RT', 'PRICE_TO_BOOK_RATIO_RT',
    'PRICE_TO_SALES_RATIO_RT', 'PRICE_CASH_FLOW_RT', 'PRICE_EBITDA_RT',
}
def exch_info(ticker: str, **kwargs) -> pd.Series:
    """
    Exchange info for given ticker
    Args:
        ticker: ticker or exchange
        **kwargs:
            ref: reference ticker or exchange
                 used as supplement if exchange info is not defined for `ticker`
            original: original ticker (for logging)
            config: info from exch.yml
    Returns:
        pd.Series
    Examples:
        >>> exch_info('SPY US Equity')
        tz        America/New_York
        allday      [04:00, 20:00]
        day         [09:30, 16:00]
        post        [16:01, 20:00]
        pre         [04:00, 09:30]
        Name: EquityUS, dtype: object
        >>> exch_info('SPY US Equity', ref='EquityUS')
        tz        America/New_York
        allday      [04:00, 20:00]
        day         [09:30, 16:00]
        post        [16:01, 20:00]
        pre         [04:00, 09:30]
        Name: EquityUS, dtype: object
        >>> exch_info('ES1 Index')
        tz        America/New_York
        allday      [18:00, 17:00]
        day         [08:00, 17:00]
        Name: CME, dtype: object
        >>> exch_info('ESM0 Index', ref='ES1 Index')
        tz        America/New_York
        allday      [18:00, 17:00]
        day         [08:00, 17:00]
        Name: CME, dtype: object
        >>> exch_info('Z 1 Index')
        tz           Europe/London
        allday      [01:00, 21:00]
        day         [01:00, 21:00]
        Name: FuturesFinancialsICE, dtype: object
        >>> exch_info('TESTTICKER Corp')
        Series([], dtype: object)
        >>> exch_info('US')
        tz        America/New_York
        allday      [04:00, 20:00]
        day         [09:30, 16:00]
        post        [16:01, 20:00]
        pre         [04:00, 09:30]
        Name: EquityUS, dtype: object
        >>> exch_info('UXF1UXG1 Index')
        tz        America/New_York
        allday      [18:00, 17:00]
        day         [18:00, 17:00]
        Name: FuturesCBOE, dtype: object
        >>> exch_info('TESTTICKER Index', original='TESTTICKER Index')
        Series([], dtype: object)
        >>> exch_info('TESTTCK Index')
        Series([], dtype: object)
    """
    logger = logs.get_logger(exch_info, level='debug')
    # - an explicit `ref` overrides the lookup entirely
    if kwargs.get('ref', ''):
        return exch_info(ticker=kwargs['ref'])
    exch = kwargs.get('config', param.load_config(cat='exch'))
    original = kwargs.get('original', '')
    # Case 1: Use exchange directly
    if ticker in exch.index:
        info = exch.loc[ticker].dropna()
        # Check required info
        if info.reindex(['allday', 'tz']).dropna().size < 2:
            logger.error(
                f'required info (allday + tz) cannot be found in '
                f'{original if original else ticker} ...'
            )
            return pd.Series(dtype=object)
        # Fill day session info if not provided
        if 'day' not in info:
            info['day'] = info['allday']
        return info.dropna().apply(param.to_hours)
    # - `original` being set means this is already the 2nd (recursive) pass - give up
    if original:
        logger.error(f'exchange info cannot be found in {original} ...')
        return pd.Series(dtype=object)
    # Case 2: Use ticker to find exchange
    exch_name = market_info(ticker=ticker).get('exch', '')
    if not exch_name: return pd.Series(dtype=object)
    return exch_info(
        ticker=exch_name,
        original=ticker,
        config=exch,
    )
def market_info(ticker: str) -> pd.Series:
    """
    Get info for given ticker
    Args:
        ticker: Bloomberg full ticker
    Returns:
        dict
    Examples:
        >>> market_info('SHCOMP Index').exch
        'EquityChina'
        >>> market_info('SPY US Equity').exch
        'EquityUS'
        >>> market_info('ICICIC=1 IS Equity').exch
        'EquityFuturesIndia'
        >>> market_info('INT1 Curncy').exch
        'CurrencyIndia'
        >>> market_info('CL1 Comdty').exch
        'NYME'
        >>> incorrect_tickers = [
        ...     'C XX Equity', 'XXX Comdty', 'Bond_ISIN Corp',
        ...     'XYZ Index', 'XYZ Curncy',
        ... ]
        >>> pd.concat([market_info(_) for _ in incorrect_tickers])
        Series([], dtype: object)
    """
    t_info = ticker.split()
    # a bare 2-character string is treated as an exchange code, not a ticker
    exch_only = len(ticker) == 2
    if (not exch_only) and (t_info[-1] not in ['Equity', 'Comdty', 'Curncy', 'Index']):
        return pd.Series(dtype=object)
    a_info = asset_config(asset='Equity' if exch_only else t_info[-1])
    # =========================================== #
    #             Equity / Equity Futures         #
    # =========================================== #
    if (t_info[-1] == 'Equity') or exch_only:
        # '=' in the ticker marks a single-stock future (e.g 'ICICIC=1 IS Equity')
        is_fut = '==' if '=' in ticker else '!='
        exch_sym = ticker if exch_only else t_info[-2]
        return take_first(
            data=a_info,
            query=f'exch_codes == "{exch_sym}" and is_fut {is_fut} True',
        )
    # ================================================ #
    #          Currency / Commodity / Index            #
    # ================================================ #
    if t_info[0] in a_info.tickers.values:
        symbol = t_info[0]
    elif t_info[0][-1].isdigit():
        # strip the trailing contract digits (1 or 2 chars) to recover the root symbol
        end_idx = 2 if t_info[-2].isdigit() else 1
        symbol = t_info[0][:-end_idx].strip()
        # Special contracts
        if (symbol[:2] == 'UX') and (t_info[-1] == 'Index'):
            symbol = 'UX'
    else:
        symbol = t_info[0].split('+')[0]
    return take_first(data=a_info, query=f'tickers == "{symbol}"')
def take_first(data: pd.DataFrame, query: str) -> pd.Series:
    """
    Run `query` against `data` and return the first matching row.

    Args:
        data: pd.DataFrame
        query: pandas query string

    Returns:
        pd.Series: the first matching row (index reset), or an empty
        object-dtype series when `data` is empty or nothing matches
    """
    if data.empty:
        return pd.Series(dtype=object)
    matches = data.query(query)
    if matches.empty:
        return pd.Series(dtype=object)
    return matches.reset_index(drop=True).iloc[0]
def asset_config(asset: str) -> pd.DataFrame:
    """
    Load info for given asset
    Args:
        asset: asset name (a key of ASSET_INFO, e.g 'Equity' / 'Comdty')
    Returns:
        pd.DataFrame
    """
    cfg_files = param.config_files('assets')
    cache_cfg = f'{PKG_PATH}/markets/cached/{asset}_cfg.pkl'
    # - reuse the pickled cache if it is newer than every yaml config file
    last_mod = max(map(files.modified_time, cfg_files))
    if files.exists(cache_cfg) and files.modified_time(cache_cfg) > last_mod:
        return pd.read_pickle(cache_cfg)
    # - merge all configs, expanding the per-asset list columns, later files winning
    config = (
        pd.concat([
            explode(
                data=pd.DataFrame(param.load_yaml(cf).get(asset, [])),
                columns=ASSET_INFO[asset],
            )
            for cf in cfg_files
        ], sort=False)
        .drop_duplicates(keep='last')
        .reset_index(drop=True)
    )
    # - refresh the cache for subsequent calls
    files.create_folder(cache_cfg, is_file=True)
    config.to_pickle(cache_cfg)
    return config
def explode(data: pd.DataFrame, columns: list) -> pd.DataFrame:
    """
    Explode `data` by each column in `columns` (list-valued cells become one row
    per element).

    Args:
        data: pd.DataFrame
        columns: columns to explode (processed last-to-first)

    Returns:
        pd.DataFrame: exploded frame, or an empty frame when `data` is empty
    """
    if data.empty:
        return pd.DataFrame()
    exploded = data.explode(column=columns[-1])
    if len(columns) == 1:
        return exploded
    return explode(data=exploded, columns=columns[:-1])
def ccy_pair(local, base='USD') -> CurrencyPair:
    """
    Currency pair info
    Args:
        local: local currency
        base: base currency
    Returns:
        CurrencyPair
    Examples:
        >>> ccy_pair(local='HKD', base='USD')
        CurrencyPair(ticker='HKD Curncy', factor=1.0, power=1.0)
        >>> ccy_pair(local='GBp')
        CurrencyPair(ticker='GBP Curncy', factor=100.0, power=-1.0)
        >>> ccy_pair(local='USD', base='GBp')
        CurrencyPair(ticker='GBP Curncy', factor=0.01, power=1.0)
        >>> ccy_pair(local='XYZ', base='USD')
        CurrencyPair(ticker='', factor=1.0, power=1.0)
        >>> ccy_pair(local='GBP', base='GBp')
        CurrencyPair(ticker='', factor=0.01, power=1.0)
        >>> ccy_pair(local='GBp', base='GBP')
        CurrencyPair(ticker='', factor=100.0, power=1.0)
    """
    ccy_param = param.load_config(cat='ccy')
    if f'{local}{base}' in ccy_param.index:
        info = ccy_param.loc[f'{local}{base}'].dropna()
    elif f'{base}{local}' in ccy_param.index:
        # - pair defined the other way round: invert factor and negate power
        info = ccy_param.loc[f'{base}{local}'].dropna()
        info['factor'] = 1. / info.get('factor', 1.)
        info['power'] = -info.get('power', 1.)
    elif base.lower() == local.lower():
        # - same currency, possibly differing in minor unit (a trailing lowercase
        #   letter, e.g 'GBp' pence vs 'GBP' pounds, implies a factor of 100)
        info = dict(ticker='')
        info['factor'] = 1.
        if base[-1].lower() == base[-1]:
            info['factor'] /= 100.
        if local[-1].lower() == local[-1]:
            info['factor'] *= 100.
    else:
        logger = logs.get_logger(ccy_pair, level='debug')
        logger.error(f'incorrect currency - local {local} / base {base}')
        return CurrencyPair(ticker='', factor=1., power=1.0)
    if 'factor' not in info: info['factor'] = 1.
    if 'power' not in info: info['power'] = 1.
    return CurrencyPair(**info)
def market_timing(ticker, dt, timing='EOD', tz='local', **kwargs) -> str:
    """
    Market close time for ticker
    Args:
        ticker: ticker name
        dt: date
        timing: [EOD (default), BOD]
        tz: conversion to timezone
    Returns:
        str: date & time
    Examples:
        >>> market_timing('7267 JT Equity', dt='2018-09-10')
        '2018-09-10 14:58'
        >>> market_timing('7267 JT Equity', dt='2018-09-10', tz=timezone.TimeZone.NY)
        '2018-09-10 01:58:00-04:00'
        >>> market_timing('7267 JT Equity', dt='2018-01-10', tz='NY')
        '2018-01-10 00:58:00-05:00'
        >>> market_timing('7267 JT Equity', dt='2018-09-10', tz='SPX Index')
        '2018-09-10 01:58:00-04:00'
        >>> market_timing('8035 JT Equity', dt='2018-09-10', timing='BOD')
        '2018-09-10 09:01'
        >>> market_timing('Z 1 Index', dt='2018-09-10', timing='FINISHED')
        '2018-09-10 21:00'
        >>> market_timing('TESTTICKER Corp', dt='2018-09-10')
        ''
    """
    logger = logs.get_logger(market_timing, level='debug')
    exch = pd.Series(exch_info(ticker=ticker, **kwargs))
    # Exchange info must provide timezone plus trading ('day') and
    # full-day ('allday') sessions; otherwise we cannot resolve a time.
    if any(req not in exch.index for req in ['tz', 'allday', 'day']):
        logger.error(f'required exchange info cannot be found in {ticker} ...')
        return ''
    # BOD -> session open, FINISHED -> end of full day, default (EOD) -> session close.
    mkt_time = {
        'BOD': exch.day[0], 'FINISHED': exch.allday[-1]
    }.get(timing, exch.day[-1])
    cur_dt = pd.Timestamp(str(dt)).strftime('%Y-%m-%d')
    if tz == 'local': return f'{cur_dt} {mkt_time}'
    # Convert from the exchange's own timezone to the requested one.
    return timezone.tz_convert(f'{cur_dt} {mkt_time}', to_tz=tz, from_tz=exch.tz)
|
[
"pandas.DataFrame",
"xbbg.io.files.modified_time",
"xbbg.io.files.abspath",
"xbbg.io.logs.get_logger",
"xbbg.io.files.create_folder",
"xbbg.io.param.config_files",
"xbbg.io.param.load_yaml",
"xbbg.io.param.load_config",
"xbbg.io.files.exists",
"collections.namedtuple",
"pandas.Series",
"pandas.read_pickle",
"xbbg.core.timezone.tz_convert"
] |
[((276, 333), 'collections.namedtuple', 'namedtuple', (['"""CurrencyPair"""', "['ticker', 'factor', 'power']"], {}), "('CurrencyPair', ['ticker', 'factor', 'power'])\n", (286, 333), False, 'from collections import namedtuple\n'), ((416, 442), 'xbbg.io.files.abspath', 'files.abspath', (['__file__', '(0)'], {}), '(__file__, 0)\n', (429, 442), False, 'from xbbg.io import files, logs, param\n'), ((5308, 5349), 'xbbg.io.logs.get_logger', 'logs.get_logger', (['exch_info'], {'level': '"""debug"""'}), "(exch_info, level='debug')\n", (5323, 5349), False, 'from xbbg.io import files, logs, param\n'), ((9157, 9185), 'xbbg.io.param.config_files', 'param.config_files', (['"""assets"""'], {}), "('assets')\n", (9175, 9185), False, 'from xbbg.io import files, logs, param\n'), ((9742, 9786), 'xbbg.io.files.create_folder', 'files.create_folder', (['cache_cfg'], {'is_file': '(True)'}), '(cache_cfg, is_file=True)\n', (9761, 9786), False, 'from xbbg.io import files, logs, param\n'), ((11129, 11157), 'xbbg.io.param.load_config', 'param.load_config', ([], {'cat': '"""ccy"""'}), "(cat='ccy')\n", (11146, 11157), False, 'from xbbg.io import files, logs, param\n'), ((13074, 13119), 'xbbg.io.logs.get_logger', 'logs.get_logger', (['market_timing'], {'level': '"""debug"""'}), "(market_timing, level='debug')\n", (13089, 13119), False, 'from xbbg.io import files, logs, param\n'), ((13572, 13642), 'xbbg.core.timezone.tz_convert', 'timezone.tz_convert', (['f"""{cur_dt} {mkt_time}"""'], {'to_tz': 'tz', 'from_tz': 'exch.tz'}), "(f'{cur_dt} {mkt_time}', to_tz=tz, from_tz=exch.tz)\n", (13591, 13642), False, 'from xbbg.core import timezone\n'), ((5461, 5490), 'xbbg.io.param.load_config', 'param.load_config', ([], {'cat': '"""exch"""'}), "(cat='exch')\n", (5478, 5490), False, 'from xbbg.io import files, logs, param\n'), ((6219, 6242), 'pandas.Series', 'pd.Series', ([], {'dtype': 'object'}), '(dtype=object)\n', (6228, 6242), True, 'import pandas as pd\n'), ((6374, 6397), 'pandas.Series', 'pd.Series', ([], 
{'dtype': 'object'}), '(dtype=object)\n', (6383, 6397), True, 'import pandas as pd\n'), ((7437, 7460), 'pandas.Series', 'pd.Series', ([], {'dtype': 'object'}), '(dtype=object)\n', (7446, 7460), True, 'import pandas as pd\n'), ((8828, 8851), 'pandas.Series', 'pd.Series', ([], {'dtype': 'object'}), '(dtype=object)\n', (8837, 8851), True, 'import pandas as pd\n'), ((8905, 8928), 'pandas.Series', 'pd.Series', ([], {'dtype': 'object'}), '(dtype=object)\n', (8914, 8928), True, 'import pandas as pd\n'), ((9310, 9333), 'xbbg.io.files.exists', 'files.exists', (['cache_cfg'], {}), '(cache_cfg)\n', (9322, 9333), False, 'from xbbg.io import files, logs, param\n'), ((9396, 9421), 'pandas.read_pickle', 'pd.read_pickle', (['cache_cfg'], {}), '(cache_cfg)\n', (9410, 9421), True, 'import pandas as pd\n'), ((10082, 10096), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (10094, 10096), True, 'import pandas as pd\n'), ((5917, 5940), 'pandas.Series', 'pd.Series', ([], {'dtype': 'object'}), '(dtype=object)\n', (5926, 5940), True, 'import pandas as pd\n'), ((9338, 9368), 'xbbg.io.files.modified_time', 'files.modified_time', (['cache_cfg'], {}), '(cache_cfg)\n', (9357, 9368), False, 'from xbbg.io import files, logs, param\n'), ((11743, 11783), 'xbbg.io.logs.get_logger', 'logs.get_logger', (['ccy_pair'], {'level': '"""debug"""'}), "(ccy_pair, level='debug')\n", (11758, 11783), False, 'from xbbg.io import files, logs, param\n'), ((9513, 9532), 'xbbg.io.param.load_yaml', 'param.load_yaml', (['cf'], {}), '(cf)\n', (9528, 9532), False, 'from xbbg.io import files, logs, param\n')]
|
import miner_globals
import m.common as common
import base
def p_source_statement(p):
    '''statement : SOURCE FILENAME'''
    # PLY grammar action for "SOURCE <filename>"; the docstring IS the rule.
    # Return the lexer to its default state after consuming the filename token.
    p.lexer.begin("INITIAL")
    p[0] = SourceStatement(p[2])
def p_once_statement(p):
    '''statement : ONCE relative_import_name'''
    # PLY grammar action for "ONCE <script>"; the docstring IS the rule.
    # Return the lexer to its default state after consuming the name.
    p.lexer.begin("INITIAL")
    p[0] = OnceStatement(p[2])
def p_call_statement(p):
    '''statement : CALL relative_import_name '(' named_parameter_list ')' '''
    # PLY grammar action for "CALL <script>(params)"; p[4] is the parameter list.
    p[0] = CallStatement(p[2], p[4])
def p_call_statement_no_params(p):
    '''statement : CALL relative_import_name '(' ')' '''
    # PLY grammar action for the parameterless form "CALL <script>()".
    p[0] = CallStatement(p[2], [])
class SourceStatement(base.StatementBase):
    # NAME / SHORT_HELP / LONG_HELP / COMPLETION_STATE are protocol attributes
    # consumed by the miner help/completion framework.
    NAME = "SOURCE"
    SHORT_HELP = "SOURCE <filename> - executes miner script"
    LONG_HELP = """SOURCE <filename>
Executes miner script specified by <filename> in current script environment.
All parameters changes will be available
"""
    COMPLETION_STATE = common.COMPLETE_FILE
    def __init__(self, fileName):
        """fileName: path of the miner script to execute."""
        base.StatementBase.__init__(self)
        self.fileName = fileName
    def execute(self):
        # Runs the script in the *current* environment (shared parameters).
        miner_globals.callScript(self.fileName)
class OnceStatement(SourceStatement):
    NAME = "ONCE"
    SHORT_HELP = "ONCE <filename> - executes miner script only once"
    LONG_HELP = """ONCE <filename>
Executes miner script specified by <filename> in current script environment.
All parameters changes will be available
The script is executed only once even if multiple statements available
"""
    COMPLETION_STATE = common.COMPLETE_FILE
    # Class-level set shared by ALL instances: tracks scripts already run
    # so repeated ONCE statements for the same file become no-ops.
    ALREADY_EXECUTED=set()
    def __init__(self, fileName):
        SourceStatement.__init__(self, fileName)
    def execute(self):
        # Only execute on first sight of this filename.
        if self.fileName not in OnceStatement.ALREADY_EXECUTED:
            OnceStatement.ALREADY_EXECUTED.add(self.fileName)
            SourceStatement.execute(self)
class CallStatement(base.StatementBase):
    """CALL statement: runs a miner script in its own private environment."""
    NAME = "CALL"
    SHORT_HELP = "CALL <scriptId>(paramName=paramValue, ...) - executes miner script"
    LONG_HELP = """CALL <scriptId>(paramName=paramValue, ...)
Executes miner script in its own context.
If script overwrites some parameters it is not reflected in perent's script environment
scriptId - is script in the tools folder or in relative notation (e.g. myTool.myScript, .myScript)
paramName - is the name of parameter -> _1 , _2 ... - positional parameters
            _o = "output" maps to -o "output"
paramValue - is the python expression
"""
    def __init__(self, scriptId, parametersList):
        """
        scriptId: script identifier (tools folder or relative notation)
        parametersList: parsed "name=value" parameter expressions
        """
        base.StatementBase.__init__(self)
        self.scriptId = scriptId
        self.parametersList = parametersList
    def execute(self):
        """Evaluate each parameter expression and run the script privately."""
        parametersMap = {}
        for e in self.parametersList:
            name, value = e.getValue().split("=", 1)
            name = name.strip()
            if name == "_o":
                # "_o" is shorthand for the output option ("-o").
                name = ">"
            elif name.startswith("_"):
                # "_<n>" denotes a positional parameter: strip the leading
                # underscore only when the remainder is a valid integer.
                # (Was a bare `except:` with an unused temp variable.)
                try:
                    int(name[1:])
                except ValueError:
                    pass
                else:
                    name = name[1:]
            parametersMap[name] = str(miner_globals.evalExpression(value))
        miner_globals.callScript(self.scriptId, inPrivateEnvironment=True, **parametersMap)
# Register help entries and grammar keywords with the global miner registry.
# SOURCE/ONCE pass switchesToFileMode=True so the lexer accepts a filename next.
miner_globals.addHelpClass(SourceStatement)
miner_globals.addKeyWord(statement="SOURCE", switchesToFileMode=True)
miner_globals.addHelpClass(OnceStatement)
miner_globals.addKeyWord(statement="ONCE", switchesToFileMode=True)
miner_globals.addHelpClass(CallStatement)
miner_globals.addKeyWord(statement="CALL")
|
[
"miner_globals.addKeyWord",
"base.StatementBase.__init__",
"miner_globals.addHelpClass",
"miner_globals.callScript",
"miner_globals.evalExpression"
] |
[((3266, 3309), 'miner_globals.addHelpClass', 'miner_globals.addHelpClass', (['SourceStatement'], {}), '(SourceStatement)\n', (3292, 3309), False, 'import miner_globals\n'), ((3310, 3379), 'miner_globals.addKeyWord', 'miner_globals.addKeyWord', ([], {'statement': '"""SOURCE"""', 'switchesToFileMode': '(True)'}), "(statement='SOURCE', switchesToFileMode=True)\n", (3334, 3379), False, 'import miner_globals\n'), ((3381, 3422), 'miner_globals.addHelpClass', 'miner_globals.addHelpClass', (['OnceStatement'], {}), '(OnceStatement)\n', (3407, 3422), False, 'import miner_globals\n'), ((3423, 3490), 'miner_globals.addKeyWord', 'miner_globals.addKeyWord', ([], {'statement': '"""ONCE"""', 'switchesToFileMode': '(True)'}), "(statement='ONCE', switchesToFileMode=True)\n", (3447, 3490), False, 'import miner_globals\n'), ((3492, 3533), 'miner_globals.addHelpClass', 'miner_globals.addHelpClass', (['CallStatement'], {}), '(CallStatement)\n', (3518, 3533), False, 'import miner_globals\n'), ((3534, 3576), 'miner_globals.addKeyWord', 'miner_globals.addKeyWord', ([], {'statement': '"""CALL"""'}), "(statement='CALL')\n", (3558, 3576), False, 'import miner_globals\n'), ((968, 1001), 'base.StatementBase.__init__', 'base.StatementBase.__init__', (['self'], {}), '(self)\n', (995, 1001), False, 'import base\n'), ((1066, 1105), 'miner_globals.callScript', 'miner_globals.callScript', (['self.fileName'], {}), '(self.fileName)\n', (1090, 1105), False, 'import miner_globals\n'), ((2559, 2592), 'base.StatementBase.__init__', 'base.StatementBase.__init__', (['self'], {}), '(self)\n', (2586, 2592), False, 'import base\n'), ((3173, 3261), 'miner_globals.callScript', 'miner_globals.callScript', (['self.scriptId'], {'inPrivateEnvironment': '(True)'}), '(self.scriptId, inPrivateEnvironment=True, **\n parametersMap)\n', (3197, 3261), False, 'import miner_globals\n'), ((3128, 3163), 'miner_globals.evalExpression', 'miner_globals.evalExpression', (['value'], {}), '(value)\n', (3156, 3163), False, 'import 
miner_globals\n')]
|
from financial_canvas.figures.CustomFigure import CustomFigure
from bokeh.models import RangeTool
class PreviewSlider(CustomFigure):
    """
    Preview slider rendered beneath the main figure. The first df should
    always contain a close column.

    Args:
        target_figure (figures.CustomFigure): (optional) if passed will be
            resized with slider.
    """
    def __init__(self, df, *, selected_from=None, target_figure=None):
        super().__init__(df, selected_from=selected_from)
        figure_factory = self.get_figure_defaults()
        fig = figure_factory(
            title="Drag the middle and edges of the selection box to change the range above",
            plot_height=125,
            x_axis_type="datetime",
            y_axis_type=None,
            tools="",
            toolbar_location=None,
            background_fill_color="#efefef",
        )
        if target_figure:
            # Couple this slider to the target figure's x-range via a RangeTool.
            selection = RangeTool(x_range=target_figure.p.x_range)
            selection.overlay.fill_color = "navy"
            selection.overlay.fill_alpha = 0.2
            fig.add_tools(selection)
            fig.toolbar.active_multi = selection
            self.sources = target_figure.sources
        # Lightweight scatter of the close series as the preview trace.
        fig.scatter('index', 'close', size=1, source=self.sources['main'][0])
        # layout
        fig.ygrid.grid_line_color = None
        self.p = fig
|
[
"bokeh.models.RangeTool"
] |
[((936, 978), 'bokeh.models.RangeTool', 'RangeTool', ([], {'x_range': 'target_figure.p.x_range'}), '(x_range=target_figure.p.x_range)\n', (945, 978), False, 'from bokeh.models import RangeTool\n')]
|
# -*- coding: utf-8 -*-
# Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2020 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from synapse.api import errors
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_json_object_from_request,
)
from synapse.http.site import SynapseRequest
from ._base import client_patterns, interactive_auth_handler
logger = logging.getLogger(__name__)
class DevicesRestServlet(RestServlet):
    """Lists all devices belonging to the authenticated user."""
    PATTERNS = client_patterns("/devices$")
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
    async def on_GET(self, request):
        # Guests are allowed to list their own devices.
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        devices = await self.device_handler.get_devices_by_user(
            requester.user.to_string()
        )
        return 200, {"devices": devices}
class DeleteDevicesRestServlet(RestServlet):
    """
    API for bulk deletion of devices. Accepts a JSON object with a devices
    key which lists the device_ids to delete. Requires user interactive auth.
    """
    PATTERNS = client_patterns("/delete_devices")
    def __init__(self, hs):
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
        self.auth_handler = hs.get_auth_handler()
    @interactive_auth_handler
    async def on_POST(self, request):
        requester = await self.auth.get_user_by_req(request)
        try:
            body = parse_json_object_from_request(request)
        except errors.SynapseError as e:
            if e.errcode == errors.Codes.NOT_JSON:
                # DELETE
                # deal with older clients which didn't pass a JSON dict
                # the same as those that pass an empty dict
                body = {}
            else:
                raise e
        assert_params_in_dict(body, ["devices"])
        # Interactive (UI) auth re-check before destroying the devices.
        await self.auth_handler.validate_user_via_ui_auth(
            requester, request, body, "remove device(s) from your account",
        )
        await self.device_handler.delete_devices(
            requester.user.to_string(), body["devices"]
        )
        return 200, {}
class DeviceRestServlet(RestServlet):
    """Get, update or delete a single device of the authenticated user."""
    PATTERNS = client_patterns("/devices/(?P<device_id>[^/]*)$")
    def __init__(self, hs):
        """
        Args:
            hs (synapse.server.HomeServer): server
        """
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
        self.auth_handler = hs.get_auth_handler()
    async def on_GET(self, request, device_id):
        # Guests may fetch their own device info.
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        device = await self.device_handler.get_device(
            requester.user.to_string(), device_id
        )
        return 200, device
    @interactive_auth_handler
    async def on_DELETE(self, request, device_id):
        requester = await self.auth.get_user_by_req(request)
        try:
            body = parse_json_object_from_request(request)
        except errors.SynapseError as e:
            if e.errcode == errors.Codes.NOT_JSON:
                # deal with older clients which didn't pass a JSON dict
                # the same as those that pass an empty dict
                body = {}
            else:
                raise
        # Interactive (UI) auth re-check before deleting the device.
        await self.auth_handler.validate_user_via_ui_auth(
            requester, request, body, "remove a device from your account",
        )
        await self.device_handler.delete_device(requester.user.to_string(), device_id)
        return 200, {}
    async def on_PUT(self, request, device_id):
        requester = await self.auth.get_user_by_req(request, allow_guest=True)
        body = parse_json_object_from_request(request)
        await self.device_handler.update_device(
            requester.user.to_string(), device_id, body
        )
        return 200, {}
class DehydratedDeviceServlet(RestServlet):
    """Retrieve or store a dehydrated device.
    GET /org.matrix.msc2697.v2/dehydrated_device
    HTTP/1.1 200 OK
    Content-Type: application/json
    {
        "device_id": "dehydrated_device_id",
        "device_data": {
            "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
            "account": "dehydrated_device"
        }
    }
    PUT /org.matrix.msc2697/dehydrated_device
    Content-Type: application/json
    {
        "device_data": {
            "algorithm": "org.matrix.msc2697.v1.dehydration.v1.olm",
            "account": "dehydrated_device"
        }
    }
    HTTP/1.1 200 OK
    Content-Type: application/json
    {
        "device_id": "dehydrated_device_id"
    }
    """
    # NOTE(review): releases=() appears to restrict this to the unstable API
    # prefix only — confirm against client_patterns semantics.
    PATTERNS = client_patterns("/org.matrix.msc2697.v2/dehydrated_device", releases=())
    def __init__(self, hs):
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
    async def on_GET(self, request: SynapseRequest):
        requester = await self.auth.get_user_by_req(request)
        dehydrated_device = await self.device_handler.get_dehydrated_device(
            requester.user.to_string()
        )
        if dehydrated_device is not None:
            (device_id, device_data) = dehydrated_device
            result = {"device_id": device_id, "device_data": device_data}
            return (200, result)
        else:
            raise errors.NotFoundError("No dehydrated device available")
    async def on_PUT(self, request: SynapseRequest):
        submission = parse_json_object_from_request(request)
        requester = await self.auth.get_user_by_req(request)
        # device_data is mandatory and must be a JSON object.
        if "device_data" not in submission:
            raise errors.SynapseError(
                400, "device_data missing", errcode=errors.Codes.MISSING_PARAM,
            )
        elif not isinstance(submission["device_data"], dict):
            raise errors.SynapseError(
                400,
                "device_data must be an object",
                errcode=errors.Codes.INVALID_PARAM,
            )
        device_id = await self.device_handler.store_dehydrated_device(
            requester.user.to_string(),
            submission["device_data"],
            submission.get("initial_device_display_name", None),
        )
        return 200, {"device_id": device_id}
class ClaimDehydratedDeviceServlet(RestServlet):
    """Claim a dehydrated device.
    POST /org.matrix.msc2697.v2/dehydrated_device/claim
    Content-Type: application/json
    {
        "device_id": "dehydrated_device_id"
    }
    HTTP/1.1 200 OK
    Content-Type: application/json
    {
        "success": true,
    }
    """
    PATTERNS = client_patterns(
        "/org.matrix.msc2697.v2/dehydrated_device/claim", releases=()
    )
    def __init__(self, hs):
        super().__init__()
        self.hs = hs
        self.auth = hs.get_auth()
        self.device_handler = hs.get_device_handler()
    async def on_POST(self, request: SynapseRequest):
        requester = await self.auth.get_user_by_req(request)
        submission = parse_json_object_from_request(request)
        # device_id is mandatory and must be a string.
        if "device_id" not in submission:
            raise errors.SynapseError(
                400, "device_id missing", errcode=errors.Codes.MISSING_PARAM,
            )
        elif not isinstance(submission["device_id"], str):
            raise errors.SynapseError(
                400, "device_id must be a string", errcode=errors.Codes.INVALID_PARAM,
            )
        # Rehydrate: turn the stored device into the requester's active device.
        result = await self.device_handler.rehydrate_device(
            requester.user.to_string(),
            self.auth.get_access_token_from_request(request),
            submission["device_id"],
        )
        return (200, result)
def register_servlets(hs, http_server):
    """Instantiate and register every device-related servlet."""
    servlet_classes = (
        DeleteDevicesRestServlet,
        DevicesRestServlet,
        DeviceRestServlet,
        DehydratedDeviceServlet,
        ClaimDehydratedDeviceServlet,
    )
    for servlet_class in servlet_classes:
        servlet_class(hs).register(http_server)
|
[
"synapse.http.servlet.parse_json_object_from_request",
"synapse.api.errors.NotFoundError",
"synapse.http.servlet.assert_params_in_dict",
"synapse.api.errors.SynapseError",
"logging.getLogger"
] |
[((940, 967), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (957, 967), False, 'import logging\n'), ((2590, 2630), 'synapse.http.servlet.assert_params_in_dict', 'assert_params_in_dict', (['body', "['devices']"], {}), "(body, ['devices'])\n", (2611, 2630), False, 'from synapse.http.servlet import RestServlet, assert_params_in_dict, parse_json_object_from_request\n'), ((4503, 4542), 'synapse.http.servlet.parse_json_object_from_request', 'parse_json_object_from_request', (['request'], {}), '(request)\n', (4533, 4542), False, 'from synapse.http.servlet import RestServlet, assert_params_in_dict, parse_json_object_from_request\n'), ((6283, 6322), 'synapse.http.servlet.parse_json_object_from_request', 'parse_json_object_from_request', (['request'], {}), '(request)\n', (6313, 6322), False, 'from synapse.http.servlet import RestServlet, assert_params_in_dict, parse_json_object_from_request\n'), ((7815, 7854), 'synapse.http.servlet.parse_json_object_from_request', 'parse_json_object_from_request', (['request'], {}), '(request)\n', (7845, 7854), False, 'from synapse.http.servlet import RestServlet, assert_params_in_dict, parse_json_object_from_request\n'), ((2224, 2263), 'synapse.http.servlet.parse_json_object_from_request', 'parse_json_object_from_request', (['request'], {}), '(request)\n', (2254, 2263), False, 'from synapse.http.servlet import RestServlet, assert_params_in_dict, parse_json_object_from_request\n'), ((3772, 3811), 'synapse.http.servlet.parse_json_object_from_request', 'parse_json_object_from_request', (['request'], {}), '(request)\n', (3802, 3811), False, 'from synapse.http.servlet import RestServlet, assert_params_in_dict, parse_json_object_from_request\n'), ((6153, 6207), 'synapse.api.errors.NotFoundError', 'errors.NotFoundError', (['"""No dehydrated device available"""'], {}), "('No dehydrated device available')\n", (6173, 6207), False, 'from synapse.api import errors\n'), ((6447, 6535), 'synapse.api.errors.SynapseError', 
'errors.SynapseError', (['(400)', '"""device_data missing"""'], {'errcode': 'errors.Codes.MISSING_PARAM'}), "(400, 'device_data missing', errcode=errors.Codes.\n MISSING_PARAM)\n", (6466, 6535), False, 'from synapse.api import errors\n'), ((7916, 8002), 'synapse.api.errors.SynapseError', 'errors.SynapseError', (['(400)', '"""device_id missing"""'], {'errcode': 'errors.Codes.MISSING_PARAM'}), "(400, 'device_id missing', errcode=errors.Codes.\n MISSING_PARAM)\n", (7935, 8002), False, 'from synapse.api import errors\n'), ((6642, 6740), 'synapse.api.errors.SynapseError', 'errors.SynapseError', (['(400)', '"""device_data must be an object"""'], {'errcode': 'errors.Codes.INVALID_PARAM'}), "(400, 'device_data must be an object', errcode=errors.\n Codes.INVALID_PARAM)\n", (6661, 6740), False, 'from synapse.api import errors\n'), ((8106, 8201), 'synapse.api.errors.SynapseError', 'errors.SynapseError', (['(400)', '"""device_id must be a string"""'], {'errcode': 'errors.Codes.INVALID_PARAM'}), "(400, 'device_id must be a string', errcode=errors.Codes\n .INVALID_PARAM)\n", (8125, 8201), False, 'from synapse.api import errors\n')]
|
"""
Convert an EO3 metadata doc to a Stac Item. (BETA/Incomplete)
"""
import math
import mimetypes
from pathlib import Path
from typing import Dict, List, Optional, Callable
from urllib.parse import urljoin
import jsonschema
import rapidjson
from datacube.utils.geometry import Geometry, CRS
from requests_cache.core import CachedSession
from eodatasets3.model import DatasetDoc, GridDoc
# Mapping between EO3 field names and STAC properties object field names
MAPPING_EO3_TO_STAC = {
"dtr:end_datetime": "end_datetime",
"dtr:start_datetime": "start_datetime",
"eo:gsd": "gsd",
"eo:instrument": "instruments",
"eo:platform": "platform",
"eo:constellation": "constellation",
"eo:off_nadir": "view:off_nadir",
"eo:azimuth": "view:azimuth",
"eo:sun_azimuth": "view:sun_azimuth",
"eo:sun_elevation": "view:sun_elevation",
"odc:processing_datetime": "created",
}
def _as_stac_instruments(value: str):
"""
>>> _as_stac_instruments('TM')
['tm']
>>> _as_stac_instruments('OLI')
['oli']
>>> _as_stac_instruments('ETM+')
['etm']
>>> _as_stac_instruments('OLI_TIRS')
['oli', 'tirs']
"""
return [i.strip("+-").lower() for i in value.split("_")]
def _convert_value_to_stac_type(key: str, value):
"""
Convert return type as per STAC specification
"""
# In STAC spec, "instruments" have [String] type
if key == "eo:instrument":
return _as_stac_instruments(value)
else:
return value
def _media_fields(path: Path) -> Dict:
"""
Add media type of the asset object
"""
mime_type = mimetypes.guess_type(path.name)[0]
if path.suffix == ".sha1":
return {"type": "text/plain"}
elif path.suffix == ".yaml":
return {"type": "text/yaml"}
elif mime_type:
if mime_type == "image/tiff":
return {"type": "image/tiff; application=geotiff"}
else:
return {"type": mime_type}
else:
return {}
def _asset_roles_fields(asset_name: str) -> Dict:
"""
Add roles of the asset object
"""
if asset_name.startswith("thumbnail"):
return {"roles": ["thumbnail"]}
elif asset_name.startswith("metadata"):
return {"roles": ["metadata"]}
else:
return {}
def _asset_title_fields(asset_name: str) -> Dict:
"""
Add title of the asset object
"""
if asset_name.startswith("thumbnail"):
return {"title": "Thumbnail image"}
else:
return {}
def _proj_fields(grid: Dict[str, GridDoc], grid_name: str = "default") -> Dict:
    """
    Add fields of the STAC Projection (proj) Extension to a STAC Item
    """
    grid_doc = grid.get(grid_name)
    # No grid of that name -> contribute no proj fields.
    if not grid_doc:
        return {}
    return {
        "proj:shape": grid_doc.shape,
        "proj:transform": grid_doc.transform,
    }
def _lineage_fields(lineage: Dict) -> Dict:
"""
Add custom lineage field to a STAC Item
"""
if lineage:
return {"odc:lineage": lineage}
else:
return {}
def _odc_links(explorer_base_url: str, dataset: DatasetDoc) -> List:
    """
    Add links for ODC product into a STAC Item
    """
    # Without an Explorer instance there is nothing to link to.
    if not explorer_base_url:
        return []
    product_name = dataset.product.name
    return [
        {
            "title": "ODC Product Overview",
            "rel": "product_overview",
            "type": "text/html",
            "href": urljoin(explorer_base_url, f"product/{product_name}"),
        },
        {
            "title": "ODC Dataset Overview",
            "rel": "alternative",
            "type": "text/html",
            "href": urljoin(explorer_base_url, f"dataset/{dataset.id}"),
        },
        {
            "rel": "parent",
            "href": urljoin(explorer_base_url, f"/stac/collections/{product_name}"),
        },
    ]
def to_stac_item(
    dataset: DatasetDoc,
    stac_item_destination_url: str,
    dataset_location: Optional[str] = None,
    odc_dataset_metadata_url: Optional[str] = None,
    explorer_base_url: Optional[str] = None,
) -> dict:
    """
    Convert the given ODC Dataset into a Stac Item document.
    Note: You may want to call `validate_item(doc)` on the outputs to find any
    incomplete properties.
    :param stac_item_destination_url: Public 'self' URL where the stac document will be findable.
    :param dataset_location: Use this location instead of picking from dataset.locations
        (for calculating relative band paths)
    :param odc_dataset_metadata_url: Public URL for the original ODC dataset yaml document
    :param explorer_base_url: An Explorer instance that contains this dataset.
        Will allow links to things such as the product definition.
    """
    # Reproject the dataset footprint to WGS84 for the Stac geometry/bbox.
    geom = Geometry(dataset.geometry, CRS(dataset.crs))
    wgs84_geometry = geom.to_crs(CRS("epsg:4326"), math.inf)
    properties = {
        # This field is required to be present, even if null.
        "proj:epsg": None,
    }
    crs = dataset.crs.lower()
    if crs.startswith("epsg:"):
        # NOTE(review): lstrip("epsg:") strips a *character set*, not a prefix;
        # safe here only while EPSG codes are purely numeric. Consider
        # removeprefix()/split(':') instead.
        properties["proj:epsg"] = int(crs.lstrip("epsg:"))
    else:
        # Non-EPSG CRS: fall back to a WKT2 description.
        properties["proj:wkt2"] = dataset.crs
    if dataset.label:
        properties["title"] = dataset.label
    # TODO: choose remote if there's multiple locations?
    # Without a dataset location, all paths will be relative.
    dataset_location = dataset_location or (
        dataset.locations[0] if dataset.locations else None
    )
    links = []
    if stac_item_destination_url:
        links.append(
            {
                "rel": "self",
                "type": "application/json",
                "href": stac_item_destination_url,
            }
        )
    if odc_dataset_metadata_url:
        links.append(
            {
                "title": "ODC Dataset YAML",
                "rel": "odc_yaml",
                "type": "text/yaml",
                "href": odc_dataset_metadata_url,
            }
        )
    links.extend(_odc_links(explorer_base_url, dataset))
    item_doc = dict(
        stac_version="1.0.0-beta.2",
        stac_extensions=["eo", "projection"],
        type="Feature",
        id=dataset.id,
        bbox=wgs84_geometry.boundingbox,
        geometry=wgs84_geometry.json,
        properties={
            # EO3 property names are mapped to their STAC equivalents.
            **{
                MAPPING_EO3_TO_STAC.get(key, key): _convert_value_to_stac_type(key, val)
                for key, val in dataset.properties.items()
            },
            "odc:product": dataset.product.name,
            **_lineage_fields(dataset.lineage),
            **properties,
            **(_proj_fields(dataset.grids) if dataset.grids else {}),
        },
        # TODO: Currently assuming no name collisions.
        assets={
            # Measurement bands: data assets with per-band proj fields.
            **{
                name: (
                    {
                        "eo:bands": [{"name": name}],
                        **_media_fields(Path(m.path)),
                        "roles": ["data"],
                        "href": urljoin(dataset_location, m.path),
                        **(
                            _proj_fields(dataset.grids, m.grid) if dataset.grids else {}
                        ),
                    }
                )
                for name, m in dataset.measurements.items()
            },
            # Accessory files (thumbnails, checksums, metadata...).
            **{
                name: (
                    {
                        **_asset_title_fields(name),
                        **_media_fields(Path(m.path)),
                        **_asset_roles_fields(name),
                        "href": urljoin(dataset_location, m.path),
                    }
                )
                for name, m in dataset.accessories.items()
            },
        },
        links=links,
    )
    # To pass validation, only add 'view' extension when we're using it somewhere.
    if any(k.startswith("view:") for k in item_doc["properties"].keys()):
        item_doc["stac_extensions"].append("view")
    return item_doc
def validate_item(
    item_doc: Dict,
    allow_cached_specs: bool = True,
    disallow_network_access: bool = False,
    log: Callable[[str], None] = lambda line: None,
    schema_host="https://schemas.stacspec.org",
):
    """
    Validate a document against the Stac Item schema and its declared extensions
    Requires an internet connection the first time to fetch the relevant specs,
    but will cache them locally for repeated requests.
    :param item_doc:
    :param allow_cached_specs: Allow using a cached spec.
        Disable to force-download the spec again.
    :param disallow_network_access: Only allow validation using cached specs.
    :param log: Callback for human-readable progress/status (eg: 'print').
    :param schema_host: The host to download stac schemas from.
    :raises NoAvailableSchemaError: When cannot find a spec for the given Stac version+extentions
    """
    # Coerce datetimes/UUIDs etc. into plain JSON types for jsonschema.
    item_doc = _normalise_doc(item_doc)
    stac_version = item_doc.get("stac_version")
    one_day = 60 * 60 * 24
    # Beta specs may still change: re-fetch daily. Released specs cache ~1 year.
    max_cache_time = one_day if "beta" in stac_version else one_day * 365
    # The core Item schema plus one schema per declared extension.
    schemas = [
        (
            "Item",
            f"{schema_host}/v{stac_version}/item-spec/json-schema/item.json#",
        )
    ]
    for extension in item_doc.get("stac_extensions", []):
        schemas.append(
            (
                f"extension {extension!r}",
                f"{schema_host}/v{stac_version}/extensions/{extension}/json-schema/schema.json#",
            )
        )
    log(f"Stac version {stac_version}. Schema cache: {max_cache_time/60//60}hrs.")
    with CachedSession(
        "stac_schema_cache",
        backend="sqlite",
        expire_after=max_cache_time,
        old_data_on_error=True,
    ) as session:
        if not allow_cached_specs:
            session.cache.clear()
        for schema_label, schema_url in schemas:
            # A schema not in the cache requires a network fetch, which the
            # caller may have forbidden.
            if not session.cache.has_url(schema_url):
                if disallow_network_access:
                    raise NoAvailableSchemaError(
                        f"{schema_label} schema is not cached, and network access is disabled: {schema_url}"
                    )
                log(f"{schema_url}")
            r = session.get(schema_url, timeout=60)
            if r.status_code == 404:
                raise NoAvailableSchemaError(
                    f"No schema found for Stac {stac_version} {schema_label}: "
                    f"{schema_url!r}"
                )
            r.raise_for_status()
            schema_json = r.json()
            log(f"Validating {schema_label}...")
            jsonschema.validate(item_doc, schema_json)
class NoAvailableSchemaError(Exception):
    """No schema could be found for the document's Stac version/extensions."""
    pass
def _normalise_doc(doc: Dict) -> Dict:
    """
    Coerce all embedded values in *doc* down to plain json types.

    jsonschema can only validate documents built from primitive json types, so
    we round-trip through rapidjson, letting it serialise rich values such as
    datetimes and UUIDs into their string forms.
    """
    serialised = rapidjson.dumps(doc, datetime_mode=True, uuid_mode=True)
    return rapidjson.loads(serialised)
|
[
"jsonschema.validate",
"datacube.utils.geometry.CRS",
"urllib.parse.urljoin",
"requests_cache.core.CachedSession",
"mimetypes.guess_type",
"pathlib.Path",
"rapidjson.dumps"
] |
[((1614, 1645), 'mimetypes.guess_type', 'mimetypes.guess_type', (['path.name'], {}), '(path.name)\n', (1634, 1645), False, 'import mimetypes\n'), ((4918, 4934), 'datacube.utils.geometry.CRS', 'CRS', (['dataset.crs'], {}), '(dataset.crs)\n', (4921, 4934), False, 'from datacube.utils.geometry import Geometry, CRS\n'), ((4969, 4985), 'datacube.utils.geometry.CRS', 'CRS', (['"""epsg:4326"""'], {}), "('epsg:4326')\n", (4972, 4985), False, 'from datacube.utils.geometry import Geometry, CRS\n'), ((9673, 9783), 'requests_cache.core.CachedSession', 'CachedSession', (['"""stac_schema_cache"""'], {'backend': '"""sqlite"""', 'expire_after': 'max_cache_time', 'old_data_on_error': '(True)'}), "('stac_schema_cache', backend='sqlite', expire_after=\n max_cache_time, old_data_on_error=True)\n", (9686, 9783), False, 'from requests_cache.core import CachedSession\n'), ((10946, 11002), 'rapidjson.dumps', 'rapidjson.dumps', (['doc'], {'datetime_mode': '(True)', 'uuid_mode': '(True)'}), '(doc, datetime_mode=True, uuid_mode=True)\n', (10961, 11002), False, 'import rapidjson\n'), ((10666, 10708), 'jsonschema.validate', 'jsonschema.validate', (['item_doc', 'schema_json'], {}), '(item_doc, schema_json)\n', (10685, 10708), False, 'import jsonschema\n'), ((3404, 3465), 'urllib.parse.urljoin', 'urljoin', (['explorer_base_url', 'f"""product/{dataset.product.name}"""'], {}), "(explorer_base_url, f'product/{dataset.product.name}')\n", (3411, 3465), False, 'from urllib.parse import urljoin\n'), ((3644, 3695), 'urllib.parse.urljoin', 'urljoin', (['explorer_base_url', 'f"""dataset/{dataset.id}"""'], {}), "(explorer_base_url, f'dataset/{dataset.id}')\n", (3651, 3695), False, 'from urllib.parse import urljoin\n'), ((3783, 3854), 'urllib.parse.urljoin', 'urljoin', (['explorer_base_url', 'f"""/stac/collections/{dataset.product.name}"""'], {}), "(explorer_base_url, f'/stac/collections/{dataset.product.name}')\n", (3790, 3854), False, 'from urllib.parse import urljoin\n'), ((7093, 7126), 
'urllib.parse.urljoin', 'urljoin', (['dataset_location', 'm.path'], {}), '(dataset_location, m.path)\n', (7100, 7126), False, 'from urllib.parse import urljoin\n'), ((7642, 7675), 'urllib.parse.urljoin', 'urljoin', (['dataset_location', 'm.path'], {}), '(dataset_location, m.path)\n', (7649, 7675), False, 'from urllib.parse import urljoin\n'), ((7003, 7015), 'pathlib.Path', 'Path', (['m.path'], {}), '(m.path)\n', (7007, 7015), False, 'from pathlib import Path\n'), ((7542, 7554), 'pathlib.Path', 'Path', (['m.path'], {}), '(m.path)\n', (7546, 7554), False, 'from pathlib import Path\n')]
|