code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
import torch
import torch.nn as nn
class QNetwork(nn.Module):
    """Single-stream Deep Q-Network (Actor/Policy model).

    Maps an observation vector to one Q-value per action through a
    stack of fully connected layers with ReLU activations.
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        # Assemble the fully connected stack: three hidden layers
        # (128 -> 64 -> 32) with ReLU, then a linear read-out over actions.
        widths = [state_size, 128, 64, 32]
        layers = []
        for n_in, n_out in zip(widths[:-1], widths[1:]):
            layers.append(nn.Linear(n_in, n_out))
            layers.append(nn.ReLU())
        layers.append(nn.Linear(widths[-1], action_size))
        self.dqn_layers = nn.Sequential(*layers)

    def forward(self, state):
        """Build a network that maps state -> action values."""
        return self.dqn_layers(state)
class DuelQNetwork(nn.Module):
    """Actor (Policy) Model using a Dueling DQN.

    A shared feature trunk feeds two heads: a scalar state-value stream
    V(s) and a per-action advantage stream A(s, a), recombined as
    Q(s, a) = V(s) + (A(s, a) - mean_a A(s, a)).
    """

    def __init__(self, state_size, action_size, seed):
        """Initialize parameters and build model.
        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
        """
        super(DuelQNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)
        # Shared feature trunk.
        self.feature_layers = nn.Sequential(
            nn.Linear(state_size, 128),
            nn.ReLU(),
            nn.Linear(128, 64),
            nn.ReLU(),
            nn.Linear(64, 32),
            nn.ReLU()
        )
        # Value stream: one scalar V(s) per state.
        self.value_stream = nn.Sequential(
            nn.Linear(32, 1)
        )
        # Advantage stream: one value A(s, a) per action.
        self.advantage_stream = nn.Sequential(
            nn.Linear(32, action_size)
        )

    def forward(self, state):
        """Build a network that maps state -> action values."""
        x = self.feature_layers(state)
        values = self.value_stream(x)
        advantages = self.advantage_stream(x)
        # Center the advantages per sample, over the action dimension only.
        # The previous `advantages.mean()` averaged over the entire batch as
        # well, coupling the Q-values of unrelated samples in a batch.
        q_values = values + (advantages - advantages.mean(dim=-1, keepdim=True))
        return q_values
"torch.manual_seed",
"torch.nn.ReLU",
"torch.nn.Linear"
] | [((469, 492), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (486, 492), False, 'import torch\n'), ((1409, 1432), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (1426, 1432), False, 'import torch\n'), ((594, 620), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(128)'], {}), '(state_size, 128)\n', (603, 620), True, 'import torch.nn as nn\n'), ((634, 643), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (641, 643), True, 'import torch.nn as nn\n'), ((657, 675), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (666, 675), True, 'import torch.nn as nn\n'), ((689, 698), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (696, 698), True, 'import torch.nn as nn\n'), ((712, 729), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (721, 729), True, 'import torch.nn as nn\n'), ((743, 752), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (750, 752), True, 'import torch.nn as nn\n'), ((766, 792), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'action_size'], {}), '(32, action_size)\n', (775, 792), True, 'import torch.nn as nn\n'), ((1531, 1557), 'torch.nn.Linear', 'nn.Linear', (['state_size', '(128)'], {}), '(state_size, 128)\n', (1540, 1557), True, 'import torch.nn as nn\n'), ((1571, 1580), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1578, 1580), True, 'import torch.nn as nn\n'), ((1594, 1612), 'torch.nn.Linear', 'nn.Linear', (['(128)', '(64)'], {}), '(128, 64)\n', (1603, 1612), True, 'import torch.nn as nn\n'), ((1626, 1635), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1633, 1635), True, 'import torch.nn as nn\n'), ((1649, 1666), 'torch.nn.Linear', 'nn.Linear', (['(64)', '(32)'], {}), '(64, 32)\n', (1658, 1666), True, 'import torch.nn as nn\n'), ((1680, 1689), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1687, 1689), True, 'import torch.nn as nn\n'), ((1794, 1810), 'torch.nn.Linear', 'nn.Linear', (['(32)', '(1)'], {}), '(32, 1)\n', (1803, 1810), True, 'import torch.nn as nn\n'), ((1914, 
1940), 'torch.nn.Linear', 'nn.Linear', (['(32)', 'action_size'], {}), '(32, action_size)\n', (1923, 1940), True, 'import torch.nn as nn\n')] |
# Copyright 2017 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Input and output from network interfaces.
This wraps PCap, TunTap, etc., to provide a simple, universal, cooperative
interface to network interfaces.
Currently limited to Linux.
"""
from pox.lib.pxpcap import PCap
from queue import Queue
from pox.lib.revent import Event, EventMixin
from pox.lib.ioworker.io_loop import ReadLoop
from pox.core import core
import struct
from fcntl import ioctl
import socket
from pox.lib.addresses import EthAddr, IPAddr
from pox.lib.addresses import parse_cidr, cidr_to_netmask
import os
import ctypes
IFNAMESIZ = 16
IFREQ_SIZE = 40
# from linux/if_tun.h
TUNSETIFF = 0x400454ca
TUNGETIFF = 0x800454d2
IFF_TUN = 0x0001
IFF_TAP = 0x0002
IFF_NO_PI = 0x1000
IFF_ONE_QUEUE = 0x2000
IFF_VNET_HDR = 0x4000
IFF_TUN_EXCL = 0x8000
IFF_MULTI_QUEUE = 0x0100
IFF_ATTACH_QUEUE = 0x0200
IFF_DETACH_QUEUE = 0x0400
IFF_PERSIST = 0x0800
IFF_NOFILTER = 0x1000
#from linux/if.h (flags)
IFF_UP = 1<<0
IFF_BROADCAST = 1<<1
IFF_DEBUG = 1<<2
IFF_LOOPBACK = 1<<3
IFF_POINTOPOINT = 1<<4
IFF_NOTRAILERS = 1<<5
IFF_RUNNING = 1<<6
IFF_NOARP = 1<<7
IFF_PROMISC = 1<<8
IFF_ALLMULTI = 1<<9
IFF_MASTER = 1<<10
IFF_SLAVE = 1<<11
IFF_MULTICAST = 1<<12
IFF_PORTSEL = 1<<13
IFF_AUTOMEDIA = 1<<14
IFF_DYNAMIC = 1<<15
IFF_LOWER_UP = 1<<16
IFF_DORMANT = 1<<17
IFF_ECHO = 1<<18
# Unless IFF_NO_PI, there's a header on packets:
# 16 bits of flags
# 16 bits (big endian?) protocol number
# from /usr/include/linux/sockios.h
SIOCGIFHWADDR = 0x8927
SIOCGIFMTU = 0x8921
SIOCSIFMTU = 0x8922
SIOCGIFFLAGS = 0x8913
SIOCSIFFLAGS = 0x8914
SIOCSIFHWADDR = 0x8924
SIOCGIFNETMASK = 0x891b
SIOCSIFNETMASK = 0x891c
SIOCGIFADDR = 0x8915
SIOCSIFADDR = 0x8916
SIOCGIFBRDADDR = 0x8919
SIOCSIFBRDADDR = 0x891a
SIOCSIFNAME = 0x8923
SIOCADDRT = 0x890B # rtentry (route.h) for IPv4, in6_rtmsg for IPv6
SIOCDELRT = 0x890C
# from /usr/include/linux/if_arp.h
ARPHRD_ETHER = 1
ARPHRD_IEEE802 = 1
ARPHRD_IEEE1394 = 24
ARPHRD_EUI64 = 27
ARPHRD_LOOPBACK = 772
ARPHRD_IPGRE = 778
ARPHRD_IEE802_TR = 800
ARPHRD_IEE80211 = 801
ARPHRD_IEE80211_PRISM = 802
ARPHRD_IEE80211_RADIOTAP = 803
ARPHRD_IP6GRE = 823
class rtentry (object):
  """
  Wrapper for Linux rtentry

  Only tries to capture IPv4 usage.
  Possibly better done with ctypes.

  An instance mirrors the kernel's struct rtentry; pack() produces the
  binary image handed to ioctl(SIOCADDRT) / ioctl(SIOCDELRT).
  """
  # flags
  RTF_UP = 0x0001 # usable
  RTF_GATEWAY = 0x0002 # dst is gateway
  RTF_HOST = 0x0004 # host route
  RTF_REINSTATE = 0x0008 # reinstate after timeout
  RTF_DYNAMIC = 0x0010 # created dynamically (by redirect)
  RTF_MODIFIED = 0x0020 # modified dynamically (by redirect)
  RTF_MSS = 0x0040 # use specific MSS for this route
  RTF_WINDOW = 0x0080 # use per-route window clamping
  RTF_IRTT = 0x0100 # use initial RTT
  RTF_REJECT = 0x0200 # reject route

  # fields (defaults shared at class level; set per-instance before pack())
  rt_hash = 0
  rt_dst = IPAddr("0.0.0.0")      # destination network/host
  rt_gateway = IPAddr("0.0.0.0")  # next hop (meaningful with RTF_GATEWAY)
  rt_genmask = IPAddr("0.0.0.0")  # netmask for rt_dst
  rt_flags = 0
  rt_refcnt = 0
  rt_use = 0
  rt_ifp = 0 # ptr to struct ifnet
  rt_metric = 0
  rt_dev = None # device name
  rt_mss = 0
  rt_window = 0 # window clamping
  rt_irtt = 0 # initial RTT

  def pack (self):
    """
    Return this entry packed as a binary struct rtentry image.

    NOTE(review): on Python 3, ctypes.c_char_p and the "16s" struct
    fields expect bytes; if rt_dev / the sockaddr buffers are str this
    raises TypeError -- confirm callers pass bytes.
    """
    if self.rt_dev:
      s = ctypes.c_char_p(self.rt_dev + "\0") # Null terminator necessary?
      # Embed the address of the C string in the struct; the kernel reads
      # the device name through this pointer.
      dev = ctypes.cast(s, ctypes.c_void_p).value
      self._buf = s # You must use the resulting packed string before changing
                    # rt_dev!
    else:
      dev = 0
    # Layout: hash, dst, gateway, genmask, flags, refcnt, use, ifp,
    # metric, dev pointer, mss, window, irtt.
    return struct.pack("L16s16s16shhLPhPLLH",
        self.rt_hash,
        sockaddr_in(self.rt_dst).pack(),
        sockaddr_in(self.rt_gateway).pack(),
        sockaddr_in(self.rt_genmask).pack(),
        self.rt_flags,
        self.rt_refcnt,
        self.rt_use,
        self.rt_ifp,
        self.rt_metric,
        dev,
        self.rt_mss,
        self.rt_window,
        self.rt_irtt)
class sockaddr_in (object):
  """
  Wrapper for a struct sockaddr_in (IPv4 socket address)

  pack() yields the 16-byte wire layout used by the interface ioctls:
  2-byte family, 2-byte port, 4-byte address, 8 bytes of zero padding.
  """
  sin_family = socket.AF_INET
  sin_port = 0
  sin_addr = IPAddr("0.0.0.0")

  def __init__ (self, addr=None, port=None):
    if addr is not None:
      self.sin_addr = IPAddr(addr)
    if port is not None:
      self.sin_port = port

  def pack (self):
    """
    Return the packed 16-byte sockaddr_in.
    """
    r = struct.pack("hH", self.sin_family, self.sin_port)
    r += self.sin_addr.raw
    # Pad to sizeof(struct sockaddr_in).  The padding must be bytes:
    # struct.pack returns bytes, so appending a str ("\0" * 8, as this
    # previously did) raises TypeError on Python 3.
    r += (b"\0" * 8)
    return r
class Interface (object):
  """
  Simple interface to tun/tap driver

  Currently only for Linux.  IIRC, shouldn't be too hard to adapt for BSD.
  Other OSes will probably need a fair amount of work.

  Wraps the SIOCxIF* ioctls to read and modify an interface's name, MTU,
  flags and addresses, plus SIOCADDRT/SIOCDELRT for routing entries, and
  the /proc/sys knobs for IPv6 and IP forwarding.
  """
  #TODO: Setters

  def __init__ (self, name):
    self._name = name

  def __str__ (self):
    return "%s('%s')" % (type(self).__name__, self.name)

  @property
  def name (self):
    # The stored name may be NUL-padded (e.g., copied out of an ifreq).
    return self._name.rstrip("\0")

  @name.setter
  def name (self, value):
    if len(value) > IFNAMESIZ: raise RuntimeError("Name too long")
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += value
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFNAME, ifr)
    self._name = value

  @property
  def ipv6_enabled (self):
    # The kernel knob is "disable_ipv6", hence the inversions below.
    # (Was file(...), a Python 2 builtin that no longer exists.)
    f = open("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "r")
    with f:
      return f.read()[0] == "0" # Note inversion!

  @ipv6_enabled.setter
  def ipv6_enabled (self, value):
    f = open("/proc/sys/net/ipv6/conf/%s/disable_ipv6" % (self.name,), "w")
    with f:
      f.write("0" if value else "1") # Note inversion!

  @property
  def ip_forwarding (self):
    f = open("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "r")
    with f:
      return f.read()[0] == "1"

  @ip_forwarding.setter
  def ip_forwarding (self, value):
    f = open("/proc/sys/net/ipv4/conf/%s/forwarding" % (self.name,), "w")
    with f:
      f.write("1" if value else "0")

  @property
  def mtu (self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFMTU, ifr)
    return struct.unpack("I", ret[IFNAMESIZ:][:4])[0]

  @mtu.setter
  def mtu (self, value):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sI", self.name, value)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFMTU, ifr)

  @property
  def flags (self):
    # 16-bit IFF_* flag word (see constants at top of module).
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFFLAGS, ifr)
    return struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]

  @flags.setter
  def flags (self, value):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, value)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFFLAGS, ifr)

  def set_flags (self, flags, on=True):
    if on:
      self.flags |= flags
    else:
      self.unset_flags(flags)

  def unset_flags (self, flags):
    # Clear the given bits of the 16-bit flag word.
    self.flags = self.flags & (flags ^ 0xffFF)

  @property
  def promiscuous (self):
    return bool(self.flags & IFF_PROMISC)

  @promiscuous.setter
  def promiscuous (self, value):
    self.set_flags(IFF_PROMISC, value)

  @property
  def is_up (self):
    return (self.flags & IFF_UP) != 0

  @is_up.setter
  def is_up (self, value):
    self.set_flags(IFF_UP, value)

  @property
  def is_running (self):
    return (self.flags & IFF_RUNNING) != 0

  @property
  def arp_enabled (self):
    # Note inversion: the kernel flag is IFF_NOARP.
    return (self.flags & IFF_NOARP) == 0

  @arp_enabled.setter
  def arp_enabled (self, value):
    self.set_flags(IFF_NOARP, not value)

  @property
  def ip_addr (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFADDR)
    except IOError as e:
      # EADDRNOTAVAIL: no address assigned.
      if e.errno == 99: return None
      raise

  @ip_addr.setter
  def ip_addr (self, value):
    return self._ioctl_set_ipv4(SIOCSIFADDR, value)

  @property
  def netmask (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFNETMASK)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @netmask.setter
  def netmask (self, value):
    return self._ioctl_set_ipv4(SIOCSIFNETMASK, value)

  @property
  def broadcast_addr (self):
    try:
      return self._ioctl_get_ipv4(SIOCGIFBRDADDR)
    except IOError as e:
      if e.errno == 99: return None
      raise

  @broadcast_addr.setter
  def broadcast_addr (self, value):
    return self._ioctl_set_ipv4(SIOCSIFBRDADDR, value)

  @property
  def eth_addr (self):
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCGIFHWADDR, ifr)
    sa = ret[IFNAMESIZ:] # sockaddr
    return self._get_eth(sa)

  @eth_addr.setter
  def eth_addr (self, value):
    value = EthAddr(value).raw
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sH", self.name, ARPHRD_ETHER)
    ifr += value # Append to sockaddr
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, SIOCSIFHWADDR, ifr)

  def _ioctl_get_ipv4 (self, which):
    # Shared helper for the SIOCGIF* address getters.
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "s", self.name)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, which, ifr)
    return self._get_ipv4(ret[IFNAMESIZ:])

  def _ioctl_set_ipv4 (self, which, value):
    # Shared helper for the SIOCSIF* address setters.
    value = IPAddr(value)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifr = struct.pack(str(IFNAMESIZ) + "sHHI", self.name, socket.AF_INET, 0,
                      value.toUnsigned(networkOrder=True))
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(sock, which, ifr)

  @staticmethod
  def _get_ipv4 (sa):
    # Parse an AF_INET sockaddr into an IPAddr.
    sa_family = struct.unpack("H", sa[:2])[0]
    if sa_family == socket.AF_INET:
      return IPAddr(sa[4:8])
    else:
      raise RuntimeError("Unsupported hardware type %s for %s (expected %s)"
                         % (sa_family, self, socket.AF_INET))

  @staticmethod
  def _get_eth (sa):
    # Parse an ARPHRD_ETHER sockaddr into an EthAddr.
    sa_family = struct.unpack("H", sa[:2])[0]
    if sa_family == ARPHRD_ETHER:
      return EthAddr(sa[2:8])
    else:
      raise RuntimeError("Unsupported hardware type %s (expected %s)"
                         % (sa_family, ARPHRD_ETHER))

  def add_default_route (self, *args, **kw):
    return self.add_route("0.0.0.0/0", *args, **kw)

  def add_route (self, network, gateway=None, dev=(), metric=0):
    """
    Add routing table entry

    If dev is unspecified, it defaults to this device
    """
    return self._add_del_route(network, gateway, dev, metric, SIOCADDRT)

  def del_route (self, network, gateway=None, dev=(), metric=0):
    """
    Remove a routing table entry

    If dev is unspecified, it defaults to this device
    """
    return self._add_del_route(network, gateway, dev, metric, SIOCDELRT)

  def _add_del_route (self, network, gateway=None, dev=(), metric=0,
                      command=None):
    """
    Add or remove a routing table entry

    If dev is unspecified, it defaults to this device
    """
    r = rtentry()
    # Normalize a (addr, mask) tuple into "addr/mask" CIDR text.
    if isinstance(network, tuple):
      addr,mask = network
      addr = str(addr)
      if isinstance(mask, int):
        mask = cidr_to_netmask(mask)
      mask = str(mask)
      network = "%s/%s" % (addr,mask)
    host = False
    if isinstance(network, IPAddr) or (isinstance(network, str)
                                       and "/" not in network):
      host = True
    network,bits = parse_cidr(network)
    r.rt_dst = network
    r.rt_genmask = cidr_to_netmask(bits)
    if gateway is not None:
      r.rt_gateway = IPAddr(gateway)
      r.rt_flags |= r.RTF_GATEWAY
    r.rt_metric = metric
    # () is the "unspecified" sentinel; use == (identity comparison of a
    # literal is implementation-dependent and warns on Python 3.8+).
    if dev == (): dev = self
    if isinstance(dev, Interface): dev = dev.name
    if dev: r.rt_dev = dev
    if host: r.rt_flags |= r.RTF_HOST
    r.rt_flags |= r.RTF_UP
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    rv = ioctl(sock, command, r.pack())
class TunTap (object):
  """
  Simple wrapper for tun/tap interfaces

  Looks like a file-like object.  You should be able to read/write it,
  select on it, etc.
  """
  def __init__ (self, name=None, tun=False, raw=False):
    """
    Create tun or tap

    By default, it creates a new tun or tap with a default name.  If you
    specify a name, it will either try to create it (if it doesn't exist),
    or try to use an existing interface (for which you must have permission).
    Defaults to tap (Ethernet) mode.  Specify tun=True for tun (IP) mode.
    Specify raw=True to skip the 32 bits of flag/protocol metadata.
    """
    if name is None: name = ""
    openflags = os.O_RDWR
    try:
      # O_BINARY only exists on Windows; elsewhere this raises and the
      # flag simply isn't set.  (This previously assigned to a misspelled
      # "openflow" variable, so the flag was never set anywhere.)
      openflags |= os.O_BINARY
    except AttributeError:
      pass
    self._f = os.open("/dev/net/tun", openflags)

    # an ifreq is IFREQ_SIZE bytes long, starting with an interface name
    # (IFNAMESIZ bytes) followed by a big union.
    self.is_tun = tun
    self.is_tap = not tun
    self.is_raw = raw
    flags = 0
    if tun: flags |= IFF_TUN
    else: flags |= IFF_TAP
    if raw: flags |= IFF_NO_PI
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, flags)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNSETIFF, ifr)
    self.name = ret[:IFNAMESIZ]
    iflags = flags
    # Read the flags back to learn what mode we actually got.
    ifr = struct.pack(str(IFNAMESIZ) + "sH", name, 0)
    ifr += "\0" * (IFREQ_SIZE - len(ifr))
    ret = ioctl(self.fileno(), TUNGETIFF, ifr)
    flags = struct.unpack("H", ret[IFNAMESIZ:IFNAMESIZ+2])[0]
    self.is_tun = (flags & IFF_TUN) == IFF_TUN
    self.is_tap = not self.is_tun
    #self.is_raw = (flags & IFF_NO_PI) == IFF_NO_PI

  def fileno (self):
    return self._f

  def write (self, data):
    return os.write(self.fileno(), data)

  def read (self, n):
    return os.read(self.fileno(), n)

  def close (self):
    return os.close(self.fileno())

  @property
  def eth_addr (self):
    return Interface(self.name).eth_addr
class RXData (Event):
  """
  Event raised whenever an interface hands us received data

  interface -- the Interface object the data arrived on
  data      -- the raw packet bytes
  """
  def __init__ (self, interface, data):
    self.data = data
    self.interface = interface
class PCapInterface (Interface, EventMixin):
  # An Interface backed by libpcap.  Incoming packets (captured on the
  # pcap thread) are pushed through a thread-safe queue and re-raised as
  # RXData events on the cooperative (core) thread.
  _eventMixin_events = set([
    RXData,
  ])

  def __init__ (self, name):
    Interface.__init__(self, name)
    EventMixin.__init__(self)
    self._q = Queue()
    p = PCap(name, callback=self._pcap_cb, start=False)
    p.set_direction(True, False) # Incoming, not outgoing
    p.start()
    self.pcap = p
    # Make sure the capture is torn down when POX shuts down.
    core.add_listener(self._handle_GoingDownEvent)

  def _handle_GoingDownEvent (self, event):
    self.close()

  def send (self, data):
    # Inject a raw packet out of this interface (no-op once closed).
    if self.pcap is None: return
    self.pcap.inject(data)

  def _pcap_cb (self, obj, data, sec, usec, length):
    """
    Handles incoming data from pcap

    This may not be on the right thread, so we just push it to a thread-safe
    queue and poke the cooperative thread, which will pop it later.
    """
    # Only schedule a drain when the queue was empty; otherwise a drain is
    # already pending (or running) and will pick this item up.
    do_read = self._q.empty()
    self._q.put((obj,data))
    if do_read: core.callLater(self._queue_read)

  def _queue_read (self):
    # Drain the queue on the cooperative thread, raising RXData per packet.
    anything = False
    for _ in range(10): # at most 10 per pass, to avoid hogging the thread
      try:
        data = self._q.get(False)
        self._q.task_done()
        anything = True
      except:
        # NOTE(review): bare except -- presumably meant to catch
        # queue.Empty only; anything else is silently swallowed too.
        break
      pcap,data = data
      self.raiseEventNoErrors(RXData, self, data)
    if anything:
      # Check for remainders later
      core.callLater(self._queue_read)

  def __del__ (self):
    self.close()

  def close (self):
    if self.pcap:
      self.pcap.close()
      self.pcap = None
class TapInterface (Interface, EventMixin):
  """
  An Interface backed by a TunTap device, serviced by the IO loop.

  Raises RXData for every packet read from the tap.
  """
  _eventMixin_events = set([
    RXData,
  ])

  io_loop = None
  max_read_size = 1600
  default_send_protocol = None

  def __init__ (self, name="", tun=False, raw=False, protocol=None):
    self.tap = None
    self.last_flags = None     # flags word of the most recent packet
    self.last_protocol = None  # protocol of the most recent packet
    if protocol: self.default_send_protocol = protocol
    self.io_loop = ReadLoop.singleton
    Interface.__init__(self, name)
    EventMixin.__init__(self)
    self.tap = TunTap(name, raw=raw, tun=tun)
    if not name: self._name = self.tap.name
    self.io_loop.add(self)

  @property
  def is_tap (self):
    return self.tap.is_tap

  @property
  def is_tun (self):
    return self.tap.is_tun

  def send (self, data, flags=0, protocol=None):
    """
    Write a packet, prepending the 4-byte flag/protocol header unless the
    tap is in raw (IFF_NO_PI) mode.
    """
    if not self.tap.is_raw:
      if protocol is None: protocol = self.default_send_protocol or 0
      #FIXME: In the "0" case above, should we fall back to using the
      #       Ethertype in the packet?
      if flags or protocol:
        flags = struct.pack("!HH", flags, protocol) # Flags reversed?
      else:
        # Must be bytes: struct.pack yields bytes and the payload is
        # bytes, so the old str literal ("\0\0\0\0") raised TypeError
        # when concatenated below.
        flags = b"\0\0\0\0"
      data = flags + data
    self.tap.write(data)

  def _do_rx (self):
    data = self.tap.read(self.max_read_size)
    if not self.tap.is_raw:
      flags,proto = struct.unpack("!HH", data[:4])
      #FIXME: This may invert the flags...
      self.last_flags = flags
      self.last_protocol = proto
      data = data[4:] # Cut off header
    self.raiseEvent(RXData, self, data)

  def fileno (self):
    # Support fileno so that this can be used in IO loop directly
    return self.tap.fileno()

  def close (self):
    if self.tap:
      self.tap.close()
      self.tap = None
      self.io_loop.remove(self)

  def __del__ (self):
    self.close()
| [
"fcntl.ioctl",
"socket.socket",
"os.open",
"pox.lib.addresses.EthAddr",
"ctypes.c_char_p",
"struct.pack",
"queue.Queue",
"struct.unpack",
"pox.lib.addresses.IPAddr",
"pox.core.core.callLater",
"pox.core.core.add_listener",
"ctypes.cast",
"pox.lib.pxpcap.PCap",
"pox.lib.addresses.parse_cidr... | [((3415, 3432), 'pox.lib.addresses.IPAddr', 'IPAddr', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (3421, 3432), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((3448, 3465), 'pox.lib.addresses.IPAddr', 'IPAddr', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (3454, 3465), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((3481, 3498), 'pox.lib.addresses.IPAddr', 'IPAddr', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (3487, 3498), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((4489, 4506), 'pox.lib.addresses.IPAddr', 'IPAddr', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (4495, 4506), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((4693, 4742), 'struct.pack', 'struct.pack', (['"""hH"""', 'self.sin_family', 'self.sin_port'], {}), "('hH', self.sin_family, self.sin_port)\n", (4704, 4742), False, 'import struct\n'), ((5347, 5395), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (5360, 5395), False, 'import socket\n'), ((5520, 5549), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCSIFNAME', 'ifr'], {}), '(sock, SIOCSIFNAME, ifr)\n', (5525, 5549), False, 'from fcntl import ioctl\n'), ((6336, 6384), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (6349, 6384), False, 'import socket\n'), ((6492, 6520), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCGIFMTU', 'ifr'], {}), '(sock, SIOCGIFMTU, ifr)\n', (6497, 6520), False, 'from fcntl import ioctl\n'), ((6626, 6674), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (6639, 6674), False, 'import socket\n'), ((6790, 6818), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCSIFMTU', 'ifr'], {}), '(sock, SIOCSIFMTU, ifr)\n', (6795, 6818), False, 'from fcntl import ioctl\n'), ((6863, 6911), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), 
'(socket.AF_INET, socket.SOCK_DGRAM)\n', (6876, 6911), False, 'import socket\n'), ((7019, 7049), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCGIFFLAGS', 'ifr'], {}), '(sock, SIOCGIFFLAGS, ifr)\n', (7024, 7049), False, 'from fcntl import ioctl\n'), ((7166, 7214), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (7179, 7214), False, 'import socket\n'), ((7330, 7360), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCSIFFLAGS', 'ifr'], {}), '(sock, SIOCSIFFLAGS, ifr)\n', (7335, 7360), False, 'from fcntl import ioctl\n'), ((9015, 9063), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (9028, 9063), False, 'import socket\n'), ((9171, 9202), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCGIFHWADDR', 'ifr'], {}), '(sock, SIOCGIFHWADDR, ifr)\n', (9176, 9202), False, 'from fcntl import ioctl\n'), ((9360, 9408), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (9373, 9408), False, 'import socket\n'), ((9569, 9600), 'fcntl.ioctl', 'ioctl', (['sock', 'SIOCSIFHWADDR', 'ifr'], {}), '(sock, SIOCSIFHWADDR, ifr)\n', (9574, 9600), False, 'from fcntl import ioctl\n'), ((9650, 9698), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (9663, 9698), False, 'import socket\n'), ((9806, 9829), 'fcntl.ioctl', 'ioctl', (['sock', 'which', 'ifr'], {}), '(sock, which, ifr)\n', (9811, 9829), False, 'from fcntl import ioctl\n'), ((9930, 9943), 'pox.lib.addresses.IPAddr', 'IPAddr', (['value'], {}), '(value)\n', (9936, 9943), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((9955, 10003), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (9968, 10003), False, 'import socket\n'), ((10192, 10215), 'fcntl.ioctl', 'ioctl', (['sock', 'which', 'ifr'], {}), 
'(sock, which, ifr)\n', (10197, 10215), False, 'from fcntl import ioctl\n'), ((12008, 12027), 'pox.lib.addresses.parse_cidr', 'parse_cidr', (['network'], {}), '(network)\n', (12018, 12027), False, 'from pox.lib.addresses import parse_cidr, cidr_to_netmask\n'), ((12070, 12091), 'pox.lib.addresses.cidr_to_netmask', 'cidr_to_netmask', (['bits'], {}), '(bits)\n', (12085, 12091), False, 'from pox.lib.addresses import parse_cidr, cidr_to_netmask\n'), ((12398, 12446), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (12411, 12446), False, 'import socket\n'), ((13254, 13288), 'os.open', 'os.open', (['"""/dev/net/tun"""', 'openflags'], {}), "('/dev/net/tun', openflags)\n", (13261, 13288), False, 'import os\n'), ((14765, 14790), 'pox.lib.revent.EventMixin.__init__', 'EventMixin.__init__', (['self'], {}), '(self)\n', (14784, 14790), False, 'from pox.lib.revent import Event, EventMixin\n'), ((14805, 14812), 'queue.Queue', 'Queue', ([], {}), '()\n', (14810, 14812), False, 'from queue import Queue\n'), ((14821, 14868), 'pox.lib.pxpcap.PCap', 'PCap', (['name'], {'callback': 'self._pcap_cb', 'start': '(False)'}), '(name, callback=self._pcap_cb, start=False)\n', (14825, 14868), False, 'from pox.lib.pxpcap import PCap\n'), ((14963, 15009), 'pox.core.core.add_listener', 'core.add_listener', (['self._handle_GoingDownEvent'], {}), '(self._handle_GoingDownEvent)\n', (14980, 15009), False, 'from pox.core import core\n'), ((16469, 16494), 'pox.lib.revent.EventMixin.__init__', 'EventMixin.__init__', (['self'], {}), '(self)\n', (16488, 16494), False, 'from pox.lib.revent import Event, EventMixin\n'), ((3749, 3786), 'ctypes.c_char_p', 'ctypes.c_char_p', (["(self.rt_dev + '\\x00')"], {}), "(self.rt_dev + '\\x00')\n", (3764, 3786), False, 'import ctypes\n'), ((4600, 4612), 'pox.lib.addresses.IPAddr', 'IPAddr', (['addr'], {}), '(addr)\n', (4606, 4612), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((6532, 
6571), 'struct.unpack', 'struct.unpack', (['"""I"""', 'ret[IFNAMESIZ:][:4]'], {}), "('I', ret[IFNAMESIZ:][:4])\n", (6545, 6571), False, 'import struct\n'), ((7061, 7109), 'struct.unpack', 'struct.unpack', (['"""H"""', 'ret[IFNAMESIZ:IFNAMESIZ + 2]'], {}), "('H', ret[IFNAMESIZ:IFNAMESIZ + 2])\n", (7074, 7109), False, 'import struct\n'), ((9330, 9344), 'pox.lib.addresses.EthAddr', 'EthAddr', (['value'], {}), '(value)\n', (9337, 9344), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((10271, 10297), 'struct.unpack', 'struct.unpack', (['"""H"""', 'sa[:2]'], {}), "('H', sa[:2])\n", (10284, 10297), False, 'import struct\n'), ((10350, 10365), 'pox.lib.addresses.IPAddr', 'IPAddr', (['sa[4:8]'], {}), '(sa[4:8])\n', (10356, 10365), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((10569, 10595), 'struct.unpack', 'struct.unpack', (['"""H"""', 'sa[:2]'], {}), "('H', sa[:2])\n", (10582, 10595), False, 'import struct\n'), ((10646, 10662), 'pox.lib.addresses.EthAddr', 'EthAddr', (['sa[2:8]'], {}), '(sa[2:8])\n', (10653, 10662), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((12141, 12156), 'pox.lib.addresses.IPAddr', 'IPAddr', (['gateway'], {}), '(gateway)\n', (12147, 12156), False, 'from pox.lib.addresses import EthAddr, IPAddr\n'), ((13944, 13992), 'struct.unpack', 'struct.unpack', (['"""H"""', 'ret[IFNAMESIZ:IFNAMESIZ + 2]'], {}), "('H', ret[IFNAMESIZ:IFNAMESIZ + 2])\n", (13957, 13992), False, 'import struct\n'), ((15484, 15516), 'pox.core.core.callLater', 'core.callLater', (['self._queue_read'], {}), '(self._queue_read)\n', (15498, 15516), False, 'from pox.core import core\n'), ((15867, 15899), 'pox.core.core.callLater', 'core.callLater', (['self._queue_read'], {}), '(self._queue_read)\n', (15881, 15899), False, 'from pox.core import core\n'), ((17293, 17323), 'struct.unpack', 'struct.unpack', (['"""!HH"""', 'data[:4]'], {}), "('!HH', data[:4])\n", (17306, 17323), False, 'import struct\n'), ((3826, 3857), 'ctypes.cast', 'ctypes.cast', (['s', 
'ctypes.c_void_p'], {}), '(s, ctypes.c_void_p)\n', (3837, 3857), False, 'import ctypes\n'), ((11743, 11764), 'pox.lib.addresses.cidr_to_netmask', 'cidr_to_netmask', (['mask'], {}), '(mask)\n', (11758, 11764), False, 'from pox.lib.addresses import parse_cidr, cidr_to_netmask\n'), ((17034, 17069), 'struct.pack', 'struct.pack', (['"""!HH"""', 'flags', 'protocol'], {}), "('!HH', flags, protocol)\n", (17045, 17069), False, 'import struct\n')] |
# 2020 <NAME> and <NAME>
import os
import json
import numpy as np
from typing import Set, List
from geopy.distance import great_circle
from scipy.spatial.ckdtree import cKDTree
from shapely.geometry import Polygon, shape, Point
from icarus_simulator.sat_core.coordinate_util import geo2cart
from icarus_simulator.strategies.atk_geo_constraint.base_geo_constraint_strat import (
BaseGeoConstraintStrat,
)
from icarus_simulator.structure_definitions import GridPos
# Resolve the packaged GeoJSON world map relative to this file:
# <library root>/data/natural_earth_world_small.geo.json
# (this module lives two directory levels below the library root).
dirname = os.path.dirname(__file__)
strategies_dirname = os.path.split(dirname)[0]
library_dirname = os.path.split(strategies_dirname)[0]
data_dirname = os.path.join(library_dirname, "data")
COUNTRIES_FILE: str = os.path.join(data_dirname, "natural_earth_world_small.geo.json")
class GeoConstrStrat(BaseGeoConstraintStrat):
    """Geographic attacker constraint based on named world regions.

    Restricts the attack grid to the union of the countries, subregions
    and/or continents listed in ``geo_names``.
    """

    def __init__(self, geo_names: List[str], **kwargs):
        super().__init__()
        self.geo_names = geo_names
        # Extra keyword arguments are accepted (and ignored) so strategies
        # can be constructed uniformly.
        if len(kwargs) > 0:
            pass  # Appease the unused param inspection

    @property
    def name(self) -> str:
        return "geo"

    @property
    def param_description(self) -> str:
        return ",".join(self.geo_names)

    def compute(self, grid_pos: GridPos) -> Set[int]:
        """Collect the grid point ids allowed by every configured region."""
        geo_data = load_country_geojson()
        allowed: Set[int] = set()
        for region in self.geo_names:
            allowed |= get_allowed_gridpoints(region, grid_pos, geo_data)
        return allowed
# noinspection PyTypeChecker
def get_allowed_gridpoints(geo_location: str, grid_pos: GridPos, geo_data) -> Set[int]:
    """Return the ids of all grid points lying inside a geographic region.

    geo_location may be an ISO country code, a subregion name or a continent
    name, resolved against the tables produced by load_country_geojson().
    Points within the region's polygon union are included, plus grid points
    within 300 km of the region's border (to keep near-coast points).

    Raises ValueError if geo_location matches none of the lookup tables.
    """
    # Get a list of all possible source points
    if geo_location in geo_data["countries"]:
        indices = [geo_data["countries"][geo_location]]
    elif geo_location in geo_data["subregions"]:
        indices = geo_data["subregions"][geo_location]
    elif geo_location in geo_data["continents"]:
        indices = geo_data["continents"][geo_location]
    else:
        raise ValueError("Invalid geographic constraint")
    geometries = [geo_data["geometries"][index] for index in indices]
    allowed_points = set()
    # Create a unique shape, union of all shapes in the region, and take the
    # points included within.  Note: load_country_geojson() stores the
    # coordinates as (lat, lon), so Point(lat, lon) is consistent with them.
    shp = Polygon()
    for geo in geometries:
        shp = shp.union(shape(geo))
    for idx, pos in grid_pos.items():
        if Point(pos.lat, pos.lon).within(shp):
            allowed_points.add(idx)
    # Extract the border points.  (Removed a dead `if True:` wrapper that
    # previously enclosed the MultiPolygon branch body.)
    x, y = [], []
    if shp.geom_type == "MultiPolygon":
        for poly in shp.geoms:
            x1, y1 = poly.exterior.xy
            x.extend(x1)
            y.extend(y1)
    else:
        x1, y1 = shp.exterior.xy
        x.extend(x1)
        y.extend(y1)
    # Convert all grid points to Cartesian coordinates, keeping a map from
    # the row index back to the grid point id.
    grid_cart = np.zeros((len(grid_pos), 3))
    grid_map = {}
    i = 0
    for idx, pos in grid_pos.items():
        grid_map[i] = idx
        grid_cart[i] = geo2cart({"elev": 0, "lon": pos.lon, "lat": pos.lat})
        i += 1
    # Put the homogeneous grid into a KD-tree and query the border points to
    # include also points slightly in the sea.
    kd = cKDTree(grid_cart)
    for idx in range(len(x)):
        _, closest_grid_idx = kd.query(
            geo2cart({"elev": 0, "lon": y[idx], "lat": x[idx]}), k=1
        )
        grid_id = grid_map[closest_grid_idx]
        if (
            great_circle(
                (grid_pos[grid_id].lat, grid_pos[grid_id].lon), (x[idx], y[idx])
            ).meters
            < 300000
        ):
            # 300000 -> number elaborated to keep the out-of-coast values
            # without including wrong points
            allowed_points.add(grid_map[closest_grid_idx])
    return allowed_points
# noinspection PyTypeChecker
def load_country_geojson():
    """
    Parse COUNTRIES_FILE (Natural Earth GeoJSON) into region lookup tables.

    Returns a dict with:
      geometries: GeoJSON geometry dicts indexed by feature order (features
                  skipped below remain as empty strings)
      countries:  ISO alpha-3 code -> geometry index
      continents: continent name -> list of geometry indices
      subregions: region name (both region_wb and subregion naming schemes)
                  -> list of geometry indices

    Coordinate pairs are flipped in place from GeoJSON (lon, lat) order to
    (lat, lon) order, matching how grid positions are compared elsewhere.
    """
    new_data = {"geometries": [], "countries": {}, "continents": {}, "subregions": {}}
    with open(COUNTRIES_FILE, encoding="utf-8") as f:
        data = json.load(f)
    new_data["geometries"] = [""] * len(data["features"])
    for idx, feature in enumerate(data["features"]):
        props = feature["properties"]
        code = props["iso_a3"]
        if code == "-99":
            # Natural Earth uses "-99" for features without a proper ISO
            # code; those are skipped entirely (geometry stays "").
            continue
        continent = props["continent"]
        subregion = props["region_wb"]
        subregion2 = props["subregion"]
        if continent not in new_data["continents"]:
            new_data["continents"][continent] = []
        if subregion not in new_data["subregions"]:
            new_data["subregions"][subregion] = []
        if subregion2 not in new_data["subregions"]:
            new_data["subregions"][subregion2] = []
        new_data["continents"][continent].append(idx)
        new_data["subregions"][subregion].append(idx)
        new_data["subregions"][subregion2].append(idx)
        new_data["countries"][code] = idx
        new_data["geometries"][idx] = feature["geometry"]
        geom = new_data["geometries"][idx]
        # Flip every coordinate pair of this feature from (lon, lat) to
        # (lat, lon), walking the nesting level of each geometry type.
        if geom["type"] == "MultiPolygon":
            for l1 in range(len(geom["coordinates"])):
                for l2 in range(len(geom["coordinates"][l1])):
                    for l3 in range(len(geom["coordinates"][l1][l2])):
                        geom["coordinates"][l1][l2][l3] = geom["coordinates"][l1][l2][
                            l3
                        ][::-1]
        elif geom["type"] == "Polygon":
            for l1 in range(len(geom["coordinates"])):
                for l2 in range(len(geom["coordinates"][l1])):
                    geom["coordinates"][l1][l2] = geom["coordinates"][l1][l2][::-1]
    print(f"Available subregions: {list(new_data['subregions'].keys())}")
    return new_data
| [
"scipy.spatial.ckdtree.cKDTree",
"os.path.join",
"icarus_simulator.sat_core.coordinate_util.geo2cart",
"os.path.split",
"shapely.geometry.Point",
"os.path.dirname",
"shapely.geometry.Polygon",
"geopy.distance.great_circle",
"json.load",
"shapely.geometry.shape"
] | [((481, 506), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (496, 506), False, 'import os\n'), ((624, 661), 'os.path.join', 'os.path.join', (['library_dirname', '"""data"""'], {}), "(library_dirname, 'data')\n", (636, 661), False, 'import os\n'), ((684, 748), 'os.path.join', 'os.path.join', (['data_dirname', '"""natural_earth_world_small.geo.json"""'], {}), "(data_dirname, 'natural_earth_world_small.geo.json')\n", (696, 748), False, 'import os\n'), ((528, 550), 'os.path.split', 'os.path.split', (['dirname'], {}), '(dirname)\n', (541, 550), False, 'import os\n'), ((572, 605), 'os.path.split', 'os.path.split', (['strategies_dirname'], {}), '(strategies_dirname)\n', (585, 605), False, 'import os\n'), ((2159, 2168), 'shapely.geometry.Polygon', 'Polygon', ([], {}), '()\n', (2166, 2168), False, 'from shapely.geometry import Polygon, shape, Point\n'), ((3249, 3267), 'scipy.spatial.ckdtree.cKDTree', 'cKDTree', (['grid_cart'], {}), '(grid_cart)\n', (3256, 3267), False, 'from scipy.spatial.ckdtree import cKDTree\n'), ((3054, 3107), 'icarus_simulator.sat_core.coordinate_util.geo2cart', 'geo2cart', (["{'elev': 0, 'lon': pos.lon, 'lat': pos.lat}"], {}), "({'elev': 0, 'lon': pos.lon, 'lat': pos.lat})\n", (3062, 3107), False, 'from icarus_simulator.sat_core.coordinate_util import geo2cart\n'), ((4040, 4052), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4049, 4052), False, 'import json\n'), ((2236, 2246), 'shapely.geometry.shape', 'shape', (['geo'], {}), '(geo)\n', (2241, 2246), False, 'from shapely.geometry import Polygon, shape, Point\n'), ((3350, 3401), 'icarus_simulator.sat_core.coordinate_util.geo2cart', 'geo2cart', (["{'elev': 0, 'lon': y[idx], 'lat': x[idx]}"], {}), "({'elev': 0, 'lon': y[idx], 'lat': x[idx]})\n", (3358, 3401), False, 'from icarus_simulator.sat_core.coordinate_util import geo2cart\n'), ((2297, 2320), 'shapely.geometry.Point', 'Point', (['pos.lat', 'pos.lon'], {}), '(pos.lat, pos.lon)\n', (2302, 2320), False, 'from 
shapely.geometry import Polygon, shape, Point\n'), ((3487, 3565), 'geopy.distance.great_circle', 'great_circle', (['(grid_pos[grid_id].lat, grid_pos[grid_id].lon)', '(x[idx], y[idx])'], {}), '((grid_pos[grid_id].lat, grid_pos[grid_id].lon), (x[idx], y[idx]))\n', (3499, 3565), False, 'from geopy.distance import great_circle\n')] |
# coding: utf-8
"""
Grafeas API
An API to insert and retrieve annotations on cloud artifacts. # noqa: E501
OpenAPI spec version: v1alpha1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from grafeas.models.deployment_details_platform import DeploymentDetailsPlatform # noqa: F401,E501
class DeployableDeploymentDetails(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps attribute name -> declared swagger type; consumed by to_dict().
    swagger_types = {
        'user_email': 'str',
        'deploy_time': 'datetime',
        'undeploy_time': 'datetime',
        'config': 'str',
        'address': 'str',
        'resource_uri': 'list[str]',
        'platform': 'DeploymentDetailsPlatform'
    }
    # Maps attribute name -> JSON field name in the API definition.
    attribute_map = {
        'user_email': 'user_email',
        'deploy_time': 'deploy_time',
        'undeploy_time': 'undeploy_time',
        'config': 'config',
        'address': 'address',
        'resource_uri': 'resource_uri',
        'platform': 'platform'
    }
    def __init__(self, user_email=None, deploy_time=None, undeploy_time=None, config=None, address=None, resource_uri=None, platform=None):  # noqa: E501
        """DeployableDeploymentDetails - a model defined in Swagger"""  # noqa: E501
        # Backing fields for the generated properties; None means "not set".
        self._user_email = None
        self._deploy_time = None
        self._undeploy_time = None
        self._config = None
        self._address = None
        self._resource_uri = None
        self._platform = None
        self.discriminator = None
        # Route supplied arguments through the property setters; omitted
        # arguments keep their None default.
        if user_email is not None:
            self.user_email = user_email
        if deploy_time is not None:
            self.deploy_time = deploy_time
        if undeploy_time is not None:
            self.undeploy_time = undeploy_time
        if config is not None:
            self.config = config
        if address is not None:
            self.address = address
        if resource_uri is not None:
            self.resource_uri = resource_uri
        if platform is not None:
            self.platform = platform
    @property
    def user_email(self):
        """Gets the user_email of this DeployableDeploymentDetails.  # noqa: E501
        Identity of the user that triggered this deployment.  # noqa: E501
        :return: The user_email of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: str
        """
        return self._user_email
    @user_email.setter
    def user_email(self, user_email):
        """Sets the user_email of this DeployableDeploymentDetails.
        Identity of the user that triggered this deployment.  # noqa: E501
        :param user_email: The user_email of this DeployableDeploymentDetails.  # noqa: E501
        :type: str
        """
        self._user_email = user_email
    @property
    def deploy_time(self):
        """Gets the deploy_time of this DeployableDeploymentDetails.  # noqa: E501
        Beginning of the lifetime of this deployment.  # noqa: E501
        :return: The deploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: datetime
        """
        return self._deploy_time
    @deploy_time.setter
    def deploy_time(self, deploy_time):
        """Sets the deploy_time of this DeployableDeploymentDetails.
        Beginning of the lifetime of this deployment.  # noqa: E501
        :param deploy_time: The deploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :type: datetime
        """
        self._deploy_time = deploy_time
    @property
    def undeploy_time(self):
        """Gets the undeploy_time of this DeployableDeploymentDetails.  # noqa: E501
        End of the lifetime of this deployment.  # noqa: E501
        :return: The undeploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: datetime
        """
        return self._undeploy_time
    @undeploy_time.setter
    def undeploy_time(self, undeploy_time):
        """Sets the undeploy_time of this DeployableDeploymentDetails.
        End of the lifetime of this deployment.  # noqa: E501
        :param undeploy_time: The undeploy_time of this DeployableDeploymentDetails.  # noqa: E501
        :type: datetime
        """
        self._undeploy_time = undeploy_time
    @property
    def config(self):
        """Gets the config of this DeployableDeploymentDetails.  # noqa: E501
        Configuration used to create this deployment.  # noqa: E501
        :return: The config of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: str
        """
        return self._config
    @config.setter
    def config(self, config):
        """Sets the config of this DeployableDeploymentDetails.
        Configuration used to create this deployment.  # noqa: E501
        :param config: The config of this DeployableDeploymentDetails.  # noqa: E501
        :type: str
        """
        self._config = config
    @property
    def address(self):
        """Gets the address of this DeployableDeploymentDetails.  # noqa: E501
        Address of the runtime element hosting this deployment.  # noqa: E501
        :return: The address of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: str
        """
        return self._address
    @address.setter
    def address(self, address):
        """Sets the address of this DeployableDeploymentDetails.
        Address of the runtime element hosting this deployment.  # noqa: E501
        :param address: The address of this DeployableDeploymentDetails.  # noqa: E501
        :type: str
        """
        self._address = address
    @property
    def resource_uri(self):
        """Gets the resource_uri of this DeployableDeploymentDetails.  # noqa: E501
        Output only. Resource URI for the artifact being deployed taken from the deployable field with the same name.  # noqa: E501
        :return: The resource_uri of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: list[str]
        """
        return self._resource_uri
    @resource_uri.setter
    def resource_uri(self, resource_uri):
        """Sets the resource_uri of this DeployableDeploymentDetails.
        Output only. Resource URI for the artifact being deployed taken from the deployable field with the same name.  # noqa: E501
        :param resource_uri: The resource_uri of this DeployableDeploymentDetails.  # noqa: E501
        :type: list[str]
        """
        self._resource_uri = resource_uri
    @property
    def platform(self):
        """Gets the platform of this DeployableDeploymentDetails.  # noqa: E501
        Platform hosting this deployment.  # noqa: E501
        :return: The platform of this DeployableDeploymentDetails.  # noqa: E501
        :rtype: DeploymentDetailsPlatform
        """
        return self._platform
    @platform.setter
    def platform(self, platform):
        """Sets the platform of this DeployableDeploymentDetails.
        Platform hosting this deployment.  # noqa: E501
        :param platform: The platform of this DeployableDeploymentDetails.  # noqa: E501
        :type: DeploymentDetailsPlatform
        """
        self._platform = platform
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested swagger models inside lists/dicts too.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, DeployableDeploymentDetails):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| [
"six.iteritems"
] | [((7594, 7627), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (7607, 7627), False, 'import six\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Author: stdrickforce (<NAME>)
# Email: <<EMAIL>> <<EMAIL>>
import cat
import time
def ignore_exception(func):
    """Decorator: swallow any Exception raised by *func* and return None instead.

    :param func: callable to wrap
    :return: wrapped callable that never raises (ordinary) exceptions
    """
    from functools import wraps

    @wraps(func)  # preserve the wrapped function's __name__/__doc__
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Best-effort call: failures are deliberately ignored
            return None
    return wrapper
@ignore_exception
@cat.transaction("Trans", "T1")
def test1():
    """Exercise the transaction decorator; the ZeroDivisionError is swallowed."""
    broken = 1 / 0  # NOTE: raises ZeroDivisionError before print ever runs
    print(broken)
def test2():
    """Exercise the transaction API through the context-manager form."""

    def maybe_fail():
        import random
        if random.random() < 0.1:
            raise Exception("error occured!")

    with cat.Transaction("Trans", "T2") as trans:
        cat.log_event("Event", "E2")
        try:
            maybe_fail()
        except Exception:
            # Mark the transaction as failed but keep going
            trans.set_status(cat.CAT_ERROR)
        trans.add_data("context-manager")
        trans.add_data("foo", "bar")
def test3():
    """Exercise the transaction API with explicit completion.

    The transaction is created *before* the try block: previously it was
    created inside it, so a failing constructor left `trans` unbound and the
    finally clause raised NameError, masking the original error.
    """
    trans = cat.Transaction("Trans", "T3")
    try:
        trans.add_data("content")
        trans.add_data("key", "val")
        trans.set_status("error")
        trans.set_duration(500)
        trans.set_duration_start(time.time() * 1000 - 30 * 1000)
        trans.set_timestamp(time.time() * 1000 - 30 * 1000)
    finally:
        # NOTE don't forget to complete the transaction!
        trans.complete()
if __name__ == '__main__':
    # Initialise the CAT client in debug mode, then emit a burst of samples.
    cat.init("pycat", debug=True, logview=False)
    for _ in range(100):
        test1()
        test2()
        test3()
        time.sleep(0.01)
    # Give the background sender a moment to flush before exiting.
    time.sleep(1)
| [
"cat.Transaction",
"cat.log_event",
"cat.init",
"time.sleep",
"random.random",
"cat.transaction",
"time.time"
] | [((329, 359), 'cat.transaction', 'cat.transaction', (['"""Trans"""', '"""T1"""'], {}), "('Trans', 'T1')\n", (344, 359), False, 'import cat\n'), ((1377, 1421), 'cat.init', 'cat.init', (['"""pycat"""'], {'debug': '(True)', 'logview': '(False)'}), "('pycat', debug=True, logview=False)\n", (1385, 1421), False, 'import cat\n'), ((1524, 1537), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1534, 1537), False, 'import time\n'), ((665, 695), 'cat.Transaction', 'cat.Transaction', (['"""Trans"""', '"""T2"""'], {}), "('Trans', 'T2')\n", (680, 695), False, 'import cat\n'), ((710, 738), 'cat.log_event', 'cat.log_event', (['"""Event"""', '"""E2"""'], {}), "('Event', 'E2')\n", (723, 738), False, 'import cat\n'), ((956, 986), 'cat.Transaction', 'cat.Transaction', (['"""Trans"""', '"""T3"""'], {}), "('Trans', 'T3')\n", (971, 986), False, 'import cat\n'), ((1503, 1519), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (1513, 1519), False, 'import time\n'), ((586, 601), 'random.random', 'random.random', ([], {}), '()\n', (599, 601), False, 'import random\n'), ((1157, 1168), 'time.time', 'time.time', ([], {}), '()\n', (1166, 1168), False, 'import time\n'), ((1217, 1228), 'time.time', 'time.time', ([], {}), '()\n', (1226, 1228), False, 'import time\n')] |
# -*- coding: utf-8 -*-
import csv
from matplotlib import pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
def isValid(p, ep):
    """Return True when pattern *p* belongs to the expert's pattern collection *ep*."""
    is_known = p in ep.patterns
    return is_known
# CLASS ANALYSER
class Analyser:
    """Accumulates analysis result rows and exports them to a CSV file.

    Each result row is a dict keyed by the field names used in toFile().
    """
    def __init__(self):
        # Collected result rows (list of dicts)
        self.results = []
    def addResult(self, result):
        """Append one result row to the collected results.

        :param result: the result row (dict keyed by the CSV field names)
        :return: None
        """
        self.results.append(result)
    def __str__(self):
        """Human-readable dump of all collected results."""
        return "Résultats : %r" % self.results
    def toFile(self, filename):
        """Write the collected results to *filename* as a ';'-separated CSV.

        :param filename: destination file path
        """
        # newline="" is required by the csv module; without it the writer's
        # "\r\n" terminator gets doubled on Windows, producing blank lines.
        with open(filename, "w", newline="") as outfile:
            fieldnames = ['idxExpert', 'idxMining', 'pattern expert', 'pattern mining', 'full pattern']
            w = csv.DictWriter(outfile, delimiter=";", fieldnames=fieldnames)
            w.writeheader()
            w.writerows(self.results)
| [
"csv.DictWriter"
] | [((980, 1041), 'csv.DictWriter', 'csv.DictWriter', (['outfile'], {'delimiter': '""";"""', 'fieldnames': 'fieldnames'}), "(outfile, delimiter=';', fieldnames=fieldnames)\n", (994, 1041), False, 'import csv\n')] |
#!/usr/bin/env python3
import sys, os, glob
from glbase3 import *
all_species = glload('species_annotations/species.glb')

# Collect peptide counts per assembly, keeping only assemblies with >= 5000 peptides.
newl = []
for path in glob.glob('pep_counts/*.txt'):
    with open(path, 'rt') as handle:
        count = int(handle.readline().split()[0])
    basename = os.path.split(path)[1]
    species_name = basename.split('.')[0].lower()  # seems a simple rule
    assembly_name = basename.replace('.txt', '')
    if count < 5000:
        continue
    newl.append({'species': species_name, 'assembly_name': assembly_name, 'num_pep': count})

pep_counts = genelist()
pep_counts.load_list(newl)

# Join the peptide counts onto the species annotation table and tidy it up.
all_species = all_species.map(genelist=pep_counts, key='species')
all_species = all_species.removeDuplicates('name')
print(all_species)
all_species = all_species.getColumns(['name', 'species', 'division', 'num_pep', 'assembly_name'])
all_species.sort('name')

all_species.saveTSV('all_species.tsv')
all_species.save('all_species.glb')
# and add the peptide counts for all species
| [
"glob.glob",
"os.path.split"
] | [((148, 177), 'glob.glob', 'glob.glob', (['"""pep_counts/*.txt"""'], {}), "('pep_counts/*.txt')\n", (157, 177), False, 'import sys, os, glob\n'), ((369, 388), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (382, 388), False, 'import sys, os, glob\n'), ((282, 301), 'os.path.split', 'os.path.split', (['file'], {}), '(file)\n', (295, 301), False, 'import sys, os, glob\n')] |
import torch
import torch.nn as nn
import csv
def quantization(x):
    """Quantize image tensor values onto the 8-bit grid (multiples of 1/255)."""
    scaled = x * 255
    return torch.round(scaled) / 255
#picecwise-linear color filter
def CF(img, param,pieces):
param=param[:,:,None,None]
color_curve_sum = torch.sum(param, 4) + 1e-30
total_image = img * 0
for i in range(pieces):
total_image += torch.clamp(img - 1.0 * i /pieces, 0, 1.0 / pieces) * param[:, :, :, :, i]
total_image *= pieces/ color_curve_sum
return total_image
# parsing the data annotation
def load_ground_truth(csv_filename):
    """Read the attack annotation CSV into three parallel lists.

    :param csv_filename: CSV with ImageId, TrueLabel and TargetClass columns
    :return: (image ids, true labels, target labels)
    """
    image_id_list = []
    label_ori_list = []
    label_tar_list = []
    with open(csv_filename) as csvfile:
        for row in csv.DictReader(csvfile, delimiter=','):
            image_id_list.append(row['ImageId'])
            label_ori_list.append(int(row['TrueLabel']))
            label_tar_list.append(int(row['TargetClass']))
    return image_id_list, label_ori_list, label_tar_list
# simple Module to normalize an image
class Normalize(nn.Module):
    """Channel-wise (x - mean) / std normalisation for NCHW image batches."""
    def __init__(self, mean, std):
        super(Normalize, self).__init__()
        self.mean = torch.Tensor(mean)
        self.std = torch.Tensor(std)
    def forward(self, x):
        # Broadcast the per-channel statistics over batch, height and width.
        mean = self.mean.type_as(x)[None, :, None, None]
        std = self.std.type_as(x)[None, :, None, None]
        return (x - mean) / std
# values are standard normalization for ImageNet images,
# from https://github.com/pytorch/examples/blob/master/imagenet/main.py
# Module-level singleton used to normalize inputs before a pretrained ImageNet model.
norm = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
| [
"csv.DictReader",
"torch.Tensor",
"torch.round",
"torch.sum",
"torch.clamp"
] | [((99, 119), 'torch.round', 'torch.round', (['(x * 255)'], {}), '(x * 255)\n', (110, 119), False, 'import torch\n'), ((261, 280), 'torch.sum', 'torch.sum', (['param', '(4)'], {}), '(param, 4)\n', (270, 280), False, 'import torch\n'), ((702, 740), 'csv.DictReader', 'csv.DictReader', (['csvfile'], {'delimiter': '""","""'}), "(csvfile, delimiter=',')\n", (716, 740), False, 'import csv\n'), ((1160, 1178), 'torch.Tensor', 'torch.Tensor', (['mean'], {}), '(mean)\n', (1172, 1178), False, 'import torch\n'), ((1198, 1215), 'torch.Tensor', 'torch.Tensor', (['std'], {}), '(std)\n', (1210, 1215), False, 'import torch\n'), ((364, 416), 'torch.clamp', 'torch.clamp', (['(img - 1.0 * i / pieces)', '(0)', '(1.0 / pieces)'], {}), '(img - 1.0 * i / pieces, 0, 1.0 / pieces)\n', (375, 416), False, 'import torch\n')] |
#!/usr/bin/env python
# The MIT License (MIT)
# Copyright (c) 2014 <NAME>
# Technische Universität München (TUM)
# Autonomous Navigation for Flying Robots
# Homework 2.1
from plot import plot
class UserCode:
    """Tracks running maxima of roll/pitch angles and of the yaw rate."""
    def __init__(self):
        # Running maxima updated on every measurement callback
        self.max_roll_angle = 0
        self.max_pitch_angle = 0
        self.max_yaw_velocity = 0
        # Previous rotZ sample, used to differentiate yaw numerically
        # (NOTE(review): despite its name this stores the last yaw *angle*)
        self.last_yaw_velocity = 0
    def measurement_callback(self, t, dt, navdata):
        '''
        :param t: time since simulation start
        :param dt: time since last call to measurement_callback
        :param navdata: measurements of the quadrotor
        '''
        if abs(navdata.rotX) > self.max_roll_angle:
            self.max_roll_angle = abs(navdata.rotX)
        if abs(navdata.rotY) > self.max_pitch_angle:
            self.max_pitch_angle = abs(navdata.rotY)
        # Finite-difference estimate of the yaw rate from successive rotZ samples
        yaw_rate = abs((navdata.rotZ - self.last_yaw_velocity) / dt)
        if yaw_rate > self.max_yaw_velocity:
            self.max_yaw_velocity = yaw_rate
        self.last_yaw_velocity = navdata.rotZ
        plot("max_roll_angle", self.max_roll_angle)
        plot("max_pitch_angle", self.max_pitch_angle)
        plot("max_yaw_velocity", self.max_yaw_velocity)
| [
"plot.plot"
] | [((1081, 1124), 'plot.plot', 'plot', (['"""max_roll_angle"""', 'self.max_roll_angle'], {}), "('max_roll_angle', self.max_roll_angle)\n", (1085, 1124), False, 'from plot import plot\n'), ((1133, 1178), 'plot.plot', 'plot', (['"""max_pitch_angle"""', 'self.max_pitch_angle'], {}), "('max_pitch_angle', self.max_pitch_angle)\n", (1137, 1178), False, 'from plot import plot\n'), ((1187, 1234), 'plot.plot', 'plot', (['"""max_yaw_velocity"""', 'self.max_yaw_velocity'], {}), "('max_yaw_velocity', self.max_yaw_velocity)\n", (1191, 1234), False, 'from plot import plot\n')] |
from configs import args
import tensorflow as tf
def forward(x, mode):
    """Build the text-CNN graph: embedding -> parallel conv/max-pool -> dense logits."""
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    x = tf.contrib.layers.embed_sequence(x, args.vocab_size, args.embed_dim)
    x = tf.layers.dropout(x, 0.2, training=is_training)
    # One conv branch per kernel size, each max-pooled over the full sequence.
    pooled = []
    for kernel_size in (3, 4, 5):
        branch = tf.layers.conv1d(x, args.filters, kernel_size, activation=tf.nn.relu)
        branch = tf.layers.max_pooling1d(branch, branch.get_shape().as_list()[1], 1)
        branch = tf.reshape(branch, (tf.shape(x)[0], args.filters))
        pooled.append(branch)
    x = tf.concat(pooled, -1)
    x = tf.layers.dense(x, args.filters, tf.nn.relu)
    return tf.layers.dense(x, args.n_class)
def model_fn(features, labels, mode):
    """tf.estimator model_fn covering PREDICT, TRAIN and EVAL modes.

    :param features: integer token-id tensor fed to forward()
    :param labels: sparse class-id labels (ignored in PREDICT mode)
    :param mode: one of tf.estimator.ModeKeys
    :return: tf.estimator.EstimatorSpec for the requested mode
    """
    logits = forward(features, mode)

    if mode == tf.estimator.ModeKeys.PREDICT:
        preds = tf.argmax(logits, -1)
        return tf.estimator.EstimatorSpec(mode, predictions=preds)

    # TRAIN and EVAL both need the loss.
    loss_op = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=labels))

    if mode == tf.estimator.ModeKeys.TRAIN:
        global_step = tf.train.get_global_step()
        LR = {'start': 5e-3, 'end': 5e-4, 'steps': 1500}
        # Exponential decay from LR['start'] towards LR['end'] over LR['steps'].
        lr_op = tf.train.exponential_decay(
            LR['start'], global_step, LR['steps'], LR['end']/LR['start'])
        train_op = tf.train.AdamOptimizer(lr_op).minimize(
            loss_op, global_step=global_step)
        lth = tf.train.LoggingTensorHook({'lr': lr_op}, every_n_iter=100)
        return tf.estimator.EstimatorSpec(
            mode=mode, loss=loss_op, train_op=train_op, training_hooks=[lth])

    # EVAL: the original fell through and returned None, which makes
    # estimator.evaluate() fail. Report loss and accuracy instead.
    acc_op = tf.metrics.accuracy(labels=labels, predictions=tf.argmax(logits, -1))
    return tf.estimator.EstimatorSpec(
        mode=mode, loss=loss_op, eval_metric_ops={'accuracy': acc_op})
| [
"tensorflow.train.AdamOptimizer",
"tensorflow.shape",
"tensorflow.estimator.EstimatorSpec",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits",
"tensorflow.concat",
"tensorflow.argmax",
"tensorflow.train.get_global_step",
"tensorflow.layers.dropout",
"tensorflow.train.exponential_decay",
"te... | [((139, 207), 'tensorflow.contrib.layers.embed_sequence', 'tf.contrib.layers.embed_sequence', (['x', 'args.vocab_size', 'args.embed_dim'], {}), '(x, args.vocab_size, args.embed_dim)\n', (171, 207), True, 'import tensorflow as tf\n'), ((216, 263), 'tensorflow.layers.dropout', 'tf.layers.dropout', (['x', '(0.2)'], {'training': 'is_training'}), '(x, 0.2, training=is_training)\n', (233, 263), True, 'import tensorflow as tf\n'), ((558, 581), 'tensorflow.concat', 'tf.concat', (['feat_map', '(-1)'], {}), '(feat_map, -1)\n', (567, 581), True, 'import tensorflow as tf\n'), ((590, 634), 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'args.filters', 'tf.nn.relu'], {}), '(x, args.filters, tf.nn.relu)\n', (605, 634), True, 'import tensorflow as tf\n'), ((648, 680), 'tensorflow.layers.dense', 'tf.layers.dense', (['x', 'args.n_class'], {}), '(x, args.n_class)\n', (663, 680), True, 'import tensorflow as tf\n'), ((324, 388), 'tensorflow.layers.conv1d', 'tf.layers.conv1d', (['x', 'args.filters', 'k_size'], {'activation': 'tf.nn.relu'}), '(x, args.filters, k_size, activation=tf.nn.relu)\n', (340, 388), True, 'import tensorflow as tf\n'), ((843, 864), 'tensorflow.argmax', 'tf.argmax', (['logits', '(-1)'], {}), '(logits, -1)\n', (852, 864), True, 'import tensorflow as tf\n'), ((880, 931), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', (['mode'], {'predictions': 'preds'}), '(mode, predictions=preds)\n', (906, 931), True, 'import tensorflow as tf\n'), ((1003, 1029), 'tensorflow.train.get_global_step', 'tf.train.get_global_step', ([], {}), '()\n', (1027, 1029), True, 'import tensorflow as tf\n'), ((1113, 1207), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (["LR['start']", 'global_step', "LR['steps']", "(LR['end'] / LR['start'])"], {}), "(LR['start'], global_step, LR['steps'], LR['end'] /\n LR['start'])\n", (1139, 1207), True, 'import tensorflow as tf\n'), ((1461, 1520), 'tensorflow.train.LoggingTensorHook', 
'tf.train.LoggingTensorHook', (["{'lr': lr_op}"], {'every_n_iter': '(100)'}), "({'lr': lr_op}, every_n_iter=100)\n", (1487, 1520), True, 'import tensorflow as tf\n'), ((1545, 1641), 'tensorflow.estimator.EstimatorSpec', 'tf.estimator.EstimatorSpec', ([], {'mode': 'mode', 'loss': 'loss_op', 'train_op': 'train_op', 'training_hooks': '[lth]'}), '(mode=mode, loss=loss_op, train_op=train_op,\n training_hooks=[lth])\n', (1571, 1641), True, 'import tensorflow as tf\n'), ((1249, 1325), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logits', 'labels': 'labels'}), '(logits=logits, labels=labels)\n', (1295, 1325), True, 'import tensorflow as tf\n'), ((1360, 1389), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['lr_op'], {}), '(lr_op)\n', (1382, 1389), True, 'import tensorflow as tf\n'), ((491, 502), 'tensorflow.shape', 'tf.shape', (['x'], {}), '(x)\n', (499, 502), True, 'import tensorflow as tf\n')] |
# -*- python -*-
"""@file
@brief pyserial transport for pato
Copyright (c) 2014-2015 <NAME> <<EMAIL>>.
All rights reserved.
@page License
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The views and conclusions contained in the software and documentation
are those of the authors and should not be interpreted as representing
official policies, either expressed or implied, of the Pato Project.
"""
import serial
from util.protocol import ProtocolException
class Uart(object):
    """
    @brief Communication transport using any UART TTL cable (FTDI)

    A simple transport that allows python code running on PC to talk
    with Pato via UART (using any UART cable or dongle e.g. FTDI),
    while Pato is compiled with UART interface.

    This requires python pyserial package to be installed.
    """
    def __init__(self, *args, **kwargs):
        """
        @brief Constructor
        @param[in] args arguments for pyserial
        @param[in] kwargs keyword arguments for pyserial
        """
        self.serial = serial.Serial(*args, **kwargs)

    def query(self, request):
        """
        @brief Generic query (request/reply) method via pyserial interface.

        Send request packet to Pato via serial interface and wait for reply
        packet. If send and/or receive return unexpected result,
        @ref ProtocolException is thrown.

        @param[in] request regular list of bytes representing packet to be sent
                   via the bridge.
        @returns Received reply packet as a list of ints
        @throws ProtocolException upon send or receive error
        """
        bytes_written = self.serial.write(bytes(request))
        if bytes_written != len(request):
            raise ProtocolException("Failed to send request")
        reply_size = 5
        reply = self.serial.read(reply_size)
        if len(reply) != reply_size:
            raise ProtocolException("Failed to receive reply")
        # Python 3: iterating a bytes object already yields ints; Python 2
        # yields one-character strings needing ord(). The old unconditional
        # ord(c) raised TypeError on Python 3.
        reply = [b if isinstance(b, int) else ord(b) for b in reply]
        return reply

    def close(self):
        """
        @brief Close serial line to bridge
        """
        self.serial.close()
| [
"util.protocol.ProtocolException",
"serial.Serial"
] | [((2234, 2264), 'serial.Serial', 'serial.Serial', (['*args'], {}), '(*args, **kwargs)\n', (2247, 2264), False, 'import serial\n'), ((2932, 2975), 'util.protocol.ProtocolException', 'ProtocolException', (['"""Failed to send request"""'], {}), "('Failed to send request')\n", (2949, 2975), False, 'from util.protocol import ProtocolException\n'), ((3100, 3144), 'util.protocol.ProtocolException', 'ProtocolException', (['"""Failed to receive reply"""'], {}), "('Failed to receive reply')\n", (3117, 3144), False, 'from util.protocol import ProtocolException\n')] |
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by <NAME>
# Extended by <NAME>
# --------------------------------------------------------
import os
import cv2
import numpy as np
import torch
import torch.utils.data as data
import xml.etree.ElementTree as ET
from utils.bbox import quad_2_rbox
class VOCDataset(data.Dataset):
""""""
def __init__(self,
dataset='trainval.txt',
augment = False,
level = 1,
random_flip=True):
self.image_set = dataset
self.data_path = self.image_set.strip('/ImageSets/Main/trainval.txt')
self.image_ext = [".jpg"]
self.image_list = self._load_image_names()
self.classes = ('__background__', 'aeroplane','bicycle','bird','boat',
'bottle','bus','car','cat','chair','cow','diningtable',
'dog','horse','motorbike','person','pottedplant',
'sheep','sofa','train','tvmonitor')
self.num_classes = len(self.classes)
self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))
self.random_flip = random_flip
def __len__(self):
return len(self.image_list)
def __getitem__(self, index):
im_path = self._image_path_from_index(self.image_list[index])
im = cv2.cvtColor(cv2.imread(im_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
roidb = self._load_pascal_annotation(self.image_list[index])
gt_inds = np.where(roidb['gt_classes'] != 0)[0]
bboxes = roidb['boxes'][gt_inds, :]
classes = roidb['gt_classes'][gt_inds]
if self.random_flip and np.random.rand() >= 0.5:
im = cv2.flip(im, 1, None)
oldxs = bboxes[:, 0::2].copy()
bboxes[:, 0::2] = im.shape[1] - oldxs - 1
gt_boxes = np.empty((len(gt_inds), 6), dtype=np.float32)
for i, bbox in enumerate(bboxes):
gt_boxes[i, :5] = quad_2_rbox(np.array(bbox))
gt_boxes[i, 5] = classes[i]
return {'image': im, 'boxes': gt_boxes}
def _load_image_names(self):
"""
Load the names listed in this dataset's image set file.
"""
image_set_file = self.image_set
if not os.path.exists(image_set_file):
'Path does not exist: {}'.format(image_set_file)
image_names = []
else:
with open(image_set_file) as f:
image_names = [x.strip() for x in f.readlines()]
return image_names
def _image_path_from_index(self, index):
"""
Construct an image path from the image's "index" identifier.
"""
image_path = None
image_exist = False
for image_ext in self.image_ext:
image_path = os.path.join(self.data_path, 'JPEGImages', index + image_ext)
if os.path.exists(image_path):
image_exist = True
break
if not image_exist:
raise Exception('Image path does not exist: {}'.format(
os.path.join(self.data_path, 'JPEGImages', index))
)
return image_path
def _load_pascal_annotation(self, index):
"""
Load image and bounding boxes info from XML file in the PASCAL VOC format.
"""
filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
tree = ET.parse(filename)
objs = tree.findall('object')
boxes, gt_classes = [], []
for _, obj in enumerate(objs):
difficult = int(obj.find('difficult').text)
is_latin = obj.find('language') is None or obj.find('language').text == 'Latin'
bnd_box = obj.find('bndbox')
box = [
float(bnd_box.find('xmin').text),
float(bnd_box.find('ymin').text),
float(bnd_box.find('xmax').text),
float(bnd_box.find('ymin').text),
float(bnd_box.find('xmax').text),
float(bnd_box.find('ymax').text),
float(bnd_box.find('xmin').text),
float(bnd_box.find('ymax').text),
]
label = self.class_to_ind[obj.find('name').text.lower().strip()]
if difficult:
continue
# if self.only_latin and not is_latin:
# continue
boxes.append(box)
gt_classes.append(label)
return {'boxes': np.array(boxes, dtype=np.int32), 'gt_classes': np.array(gt_classes)}
def image_path_at(self, i):
    """Return the absolute path to image i in the image sequence."""
    name = self.image_list[i]
    return self._image_path_from_index(name)
def return_class(self, id):
    """Return the class name for a (possibly string-valued) class id."""
    class_index = int(id)
    return self.classes[class_index]
if __name__ == '__main__':
pass | [
"os.path.exists",
"xml.etree.ElementTree.parse",
"cv2.flip",
"numpy.random.rand",
"numpy.where",
"os.path.join",
"numpy.array",
"cv2.imread"
] | [((3434, 3493), 'os.path.join', 'os.path.join', (['self.data_path', '"""Annotations"""', "(index + '.xml')"], {}), "(self.data_path, 'Annotations', index + '.xml')\n", (3446, 3493), False, 'import os\n'), ((3509, 3527), 'xml.etree.ElementTree.parse', 'ET.parse', (['filename'], {}), '(filename)\n', (3517, 3527), True, 'import xml.etree.ElementTree as ET\n'), ((1466, 1503), 'cv2.imread', 'cv2.imread', (['im_path', 'cv2.IMREAD_COLOR'], {}), '(im_path, cv2.IMREAD_COLOR)\n', (1476, 1503), False, 'import cv2\n'), ((1611, 1645), 'numpy.where', 'np.where', (["(roidb['gt_classes'] != 0)"], {}), "(roidb['gt_classes'] != 0)\n", (1619, 1645), True, 'import numpy as np\n'), ((1815, 1836), 'cv2.flip', 'cv2.flip', (['im', '(1)', 'None'], {}), '(im, 1, None)\n', (1823, 1836), False, 'import cv2\n'), ((2365, 2395), 'os.path.exists', 'os.path.exists', (['image_set_file'], {}), '(image_set_file)\n', (2379, 2395), False, 'import os\n'), ((2896, 2957), 'os.path.join', 'os.path.join', (['self.data_path', '"""JPEGImages"""', '(index + image_ext)'], {}), "(self.data_path, 'JPEGImages', index + image_ext)\n", (2908, 2957), False, 'import os\n'), ((2973, 2999), 'os.path.exists', 'os.path.exists', (['image_path'], {}), '(image_path)\n', (2987, 2999), False, 'import os\n'), ((4563, 4594), 'numpy.array', 'np.array', (['boxes'], {'dtype': 'np.int32'}), '(boxes, dtype=np.int32)\n', (4571, 4594), True, 'import numpy as np\n'), ((4610, 4630), 'numpy.array', 'np.array', (['gt_classes'], {}), '(gt_classes)\n', (4618, 4630), True, 'import numpy as np\n'), ((1773, 1789), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (1787, 1789), True, 'import numpy as np\n'), ((2084, 2098), 'numpy.array', 'np.array', (['bbox'], {}), '(bbox)\n', (2092, 2098), True, 'import numpy as np\n'), ((3170, 3219), 'os.path.join', 'os.path.join', (['self.data_path', '"""JPEGImages"""', 'index'], {}), "(self.data_path, 'JPEGImages', index)\n", (3182, 3219), False, 'import os\n')] |
import discord
import random
import asyncio
import logging
import urllib.request
from discord.ext import commands
# Bot instance: commands are invoked with the "nep " prefix.
bot = commands.Bot(command_prefix='nep ', description= "Nep Nep")
# Shared countdown state: the running count and the asyncio Task driving it
# (created by the `sc` command, cancelled by `cc`).
counter = 0
countTask = None
@bot.event
async def on_ready():
    """Log basic identity info once the bot has connected."""
    banner = ('Logged in as', bot.user.name, '------')
    for line in banner:
        print(line)
@bot.command()
async def nep(ctx):
    """Reply with an enthusiastic nep."""
    reply = "NEP NEP"
    await ctx.send(reply)
@bot.command(pass_context = True)
async def guessWhat(ctx):
    """Announce that the invoking user has learned to code a Discord bot."""
    author_name = str(ctx.message.author.display_name)
    await ctx.send(author_name + " officially learned how to code a Discord bot")
async def countdown(channel):
    """Post an incrementing count to *channel* every 3 seconds until the bot closes."""
    global counter
    while not bot.is_closed():
        counter += 1
        message = "Count is at " + str(counter)
        await channel.send(message)
        await asyncio.sleep(3)
@bot.command(pass_context = True, aliases = ["collect"])
async def sc(ctx):
    """Start the background countdown task in the invoking channel."""
    global countTask
    await ctx.send("Countdown Started!")
    target_channel = ctx.message.channel
    countTask = bot.loop.create_task(countdown(target_channel))
@bot.command(pass_context = True, aliases = ["cancel", "stop"])
async def cc(ctx):
    """Cancel the running countdown task started by `sc`."""
    global countTask
    notice = "Countdown Cancelled!"
    await ctx.send(notice)
    countTask.cancel()
@bot.command(pass_context = True)
async def pm(ctx, *content):
    """DM the invoking user the given words joined by single spaces.

    Bug fix: the original sent the raw argument *tuple* when a DM channel
    already existed, and the joined string only on the create-dm path; both
    paths now send the same joined string (with the original trailing space).
    """
    if ctx.author.dm_channel is None:
        await ctx.author.create_dm()
    sendString = ''
    for c in content:
        sendString += c + ' '
    await ctx.author.dm_channel.send(sendString)
@bot.command(aliases = ['nh'])
async def nhentai(ctx):
    """Send the URL that nhentai's random redirect resolves to."""
    random_url = "https://nhentai.net/random/"
    # Browser-like User-Agent; the site rejects default urllib requests.
    headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/50.0.2661.102 Safari/537.36'}
    request = urllib.request.Request(random_url, headers = headers)
    response = urllib.request.urlopen(request)
    await ctx.send(response.geturl())
# NOTE(review): keep real tokens out of source control — load the token from
# an environment variable or a config file before deploying.
token = "insert token here"
bot.run(token)
"discord.ext.commands.Bot",
"asyncio.sleep"
] | [((121, 179), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""nep """', 'description': '"""Nep Nep"""'}), "(command_prefix='nep ', description='Nep Nep')\n", (133, 179), False, 'from discord.ext import commands\n'), ((747, 763), 'asyncio.sleep', 'asyncio.sleep', (['(3)'], {}), '(3)\n', (760, 763), False, 'import asyncio\n')] |
import os
import time
import argparse
import pandas as pd
from smf import SessionMF
# Command-line configuration for training/evaluating the session-based
# matrix-factorization recommender.
parser = argparse.ArgumentParser()
# Evaluation cutoff and model capacity.
parser.add_argument('--K', type=int, default=20, help="K items to be used in Recall@K and MRR@K")
parser.add_argument('--factors', type=int, default=100, help="Number of latent factors.")
# Optimisation hyper-parameters.
parser.add_argument('--batch', type=int, default=32, help="Batch size for the training process")
parser.add_argument('--momentum', type=float, default=0.0, help="Momentum of the optimizer adagrad_sub")
parser.add_argument('--regularization', type=float, default=0.0001, help="Regularization Amount of the objective function")
parser.add_argument('--dropout', type=float, default=0.0, help="Share of items that are randomly discarded from the current session while training")
parser.add_argument('--skip', type=float, default=0.0, help="Probability that an item is skiped and the next one is used as the positive example")
parser.add_argument('--neg_samples', type=int, default=2048, help="Number of items that are sampled as negative examples")
parser.add_argument('--activation', type=str, default='linear', help="Final activation function (linear, sigmoid, uf_sigmoid, hard_sigmoid, relu, softmax, softsign, softplus, tanh)")
parser.add_argument('--objective', type=str, default='bpr_max', help="Loss Function (bpr_max, top1_max, bpr, top1)")
parser.add_argument('--epochs', type=int, default=10, help="Number of Epochs")
parser.add_argument('--lr', type=float, default=0.001, help="Learning Rate")
# Dataset layout: column names and input file locations.
parser.add_argument('--itemid', default='ItemID', type=str)
parser.add_argument('--sessionid', default='SessionID', type=str)
parser.add_argument('--valid_data', default='recSys15Valid.txt', type=str)
parser.add_argument('--train_data', default='recSys15TrainOnly.txt', type=str)
parser.add_argument('--data_folder', default='/home/icvuser/Desktop/Recsys cleaned data/RecSys15 Dataset Splits', type=str)
# Get the arguments
args = parser.parse_args()
# Load both splits and sort by session id (the model consumes events grouped
# by session). Only the most recent 1/64 of training rows is kept to keep the
# experiment fast.
train_data = os.path.join(args.data_folder, args.train_data)
x_train = pd.read_csv(train_data)
x_train.sort_values(args.sessionid, inplace=True)
x_train = x_train.iloc[-int(len(x_train) / 64) :] #just take 1/64 last instances
valid_data = os.path.join(args.data_folder, args.valid_data)
x_valid = pd.read_csv(valid_data)
x_valid.sort_values(args.sessionid, inplace=True)
print('Finished Reading Data \nStart Model Fitting...')
# Fitting Model
# Train the session-based MF model on the (sub-sampled) training split,
# timing the whole fit for the log line below.
t1 = time.time()
model = SessionMF(factors = args.factors, session_key = args.sessionid, item_key = args.itemid,
                  batch = args.batch, momentum = args.momentum, regularization = args.regularization,
                  dropout = args.dropout, skip = args.skip, samples = args.neg_samples,
                  activation = args.activation, objective = args.objective, epochs = args.epochs, learning_rate = args.lr)
model.fit(x_train)
t2 = time.time()
print('End Model Fitting with total time =', t2 - t1, '\n Start Predictions...')
# Test Set Evaluation
# Replay each validation session in order; for every event after the first
# known item, ask the model for the top-K next items and score Recall@K and
# MRR@K against the true next item.
test_size = 0.0
hit = 0.0
MRR = 0.0
cur_length = 0
cur_session = -1
last_items = []
t1 = time.time()
index_item = x_valid.columns.get_loc(args.itemid)
index_session = x_valid.columns.get_loc(args.sessionid)
counter = 0
for row in x_valid.itertuples( index=False ):
    counter += 1
    if counter % 10000 == 0:
        print('Finished Prediction for ', counter, 'items.')
    session_id, item_id = row[index_session], row[index_item]
    if session_id != cur_session:
        # New session: reset the running context.
        cur_session = session_id
        last_items = []
        cur_length = 0
    # Items unseen during training cannot be predicted; skip them entirely.
    if item_id in model.item_map:
        if len(last_items) > cur_length:  # at least one context item exists
            cur_length += 1
            test_size += 1
            # Predict the most similar items to the current session context.
            predictions = model.predict_next(last_items, K = args.K)
            for rank, predicted_item in enumerate(predictions, start=1):
                if int(predicted_item) == item_id:
                    hit += 1.0
                    MRR += 1 / rank
                    break
        last_items.append(item_id)
t2 = time.time()
# Guard against ZeroDivisionError when no event was evaluable.
if test_size > 0:
    print('Recall: {}'.format(hit / test_size))
    print('\nMRR: {}'.format(MRR / test_size))
else:
    print('No evaluable events in the validation split.')
print('End Model Predictions with total time =', t2 - t1) | [
"argparse.ArgumentParser",
"pandas.read_csv",
"os.path.join",
"smf.SessionMF",
"time.time"
] | [((94, 119), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (117, 119), False, 'import argparse\n'), ((1975, 2022), 'os.path.join', 'os.path.join', (['args.data_folder', 'args.train_data'], {}), '(args.data_folder, args.train_data)\n', (1987, 2022), False, 'import os\n'), ((2033, 2056), 'pandas.read_csv', 'pd.read_csv', (['train_data'], {}), '(train_data)\n', (2044, 2056), True, 'import pandas as pd\n'), ((2204, 2251), 'os.path.join', 'os.path.join', (['args.data_folder', 'args.valid_data'], {}), '(args.data_folder, args.valid_data)\n', (2216, 2251), False, 'import os\n'), ((2262, 2285), 'pandas.read_csv', 'pd.read_csv', (['valid_data'], {}), '(valid_data)\n', (2273, 2285), True, 'import pandas as pd\n'), ((2415, 2426), 'time.time', 'time.time', ([], {}), '()\n', (2424, 2426), False, 'import time\n'), ((2435, 2774), 'smf.SessionMF', 'SessionMF', ([], {'factors': 'args.factors', 'session_key': 'args.sessionid', 'item_key': 'args.itemid', 'batch': 'args.batch', 'momentum': 'args.momentum', 'regularization': 'args.regularization', 'dropout': 'args.dropout', 'skip': 'args.skip', 'samples': 'args.neg_samples', 'activation': 'args.activation', 'objective': 'args.objective', 'epochs': 'args.epochs', 'learning_rate': 'args.lr'}), '(factors=args.factors, session_key=args.sessionid, item_key=args.\n itemid, batch=args.batch, momentum=args.momentum, regularization=args.\n regularization, dropout=args.dropout, skip=args.skip, samples=args.\n neg_samples, activation=args.activation, objective=args.objective,\n epochs=args.epochs, learning_rate=args.lr)\n', (2444, 2774), False, 'from smf import SessionMF\n'), ((2863, 2874), 'time.time', 'time.time', ([], {}), '()\n', (2872, 2874), False, 'import time\n'), ((3068, 3079), 'time.time', 'time.time', ([], {}), '()\n', (3077, 3079), False, 'import time\n'), ((4207, 4218), 'time.time', 'time.time', ([], {}), '()\n', (4216, 4218), False, 'import time\n')] |
# coding: utf-8
"""
App Center Client
Microsoft Visual Studio App Center API # noqa: E501
OpenAPI spec version: preview
Contact: <EMAIL>
Project Repository: https://github.com/b3nab/appcenter-sdks
"""
import pprint
import re # noqa: F401
import six
class Device(object):
    """Client-device description attached to App Center telemetry.

    Re-written from the swagger-generated original: the 23 identical
    property/getter/setter triples are generated from the attribute tables
    below instead of being spelled out by hand.  The public interface is
    unchanged — every attribute is a property, required attributes raise
    ``ValueError("Invalid value for `name`, must not be `None`")`` when set
    to ``None``, and ``to_dict``/``to_str``/``__repr__``/``__eq__``/``__ne__``
    behave exactly as before (``__eq__`` still compares ``__dict__``).
    """

    # Attribute name -> swagger type, as declared by the API spec.
    swagger_types = {
        'sdk_name': 'string',
        'sdk_version': 'string',
        'wrapper_sdk_version': 'string',
        'wrapper_sdk_name': 'string',
        'model': 'string',
        'oem_name': 'string',
        'os_name': 'string',
        'os_version': 'string',
        'os_build': 'string',
        'os_api_level': 'integer',
        'locale': 'string',
        'time_zone_offset': 'integer',
        'screen_size': 'string',
        'app_version': 'string',
        'carrier_name': 'string',
        'carrier_code': 'string',
        'carrier_country': 'string',
        'app_build': 'string',
        'app_namespace': 'string',
        'live_update_release_label': 'string',
        'live_update_deployment_key': 'string',
        'live_update_package_hash': 'string',
        'wrapper_runtime_version': 'string'
    }

    # Attribute name -> JSON key (identity mapping for this model).
    attribute_map = {name: name for name in swagger_types}

    # Attributes whose setters reject None (required by the API spec).
    _required = frozenset([
        'sdk_name', 'sdk_version', 'os_name', 'os_version', 'locale',
        'time_zone_offset', 'app_version', 'app_build',
    ])

    def __init__(self, sdk_name=None, sdk_version=None, wrapper_sdk_version=None,
                 wrapper_sdk_name=None, model=None, oem_name=None, os_name=None,
                 os_version=None, os_build=None, os_api_level=None, locale=None,
                 time_zone_offset=None, screen_size=None, app_version=None,
                 carrier_name=None, carrier_code=None, carrier_country=None,
                 app_build=None, app_namespace=None, live_update_release_label=None,
                 live_update_deployment_key=None, live_update_package_hash=None,
                 wrapper_runtime_version=None):  # noqa: E501
        """Device - a model defined in Swagger"""  # noqa: E501
        values = locals()
        # Initialise every private backing slot to None first, so __dict__
        # always contains the same keys as the generated original.
        for name in self.swagger_types:
            setattr(self, '_' + name, None)
        self.discriminator = None
        for name in self.swagger_types:
            value = values[name]
            # Required attributes are always assigned through their property
            # (raising on None); optional ones only when a value was given.
            if name in self._required or value is not None:
                setattr(self, name, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, 'to_dict') else v
                                for v in value]
            elif hasattr(value, 'to_dict'):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: v.to_dict() if hasattr(v, 'to_dict') else v
                                for k, v in value.items()}
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, Device):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _device_property(name, required):
    """Build the property (getter + validating setter) for one Device attribute."""
    def getter(self):
        return getattr(self, '_' + name)

    def setter(self, value):
        if required and value is None:
            raise ValueError(
                "Invalid value for `%s`, must not be `None`" % name)  # noqa: E501
        setattr(self, '_' + name, value)

    return property(getter, setter,
                    doc="Gets or sets the %s of this Device." % name)


# Attach one property per swagger attribute to the class.
for _name in Device.swagger_types:
    setattr(Device, _name, _device_property(_name, _name in Device._required))
del _name
| [
"six.iteritems"
] | [((23895, 23928), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (23908, 23928), False, 'import six\n')] |
from django.conf.urls import url
from django.contrib.auth import views as auth_views
from django.contrib.auth.forms import AuthenticationForm
from oscar.core.application import (
DashboardApplication as BaseDashboardApplication)
from oscar.core.loading import get_class
class DashboardApplication(BaseDashboardApplication):
name = 'dashboard'
permissions_map = {
'index': (['is_staff'], ['partner.dashboard_access']),
}
index_view = get_class('dashboard.views', 'IndexView')
reports_app = get_class('dashboard.reports.app', 'application')
orders_app = get_class('dashboard.orders.app', 'application')
users_app = get_class('dashboard.users.app', 'application')
catalogue_app = get_class('dashboard.catalogue.app', 'application')
promotions_app = get_class('dashboard.promotions.app', 'application')
pages_app = get_class('dashboard.pages.app', 'application')
partners_app = get_class('dashboard.partners.app', 'application')
offers_app = get_class('dashboard.offers.app', 'application')
ranges_app = get_class('dashboard.ranges.app', 'application')
reviews_app = get_class('dashboard.reviews.app', 'application')
vouchers_app = get_class('dashboard.vouchers.app', 'application')
comms_app = get_class('dashboard.communications.app', 'application')
shipping_app = get_class('dashboard.shipping.app', 'application')
system_app = get_class('dashboard.system.app', 'application')
def get_urls(self):
urls = [
url(r'^$', self.index_view.as_view(), name='index'),
url(r'^catalogue/', self.catalogue_app.urls),
url(r'^reports/', self.reports_app.urls),
url(r'^orders/', self.orders_app.urls),
url(r'^users/', self.users_app.urls),
url(r'^content-blocks/', self.promotions_app.urls),
url(r'^pages/', self.pages_app.urls),
url(r'^partners/', self.partners_app.urls),
url(r'^offers/', self.offers_app.urls),
url(r'^ranges/', self.ranges_app.urls),
url(r'^reviews/', self.reviews_app.urls),
url(r'^vouchers/', self.vouchers_app.urls),
url(r'^comms/', self.comms_app.urls),
url(r'^shipping/', self.shipping_app.urls),
url(r'^system/', self.system_app.urls),
url(r'^login/$',
auth_views.LoginView.as_view(template_name='dashboard/login.html',
authentication_form=AuthenticationForm),
name='login'),
url(r'^logout/$', auth_views.LogoutView.as_view(next_page='/'), name='logout'),
]
return self.post_process_urls(urls)
application = DashboardApplication()
| [
"oscar.core.loading.get_class",
"django.contrib.auth.views.LoginView.as_view",
"django.conf.urls.url",
"django.contrib.auth.views.LogoutView.as_view"
] | [((465, 506), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.views"""', '"""IndexView"""'], {}), "('dashboard.views', 'IndexView')\n", (474, 506), False, 'from oscar.core.loading import get_class\n'), ((525, 574), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.reports.app"""', '"""application"""'], {}), "('dashboard.reports.app', 'application')\n", (534, 574), False, 'from oscar.core.loading import get_class\n'), ((592, 640), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.orders.app"""', '"""application"""'], {}), "('dashboard.orders.app', 'application')\n", (601, 640), False, 'from oscar.core.loading import get_class\n'), ((657, 704), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.users.app"""', '"""application"""'], {}), "('dashboard.users.app', 'application')\n", (666, 704), False, 'from oscar.core.loading import get_class\n'), ((725, 776), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.catalogue.app"""', '"""application"""'], {}), "('dashboard.catalogue.app', 'application')\n", (734, 776), False, 'from oscar.core.loading import get_class\n'), ((798, 850), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.promotions.app"""', '"""application"""'], {}), "('dashboard.promotions.app', 'application')\n", (807, 850), False, 'from oscar.core.loading import get_class\n'), ((867, 914), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.pages.app"""', '"""application"""'], {}), "('dashboard.pages.app', 'application')\n", (876, 914), False, 'from oscar.core.loading import get_class\n'), ((934, 984), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.partners.app"""', '"""application"""'], {}), "('dashboard.partners.app', 'application')\n", (943, 984), False, 'from oscar.core.loading import get_class\n'), ((1002, 1050), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.offers.app"""', '"""application"""'], {}), "('dashboard.offers.app', 'application')\n", 
(1011, 1050), False, 'from oscar.core.loading import get_class\n'), ((1068, 1116), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.ranges.app"""', '"""application"""'], {}), "('dashboard.ranges.app', 'application')\n", (1077, 1116), False, 'from oscar.core.loading import get_class\n'), ((1135, 1184), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.reviews.app"""', '"""application"""'], {}), "('dashboard.reviews.app', 'application')\n", (1144, 1184), False, 'from oscar.core.loading import get_class\n'), ((1204, 1254), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.vouchers.app"""', '"""application"""'], {}), "('dashboard.vouchers.app', 'application')\n", (1213, 1254), False, 'from oscar.core.loading import get_class\n'), ((1271, 1327), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.communications.app"""', '"""application"""'], {}), "('dashboard.communications.app', 'application')\n", (1280, 1327), False, 'from oscar.core.loading import get_class\n'), ((1347, 1397), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.shipping.app"""', '"""application"""'], {}), "('dashboard.shipping.app', 'application')\n", (1356, 1397), False, 'from oscar.core.loading import get_class\n'), ((1415, 1463), 'oscar.core.loading.get_class', 'get_class', (['"""dashboard.system.app"""', '"""application"""'], {}), "('dashboard.system.app', 'application')\n", (1424, 1463), False, 'from oscar.core.loading import get_class\n'), ((1583, 1626), 'django.conf.urls.url', 'url', (['"""^catalogue/"""', 'self.catalogue_app.urls'], {}), "('^catalogue/', self.catalogue_app.urls)\n", (1586, 1626), False, 'from django.conf.urls import url\n'), ((1641, 1680), 'django.conf.urls.url', 'url', (['"""^reports/"""', 'self.reports_app.urls'], {}), "('^reports/', self.reports_app.urls)\n", (1644, 1680), False, 'from django.conf.urls import url\n'), ((1695, 1732), 'django.conf.urls.url', 'url', (['"""^orders/"""', 'self.orders_app.urls'], {}), 
"('^orders/', self.orders_app.urls)\n", (1698, 1732), False, 'from django.conf.urls import url\n'), ((1747, 1782), 'django.conf.urls.url', 'url', (['"""^users/"""', 'self.users_app.urls'], {}), "('^users/', self.users_app.urls)\n", (1750, 1782), False, 'from django.conf.urls import url\n'), ((1797, 1846), 'django.conf.urls.url', 'url', (['"""^content-blocks/"""', 'self.promotions_app.urls'], {}), "('^content-blocks/', self.promotions_app.urls)\n", (1800, 1846), False, 'from django.conf.urls import url\n'), ((1861, 1896), 'django.conf.urls.url', 'url', (['"""^pages/"""', 'self.pages_app.urls'], {}), "('^pages/', self.pages_app.urls)\n", (1864, 1896), False, 'from django.conf.urls import url\n'), ((1911, 1952), 'django.conf.urls.url', 'url', (['"""^partners/"""', 'self.partners_app.urls'], {}), "('^partners/', self.partners_app.urls)\n", (1914, 1952), False, 'from django.conf.urls import url\n'), ((1967, 2004), 'django.conf.urls.url', 'url', (['"""^offers/"""', 'self.offers_app.urls'], {}), "('^offers/', self.offers_app.urls)\n", (1970, 2004), False, 'from django.conf.urls import url\n'), ((2019, 2056), 'django.conf.urls.url', 'url', (['"""^ranges/"""', 'self.ranges_app.urls'], {}), "('^ranges/', self.ranges_app.urls)\n", (2022, 2056), False, 'from django.conf.urls import url\n'), ((2071, 2110), 'django.conf.urls.url', 'url', (['"""^reviews/"""', 'self.reviews_app.urls'], {}), "('^reviews/', self.reviews_app.urls)\n", (2074, 2110), False, 'from django.conf.urls import url\n'), ((2125, 2166), 'django.conf.urls.url', 'url', (['"""^vouchers/"""', 'self.vouchers_app.urls'], {}), "('^vouchers/', self.vouchers_app.urls)\n", (2128, 2166), False, 'from django.conf.urls import url\n'), ((2181, 2216), 'django.conf.urls.url', 'url', (['"""^comms/"""', 'self.comms_app.urls'], {}), "('^comms/', self.comms_app.urls)\n", (2184, 2216), False, 'from django.conf.urls import url\n'), ((2231, 2272), 'django.conf.urls.url', 'url', (['"""^shipping/"""', 'self.shipping_app.urls'], {}), 
"('^shipping/', self.shipping_app.urls)\n", (2234, 2272), False, 'from django.conf.urls import url\n'), ((2287, 2324), 'django.conf.urls.url', 'url', (['"""^system/"""', 'self.system_app.urls'], {}), "('^system/', self.system_app.urls)\n", (2290, 2324), False, 'from django.conf.urls import url\n'), ((2373, 2483), 'django.contrib.auth.views.LoginView.as_view', 'auth_views.LoginView.as_view', ([], {'template_name': '"""dashboard/login.html"""', 'authentication_form': 'AuthenticationForm'}), "(template_name='dashboard/login.html',\n authentication_form=AuthenticationForm)\n", (2401, 2483), True, 'from django.contrib.auth import views as auth_views\n'), ((2587, 2631), 'django.contrib.auth.views.LogoutView.as_view', 'auth_views.LogoutView.as_view', ([], {'next_page': '"""/"""'}), "(next_page='/')\n", (2616, 2631), True, 'from django.contrib.auth import views as auth_views\n')] |
import pandas as pd
import pickle
def read_metric_logs(bucket_type):
metrics = pd.DataFrame(columns=['source_type', 'target_type', 'stats'])
type_list_path = f'/l/users/shikhar.srivastava/data/pannuke/{bucket_type}/selected_types.csv'
type_list = pd.read_csv(type_list_path)['0']
for source_type in type_list:
for target_type in type_list:
logs_path = f'/l/users/shikhar.srivastava/workspace/hover_net/logs/test/second_order/{bucket_type}/ckpts/{source_type}-{target_type}/per_image_stat.pkl'
# Read pickle file
with open(logs_path, 'rb') as f:
stats = pickle.load(f)
metrics = metrics.append({'source_type': source_type, 'target_type': target_type, 'stats': stats}, ignore_index=True)
return metrics, type_list | [
"pandas.DataFrame",
"pickle.load",
"pandas.read_csv"
] | [((85, 146), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['source_type', 'target_type', 'stats']"}), "(columns=['source_type', 'target_type', 'stats'])\n", (97, 146), True, 'import pandas as pd\n'), ((262, 289), 'pandas.read_csv', 'pd.read_csv', (['type_list_path'], {}), '(type_list_path)\n', (273, 289), True, 'import pandas as pd\n'), ((633, 647), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (644, 647), False, 'import pickle\n')] |
# Machine Learning Online Class - Exercise 2: Logistic Regression
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the logistic
# regression exercise. You will need to complete the following functions
# in this exericse:
#
# sigmoid.py
# costFunction.py
# predict.py
# costFunctionReg.py
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
from plotData import *
import costFunction as cf
import plotDecisionBoundary as pdb
import predict as predict
from sigmoid import *
plt.ion()
# Load data
# The first two columns contain the exam scores and the third column contains the label.
data = np.loadtxt('ex2data1.txt', delimiter=',')
print('plot_decision_boundary data[0, 0:1] = \n{}'.format(data[0, 0:1]))
print('plot_decision_boundary data[0, 0:2] = \n{}'.format(data[0, 0:2]))
print('plot_decision_boundary data[0, 0:3] = \n{}'.format(data[0, 0:3]))
print('plot_decision_boundary data[0, 1:1] = \n{}'.format(data[0, 1:1]))
print('plot_decision_boundary data[0, 1:2] = \n{}'.format(data[0, 1:2]))
print('plot_decision_boundary data[0, 1:3] = \n{}'.format(data[0, 1:3]))
print('plot_decision_boundary data[0, 2:1] = \n{}'.format(data[0, 2:1]))
print('plot_decision_boundary data[0, 2:2] = \n{}'.format(data[0, 2:2]))
print('plot_decision_boundary data[0, 2:3] = \n{}'.format(data[0, 2:3]))
X = data[:, 0:2]
y = data[:, 2]
# ===================== Part 1: Plotting =====================
# We start the exercise by first plotting the data to understand the
# the problem we are working with.
print('Plotting Data with + indicating (y = 1) examples and o indicating (y = 0) examples.')
plot_data(X, y)
plt.axis([30, 100, 30, 100])
# Specified in plot order. 按绘图顺序指定
plt.legend(['Admitted', 'Not admitted'], loc=1)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 2: Compute Cost and Gradient =====================
# In this part of the exercise, you will implement the cost and gradient
# for logistic regression. You need to complete the code in
# costFunction.py
# Setup the data array appropriately, and add ones for the intercept term
(m, n) = X.shape
# Add intercept term
X = np.c_[np.ones(m), X]
# Initialize fitting parameters
initial_theta = np.zeros(n + 1) # 初始化权重theta
# Compute and display initial cost and gradient
cost, grad = cf.cost_function(initial_theta, X, y)
np.set_printoptions(formatter={'float': '{: 0.4f}\n'.format})
print('Cost at initial theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.693')
print('Gradient at initial theta (zeros): \n{}'.format(grad))
print('Expected gradients (approx): \n-0.1000\n-12.0092\n-11.2628')
# Compute and display cost and gradient with non-zero theta
test_theta = np.array([-24, 0.2, 0.2])
cost, grad = cf.cost_function(test_theta, X, y)
print('Cost at test theta (zeros): {:0.3f}'.format(cost))
print('Expected cost (approx): 0.218')
print('Gradient at test theta: \n{}'.format(grad))
print('Expected gradients (approx): \n0.043\n2.566\n2.647')
input('Program paused. Press ENTER to continue')
# ===================== Part 3: Optimizing using fmin_bfgs =====================
# In this exercise, you will use a built-in function (opt.fmin_bfgs) to find the
# optimal parameters theta
def cost_func(t):
return cf.cost_function(t, X, y)[0]
def grad_func(t):
return cf.cost_function(t, X, y)[1]
# Run fmin_bfgs to obtain the optimal theta
theta, cost, *unused = opt.fmin_bfgs(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400, full_output=True, disp=False)
print('Cost at theta found by fmin: {:0.4f}'.format(cost))
print('Expected cost (approx): 0.203')
print('theta: \n{}'.format(theta))
print('Expected Theta (approx): \n-25.161\n0.206\n0.201')
# Plot boundary 画出二分边界
pdb.plot_decision_boundary(theta, X, y)
plt.xlabel('Exam 1 score')
plt.ylabel('Exam 2 score')
input('Program paused. Press ENTER to continue')
# ===================== Part 4: Predict and Accuracies =====================
# After learning the parameters, you'll like to use it to predict the outcomes
# on unseen data. In this part, you will use the logistic regression model
# to predict the probability that a student with score 45 on exam 1 and
# score 85 on exam 2 will be admitted
#
# Furthermore, you will compute the training and test set accuracies of our model.
#
# Your task is to complete the code in predict.py
# Predict probability for a student with score 45 on exam 1
# and score 85 on exam 2
prob = sigmoid(np.array([1, 45, 85]).dot(theta))
print('For a student with scores 45 and 85, we predict an admission probability of {:0.4f}'.format(prob))
print('Expected value : 0.775 +/- 0.002')
# Compute the accuracy on our training set
p = predict.predict(theta, X)
print('Train accuracy: {}'.format(np.mean(y == p) * 100))
print('Expected accuracy (approx): 89.0')
input('ex2 Finished. Press ENTER to exit')
| [
"numpy.mean",
"predict.predict",
"numpy.ones",
"matplotlib.pyplot.ylabel",
"scipy.optimize.fmin_bfgs",
"matplotlib.pyplot.xlabel",
"plotDecisionBoundary.plot_decision_boundary",
"numpy.array",
"numpy.zeros",
"costFunction.cost_function",
"matplotlib.pyplot.ion",
"matplotlib.pyplot.axis",
"nu... | [((696, 705), 'matplotlib.pyplot.ion', 'plt.ion', ([], {}), '()\n', (703, 705), True, 'import matplotlib.pyplot as plt\n'), ((814, 855), 'numpy.loadtxt', 'np.loadtxt', (['"""ex2data1.txt"""'], {'delimiter': '""","""'}), "('ex2data1.txt', delimiter=',')\n", (824, 855), True, 'import numpy as np\n'), ((1827, 1855), 'matplotlib.pyplot.axis', 'plt.axis', (['[30, 100, 30, 100]'], {}), '([30, 100, 30, 100])\n', (1835, 1855), True, 'import matplotlib.pyplot as plt\n'), ((1892, 1939), 'matplotlib.pyplot.legend', 'plt.legend', (["['Admitted', 'Not admitted']"], {'loc': '(1)'}), "(['Admitted', 'Not admitted'], loc=1)\n", (1902, 1939), True, 'import matplotlib.pyplot as plt\n'), ((1940, 1966), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exam 1 score"""'], {}), "('Exam 1 score')\n", (1950, 1966), True, 'import matplotlib.pyplot as plt\n'), ((1967, 1993), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exam 2 score"""'], {}), "('Exam 2 score')\n", (1977, 1993), True, 'import matplotlib.pyplot as plt\n'), ((2464, 2479), 'numpy.zeros', 'np.zeros', (['(n + 1)'], {}), '(n + 1)\n', (2472, 2479), True, 'import numpy as np\n'), ((2555, 2592), 'costFunction.cost_function', 'cf.cost_function', (['initial_theta', 'X', 'y'], {}), '(initial_theta, X, y)\n', (2571, 2592), True, 'import costFunction as cf\n'), ((2594, 2655), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'formatter': "{'float': '{: 0.4f}\\n'.format}"}), "(formatter={'float': '{: 0.4f}\\n'.format})\n", (2613, 2655), True, 'import numpy as np\n'), ((2961, 2986), 'numpy.array', 'np.array', (['[-24, 0.2, 0.2]'], {}), '([-24, 0.2, 0.2])\n', (2969, 2986), True, 'import numpy as np\n'), ((3000, 3034), 'costFunction.cost_function', 'cf.cost_function', (['test_theta', 'X', 'y'], {}), '(test_theta, X, y)\n', (3016, 3034), True, 'import costFunction as cf\n'), ((3673, 3782), 'scipy.optimize.fmin_bfgs', 'opt.fmin_bfgs', ([], {'f': 'cost_func', 'fprime': 'grad_func', 'x0': 'initial_theta', 'maxiter': '(400)', 
'full_output': '(True)', 'disp': '(False)'}), '(f=cost_func, fprime=grad_func, x0=initial_theta, maxiter=400,\n full_output=True, disp=False)\n', (3686, 3782), True, 'import scipy.optimize as opt\n'), ((3995, 4034), 'plotDecisionBoundary.plot_decision_boundary', 'pdb.plot_decision_boundary', (['theta', 'X', 'y'], {}), '(theta, X, y)\n', (4021, 4034), True, 'import plotDecisionBoundary as pdb\n'), ((4036, 4062), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Exam 1 score"""'], {}), "('Exam 1 score')\n", (4046, 4062), True, 'import matplotlib.pyplot as plt\n'), ((4063, 4089), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Exam 2 score"""'], {}), "('Exam 2 score')\n", (4073, 4089), True, 'import matplotlib.pyplot as plt\n'), ((4951, 4976), 'predict.predict', 'predict.predict', (['theta', 'X'], {}), '(theta, X)\n', (4966, 4976), True, 'import predict as predict\n'), ((2400, 2410), 'numpy.ones', 'np.ones', (['m'], {}), '(m)\n', (2407, 2410), True, 'import numpy as np\n'), ((3515, 3540), 'costFunction.cost_function', 'cf.cost_function', (['t', 'X', 'y'], {}), '(t, X, y)\n', (3531, 3540), True, 'import costFunction as cf\n'), ((3575, 3600), 'costFunction.cost_function', 'cf.cost_function', (['t', 'X', 'y'], {}), '(t, X, y)\n', (3591, 3600), True, 'import costFunction as cf\n'), ((4721, 4742), 'numpy.array', 'np.array', (['[1, 45, 85]'], {}), '([1, 45, 85])\n', (4729, 4742), True, 'import numpy as np\n'), ((5012, 5027), 'numpy.mean', 'np.mean', (['(y == p)'], {}), '(y == p)\n', (5019, 5027), True, 'import numpy as np\n')] |
#poly_gauss_coil model
#conversion of Poly_GaussCoil.py
#converted by <NAME>, Mar 2016
r"""
This empirical model describes the scattering from *polydisperse* polymer
chains in theta solvents or polymer melts, assuming a Schulz-Zimm type
molecular weight distribution.
To describe the scattering from *monodisperse* polymer chains, see the
:ref:`mono-gauss-coil` model.
Definition
----------
.. math::
I(q) = \text{scale} \cdot I_0 \cdot P(q) + \text{background}
where
.. math::
I_0 &= \phi_\text{poly} \cdot V \cdot (\rho_\text{poly}-\rho_\text{solv})^2 \\
P(q) &= 2 [(1 + UZ)^{-1/U} + Z - 1] / [(1 + U) Z^2] \\
Z &= [(q R_g)^2] / (1 + 2U) \\
U &= (Mw / Mn) - 1 = \text{polydispersity ratio} - 1 \\
V &= M / (N_A \delta)
Here, $\phi_\text{poly}$, is the volume fraction of polymer, $V$ is the
volume of a polymer coil, $M$ is the molecular weight of the polymer,
$N_A$ is Avogadro's Number, $\delta$ is the bulk density of the polymer,
$\rho_\text{poly}$ is the sld of the polymer, $\rho_\text{solv}$ is the
sld of the solvent, and $R_g$ is the radius of gyration of the polymer coil.
The 2D scattering intensity is calculated in the same way as the 1D,
but where the $q$ vector is redefined as
.. math::
q = \sqrt{q_x^2 + q_y^2}
References
----------
.. [#] O Glatter and O Kratky (editors), *Small Angle X-ray Scattering*, Academic Press, (1982) Page 404
.. [#] <NAME>, <NAME>, *Polymers and Neutron Scattering*, Oxford Science Publications, (1996)
.. [#] <NAME>, *Small Angle Neutron Scattering* in *Modern Techniques for Polymer Characterisation*, Wiley, (1999)
.. [#] http://www.ncnr.nist.gov/staff/hammouda/distance_learning/chapter_28.pdf
Authorship and Verification
----------------------------
* **Author:**
* **Last Modified by:**
* **Last Reviewed by:**
"""
import numpy as np
from numpy import inf, expm1, power
name = "poly_gauss_coil"
title = "Scattering from polydisperse polymer coils"
description = """
Evaluates the scattering from
polydisperse polymer chains.
"""
category = "shape-independent"
# pylint: disable=bad-whitespace, line-too-long
# ["name", "units", default, [lower, upper], "type", "description"],
parameters = [
["i_zero", "1/cm", 70.0, [0.0, inf], "", "Intensity at q=0"],
["rg", "Ang", 75.0, [0.0, inf], "", "Radius of gyration"],
["polydispersity", "None", 2.0, [1.0, inf], "", "Polymer Mw/Mn"],
]
# pylint: enable=bad-whitespace, line-too-long
# NB: Scale and Background are implicit parameters on every model
def Iq(q, i_zero, rg, polydispersity):
# pylint: disable = missing-docstring
u = polydispersity - 1.0
z = q**2 * (rg**2 / (1.0 + 2.0*u))
# need to trap the case of the polydispersity being 1 (ie, monodisperse!)
if polydispersity == 1.0:
result = 2.0 * (expm1(-z) + z)
index = q != 0.
result[index] /= z[index]**2
result[~index] = 1.0
else:
# Taylor series around z=0 of (2*(1+uz)^(-1/u) + z - 1) / (z^2(u+1))
p = [
#(-1 - 20*u - 155*u**2 - 580*u**3 - 1044*u**4 - 720*u**5) / 2520.,
#(+1 + 14*u + 71*u**2 + 154*u**3 + 120*u**4) / 360.,
#(-1 - 9*u - 26*u**2 - 24*u**3) / 60.,
(+1 + 5*u + 6*u**2) / 12.,
(-1 - 2*u) / 3.,
(+1),
]
result = 2.0 * (power(1.0 + u*z, -1.0/u) + z - 1.0) / (1.0 + u)
index = z > 1e-4
result[index] /= z[index]**2
result[~index] = np.polyval(p, z[~index])
return i_zero * result
Iq.vectorized = True # Iq accepts an array of q values
def random():
"""Return a random parameter set for the model."""
rg = 10**np.random.uniform(0, 4)
#rg = 1e3
polydispersity = 10**np.random.uniform(0, 3)
pars = dict(
#scale=1, background=0,
i_zero=1e7, # i_zero is a simple scale
rg=rg,
polydispersity=polydispersity,
)
return pars
demo = dict(scale=1.0,
i_zero=70.0,
rg=75.0,
polydispersity=2.0,
background=0.0)
# these unit test values taken from SasView 3.1.2
tests = [
[{'scale': 1.0, 'i_zero': 70.0, 'rg': 75.0,
'polydispersity': 2.0, 'background': 0.0},
[0.0106939, 0.469418], [57.6405, 0.169016]],
]
| [
"numpy.expm1",
"numpy.polyval",
"numpy.power",
"numpy.random.uniform"
] | [((3486, 3510), 'numpy.polyval', 'np.polyval', (['p', 'z[~index]'], {}), '(p, z[~index])\n', (3496, 3510), True, 'import numpy as np\n'), ((3677, 3700), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(4)'], {}), '(0, 4)\n', (3694, 3700), True, 'import numpy as np\n'), ((3740, 3763), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(3)'], {}), '(0, 3)\n', (3757, 3763), True, 'import numpy as np\n'), ((2826, 2835), 'numpy.expm1', 'expm1', (['(-z)'], {}), '(-z)\n', (2831, 2835), False, 'from numpy import inf, expm1, power\n'), ((3351, 3379), 'numpy.power', 'power', (['(1.0 + u * z)', '(-1.0 / u)'], {}), '(1.0 + u * z, -1.0 / u)\n', (3356, 3379), False, 'from numpy import inf, expm1, power\n')] |
import os
from itertools import product
from concurrent import futures
from contextlib import closing
from datetime import datetime
import numpy as np
from . import _z5py
from .file import File, S3File
from .dataset import Dataset
from .shape_utils import normalize_slices
def product1d(inrange):
    """Lazily yield every element of ``inrange``.

    1d stand-in for ``itertools.product``, which materializes its input
    iterators and can run out of memory for very large ranges.
    """
    yield from inrange
def blocking(shape, block_shape, roi=None, center_blocks_at_roi=False):
    """ Generator for nd blocking.

    Yields tuples of slices that tile ``shape`` (or ``roi``, if given) in
    blocks of ``block_shape``; blocks at the upper border are clipped to
    the valid region.

    Args:
        shape (tuple): nd shape
        block_shape (tuple): nd block shape
        roi (tuple[slice]): region of interest (default: None)
        center_blocks_at_roi (bool): if given a roi,
            whether to center the blocks being generated
            at the roi's origin (default: False)
    """
    assert len(shape) == len(block_shape), "Invalid number of dimensions."
    if roi is None:
        # compute the block-index ranges for the full shape
        # (round up for axes that are not evenly divisible)
        ranges = [range(sha // bsha if sha % bsha == 0 else sha // bsha + 1)
                  for sha, bsha in zip(shape, block_shape)]
        min_coords = [0] * len(shape)
        max_coords = shape
    else:
        # make sure that the roi is valid
        roi, _ = normalize_slices(roi, shape)
        ranges = [range(rr.start // bsha,
                        rr.stop // bsha if rr.stop % bsha == 0 else rr.stop // bsha + 1)
                  for rr, bsha in zip(roi, block_shape)]
        min_coords = [rr.start for rr in roi]
        max_coords = [rr.stop for rr in roi]
    need_shift = False
    if roi is not None and center_blocks_at_roi:
        shift = [rr.start % bsha for rr, bsha in zip(roi, block_shape)]
        need_shift = sum(shift) > 0
    # itertools.product casts its input iterators to tuples, which can raise
    # a MemoryError for very large "open-ended" 1d datasets. For the 1d case
    # we therefore always iterate lazily.
    # NOTE: the previous MemoryError fallback passed the list of ranges to
    # ``product1d``, which yielded the range object itself instead of index
    # tuples; zipping that range with ``block_shape`` paired only the first
    # index, so a single block at the origin was produced. The generator of
    # 1-tuples below fixes this.
    if len(ranges) == 1:
        start_points = ((ii,) for ii in ranges[0])
    else:
        start_points = product(*ranges)
    for start_point in start_points:
        positions = [sp * bshape for sp, bshape in zip(start_point, block_shape)]
        if need_shift:
            positions = [pos + sh for pos, sh in zip(positions, shift)]
        # shifted blocks may fall completely outside of the roi; skip them
        if any(pos > maxc for pos, maxc in zip(positions, max_coords)):
            continue
        yield tuple(slice(max(pos, minc), min(pos + bsha, maxc))
                    for pos, bsha, minc, maxc in zip(positions, block_shape,
                                                     min_coords, max_coords))
def copy_dataset_impl(f_in, f_out, in_path_in_file, out_path_in_file,
                      n_threads, chunks=None, block_shape=None, dtype=None,
                      roi=None, fit_to_roi=False, **new_compression):
    """ Implementation of copy dataset.
    Used to implement `copy_dataset`, `convert_to_h5` and `convert_from_h5`.
    Can also be used for more flexible use cases, like copying from a zarr/n5
    cloud dataset to a filesytem dataset.
    Args:
        f_in (File): input file object.
        f_out (File): output file object.
        in_path_in_file (str): name of input dataset.
        out_path_in_file (str): name of output dataset.
        n_threads (int): number of threads used for copying.
        chunks (tuple): chunks of the output dataset.
            By default same as input dataset's chunks. (default: None)
        block_shape (tuple): block shape used for copying. Must be a multiple
            of ``chunks``, which are used by default (default: None)
        dtype (str): datatype of the output dataset, default does not change datatype (default: None).
        roi (tuple[slice]): region of interest that will be copied. (default: None)
        fit_to_roi (bool): if given a roi, whether to set the shape of
            the output dataset to the roi's shape
            and align chunks with the roi's origin. (default: False)
        **new_compression: compression library and options for output dataset. If not given,
            the same compression as in the input is used.
    """
    ds_in = f_in[in_path_in_file]
    # check if we can copy chunk by chunk
    # (only possible if both sides are z5 objects, chunking is unchanged
    #  and no roi restricts the copy)
    in_is_z5 = isinstance(f_in, (File, S3File))
    out_is_z5 = isinstance(f_out, (File, S3File))
    copy_chunks = (in_is_z5 and out_is_z5) and (chunks is None or chunks == ds_in.chunks) and (roi is None)
    # get dataset metadata from input dataset if defaults were given
    chunks = ds_in.chunks if chunks is None else chunks
    dtype = ds_in.dtype if dtype is None else dtype
    # zarr objects may not have compression attribute. if so set it to the settings sent to this function
    # NOTE(review): this assigns the whole kwargs dict as the compression
    # attribute of the *input* dataset — looks intentional as a fallback for
    # zarr objects, but verify it matches the expected attribute format
    if not hasattr(ds_in, "compression"):
        ds_in.compression = new_compression
    # pop the compression library from the kwargs; the remaining entries
    # are treated as compression options (``pop`` mutates the local kwargs
    # dict, which is fine — it is not shared with the caller)
    compression = new_compression.pop("compression", ds_in.compression)
    compression_opts = new_compression
    same_lib = in_is_z5 == out_is_z5
    # when copying within the same library with unchanged compression,
    # reuse the input dataset's compression options if none were given
    if same_lib and compression == ds_in.compression:
        compression_opts = compression_opts if compression_opts else ds_in.compression_opts
    if out_is_z5:
        # z5 expects ``None`` instead of 'raw' for uncompressed data
        # and compression options as keyword arguments
        compression = None if compression == 'raw' else compression
        compression_opts = {} if compression_opts is None else compression_opts
    else:
        # h5py-style target: options are passed via a single
        # 'compression_opts' keyword
        compression_opts = {'compression_opts': None} if compression_opts is None else compression_opts
    # if we don't have block-shape explicitly given, use chunk size
    # otherwise check that it's a multiple of chunks
    if block_shape is None:
        block_shape = chunks
    else:
        assert all(bs % ch == 0 for bs, ch in zip(block_shape, chunks)),\
            "block_shape must be a multiple of chunks"
    shape = ds_in.shape
    # we need to create the blocking here, before the shape is potentially altered
    # if fit_to_roi == True
    blocks = blocking(shape, block_shape, roi, fit_to_roi)
    if roi is not None:
        roi, _ = normalize_slices(roi, shape)
        if fit_to_roi:
            # the output dataset only covers the roi, so shrink its shape
            shape = tuple(rr.stop - rr.start for rr in roi)
    ds_out = f_out.require_dataset(out_path_in_file,
                                   dtype=dtype,
                                   shape=shape,
                                   chunks=chunks,
                                   compression=compression,
                                   **compression_opts)
    def write_single_block(bb):
        # slice-based copy; used when chunk-wise copy is not possible
        data_in = ds_in[bb].astype(dtype, copy=False)
        # skip all-zero blocks (empty chunks need not be written)
        if np.sum(data_in) == 0:
            return
        if fit_to_roi and roi is not None:
            # translate the block to roi-relative coordinates
            bb = tuple(slice(b.start - rr.start, b.stop - rr.start)
                       for b, rr in zip(bb, roi))
        ds_out[bb] = data_in
    def write_single_chunk(bb):
        # direct chunk-to-chunk copy; avoids decompress/recompress overhead
        chunk_id = tuple(b.start // ch for b, ch in zip(bb, chunks))
        chunk_in = ds_in.read_chunk(chunk_id)
        # None means the chunk does not exist on disc -> nothing to copy
        if chunk_in is None:
            return
        # check if this is a varlen chunk
        varlen = tuple(chunk_in.shape) != tuple(b.stop - b.start for b in bb)
        ds_out.write_chunk(chunk_id, chunk_in.astype(dtype, copy=False), varlen)
    write_single = write_single_chunk if copy_chunks else write_single_block
    # copy all blocks in parallel; ``t.result()`` re-raises worker exceptions
    with futures.ThreadPoolExecutor(max_workers=n_threads) as tp:
        tasks = [tp.submit(write_single, bb) for bb in blocks]
        [t.result() for t in tasks]
    # copy attributes
    in_attrs = ds_in.attrs
    out_attrs = ds_out.attrs
    for key, val in in_attrs.items():
        out_attrs[key] = val
def copy_dataset(in_path, out_path,
                 in_path_in_file, out_path_in_file,
                 n_threads, chunks=None,
                 block_shape=None, dtype=None,
                 use_zarr_format=None, roi=None,
                 fit_to_roi=False, **new_compression):
    """ Copy dataset, optionally change metadata.
    The input dataset is copied to the output dataset chunk by chunk,
    optionally changing chunks, datatype, file format or compression,
    or restricting the copy to a roi.
    Args:
        in_path (str): path to the input file.
        out_path (str): path to the output file.
        in_path_in_file (str): name of input dataset.
        out_path_in_file (str): name of output dataset.
        n_threads (int): number of threads used for copying.
        chunks (tuple): chunks of the output dataset.
            By default same as input dataset's chunks. (default: None)
        block_shape (tuple): block shape used for copying. Must be a multiple
            of ``chunks``, which are used by default (default: None)
        dtype (str): datatype of the output dataset, default does not change datatype (default: None).
        use_zarr_format (bool): file format of the output file,
            default does not change format (default: None).
        roi (tuple[slice]): region of interest that will be copied. (default: None)
        fit_to_roi (bool): if given a roi, whether to set the shape of
            the output dataset to the roi's shape
            and align chunks with the roi's origin. (default: False)
        **new_compression: compression library and options for output dataset. If not given,
            the same compression as in the input is used.
    """
    source_file = File(in_path)
    # keep the input file's format unless the caller explicitly overrides it
    if use_zarr_format is None:
        target_is_zarr = source_file.is_zarr
    else:
        target_is_zarr = use_zarr_format
    target_file = File(out_path, use_zarr_format=target_is_zarr)
    copy_dataset_impl(source_file, target_file,
                      in_path_in_file, out_path_in_file, n_threads,
                      chunks=chunks, block_shape=block_shape,
                      dtype=dtype, roi=roi, fit_to_roi=fit_to_roi,
                      **new_compression)
def copy_group(in_path, out_path, in_path_in_file, out_path_in_file, n_threads):
    """Copy a group recursively.

    Datasets are copied via ``copy_dataset``; their metadata cannot be
    changed here and no roi can be applied.  Group attributes are
    mirrored from the source to the destination.

    Args:
        in_path (str): path to the input file.
        out_path (str): path to the output file.
        in_path_in_file (str): name of the input group.
        out_path_in_file (str): name of the output group.
        n_threads (int): number of threads used to copy datasets.
    """
    src_file = File(in_path)
    dst_file = File(out_path)

    def _transfer_attrs(source_obj, target_obj):
        # Mirror every attribute from source to target.
        src_attrs = source_obj.attrs
        dst_attrs = target_obj.attrs
        for attr_key, attr_val in src_attrs.items():
            dst_attrs[attr_key] = attr_val

    root_in = src_file[in_path_in_file]
    root_out = dst_file.require_group(out_path_in_file)
    _transfer_attrs(root_in, root_out)

    def _copy_member(name, obj):
        # Visitor: datasets are copied, sub-groups are created and their
        # attributes mirrored.
        key_in = os.path.join(in_path_in_file, name)
        key_out = os.path.join(out_path_in_file, name)
        if isinstance(obj, Dataset):
            copy_dataset(in_path, out_path, key_in, key_out, n_threads)
        else:
            sub_group = dst_file.require_group(key_out)
            _transfer_attrs(obj, sub_group)

    root_in.visititems(_copy_member)
class Timer:
    """Wall-clock stopwatch, usable directly or as a context manager."""

    def __init__(self):
        self.start_time = None  # datetime set by start()
        self.stop_time = None   # datetime set by stop()

    @property
    def elapsed(self):
        """Seconds between start() and stop().

        Raises:
            RuntimeError: if the timer was not started or not stopped.
        """
        try:
            return (self.stop_time - self.start_time).total_seconds()
        except TypeError as e:
            # Subtracting None raises a TypeError mentioning 'NoneType'.
            if "'NoneType'" in str(e):
                raise RuntimeError("{} either not started, or not stopped".format(self))
            # Bug fix: any other TypeError previously fell through and the
            # property silently returned None; propagate it instead.
            raise

    def start(self):
        """Record the start timestamp."""
        self.start_time = datetime.utcnow()

    def stop(self):
        """Record the stop timestamp and return the elapsed seconds."""
        self.stop_time = datetime.utcnow()
        return self.elapsed

    def __enter__(self):
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stop()
def fetch_test_data_stent():
    """Load the imageio 'stent' sample volume and rescale it to uint8."""
    from imageio import volread
    volume = volread('imageio:stent.npz')
    # Scale the int16 volume into the full 0..255 range.
    rescaled = volume / volume.max() * 255
    return rescaled.astype(np.uint8)
def fetch_test_data():
    """Download the ImageJ T1 head test volume and return it as a uint8 array.

    NOTE(review): ``closing`` is used but not imported in this fragment --
    presumably ``from contextlib import closing`` at the top of the file;
    ``response.status`` also assumes the Python 3 urllib response object.
    """
    # Python 2/3 compatible imports for the URL opener and an in-memory buffer.
    try:
        from urllib.request import urlopen
    except ImportError:
        from urllib2 import urlopen
    try:
        from io import BytesIO as Buffer
    except ImportError:
        from StringIO import StringIO as Buffer
    import zipfile
    from imageio import volread
    im_url = "https://imagej.nih.gov/ij/images/t1-head-raw.zip"
    with closing(urlopen(im_url)) as response:
        if response.status != 200:
            raise RuntimeError("Test data could not be found at {}, status code {}".format(
                im_url, response.status
            ))
        zip_buffer = Buffer(response.read())
    # Extract the TIFF stack from the downloaded zip archive and decode it.
    with zipfile.ZipFile(zip_buffer) as zf:
        tif_buffer = Buffer(zf.read('JeffT1_le.tif'))
    return np.asarray(volread(tif_buffer, format='tif'), dtype=np.uint8)
def remove_trivial_chunks(dataset, n_threads,
                          remove_specific_value=None):
    """Remove chunks of the dataset that contain only a single value.

    Args:
        dataset (z5py.Dataset): dataset to prune in place.
        n_threads (int): number of threads.
        remove_specific_value (int or float): if given, only remove chunks
            that consist entirely of this value (default: None).
    """
    # Dispatch to the dtype-specialized C++ implementation.
    impl_fn = getattr(_z5py, 'remove_trivial_chunks_%s' % dataset.dtype)
    targeted = remove_specific_value is not None
    fill_value = remove_specific_value if targeted else 0
    impl_fn(dataset._impl, n_threads, targeted, fill_value)
def remove_dataset(dataset, n_threads):
    """ Remove dataset multi-threaded.
    """
    # Delegates to the C++ binding, which deletes the chunk files in parallel.
    _z5py.remove_dataset(dataset._impl, n_threads)
def remove_chunk(dataset, chunk_id):
    """ Remove a chunk
    """
    # NOTE(review): this calls the bound method on dataset._impl AND passes
    # dataset._impl again as the first argument -- looks like a copy-paste
    # slip (compare `_z5py.remove_dataset(dataset._impl, ...)` above).
    # Confirm the expected signature against the z5 C++ bindings.
    dataset._impl.remove_chunk(dataset._impl, chunk_id)
def remove_chunks(dataset, bounding_box):
    """Remove every chunk that overlaps the given bounding box."""
    chunk_shape = dataset.chunks
    # Iterate over all blocks of the chunk grid inside the bounding box.
    grid = blocking(dataset.shape, chunk_shape, roi=bounding_box)
    for blk in grid:
        # Convert the block's start coordinates to a chunk index.
        cid = tuple(sl.start // csize for sl, csize in zip(blk, chunk_shape))
        remove_chunk(dataset, cid)
def unique(dataset, n_threads, return_counts=False):
    """Find the unique values in a dataset.

    Args:
        dataset (z5py.Dataset): dataset to scan.
        n_threads (int): number of threads.
        return_counts (bool): also return the count of each unique value
            (default: False).
    """
    # Pick the dtype-specialized binding, with or without counts.
    template = 'unique_with_counts_%s' if return_counts else 'unique_%s'
    impl_fn = getattr(_z5py, template % dataset.dtype)
    return impl_fn(dataset._impl, n_threads)
| [
"urllib2.urlopen",
"zipfile.ZipFile",
"datetime.datetime.utcnow",
"concurrent.futures.ThreadPoolExecutor",
"itertools.product",
"os.path.join",
"imageio.volread",
"numpy.sum"
] | [((11994, 12022), 'imageio.volread', 'volread', (['"""imageio:stent.npz"""'], {}), "('imageio:stent.npz')\n", (12001, 12022), False, 'from imageio import volread\n'), ((2191, 2207), 'itertools.product', 'product', (['*ranges'], {}), '(*ranges)\n', (2198, 2207), False, 'from itertools import product\n'), ((7395, 7444), 'concurrent.futures.ThreadPoolExecutor', 'futures.ThreadPoolExecutor', ([], {'max_workers': 'n_threads'}), '(max_workers=n_threads)\n', (7421, 7444), False, 'from concurrent import futures\n'), ((10873, 10908), 'os.path.join', 'os.path.join', (['in_path_in_file', 'name'], {}), '(in_path_in_file, name)\n', (10885, 10908), False, 'import os\n'), ((10931, 10967), 'os.path.join', 'os.path.join', (['out_path_in_file', 'name'], {}), '(out_path_in_file, name)\n', (10943, 10967), False, 'import os\n'), ((11667, 11684), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11682, 11684), False, 'from datetime import datetime\n'), ((11731, 11748), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (11746, 11748), False, 'from datetime import datetime\n'), ((12747, 12774), 'zipfile.ZipFile', 'zipfile.ZipFile', (['zip_buffer'], {}), '(zip_buffer)\n', (12762, 12774), False, 'import zipfile\n'), ((6679, 6694), 'numpy.sum', 'np.sum', (['data_in'], {}), '(data_in)\n', (6685, 6694), True, 'import numpy as np\n'), ((12480, 12495), 'urllib2.urlopen', 'urlopen', (['im_url'], {}), '(im_url)\n', (12487, 12495), False, 'from urllib2 import urlopen\n'), ((12862, 12895), 'imageio.volread', 'volread', (['tif_buffer'], {'format': '"""tif"""'}), "(tif_buffer, format='tif')\n", (12869, 12895), False, 'from imageio import volread\n')] |
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from json import JSONDecodeError
import requests
from funcy.calc import cache
from funcy.debug import print_calls
from funcy.simple_funcs import curry
# Request headers that mimic an AJAX call from a desktop Chrome browser;
# the site expects X-Requested-With for its JSON endpoints.
HEADERS = {
    "Accept": "application/json, text/javascript, */*; q=0.01",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
                  "Chrome/58.0.3029.110 Safari/537.36",
    "X-Requested-With": "XMLHttpRequest"
}
# Site root; also the page whose response carries the _csrfToken cookie.
HOME_URL = "https://www.webnovel.com/"
class QidianException(Exception):
    """Raised when the webnovel API returns malformed or unexpected data."""
    pass
@cache(60)
def _get_csrftoken():
    """Return the site's ``_csrfToken`` cookie (result cached for 60s)."""
    home_response = requests.get(HOME_URL)
    return home_response.cookies.get('_csrfToken', None)
def novels():
    """Yield every novel entry from the popular-novels listing, page by page."""
    for page_index in range(1, 10000):
        resp = requests.get(
            "https://www.webnovel.com/apiajax/listing/popularAjax",
            headers=HEADERS,
            params={
                '_csrfToken': _get_csrftoken(),
                'category': '',
                'pageIndex': page_index
            })
        payload = _response_to_json(resp)
        # Validate the expected payload shape before touching it.
        if ('data' not in payload or 'items' not in payload['data']
                or 'isLast' not in payload['data']):
            raise QidianException('Expected data not found')
        for item in payload['data']['items']:
            yield item
        # isLast == 1 marks the final page of the listing.
        if payload['data']['isLast'] == 1:
            break
def _response_to_json(response):
try:
data = response.json()
except JSONDecodeError:
raise QidianException('Json parse Error')
return data
def charpters_list(bookId):
    """Yield the chapter metadata entries for the given book id."""
    resp = requests.get('https://www.webnovel.com/apiajax/chapter/GetChapterList',
                        headers=HEADERS,
                        params={
                            '_csrfToken': _get_csrftoken(),
                            'bookId': bookId
                        })
    payload = _response_to_json(resp)
    # Validate the expected payload shape before iterating.
    if 'data' not in payload or 'chapterItems' not in payload['data']:
        raise QidianException('Expected data not found')
    for item in payload['data']['chapterItems']:
        yield item
def chapter(bookId, chapterId):
    """Fetch the content payload of a single chapter of a book."""
    resp = requests.get('https://www.webnovel.com/apiajax/chapter/GetContent',
                        headers=HEADERS,
                        params={
                            '_csrfToken': _get_csrftoken(),
                            'bookId': bookId,
                            'chapterId': chapterId
                        })
    payload = _response_to_json(resp)
    # Validate the expected payload shape before returning.
    if 'data' not in payload or 'chapterInfo' not in payload['data']:
        raise QidianException('Expected data not found')
    return payload['data']['chapterInfo']
def all_chapters(bookId, poolsize=10):
    """Fetch every chapter of a book concurrently; yields chapter payloads."""
    chapter_meta = charpters_list(bookId=bookId)
    with ThreadPoolExecutor(max_workers=poolsize) as pool:
        fetch_one = partial(chapter, bookId)
        for chapter_info in pool.map(fetch_one, (c['chapterId'] for c in chapter_meta)):
            yield chapter_info
| [
"concurrent.futures.ThreadPoolExecutor",
"funcy.calc.cache",
"functools.partial",
"requests.get"
] | [((597, 606), 'funcy.calc.cache', 'cache', (['(60)'], {}), '(60)\n', (602, 606), False, 'from funcy.calc import cache\n'), ((644, 666), 'requests.get', 'requests.get', (['HOME_URL'], {}), '(HOME_URL)\n', (656, 666), False, 'import requests\n'), ((2433, 2473), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'poolsize'}), '(max_workers=poolsize)\n', (2451, 2473), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((2512, 2536), 'functools.partial', 'partial', (['chapter', 'bookId'], {}), '(chapter, bookId)\n', (2519, 2536), False, 'from functools import partial\n')] |
"""
Data creation:
Load the data, normalize it, and split into train and test.
"""
'''
Added the capability of loading pre-separated UCI train/test data
function LoadData_Splitted_UCI
'''
import numpy as np
import os
import pandas as pd
import tensorflow as tf
DATA_PATH = "../UCI_Datasets"
class DataGenerator:
    """Create train/test splits for synthetic and UCI regression datasets.

    Stores the training normalization constants (``scale_c`` = std,
    ``shift_c`` = mean of the target) so predictions can later be
    un-normalized for metric computation.
    """
    def __init__(self, dataset_name):
        # Base name of the UCI dataset file (without extension).
        self.dataset_name = dataset_name
        # used for metrics calculation
        self.scale_c = None # std
        self.shift_c = None # mean
    def create_cubic_10D_data(self):
        """Generate a synthetic 10-D cubic regression problem.

        Train inputs are standard normal; test inputs are shifted by +2
        (out of distribution).  Targets are sum(x**3)/10 plus unit Gaussian
        noise.  Returns (X_train, y_train, X_val, y_val) as numpy arrays,
        normalized with the training statistics.
        """
        Npar = 10
        Ntrain = 5000
        Nout = 1
        Ntest = 1000
        # x_train = tf.random.uniform(shape=(Ntrain, Npar))*4.0-2.0
        x_train = tf.random.normal(shape=(Ntrain, Npar))
        y_train = x_train ** 3
        y_train = tf.reduce_sum(y_train, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_train.shape[0], 1])
        # x_test = tf.random.uniform(shape=(Ntest, Npar))
        # x_test[:,1] = x_test[:,1] + 4.0
        # x_test = np.random.uniform(size=(Ntest,Npar))
        # x_test[:,1] = x_test[:,1] + 4.0
        x_test = np.random.normal(size=(Ntest,Npar)) + 2.0
        x_test = tf.convert_to_tensor(x_test, dtype=tf.float32)
        # scale_c here is the std of the (shifted) test inputs.
        scale_c = np.std(x_test.eval(session=tf.compat.v1.Session()))
        y_test = x_test ** 3
        y_test = tf.reduce_sum(y_test, axis=1, keepdims=True)/10.0 + 1.0*tf.random.normal([x_test.shape[0], 1])
        ### to Numpy array in TF1 compat environment using TF2
        x_train = x_train.eval(session=tf.compat.v1.Session())
        y_train = y_train.eval(session=tf.compat.v1.Session())
        x_test = x_test.eval(session=tf.compat.v1.Session())
        y_test = y_test.eval(session=tf.compat.v1.Session())
        ### normalization
        x_mean = np.mean(x_train, axis=0)
        x_std = np.std(x_train,axis=0)
        xtrain_normal = (x_train - x_mean)/x_std
        y_mean = np.mean(y_train,axis=0)
        y_std = np.std(y_train,axis=0)
        ytrain_normal = (y_train - y_mean)/y_std
        # Test data is normalized with the *training* statistics.
        xvalid_normal = (x_test - x_mean) / x_std
        yvalid_normal = (y_test - y_mean) / y_std
        X_train = xtrain_normal
        y_train = ytrain_normal
        X_val = xvalid_normal
        y_val = yvalid_normal
        self.scale_c = scale_c
        return X_train, y_train, X_val, y_val
    def create_data(self, seed_in=5, train_prop=0.9):
        """
        Load a UCI dataset, normalize every column, and split train/test.

        The target is the last column, except for YearPredictionMSD where
        it is the first.  Returns (X_train, y_train, X_val, y_val).

        @param seed_in: seed for numpy random seed
        @param train_prop: train proportion
        """
        np.random.seed(seed_in)
        # load UCI data
        dataset = self.dataset_name
        dataset_path = f"{DATA_PATH}/{dataset}.txt"
        if dataset == 'YearPredictionMSD':
            data = np.loadtxt(dataset_path, delimiter=',')
        elif dataset == 'naval':
            data = np.loadtxt(dataset_path)
            data = data[:, :-1] # have 2 y as GT, ignore last
        else:
            data = np.loadtxt(dataset_path)
        # save normalization constants (used for calculating results)
        if dataset == 'YearPredictionMSD':
            scale_c = np.std(data[:, 0]) # in YearPredictionMSD, label's index = 0
            shift_c = np.mean(data[:, 0])
        else:
            scale_c = np.std(data[:, -1])
            shift_c = np.mean(data[:, -1])
        # normalize data
        for i in range(data.shape[1]):
            sdev_norm = np.std(data[:, i])
            sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm # avoid zero variance features
            data[:, i] = (data[:, i] - np.mean(data[:, i])) / sdev_norm
        # split train test
        if dataset == 'YearPredictionMSD':
            # train: first 463,715 examples
            # test: last 51,630 examples
            train = data[:463715, :]
            test = data[-51630:, :]
        else:
            # split into train/test in random
            perm = np.random.permutation(data.shape[0])
            train_size = int(round(train_prop * data.shape[0]))
            train = data[perm[:train_size], :]
            test = data[perm[train_size:], :]
        # split to target and data
        if dataset == 'YearPredictionMSD':
            y_train = train[:, 0].reshape(-1, 1)
            X_train = train[:, 1:]
            y_val = test[:, 0].reshape(-1, 1)
            X_val = test[:, 1:]
        else:
            y_train = train[:, -1].reshape(-1, 1)
            X_train = train[:, :-1]
            y_val = test[:, -1].reshape(-1, 1)
            X_val = test[:, :-1]
        self.scale_c = scale_c
        self.shift_c = shift_c
        return X_train, y_train, X_val, y_val
    def LoadData_Splitted_UCI(self, loadCSVName, original_data_path, splitted_data_path, split_seed, **kwargs):
        """Load a pre-split UCI train/test pair, normalized with the full
        original data's per-column statistics.

        Args:
            loadCSVName: short dataset name ('boston', 'concrete', ...).
            original_data_path: directory holding the original UCI files.
            splitted_data_path: directory holding xyTrain_*/xyTest_* CSVs.
            split_seed: seed suffix used in the split file names.

        Returns (xTrain, yTrain, xTest, yTest); also sets scale_c/shift_c
        from the original (unsplit) target column.
        """
        ## (1) Load the original data for the normalization purpose
        # current_dir = os.path.dirname(__file__)
        # uci_dir = os.path.join(current_dir, 'UCI_datasets')
        uci_dir = original_data_path
        if loadCSVName == 'boston':
            data = np.loadtxt(os.path.join(uci_dir, 'boston-housing/boston_housing.txt'))
        if loadCSVName == 'concrete':
            data_df = pd.read_excel(os.path.join(uci_dir, 'concrete/Concrete_Data.xls'))
            data = data_df.values
        if loadCSVName == 'energy':
            data_df = pd.read_excel(os.path.join(uci_dir, 'energy-efficiency/ENB2012_data.xlsx'), engine='openpyxl')
            data_df = data_df.dropna(how='all', axis='columns')
            data_df = data_df.dropna(how='all', axis='rows')
            data = data_df.values
        if loadCSVName == 'kin8nm':
            data_df = pd.read_csv(os.path.join(uci_dir, 'kin8nm/dataset_2175_kin8nm.csv'), sep=',')
            data = data_df.values
        if loadCSVName == 'naval':
            data = np.loadtxt(os.path.join(uci_dir, 'naval/data.txt'))
        if loadCSVName == 'power':
            data_df = pd.read_excel(os.path.join(uci_dir, 'power-plant/Folds5x2_pp.xlsx'), engine='openpyxl')
            data = data_df.values
        if loadCSVName == 'protein':
            data_df = pd.read_csv(os.path.join(uci_dir, 'protein/CASP.csv'), sep=',')
            # print(data_df)
            '''Move the Y data (originally located at the first column) to last column in order to keep consistency
            with the normalization process'''
            col_names = data_df.columns.tolist()
            col_names.append(col_names[0])
            del col_names[col_names.index(col_names[0])]
            # print(col_names)
            data_df = data_df[col_names]
            # print(data_df)
            data = data_df.values
        if loadCSVName == 'wine':
            data_df = pd.read_csv(os.path.join(uci_dir, 'wine-quality/winequality-red.csv'), sep=';')
            data = data_df.values
        if loadCSVName == 'yacht':
            data = np.loadtxt(os.path.join(uci_dir, 'yacht/yacht_hydrodynamics.data'))
        if loadCSVName == 'MSD':
            with open(os.path.join(uci_dir, 'song/YearPredictionMSD.npy'), 'rb') as f:
                data = np.load(f)
        ## (2) Load the pre-splitted train/test data
        ##
        xyTrain_load = np.loadtxt(splitted_data_path+'xyTrain_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
        xyTest_load = np.loadtxt(splitted_data_path+'xyTest_'+loadCSVName+'_seed_'+str(split_seed)+'.csv', delimiter=',')
        xyTrain_load = xyTrain_load.astype(np.float32)
        # xyValid_load = xyValid_load.astype(np.float32)
        xyTest_load = xyTest_load.astype(np.float32)
        # original normalization functions
        # work out normalisation constants (need when unnormalising later)
        scale_c = np.std(data[:, -1])
        shift_c = np.mean(data[:, -1])
        # normalise data
        num_cols = xyTrain_load.shape[1]
        print('num cols: {}'.format(num_cols))
        for i in range(0, num_cols):
            # get the sdev_norm from original data
            sdev_norm = np.std(data[:, i])
            sdev_norm = 0.001 if sdev_norm == 0 else sdev_norm
            # apply on the pre-splitted data
            xyTrain_load[:, i] = (xyTrain_load[:, i] - np.mean(data[:, i]) )/sdev_norm
            xyTest_load[:, i] = (xyTest_load[:, i] - np.mean(data[:, i]) )/sdev_norm
            # xyValid_load[:, i] = (xyValid_load[:, i] - np.mean(data[:, i]) )/sdev_norm
        if loadCSVName == 'energy' or loadCSVName == 'naval':
            # These datasets carry two target columns; drop the penultimate one.
            xTrain = xyTrain_load[:, :-2] ## all columns except last two columns as inputs
            yTrain = xyTrain_load[:, -1] ## last column as output
            xTest = xyTest_load[:, :-2]
            yTest = xyTest_load[:, -1]
        else:
            xTrain = xyTrain_load[:, :-1]
            yTrain = xyTrain_load[:, -1]
            xTest = xyTest_load[:, :-1]
            yTest = xyTest_load[:, -1]
        self.scale_c = scale_c
        self.shift_c = shift_c
        return xTrain, yTrain, xTest, yTest
| [
"numpy.random.normal",
"tensorflow.random.normal",
"numpy.mean",
"tensorflow.reduce_sum",
"os.path.join",
"numpy.random.seed",
"numpy.std",
"tensorflow.convert_to_tensor",
"numpy.loadtxt",
"numpy.load",
"tensorflow.compat.v1.Session",
"numpy.random.permutation"
] | [((714, 752), 'tensorflow.random.normal', 'tf.random.normal', ([], {'shape': '(Ntrain, Npar)'}), '(shape=(Ntrain, Npar))\n', (730, 752), True, 'import tensorflow as tf\n'), ((1174, 1220), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x_test'], {'dtype': 'tf.float32'}), '(x_test, dtype=tf.float32)\n', (1194, 1220), True, 'import tensorflow as tf\n'), ((1789, 1813), 'numpy.mean', 'np.mean', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (1796, 1813), True, 'import numpy as np\n'), ((1830, 1853), 'numpy.std', 'np.std', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (1836, 1853), True, 'import numpy as np\n'), ((1920, 1944), 'numpy.mean', 'np.mean', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1927, 1944), True, 'import numpy as np\n'), ((1960, 1983), 'numpy.std', 'np.std', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (1966, 1983), True, 'import numpy as np\n'), ((2528, 2551), 'numpy.random.seed', 'np.random.seed', (['seed_in'], {}), '(seed_in)\n', (2542, 2551), True, 'import numpy as np\n'), ((7671, 7690), 'numpy.std', 'np.std', (['data[:, -1]'], {}), '(data[:, -1])\n', (7677, 7690), True, 'import numpy as np\n'), ((7709, 7729), 'numpy.mean', 'np.mean', (['data[:, -1]'], {}), '(data[:, -1])\n', (7716, 7729), True, 'import numpy as np\n'), ((1115, 1151), 'numpy.random.normal', 'np.random.normal', ([], {'size': '(Ntest, Npar)'}), '(size=(Ntest, Npar))\n', (1131, 1151), True, 'import numpy as np\n'), ((2728, 2767), 'numpy.loadtxt', 'np.loadtxt', (['dataset_path'], {'delimiter': '""","""'}), "(dataset_path, delimiter=',')\n", (2738, 2767), True, 'import numpy as np\n'), ((3102, 3120), 'numpy.std', 'np.std', (['data[:, 0]'], {}), '(data[:, 0])\n', (3108, 3120), True, 'import numpy as np\n'), ((3186, 3205), 'numpy.mean', 'np.mean', (['data[:, 0]'], {}), '(data[:, 0])\n', (3193, 3205), True, 'import numpy as np\n'), ((3242, 3261), 'numpy.std', 'np.std', (['data[:, -1]'], {}), '(data[:, -1])\n', (3248, 3261), True, 'import 
numpy as np\n'), ((3284, 3304), 'numpy.mean', 'np.mean', (['data[:, -1]'], {}), '(data[:, -1])\n', (3291, 3304), True, 'import numpy as np\n'), ((3394, 3412), 'numpy.std', 'np.std', (['data[:, i]'], {}), '(data[:, i])\n', (3400, 3412), True, 'import numpy as np\n'), ((3889, 3925), 'numpy.random.permutation', 'np.random.permutation', (['data.shape[0]'], {}), '(data.shape[0])\n', (3910, 3925), True, 'import numpy as np\n'), ((7957, 7975), 'numpy.std', 'np.std', (['data[:, i]'], {}), '(data[:, i])\n', (7963, 7975), True, 'import numpy as np\n'), ((802, 847), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_train'], {'axis': '(1)', 'keepdims': '(True)'}), '(y_train, axis=1, keepdims=True)\n', (815, 847), True, 'import tensorflow as tf\n'), ((859, 898), 'tensorflow.random.normal', 'tf.random.normal', (['[x_train.shape[0], 1]'], {}), '([x_train.shape[0], 1])\n', (875, 898), True, 'import tensorflow as tf\n'), ((1337, 1381), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['y_test'], {'axis': '(1)', 'keepdims': '(True)'}), '(y_test, axis=1, keepdims=True)\n', (1350, 1381), True, 'import tensorflow as tf\n'), ((1393, 1431), 'tensorflow.random.normal', 'tf.random.normal', (['[x_test.shape[0], 1]'], {}), '([x_test.shape[0], 1])\n', (1409, 1431), True, 'import tensorflow as tf\n'), ((1536, 1558), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1556, 1558), True, 'import tensorflow as tf\n'), ((1599, 1621), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1619, 1621), True, 'import tensorflow as tf\n'), ((1660, 1682), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1680, 1682), True, 'import tensorflow as tf\n'), ((1721, 1743), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1741, 1743), True, 'import tensorflow as tf\n'), ((2820, 2844), 'numpy.loadtxt', 'np.loadtxt', (['dataset_path'], {}), '(dataset_path)\n', (2830, 2844), True, 'import numpy as np\n'), ((2941, 2965), 
'numpy.loadtxt', 'np.loadtxt', (['dataset_path'], {}), '(dataset_path)\n', (2951, 2965), True, 'import numpy as np\n'), ((5013, 5071), 'os.path.join', 'os.path.join', (['uci_dir', '"""boston-housing/boston_housing.txt"""'], {}), "(uci_dir, 'boston-housing/boston_housing.txt')\n", (5025, 5071), False, 'import os\n'), ((5148, 5199), 'os.path.join', 'os.path.join', (['uci_dir', '"""concrete/Concrete_Data.xls"""'], {}), "(uci_dir, 'concrete/Concrete_Data.xls')\n", (5160, 5199), False, 'import os\n'), ((5308, 5368), 'os.path.join', 'os.path.join', (['uci_dir', '"""energy-efficiency/ENB2012_data.xlsx"""'], {}), "(uci_dir, 'energy-efficiency/ENB2012_data.xlsx')\n", (5320, 5368), False, 'import os\n'), ((5620, 5675), 'os.path.join', 'os.path.join', (['uci_dir', '"""kin8nm/dataset_2175_kin8nm.csv"""'], {}), "(uci_dir, 'kin8nm/dataset_2175_kin8nm.csv')\n", (5632, 5675), False, 'import os\n'), ((5786, 5825), 'os.path.join', 'os.path.join', (['uci_dir', '"""naval/data.txt"""'], {}), "(uci_dir, 'naval/data.txt')\n", (5798, 5825), False, 'import os\n'), ((5899, 5952), 'os.path.join', 'os.path.join', (['uci_dir', '"""power-plant/Folds5x2_pp.xlsx"""'], {}), "(uci_dir, 'power-plant/Folds5x2_pp.xlsx')\n", (5911, 5952), False, 'import os\n'), ((6079, 6120), 'os.path.join', 'os.path.join', (['uci_dir', '"""protein/CASP.csv"""'], {}), "(uci_dir, 'protein/CASP.csv')\n", (6091, 6120), False, 'import os\n'), ((6675, 6732), 'os.path.join', 'os.path.join', (['uci_dir', '"""wine-quality/winequality-red.csv"""'], {}), "(uci_dir, 'wine-quality/winequality-red.csv')\n", (6687, 6732), False, 'import os\n'), ((6843, 6898), 'os.path.join', 'os.path.join', (['uci_dir', '"""yacht/yacht_hydrodynamics.data"""'], {}), "(uci_dir, 'yacht/yacht_hydrodynamics.data')\n", (6855, 6898), False, 'import os\n'), ((7044, 7054), 'numpy.load', 'np.load', (['f'], {}), '(f)\n', (7051, 7054), True, 'import numpy as np\n'), ((1266, 1288), 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), '()\n', (1286, 
1288), True, 'import tensorflow as tf\n'), ((3547, 3566), 'numpy.mean', 'np.mean', (['data[:, i]'], {}), '(data[:, i])\n', (3554, 3566), True, 'import numpy as np\n'), ((6956, 7007), 'os.path.join', 'os.path.join', (['uci_dir', '"""song/YearPredictionMSD.npy"""'], {}), "(uci_dir, 'song/YearPredictionMSD.npy')\n", (6968, 7007), False, 'import os\n'), ((8139, 8158), 'numpy.mean', 'np.mean', (['data[:, i]'], {}), '(data[:, i])\n', (8146, 8158), True, 'import numpy as np\n'), ((8225, 8244), 'numpy.mean', 'np.mean', (['data[:, i]'], {}), '(data[:, i])\n', (8232, 8244), True, 'import numpy as np\n')] |
# This script adds a new message to a specific SQS queue
#
# Author - <NAME> 2013
#
#
#from __future__ import print_function
import sys
import Queue
import boto.sqs
import argparse
import socket
import datetime
import sys
import time
from boto.sqs.attributes import Attributes
# Command-line interface: the SQS queue name and an experiment label.
parser = argparse.ArgumentParser()
parser.add_argument('queuearg',help='name of the sqs queue to use',metavar="myQueueName")
parser.add_argument('experiment',help='name of the experiment queue to use')
args = parser.parse_args()
from boto.sqs.message import Message
import threading
# NOTE(review): credentials are hard-coded placeholders -- load them from the
# environment or an IAM role instead of committing keys to source.
conn = boto.sqs.connect_to_region("us-east-1", aws_access_key_id='<KEY>', aws_secret_access_key='<KEY>')
sqs_queue = conn.get_queue(args.queuearg)
class Sender(threading.Thread):
    # Worker thread: drains messages from the shared SQS queue and appends
    # each message's SentTimestamp attributes and body to its own CSV file.
    def __init__(self):
        threading.Thread.__init__(self)
    def run(self):
        global sqs_queue,queue
        # Take a unique index from the (thread-safe) work queue so that each
        # thread writes to a distinct output file.
        name = args.experiment+str(queue.get())+"-"+args.queuearg+".csv"
        f = open(name,'w')
        while True:
            try:
                m = sqs_queue.get_messages(num_messages=1,attributes='SentTimestamp')
                f.write(str(m[0].attributes)+","+str(m[0].get_body())+"\n")
                sqs_queue.delete_message(m[0])
            except:
                # An empty fetch raises IndexError on m[0]; treat an empty
                # queue as "done".  NOTE(review): the bare except also hides
                # real errors and the file is never closed explicitly.
                if sqs_queue.count() < 1:
                    f.write(args.queuearg + " is empty\n")
                    return
# Seed the work queue with 40 indices and spawn one Sender thread per index.
# (Python 2 script: uses the Queue module and xrange.)
queue = Queue.Queue(0)
threads = []
for n in xrange(40):
    queue.put(n)
    t = Sender()
    t.start()
    threads.append(t)
# Block until every drain thread has finished.
for t in threads:
    t.join()
| [
"threading.Thread.__init__",
"Queue.Queue",
"argparse.ArgumentParser"
] | [((287, 312), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (310, 312), False, 'import argparse\n'), ((1231, 1245), 'Queue.Queue', 'Queue.Queue', (['(0)'], {}), '(0)\n', (1242, 1245), False, 'import Queue\n'), ((766, 797), 'threading.Thread.__init__', 'threading.Thread.__init__', (['self'], {}), '(self)\n', (791, 797), False, 'import threading\n')] |
from typing import Optional
import pandas as pd
from dero.ml.typing import ModelDict, AllModelResultsDict, DfDict
def model_dict_to_df(model_results: ModelDict, model_name: Optional[str] = None) -> pd.DataFrame:
    """Convert a single model-result dict into a DataFrame.

    The 'score' entry becomes a leading column; if ``model_name`` is given,
    a 'model' column is prepended as well.
    """
    df = pd.DataFrame(model_results).T
    # 'score' starts out as a row of the transposed frame; move it to a column.
    df.drop('score', inplace=True)
    df['score'] = model_results['score']
    if model_name is None:
        leading = ['score']
    else:
        df['model'] = model_name
        leading = ['model', 'score']
    trailing = [col for col in df.columns if col not in leading]
    return df[leading + trailing]
def all_model_results_dict_to_df(results: AllModelResultsDict) -> pd.DataFrame:
    """Flatten all model results into one DataFrame, sorted by score (desc).

    Each instance dict is converted via ``model_dict_to_df`` with its model
    type as the 'model' column; 'model' and 'score' lead the columns.
    """
    frames = [
        model_dict_to_df(instance, model_name=model_type)
        for model_type, instance_list in results.items()
        for instance in instance_list
    ]
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    df = pd.concat(frames) if frames else pd.DataFrame()
    first_cols = ['model', 'score']
    other_cols = [col for col in df.columns if col not in first_cols]
    return df[first_cols + other_cols].sort_values('score', ascending=False)
def all_model_results_dict_to_model_df_dict(results: AllModelResultsDict) -> DfDict:
    """Build one score-sorted DataFrame per model type.

    Returns a dict mapping model type -> DataFrame of that model's
    instances, sorted by 'score' descending.
    """
    out_dict = {}
    for model_type, instance_list in results.items():
        frames = [model_dict_to_df(instance, model_name=model_type)
                  for instance in instance_list]
        # DataFrame.append was removed in pandas 2.0; concatenate instead.
        model_df = pd.concat(frames) if frames else pd.DataFrame()
        out_dict[model_type] = model_df.sort_values('score', ascending=False)
    return out_dict
| [
"pandas.DataFrame"
] | [((676, 690), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (688, 690), True, 'import pandas as pd\n'), ((223, 250), 'pandas.DataFrame', 'pd.DataFrame', (['model_results'], {}), '(model_results)\n', (235, 250), True, 'import pandas as pd\n'), ((1255, 1269), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1267, 1269), True, 'import pandas as pd\n')] |
import pygame
from engine.utils import Rect
from engine.app import get_screen_size
# EXPORT
class View(object):
    """A movable rectangular viewport backed by an ``engine.utils.Rect``.

    Defaults to a rect covering the whole screen when none is supplied.
    """
    def __init__(self, rect=None):
        if rect:
            self.rect = rect
        else:
            # No rect given: cover the full screen.
            res = get_screen_size()
            self.rect = Rect(0,0,res[0],res[1])
    def offset(self, d):
        # Shift the view by (dx, dy).  NOTE(review): assumes Rect.move
        # mutates in place -- confirm against engine.utils.Rect.
        self.rect.move(d[0], d[1])
    def get_position(self):
        # Top-left corner of the view.
        return self.rect.tl
    def set_position(self, pos):
        # Rebuild the rect at `pos`, preserving the current width and height.
        self.rect = Rect(pos.x, pos.y, pos.x+self.rect.width(), pos.y+self.rect.height())
    def relative_position(self, pos):
        # Translate an absolute position into view-local coordinates.
        return pos - self.rect.tl
    def get_rect(self):
        # Construct and return a new Rect from the current one.
        return Rect(self.rect)
| [
"engine.utils.Rect",
"engine.app.get_screen_size"
] | [((647, 662), 'engine.utils.Rect', 'Rect', (['self.rect'], {}), '(self.rect)\n', (651, 662), False, 'from engine.utils import Rect\n'), ((226, 243), 'engine.app.get_screen_size', 'get_screen_size', ([], {}), '()\n', (241, 243), False, 'from engine.app import get_screen_size\n'), ((268, 294), 'engine.utils.Rect', 'Rect', (['(0)', '(0)', 'res[0]', 'res[1]'], {}), '(0, 0, res[0], res[1])\n', (272, 294), False, 'from engine.utils import Rect\n')] |
from __future__ import print_function
import emcee
from multiprocessing import Pool
import numpy as np
import corner
import matplotlib.pyplot as plt
import sys
import scipy.optimize as op
from rbvfit.rb_vfit import rb_veldiff as rb_veldiff
from rbvfit import rb_setline as rb
import pdb
def plot_model(wave_obs,fnorm,enorm,fit,model,outfile= False,xlim=[-600.,600.],verbose=False):
    """Plot the best-fit Voigt-profile model over the normalized spectrum.

    One panel per transition, in velocity space around zabs; draws the data,
    error array, 100 posterior draws, the best-fit model, each individual
    component, and per-clump N/b annotations with their 16/84-percentile
    uncertainties.

    NOTE(review): `np.int` and `np.str` were removed in NumPy 1.20/1.24 --
    this function needs `int(...)`/`str(...)` on modern NumPy.  The mutable
    default `xlim=[-600.,600.]` is never mutated here, so it is harmless.
    """
    # This plot only works if there are no nuisance parameters.
    theta_prime=fit.best_theta
    value1=fit.low_theta
    value2=fit.high_theta
    n_clump=model.nclump
    # Parameters are packed as [N..., b..., v...] -> 3 per clump.
    n_clump_total=np.int(len(theta_prime)/3)
    ntransition=model.ntransition
    zabs=model.zabs
    samples=fit.samples
    model_mcmc=fit.model
    wave_list=np.zeros( len(model.lambda_rest_original),)
    # Use the input lambda rest list to plot correctly
    for i in range(0,len(wave_list)):
        s=rb.rb_setline(model.lambda_rest_original[i],'closest')
        wave_list[i]=s['wave']
    # Shift the observed wavelengths to the absorber rest frame.
    wave_rest=wave_obs/(1+zabs[0])
    # Unpack best-fit and credible-interval parameter vectors.
    best_N = theta_prime[0:n_clump_total]
    best_b = theta_prime[n_clump_total:2 * n_clump_total]
    best_v = theta_prime[2 * n_clump_total:3 * n_clump_total]
    low_N = value1[0:n_clump_total]
    low_b = value1[n_clump_total:2 * n_clump_total]
    low_v = value1[2 * n_clump_total:3 * n_clump_total]
    high_N = value2[0:n_clump_total]
    high_b = value2[n_clump_total:2 * n_clump_total]
    high_v = value2[2 * n_clump_total:3 * n_clump_total]
    #Now extracting individual fitted components
    best_fit, f1 = model.model_fit(theta_prime, wave_obs)
    fig, axs = plt.subplots(ntransition, sharex=True, sharey=False,figsize=(12,18 ),gridspec_kw={'hspace': 0})
    BIGGER_SIZE = 18
    plt.rc('font', size=BIGGER_SIZE)          # controls default text sizes
    plt.rc('axes', titlesize=BIGGER_SIZE)     # fontsize of the axes title
    plt.rc('axes', labelsize=BIGGER_SIZE)     # fontsize of the x and y labels
    plt.rc('xtick', labelsize=BIGGER_SIZE)    # fontsize of the tick labels
    plt.rc('ytick', labelsize=BIGGER_SIZE)    # fontsize of the tick labels
    plt.rc('legend', fontsize=BIGGER_SIZE)    # legend fontsize
    plt.rc('figure', titlesize=BIGGER_SIZE)   # fontsize of the figure title
    # Random posterior draws to visualize model uncertainty.
    index = np.random.randint(0, high=len(samples), size=100)
    if ntransition == 1:
        #When there are no nuissance parameter
        #Now loop through each transition and plot them in velocity space
        vel=rb_veldiff(wave_list[0],wave_rest)
        axs.step(vel, fnorm, 'k-', linewidth=1.)
        axs.step(vel, enorm, color='r', linewidth=1.)
        # Plotting a random sample of outputs extracted from posterior dis
        for ind in range(len(index)):
            axs.plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
        axs.set_ylim([0, 1.6])
        axs.set_xlim(xlim)
        axs.plot(vel, best_fit, color='b', linewidth=3)
        axs.plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
        # plot individual components
        for dex in range(0,np.shape(f1)[1]):
            axs.plot(vel, f1[:, dex], 'g:', linewidth=3)
        # Tick mark and N/b annotation for every fitted clump.
        for iclump in range(0,n_clump):
            axs.plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
            text1=r'$logN \;= '+ np.str('%.2f' % best_N[iclump]) +'^{ + ' + np.str('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + np.str('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
            axs.text(best_v[iclump],1.2,text1,
                     fontsize=14,rotation=90, rotation_mode='anchor')
            text2=r'$b ='+np.str('%.0f' % best_b[iclump]) +'^{ + ' + np.str('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + np.str('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
            axs.text(best_v[iclump]+30,1.2, text2,fontsize=14,rotation=90, rotation_mode='anchor')
    else:
        #Now loop through each transition and plot them in velocity space
        for i in range(0,ntransition):
            print(wave_list[i])
            vel=rb_veldiff(wave_list[i],wave_rest)
            axs[i].step(vel, fnorm, 'k-', linewidth=1.)
            axs[i].step(vel, enorm, color='r', linewidth=1.)
            #pdb.set_trace()
            # Plotting a random sample of outputs extracted from posterior distribution
            for ind in range(len(index)):
                axs[i].plot(vel, model_mcmc(samples[index[ind], :], wave_obs), color="k", alpha=0.1)
            axs[i].set_ylim([0, 1.6])
            axs[i].set_xlim(xlim)
            axs[i].plot(vel, best_fit, color='b', linewidth=3)
            axs[i].plot([0., 0.], [-0.2, 2.5], 'k:', lw=0.5)
            # plot individual components
            for dex in range(0,np.shape(f1)[1]):
                axs[i].plot(vel, f1[:, dex], 'g:', linewidth=3)
            # Annotate clumps on the first panel only.
            for iclump in range(0,n_clump):
                axs[i].plot([best_v[iclump],best_v[iclump]],[1.05,1.15],'k--',lw=4)
                if i ==0:
                    text1=r'$logN \;= '+ np.str('%.2f' % best_N[iclump]) +'^{ + ' + np.str('%.2f' % (best_N[iclump]-low_N[iclump]))+'}'+ '_{ -' + np.str('%.2f' % (high_N[iclump]-best_N[iclump]))+'}$'
                    axs[i].text(best_v[iclump],1.2,text1,
                                fontsize=14,rotation=90, rotation_mode='anchor')
                    text2=r'$b ='+np.str('%.0f' % best_b[iclump]) +'^{ + ' + np.str('%.0f' % (best_b[iclump]-low_b[iclump]))+'}'+ '_{ -' + np.str('%.0f' % (high_b[iclump]-best_b[iclump]))+'}$'
                    axs[i].text(best_v[iclump]+30,1.2, text2,
                                fontsize=14,rotation=90, rotation_mode='anchor')
    if verbose==True:
        # Print LaTeX summaries of the marginalized parameter posteriors.
        from IPython.display import display, Math
        samples = fit.sampler.get_chain(discard=100, thin=15, flat=True)
        nfit = int(fit.ndim / 3)
        N_tile = np.tile("logN", nfit)
        b_tile = np.tile("b", nfit)
        v_tile = np.tile("v", nfit)
        tmp = np.append(N_tile, b_tile)
        text_label = np.append(tmp, v_tile)
        for i in range(len(text_label)):
            mcmc = np.percentile(samples[:, i], [16, 50, 84])
            q = np.diff(mcmc)
            txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
            txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
            display(Math(txt))
    if outfile==False:
        plt.show()
    else:
        outfile_fig =outfile
        fig.savefig(outfile_fig, bbox_inches='tight')
######## Computing Likelihoods######
def lnprior(theta, lb, ub):
    """Log of the (uniform box) prior probability.

    Parameters
    ----------
    theta : sequence of float
        Parameter vector (logN, b, v values concatenated).
    lb, ub : sequence of float
        Lower and upper bounds, one pair per entry of ``theta``.

    Returns
    -------
    float
        0.0 when every parameter lies inside its closed interval
        [lb[i], ub[i]]; -inf as soon as any parameter is out of bounds.
    """
    # The original loop had an unreachable `break` after `return`; removed.
    for index in range(len(lb)):
        if lb[index] > theta[index] or ub[index] < theta[index]:
            return -np.inf
    return 0.0
def lnlike(theta, model, x, y, yerr):
    """Gaussian log-likelihood of the data given ``model(theta, x)``.

    Parameters
    ----------
    theta : sequence of float
        Model parameters forwarded to ``model``.
    model : callable
        ``model(theta, x)`` returning the predicted flux.
    x, y, yerr : array_like
        Independent variable, observed values, and 1-sigma uncertainties.
    """
    prediction = model(theta, x)
    inv_var = 1.0 / (yerr ** 2)
    residual_sq = (y - prediction) ** 2
    return -0.5 * np.sum(residual_sq * inv_var - np.log(inv_var))
def lnprob(theta, lb, ub, model, x, y, yerr):
    """Log-posterior: prior plus likelihood; -inf outside the prior box."""
    prior = lnprior(theta, lb, ub)
    if np.isfinite(prior):
        return prior + lnlike(theta, model, x, y, yerr)
    return -np.inf
def optimize_guess(model, theta, lb, ub, x, y, yerr):
    """Refine the starting guess by minimizing the negative log-posterior.

    Returns the optimized parameter vector from ``scipy.optimize.minimize``.
    """
    def neg_lnprob(*args):
        # Minimizing -lnprob is equivalent to maximizing the posterior.
        return -lnprob(*args)

    result = op.minimize(neg_lnprob, [theta], args=(lb, ub, model, x, y, yerr))
    return result["x"]
def set_bounds(nguess, bguess, vguess):
    """Build lower/upper fitting bounds around the initial guesses.

    Per clump the windows are: logN +/- 2 dex, b +/- 40 km/s (floored at 2),
    v +/- 50 km/s. An upper b bound above 200 is capped at 150, matching the
    original element-wise logic.

    Returns
    -------
    bounds : list
        ``[lb, ub]`` — the two concatenated bound arrays.
    lb, ub : numpy.ndarray
        Lower and upper bounds ordered as (all logN, all b, all v).
    """
    nguess = np.asarray(nguess, dtype=float)
    bguess = np.asarray(bguess, dtype=float)
    vguess = np.asarray(vguess, dtype=float)

    Nlow = nguess - 2.0
    NHI = nguess + 2.0

    # Floor the lower b bound at 2 km/s; cap oversized upper b bounds at 150.
    blow = np.maximum(bguess - 40.0, 2.0)
    bHI = bguess + 40.0
    bHI = np.where(bHI > 200.0, 150.0, bHI)

    vlow = vguess - 50.0
    vHI = vguess + 50.0

    lb = np.concatenate((Nlow, blow, vlow))
    ub = np.concatenate((NHI, bHI, vHI))
    return [lb, ub], lb, ub
class vfit(object):
    """MCMC Voigt-profile fitter.

    Wraps an ``emcee.EnsembleSampler`` around a user-supplied model and a
    normalized spectrum (``wave_obs``, ``fnorm``, ``enorm``), with box priors
    ``[lb, ub]`` on the parameters.
    """

    def __init__(self, model, theta, lb, ub, wave_obs, fnorm, enorm, no_of_Chain=50, no_of_steps=1000,
                 perturbation=1e-6):
        # Main class that performs all the fitting.
        # model: callable model(theta, wave) -> predicted flux
        # theta: initial parameter guess (logN, b, v per clump, concatenated)
        # lb/ub: lower/upper bounds enforced by the prior
        # wave_obs/fnorm/enorm: observed wavelength grid, normalized flux, error
        # no_of_Chain: number of emcee walkers; no_of_steps: steps per walker
        # perturbation: scale of random scatter applied to walker start points
        self.wave_obs = wave_obs
        self.fnorm = fnorm
        self.enorm = enorm
        self.model = model
        self.lb = lb
        self.ub = ub
        self.theta = theta
        self.no_of_Chain = no_of_Chain
        self.no_of_steps = no_of_steps
        self.perturbation = perturbation

    def runmcmc(self, optimize=True, verbose=False):
        """Run the MCMC fit; stores sampler/ndim/nwalkers on ``self``.

        optimize: refine the initial guess with scipy minimize first.
        verbose: print 16/50/84 percentile summaries of the flat chain.
        """
        model = self.model
        theta = self.theta
        lb = self.lb
        ub = self.ub
        wave_obs = self.wave_obs
        fnorm = self.fnorm
        enorm = self.enorm
        no_of_Chain = self.no_of_Chain
        no_of_steps = self.no_of_steps
        perturbation = self.perturbation
        if optimize == True:
            print('Optimizing Guess ***********')
            # Now make a better guess
            popt = optimize_guess(model, theta, lb, ub, wave_obs, fnorm, enorm)
            print('Done ***********')
        else:
            print('Skipping Optimizing Guess ***********')
            print('Using input guess for mcmc ***********')
            popt = theta
        print('Preparing emcee ***********')
        # Define the walkers: each one starts near popt with tiny random scatter.
        length_of_lb = len(lb)
        ndim, nwalkers = length_of_lb, no_of_Chain
        guesses = [popt + perturbation * np.random.randn(ndim) for i in range(nwalkers)]
        print("Starting emcee ***********")
        # NOTE(review): burntime is computed but never used — intended burn-in?
        burntime = np.round(no_of_steps * .2)
        with Pool() as pool:
            # lnprob receives (lb, ub, model, wave_obs, fnorm, enorm) as extra args.
            sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob, pool=pool, args=(lb, ub, model, wave_obs, fnorm, enorm))
            pos, prob, state = sampler.run_mcmc(guesses, no_of_steps, progress=True)
        #sampler.reset()
        print("Done!")
        #print("Now starting the Final Calculations:")
        print("*****************")
        #width = 30
        # Now Running mcmc
        #for i, result in enumerate(sampler.sample(pos, iterations=no_of_steps)):
        #    n = int((width + 1) * float(i) / no_of_steps)
        #sys.stdout.write("\r[{0}{1}]".format('#' * n, ' ' * (width - n)))
        #sys.stdout.write("\n")
        if verbose == True:
            from IPython.display import display, Math
            # Flatten the chain, discarding burn-in and thinning for summaries.
            samples = sampler.get_chain(discard=100, thin=15, flat=True)
            # Parameters are ordered (all logN, all b, all v), 3 groups of nfit.
            nfit = int(ndim / 3)
            N_tile = np.tile("logN", nfit)
            b_tile = np.tile("b", nfit)
            v_tile = np.tile("v", nfit)
            tmp = np.append(N_tile, b_tile)
            text_label = np.append(tmp, v_tile)
            for i in range(len(text_label)):
                # 16/50/84 percentiles -> median with +/- 1-sigma quantiles.
                mcmc = np.percentile(samples[:, i], [16, 50, 84])
                q = np.diff(mcmc)
                txt = "\mathrm{{{3}}} = {0:.2f}_{{-{1:.2f}}}^{{{2:.2f}}}"
                txt = txt.format(mcmc[1], q[0], q[1], text_label[i])
                display(Math(txt))
        self.sampler = sampler
        self.ndim = ndim
        self.nwalkers = nwalkers

    def plot_corner(self, outfile=False):
        """Corner plot of the posterior; stores best/low/high theta on ``self``.

        outfile: False -> show interactively; otherwise a path to save to.
        """
        ndim = self.ndim
        #samples = self.sampler.chain[:, 100:, :].reshape((-1, ndim))  # sampler.flatchain
        samples = self.sampler.get_chain(discard=100, thin=15, flat=True)
        st = np.percentile(samples, 50, axis=0)  # per-parameter posterior median
        # df = pd.DataFrame(samples)
        # temp=df.mode()
        # st=temp.values[0]
        nfit = int(ndim / 3)
        N_tile = np.tile("logN", nfit)
        b_tile = np.tile("b", nfit)
        v_tile = np.tile("v", nfit)
        tmp = np.append(N_tile, b_tile)
        text_label = np.append(tmp, v_tile)
        figure = corner.corner(samples, labels=text_label, truths=st)
        theta_prime = st
        # 10th percentile of the sample (lower credible bound):
        value1 = np.percentile(samples, 10, axis=0)
        # 90th percentile of the sample (upper credible bound):
        value2 = np.percentile(samples, 90, axis=0)
        # Extract the axes
        axes = np.array(figure.axes).reshape((ndim, ndim))
        # Loop over the diagonal
        for i in range(ndim):
            ax = axes[i, i]
            ax.axvline(value1[i], color="aqua")
            ax.axvline(value2[i], color="aqua")
        # Loop over the histograms
        for yi in range(ndim):
            for xi in range(yi):
                ax = axes[yi, xi]
                ax.axvline(value1[xi], color="aqua")
                ax.axvline(value2[xi], color="aqua")
                # ax.axhline(value1[yi], color="g")
                # ax.axhline(value2[yi], color="r")
                # ax.plot(value1[xi], value1[yi], "sg")
                # ax.plot(value2[xi], value2[yi], "sr")
        self.best_theta = theta_prime
        self.low_theta = value1
        self.high_theta = value2
        self.samples = samples
        if outfile == False:
            plt.show()
        else:
            outfile_fig = outfile
            figure.savefig(outfile_fig, bbox_inches='tight')
| [
"numpy.log",
"emcee.EnsembleSampler",
"numpy.array",
"numpy.isfinite",
"rbvfit.rb_vfit.rb_veldiff",
"corner.corner",
"numpy.diff",
"numpy.concatenate",
"numpy.round",
"numpy.tile",
"scipy.optimize.minimize",
"IPython.display.Math",
"numpy.shape",
"numpy.random.randn",
"matplotlib.pyplot.... | [((1731, 1832), 'matplotlib.pyplot.subplots', 'plt.subplots', (['ntransition'], {'sharex': '(True)', 'sharey': '(False)', 'figsize': '(12, 18)', 'gridspec_kw': "{'hspace': 0}"}), "(ntransition, sharex=True, sharey=False, figsize=(12, 18),\n gridspec_kw={'hspace': 0})\n", (1743, 1832), True, 'import matplotlib.pyplot as plt\n'), ((1878, 1910), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'size': 'BIGGER_SIZE'}), "('font', size=BIGGER_SIZE)\n", (1884, 1910), True, 'import matplotlib.pyplot as plt\n'), ((1958, 1995), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': 'BIGGER_SIZE'}), "('axes', titlesize=BIGGER_SIZE)\n", (1964, 1995), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2074), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'labelsize': 'BIGGER_SIZE'}), "('axes', labelsize=BIGGER_SIZE)\n", (2043, 2074), True, 'import matplotlib.pyplot as plt\n'), ((2119, 2157), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'labelsize': 'BIGGER_SIZE'}), "('xtick', labelsize=BIGGER_SIZE)\n", (2125, 2157), True, 'import matplotlib.pyplot as plt\n'), ((2199, 2237), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'labelsize': 'BIGGER_SIZE'}), "('ytick', labelsize=BIGGER_SIZE)\n", (2205, 2237), True, 'import matplotlib.pyplot as plt\n'), ((2279, 2317), 'matplotlib.pyplot.rc', 'plt.rc', (['"""legend"""'], {'fontsize': 'BIGGER_SIZE'}), "('legend', fontsize=BIGGER_SIZE)\n", (2285, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2347, 2386), 'matplotlib.pyplot.rc', 'plt.rc', (['"""figure"""'], {'titlesize': 'BIGGER_SIZE'}), "('figure', titlesize=BIGGER_SIZE)\n", (2353, 2386), True, 'import matplotlib.pyplot as plt\n'), ((7768, 7827), 'scipy.optimize.minimize', 'op.minimize', (['nll', '[theta]'], {'args': '(lb, ub, model, x, y, yerr)'}), '(nll, [theta], args=(lb, ub, model, x, y, yerr))\n', (7779, 7827), True, 'import scipy.optimize as op\n'), ((8431, 8465), 'numpy.concatenate', 'np.concatenate', (['(Nlow, blow, 
vlow)'], {}), '((Nlow, blow, vlow))\n', (8445, 8465), True, 'import numpy as np\n'), ((8471, 8502), 'numpy.concatenate', 'np.concatenate', (['(NHI, bHI, vHI)'], {}), '((NHI, bHI, vHI))\n', (8485, 8502), True, 'import numpy as np\n'), ((934, 989), 'rbvfit.rb_setline.rb_setline', 'rb.rb_setline', (['model.lambda_rest_original[i]', '"""closest"""'], {}), "(model.lambda_rest_original[i], 'closest')\n", (947, 989), True, 'from rbvfit import rb_setline as rb\n'), ((2678, 2713), 'rbvfit.rb_vfit.rb_veldiff', 'rb_veldiff', (['wave_list[0]', 'wave_rest'], {}), '(wave_list[0], wave_rest)\n', (2688, 2713), True, 'from rbvfit.rb_vfit import rb_veldiff as rb_veldiff\n'), ((6363, 6384), 'numpy.tile', 'np.tile', (['"""logN"""', 'nfit'], {}), "('logN', nfit)\n", (6370, 6384), True, 'import numpy as np\n'), ((6406, 6424), 'numpy.tile', 'np.tile', (['"""b"""', 'nfit'], {}), "('b', nfit)\n", (6413, 6424), True, 'import numpy as np\n'), ((6446, 6464), 'numpy.tile', 'np.tile', (['"""v"""', 'nfit'], {}), "('v', nfit)\n", (6453, 6464), True, 'import numpy as np\n'), ((6483, 6508), 'numpy.append', 'np.append', (['N_tile', 'b_tile'], {}), '(N_tile, b_tile)\n', (6492, 6508), True, 'import numpy as np\n'), ((6534, 6556), 'numpy.append', 'np.append', (['tmp', 'v_tile'], {}), '(tmp, v_tile)\n', (6543, 6556), True, 'import numpy as np\n'), ((6948, 6958), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6956, 6958), True, 'import matplotlib.pyplot as plt\n'), ((7571, 7586), 'numpy.isfinite', 'np.isfinite', (['lp'], {}), '(lp)\n', (7582, 7586), True, 'import numpy as np\n'), ((10125, 10152), 'numpy.round', 'np.round', (['(no_of_steps * 0.2)'], {}), '(no_of_steps * 0.2)\n', (10133, 10152), True, 'import numpy as np\n'), ((11889, 11923), 'numpy.percentile', 'np.percentile', (['samples', '(50)'], {'axis': '(0)'}), '(samples, 50, axis=0)\n', (11902, 11923), True, 'import numpy as np\n'), ((12128, 12149), 'numpy.tile', 'np.tile', (['"""logN"""', 'nfit'], {}), "('logN', nfit)\n", (12135, 12149), 
True, 'import numpy as np\n'), ((12167, 12185), 'numpy.tile', 'np.tile', (['"""b"""', 'nfit'], {}), "('b', nfit)\n", (12174, 12185), True, 'import numpy as np\n'), ((12203, 12221), 'numpy.tile', 'np.tile', (['"""v"""', 'nfit'], {}), "('v', nfit)\n", (12210, 12221), True, 'import numpy as np\n'), ((12237, 12262), 'numpy.append', 'np.append', (['N_tile', 'b_tile'], {}), '(N_tile, b_tile)\n', (12246, 12262), True, 'import numpy as np\n'), ((12284, 12306), 'numpy.append', 'np.append', (['tmp', 'v_tile'], {}), '(tmp, v_tile)\n', (12293, 12306), True, 'import numpy as np\n'), ((12325, 12377), 'corner.corner', 'corner.corner', (['samples'], {'labels': 'text_label', 'truths': 'st'}), '(samples, labels=text_label, truths=st)\n', (12338, 12377), False, 'import corner\n'), ((12421, 12455), 'numpy.percentile', 'np.percentile', (['samples', '(10)'], {'axis': '(0)'}), '(samples, 10, axis=0)\n', (12434, 12455), True, 'import numpy as np\n'), ((12526, 12560), 'numpy.percentile', 'np.percentile', (['samples', '(90)'], {'axis': '(0)'}), '(samples, 90, axis=0)\n', (12539, 12560), True, 'import numpy as np\n'), ((4373, 4408), 'rbvfit.rb_vfit.rb_veldiff', 'rb_veldiff', (['wave_list[i]', 'wave_rest'], {}), '(wave_list[i], wave_rest)\n', (4383, 4408), True, 'from rbvfit.rb_vfit import rb_veldiff as rb_veldiff\n'), ((6625, 6667), 'numpy.percentile', 'np.percentile', (['samples[:, i]', '[16, 50, 84]'], {}), '(samples[:, i], [16, 50, 84])\n', (6638, 6667), True, 'import numpy as np\n'), ((6688, 6701), 'numpy.diff', 'np.diff', (['mcmc'], {}), '(mcmc)\n', (6695, 6701), True, 'import numpy as np\n'), ((10165, 10171), 'multiprocessing.Pool', 'Pool', ([], {}), '()\n', (10169, 10171), False, 'from multiprocessing import Pool\n'), ((10203, 10309), 'emcee.EnsembleSampler', 'emcee.EnsembleSampler', (['nwalkers', 'ndim', 'lnprob'], {'pool': 'pool', 'args': '(lb, ub, model, wave_obs, fnorm, enorm)'}), '(nwalkers, ndim, lnprob, pool=pool, args=(lb, ub,\n model, wave_obs, fnorm, enorm))\n', (10224, 
10309), False, 'import emcee\n'), ((11035, 11056), 'numpy.tile', 'np.tile', (['"""logN"""', 'nfit'], {}), "('logN', nfit)\n", (11042, 11056), True, 'import numpy as np\n'), ((11078, 11096), 'numpy.tile', 'np.tile', (['"""b"""', 'nfit'], {}), "('b', nfit)\n", (11085, 11096), True, 'import numpy as np\n'), ((11118, 11136), 'numpy.tile', 'np.tile', (['"""v"""', 'nfit'], {}), "('v', nfit)\n", (11125, 11136), True, 'import numpy as np\n'), ((11156, 11181), 'numpy.append', 'np.append', (['N_tile', 'b_tile'], {}), '(N_tile, b_tile)\n', (11165, 11181), True, 'import numpy as np\n'), ((11207, 11229), 'numpy.append', 'np.append', (['tmp', 'v_tile'], {}), '(tmp, v_tile)\n', (11216, 11229), True, 'import numpy as np\n'), ((13426, 13436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13434, 13436), True, 'import matplotlib.pyplot as plt\n'), ((3299, 3311), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (3307, 3311), True, 'import numpy as np\n'), ((6887, 6896), 'IPython.display.Math', 'Math', (['txt'], {}), '(txt)\n', (6891, 6896), False, 'from IPython.display import display, Math\n'), ((7459, 7477), 'numpy.log', 'np.log', (['inv_sigma2'], {}), '(inv_sigma2)\n', (7465, 7477), True, 'import numpy as np\n'), ((11299, 11341), 'numpy.percentile', 'np.percentile', (['samples[:, i]', '[16, 50, 84]'], {}), '(samples[:, i], [16, 50, 84])\n', (11312, 11341), True, 'import numpy as np\n'), ((11362, 11375), 'numpy.diff', 'np.diff', (['mcmc'], {}), '(mcmc)\n', (11369, 11375), True, 'import numpy as np\n'), ((12603, 12624), 'numpy.array', 'np.array', (['figure.axes'], {}), '(figure.axes)\n', (12611, 12624), True, 'import numpy as np\n'), ((3651, 3701), 'numpy.str', 'np.str', (["('%.2f' % (high_N[iclump] - best_N[iclump]))"], {}), "('%.2f' % (high_N[iclump] - best_N[iclump]))\n", (3657, 3701), True, 'import numpy as np\n'), ((3962, 4012), 'numpy.str', 'np.str', (["('%.0f' % (high_b[iclump] - best_b[iclump]))"], {}), "('%.0f' % (high_b[iclump] - best_b[iclump]))\n", (3968, 
4012), True, 'import numpy as np\n'), ((5153, 5165), 'numpy.shape', 'np.shape', (['f1'], {}), '(f1)\n', (5161, 5165), True, 'import numpy as np\n'), ((10014, 10035), 'numpy.random.randn', 'np.random.randn', (['ndim'], {}), '(ndim)\n', (10029, 10035), True, 'import numpy as np\n'), ((11543, 11552), 'IPython.display.Math', 'Math', (['txt'], {}), '(txt)\n', (11547, 11552), False, 'from IPython.display import display, Math\n'), ((5573, 5623), 'numpy.str', 'np.str', (["('%.2f' % (high_N[iclump] - best_N[iclump]))"], {}), "('%.2f' % (high_N[iclump] - best_N[iclump]))\n", (5579, 5623), True, 'import numpy as np\n'), ((5915, 5965), 'numpy.str', 'np.str', (["('%.0f' % (high_b[iclump] - best_b[iclump]))"], {}), "('%.0f' % (high_b[iclump] - best_b[iclump]))\n", (5921, 5965), True, 'import numpy as np\n'), ((3588, 3637), 'numpy.str', 'np.str', (["('%.2f' % (best_N[iclump] - low_N[iclump]))"], {}), "('%.2f' % (best_N[iclump] - low_N[iclump]))\n", (3594, 3637), True, 'import numpy as np\n'), ((3899, 3948), 'numpy.str', 'np.str', (["('%.0f' % (best_b[iclump] - low_b[iclump]))"], {}), "('%.0f' % (best_b[iclump] - low_b[iclump]))\n", (3905, 3948), True, 'import numpy as np\n'), ((3545, 3576), 'numpy.str', 'np.str', (["('%.2f' % best_N[iclump])"], {}), "('%.2f' % best_N[iclump])\n", (3551, 3576), True, 'import numpy as np\n'), ((3856, 3887), 'numpy.str', 'np.str', (["('%.0f' % best_b[iclump])"], {}), "('%.0f' % best_b[iclump])\n", (3862, 3887), True, 'import numpy as np\n'), ((5510, 5559), 'numpy.str', 'np.str', (["('%.2f' % (best_N[iclump] - low_N[iclump]))"], {}), "('%.2f' % (best_N[iclump] - low_N[iclump]))\n", (5516, 5559), True, 'import numpy as np\n'), ((5852, 5901), 'numpy.str', 'np.str', (["('%.0f' % (best_b[iclump] - low_b[iclump]))"], {}), "('%.0f' % (best_b[iclump] - low_b[iclump]))\n", (5858, 5901), True, 'import numpy as np\n'), ((5467, 5498), 'numpy.str', 'np.str', (["('%.2f' % best_N[iclump])"], {}), "('%.2f' % best_N[iclump])\n", (5473, 5498), True, 'import numpy as 
np\n'), ((5809, 5840), 'numpy.str', 'np.str', (["('%.0f' % best_b[iclump])"], {}), "('%.0f' % best_b[iclump])\n", (5815, 5840), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) Megvii, Inc. and its affiliates.
import torch
from torch.utils.data.dataloader import DataLoader as torchDataLoader
from torch.utils.data.dataloader import default_collate
import os
import random
from .samplers import YoloBatchSampler
def get_yolox_datadir():
    """Return the YOLOX dataset directory.

    The ``YOLOX_DATADIR`` environment variable takes precedence when set;
    otherwise the ``datasets`` folder next to the installed ``yolox`` package
    is used.
    """
    configured = os.environ.get("YOLOX_DATADIR")
    if configured is not None:
        return configured
    import yolox
    package_root = os.path.dirname(os.path.dirname(yolox.__file__))
    return os.path.join(package_root, "datasets")
class DataLoader(torchDataLoader):
    """
    Lightnet dataloader that enables on the fly resizing of the images.
    See :class:`torch.utils.data.DataLoader` for more information on the arguments.
    Check more on the following website:
    https://gitlab.com/EAVISE/lightnet/-/blob/master/lightnet/data/_dataloading.py
    Note:
        This dataloader only works with :class:`lightnet.data.Dataset` based datasets.
    Example:
        >>> class CustomSet(ln.data.Dataset):
        ...     def __len__(self):
        ...         return 4
        ...     @ln.data.Dataset.resize_getitem
        ...     def __getitem__(self, index):
        ...         # Should return (image, anno) but here we return (input_dim,)
        ...         return (self.input_dim,)
        >>> dl = ln.data.DataLoader(
        ...     CustomSet((200,200)),
        ...     batch_size = 2,
        ...     collate_fn = ln.data.list_collate   # We want the data to be grouped as a list
        ... )
        >>> dl.dataset.input_dim    # Default input_dim
        (200, 200)
        >>> for d in dl:
        ...     d
        [[(200, 200), (200, 200)]]
        [[(200, 200), (200, 200)]]
        >>> dl.change_input_dim(320, random_range=None)
        (320, 320)
        >>> for d in dl:
        ...     d
        [[(320, 320), (320, 320)]]
        [[(320, 320), (320, 320)]]
        >>> dl.change_input_dim((480, 320), random_range=None)
        (480, 320)
        >>> for d in dl:
        ...     d
        [[(480, 320), (480, 320)]]
        [[(480, 320), (480, 320)]]
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.__initialized = False
        shuffle = False
        batch_sampler = None
        # Recover shuffle/sampler/batch_sampler whether they were passed
        # positionally or by keyword. Positions follow torch's DataLoader
        # signature: (dataset, batch_size, shuffle, sampler, batch_sampler, ...),
        # so with *args (self excluded) index 2 is shuffle, 3 sampler, 4 batch_sampler.
        if len(args) > 5:
            shuffle = args[2]
            sampler = args[3]
            batch_sampler = args[4]
        elif len(args) > 4:
            shuffle = args[2]
            sampler = args[3]
            if "batch_sampler" in kwargs:
                batch_sampler = kwargs["batch_sampler"]
        elif len(args) > 3:
            shuffle = args[2]
            if "sampler" in kwargs:
                sampler = kwargs["sampler"]
            if "batch_sampler" in kwargs:
                batch_sampler = kwargs["batch_sampler"]
        else:
            if "shuffle" in kwargs:
                shuffle = kwargs["shuffle"]
            if "sampler" in kwargs:
                sampler = kwargs["sampler"]
            if "batch_sampler" in kwargs:
                batch_sampler = kwargs["batch_sampler"]

        # Use custom BatchSampler: wrap the (possibly default) sampler in a
        # YoloBatchSampler so the batch carries the dataset's input dimension.
        if batch_sampler is None:
            if sampler is None:
                if shuffle:
                    sampler = torch.utils.data.sampler.RandomSampler(self.dataset)
                    # sampler = torch.utils.data.DistributedSampler(self.dataset)
                else:
                    sampler = torch.utils.data.sampler.SequentialSampler(self.dataset)
            batch_sampler = YoloBatchSampler(
                sampler,
                self.batch_size,
                self.drop_last,
                input_dimension=self.dataset.input_dim,
            )
            # batch_sampler = IterationBasedBatchSampler(batch_sampler, num_iterations =

        self.batch_sampler = batch_sampler

        self.__initialized = True

    def close_mosaic(self):
        """Disable mosaic augmentation on the underlying batch sampler."""
        self.batch_sampler.mosaic = False

    def change_input_dim(self, multiple=32, random_range=(10, 19)):
        """This function will compute a new size and update it on the next mini_batch.

        Args:
            multiple (int or tuple, optional): values to multiply the randomly generated range by.
                Default **32**
            random_range (tuple, optional): This (min, max) tuple sets the range
                for the randomisation; Default **(10, 19)**

        Return:
            tuple: width, height tuple with new dimension

        Note:
            The new size is generated as follows: |br|
            First we compute a random integer inside ``[random_range]``.
            We then multiply that number with the ``multiple`` argument,
            which gives our final new input size. |br|
            If ``multiple`` is an integer we generate a square size. If you give a tuple
            of **(width, height)**, the size is computed
            as :math:`rng * multiple[0], rng * multiple[1]`.

        Note:
            You can set the ``random_range`` argument to **None** to set
            an exact size of multiply. |br|
            See the example above for how this works.
        """
        if random_range is None:
            # No randomisation: the new size is exactly `multiple` (times 1).
            size = 1
        else:
            size = random.randint(*random_range)

        if isinstance(multiple, int):
            size = (size * multiple, size * multiple)
        else:
            size = (size * multiple[0], size * multiple[1])

        # The sampler attaches this dimension to the next emitted batch.
        self.batch_sampler.new_input_dim = size

        return size
def list_collate(batch):
    """
    Function that collates lists or tuples together into one list (of lists/tuples).
    Use this as the collate function in a Dataloader, if you want to have a list of
    items as an output, as opposed to tensors (eg. Brambox.boxes).
    """
    collated = []
    for field in zip(*batch):
        # Fields made of lists/tuples (e.g. annotations) stay grouped as a
        # plain list; everything else goes through the default collation.
        if isinstance(field[0], (list, tuple)):
            collated.append(list(field))
        else:
            collated.append(default_collate(field))
    return collated
| [
"torch.utils.data.dataloader.default_collate",
"os.getenv",
"os.path.join",
"torch.utils.data.sampler.SequentialSampler",
"os.path.dirname",
"torch.utils.data.sampler.RandomSampler",
"random.randint"
] | [((551, 583), 'os.getenv', 'os.getenv', (['"""YOLOX_DATADIR"""', 'None'], {}), "('YOLOX_DATADIR', None)\n", (560, 583), False, 'import os\n'), ((735, 771), 'os.path.join', 'os.path.join', (['yolox_path', '"""datasets"""'], {}), "(yolox_path, 'datasets')\n", (747, 771), False, 'import os\n'), ((677, 708), 'os.path.dirname', 'os.path.dirname', (['yolox.__file__'], {}), '(yolox.__file__)\n', (692, 708), False, 'import os\n'), ((5603, 5632), 'random.randint', 'random.randint', (['*random_range'], {}), '(*random_range)\n', (5617, 5632), False, 'import random\n'), ((6362, 6387), 'torch.utils.data.dataloader.default_collate', 'default_collate', (['items[i]'], {}), '(items[i])\n', (6377, 6387), False, 'from torch.utils.data.dataloader import default_collate\n'), ((3586, 3638), 'torch.utils.data.sampler.RandomSampler', 'torch.utils.data.sampler.RandomSampler', (['self.dataset'], {}), '(self.dataset)\n', (3624, 3638), False, 'import torch\n'), ((3776, 3832), 'torch.utils.data.sampler.SequentialSampler', 'torch.utils.data.sampler.SequentialSampler', (['self.dataset'], {}), '(self.dataset)\n', (3818, 3832), False, 'import torch\n')] |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = r'''
---
module: ec2_vpc_vpn_info
version_added: 1.0.0
short_description: Gather information about VPN Connections in AWS.
description:
- Gather information about VPN Connections in AWS.
- This module was called C(ec2_vpc_vpn_facts) before Ansible 2.9. The usage did not change.
requirements: [ boto3 ]
author: <NAME> (@Madhura-CSI)
options:
filters:
description:
- A dict of filters to apply. Each dict item consists of a filter key and a filter value.
See U(https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeVpnConnections.html) for possible filters.
required: false
type: dict
vpn_connection_ids:
description:
- Get details of a specific VPN connections using vpn connection ID/IDs. This value should be provided as a list.
required: false
type: list
elements: str
extends_documentation_fragment:
- amazon.aws.aws
- amazon.aws.ec2
'''
EXAMPLES = r'''
# # Note: These examples do not set authentication details, see the AWS Guide for details.
- name: Gather information about all vpn connections
community.aws.ec2_vpc_vpn_info:
- name: Gather information about a filtered list of vpn connections, based on tags
community.aws.ec2_vpc_vpn_info:
filters:
"tag:Name": test-connection
register: vpn_conn_info
- name: Gather information about vpn connections by specifying connection IDs.
community.aws.ec2_vpc_vpn_info:
filters:
vpn-gateway-id: vgw-cbe66beb
register: vpn_conn_info
'''
RETURN = r'''
vpn_connections:
description: List of one or more VPN Connections.
returned: always
type: complex
contains:
category:
description: The category of the VPN connection.
returned: always
type: str
sample: VPN
    customer_gateway_configuration:
description: The configuration information for the VPN connection's customer gateway (in the native XML format).
returned: always
type: str
customer_gateway_id:
description: The ID of the customer gateway at your end of the VPN connection.
returned: always
type: str
sample: cgw-17a53c37
options:
description: The VPN connection options.
returned: always
type: dict
sample: {
"static_routes_only": false
}
routes:
description: List of static routes associated with the VPN connection.
returned: always
type: complex
contains:
destination_cidr_block:
description: The CIDR block associated with the local subnet of the customer data center.
returned: always
type: str
sample: 10.0.0.0/16
state:
description: The current state of the static route.
returned: always
type: str
sample: available
state:
description: The current state of the VPN connection.
returned: always
type: str
sample: available
tags:
description: Any tags assigned to the VPN connection.
returned: always
type: dict
sample: {
"Name": "test-conn"
}
type:
description: The type of VPN connection.
returned: always
type: str
sample: ipsec.1
vgw_telemetry:
description: Information about the VPN tunnel.
returned: always
type: complex
contains:
accepted_route_count:
description: The number of accepted routes.
returned: always
type: int
sample: 0
last_status_change:
description: The date and time of the last change in status.
returned: always
type: str
sample: "2018-02-09T14:35:27+00:00"
outside_ip_address:
description: The Internet-routable IP address of the virtual private gateway's outside interface.
returned: always
type: str
sample: 13.127.79.191
status:
description: The status of the VPN tunnel.
returned: always
type: str
sample: DOWN
status_message:
description: If an error occurs, a description of the error.
returned: always
type: str
sample: IPSEC IS DOWN
certificate_arn:
description: The Amazon Resource Name of the virtual private gateway tunnel endpoint certificate.
returned: when a private certificate is used for authentication
type: str
sample: "arn:aws:acm:us-east-1:123456789101:certificate/c544d8ce-20b8-4fff-98b0-example"
vpn_connection_id:
description: The ID of the VPN connection.
returned: always
type: str
sample: vpn-f700d5c0
vpn_gateway_id:
description: The ID of the virtual private gateway at the AWS side of the VPN connection.
returned: always
type: str
sample: vgw-cbe56bfb
'''
import json
try:
from botocore.exceptions import ClientError, BotoCoreError
except ImportError:
pass # caught by AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule
from ansible_collections.amazon.aws.plugins.module_utils.ec2 import (ansible_dict_to_boto3_filter_list,
boto3_tag_list_to_ansible_dict,
camel_dict_to_snake_dict,
)
def date_handler(obj):
    """JSON serializer hook: render datetime-like objects as ISO-8601 strings.

    Any object exposing ``isoformat`` is converted; everything else is
    returned unchanged.
    """
    if hasattr(obj, 'isoformat'):
        return obj.isoformat()
    return obj
def list_vpn_connections(connection, module):
    """Describe VPN connections matching the module's filters and exit.

    Builds the boto3 filter list from the module parameters, round-trips the
    response through JSON (serializing datetimes via ``date_handler``),
    snake-cases the result, and exits the module with the connection list.
    """
    params = {
        'Filters': ansible_dict_to_boto3_filter_list(module.params.get('filters')),
        'VpnConnectionIds': module.params.get('vpn_connection_ids'),
    }

    try:
        raw = connection.describe_vpn_connections(**params)
        result = json.loads(json.dumps(raw, default=date_handler))
    except ValueError as e:
        module.fail_json_aws(e, msg="Cannot validate JSON data")
    except (ClientError, BotoCoreError) as e:
        module.fail_json_aws(e, msg="Could not describe customer gateways")

    vpn_connections = [camel_dict_to_snake_dict(conn) for conn in result['VpnConnections']]
    for conn in vpn_connections:
        # boto3 returns tags as a list of {Key, Value}; flatten to a dict.
        conn['tags'] = boto3_tag_list_to_ansible_dict(conn.get('tags', []))

    module.exit_json(changed=False, vpn_connections=vpn_connections)
def main():
    """Entry point: parse arguments, warn on the legacy name, run the lookup."""
    argument_spec = dict(
        vpn_connection_ids=dict(default=[], type='list', elements='str'),
        filters=dict(default={}, type='dict')
    )

    module = AnsibleAWSModule(
        argument_spec=argument_spec,
        mutually_exclusive=[['vpn_connection_ids', 'filters']],
        supports_check_mode=True,
    )

    # Emit a deprecation warning when invoked under the pre-2.9 module name.
    if module._module._name == 'ec2_vpc_vpn_facts':
        module._module.deprecate(
            "The 'ec2_vpc_vpn_facts' module has been renamed to 'ec2_vpc_vpn_info'",
            date='2021-12-01', collection_name='community.aws')

    list_vpn_connections(module.client('ec2'), module)
if __name__ == '__main__':
main()
| [
"ansible_collections.amazon.aws.plugins.module_utils.ec2.camel_dict_to_snake_dict",
"ansible_collections.amazon.aws.plugins.module_utils.core.AnsibleAWSModule"
] | [((7229, 7361), 'ansible_collections.amazon.aws.plugins.module_utils.core.AnsibleAWSModule', 'AnsibleAWSModule', ([], {'argument_spec': 'argument_spec', 'mutually_exclusive': "[['vpn_connection_ids', 'filters']]", 'supports_check_mode': '(True)'}), "(argument_spec=argument_spec, mutually_exclusive=[[\n 'vpn_connection_ids', 'filters']], supports_check_mode=True)\n", (7245, 7361), False, 'from ansible_collections.amazon.aws.plugins.module_utils.core import AnsibleAWSModule\n'), ((6698, 6738), 'ansible_collections.amazon.aws.plugins.module_utils.ec2.camel_dict_to_snake_dict', 'camel_dict_to_snake_dict', (['vpn_connection'], {}), '(vpn_connection)\n', (6722, 6738), False, 'from ansible_collections.amazon.aws.plugins.module_utils.ec2 import ansible_dict_to_boto3_filter_list, boto3_tag_list_to_ansible_dict, camel_dict_to_snake_dict\n')] |
import discord
from discord.ext import commands
from pathlib import Path
from config import bot
from collections import OrderedDict
import json
class RoleSelector(commands.Cog):
    def __init__(self, bot):
        """Keep a reference to the bot and the path of the message-id store."""
        self.bot = bot
        # JSON file remembering the id of the posted role-selector message.
        self.messages_path = str(Path('cogs/data/messages.json'))
async def opener(self):
with open(self.messages_path, 'r') as f:
return json.load(f)
async def closer(self, messages):
with open(self.messages_path, 'w') as f:
json.dump(messages, f)
    @commands.Cog.listener()
    async def on_ready(self):
        """(Re)publish the role-selector embed in the #roles channel on startup.

        Reuses the message id persisted in messages.json when possible so
        existing reactions survive restarts; otherwise posts a new message
        and stores its id. Finally seeds the message with one reaction per role.
        """
        emojis = self.emoji_selector(self.bot.guilds[0].id)
        channel = discord.utils.get(self.bot.get_all_channels(), name='roles')
        text = await self.embeder(self.data(emojis))
        messages = await self.opener()
        try:
            # Edit the previously posted selector message if it still exists.
            self.msg = await channel.fetch_message(messages['role_message']['id'])
            await self.msg.edit(embed=text)
        except:
            # No stored id / message deleted: post fresh and persist the new id.
            print("Role Message hasn't been added yet")
            self.msg = await channel.send(embed=text)
            messages['role_message'] = {}
            messages['role_message']['id'] = self.msg.id
            await self.closer(messages)
        for emoji in emojis.values():
            await self.msg.add_reaction(emoji=emoji)
    @commands.Cog.listener(name='on_raw_reaction_add')
    async def role_reaction_add(self, payload):
        """Toggle a member's role when they react on the selector message.

        Prerequisites enforced: 'mission-maker' requires a recorded Saturday-op
        attendance; 'auditor' requires the mission-maker role. The triggering
        reaction is always removed so the message stays clean.
        """
        try:
            if payload.message_id != self.msg.id:
                return
        except AttributeError:
            # self.msg not set yet (on_ready hasn't run) -> ignore reactions.
            return
        guild = self.bot.get_guild(payload.guild_id)
        user = guild.get_member(payload.user_id)
        if user.id == self.bot.user.id:
            return
        emojis = self.emoji_selector(guild.id)
        # Custom emoji stringify as '<:name:id>'; strip markers for matching.
        clean_emoji = str(payload.emoji).strip('<:>')
        for k, v in emojis.items():
            # Substring match: v is either a unicode char or a 'name:id' pair.
            if v in clean_emoji:
                role = discord.utils.get(user.guild.roles, name=k)
                if 'mission-maker' in k:
                    results = await self.saturday_check()
                    if user.id not in results:
                        await self.msg.remove_reaction(v, user)
                        return
                if 'auditor' in k:
                    role_mm = discord.utils.get(user.guild.roles, name='mission-maker')
                    if role_mm not in user.roles:
                        await self.msg.remove_reaction(v, user)
                        return
                # Toggle: remove the role if held, grant it otherwise.
                if role in user.roles:
                    await user.remove_roles(role)
                else:
                    await user.add_roles(role)
                await self.msg.remove_reaction(v, user)
async def saturday_check(self):
results = await self.bot.conn.fetch("""
SELECT user_id FROM attendance""")
id_list = [x["user_id"] for x in results]
return id_list
    async def embeder(self, msg_embed):
        """Build the role-selector discord.Embed from data prepared by ``data()``.

        NOTE(review): the ``msg_embed`` argument is ignored; this method reads
        ``self.msg_embed`` and ``self.field_dict`` set as side effects of
        ``data()`` (and ``self.footer``, presumably set there too — verify).
        """
        em = discord.Embed(
            title=self.msg_embed['title'], description=self.msg_embed['description'], color=0x008080)
        em.set_thumbnail(url=self.msg_embed['thumbnail'])
        for value in self.field_dict.values():
            em.add_field(name=value['name'], value=value['value'], inline=False)
        em.set_footer(text=self.footer['footer'])
        return em
def emoji_selector(self, guild):
    """Return the role-name -> emoji mapping for the given guild id.

    The main TCS guild gets its own custom emoji set; every other guild
    falls back to a generic set with the same role keys in the same order.
    """
    tcs_guild_id = 169696752461414401
    if guild == tcs_guild_id:
        pairs = [
            ('mission-maker', 'feelscornman:485958281458876416'),
            ('auditor', '\U0001F913'),
            ('heretic', '\U0001f300'),
            ('liberation', 'finger_gun:300089586460131328'),
            ('r6siege', '\U0001f308'),
            ('ricefields', 'rice_fields:483791993370181632'),
            ('minecraft', '\U000026cf'),
            ('flight-sims', '\U0001f525'),
            ('vr', 'iron_uncle:548645154454765568'),
            ('zeus-op', '\U000026a1'),
            ('4x', '\U0001f3ed'),
            ('rts', 'smoothbrain:592115163390410783'),
            ('destiny-2', '\U0001f47e'),
            ('squad', 'CplChad:409868955239579649'),
            ('zomboid', 'the_devil:663562931681624081'),
        ]
    else:
        pairs = [
            ('mission-maker', 'uncle:567728566540697635'),
            ('auditor', '\U0001F913'),
            ('heretic', '\U0001f300'),
            ('liberation', 'snek_uncle:567728565781528576'),
            ('r6siege', '\U0001f3c3'),
            ('ricefields', 'shadow_uncle:567728565248851989'),
            ('minecraft', '\U000026cf'),
            ('flight-sims', '\U0001f525'),
            ('vr', 'jensen_uncle:567728565391589399'),
            ('zeus-op', '\U000026a1'),
            ('4x', '\U0001f3ed'),
            ('rts', 'fast_uncle:567728565525807104'),
            ('destiny-2', '\U0001f47e'),
            ('squad', 'uncle_uncle:567728565785985025'),
            ('zomboid', 'uncle_hacker:567728565798567940'),
        ]
    return OrderedDict(pairs)
def data(self, emojis):
    """Populate the embed content on self (msg_embed, field_dict, footer).

    Args:
        emojis: mapping of role name -> emoji string, as returned by
            emoji_selector(); used to render each field title.

    Note: builds state on ``self`` and returns ``None``.
    """
    # Header block (title, description, thumbnail) of the selector embed.
    self.msg_embed = OrderedDict([
        ('title', '**TCS Role Selector**'),
        ('description', '''Use this tool to select optional Discord roles.\n\n'''
            '''**DO NOT ABUSE THE BOT!**\n'''
            '''\u200B'''),
        ('thumbnail', 'https://s3.amazonaws.com/files.enjin.com/1015535/site_logo/2020_logo.png')
    ])
    # One embed field per selectable role: 'name' is the rendered field
    # title (with emoji), 'value' is the markdown description.
    self.field_dict = OrderedDict([
        ('mission_maker', OrderedDict([
            ('name', '<:{}> @mission-maker'.format(emojis['mission-maker'])),
            ('value', '''Provides access to our mission making channels, which *MAY HAVE SPOILERS*.\n\n'''
                '''__**REQUIREMENTS**__\n'''
                '''**__1.)__** You **MUST** attend a Saturday Op before taking this role.\n'''
                '''**__2.)__** **ONLY** select this role if you plan on making missions for TCS.\n'''
                '''**__3.)__** **DO NOT** use this role to provide feedback or suggestions in the mission making channel, use **#debriefing**.\n'''
                '''**__4.)__** Understand that we make missions differently than other units.\n'''
                '''**__5.)__** Understand that this is not an easy job and you might not get it right the first time.\n'''
                '''\u200B''')])
        ),
        ('auditor', OrderedDict([
            ('name', '{} @auditor'.format(emojis['auditor'])),
            ('value', '''Allows other mission makers to ping you to check their missions for errors. *(requires @mission-maker tag)*\n''')])
        ),
        ('heretic', OrderedDict([
            ('name', '{} @heretic'.format(emojis['heretic'])),
            ('value', '''Provides access to the **#heresy** channel.\n'''
                '''*A place for Warhammer 40K discussion and shitposting.*''')])
        ),
        ('liberation', OrderedDict([
            ('name', '<:{}> @liberation'.format(emojis['liberation'])),
            ('value', '''Allows other members to ping you to play *Arma 3 Liberation* on our server.''')])
        ),
        ('r6siege', OrderedDict([
            ('name', '{} @r6siege'.format(emojis['r6siege'])),
            ('value', '''Allows other members to ping you to play *Rainbow Six Siege*.''')])
        ),
        ('ricefields', OrderedDict([
            ('name', '<:{}> @ricefields'.format(emojis['ricefields'])),
            ('value', '''Allows other members to ping you to play *Rising Storm 2: Vietnam*.''')])
        ),
        ('minecraft', OrderedDict([
            ('name', '{} @minecraft'.format(emojis['minecraft'])),
            ('value', '''Allows other members to ping you to play *Minecraft* on our server.''')])
        ),
        ('flight_sims', OrderedDict([
            ('name', '{} @flight-sims'.format(emojis['flight-sims'])),
            ('value', '''Allows other members to ping you to play *DCS* or *IL2*.''')])
        ),
        ('vr', OrderedDict([
            ('name', '<:{}> @vr'.format(emojis['vr'])),
            ('value', '''Allows other members to ping you to play any *Virtual Reality Games*.''')])
        ),
        ('zeus-op', OrderedDict([
            ('name', '{} @zeus-op'.format(emojis['zeus-op'])),
            ('value', '''Allows other members to ping you to play *Impromptu Zeus Missions*.\n\n'''
                '''__**RULES**__\n'''
                '''**__1.)__** Don't expect someone to step-up as Zeus.\n'''
                '''**__2.)__** Zeus has final say on what's allowed in their mission.\n'''
                '''\u200B''')])
        ),
        ('4x', OrderedDict([
            ('name', '{} @4x'.format(emojis['4x'])),
            ('value', '''Allows other members to ping you to play *4X Games*.\n\n'''
                '''__**Active Games**__\n'''
                '''> *Hearts of Iron 4*\n'''
                '''> *Stellaris*\n'''
                '''\u200B''')])
        ),
        ('rts', OrderedDict([
            ('name', '<:{}> @rts'.format(emojis['rts'])),
            ('value', '''Allows other members to ping you to play *RTS Games*.\n\n'''
                '''__**Active Games**__\n'''
                '''> *Wargame: Red Dragon*\n'''
                '''> *Wargame: War in the East*\n'''
                '''> *Men of War: Assault Squad 2*\n'''
                '''> *StarCraft 2*\n'''
                '''\u200B''')])
        ),
        ('destiny-2', OrderedDict([
            ('name', '{} @destiny-2'.format(emojis['destiny-2'])),
            ('value', '''Allows other members to ping you to play *Destiny 2*.\n\n'''
                )])
        ),
        ('squad', OrderedDict([
            ('name', '<:{}> @squad'.format(emojis['squad'])),
            ('value', '''Allows other members to ping you to play *Squad*.\n\n'''
                )])
        ),
        ('zomboid', OrderedDict([
            ('name', '<:{}> @zomboid'.format(emojis['zomboid'])),
            ('value', '''Allows other members to ping you to play organized *Project Zomboid*.\n\n'''
                )])
        )
    ])
    # Footer instruction shown under the embed.
    self.footer = OrderedDict([
        ('footer', '''React to toggle role on/off''')
    ])
def setup(bot):
    # Standard discord.py extension entry point: register the cog.
    bot.add_cog(RoleSelector(bot))
| [
"discord.ext.commands.Cog.listener",
"collections.OrderedDict",
"pathlib.Path",
"discord.utils.get",
"json.load",
"discord.Embed",
"json.dump"
] | [((537, 560), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {}), '()\n', (558, 560), False, 'from discord.ext import commands\n'), ((1312, 1361), 'discord.ext.commands.Cog.listener', 'commands.Cog.listener', ([], {'name': '"""on_raw_reaction_add"""'}), "(name='on_raw_reaction_add')\n", (1333, 1361), False, 'from discord.ext import commands\n'), ((2922, 3027), 'discord.Embed', 'discord.Embed', ([], {'title': "self.msg_embed['title']", 'description': "self.msg_embed['description']", 'color': '(32896)'}), "(title=self.msg_embed['title'], description=self.msg_embed[\n 'description'], color=32896)\n", (2935, 3027), False, 'import discord\n'), ((5125, 5384), 'collections.OrderedDict', 'OrderedDict', (['[(\'title\', \'**TCS Role Selector**\'), (\'description\',\n """Use this tool to select optional Discord roles.\n\n**DO NOT ABUSE THE BOT!**\n\u200b"""\n ), (\'thumbnail\',\n \'https://s3.amazonaws.com/files.enjin.com/1015535/site_logo/2020_logo.png\')\n ]'], {}), '([(\'title\', \'**TCS Role Selector**\'), (\'description\',\n """Use this tool to select optional Discord roles.\n\n**DO NOT ABUSE THE BOT!**\n\u200b"""\n ), (\'thumbnail\',\n \'https://s3.amazonaws.com/files.enjin.com/1015535/site_logo/2020_logo.png\'\n )])\n', (5136, 5384), False, 'from collections import OrderedDict\n'), ((10319, 10375), 'collections.OrderedDict', 'OrderedDict', (["[('footer', 'React to toggle role on/off')]"], {}), "([('footer', 'React to toggle role on/off')])\n", (10330, 10375), False, 'from collections import OrderedDict\n'), ((265, 296), 'pathlib.Path', 'Path', (['"""cogs/data/messages.json"""'], {}), "('cogs/data/messages.json')\n", (269, 296), False, 'from pathlib import Path\n'), ((395, 407), 'json.load', 'json.load', (['f'], {}), '(f)\n', (404, 407), False, 'import json\n'), ((508, 530), 'json.dump', 'json.dump', (['messages', 'f'], {}), '(messages, f)\n', (517, 530), False, 'import json\n'), ((3392, 3905), 'collections.OrderedDict', 'OrderedDict', 
(["[('mission-maker', 'feelscornman:485958281458876416'), ('auditor', '🤓'), (\n 'heretic', '🌀'), ('liberation', 'finger_gun:300089586460131328'), (\n 'r6siege', '🌈'), ('ricefields', 'rice_fields:483791993370181632'), (\n 'minecraft', '⛏'), ('flight-sims', '🔥'), ('vr',\n 'iron_uncle:548645154454765568'), ('zeus-op', '⚡'), ('4x', '🏭'), ('rts',\n 'smoothbrain:592115163390410783'), ('destiny-2', '👾'), ('squad',\n 'CplChad:409868955239579649'), ('zomboid', 'the_devil:663562931681624081')]"], {}), "([('mission-maker', 'feelscornman:485958281458876416'), (\n 'auditor', '🤓'), ('heretic', '🌀'), ('liberation',\n 'finger_gun:300089586460131328'), ('r6siege', '🌈'), ('ricefields',\n 'rice_fields:483791993370181632'), ('minecraft', '⛏'), ('flight-sims',\n '🔥'), ('vr', 'iron_uncle:548645154454765568'), ('zeus-op', '⚡'), ('4x',\n '🏭'), ('rts', 'smoothbrain:592115163390410783'), ('destiny-2', '👾'), (\n 'squad', 'CplChad:409868955239579649'), ('zomboid',\n 'the_devil:663562931681624081')])\n", (3403, 3905), False, 'from collections import OrderedDict\n'), ((4237, 4754), 'collections.OrderedDict', 'OrderedDict', (["[('mission-maker', 'uncle:567728566540697635'), ('auditor', '🤓'), (\n 'heretic', '🌀'), ('liberation', 'snek_uncle:567728565781528576'), (\n 'r6siege', '🏃'), ('ricefields', 'shadow_uncle:567728565248851989'), (\n 'minecraft', '⛏'), ('flight-sims', '🔥'), ('vr',\n 'jensen_uncle:567728565391589399'), ('zeus-op', '⚡'), ('4x', '🏭'), (\n 'rts', 'fast_uncle:567728565525807104'), ('destiny-2', '👾'), ('squad',\n 'uncle_uncle:567728565785985025'), ('zomboid',\n 'uncle_hacker:567728565798567940')]"], {}), "([('mission-maker', 'uncle:567728566540697635'), ('auditor', '🤓'\n ), ('heretic', '🌀'), ('liberation', 'snek_uncle:567728565781528576'), (\n 'r6siege', '🏃'), ('ricefields', 'shadow_uncle:567728565248851989'), (\n 'minecraft', '⛏'), ('flight-sims', '🔥'), ('vr',\n 'jensen_uncle:567728565391589399'), ('zeus-op', '⚡'), ('4x', '🏭'), (\n 'rts', 'fast_uncle:567728565525807104'), 
('destiny-2', '👾'), ('squad',\n 'uncle_uncle:567728565785985025'), ('zomboid',\n 'uncle_hacker:567728565798567940')])\n", (4248, 4754), False, 'from collections import OrderedDict\n'), ((1900, 1943), 'discord.utils.get', 'discord.utils.get', (['user.guild.roles'], {'name': 'k'}), '(user.guild.roles, name=k)\n', (1917, 1943), False, 'import discord\n'), ((2250, 2307), 'discord.utils.get', 'discord.utils.get', (['user.guild.roles'], {'name': '"""mission-maker"""'}), "(user.guild.roles, name='mission-maker')\n", (2267, 2307), False, 'import discord\n')] |
#!/usr/bin/env python3
from .base_test import BaseTest
from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk
class TestRendererDataclasses(BaseTest):
    """Tests for CodeChunk: the generated source is loaded as a module and
    its behaviour is asserted (rather than comparing raw strings)."""

    def test_codegen_write_simple_strings(self):
        """write() + indent() produce a runnable function body."""
        gen = CodeChunk()
        gen.write('def sum(a, b):')
        gen.indent()
        gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_template_strings_args(self):
        """write() formats positional template arguments."""
        gen = CodeChunk()
        gen.write('def {0}(a, b):', 'sum')
        gen.indent()
        gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_template_strings_kwargs(self):
        """write() formats keyword template arguments."""
        gen = CodeChunk()
        gen.write('def {method}(a, b):', method='sum')
        gen.indent()
        gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_block(self):
        """block() context manager indents its body."""
        gen = CodeChunk()
        gen.write('def sum(a, b):')
        with gen.block():
            gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_block(self):
        """write_block() writes the header and indents its body."""
        gen = CodeChunk()
        with gen.write_block('def {name}(a, b):', name='sum'):
            gen.write('return a + b')
        code = str(gen)
        m = self.load_module(code)
        assert m.sum(2, 3) == 5

    def test_codegen_write_lines(self):
        """write_lines() emits each element on its own line.

        BUG FIX: the original list was missing the comma after
        'def sum(a, b):', so implicit string concatenation silently merged
        the def line and its body into a single element; the test only
        passed by accident (a one-line def is still valid Python).
        """
        lines = [
            '@staticmethod',
            'def sum(a, b):',
            '    return a + b',
        ]
        gen = CodeChunk()
        gen.write('class Math:')
        gen.indent()
        gen.write_lines(lines)
        code = str(gen)
        m = self.load_module(code)
        assert m.Math.sum(2, 3) == 5
| [
"fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk"
] | [((236, 247), 'fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk', 'CodeChunk', ([], {}), '()\n', (245, 247), False, 'from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk\n'), ((503, 514), 'fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk', 'CodeChunk', ([], {}), '()\n', (512, 514), False, 'from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk\n'), ((779, 790), 'fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk', 'CodeChunk', ([], {}), '()\n', (788, 790), False, 'from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk\n'), ((1043, 1054), 'fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk', 'CodeChunk', ([], {}), '()\n', (1052, 1054), False, 'from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk\n'), ((1303, 1314), 'fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk', 'CodeChunk', ([], {}), '()\n', (1312, 1314), False, 'from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk\n'), ((1681, 1692), 'fbc.symphony.cli.graphql_compiler.gql.utils_codegen.CodeChunk', 'CodeChunk', ([], {}), '()\n', (1690, 1692), False, 'from fbc.symphony.cli.graphql_compiler.gql.utils_codegen import CodeChunk\n')] |
from flask import Flask
def create_app():
    """Application factory: build and configure the Flask app."""
    application = Flask(__name__)
    # Keep non-ASCII characters intact in JSON responses.
    application.config['JSON_AS_ASCII'] = False
    # Imported here to avoid a circular import at module load time.
    from .views import app as main_app
    application.register_blueprint(main_app)
    return application
| [
"flask.Flask"
] | [((54, 69), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (59, 69), False, 'from flask import Flask\n')] |
from numpy import logspace
from sys import path as sysPath

# Make the local interfacePy build importable before importing it.
sysPath.append('../../src')

#load the module
from interfacePy import Cosmo

# Cosmology helper backed by the tabulated 2020 equation-of-state data.
# NOTE(review): the trailing (0, 1e5) arguments are presumably the valid
# temperature range in GeV -- confirm against the Cosmo constructor.
cosmo=Cosmo('../../src/data/eos2020.dat',0,1e5)

# Print background quantities over ten decades of temperature (GeV).
for T in logspace(-5,5,50):
    print(
        'T=',T,'GeV\t',
        'H=',cosmo.Hubble(T),'GeV\t',
        'h_eff=',cosmo.heff(T),'\t',
        'g_eff=',cosmo.geff(T),'\t',
        's=',cosmo.s(T),'GeV^3\t',
    )
)
# Toggle for the optional diagnostic plots below.  A named constant replaces
# the original bare `if False:` dead-code switch, which hides intent and
# trips linters; flip to True to regenerate the example PDFs.
MAKE_PLOTS = False

if MAKE_PLOTS:
    import matplotlib.pyplot as plt

    #########-----g_eff and h_eff-----#########
    # Relativistic degrees of freedom (energy and entropy) vs temperature.
    fig=plt.figure(figsize=(9,4))
    fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
    fig.suptitle('')
    sub = fig.add_subplot(1,1,1)
    T=logspace(-5,5,500)
    gt=[cosmo.geff(i) for i in T]
    ht=[cosmo.heff(i) for i in T]
    sub.plot(T,gt,linestyle='--',c='xkcd:red',label=r"$g_{\rm eff} (T)$")
    sub.plot(T,ht,linestyle=':',c='xkcd:black',label=r"$h_{\rm eff} (T)$")
    sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
    sub.set_ylabel(r'rel. dof')
    sub.legend(bbox_to_anchor=(1, 0.0),borderaxespad=0.,
               borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
    sub.set_yscale('log')
    sub.set_xscale('log')
    fig.savefig('rdofs-T_examplePlot.pdf',bbox_inches='tight')

    #########-----dg_effdT and dh_effdT-----#########
    # Temperature derivatives of the relativistic degrees of freedom.
    fig=plt.figure(figsize=(9,4))
    fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
    fig.suptitle('')
    sub = fig.add_subplot(1,1,1)
    T=logspace(-5,5,500)
    dg=[cosmo.dgeffdT (i) for i in T]
    dh=[cosmo.dheffdT(i) for i in T]
    sub.plot(T,dg,linestyle='--',c='xkcd:red',label=r"$\dfrac{d g_{\rm eff}}{dT} (T)$")
    sub.plot(T,dh,linestyle=':',c='xkcd:black',label=r"$\dfrac{d h_{\rm eff}}{dT} (T)$")
    sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
    sub.legend(bbox_to_anchor=(1, 0.5),borderaxespad=0.,
               borderpad=0.05,ncol=1,loc='lower right',fontsize=14,framealpha=0)
    # symlog: the derivatives change sign, so a plain log axis would fail.
    sub.set_yscale('symlog')
    sub.set_xscale('log')
    fig.savefig('drdofsdT-T_examplePlot.pdf',bbox_inches='tight')

    #########-----dh-----#########
    # Entropic correction factor delta_h vs temperature.
    fig=plt.figure(figsize=(9,4))
    fig.subplots_adjust(bottom=0.15, left=0.15, top = 0.95, right=0.9,wspace=0.0,hspace=0.0)
    fig.suptitle('')
    sub = fig.add_subplot(1,1,1)
    T=logspace(-5,5,500)
    dht=[cosmo.dh(i) for i in T]
    sub.plot(T,dht,linestyle='-',c='xkcd:black')
    sub.set_xlabel(r'$T ~ [{\rm GeV}]$')
    sub.set_ylabel(r'$\delta_h = 1 + \dfrac{1}{3} \dfrac{d \log h_{\rm eff} }{d \log T}$')
    sub.set_yscale('linear')
    sub.set_xscale('log')
    fig.savefig('dh-T_examplePlot.pdf',bbox_inches='tight')
| [
"matplotlib.pyplot.figure",
"numpy.logspace",
"sys.path.append",
"interfacePy.Cosmo"
] | [((61, 88), 'sys.path.append', 'sysPath.append', (['"""../../src"""'], {}), "('../../src')\n", (75, 88), True, 'from sys import path as sysPath\n'), ((145, 193), 'interfacePy.Cosmo', 'Cosmo', (['"""../../src/data/eos2020.dat"""', '(0)', '(100000.0)'], {}), "('../../src/data/eos2020.dat', 0, 100000.0)\n", (150, 193), False, 'from interfacePy import Cosmo\n'), ((197, 216), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(50)'], {}), '(-5, 5, 50)\n', (205, 216), False, 'from numpy import logspace\n'), ((510, 536), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (520, 536), True, 'import matplotlib.pyplot as plt\n'), ((691, 711), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (699, 711), False, 'from numpy import logspace\n'), ((1321, 1347), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (1331, 1347), True, 'import matplotlib.pyplot as plt\n'), ((1502, 1522), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (1510, 1522), False, 'from numpy import logspace\n'), ((2121, 2147), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 4)'}), '(figsize=(9, 4))\n', (2131, 2147), True, 'import matplotlib.pyplot as plt\n'), ((2302, 2322), 'numpy.logspace', 'logspace', (['(-5)', '(5)', '(500)'], {}), '(-5, 5, 500)\n', (2310, 2322), False, 'from numpy import logspace\n')] |
#!/usr/bin/env python3
import argparse
import logging
import sys
import zlib
sys.path.append("../..")
from bento.client.api import ClientConnection
from bento.common.protocol import *
import bento.common.util as util
function_name= "browser"
function_code= """
import requests
import zlib
import os
def browser(url, padding):
body= requests.get(url, timeout=1).content
compressed= zlib.compress(body)
final= compressed
if padding - len(final) > 0:
final= final + (os.urandom(padding - len(final)))
else:
final= final + (os.urandom((len(final) + padding) % padding))
api.send(final)
"""
@util.timeit
def main():
    """CLI entry point: store the `browser` function on a Bento server,
    execute it against the given URL, and print the decompressed body."""
    logging.basicConfig(format='%(levelname)s:\t%(message)s',
            level=logging.DEBUG)
    parser = argparse.ArgumentParser(
        description='Fetch a website and pad response with dummy bytes')
    parser.add_argument('host', help="server's IPv4 address")
    parser.add_argument('port', type=int, help="server's port")
    parser.add_argument('url', help="URL to fetch")
    parser.add_argument('padding', help="pad URL body to ne")
    args = parser.parse_args()

    conn= ClientConnection(args.host, args.port)
    # Upload the function; the token identifies the stored code server-side.
    token, errmsg= conn.send_store_request(function_name, function_code)
    if errmsg is not None:
        util.fatal(f"Error message from server {errmsg}")
    logging.debug(f"Got token: {token}")

    # Ask the server to run browser(url, padding); a session carries results.
    call= f"{function_name}('{args.url}', {args.padding})"
    session_id, errmsg= conn.send_execute_request(call, token)
    if errmsg is not None:
        util.fatal(f"Error message from server {errmsg}")
    logging.debug(f"Got session_id: {session_id}")

    logging.debug("Getting output...")
    conn.send_open_request(session_id)
    data, session_id, err= conn.get_sessionmsg()
    # zlib.decompress stops at the end of the compressed stream, so the
    # random padding appended server-side is ignored here.
    print(zlib.decompress(data))


if __name__ == '__main__':
    main()
| [
"logging.basicConfig",
"logging.debug",
"argparse.ArgumentParser",
"bento.common.util.fatal",
"bento.client.api.ClientConnection",
"sys.path.append",
"zlib.decompress"
] | [((79, 103), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (94, 103), False, 'import sys\n'), ((661, 739), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:\t%(message)s"""', 'level': 'logging.DEBUG'}), "(format='%(levelname)s:\\t%(message)s', level=logging.DEBUG)\n", (680, 739), False, 'import logging\n'), ((766, 859), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Fetch a website and pad response with dummy bytes"""'}), "(description=\n 'Fetch a website and pad response with dummy bytes')\n", (789, 859), False, 'import argparse\n'), ((1150, 1188), 'bento.client.api.ClientConnection', 'ClientConnection', (['args.host', 'args.port'], {}), '(args.host, args.port)\n', (1166, 1188), False, 'from bento.client.api import ClientConnection\n'), ((1353, 1389), 'logging.debug', 'logging.debug', (['f"""Got token: {token}"""'], {}), "(f'Got token: {token}')\n", (1366, 1389), False, 'import logging\n'), ((1603, 1649), 'logging.debug', 'logging.debug', (['f"""Got session_id: {session_id}"""'], {}), "(f'Got session_id: {session_id}')\n", (1616, 1649), False, 'import logging\n'), ((1655, 1689), 'logging.debug', 'logging.debug', (['"""Getting output..."""'], {}), "('Getting output...')\n", (1668, 1689), False, 'import logging\n'), ((1298, 1347), 'bento.common.util.fatal', 'util.fatal', (['f"""Error message from server {errmsg}"""'], {}), "(f'Error message from server {errmsg}')\n", (1308, 1347), True, 'import bento.common.util as util\n'), ((1548, 1597), 'bento.common.util.fatal', 'util.fatal', (['f"""Error message from server {errmsg}"""'], {}), "(f'Error message from server {errmsg}')\n", (1558, 1597), True, 'import bento.common.util as util\n'), ((1788, 1809), 'zlib.decompress', 'zlib.decompress', (['data'], {}), '(data)\n', (1803, 1809), False, 'import zlib\n')] |
from app import webapp, mysql
from app.models import Search , Utils, Collection, WebUtils
from flask import request, jsonify
from flask.ext.jsonpify import jsonify as jsonp
import json
'''
Generic search call
@params
q: search query
page: the page number of search results (default 0)
type: type of search: {default: free(all fields), category, isbn}
@response
List of search result objects(ES)
'''
@webapp.route('/search')
def searchString():
    """Generic search endpoint; see the module comment above for params."""
    response = {'status': 'False'}
    results = {}
    query = Utils.getParam(request.args, 'q')
    page = Utils.getParam(request.args, 'page', var_type='int', default=1)
    search_type = Utils.getParam(request.args, 'type', default='free')
    user_id = Utils.getParam(request.args, 'userId', 'int')
    flow = Utils.getParam(request.args, 'flow', default='borrow')
    gcm_id = Utils.getParam(request.args, 'gcm_id', default=None)
    uuid = Utils.getParam(request.args, 'distinct_id', default=None)
    ref = Utils.getParam(request.args, 'ref', default='mobile')
    if not query:
        return Utils.errorResponse(response, 'HTTP_STATUS_CODE_DATA_MISSING')
    if ref == 'web':
        # Web clients get a purpose-built result shape.
        return jsonify(WebUtils.fetchSearchResults(query, search_type, page))
    user_info = {'user_id': user_id, 'gcm_id': gcm_id, 'uuid': uuid}
    search = Search(query, user_info, flow)
    # Pages are 1-based in the API but 0-based in the Search backend.
    if search_type == 'free':
        results = search.basicSearch(page=page-1)
    elif search_type == 'category':
        results = search.categorySearch(page=page-1)
    elif search_type == 'collections':
        results = search.collectionsSearch(page=page-1)
    elif search_type == 'isbn':
        results = search.isbnSearch(page=page-1)
    elif search_type == 'auto':
        results = search.autoComplete()
    elif search_type == 'custom':
        results = search.customQuery()
        # NOTE(review): custom queries return the raw result and skip the
        # logging below -- presumably intentional; confirm with owners.
        return results
    #log
    # Don't pollute search analytics with admin traffic.
    if user_id not in Utils.getAdmins():
        Search.logSearch({_:request.args.get(_) for _ in request.args}, search_type)
    # JSONP for the admin flow (cross-origin dashboard), plain JSON otherwise.
    return jsonify(results) if flow != 'admin' else jsonp(results)
@webapp.route('/getCategories')
def getCategories():
    """Return the search categories shown in the mobile app."""
    return jsonify(Search.getSearchCategoriesForApp())
@webapp.route('/getCollectionCategory')
def getCollectionCategory():
    """Return collections grouped by their category."""
    by_category = Collection.getByCategory()
    return jsonify(by_category)
@webapp.route('/searchFail', methods=['POST'])
def searchFail():
    """Deprecated search-failure reporting endpoint.

    Failure logging is now done directly from the backend, so this endpoint
    is a no-op kept only so old clients don't break; it always reports
    success.  The original handler body sat unreachable behind this early
    return and has been removed.
    """
    return jsonify(status='true')
@webapp.route('/recommended', methods=['GET'])
def recommended():
    """Return the most recommended books."""
    search = Search([])
    return jsonify(search.mostRecommended())
@webapp.route('/mostSearched', methods=['GET'])
def mostSearched():
    """Return the most searched-for books."""
    search = Search([])
    return jsonify(search.mostSearched())
@webapp.route('/getMultiplePanels')
def getMultiplePanels():
    """Return all active, partially-ordered collections as panel objects,
    newest collection first."""
    cursor = mysql.connect().cursor()
    try:
        cursor.execute("""SELECT collection_id FROM collections WHERE active = 1 AND
                partial_order = 1 ORDER BY collection_id DESC""")
        # fetchall() yields 1-tuples; Collection() received the tuple
        # unchanged in the original code, so keep passing it as-is.
        panels = [Collection(col_id).getObj() for col_id in cursor.fetchall()]
    finally:
        # The original leaked the cursor; make sure it is released.
        cursor.close()
    return jsonify(panels)
| [
"flask.request.args.get",
"app.mysql.connect",
"app.models.Utils.getAdmins",
"app.models.Collection",
"app.models.Search.getSearchCategoriesForApp",
"app.models.Search",
"app.webapp.route",
"app.models.Utils.errorResponse",
"app.models.Utils.getParam",
"app.models.WebUtils.fetchSearchResults",
"... | [((447, 470), 'app.webapp.route', 'webapp.route', (['"""/search"""'], {}), "('/search')\n", (459, 470), False, 'from app import webapp, mysql\n'), ((2094, 2124), 'app.webapp.route', 'webapp.route', (['"""/getCategories"""'], {}), "('/getCategories')\n", (2106, 2124), False, 'from app import webapp, mysql\n'), ((2231, 2269), 'app.webapp.route', 'webapp.route', (['"""/getCollectionCategory"""'], {}), "('/getCollectionCategory')\n", (2243, 2269), False, 'from app import webapp, mysql\n'), ((2347, 2392), 'app.webapp.route', 'webapp.route', (['"""/searchFail"""'], {'methods': "['POST']"}), "('/searchFail', methods=['POST'])\n", (2359, 2392), False, 'from app import webapp, mysql\n'), ((2829, 2874), 'app.webapp.route', 'webapp.route', (['"""/recommended"""'], {'methods': "['GET']"}), "('/recommended', methods=['GET'])\n", (2841, 2874), False, 'from app import webapp, mysql\n'), ((2945, 2991), 'app.webapp.route', 'webapp.route', (['"""/mostSearched"""'], {'methods': "['GET']"}), "('/mostSearched', methods=['GET'])\n", (2957, 2991), False, 'from app import webapp, mysql\n'), ((3060, 3094), 'app.webapp.route', 'webapp.route', (['"""/getMultiplePanels"""'], {}), "('/getMultiplePanels')\n", (3072, 3094), False, 'from app import webapp, mysql\n'), ((556, 589), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""q"""'], {}), "(request.args, 'q')\n", (570, 589), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((602, 665), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""page"""'], {'var_type': '"""int"""', 'default': '(1)'}), "(request.args, 'page', var_type='int', default=1)\n", (616, 665), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((684, 736), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""type"""'], {'default': '"""free"""'}), "(request.args, 'type', default='free')\n", (698, 736), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), 
((751, 796), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""userId"""', '"""int"""'], {}), "(request.args, 'userId', 'int')\n", (765, 796), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((808, 862), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""flow"""'], {'default': '"""borrow"""'}), "(request.args, 'flow', default='borrow')\n", (822, 862), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((876, 928), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""gcm_id"""'], {'default': 'None'}), "(request.args, 'gcm_id', default=None)\n", (890, 928), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((940, 997), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""distinct_id"""'], {'default': 'None'}), "(request.args, 'distinct_id', default=None)\n", (954, 997), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((1008, 1061), 'app.models.Utils.getParam', 'Utils.getParam', (['request.args', '"""ref"""'], {'default': '"""mobile"""'}), "(request.args, 'ref', default='mobile')\n", (1022, 1061), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((1345, 1375), 'app.models.Search', 'Search', (['query', 'user_info', 'flow'], {}), '(query, user_info, flow)\n', (1351, 1375), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2163, 2197), 'app.models.Search.getSearchCategoriesForApp', 'Search.getSearchCategoriesForApp', ([], {}), '()\n', (2195, 2197), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2209, 2228), 'flask.jsonify', 'jsonify', (['categories'], {}), '(categories)\n', (2216, 2228), False, 'from flask import request, jsonify\n'), ((2471, 2493), 'flask.jsonify', 'jsonify', ([], {'status': '"""true"""'}), "(status='true')\n", (2478, 2493), False, 'from flask import request, jsonify\n'), ((2513, 2559), 'app.models.Utils.getParam', 
'Utils.getParam', (['request.form', '"""user_id"""', '"""int"""'], {}), "(request.form, 'user_id', 'int')\n", (2527, 2559), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2568, 2601), 'app.models.Utils.getParam', 'Utils.getParam', (['request.form', '"""q"""'], {}), "(request.form, 'q')\n", (2582, 2601), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2615, 2651), 'app.models.Utils.getParam', 'Utils.getParam', (['request.form', '"""type"""'], {}), "(request.form, 'type')\n", (2629, 2651), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2662, 2716), 'app.models.Utils.getParam', 'Utils.getParam', (['request.form', '"""flow"""'], {'default': '"""borrow"""'}), "(request.form, 'flow', default='borrow')\n", (2676, 2716), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2804, 2826), 'flask.jsonify', 'jsonify', ([], {'status': '"""true"""'}), "(status='true')\n", (2811, 2826), False, 'from flask import request, jsonify\n'), ((3412, 3427), 'flask.jsonify', 'jsonify', (['panels'], {}), '(panels)\n', (3419, 3427), False, 'from flask import request, jsonify\n'), ((1096, 1158), 'app.models.Utils.errorResponse', 'Utils.errorResponse', (['response', '"""HTTP_STATUS_CODE_DATA_MISSING"""'], {}), "(response, 'HTTP_STATUS_CODE_DATA_MISSING')\n", (1115, 1158), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((1920, 1937), 'app.models.Utils.getAdmins', 'Utils.getAdmins', ([], {}), '()\n', (1935, 1937), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2036, 2052), 'flask.jsonify', 'jsonify', (['results'], {}), '(results)\n', (2043, 2052), False, 'from flask import request, jsonify\n'), ((2077, 2091), 'flask.ext.jsonpify.jsonify', 'jsonp', (['results'], {}), '(results)\n', (2082, 2091), True, 'from flask.ext.jsonpify import jsonify as jsonp\n'), ((2317, 2343), 'app.models.Collection.getByCategory', 'Collection.getByCategory', ([], {}), 
'()\n', (2341, 2343), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((1207, 1260), 'app.models.WebUtils.fetchSearchResults', 'WebUtils.fetchSearchResults', (['query', 'search_type', 'page'], {}), '(query, search_type, page)\n', (1234, 1260), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((2726, 2763), 'app.models.Search', 'Search', (['q', "{'user_id': user_id}", 'flow'], {}), "(q, {'user_id': user_id}, flow)\n", (2732, 2763), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((3133, 3148), 'app.mysql.connect', 'mysql.connect', ([], {}), '()\n', (3146, 3148), False, 'from app import webapp, mysql\n'), ((1967, 1986), 'flask.request.args.get', 'request.args.get', (['_'], {}), '(_)\n', (1983, 1986), False, 'from flask import request, jsonify\n'), ((2913, 2923), 'app.models.Search', 'Search', (['[]'], {}), '([])\n', (2919, 2923), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((3031, 3041), 'app.models.Search', 'Search', (['[]'], {}), '([])\n', (3037, 3041), False, 'from app.models import Search, Utils, Collection, WebUtils\n'), ((3372, 3390), 'app.models.Collection', 'Collection', (['col_id'], {}), '(col_id)\n', (3382, 3390), False, 'from app.models import Search, Utils, Collection, WebUtils\n')] |
from flask import Flask, make_response

# Minimal static-file server for the WASM demo assets.
app = Flask(__name__)
@app.route("/")
@app.route("/index.html")
def index():
    """Serve the demo landing page."""
    # Context manager closes the handle; the original leaked it via
    # open(...).read().
    with open("assets/index.html") as f:
        return f.read()
@app.route("/assets/<name>")
def wasm(name):
    """Serve a file from ./assets, with the correct MIME type for .wasm.

    Browsers require Content-Type: application/wasm for
    WebAssembly.instantiateStreaming to work.

    NOTE(review): `name` comes from the URL; Flask's default converter
    rejects '/', which limits path traversal, but consider
    flask.send_from_directory for defence in depth.
    """
    # Context manager closes the handle; the original leaked it.
    with open(f"assets/{name}", "rb") as f:
        r = make_response(f.read())
    if name.endswith(".wasm"):
        r.headers.set('Content-Type', "application/wasm")
    return r
@app.route("/data.csv")
def csv():
    """Serve the data file consumed by the front end."""
    print("GET CSV")  # debug trace kept from the original
    # Context manager closes the handle; the original leaked it.
    with open("data.csv") as f:
        return f.read()
if __name__ == "__main__":
    # Local development server only; do not run with debug=True in production.
    app.run(debug=True,port=8080)
| [
"flask.Flask"
] | [((46, 61), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (51, 61), False, 'from flask import Flask, make_response\n')] |
from .audio_source import AudioSource
from engine import disk
import pyglet.media
class AudioDirector(object):
    """Director for loading audio and controlling playback.

    Attributes:
        attenuation_distance (int): The default attenuation distance for newly
            loaded audio. Existing audio will retain its attenuation distance,
            see :fn:`set_attenuation_distance` for setting distance on existing
            sources.
        master_volume (float): The master volume for audio playback.
            0 for silence, 1 for nominal volume. A value of 1 disables
            audio attenuation and ignore the position of audio sources.
            To avoid this, set volume to 0.99 or lower.
        position (tuple of int): The location of the audio listener in
            two-dimensional space. Listeners close to this position will be
            louder than those further away.
    """
    def __init__(self, master_volume=1, position=(0, 0)):
        """Creates a director for grouping and controlling audio playback.

        Kwargs:
            master_volume (float, optional): Master volume for audio playback.
                0 for silence, 1 for nominal volume. A value of 1 will disable
                audio attenuation and ignore the position of audio sources.
                To avoid this, set volume to 0.99 or lower. Defaults to 1.
            position (tuple of int, optional): The location of the audio
                listener in two-dimensional space. Listeners close to this
                position will be louder than those farther. Defaults to (0, 0).
        """
        super(AudioDirector, self).__init__()
        self.attenuation_distance = 1
        self.master_volume = master_volume
        self.position = position
        # Cache of loaded resources from disk (keyed by filepath)
        self._disk_cache = {}
        # Groupings for audio sources; every source belongs to 'all'
        self._groups = {
            'all': set()
        }
    def load(self, filepath, streaming=True):
        """Loads an audio file from disk.

        The loaded audio will be added to the 'all' group for this director.
        A cached object will be returned if the file has already been loaded.

        Streaming should be used for large audio sources, such as music.
        Only one instance of a streaming audio source can be played at a time.

        Args:
            filepath (str): Path to audio, relative to the resource directory.

        Kwargs:
            streaming (bool, optional): Streams the audio from disk rather
                than loading the entire file into memory. Defaults to True.

        Returns:
            An :obj:`audio.AudioSource` object for the resource on disk.
        """
        # Load the file from disk and cache it if necessary
        # NOTE(review): the cache key ignores `streaming` — a second load of
        # the same path with a different flag returns the first-loaded source;
        # confirm this is intended.
        if filepath not in self._disk_cache:
            disk_file = disk.DiskLoader.load_audio(filepath, streaming)
            new_source = AudioSource(disk_file, streaming)
            # Cache the new source
            self._disk_cache[filepath] = new_source
            # Apply the default attenuation distance
            new_source.attenuation_distance = self.attenuation_distance
            # Add this audio source to the default group
            self.add(new_source)
        return self._disk_cache[filepath]
    def add(self, audio_source, group='all'):
        """Adds an audio source to a group.

        Grouping audio allows you to control the playback of the entire group
        rather than an individual source instance. By default, the audio source
        is added to the 'all' group.

        Args:
            audio_source (:obj:`audio.AudioSource`): The audio source to add.

        Kwargs:
            group (str, optional): The group to add the audio to.
                Defaults to 'all'.
        """
        # setdefault creates the group's set on first use
        self._groups.setdefault(group, set()).add(audio_source)
    def _filter_sources(self, group='all', states=None):
        """Returns all sources in the group matching the given states.

        Kwargs:
            group (str, optional): Name of group to filter. Defaults to 'all'.
            states (list of int, optional): List of :cls:`AudioSource` states
                to filter on. If the list is not empty and a source's state is
                not in the list, it will be excluded from the return value.

        Returns:
            An iterator containing sources in the group matching the states.
        """
        # If the group does not exist, return an empty iterator
        if group not in self._groups:
            return iter(())
        # If there are no states to filter on, return all sources in the group
        if not states:
            return iter(self._groups[group])
        # Return sources in the group matching the states to filter on
        return filter(lambda src: src.state in states, self._groups[group])
    def play(self, group='all'):
        """Plays all audio sources in a group.

        Kwargs:
            group (str, optional): Name of group to play. Defaults to 'all'.
        """
        for audio_source in self._filter_sources(group=group):
            audio_source.play()
    def pause(self, group='all'):
        """Pauses all playing audio sources in a group.

        Audio sources which are not currently playing will be left alone.

        Kwargs:
            group (str, optional): Name of group to pause. Defaults to 'all'.
        """
        states = [AudioSource.PLAY]
        for audio_source in self._filter_sources(group=group, states=states):
            audio_source.pause()
    def stop(self, group='all'):
        """Stops all audio sources in a group.

        Kwargs:
            group (str, optional): Name of group to stop. Defaults to 'all'.
        """
        states = [AudioSource.PLAY, AudioSource.PAUSE]
        for audio_source in self._filter_sources(group=group, states=states):
            audio_source.stop()
    def resume(self, group='all'):
        """Resumes playback of all paused audio sources in a group.

        Audio sources which are not currently paused will be left alone.

        Kwargs:
            group (str, optional): Name of group to resume. Defaults to 'all'.
        """
        states = [AudioSource.PAUSE]
        for audio_source in self._filter_sources(group=group, states=states):
            audio_source.play()
    def set_volume(self, level, group='all'):
        """Sets the volume of all audio sources in a group.

        Args:
            volume (float): 0 for silence, 1 for nominal volume.

        Kwargs:
            group (str, optional): Group to set volume of. Defaults to 'all'.
        """
        for audio_source in self._filter_sources(group=group):
            audio_source.volume = level
    def set_attenuation_distance(self, distance, group='all'):
        """Sets the distance from the listener before player volumes attenuate.

        Args:
            distance (int): The distance from the listener before the source
                volume attenuates. Within this distance, the volume remains
                nominal. Outside this distance, the volume approaches zero.

        Kwargs:
            group (str, optional): Group to set distance of. Defaults to 'all'.
        """
        for audio_source in self._filter_sources(group=group):
            audio_source.attenuation_distance = distance
    @property
    def position(self):
        """The position of the listener in 2d space as a tuple-like type."""
        return self._position
    @position.setter
    def position(self, position):
        """Sets the listener location in 2d space with a tuple-like object."""
        self._position = position
        # Pyglet uses 3d coordinates, convert 2d to a 3d tuple
        listener = pyglet.media.get_audio_driver().get_listener()
        listener.position = (position[0], position[1], 0)
    @property
    def master_volume(self):
        """Returns the master audio volume as a float between 0 and 1."""
        listener = pyglet.media.get_audio_driver().get_listener()
        return listener.volume
    @master_volume.setter
    def master_volume(self, level):
        """Sets the master audio playback volume.

        0 for silence, 1 for nominal volume. Setting this to 1 disables audio
        attenuation, ignoring the position of listeners. Set to 0.99 to
        allow for audio positioning.
        """
        listener = pyglet.media.get_audio_driver().get_listener()
        listener.volume = level
| [
"engine.disk.DiskLoader.load_audio"
] | [((2846, 2893), 'engine.disk.DiskLoader.load_audio', 'disk.DiskLoader.load_audio', (['filepath', 'streaming'], {}), '(filepath, streaming)\n', (2872, 2893), False, 'from engine import disk\n')] |
"""Interactive helper that runs common Windows repair jobs (DISM / SFC)."""
import os
import time

print("=====================================================================")
print("                                                                     ")
print("                       STARTING SYSTEM REPAIR                        ")
print("                                                                     ")
print("=====================================================================")
print("                                                                     ")
print("These are the jobs this application can do for you.")
print("1.Clean The DISM Component Store")
print("2.Repair Corrupted Windows Files Using SFC")
print("3.Repair Corrupted Windows Files Using DISM")
choice = input("Enter the serial number of the job which you want this application to do (1/2/3): ")
if choice == "1":
    # Job 1: analyze the component store, then optionally clean it up.
    print("Analyzing Component Store")
    os.system("dism.exe /Online /Cleanup-Image /AnalyzeComponentStore")
    time.sleep(3)
    print("Warning: You have to cleanup component store only if necessary.")
    time.sleep(3)
    Confirmation = input("Do you want to cleanup the component store?(y/n): ")
    if Confirmation.upper() == "Y":
        os.system("dism.exe /Online /Cleanup-Image /StartComponentCleanup")
        time.sleep(3)
        print("Now Exiting!")
    elif Confirmation.upper() == "N":
        print("Skipping Component Cleanup As Per The User's Instructions")
        time.sleep(3)
        print("Now Exiting!")
        time.sleep(1)
    else:
        print('You have to enter only "y" or "n"')
        time.sleep(3)
        print("Now Exiting!")
        time.sleep(1)
elif choice == "2":
    # Job 2: run the System File Checker.
    print("Starting SFC Repair Job")
    os.system("SFC /SCANNOW")
    time.sleep(3)
    # Bug fix: corrected the misspelled "Cpmpleted" in the status message.
    print("Operation Completed Successfully!")
    time.sleep(3)
    print("Now Exiting!")
elif choice == "3":
    # Job 3: DISM RestoreHealth needs either internet access or a local WIM.
    Internet_Connection = input("Do you have an active internet connection?(y/n): ")
    if Internet_Connection.upper() == "N":
        iso_file = input("Do you have windows10 wim file?(y/n): ")
        if iso_file.upper() == "Y":
            Location = input("Enter the location of the wim file: ")
            print("Starting DISM")
            os.system("dism.exe /Online /Cleanup-Image /RestoreHealth /Source:" + Location + " /LimitAccess")
            time.sleep(3)
            print("Now Exiting!")
        else:
            print("Sorry but you need either internet connection or wim file in order to run Dism")
            time.sleep(3)
            print("Now Exiting!")
    elif Internet_Connection.upper() == "Y":
        print("Starting DISM")
        os.system("dism.exe /Online /Cleanup-Image /RestoreHealth")
        time.sleep(3)
        print("Now Exiting")
    else:
        print("You have to enter only Y/N")
        time.sleep(3)
else:
    print("Choice Not Valid")
    time.sleep(3)
    print("Now Exiting!")
| [
"os.system",
"time.sleep"
] | [((866, 933), 'os.system', 'os.system', (['"""dism.exe /Online /Cleanup-Image /AnalyzeComponentStore"""'], {}), "('dism.exe /Online /Cleanup-Image /AnalyzeComponentStore')\n", (875, 933), False, 'import os\n'), ((938, 951), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (948, 951), False, 'import time\n'), ((1033, 1046), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1043, 1046), False, 'import time\n'), ((1170, 1237), 'os.system', 'os.system', (['"""dism.exe /Online /Cleanup-Image /StartComponentCleanup"""'], {}), "('dism.exe /Online /Cleanup-Image /StartComponentCleanup')\n", (1179, 1237), False, 'import os\n'), ((1246, 1259), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1256, 1259), False, 'import time\n'), ((1673, 1698), 'os.system', 'os.system', (['"""SFC /SCANNOW"""'], {}), "('SFC /SCANNOW')\n", (1682, 1698), False, 'import os\n'), ((1703, 1716), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1713, 1716), False, 'import time\n'), ((1768, 1781), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1778, 1781), False, 'import time\n'), ((1411, 1424), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1421, 1424), False, 'import time\n'), ((1463, 1476), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1473, 1476), False, 'import time\n'), ((1546, 1559), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (1556, 1559), False, 'import time\n'), ((1598, 1611), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1608, 1611), False, 'import time\n'), ((2818, 2831), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2828, 2831), False, 'import time\n'), ((2175, 2276), 'os.system', 'os.system', (["('dism.exe /Online /Cleanup-Image /RestoreHealth /Source:' + Location +\n ' /LimitAccess')"], {}), "('dism.exe /Online /Cleanup-Image /RestoreHealth /Source:' +\n Location + ' /LimitAccess')\n", (2184, 2276), False, 'import os\n'), ((2285, 2298), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2295, 2298), False, 'import time\n'), 
((2459, 2472), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2469, 2472), False, 'import time\n'), ((2591, 2650), 'os.system', 'os.system', (['"""dism.exe /Online /Cleanup-Image /RestoreHealth"""'], {}), "('dism.exe /Online /Cleanup-Image /RestoreHealth')\n", (2600, 2650), False, 'import os\n'), ((2659, 2672), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2669, 2672), False, 'import time\n'), ((2764, 2777), 'time.sleep', 'time.sleep', (['(3)'], {}), '(3)\n', (2774, 2777), False, 'import time\n')] |
"""
SNMP subagent entrypoint.
"""
import asyncio
import functools
import os
import signal
import sys
import ax_interface
from sonic_ax_impl.mibs import ieee802_1ab
from . import logger
from .mibs.ietf import rfc1213, rfc2737, rfc2863, rfc3433, rfc4292, rfc4363
from .mibs.vendor import dell, cisco
# Background task update frequency ( in seconds )
DEFAULT_UPDATE_FREQUENCY = 5
# Module-level state shared between main() and the shutdown signal handler.
event_loop = asyncio.get_event_loop()
shutdown_task = None
class SonicMIB(
    rfc1213.InterfacesMIB,
    rfc1213.IpMib,
    rfc1213.SysNameMIB,
    rfc2737.PhysicalTableMIB,
    rfc3433.PhysicalSensorTableMIB,
    rfc2863.InterfaceMIBObjects,
    rfc4363.QBridgeMIBObjects,
    rfc4292.IpCidrRouteTable,
    ieee802_1ab.LLDPLocalSystemData,
    ieee802_1ab.LLDPLocalSystemData.LLDPLocPortTable,
    ieee802_1ab.LLDPLocalSystemData.LLDPLocManAddrTable,
    ieee802_1ab.LLDPRemTable,
    ieee802_1ab.LLDPRemManAddrTable,
    dell.force10.SSeriesMIB,
    cisco.bgp4.CiscoBgp4MIB,
    cisco.ciscoPfcExtMIB.cpfcIfTable,
    cisco.ciscoPfcExtMIB.cpfcIfPriorityTable,
    cisco.ciscoSwitchQosMIB.csqIfQosGroupStatsTable,
    cisco.ciscoEntityFruControlMIB.cefcFruPowerStatusTable,
):
    """Aggregate MIB handed to the ax_interface Agent.

    Composes the standard (RFC) and vendor MIB implementations listed as
    base classes into a single MIB served by the SONiC SNMP subagent.
    If SONiC was to create custom MIBEntries, they may be specified here.
    """
def shutdown(signame, agent):
    """Signal handler: schedule a graceful Agent shutdown on the event loop.

    Args:
        signame: Name of the signal that triggered the shutdown (for logging).
        agent: The running ax_interface Agent to stop.
    """
    # FIXME: If the Agent dies, the background tasks will zombie.
    global event_loop, shutdown_task
    # Bug fix: corrected the misspelled "Recieved" in the log message.
    logger.info("Received '{}' signal, shutting down...".format(signame))
    # Keep the task so main() can await its completion before closing the loop.
    shutdown_task = event_loop.create_task(agent.shutdown())
def main(update_frequency=None):
    """Run the SNMP subagent until it exits or a shutdown signal arrives.

    Kwargs:
        update_frequency (int, optional): Seconds between background MIB
            updates; falls back to DEFAULT_UPDATE_FREQUENCY when falsy.
    """
    global event_loop
    try:
        # initialize handler and set update frequency (or use the default)
        agent = ax_interface.Agent(SonicMIB, update_frequency or DEFAULT_UPDATE_FREQUENCY, event_loop)
        # add "shutdown" signal handlers
        # https://docs.python.org/3.5/library/asyncio-eventloop.html#set-signal-handlers-for-sigint-and-sigterm
        for signame in ('SIGINT', 'SIGTERM'):
            event_loop.add_signal_handler(getattr(signal, signame),
                                          functools.partial(shutdown, signame, agent))
        # start the agent, wait for it to come back.
        logger.info("Starting agent with PID: {}".format(os.getpid()))
        event_loop.run_until_complete(agent.run_in_event_loop())
    except Exception:
        logger.exception("Uncaught exception in {}".format(__name__))
        sys.exit(1)
    finally:
        if shutdown_task is not None:
            # make sure shutdown has completed completely before closing the loop
            event_loop.run_until_complete(shutdown_task)
        # the agent runtime has exited, close the event loop and exit.
        event_loop.close()
        logger.info("Goodbye!")
        sys.exit(0)
| [
"ax_interface.Agent",
"functools.partial",
"os.getpid",
"sys.exit",
"asyncio.get_event_loop"
] | [((394, 418), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (416, 418), False, 'import asyncio\n'), ((1679, 1769), 'ax_interface.Agent', 'ax_interface.Agent', (['SonicMIB', '(update_frequency or DEFAULT_UPDATE_FREQUENCY)', 'event_loop'], {}), '(SonicMIB, update_frequency or DEFAULT_UPDATE_FREQUENCY,\n event_loop)\n', (1697, 1769), False, 'import ax_interface\n'), ((2753, 2764), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (2761, 2764), False, 'import sys\n'), ((2412, 2423), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2420, 2423), False, 'import sys\n'), ((2076, 2119), 'functools.partial', 'functools.partial', (['shutdown', 'signame', 'agent'], {}), '(shutdown, signame, agent)\n', (2093, 2119), False, 'import functools\n'), ((2232, 2243), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2241, 2243), False, 'import os\n')] |
#!/usr/bin/env python
# Author: <NAME>
# Date: 2015oct31
from __future__ import print_function
import os
import sys
import stat
import errno
import shutil
import optparse
import traceback
import subprocess
# Marker written into each generated bundle so the tool can recognize (and
# safely delete) apps it created itself.
wrappaconda_name_string = 'Wr[App]-A-Conda'
class AppAtizer(object):
    """Builds a macOS .app bundle that wraps a private Miniconda install.

    The bundle is created at /Applications/<name>.app; Miniconda is installed
    into Contents/Resources and the requested target binary is hard-linked
    into Contents/MacOS so Finder can launch it.
    """

    def __init__(self):
        """Resolve paths, pick a downloader, and parse the command line."""
        # tmp paths
        self._downloads_prefix = os.path.expanduser('~/Downloads')
        if not os.path.isdir(self._downloads_prefix):
            self._downloads_prefix = './' # use cwd

        # try for wget or curl
        self._get = self._getDownloaderCommand()

        # cli input
        self._parseUserInput()

        # .app paths
        self._apppath = '/Applications/'+self._name+'.app'
        self._contents_prefix = self._apppath + "/Contents"
        self._resource_prefix = self._contents_prefix + "/Resources"
        self._info_plist_path = self._contents_prefix + "/Info.plist"
        self._pkg_info_path = self._contents_prefix + "/PkgInfo"
        self._macos_prefix = self._contents_prefix + "/MacOS"
        self._cfbundle_icon_filename = 'app.icns'

        # Wr[App]-A-Conda paths
        self._id_file_path = self._resource_prefix + "/wrappaconda"

        # miniconda paths
        self._miniconda_prefix = self._resource_prefix + "/miniconda"
        self._python_path = self._miniconda_prefix + "/bin/python"
        self._conda_path = self._miniconda_prefix + "/bin/conda"

    def _parseUserInput(self):
        """Parse and validate CLI options, storing them on the instance."""
        parser = optparse.OptionParser()
        parser.add_option("-n", "--name", dest='name', help="[REQUIRED] The name of this app.")
        parser.add_option("-t", "--target", dest='target', help="[REQUIRED] The binary or script found in Anaconda\'s $PREFIX/bin.")
        parser.add_option("-v", "--version", dest='version', help="The version of this app.", default='0.1')
        parser.add_option("-i", "--icon", dest='icon_file', help="Icon file to be used in the bundle.")
        parser.add_option("-c", "--channel", dest='channel', help="The Anaconda.org package channel(s), or url(s) separated by commas (e.g. nckz,https://conda.anaconda.org/gpi/channel/rc) (defaults to \'defaults\')", default='defaults')
        parser.add_option("-p", "--package", dest='package', help="The package name(s) separated by commas (e.g. scipy=0.15.0,curl=7.26.0,pip).")
        parser.add_option("-r", "--rootenv", dest='rootenv', help="A root environment file (created using: \'conda list --export\').")
        parser.add_option("--py", dest='py_ver', help="Choose the distro python version using the major and minor version numbers (defaults to 3.5).", default='3.5')
        parser.add_option("-o", "--overwrite", action="store_true", dest='overwrite', help="Overwrite an existing app with the same \'name\'. Use caution!!!")
        options, args = parser.parse_args()

        try:
            # check for input errors; print usage before re-raising on failure
            assert options.name is not None
            assert options.target is not None
            if options.icon_file is not None:
                assert os.path.isfile(options.icon_file)
                assert options.icon_file.endswith(".icns")
            if options.rootenv is not None:
                assert os.path.isfile(options.rootenv)
        except:
            parser.print_help()
            raise

        self._name = options.name
        self._version = options.version
        self._target = options.target
        self._icon_file = options.icon_file
        self._channel = options.channel
        self._package = options.package
        self._root_env = options.rootenv
        self._py_ver = options.py_ver
        self._overwrite = options.overwrite

    def _getDownloaderCommand(self):
        """Return a download command template ('{}' is the URL placeholder).

        Prefers wget, falls back to curl; raises if neither is installed.
        """
        # check for installed utilities
        try:
            subprocess.check_output('command -v wget >/dev/null 2>&1;', shell=True)
            return 'wget --directory-prefix ' + self._downloads_prefix + ' -c {}'
        except Exception:
            try:
                subprocess.check_output('command -v curl >/dev/null 2>&1;', shell=True)
                return 'cd '+self._downloads_prefix+' && curl --fail -O -C - {} '
            except Exception:
                print("This script requires \'wget\' or \'curl\' and neither were found.")
                raise

    def appPath(self):
        """Return the full path of the .app bundle being generated."""
        return self._apppath

    def deleteExistingApp(self):
        """Remove a previously generated bundle at the target path.

        Only bundles containing our marker file are deleted. If the bundle
        exists and --overwrite was not given, exit cleanly instead of falling
        through (the subsequent mkdir would otherwise crash with a traceback).
        """
        if os.path.exists(self._apppath):
            if self._overwrite:
                print("Removing existing path: "+self._apppath)
                try:
                    # Refuse to delete anything this tool didn't generate.
                    # Explicit check instead of `assert` so it still runs
                    # under `python -O`.
                    with open(self._id_file_path, 'r') as f:
                        if f.read().count(wrappaconda_name_string) == 0:
                            raise ValueError("missing Wr[App]-A-Conda marker")
                    shutil.rmtree(self._apppath)
                except Exception:
                    print("The app \'"+self._apppath+"\' cannot be verified for deletion. You may have to remove it manually. Skipping...")
            else:
                print("The app \'"+self._apppath+"\' already exists, exiting...")
                # Bug fix: actually exit as the message promises; previously
                # execution continued and buildAppSkeleton() crashed later.
                sys.exit(1)

    def buildAppSkeleton(self):
        """Create the .app directory tree (Contents/MacOS/Resources)."""
        # build the .app directory and supporting files
        try:
            os.mkdir(self._apppath)
            os.mkdir(self._contents_prefix)
            os.mkdir(self._macos_prefix)
            os.mkdir(self._resource_prefix)
        except OSError as e:
            if e.errno == errno.EPERM:
                print("You must have root permissions to write to /Applications.")
            raise

    def copyIconFile(self):
        """Copy the user-supplied .icns icon into the bundle, if any."""
        if self._icon_file is not None:
            shutil.copy(self._icon_file, self._resource_prefix + '/' + self._cfbundle_icon_filename)

    def writeInfoPList(self):
        """Write the bundle's Info.plist describing name, version and target."""
        # http://stackoverflow.com/questions/7404792/how-to-create-mac-application-bundle-for-python-script-via-python
        CFBundleName = self._name
        CFBundleVersion = self._version
        CFBundleIconFile = self._cfbundle_icon_filename
        CFBundleGetInfoString = CFBundleName + " " + CFBundleVersion
        CFBundleShortVersionString = CFBundleGetInfoString
        CFBundleIdentifier = "com.gpilab."+CFBundleName
        CFBundleExecutable = self._target
        info_plist = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
    <key>CFBundleDevelopmentRegion</key>
    <string>English</string>
    <key>CFBundleExecutable</key>
    <string>%s</string>
    <key>CFBundleGetInfoString</key>
    <string>%s</string>
    <key>CFBundleIconFile</key>
    <string>%s</string>
    <key>CFBundleIdentifier</key>
    <string>%s</string>
    <key>CFBundleInfoDictionaryVersion</key>
    <string>6.0</string>
    <key>CFBundleName</key>
    <string>%s</string>
    <key>CFBundlePackageType</key>
    <string>APPL</string>
    <key>CFBundleShortVersionString</key>
    <string>%s</string>
    <key>CFBundleSignature</key>
    <string>????</string>
    <key>CFBundleVersion</key>
    <string>%s</string>
    <key>NSAppleScriptEnabled</key>
    <string>YES</string>
    <key>NSMainNibFile</key>
    <string>MainMenu</string>
    <key>NSPrincipalClass</key>
    <string>NSApplication</string>
</dict>
</plist>
"""
        with open(self._info_plist_path, "w") as f:
            f.write(info_plist % (CFBundleExecutable, CFBundleGetInfoString, CFBundleIconFile, CFBundleIdentifier, CFBundleName, CFBundleShortVersionString, CFBundleVersion))

    def writePkgInfo(self):
        """Write the legacy PkgInfo type/creator file."""
        with open(self._pkg_info_path, "w") as f:
            f.write("APPL????")

    def writeWrappacondaIDFile(self):
        """Drop the marker file used by deleteExistingApp() for verification."""
        with open(self._id_file_path, "w") as f:
            f.write("This app was generated by " + wrappaconda_name_string)

    def setupMiniconda(self):
        """Download and install Miniconda into the bundle, then install packages."""
        # anaconda website and miniconda package info
        #   -python 3 is the default miniconda
        MINICONDA_NAME='Miniconda3'
        if float(self._py_ver) < 3:
            MINICONDA_NAME='Miniconda'
        MINICONDA_WEB='https://repo.continuum.io/miniconda/'
        MINICONDA_OSX=MINICONDA_NAME+'-latest-MacOSX-x86_64.sh'

        # download miniconda
        try:
            cmd = self._get.format(MINICONDA_WEB+MINICONDA_OSX)
            print(cmd)
            subprocess.check_output(cmd, shell=True)
        except Exception:
            print("Failed to download miniconda.")

        # install miniconda (installer must be executable to run)
        try:
            os.chmod(self._downloads_prefix+'/'+MINICONDA_OSX, 0o777)
            cmd = self._downloads_prefix+'/'+MINICONDA_OSX+' -b -p '+self._miniconda_prefix
            print(cmd)
            subprocess.check_output(cmd, shell=True)
        except Exception:
            print("Failed to run miniconda.")

        # install central conda package
        if self._package:
            try:
                python = ' python=='+self._py_ver+' '
                conda_cmd = self._conda_path+' install -y -c '+' -c '.join(self._channel.split(','))+' '+' '.join(self._package.split(',')) + python
                if self._root_env:
                    conda_cmd += ' --file '+self._root_env
                print(conda_cmd)
                subprocess.check_output(conda_cmd, shell=True)
                # shrink the install: drop tarballs, index cache, unused pkgs
                subprocess.check_output(self._conda_path+' clean -t -i -p -l -y', shell=True)
            except Exception:
                print("Failed to run conda.")
                raise

    def linkTarget(self):
        """Hard-link the target binary from miniconda/bin into Contents/MacOS."""
        # check for the existence of the target
        try:
            assert os.path.isfile(self._miniconda_prefix + '/bin/' + self._target)
            os.link(self._miniconda_prefix + '/bin/' + self._target, self._macos_prefix + '/' + self._target)
        except Exception:
            print(self._target, ' doesn\'t exist in Miniconda bin.')
            raise
raise
def main():
    """CLI entry point: assemble the wrapped-miniconda .app bundle."""
    builder = AppAtizer()
    # Order matters: clear any stale bundle first, install miniconda before
    # linking the target out of its bin/, write metadata last.
    steps = (
        builder.deleteExistingApp,
        builder.buildAppSkeleton,
        builder.writeWrappacondaIDFile,
        builder.copyIconFile,
        builder.setupMiniconda,
        builder.linkTarget,
        builder.writeInfoPList,
        builder.writePkgInfo,
    )
    for step in steps:
        step()
    print(builder.appPath() + " has been created.")
# Script entry point.
if __name__ == '__main__':
    main()
| [
"subprocess.check_output",
"os.path.exists",
"os.link",
"optparse.OptionParser",
"os.chmod",
"os.path.isfile",
"os.path.isdir",
"os.mkdir",
"shutil.copy",
"shutil.rmtree",
"os.path.expanduser"
] | [((359, 392), 'os.path.expanduser', 'os.path.expanduser', (['"""~/Downloads"""'], {}), "('~/Downloads')\n", (377, 392), False, 'import os\n'), ((1494, 1517), 'optparse.OptionParser', 'optparse.OptionParser', ([], {}), '()\n', (1515, 1517), False, 'import optparse\n'), ((4366, 4395), 'os.path.exists', 'os.path.exists', (['self._apppath'], {}), '(self._apppath)\n', (4380, 4395), False, 'import os\n'), ((408, 445), 'os.path.isdir', 'os.path.isdir', (['self._downloads_prefix'], {}), '(self._downloads_prefix)\n', (421, 445), False, 'import os\n'), ((3778, 3849), 'subprocess.check_output', 'subprocess.check_output', (['"""command -v wget >/dev/null 2>&1;"""'], {'shell': '(True)'}), "('command -v wget >/dev/null 2>&1;', shell=True)\n", (3801, 3849), False, 'import subprocess\n'), ((5077, 5100), 'os.mkdir', 'os.mkdir', (['self._apppath'], {}), '(self._apppath)\n', (5085, 5100), False, 'import os\n'), ((5113, 5144), 'os.mkdir', 'os.mkdir', (['self._contents_prefix'], {}), '(self._contents_prefix)\n', (5121, 5144), False, 'import os\n'), ((5157, 5185), 'os.mkdir', 'os.mkdir', (['self._macos_prefix'], {}), '(self._macos_prefix)\n', (5165, 5185), False, 'import os\n'), ((5198, 5229), 'os.mkdir', 'os.mkdir', (['self._resource_prefix'], {}), '(self._resource_prefix)\n', (5206, 5229), False, 'import os\n'), ((5480, 5573), 'shutil.copy', 'shutil.copy', (['self._icon_file', "(self._resource_prefix + '/' + self._cfbundle_icon_filename)"], {}), "(self._icon_file, self._resource_prefix + '/' + self.\n _cfbundle_icon_filename)\n", (5491, 5573), False, 'import shutil\n'), ((8157, 8197), 'subprocess.check_output', 'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (8180, 8197), False, 'import subprocess\n'), ((8319, 8378), 'os.chmod', 'os.chmod', (["(self._downloads_prefix + '/' + MINICONDA_OSX)", '(511)'], {}), "(self._downloads_prefix + '/' + MINICONDA_OSX, 511)\n", (8327, 8378), False, 'import os\n'), ((8504, 8544), 'subprocess.check_output', 
'subprocess.check_output', (['cmd'], {'shell': '(True)'}), '(cmd, shell=True)\n', (8527, 8544), False, 'import subprocess\n'), ((9373, 9436), 'os.path.isfile', 'os.path.isfile', (["(self._miniconda_prefix + '/bin/' + self._target)"], {}), "(self._miniconda_prefix + '/bin/' + self._target)\n", (9387, 9436), False, 'import os\n'), ((9449, 9550), 'os.link', 'os.link', (["(self._miniconda_prefix + '/bin/' + self._target)", "(self._macos_prefix + '/' + self._target)"], {}), "(self._miniconda_prefix + '/bin/' + self._target, self._macos_prefix +\n '/' + self._target)\n", (9456, 9550), False, 'import os\n'), ((3057, 3090), 'os.path.isfile', 'os.path.isfile', (['options.icon_file'], {}), '(options.icon_file)\n', (3071, 3090), False, 'import os\n'), ((3217, 3248), 'os.path.isfile', 'os.path.isfile', (['options.rootenv'], {}), '(options.rootenv)\n', (3231, 3248), False, 'import os\n'), ((9037, 9083), 'subprocess.check_output', 'subprocess.check_output', (['conda_cmd'], {'shell': '(True)'}), '(conda_cmd, shell=True)\n', (9060, 9083), False, 'import subprocess\n'), ((9100, 9179), 'subprocess.check_output', 'subprocess.check_output', (["(self._conda_path + ' clean -t -i -p -l -y')"], {'shell': '(True)'}), "(self._conda_path + ' clean -t -i -p -l -y', shell=True)\n", (9123, 9179), False, 'import subprocess\n'), ((3981, 4052), 'subprocess.check_output', 'subprocess.check_output', (['"""command -v curl >/dev/null 2>&1;"""'], {'shell': '(True)'}), "('command -v curl >/dev/null 2>&1;', shell=True)\n", (4004, 4052), False, 'import subprocess\n'), ((4670, 4698), 'shutil.rmtree', 'shutil.rmtree', (['self._apppath'], {}), '(self._apppath)\n', (4683, 4698), False, 'import shutil\n')] |
import requests
class Wallet(object):
    """Minimal client for the Watchtower.cash BCH API."""

    def __init__(self, testnet=False):
        """Select the API base URL.

        Kwargs:
            testnet (bool): When True, talk to the testnet Watchtower instance.
        """
        if testnet:
            self.base_url = 'https://testnet.watchtower.cash/api/'
        else:
            self.base_url = 'https://watchtower.cash/api/'

    def _get_utxos(self, wallet_hash, amount):
        """Fetch the UTXO listing for *wallet_hash* and return the parsed JSON.

        ``amount`` is currently unused (coin selection is not implemented yet);
        kept so the signature matches the intended API.
        """
        url = self.base_url + f'utxo/wallet/{wallet_hash}'
        # A timeout keeps a stalled API call from hanging the caller forever.
        resp = requests.get(url, timeout=30)
        print(resp.status_code)
        payload = resp.json()
        print(payload)
        # Bug fix: the parsed response used to be discarded; return it.
        return payload

    def send(self, amount):
        """Send *amount* BCH (transaction building not yet implemented)."""
        # TODO: replace the placeholder wallet hash with the real one.
        self._get_utxos('abcd0123456', amount)
        print(f"Sending {amount} BCH...")
| [
"requests.get"
] | [((362, 379), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (374, 379), False, 'import requests\n')] |
from PyQt5.QtWidgets import QTreeWidget
from PyQt5.Qt import pyqtSignal
from PyQt5.QtWidgets import QTreeWidgetItem
from PyQt5.Qt import Qt
class TreeWidget(QTreeWidget):
# enum ItemShowMode
ItemsCollapsed = 0
ItemsExpanded = 1
def __init__(self, parent=None):
super().__init__(parent)
self._refreshAllItemsNeeded = True
self._allTreeItems = [] # QList<QTreeWidgetItem>
self._showMode = self.itemCollapsed # ItemShowMode
self.itemChanged.connect(self._scheduleRefresh)
def defaultItemShowMode(self):
'''
@return: ItemShowMode
'''
return self._showMode
def setDefaultItemShowMode(self, mode):
'''
@param: item ItemShowMode
'''
self._showMode = mode
def allItems(self):
'''
@return: QList<QTreeWidgetItem>
'''
if self._refreshAllItemsNeeded:
self._allTreeItems.clear()
self.iterateAllItems(None)
self._refreshAllItemsNeeded = False
return self._allTreeItems
def appendToParentItemByText(self, parentText, item):
'''
@param: parentText QString
@param: item QTreeWidgetItem
'''
list_ = self.findItems(parentText, Qt.MatchExactly)
if len(list_) == 0:
return False
# QTreeWidgetItem
parentItem = list_[0]
if not parentItem:
return False
self._allTreeItems.append(item)
parentItem.addChild(item)
return True
def appendToParentItemByItem(self, parent, item):
if not parent or parent.treeWidget() != self:
return False
self._allTreeItems.append(item)
parent.appendChild(item)
return True
def prependToParentItemByText(self, parentText, item):
list_ = self.findItems(parentText, Qt.MatchExactly)
if len(list_) == 0:
return False
# QTreeWidgetItem
parentItem = list_[0]
if not parentItem:
return False
self._allTreeItems.append(item)
parentItem.insertChild(0, item)
return True
def prependToParentItemByItem(self, parent, item):
if not parent or parent.treeWidget() != self:
return False
self._allTreeItems.append(item)
parent.insertChild(0, item)
return True
def addTopLevelItem(self, item):
'''
@param: item QTreeWidgetItem
'''
self._allTreeItems.append(item)
super().addTopLevelItem(item)
def addTopLevelItems(self, items):
'''
@param: items QList<QTreeWidgetItem>
'''
self._allTreeItems.extend(items)
super().addTopLevelItems(items)
def insertTopLevelItem(self, index, item):
'''
@param: index int
@param: item QTreeWidgetItem
'''
self._allTreeItems.append(item)
super().insertTopLevelItem(index, item)
def insertTopLevelItems(self, index, items):
'''
@param: index int
@param: items QList<QTreeWidgetItem>
'''
self._allTreeItems.extend(items)
super().insertTopLevelItems(index, items)
def deleteItem(self, item):
'''
@param: item QTreeWidgetItem
'''
if item in self._allTreeItems:
self._allTreeItems.remove(item)
self._refreshAllItemsNeeded = True
def deleteItems(self, items):
'''
@param: items QList<QTreeWidgetItem>
'''
for item in items:
if item in self._allTreeItems:
self._allTreeItems.remove(item)
self._refreshAllItemsNeeded = True
# Q_SIGNALS:
itemControlClicked = pyqtSignal(QTreeWidgetItem) # item
itemMiddleButtonClicked = pyqtSignal(QTreeWidgetItem) # item
# public Q_SLOTS:
def filterString(self, string):
# QList<QTreeWidgetItem>
_allItems = self.allItems()
# QList<QTreeWidgetItem>
parents = []
stringIsEmpty = not string
strLower = string.lower()
for item in _allItems:
if stringIsEmpty:
containsString = True
else:
text = item.text(0).lower()
containsString = strLower in text
if containsString:
item.setHidden(False)
itemParent = item.parent()
if itemParent and itemParent not in parents:
parents.append(itemParent)
else:
item.setHidden(True)
itemParent = item.parent()
if itemParent:
itemParent.setHidden(True)
for parentItem in parents:
parentItem.setHidden(False)
if stringIsEmpty:
parentItem.setExpanded(self._showMode == self.itemExpanded)
else:
parentItem.setExpanded(True)
parentOfParentItem = parentItem.parent()
if parentOfParentItem and parentOfParentItem not in parents:
parents.append(parentOfParentItem)
def clear(self):
super().clear()
self._allTreeItems.clear()
# private Q_SLOTS:
def _scheduleRefresh(self):
self._refreshAllItemsNeeded = True
# private:
def mousePressEvent(self, event):
    """Emit the Ctrl+click / middle-click signals with the item under the
    cursor, then let the base class perform normal press handling.

    @param: event QMouseEvent
    """
    if event.modifiers() == Qt.ControlModifier:
        self.itemControlClicked.emit(self.itemAt(event.pos()))
    if event.buttons() == Qt.MiddleButton:
        self.itemMiddleButtonClicked.emit(self.itemAt(event.pos()))
    # Default QTreeWidget behavior (selection, expansion) still applies.
    super().mousePressEvent(event)
def iterateAllItems(self, parent):
    """Recursively walk *parent* (or the top level when parent is falsy)
    and collect every leaf item into self._allTreeItems.

    @param: parent QTreeWidgetItem
    """
    count = parent.childCount() if parent else self.topLevelItemCount()
    for idx in range(count):
        item = parent.child(idx) if parent else self.topLevelItem(idx)
        # Only leaves are recorded; branch nodes are just traversed.
        if item.childCount() == 0:
            self._allTreeItems.append(item)
        self.iterateAllItems(item)
| [
"PyQt5.Qt.pyqtSignal"
] | [((3741, 3768), 'PyQt5.Qt.pyqtSignal', 'pyqtSignal', (['QTreeWidgetItem'], {}), '(QTreeWidgetItem)\n', (3751, 3768), False, 'from PyQt5.Qt import pyqtSignal\n'), ((3807, 3834), 'PyQt5.Qt.pyqtSignal', 'pyqtSignal', (['QTreeWidgetItem'], {}), '(QTreeWidgetItem)\n', (3817, 3834), False, 'from PyQt5.Qt import pyqtSignal\n')] |
""":run
"""
import curses
import datetime
import json
import logging
import os
import re
import shlex
import shutil
import time
import uuid
from math import floor
from queue import Queue
from typing import Any
from typing import Callable
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import Union
from ..action_base import ActionBase
from ..action_defs import RunStdoutReturn
from ..app_public import AppPublic
from ..configuration_subsystem import ApplicationConfiguration
from ..runner import CommandAsync
from ..steps import Step
from ..ui_framework import CursesLine
from ..ui_framework import CursesLinePart
from ..ui_framework import CursesLines
from ..ui_framework import Interaction
from ..ui_framework import dict_to_form
from ..ui_framework import form_to_dict
from ..ui_framework import nonblocking_notification
from ..ui_framework import warning_notification
from ..utils.functions import abs_user_path
from ..utils.functions import human_time
from ..utils.functions import remove_ansi
from ..utils.functions import round_half_up
from ..utils.serialize import json_dump
from . import _actions as actions
from . import run_action
#: Case-insensitive regex -> curses color pair for task/play result words.
RESULT_TO_COLOR = [
    ("(?i)^failed$", 9),
    ("(?i)^ok$", 10),
    ("(?i)^ignored$", 13),
    ("(?i)^skipped$", 14),
    ("(?i)^in_progress$", 8),
]


def get_color(word):
    """Return the color for *word*, or 0 when no pattern matches.

    A ``def`` replaces the original lambda assignment (PEP 8 E731 —
    the original carried a ``noqa`` for exactly this).
    """
    return next((color for pattern, color in RESULT_TO_COLOR if re.match(pattern, word)), 0)
def color_menu(_colno: int, colname: str, entry: Dict[str, Any]) -> Tuple[int, int]:
    # pylint: disable=too-many-branches
    """Find matching color for word

    :param _colno: The column number (unused)
    :param colname: A word to match
    :param entry: The menu row being rendered
    :return: (color, decoration) curses attributes for the cell
    """
    colval = entry[colname]
    color = 0
    decoration = 0
    # Rows containing "__play_name" come from the plays menu.
    if "__play_name" in entry:
        if not colval:
            color = 8
        elif colname in ["__task_count", "__play_name", "__progress"]:
            # Any failure/unreachable colors the whole play red.
            failures = entry["__failed"] + entry["__unreachable"]
            if failures:
                color = 9
            elif entry["__ok"]:
                color = 10
            else:
                color = 8
        elif colname == "__changed":
            color = 11
        else:
            # colname[2:] strips the "__" prefix to match RESULT_TO_COLOR.
            color = get_color(colname[2:])
        if colname == "__progress" and entry["__progress"].strip().lower() == "complete":
            decoration = curses.A_BOLD
    # Rows containing "task" come from the per-play task list menu.
    elif "task" in entry:
        if entry["__result"].lower() == "__in_progress":
            color = get_color(entry["__result"])
        elif colname in ["__result", "__host", "__number", "__task", "__task_action"]:
            color = get_color(entry["__result"])
        elif colname == "__changed":
            if colval is True:
                color = 11
            else:
                color = get_color(entry["__result"])
        elif colname == "__duration":
            color = 12
    return color, decoration
def content_heading(obj: Any, screen_w: int) -> Union[CursesLines, None]:
    """create a heading for some piece of content showing

    :param obj: The content going to be shown
    :param screen_w: The current screen width
    :return: The heading (3 lines: play banner, task banner, result line),
        or None when *obj* is not a task dict
    """
    if isinstance(obj, dict) and "task" in obj:
        # PLAY banner, padded to the full screen width with stars.
        detail = f"PLAY [{obj['play']}:{obj['__number']}] "
        stars = "*" * (screen_w - len(detail))
        line_1 = CursesLine(
            (CursesLinePart(column=0, string=detail + stars, color=0, decoration=0),),
        )
        # TASK banner, same star padding.
        detail = f"TASK [{obj['task']}] "
        stars = "*" * (screen_w - len(detail))
        line_2 = CursesLine(
            (CursesLinePart(column=0, string=detail + stars, color=0, decoration=0),),
        )
        # Result line: CHANGED overrides the per-result color mapping.
        if obj["__changed"] is True:
            color = 11
            res = "CHANGED"
        else:
            color = next((x[1] for x in RESULT_TO_COLOR if re.match(x[0], obj["__result"])), 0)
            res = obj["__result"]
        if "res" in obj and "msg" in obj["res"]:
            # Collapse the task message onto one line.
            msg = str(obj["res"]["msg"]).replace("\n", " ").replace("\r", "")
        else:
            msg = ""
        string = f"{res}: [{obj['__host']}] {msg}"
        string = string + (" " * (screen_w - len(string) + 1))
        line_3 = CursesLine(
            (CursesLinePart(column=0, string=string, color=color, decoration=curses.A_UNDERLINE),),
        )
        return CursesLines((line_1, line_2, line_3))
    return None
def filter_content_keys(obj: Dict[Any, Any]) -> Dict[Any, Any]:
    """Strip internal bookkeeping keys before content is shown.

    Keys that start with an underscore or end with ``uuid`` are dropped.
    """
    filtered = {}
    for key, value in obj.items():
        if key.startswith("_") or key.endswith("uuid"):
            continue
        filtered[key] = value
    return filtered
# Column keys rendered in the plays menu; the "__" values are computed by
# Action._play_stats / Action._handle_message.
PLAY_COLUMNS = [
    "__play_name",
    "__ok",
    "__changed",
    "__unreachable",
    "__failed",
    "__skipped",
    "__ignored",
    "__in_progress",
    "__task_count",
    "__progress",
]

# Column keys rendered in the per-play task list menu.
TASK_LIST_COLUMNS = [
    "__result",
    "__host",
    "__number",
    "__changed",
    "__task",
    "__task_action",
    "__duration",
]
@actions.register
class Action(ActionBase):
    # pylint: disable=too-many-instance-attributes
    """:run"""

    # Kegex routing ``:r``/``:run`` (with optional trailing parameters,
    # captured as ``params_run``) to this action. Verbose mode ((?x))
    # ignores the whitespace inside the pattern.
    KEGEX = r"""(?x)
            ^
            (?P<run>r(?:un)?
            (\s(?P<params_run>.*))?)
            $"""
def __init__(self, args: ApplicationConfiguration):
    """Initialize the ``:run`` action.

    :param args: The current settings for the application
    """
    super().__init__(args=args, logger_name=__name__, name="run")
    self._subaction_type: str
    # (status text, curses color) raised by play events (e.g. WARNINGS/ERROR).
    self._msg_from_plays: Tuple[Optional[str], Optional[int]] = (None, None)
    # Events produced by ansible-runner are drained from this queue.
    self._queue: Queue = Queue()
    self.runner: CommandAsync
    self._runner_finished: bool
    self._auto_scroll = False
    #: Flag when the first message is received from runner
    self._first_message_received: bool = False
    # Root menu step listing plays; task menus are pushed on top of it.
    self._plays = Step(
        name="plays",
        step_type="menu",
        columns=PLAY_COLUMNS,
        value=[],
        show_func=self._play_stats,
        select_func=self._task_list_for_play,
    )
    self._task_list_columns: List[str] = TASK_LIST_COLUMNS
    self._content_key_filter: Callable = filter_content_keys
@property
def mode(self):
    """if mode == stdout and playbook artifact creation is enabled
    run in interactive mode, but print stdout"""
    # ``replay`` never re-runs the playbook, so it produces no artifact.
    if all(
        (
            self._args.mode == "stdout",
            self._args.playbook_artifact_enable,
            self._args.app != "replay",
        ),
    ):
        return "stdout_w_artifact"
    return self._args.mode
def run_stdout(self) -> RunStdoutReturn:
    """Execute the ``run``/``replay`` request for mode stdout.

    :returns: The return code from the runner invocation, along with a message to review the
        logs if not 0.
    """
    if self._args.app == "replay":
        successful: bool = self._init_replay()
        if successful:
            return RunStdoutReturn(message="", return_code=0)
        return RunStdoutReturn(message="Please review the log for errors.", return_code=1)
    self._logger.debug("playbook requested in interactive mode")
    self._subaction_type = "playbook"
    self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
    self._run_runner()
    # Poll the event queue until the runner reports completion.
    while True:
        self._dequeue()
        if self.runner.finished:
            if self._args.playbook_artifact_enable:
                self.write_artifact()
            self._logger.debug("runner finished")
            break
        # Sleep briefly to prevent 100% CPU utilization
        # in mode stdout, the delay introduced by the curses key read is not present
        time.sleep(0.01)
    return_code = self.runner.ansible_runner_instance.rc
    if return_code != 0:
        return RunStdoutReturn(
            message="Please review the log for errors.",
            return_code=return_code,
        )
    return RunStdoutReturn(message="", return_code=return_code)
def run(self, interaction: Interaction, app: AppPublic) -> Union[Interaction, None]:
    """run :run or :replay

    :param interaction: The interaction from the user
    :param app: The app instance
    :return: The pending interaction or none
    """
    self._prepare_to_run(app, interaction)
    if interaction.action.match.groupdict().get("run"):
        self._logger.debug("run requested in interactive mode")
        self._subaction_type = "run"
        # A short uuid suffix disambiguates concurrent run loggers/names.
        str_uuid = str(uuid.uuid4())
        self._logger = logging.getLogger(f"{__name__}_{str_uuid[-4:]}")
        self._name = f"run_{str_uuid[-4:]}"
        initialized = self._init_run()
    elif interaction.action.match.groupdict().get("replay"):
        self._logger.debug("replay requested in interactive mode")
        self._subaction_type = "replay"
        self._name = "replay"
        self._logger = logging.getLogger(f"{__name__}_{self._subaction_type}")
        initialized = self._init_replay()
    if not initialized:
        self._prepare_to_exit(interaction)
        return None
    self.steps.append(self._plays)
    # Show a notification until the first the first message from the queue is processed
    if self._subaction_type == "run":
        messages = ["Preparing for automation, please wait..."]
        notification = nonblocking_notification(messages=messages)
        interaction.ui.show(notification)
        while not self._first_message_received:
            self.update()
    # Main interactive loop: render the current step until the user backs
    # all the way out or quits.
    while True:
        self.update()
        self._take_step()
        if not self.steps:
            if not self._runner_finished:
                self._logger.error("Can not step back while playbook in progress, :q! to exit")
                self.steps.append(self._plays)
            else:
                self._logger.debug(
                    "No steps remaining for '%s' returning to calling app",
                    self._name,
                )
                break
        if self.steps.current.name == "quit":
            if self._args.app == "replay":
                self._prepare_to_exit(interaction)
                return self.steps.current
            done = self._prepare_to_quit(self.steps.current)
            if done:
                self._prepare_to_exit(interaction)
                return self.steps.current
            # Quit refused (playbook still running): drop the quit step.
            self.steps.back_one()
    self._prepare_to_exit(interaction)
    return None
# pylint: disable=too-many-branches
def _init_run(self) -> bool:
    """in the case of :run, check the user input

    :return: True when the runner was started, False when cancelled
    """
    # Ensure the playbook and inventory are valid
    self._update_args(
        ["run"] + shlex.split(self._interaction.action.match.groupdict()["params_run"] or ""),
    )
    if isinstance(self._args.playbook, str):
        playbook_valid = os.path.exists(self._args.playbook)
    else:
        playbook_valid = False
    if isinstance(self._args.inventory, list):
        inventory_valid = all((os.path.exists(inv) for inv in self._args.inventory))
    else:
        # Permit running without an inventory
        inventory_valid = True
    if not all((playbook_valid, inventory_valid)):
        # Prompt for corrected paths and rebuild the command line.
        populated_form = self._prompt_for_playbook()
        if populated_form["cancelled"]:
            return False
        new_cmd = ["run"]
        new_cmd.append(populated_form["fields"]["playbook"]["value"])
        for field in populated_form["fields"].values():
            if field["name"].startswith("inv_") and field["value"] != "":
                new_cmd.extend(["-i", field["value"]])
        if populated_form["fields"]["cmdline"]["value"]:
            new_cmd.extend(shlex.split(populated_form["fields"]["cmdline"]["value"]))
        # Parse as if provided from the cmdline
        self._update_args(new_cmd)
    self._run_runner()
    self._logger.info("Run initialized and playbook started.")
    return True
def _init_replay(self) -> bool:
    """in the case of :replay, replay the artifact

    check for a version, to be safe;
    copy the calling app args as our own so they can be updated safely,
    with a uuid attached to the name

    :return: True when the artifact was loaded, False otherwise
    """
    self._logger.debug("Starting replay artifact request with mode %s", self.mode)
    if self.mode == "interactive":
        self._update_args(
            ["replay"]
            + shlex.split(self._interaction.action.match.groupdict()["params_replay"] or ""),
        )
    artifact_file = self._args.playbook_artifact_replay
    if isinstance(self._args.playbook_artifact_replay, str):
        artifact_valid = os.path.exists(self._args.playbook_artifact_replay)
    else:
        artifact_valid = False
    if not artifact_valid and self.mode == "interactive":
        # Prompt for a corrected artifact path.
        populated_form = self._prompt_for_artifact(artifact_file=artifact_file)
        if populated_form["cancelled"]:
            return False
        artifact_file = populated_form["fields"]["artifact_file"]["value"]
    try:
        with open(artifact_file, encoding="utf-8") as fh:
            data = json.load(fh)
    except json.JSONDecodeError as exc:
        self._logger.debug("json decode error: %s", str(exc))
        self._logger.error("Unable to parse artifact file")
        return False
    version = data.get("version", "")
    # Only the 1.y.z artifact schema is supported.
    if version.startswith("1."):
        try:
            stdout = data["stdout"]
            if self.mode == "interactive":
                self._plays.value = data["plays"]
                self._interaction.ui.update_status(data["status"], data["status_color"])
                self.stdout = stdout
            else:
                # stdout mode: just print the recorded output.
                for line in data["stdout"]:
                    if self._args.display_color is True:
                        print(line)
                    else:
                        print(remove_ansi(line))
        except KeyError as exc:
            self._logger.debug("missing keys from artifact file")
            self._logger.debug("error was: %s", str(exc))
            return False
    else:
        self._logger.error(
            "Incompatible artifact version, got '%s', compatible = '1.y.z'",
            version,
        )
        return False
    # Nothing is running when replaying, so mark the runner finished.
    self._runner_finished = True
    self._logger.debug("Completed replay artifact request with mode %s", self.mode)
    return True
def _prompt_for_artifact(self, artifact_file: str) -> Dict[Any, Any]:
    """prompt for a valid artifact file

    :param artifact_file: The path to prepopulate in the form
    :return: The populated form as a dict (keyed on field name)
    """
    if not isinstance(artifact_file, str):
        artifact_file = ""
    FType = Dict[str, Any]
    form_dict: FType = {
        "title": "Artifact file not found, please confirm the following",
        "fields": [],
    }
    form_field = {
        "name": "artifact_file",
        "prompt": "Path to artifact file",
        "type": "text_input",
        "validator": {"name": "valid_file_path"},
        "pre_populate": artifact_file,
    }
    form_dict["fields"].append(form_field)
    form = dict_to_form(form_dict)
    # Show the form and block until the user submits or cancels.
    self._interaction.ui.show(form)
    populated_form = form_to_dict(form, key_on_name=True)
    return populated_form
def _prompt_for_playbook(self) -> Dict[Any, Any]:
    """prepopulate a form to confirm the playbook details

    :return: The populated form as a dict (keyed on field name)
    """
    self._logger.debug("Inventory/Playbook not set, provided, or valid, prompting")
    if isinstance(self._args.playbook, str):
        playbook = self._args.playbook
    else:
        playbook = ""
    if isinstance(self._args.inventory, list):
        inventory = self._args.inventory
    else:
        # Offer three empty inventory slots by default.
        inventory = ["", "", ""]
    if isinstance(self._args.cmdline, list):
        cmdline = " ".join(self._args.cmdline)
    else:
        cmdline = ""
    FType = Dict[str, Any]
    form_dict: FType = {
        "title": "Inventory and/or playbook not found, please confirm the following",
        "fields": [],
    }
    form_field = {
        "name": "playbook",
        "pre_populate": playbook,
        "prompt": "Path to playbook",
        "type": "text_input",
        "validator": {"name": "valid_file_path"},
    }
    form_dict["fields"].append(form_field)
    # One text field per inventory source; names inv_0, inv_1, ... are
    # matched by the "inv_" prefix in _init_run.
    for idx, inv in enumerate(inventory):
        form_field = {
            "name": f"inv_{idx}",
            "pre_populate": inv,
            "prompt": "Inventory source",
            "type": "text_input",
            "validator": {"name": "none"},
        }
        form_dict["fields"].append(form_field)
    form_field = {
        "name": "cmdline",
        "pre_populate": cmdline,
        "prompt": "Additional command line parameters",
        "type": "text_input",
        "validator": {"name": "none"},
    }
    form_dict["fields"].append(form_field)
    form = dict_to_form(form_dict)
    self._interaction.ui.show(form)
    populated_form = form_to_dict(form, key_on_name=True)
    return populated_form
def _take_step(self) -> None:
    """run the current step on the stack

    Renders the current step (interaction, menu or content) and pushes
    the resulting step, or pops when the user backed out.
    """
    result = None
    if isinstance(self.steps.current, Interaction):
        result = run_action(self.steps.current.name, self.app, self.steps.current)
    elif isinstance(self.steps.current, Step):
        if self.steps.current.show_func:
            # e.g. _play_stats refreshes the menu values before display.
            self.steps.current.show_func()
        if self.steps.current.type == "menu":
            new_scroll = len(self.steps.current.value)
            if self._auto_scroll:
                self._interaction.ui.scroll(new_scroll)
            result = self._interaction.ui.show(
                obj=self.steps.current.value,
                columns=self.steps.current.columns,
                color_menu_item=color_menu,
            )
            # The user scrolling up disables follow-mode; scrolling to
            # the bottom re-enables it.
            if self._interaction.ui.scroll() < new_scroll and self._auto_scroll:
                self._logger.debug("autoscroll disabled")
                self._auto_scroll = False
            elif self._interaction.ui.scroll() >= new_scroll and not self._auto_scroll:
                self._logger.debug("autoscroll enabled")
                self._auto_scroll = True
        elif self.steps.current.type == "content":
            result = self._interaction.ui.show(
                obj=self.steps.current.value,
                index=self.steps.current.index,
                content_heading=content_heading,
                filter_content_keys=self._content_key_filter,
            )
    if result is None:
        self.steps.back_one()
    else:
        self.steps.append(result)
def _run_runner(self) -> None:
    """spin up runner

    Builds the ansible-runner keyword arguments from the current settings
    and starts the playbook asynchronously.
    """
    executable_cmd: Optional[str]
    # stdout_w_artifact still needs the interactive event stream so the
    # artifact can be assembled while stdout is printed.
    if self.mode == "stdout_w_artifact":
        mode = "interactive"
    else:
        mode = self.mode
    if isinstance(self._args.set_environment_variable, dict):
        set_env_vars = {**self._args.set_environment_variable}
    else:
        set_env_vars = {}
    if self._args.display_color is False:
        set_env_vars["ANSIBLE_NOCOLOR"] = "1"
    kwargs = {
        "container_engine": self._args.container_engine,
        "host_cwd": os.getcwd(),
        "execution_environment_image": self._args.execution_environment_image,
        "execution_environment": self._args.execution_environment,
        "inventory": self._args.inventory,
        "navigator_mode": mode,
        "pass_environment_variable": self._args.pass_environment_variable,
        "set_environment_variable": set_env_vars,
        "private_data_dir": self._args.ansible_runner_artifact_dir,
        "rotate_artifacts": self._args.ansible_runner_rotate_artifacts_count,
        "timeout": self._args.ansible_runner_timeout,
    }
    if isinstance(self._args.playbook, str):
        kwargs.update({"playbook": self._args.playbook})
    if isinstance(self._args.execution_environment_volume_mounts, list):
        kwargs.update(
            {"container_volume_mounts": self._args.execution_environment_volume_mounts},
        )
    if isinstance(self._args.container_options, list):
        kwargs.update({"container_options": self._args.container_options})
    if self._args.execution_environment:
        # Inside an execution environment the command is on the PATH.
        executable_cmd = "ansible-playbook"
    else:
        executable_cmd = shutil.which("ansible-playbook")
        if not executable_cmd:
            msg = "'ansible-playbook' executable not found"
            self._logger.error(msg)
            raise RuntimeError(msg)
    pass_through_arg = []
    if self._args.help_playbook is True:
        pass_through_arg.append("--help")
    if isinstance(self._args.cmdline, list):
        pass_through_arg.extend(self._args.cmdline)
    kwargs.update({"cmdline": pass_through_arg})
    self.runner = CommandAsync(executable_cmd=executable_cmd, queue=self._queue, **kwargs)
    self.runner.run()
    self._runner_finished = False
    self._logger.debug("runner requested to start")
def _dequeue(self) -> None:
"""Drain the runner queue"""
drain_count = 0
while not self._queue.empty():
if not self._first_message_received:
self._first_message_received = True
message = self._queue.get()
self._handle_message(message)
drain_count += 1
if drain_count:
self._logger.debug("Drained %s events", drain_count)
def _handle_message(self, message: dict) -> None:
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-nested-blocks
    # pylint: disable=too-many-statements
    """Handle a runner message

    :param message: The message from runner
    :type message: dict
    """
    try:
        event = message["event"]
    except KeyError:
        error = f"Unhandled message from runner queue, discarded: {message}"
        self._logger.critical(error)
    else:
        # Accumulate all stdout for the artifact / :stdout view.
        if "stdout" in message and message["stdout"]:
            self.stdout.extend(message["stdout"].splitlines())
            if self.mode == "stdout_w_artifact":
                print(message["stdout"])
        # Surface WARNING/ERROR text in the UI status line.
        if event in ["verbose", "error"]:
            if "ERROR!" in message["stdout"]:
                self._msg_from_plays = ("ERROR", 9)
                if self.mode == "interactive":
                    self._notify_error(message["stdout"])
            elif "WARNING" in message["stdout"]:
                self._msg_from_plays = ("WARNINGS", 13)
        # A new play: start tracking it and its tasks.
        if event == "playbook_on_play_start":
            play = message["event_data"]
            play["__play_name"] = play["name"]
            play["tasks"] = []
            self._plays.value.append(play)
        # Per-host task events: runner_on_start / runner_on_ok / ...
        if event.startswith("runner_on_"):
            runner_event = event.split("_")[2]
            task = message["event_data"]
            # Locate the play this task belongs to by uuid.
            play_id = next(
                idx for idx, p in enumerate(self._plays.value) if p["uuid"] == task["play_uuid"]
            )
            if runner_event in ["ok", "skipped", "unreachable", "failed"]:
                # A failure with ignore_errors counts as ignored.
                if runner_event == "failed" and task["ignore_errors"]:
                    result = "ignored"
                else:
                    result = runner_event
                task["__task"] = task["task"]
                task["__result"] = result.upper()
                task["__changed"] = task.get("res", {}).get("changed", False)
                if isinstance(task["duration"], (int, float)):
                    task["__duration"] = human_time(seconds=round_half_up(task["duration"]))
                else:
                    msg = (
                        f"Task duration for '{task['task']}' was type {type(task['duration'])},"
                        " set to 0"
                    )
                    self._logger.debug(msg)
                    task["__duration"] = 0
                # Merge the result into the matching in-progress entry
                # (matched on task uuid AND host).
                task_id = None
                for idx, play_task in enumerate(self._plays.value[play_id]["tasks"]):
                    if task["task_uuid"] == play_task["task_uuid"]:
                        if task["host"] == play_task["host"]:
                            task_id = idx
                            break
                if task_id is not None:
                    self._plays.value[play_id]["tasks"][task_id].update(task)
            elif runner_event == "start":
                # A task just started on a host: record a placeholder row.
                task["__host"] = task["host"]
                task["__result"] = "IN_PROGRESS"
                task["__changed"] = "unknown"
                task["__duration"] = None
                task["__number"] = len(self._plays.value[play_id]["tasks"])
                task["__task"] = task["task"]
                task["__task_action"] = task["task_action"]
                self._plays.value[play_id]["tasks"].append(task)
def _play_stats(self) -> None:
    """Calculate the play's stats based
    on its tasks
    """
    for idx, play in enumerate(self._plays.value):
        # Count tasks per result; tot[2:] strips the "__" prefix to
        # compare against the lower-cased __result value.
        total = ["__ok", "__skipped", "__failed", "__unreachable", "__ignored", "__in_progress"]
        self._plays.value[idx].update(
            {
                tot: len([t for t in play["tasks"] if t["__result"].lower() == tot[2:]])
                for tot in total
            },
        )
        self._plays.value[idx]["__changed"] = len(
            [t for t in play["tasks"] if t["__changed"] is True],
        )
        task_count = len(play["tasks"])
        self._plays.value[idx]["__task_count"] = task_count
        completed = task_count - self._plays.value[idx]["__in_progress"]
        if completed:
            new = floor((completed / task_count * 100))
            # Progress never moves backwards even if new tasks appear.
            current = self._plays.value[idx].get("__percent_complete", 0)
            self._plays.value[idx]["__percent_complete"] = max(new, current)
            self._plays.value[idx]["__progress"] = str(max(new, current)) + "%"
        else:
            self._plays.value[idx]["__progress"] = "0%"
def _prepare_to_quit(self, interaction: Interaction) -> bool:
    """Looks like we're headed out of here

    :param interaction: the quit interaction
    :return: a bool indicating whether of not it's safe to exit
    """
    self.update()
    if self.runner is not None and not self.runner.finished:
        # ``:q!``/``:quit!`` forces a shutdown of the running playbook.
        if interaction.action.match.groupdict()["exclamation"]:
            self._logger.debug("shutting down runner")
            self.runner.cancelled = True
            # Busy-wait until the runner acknowledges cancellation.
            while not self.runner.finished:
                pass
            self.write_artifact()
            return True
        self._logger.warning("Quit requested but playbook running, try q! or quit!")
        return False
    self._logger.debug("runner not running")
    return True
def _task_list_for_play(self) -> Step:
    """Build the task menu for the play currently selected in the plays menu.

    :return: The menu step
    """
    selected_play = self.steps.current.selected
    return Step(
        name="task_list",
        step_type="menu",
        columns=self._task_list_columns,
        select_func=self._task_from_task_list,
        value=selected_play["tasks"],
    )
def _task_from_task_list(self) -> Step:
    """Build the content step for the task selected in the task menu.

    :return: content which shows a single task
    """
    current = self.steps.current
    return Step(name="task", step_type="content", index=current.index, value=current.value)
def update(self) -> None:
    """Drain the queue, set the status and write the artifact if needed"""
    # let the calling app update as well
    self._calling_app.update()
    # ``runner`` only exists once a run/replay has been initialized.
    if hasattr(self, "runner"):
        self._dequeue()
        self._set_status()
        if self.runner.finished and not self._runner_finished:
            self._logger.debug("runner finished")
            self._logger.info("Playbook complete")
            self.write_artifact()
            # Guard so the artifact is written exactly once.
            self._runner_finished = True
def _get_status(self) -> Tuple[str, int]:
"""Get the status and color
:return: status string, status color
"""
status = ""
status_color = 0
if self.runner.status:
if self.runner and self.runner.finished and self.runner.status:
status = self.runner.status
if self.runner.status == "failed":
status_color = 9
else:
status_color = self._msg_from_plays[1] or 10
else:
if self._msg_from_plays[0] is not None and self._msg_from_plays[1] is not None:
status = self._msg_from_plays[0]
status_color = self._msg_from_plays[1]
else:
status = self.runner.status
status_color = 10
return status, status_color
def _set_status(self) -> None:
"""Set the UI status"""
status, status_color = self._get_status()
self._interaction.ui.update_status(status, status_color)
def write_artifact(self, filename: Optional[str] = None) -> None:
    """Write the artifact

    :param filename: The file to write to; when None the configured
        ``playbook_artifact_save_as`` template is used
    :type filename: str
    """
    # NOTE: ``or`` binds looser than ``and`` — an explicit filename always
    # wins; otherwise both artifact-enable and not-help must hold.
    if (
        filename
        or self._args.playbook_artifact_enable is True
        and self._args.help_playbook is not True
    ):
        filename = filename or self._args.playbook_artifact_save_as
        # Resolve the {playbook_dir}/{playbook_name}/{ts_utc} placeholders.
        filename = filename.format(
            playbook_dir=os.path.dirname(self._args.playbook),
            playbook_name=os.path.splitext(os.path.basename(self._args.playbook))[0],
            ts_utc=datetime.datetime.now(tz=datetime.timezone.utc).isoformat(),
        )
        self._logger.debug("Formatted artifact file name set to %s", filename)
        filename = abs_user_path(filename)
        self._logger.debug("Resolved artifact file name set to %s", filename)
        status, status_color = self._get_status()
        try:
            os.makedirs(os.path.dirname(filename), exist_ok=True)
            with open(filename, "w", encoding="utf-8") as fh:
                artifact = {
                    "version": "1.0.0",
                    "plays": self._plays.value,
                    "stdout": self.stdout,
                    "status": status,
                    "status_color": status_color,
                }
                json_dump(artifact, fh)
            self._logger.info("Saved artifact as %s", filename)
        except (IOError, OSError) as exc:
            # Bug fix: the original f-string read ``error: f{str(exc)}``,
            # leaving a stray literal 'f' in the logged message.
            error = (
                f"Saving the artifact file failed, resulted in the following error: {exc}"
            )
            self._logger.error(error)
def rerun(self) -> None:
    """rerun the current playbook

    since we're not reinstating run,
    drain the queue, clear the steps, reset the index, etc
    """
    if self._subaction_type == "run":
        if self.runner.finished:
            # Reset all per-run state before starting a fresh runner.
            self._plays.value = []
            self._plays.index = None
            self._msg_from_plays = (None, None)
            self._queue.queue.clear()
            self.stdout = []
            self._run_runner()
            self.steps.clear()
            self.steps.append(self._plays)
            self._logger.debug("Playbook rerun triggered")
        else:
            self._logger.warning("Playbook rerun ignored, current playbook not complete")
    elif self._subaction_type == "replay":
        self._logger.error("No rerun available when artifact is loaded")
    else:
        self._logger.error("sub-action type '%s' is invalid", self._subaction_type)
def _notify_error(self, message: str):
    """show a blocking warning

    :param message: The raw (possibly ANSI-colored) error text from runner
    """
    warn_msg = ["Errors were encountered while running the playbook:"]
    messages = remove_ansi(message).splitlines()
    # Ellipsis hints that the text shown here may be truncated.
    messages[-1] += "..."
    warn_msg.extend(messages)
    warn_msg += ["[HINT] To see the full error message try ':stdout'"]
    warn_msg += ["[HINT] After it's fixed, try to ':rerun' the playbook"]
    warning = warning_notification(warn_msg)
    self._interaction.ui.show(warning)
| [
"logging.getLogger",
"os.path.exists",
"math.floor",
"shlex.split",
"shutil.which",
"re.match",
"time.sleep",
"os.getcwd",
"uuid.uuid4",
"os.path.dirname",
"datetime.datetime.now",
"os.path.basename",
"json.load",
"queue.Queue"
] | [((5542, 5549), 'queue.Queue', 'Queue', ([], {}), '()\n', (5547, 5549), False, 'from queue import Queue\n'), ((7222, 7277), 'logging.getLogger', 'logging.getLogger', (['f"""{__name__}_{self._subaction_type}"""'], {}), "(f'{__name__}_{self._subaction_type}')\n", (7239, 7277), False, 'import logging\n'), ((7725, 7741), 'time.sleep', 'time.sleep', (['(0.01)'], {}), '(0.01)\n', (7735, 7741), False, 'import time\n'), ((8616, 8664), 'logging.getLogger', 'logging.getLogger', (['f"""{__name__}_{str_uuid[-4:]}"""'], {}), "(f'{__name__}_{str_uuid[-4:]}')\n", (8633, 8664), False, 'import logging\n'), ((11067, 11102), 'os.path.exists', 'os.path.exists', (['self._args.playbook'], {}), '(self._args.playbook)\n', (11081, 11102), False, 'import os\n'), ((12951, 13002), 'os.path.exists', 'os.path.exists', (['self._args.playbook_artifact_replay'], {}), '(self._args.playbook_artifact_replay)\n', (12965, 13002), False, 'import os\n'), ((19847, 19858), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (19856, 19858), False, 'import os\n'), ((21051, 21083), 'shutil.which', 'shutil.which', (['"""ansible-playbook"""'], {}), "('ansible-playbook')\n", (21063, 21083), False, 'import shutil\n'), ((1449, 1469), 're.match', 're.match', (['x[0]', 'word'], {}), '(x[0], word)\n', (1457, 1469), False, 'import re\n'), ((8575, 8587), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8585, 8587), False, 'import uuid\n'), ((8997, 9052), 'logging.getLogger', 'logging.getLogger', (['f"""{__name__}_{self._subaction_type}"""'], {}), "(f'{__name__}_{self._subaction_type}')\n", (9014, 9052), False, 'import logging\n'), ((13450, 13463), 'json.load', 'json.load', (['fh'], {}), '(fh)\n', (13459, 13463), False, 'import json\n'), ((26636, 26671), 'math.floor', 'floor', (['(completed / task_count * 100)'], {}), '(completed / task_count * 100)\n', (26641, 26671), False, 'from math import floor\n'), ((11239, 11258), 'os.path.exists', 'os.path.exists', (['inv'], {}), '(inv)\n', (11253, 11258), False, 'import os\n'), 
((11973, 12030), 'shlex.split', 'shlex.split', (["populated_form['fields']['cmdline']['value']"], {}), "(populated_form['fields']['cmdline']['value'])\n", (11984, 12030), False, 'import shlex\n'), ((30696, 30732), 'os.path.dirname', 'os.path.dirname', (['self._args.playbook'], {}), '(self._args.playbook)\n', (30711, 30732), False, 'import os\n'), ((31235, 31260), 'os.path.dirname', 'os.path.dirname', (['filename'], {}), '(filename)\n', (31250, 31260), False, 'import os\n'), ((3812, 3843), 're.match', 're.match', (['x[0]', "obj['__result']"], {}), "(x[0], obj['__result'])\n", (3820, 3843), False, 'import re\n'), ((30781, 30818), 'os.path.basename', 'os.path.basename', (['self._args.playbook'], {}), '(self._args.playbook)\n', (30797, 30818), False, 'import os\n'), ((30847, 30894), 'datetime.datetime.now', 'datetime.datetime.now', ([], {'tz': 'datetime.timezone.utc'}), '(tz=datetime.timezone.utc)\n', (30868, 30894), False, 'import datetime\n')] |
# Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import textwrap
import json
import sys
import os
from cement.core import controller
from ebcli import __version__
from ebcli.core.ebglobals import Constants
from ebcli.lib import elasticbeanstalk, utils
from ebcli.core import io, fileoperations
from ebcli.objects.exceptions import (
NoEnvironmentForBranchError,
PlatformWorkspaceNotSupportedError,
ApplicationWorkspaceNotSupportedError,
EBCLIException,
NotInitializedError
)
from ebcli.resources.strings import strings, flag_text
from ebcli.objects import region
from ebcli.operations import commonops
class AbstractBaseController(controller.CementBaseController):
"""
This is an abstract base class that is useless on its own, but used
by other classes to sub-class from and to share common commands and
arguments.
"""
class Meta:
    # cement controller metadata shared by every sub-classed command;
    # subclasses override label/usage/arguments as needed.
    label = 'abstract'
    stacked_on = 'base'
    stacked_type = 'nested'
    arguments = [
        (['environment_name'], dict(action='store', nargs='?',
                                    default=[],
                                    help=flag_text['general.env'])),
    ]
    epilog = ''
    usage = 'eb {cmd} <environment_name> [options ...]'
def do_command(self):
    # No-op hook; concrete controllers override this with the command body.
    pass
@classmethod
def validate_workspace(cls):
    """Fail fast when the command is run in an unsupported workspace.

    Skipped for --modules/--help invocations and the platform-list
    controllers, which are valid everywhere.
    """
    workspace_type = fileoperations.get_workspace_type(None)
    is_platform_workspace_only_command = cls.Meta.__dict__.get(
        'is_platform_workspace_only_command'
    )
    requires_directory_initialization = cls.Meta.__dict__.get(
        'requires_directory_initialization'
    )
    if '--modules' in sys.argv:
        pass
    elif '--help' in sys.argv:
        pass
    elif cls.__name__ == 'PlatformListController' or cls.__name__ == 'EBPListController':
        pass
    elif requires_directory_initialization and not workspace_type:
        # No workspace at all: the directory was never `eb init`-ed.
        raise NotInitializedError(strings['exit.notsetup'])
    elif is_platform_workspace_only_command:
        if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
            raise ApplicationWorkspaceNotSupportedError(
                strings['exit.applicationworkspacenotsupported']
            )
@controller.expose(hide=True)
def default(self):
    """
    This command will be shared within all controllers that sub-class
    from here. It can also be overridden in the sub-class
    """
    self.validate_workspace()
    self.do_command()
    # After a successful command, nudge about available CLI upgrades.
    self.check_for_cli_update(__version__)
def check_workspace_type(self, expected_type):
workspace_type = fileoperations.get_workspace_type()
if workspace_type != expected_type:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise PlatformWorkspaceNotSupportedError(
strings['exit.platformworkspacenotsupported']
)
if Constants.WorkSpaceTypes.APPLICATION == workspace_type:
raise ApplicationWorkspaceNotSupportedError(
strings['exit.applicationworkspacenotsupported']
)
def check_for_cli_update(self, version):
label = self.Meta.label
if label in ('create', 'deploy', 'status', 'clone', 'config'):
if cli_update_exists(version):
if self.check_install_script_used():
io.log_alert(strings['base.update_available_script_install'])
else:
io.log_alert(strings['base.update_available'])
def get_app_name(self):
app_name = fileoperations.get_application_name()
return app_name
def get_env_name(self, cmd_example=None, noerror=False, varname='environment_name'):
env_name = getattr(self.app.pargs, varname, None)
if not env_name:
env_name = commonops. \
get_current_branch_environment()
workspace_type = fileoperations.get_workspace_type(Constants.WorkSpaceTypes.APPLICATION)
if not env_name:
if Constants.WorkSpaceTypes.PLATFORM == workspace_type:
raise EBCLIException(strings['platform.nobuilderenv'])
if noerror:
return None
if not cmd_example:
message = strings['branch.noenv'].replace('{cmd}',
self.Meta.label)
else:
message = strings['branch.noenv'].replace('eb {cmd}',
cmd_example)
io.log_error(message)
raise NoEnvironmentForBranchError()
return env_name
def check_install_script_used(self):
return '.ebcli-virtual-env' in os.path.abspath(__file__)
@classmethod
def _add_to_handler(cls, handler):
handler.register(cls)
@property
def _help_text(self):
"""
Returns the help text displayed when for the commands of the type `eb <command> <subcommand>`
except where <command> is "platform".
"""
longest = 0
def pad(label):
padlength = longest - len(label) + 2
padding = ' '
if padlength < 0:
for x in range(0, longest):
padding += ' '
else:
for x in range(0, padlength):
padding += ' '
return padding
help_txt = ''
for label in self._visible_commands:
if len(label) > longest:
longest = len(label)
for label in self._visible_commands:
cmd = self._dispatch_map[label]
cmd_txt = ' '
cmd_name = label
cmd_aliases = cmd['aliases']
if len(cmd_aliases) > 0 and cmd['aliases_only']:
cmd_name = cmd_aliases.pop(0)
cmd_txt += '{}'.format(cmd_name)
if cmd['help']:
cmd_txt += '{}{}'.format(pad(cmd_txt), cmd['help'])
if len(cmd_aliases) > 0:
cmd_txt += '\n{}(alias: {})'.format(pad(''), ', '.join(cmd_aliases))
cmd_txt += '\n'
help_txt += cmd_txt
if len(help_txt) > 0:
txt = '''{}
commands:
{}
'''.format(self._meta.description, help_txt)
else:
txt = self._meta.description
return textwrap.dedent(txt)
def cli_update_exists(current_version):
    """Return True if PyPI reports an awsebcli version different from
    `current_version`.

    Best-effort: any failure (network error, bad JSON, unexpected payload
    shape) is treated as "no update available" rather than propagated —
    an update check must never break the CLI itself.
    """
    try:
        data = utils.get_data_from_url(
            'https://pypi.python.org/pypi/awsebcli/json', timeout=5)
        data = json.loads(data)
        latest = data['info']['version']
        return latest != current_version
    except Exception:
        # Deliberately broad, but `except Exception` (unlike the previous
        # bare `except:`) still lets KeyboardInterrupt/SystemExit through.
        return False
| [
"textwrap.dedent",
"ebcli.core.fileoperations.get_workspace_type",
"json.loads",
"ebcli.operations.commonops.get_current_branch_environment",
"ebcli.core.fileoperations.get_application_name",
"ebcli.objects.exceptions.NotInitializedError",
"os.path.abspath",
"ebcli.core.io.log_alert",
"ebcli.objects... | [((2820, 2848), 'cement.core.controller.expose', 'controller.expose', ([], {'hide': '(True)'}), '(hide=True)\n', (2837, 2848), False, 'from cement.core import controller\n'), ((1901, 1940), 'ebcli.core.fileoperations.get_workspace_type', 'fileoperations.get_workspace_type', (['None'], {}), '(None)\n', (1934, 1940), False, 'from ebcli.core import io, fileoperations\n'), ((3218, 3253), 'ebcli.core.fileoperations.get_workspace_type', 'fileoperations.get_workspace_type', ([], {}), '()\n', (3251, 3253), False, 'from ebcli.core import io, fileoperations\n'), ((4191, 4228), 'ebcli.core.fileoperations.get_application_name', 'fileoperations.get_application_name', ([], {}), '()\n', (4226, 4228), False, 'from ebcli.core import io, fileoperations\n'), ((4537, 4608), 'ebcli.core.fileoperations.get_workspace_type', 'fileoperations.get_workspace_type', (['Constants.WorkSpaceTypes.APPLICATION'], {}), '(Constants.WorkSpaceTypes.APPLICATION)\n', (4570, 4608), False, 'from ebcli.core import io, fileoperations\n'), ((6984, 7004), 'textwrap.dedent', 'textwrap.dedent', (['txt'], {}), '(txt)\n', (6999, 7004), False, 'import textwrap\n'), ((7071, 7156), 'ebcli.lib.utils.get_data_from_url', 'utils.get_data_from_url', (['"""https://pypi.python.org/pypi/awsebcli/json"""'], {'timeout': '(5)'}), "('https://pypi.python.org/pypi/awsebcli/json', timeout=5\n )\n", (7094, 7156), False, 'from ebcli.lib import elasticbeanstalk, utils\n'), ((7180, 7196), 'json.loads', 'json.loads', (['data'], {}), '(data)\n', (7190, 7196), False, 'import json\n'), ((4449, 4491), 'ebcli.operations.commonops.get_current_branch_environment', 'commonops.get_current_branch_environment', ([], {}), '()\n', (4489, 4491), False, 'from ebcli.operations import commonops\n'), ((5173, 5194), 'ebcli.core.io.log_error', 'io.log_error', (['message'], {}), '(message)\n', (5185, 5194), False, 'from ebcli.core import io, fileoperations\n'), ((5213, 5242), 'ebcli.objects.exceptions.NoEnvironmentForBranchError', 
'NoEnvironmentForBranchError', ([], {}), '()\n', (5240, 5242), False, 'from ebcli.objects.exceptions import NoEnvironmentForBranchError, PlatformWorkspaceNotSupportedError, ApplicationWorkspaceNotSupportedError, EBCLIException, NotInitializedError\n'), ((5349, 5374), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (5364, 5374), False, 'import os\n'), ((3388, 3474), 'ebcli.objects.exceptions.PlatformWorkspaceNotSupportedError', 'PlatformWorkspaceNotSupportedError', (["strings['exit.platformworkspacenotsupported']"], {}), "(strings[\n 'exit.platformworkspacenotsupported'])\n", (3422, 3474), False, 'from ebcli.objects.exceptions import NoEnvironmentForBranchError, PlatformWorkspaceNotSupportedError, ApplicationWorkspaceNotSupportedError, EBCLIException, NotInitializedError\n'), ((3601, 3693), 'ebcli.objects.exceptions.ApplicationWorkspaceNotSupportedError', 'ApplicationWorkspaceNotSupportedError', (["strings['exit.applicationworkspacenotsupported']"], {}), "(strings[\n 'exit.applicationworkspacenotsupported'])\n", (3638, 3693), False, 'from ebcli.objects.exceptions import NoEnvironmentForBranchError, PlatformWorkspaceNotSupportedError, ApplicationWorkspaceNotSupportedError, EBCLIException, NotInitializedError\n'), ((4725, 4773), 'ebcli.objects.exceptions.EBCLIException', 'EBCLIException', (["strings['platform.nobuilderenv']"], {}), "(strings['platform.nobuilderenv'])\n", (4739, 4773), False, 'from ebcli.objects.exceptions import NoEnvironmentForBranchError, PlatformWorkspaceNotSupportedError, ApplicationWorkspaceNotSupportedError, EBCLIException, NotInitializedError\n'), ((3992, 4053), 'ebcli.core.io.log_alert', 'io.log_alert', (["strings['base.update_available_script_install']"], {}), "(strings['base.update_available_script_install'])\n", (4004, 4053), False, 'from ebcli.core import io, fileoperations\n'), ((4096, 4142), 'ebcli.core.io.log_alert', 'io.log_alert', (["strings['base.update_available']"], {}), 
"(strings['base.update_available'])\n", (4108, 4142), False, 'from ebcli.core import io, fileoperations\n'), ((2499, 2544), 'ebcli.objects.exceptions.NotInitializedError', 'NotInitializedError', (["strings['exit.notsetup']"], {}), "(strings['exit.notsetup'])\n", (2518, 2544), False, 'from ebcli.objects.exceptions import NoEnvironmentForBranchError, PlatformWorkspaceNotSupportedError, ApplicationWorkspaceNotSupportedError, EBCLIException, NotInitializedError\n'), ((2687, 2779), 'ebcli.objects.exceptions.ApplicationWorkspaceNotSupportedError', 'ApplicationWorkspaceNotSupportedError', (["strings['exit.applicationworkspacenotsupported']"], {}), "(strings[\n 'exit.applicationworkspacenotsupported'])\n", (2724, 2779), False, 'from ebcli.objects.exceptions import NoEnvironmentForBranchError, PlatformWorkspaceNotSupportedError, ApplicationWorkspaceNotSupportedError, EBCLIException, NotInitializedError\n')] |
import unittest
import speak
class SpeakTests(unittest.TestCase):
    """
    Unit tests for the speak library.
    """
    def testHello(self):
        # helloworld() must return the canonical greeting verbatim.
        self.assertEqual("Hello World!", speak.helloworld())
    def testGoodbye(self):
        # goodbyeworld() must return the canonical farewell verbatim.
        self.assertEqual("Goodbye World!", speak.goodbyeworld())
if __name__ == "__main__":
    # Allow running this test module directly: `python <this file>`.
    unittest.main()
| [
"unittest.main",
"speak.goodbyeworld",
"speak.helloworld"
] | [((330, 345), 'unittest.main', 'unittest.main', ([], {}), '()\n', (343, 345), False, 'import unittest\n'), ((186, 204), 'speak.helloworld', 'speak.helloworld', ([], {}), '()\n', (202, 204), False, 'import speak\n'), ((276, 296), 'speak.goodbyeworld', 'speak.goodbyeworld', ([], {}), '()\n', (294, 296), False, 'import speak\n')] |
#!/usr/bin/env python3
# METADATA OF THIS TAL_SERVICE:
problem="eggs"
service="confirm_min_throws"
# (argument name, type) pairs accepted by this TALight service.
args_list = [
    ('min',int),
    ('n_eggs',int),
    ('n_floors',int),
    ('lang',str),
    ('ISATTY',bool),
]
from sys import stderr, exit, argv
from random import randrange
from math import inf as IMPOSSIBLE
from multilanguage import Env, Lang, TALcolors
ENV =Env(problem, service, args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TAc.print(LANG.opening_msg, "green")
# START CODING YOUR SERVICE:
# INITIALIZATION: allocation, base cases, sentinels
# table[u][f] = minimum number of launches that suffice, in the worst case,
# to locate the critical floor among f floors when holding u eggs.
table = [ [0] + [IMPOSSIBLE] * ENV['n_floors'] ]
for u in range(ENV['n_eggs']):
    table.append([0] + [None] * ENV['n_floors'])
# INDUCTIVE STEP: the min-max recursion with nature playing against
# (we choose the first launch floor; nature answers with the worst outcome).
for u in range(1,1+ENV['n_eggs']):
    for f in range(1,1+ENV['n_floors']):
        table[u][f] = IMPOSSIBLE
        for first_launch_floor in range(1,1+f):
            table[u][f] = min(table[u][f],1+max(table[u][f-first_launch_floor],table[u-1][first_launch_floor-1]))
# Compare the computed optimum against the user's claimed bound ENV['min'].
if table[ENV['n_eggs']][ENV['n_floors']] < ENV['min']:
    print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
    #English: print("No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then there exists a policy that guarantees you to find out the truth in strictly less than {ENV['min']} launches, whatever will happen (worst case).")
if table[ENV['n_eggs']][ENV['n_floors']] > ENV['min']:
    print(f"No! When you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']} then no policy guarantees you to find out the truth within {ENV['min']} launches in every possible scenario (aka, whathever the truth is).")
    #English:
if table[ENV['n_eggs']][ENV['n_floors']] == ENV['min']:
    print(f"Yes! Indeed, {ENV['min']} is the smallest possible natural B such that, when you are given {ENV['n_eggs']} eggs and the floors are {ENV['n_floors']}, still there exists a policy that guarantees you to find out the truth within B launches in every possible scenario.")
    #English:
exit(0)
| [
"multilanguage.Env",
"sys.exit",
"multilanguage.TALcolors"
] | [((367, 399), 'multilanguage.Env', 'Env', (['problem', 'service', 'args_list'], {}), '(problem, service, args_list)\n', (370, 399), False, 'from multilanguage import Env, Lang, TALcolors\n'), ((405, 419), 'multilanguage.TALcolors', 'TALcolors', (['ENV'], {}), '(ENV)\n', (414, 419), False, 'from multilanguage import Env, Lang, TALcolors\n'), ((2277, 2284), 'sys.exit', 'exit', (['(0)'], {}), '(0)\n', (2281, 2284), False, 'from sys import stderr, exit, argv\n')] |
from django.contrib import admin
from .models import Doctor, ConsultationTime, Medicine, Allergy, Child, Parent
# Branding shown in the Django admin header.
admin.site.site_header = "Allisto - We Do Good"
@admin.register(Doctor)
class DoctorAdmin(admin.ModelAdmin):
    """Admin list/filter/search configuration for Doctor records."""
    list_display = ('name', 'aadhar_number', 'specialization', 'email', 'phone_number')
    list_filter = ('specialization', 'consultation_fee', 'working_hours')
    search_fields = ('name', 'specialization', 'consultation_fee')
@admin.register(Parent)
class ParentAdmin(admin.ModelAdmin):
    """Admin list/filter/search configuration for Parent records."""
    list_display = ('name', 'aadhar_number', 'email', 'phone_number', 'address')
    list_filter = ('name', 'email', 'phone_number')
    search_fields = ('name', 'aadhar_number', 'email', 'phone_number', 'address')
@admin.register(Child)
class ChildAdmin(admin.ModelAdmin):
    """Admin list/filter/search configuration for Child records."""
    list_display = ('name', 'autistic', 'birthday', 'gender')
    list_filter = ('name', 'autistic', 'birthday')
    search_fields = ('name', 'autistic', 'birthday')
@admin.register(Allergy)
class AllergyAdmin(admin.ModelAdmin):
    """Admin list/filter/search configuration for Allergy records."""
    list_display = ('name', 'description')
    list_filter = ('name', 'description')
    search_fields = ('name',)
@admin.register(Medicine)
class MedicineAdmin(admin.ModelAdmin):
    """Admin list/filter/search configuration for Medicine records."""
    list_display = ('name', 'description')
    list_filter = ('name', 'description')
    search_fields = ('name',)
# ConsultationTime uses the default ModelAdmin (no custom configuration).
admin.site.register(ConsultationTime)
| [
"django.contrib.admin.register",
"django.contrib.admin.site.register"
] | [((165, 187), 'django.contrib.admin.register', 'admin.register', (['Doctor'], {}), '(Doctor)\n', (179, 187), False, 'from django.contrib import admin\n'), ((457, 479), 'django.contrib.admin.register', 'admin.register', (['Parent'], {}), '(Parent)\n', (471, 479), False, 'from django.contrib import admin\n'), ((735, 756), 'django.contrib.admin.register', 'admin.register', (['Child'], {}), '(Child)\n', (749, 756), False, 'from django.contrib import admin\n'), ((962, 985), 'django.contrib.admin.register', 'admin.register', (['Allergy'], {}), '(Allergy)\n', (976, 985), False, 'from django.contrib import admin\n'), ((1142, 1166), 'django.contrib.admin.register', 'admin.register', (['Medicine'], {}), '(Medicine)\n', (1156, 1166), False, 'from django.contrib import admin\n'), ((1323, 1360), 'django.contrib.admin.site.register', 'admin.site.register', (['ConsultationTime'], {}), '(ConsultationTime)\n', (1342, 1360), False, 'from django.contrib import admin\n')] |
from backend.entity.entity import DefinedFuntion
from backend.ir.dumper import Dumper
from backend.ir.stmt import Assign
from backend.ir.stmt import Return
from backend.ir.expr import Bin
from backend.ir.expr import Call
from backend.entity.scope import *
def import_ir(data, asm_file):
    """Build an IR object from the JSON-decoded dictionary `data`.

    `data["variablelist"]` becomes DefinedVariable entries and
    `data["functionlist"]` becomes DefinedFuntion entries; `asm_file`
    is recorded as the IR's source.
    """
    variables = [
        DefinedVariable(name=entry["name"], type=entry["type"],
                        priv=entry["is_private"], init=entry["value"])
        for entry in data["variablelist"]
    ]
    functions = [
        DefinedFuntion(priv=False, body=entry["body"], name=entry["name"],
                       params=entry["parameterlist"], type=entry["type"],
                       scope=LocalScope(entry["variablelist"]))
        for entry in data["functionlist"]
    ]
    return IR(source=asm_file, defuns=functions, defvars=variables,
              constant_table=None, funcdecls=None, scope=None)
def inst_factory(insn):
    """Create the IR statement/expression object described by `insn`.

    `insn` is a dict with a "name" key selecting the instruction kind
    ("store", "return", "bin" or "call") plus the kind-specific fields.

    Raises:
        NotImplementedError: if `insn["name"]` is not a supported kind.
            (Subclass of Exception, so existing broad handlers still work.)
    """
    kind = insn["name"]  # hoisted: looked up once instead of per branch
    if kind == "store":
        return Assign(loc=insn["line_number"], lhs=insn["address"], rhs=insn["value"])
    if kind == "return":
        return Return(loc=insn["line_number"], expr=insn["expr"])
    if kind == "bin":
        return Bin(left=insn["left"], right=insn["right"], op=insn["op"],
                   type=insn["type"], value=insn["value"])
    if kind == "call":
        return Call(args=insn["args"], expr=insn["expr"], type=insn["type"])
    raise NotImplementedError("Feature not implemented: " + repr(kind))
# This class is used to import IR from JSON text
class IR ():
    """In-memory intermediate representation imported from JSON text.

    Holds the source file name, defined variables/functions, declared
    functions, the constant table, and the top-level scope.
    """
    def __init__(self,
                 source,
                 defvars,
                 defuns,
                 funcdecls,
                 constant_table,
                 scope):
        self.source = source
        self.defvars = defvars
        self.defuns = defuns
        self.funcdecls = funcdecls
        # NOTE: this attribute shadows the scope() method below on instances.
        self.scope = scope
        self.constant_table = constant_table
        self.gvars = []     # global variables with an initializer (lazy)
        self.comms = []     # common symbols, i.e. globals without one (lazy)
    def file_name(self):
        """Return the source file name."""
        return self.source
    def location(self):
        """Alias of file_name() kept for API compatibility."""
        return self.source
    def defined_variables(self):
        return self.defvars
    def is_function_defined(self):
        return bool(self.defuns)
    def defined_funcitons(self):
        # (sic) misspelled name kept for backward compatibility with callers.
        return self.defuns
    def scope(self):
        # Dead on instances: __init__ assigns self.scope, shadowing this method.
        return self.scope
    def all_functions(self):
        """Return defined and declared functions as one list."""
        result = []
        if self.defuns:
            result.extend(self.defuns)
        if self.funcdecls:
            result.extend(self.funcdecls)
        return result
    def init_variables(self):
        """Partition global-scope variables into gvars (initialized) and
        comms (uninitialized / common symbols)."""
        # Fixed: `self.comms` was cleared twice, leaving `self.gvars` stale
        # (and growing) on repeated calls.
        self.gvars = []
        self.comms = []
        for var in self.scope.defined_glabal_scope_variables():
            # NOTE(review): `has_initializer` is compared with `== True`
            # on purpose — confirm it is an attribute, not a method.
            if var.has_initializer == True:
                self.gvars.append(var)
            else:
                self.comms.append(var)
    #a list of all defined/declared global-scope variables
    def all_global_variables(self):
        #return self.scope.all_global_variables()
        return self.defvars
    def is_global_variable_defined(self):
        # Fixed: previously tested the bound method object (always truthy)
        # instead of calling it.
        return bool(self.defined_global_variables())
    #Returns the list of global variables.
    def defined_global_variables(self):
        '''
        if not self.gvars:
            self.init_variables()
        else:
            return self.gvars
        '''
        return self.defvars
    def is_common_symbol_defined(self):
        return bool(self.defined_common_symbols())
    def defined_common_symbols(self):
        # Fixed: previously returned None on the first call (after lazily
        # initializing) instead of the freshly built list.
        if not self.comms:
            self.init_variables()
        return self.comms
    def is_string_literal_defined(self):
        return bool(self.constant_table)
    def const_table(self):
        return self.constant_table
    def dump(self):
        """Pretty-print this IR via the Dumper helper."""
        d = Dumper()
        d.print_class(self, self.source)
        d.print_vars("variables", self.defvars)
        d.print_funs("function", self.defuns)
| [
"backend.ir.expr.Bin",
"backend.ir.expr.Call",
"backend.ir.stmt.Return",
"backend.ir.stmt.Assign",
"backend.ir.dumper.Dumper"
] | [((963, 1034), 'backend.ir.stmt.Assign', 'Assign', ([], {'loc': "insn['line_number']", 'lhs': "insn['address']", 'rhs': "insn['value']"}), "(loc=insn['line_number'], lhs=insn['address'], rhs=insn['value'])\n", (969, 1034), False, 'from backend.ir.stmt import Assign\n'), ((3923, 3931), 'backend.ir.dumper.Dumper', 'Dumper', ([], {}), '()\n', (3929, 3931), False, 'from backend.ir.dumper import Dumper\n'), ((1085, 1135), 'backend.ir.stmt.Return', 'Return', ([], {'loc': "insn['line_number']", 'expr': "insn['expr']"}), "(loc=insn['line_number'], expr=insn['expr'])\n", (1091, 1135), False, 'from backend.ir.stmt import Return\n'), ((1183, 1286), 'backend.ir.expr.Bin', 'Bin', ([], {'left': "insn['left']", 'right': "insn['right']", 'op': "insn['op']", 'type': "insn['type']", 'value': "insn['value']"}), "(left=insn['left'], right=insn['right'], op=insn['op'], type=insn['type'\n ], value=insn['value'])\n", (1186, 1286), False, 'from backend.ir.expr import Bin\n'), ((1330, 1391), 'backend.ir.expr.Call', 'Call', ([], {'args': "insn['args']", 'expr': "insn['expr']", 'type': "insn['type']"}), "(args=insn['args'], expr=insn['expr'], type=insn['type'])\n", (1334, 1391), False, 'from backend.ir.expr import Call\n')] |
import numpy as np
from pyquil.gate_matrices import X, Y, Z, H
from forest.benchmarking.operator_tools.superoperator_transformations import *
# Test philosophy:
# Using the by hand calculations found in the docs we check conversion
# between one qubit channels with one Kraus operator (Hadamard) and two
# Kraus operators (the amplitude damping channel). Additionally we check
# a few two qubit channel conversions to get additional confidence.
def amplitude_damping_kraus(p):
    """Kraus operators [A0, A1] of the single-qubit amplitude damping
    channel with decay probability p."""
    keep = np.sqrt(1 - p)
    decay = np.sqrt(p)
    return [np.asarray([[1, 0], [0, keep]]),
            np.asarray([[0, decay], [0, 0]])]
def amplitude_damping_chi(p):
    """Chi (process) matrix of the amplitude damping channel with decay p."""
    s = np.sqrt(1 - p)
    chi = np.zeros((4, 4), dtype=complex)
    chi[0, 0] = (1 + s) ** 2
    chi[3, 3] = (-1 + s) ** 2
    chi[0, 3] = chi[3, 0] = p
    chi[1, 1] = chi[2, 2] = p
    chi[1, 2] = -1j * p
    chi[2, 1] = 1j * p
    return 0.25 * chi
def amplitude_damping_pauli(p):
    """Pauli-Liouville representation of the amplitude damping channel."""
    s = np.sqrt(1 - p)
    out = np.diag([1.0, s, s, 1 - p])
    out[3, 0] = p
    return out
def amplitude_damping_super(p):
    """Superoperator (Liouville) matrix of the amplitude damping channel."""
    s = np.sqrt(1 - p)
    out = np.diag([1.0, s, s, 1 - p])
    out[0, 3] = p
    return out
def amplitude_damping_choi(p):
    """Choi matrix of the amplitude damping channel with decay p."""
    s = np.sqrt(1 - p)
    choi = np.zeros((4, 4))
    choi[0, 0] = 1
    choi[0, 3] = choi[3, 0] = s
    choi[2, 2] = p
    choi[3, 3] = 1 - p
    return choi
# Hand-computed representations of the single-qubit Hadamard channel,
# used below as ground truth for the conversion routines.
HADChi = 0.5 * np.asarray([[0, 0, 0, 0],
                           [0, 1, 0, 1],
                           [0, 0, 0, 0],
                           [0, 1, 0, 1]])
HADPauli = 1.0 * np.asarray([[1, 0, 0, 0],
                             [0, 0, 0, 1],
                             [0, 0, -1, 0],
                             [0, 1, 0, 0]])
HADSuper = 0.5 * np.asarray([[1, 1, 1, 1],
                             [1, -1, 1, -1],
                             [1, 1, -1, -1],
                             [1, -1, -1, 1]])
HADChoi = 0.5 * np.asarray([[1, 1, 1, -1],
                            [1, 1, 1, -1],
                            [1, 1, 1, -1],
                            [-1, -1, -1, 1]])
# Single Qubit Pauli Channel
def one_q_pauli_channel_chi(px, py, pz):
    """Chi matrix of the single-qubit Pauli channel with X/Y/Z flip
    probabilities px, py, pz (identity weight 1 - px - py - pz)."""
    return np.diag([1 - (px + py + pz), px, py, pz])
# Pauli twirled Amplitude damping channel
def analytical_pauli_twirl_of_AD_chi(p):
    """Chi matrix of the Pauli-twirled amplitude damping channel.

    See equation 7 of https://arxiv.org/pdf/1701.03708.pdf.
    """
    s = np.sqrt(1 - p)
    top = (2 + 2 * s - p) / 4
    mid = p / 4
    bot = (2 - 2 * s - p) / 4
    return np.diag([top, mid, mid, bot])
# I \otimes Z channel or gate (two qubits)
# Two-qubit fixtures used by several tests below.
two_qubit_paulis = n_qubit_pauli_basis(2)
IZKraus = two_qubit_paulis.ops_by_label['IZ']
IZSuper = np.diag([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])
# one and zero state as a density matrix
ONE_STATE = np.asarray([[0, 0], [0, 1]])
ZERO_STATE = np.asarray([[1, 0], [0, 0]])
# Amplitude damping Kraus operators with p = 0.1
AdKrausOps = amplitude_damping_kraus(.1)
# Use Kraus operators to find output of channel i.e.
# rho_out = A_0 rho A_0^\dag + A_1 rho A_1^\dag.
rho_out = np.matmul(np.matmul(AdKrausOps[0], ONE_STATE), AdKrausOps[0].transpose().conj()) + \
          np.matmul(np.matmul(AdKrausOps[1], ONE_STATE), AdKrausOps[1].transpose().conj())
def test_vec():
    """vec() must column-stack a (possibly non-square) matrix."""
    A = np.asarray([[1, 2], [3, 4]])
    B = np.asarray([[1, 2, 5], [3, 4, 6]])
    np.testing.assert_array_equal(np.array([[1], [3], [2], [4]]), vec(A))
    np.testing.assert_array_equal(np.array([[1], [3], [2], [4], [5], [6]]), vec(B))
def test_unvec():
    """unvec() must invert vec() for square matrices."""
    A = np.asarray([[1, 2], [3, 4]])
    C = np.asarray([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    np.testing.assert_array_equal(A, unvec(vec(A)))
    np.testing.assert_array_equal(C, unvec(vec(C)))
def test_kraus_ops_sum_to_identity():
    """Completeness relation: sum_k A_k^dag A_k must equal the identity."""
    # Check kraus ops sum to identity
    p = np.random.rand()
    Ad0, Ad1 = amplitude_damping_kraus(p)
    np.testing.assert_array_almost_equal_nulp(np.matmul(Ad0.transpose().conj(), Ad0)
                                              + np.matmul(Ad1.transpose().conj(), Ad1), np.eye(2))
def test_kraus2chi():
    """kraus2chi() must reproduce the hand-computed chi matrices."""
    assert np.allclose(HADChi, kraus2chi(H))
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdChi = amplitude_damping_chi(p)
    assert np.allclose(AdChi, kraus2chi(AdKraus))
    assert np.allclose(superop2chi(IZSuper), kraus2chi(IZKraus))
def test_kraus2pauli_liouville():
    """kraus2pauli_liouville() must match the hand-computed representations."""
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(kraus2pauli_liouville(AdKraus), AdPauli)
    assert np.allclose(kraus2pauli_liouville(H), HADPauli)
def test_kraus2superop():
    """kraus2superop() must match the hand-computed superoperators,
    agree with direct Kraus application, and handle non-square Kraus ops."""
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdSuper = amplitude_damping_super(p)
    np.testing.assert_array_almost_equal_nulp(kraus2superop(AdKraus), AdSuper)
    # test application of super operator is the same as application of Kraus ops
    ONE_STATE_VEC = vec(ONE_STATE)
    np.testing.assert_array_almost_equal_nulp(unvec(np.matmul(kraus2superop(AdKrausOps),
                                                        ONE_STATE_VEC)), rho_out)
    assert np.allclose(kraus2superop(H), HADSuper)
    assert np.allclose(kraus2superop(IZKraus), IZSuper)
    # Below here tests non square Kraus operators
    # In this example The Kraus operator is M_0 = I \otimes <0| where <0| = (1,0)
    Idd = np.asarray([[1, 0], [0, 1]])
    M0 = np.kron(Idd, np.asarray([[1, 0]]))
    attempt = kraus2superop(M0)
    answer = np.kron(M0.conj(), M0)
    assert np.allclose(answer, attempt)
def test_kraus2choi():
    """kraus2choi() must reproduce the hand-computed Choi matrices."""
    p = np.random.rand()
    AdKraus = amplitude_damping_kraus(p)
    AdChoi = amplitude_damping_choi(p)
    assert np.allclose(kraus2choi(AdKraus), AdChoi)
    assert np.allclose(kraus2choi(H), HADChoi)
def test_chi2pauli_liouville():
    """chi2pauli_liouville() must convert chi to Pauli-Liouville form."""
    p = np.random.rand()
    AdChi = amplitude_damping_chi(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(AdPauli, chi2pauli_liouville(AdChi))
    assert np.allclose(HADPauli, chi2pauli_liouville(HADChi))
def test_basis_transform_p_to_c():
    """Pauli -> computational basis change must map the XZ basis vector to X (x) Z."""
    xz_pauli_basis = np.zeros((16, 1))
    xz_pauli_basis[7] = [1.]
    assert np.allclose(unvec(pauli2computational_basis_matrix(4) @ xz_pauli_basis), np.kron(X, Z))
def test_basis_transform_c_to_p():
    """Computational -> Pauli basis change must map X (x) Z back to the XZ basis vector."""
    xz_pauli_basis = np.zeros((16, 1))
    xz_pauli_basis[7] = [1.]
    assert np.allclose(computational2pauli_basis_matrix(4) @ vec(np.kron(X, Z)), xz_pauli_basis)
def test_pl_to_choi():
    """pauli_liouville2choi() must agree with kraus2choi() on all two-qubit
    Paulis and on Hadamard."""
    for i, pauli in enumerate(n_qubit_pauli_basis(2)):
        pl = kraus2pauli_liouville(pauli[1])
        choi = kraus2choi(pauli[1])
        assert np.allclose(choi, pauli_liouville2choi(pl))
    pl = kraus2pauli_liouville(H)
    choi = kraus2choi(H)
    assert np.allclose(choi, pauli_liouville2choi(pl))
def test_superop_to_kraus():
    """superop2kraus() must recover Kraus operators (up to order and sign)."""
    assert np.allclose(superop2kraus(IZSuper), IZKraus)
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdKraus = amplitude_damping_kraus(p)
    kraus_ops = superop2kraus(AdSuper)
    # the order of the Kraus ops matters
    # TODO: fix the sign problem in Kraus operators
    assert np.allclose([np.abs(kraus_ops[1]), np.abs(kraus_ops[0])], AdKraus)
def test_superop_to_choi():
    """superop2choi() must agree with kraus2choi() on Paulis, AD, and H."""
    for i, pauli in enumerate(n_qubit_pauli_basis(2)):
        superop = kraus2superop(pauli[1])
        choi = kraus2choi(pauli[1])
        assert np.allclose(choi, superop2choi(superop))
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdChoi = amplitude_damping_choi(p)
    assert np.allclose(AdChoi, superop2choi(AdSuper))
    superop = kraus2superop(H)
    choi = kraus2choi(H)
    assert np.allclose(choi, superop2choi(superop))
def test_superop_to_pl():
    """superop2pauli_liouville() must agree with the direct Kraus conversion."""
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(AdPauli, superop2pauli_liouville(AdSuper))
    AdKraus = amplitude_damping_kraus(p)
    superop = kraus2superop(AdKraus)
    pauli = kraus2pauli_liouville(AdKraus)
    assert np.allclose(pauli, superop2pauli_liouville(superop))
def test_pauli_liouville_to_superop():
    """pauli_liouville2superop() must invert superop2pauli_liouville()."""
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdPauli = amplitude_damping_pauli(p)
    assert np.allclose(AdSuper, pauli_liouville2superop(AdPauli))
    AdKraus = amplitude_damping_kraus(p)
    superop = kraus2superop(AdKraus)
    pauli = kraus2pauli_liouville(AdKraus)
    assert np.allclose(superop, pauli_liouville2superop(pauli))
def test_choi_to_kraus():
    """choi2kraus() must round-trip through kraus2choi(), including the
    identity-channel Choi matrix."""
    for i, pauli in enumerate(n_qubit_pauli_basis(2)):
        choi = kraus2choi(pauli[1])
        kraus = choi2kraus(choi)
        assert np.allclose(choi, kraus2choi(kraus))
    id_choi = np.array([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])
    assert np.allclose(kraus2choi(choi2kraus(id_choi)), id_choi)
    for kraus in choi2kraus(id_choi):
        assert np.allclose(abs(kraus), np.eye(2)) or np.allclose(kraus, np.zeros((2, 2)))
def test_choi_to_super():
    """choi2superop() must match the hand-computed superoperator."""
    p = np.random.rand()
    AdSuper = amplitude_damping_super(p)
    AdChoi = amplitude_damping_choi(p)
    assert np.allclose(AdSuper, choi2superop(AdChoi))
def test_choi_pl_bijectivity():
    """choi2superop() and superop2choi() must be mutual inverses."""
    assert np.allclose(choi2superop(choi2superop(np.eye(4))), np.eye(4))
    assert np.allclose(superop2choi(superop2choi(np.eye(4))), np.eye(4))
    h_choi = kraus2choi(H)
    h_superop = kraus2superop(H)
    assert np.allclose(choi2superop(choi2superop(h_choi)), h_choi)
    assert np.allclose(superop2choi(superop2choi(h_superop)), h_superop)
| [
"numpy.abs",
"numpy.eye",
"numpy.allclose",
"numpy.sqrt",
"numpy.random.rand",
"numpy.asarray",
"numpy.diag",
"numpy.kron",
"numpy.array",
"numpy.zeros",
"numpy.matmul"
] | [((3245, 3310), 'numpy.diag', 'np.diag', (['[1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1]'], {}), '([1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1])\n', (3252, 3310), True, 'import numpy as np\n'), ((3365, 3393), 'numpy.asarray', 'np.asarray', (['[[0, 0], [0, 1]]'], {}), '([[0, 0], [0, 1]])\n', (3375, 3393), True, 'import numpy as np\n'), ((3407, 3435), 'numpy.asarray', 'np.asarray', (['[[1, 0], [0, 0]]'], {}), '([[1, 0], [0, 0]])\n', (3417, 3435), True, 'import numpy as np\n'), ((979, 993), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (986, 993), True, 'import numpy as np\n'), ((1007, 1092), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, 0], [0, poly1, 0, 0], [0, 0, poly1, 0], [p, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, 0], [0, poly1, 0, 0], [0, 0, poly1, 0], [p, 0, 0, 1 - p]]\n )\n', (1017, 1092), True, 'import numpy as np\n'), ((1227, 1241), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (1234, 1241), True, 'import numpy as np\n'), ((1255, 1340), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, p], [0, poly1, 0, 0], [0, 0, poly1, 0], [0, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, p], [0, poly1, 0, 0], [0, 0, poly1, 0], [0, 0, 0, 1 - p]]\n )\n', (1265, 1340), True, 'import numpy as np\n'), ((1474, 1488), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (1481, 1488), True, 'import numpy as np\n'), ((1503, 1588), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, poly1], [0, 0, 0, 0], [0, 0, p, 0], [poly1, 0, 0, 1 - p]]'], {}), '([[1, 0, 0, poly1], [0, 0, 0, 0], [0, 0, p, 0], [poly1, 0, 0, 1 - p]]\n )\n', (1513, 1588), True, 'import numpy as np\n'), ((1698, 1766), 'numpy.asarray', 'np.asarray', (['[[0, 0, 0, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 0, 1]]'], {}), '([[0, 0, 0, 0], [0, 1, 0, 1], [0, 0, 0, 0], [0, 1, 0, 1]])\n', (1708, 1766), True, 'import numpy as np\n'), ((1866, 1935), 'numpy.asarray', 'np.asarray', (['[[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 0]]'], {}), '([[1, 0, 0, 0], [0, 0, 0, 1], [0, 0, -1, 0], [0, 1, 0, 
0]])\n', (1876, 1935), True, 'import numpy as np\n'), ((2041, 2115), 'numpy.asarray', 'np.asarray', (['[[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]]'], {}), '([[1, 1, 1, 1], [1, -1, 1, -1], [1, 1, -1, -1], [1, -1, -1, 1]])\n', (2051, 2115), True, 'import numpy as np\n'), ((2220, 2294), 'numpy.asarray', 'np.asarray', (['[[1, 1, 1, -1], [1, 1, 1, -1], [1, 1, 1, -1], [-1, -1, -1, 1]]'], {}), '([[1, 1, 1, -1], [1, 1, 1, -1], [1, 1, 1, -1], [-1, -1, -1, 1]])\n', (2230, 2294), True, 'import numpy as np\n'), ((2487, 2562), 'numpy.asarray', 'np.asarray', (['[[1 - p, 0, 0, 0], [0, px, 0, 0], [0, 0, py, 0], [0, 0, 0, pz]]'], {}), '([[1 - p, 0, 0, 0], [0, px, 0, 0], [0, 0, py, 0], [0, 0, 0, pz]])\n', (2497, 2562), True, 'import numpy as np\n'), ((2924, 3012), 'numpy.asarray', 'np.asarray', (['[[poly1, 0, 0, 0], [0, poly2, 0, 0], [0, 0, poly2, 0], [0, 0, 0, poly3]]'], {}), '([[poly1, 0, 0, 0], [0, poly2, 0, 0], [0, 0, poly2, 0], [0, 0, 0,\n poly3]])\n', (2934, 3012), True, 'import numpy as np\n'), ((3845, 3873), 'numpy.asarray', 'np.asarray', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3855, 3873), True, 'import numpy as np\n'), ((3882, 3916), 'numpy.asarray', 'np.asarray', (['[[1, 2, 5], [3, 4, 6]]'], {}), '([[1, 2, 5], [3, 4, 6]])\n', (3892, 3916), True, 'import numpy as np\n'), ((4103, 4131), 'numpy.asarray', 'np.asarray', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (4113, 4131), True, 'import numpy as np\n'), ((4140, 4185), 'numpy.asarray', 'np.asarray', (['[[1, 2, 3], [4, 5, 6], [7, 8, 9]]'], {}), '([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n', (4150, 4185), True, 'import numpy as np\n'), ((4376, 4392), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4390, 4392), True, 'import numpy as np\n'), ((4696, 4712), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4710, 4712), True, 'import numpy as np\n'), ((4950, 4966), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (4964, 4966), True, 'import numpy as np\n'), ((5208, 
5224), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (5222, 5224), True, 'import numpy as np\n'), ((5929, 5957), 'numpy.asarray', 'np.asarray', (['[[1, 0], [0, 1]]'], {}), '([[1, 0], [0, 1]])\n', (5939, 5957), True, 'import numpy as np\n'), ((6081, 6109), 'numpy.allclose', 'np.allclose', (['answer', 'attempt'], {}), '(answer, attempt)\n', (6092, 6109), True, 'import numpy as np\n'), ((6143, 6159), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6157, 6159), True, 'import numpy as np\n'), ((6381, 6397), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (6395, 6397), True, 'import numpy as np\n'), ((6656, 6673), 'numpy.zeros', 'np.zeros', (['(16, 1)'], {}), '((16, 1))\n', (6664, 6673), True, 'import numpy as np\n'), ((6860, 6877), 'numpy.zeros', 'np.zeros', (['(16, 1)'], {}), '((16, 1))\n', (6868, 6877), True, 'import numpy as np\n'), ((7434, 7450), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7448, 7450), True, 'import numpy as np\n'), ((7971, 7987), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7985, 7987), True, 'import numpy as np\n'), ((8266, 8282), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8280, 8282), True, 'import numpy as np\n'), ((8665, 8681), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (8679, 8681), True, 'import numpy as np\n'), ((9233, 9299), 'numpy.array', 'np.array', (['[[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1], [0, 0, 0, 0], [0, 0, 0, 0], [1, 0, 0, 1]])\n', (9241, 9299), True, 'import numpy as np\n'), ((9529, 9545), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9543, 9545), True, 'import numpy as np\n'), ((731, 826), 'numpy.asarray', 'np.asarray', (['[[poly1, 0, 0, p], [0, p, -1.0j * p, 0], [0, 1.0j * p, p, 0], [p, 0, 0, poly2]]'], {}), '([[poly1, 0, 0, p], [0, p, -1.0j * p, 0], [0, 1.0j * p, p, 0], [p,\n 0, 0, poly2]])\n', (741, 826), True, 'import numpy as np\n'), ((3653, 3688), 'numpy.matmul', 'np.matmul', 
(['AdKrausOps[0]', 'ONE_STATE'], {}), '(AdKrausOps[0], ONE_STATE)\n', (3662, 3688), True, 'import numpy as np\n'), ((3748, 3783), 'numpy.matmul', 'np.matmul', (['AdKrausOps[1]', 'ONE_STATE'], {}), '(AdKrausOps[1], ONE_STATE)\n', (3757, 3783), True, 'import numpy as np\n'), ((3951, 3981), 'numpy.array', 'np.array', (['[[1], [3], [2], [4]]'], {}), '([[1], [3], [2], [4]])\n', (3959, 3981), True, 'import numpy as np\n'), ((4025, 4065), 'numpy.array', 'np.array', (['[[1], [3], [2], [4], [5], [6]]'], {}), '([[1], [3], [2], [4], [5], [6]])\n', (4033, 4065), True, 'import numpy as np\n'), ((4608, 4617), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (4614, 4617), True, 'import numpy as np\n'), ((5980, 6000), 'numpy.asarray', 'np.asarray', (['[[1, 0]]'], {}), '([[1, 0]])\n', (5990, 6000), True, 'import numpy as np\n'), ((6787, 6800), 'numpy.kron', 'np.kron', (['X', 'Z'], {}), '(X, Z)\n', (6794, 6800), True, 'import numpy as np\n'), ((9776, 9785), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9782, 9785), True, 'import numpy as np\n'), ((9849, 9858), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9855, 9858), True, 'import numpy as np\n'), ((651, 665), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (658, 665), True, 'import numpy as np\n'), ((690, 704), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (697, 704), True, 'import numpy as np\n'), ((7690, 7710), 'numpy.abs', 'np.abs', (['kraus_ops[1]'], {}), '(kraus_ops[1])\n', (7696, 7710), True, 'import numpy as np\n'), ((7712, 7732), 'numpy.abs', 'np.abs', (['kraus_ops[0]'], {}), '(kraus_ops[0])\n', (7718, 7732), True, 'import numpy as np\n'), ((514, 528), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (521, 528), True, 'import numpy as np\n'), ((558, 568), 'numpy.sqrt', 'np.sqrt', (['p'], {}), '(p)\n', (565, 568), True, 'import numpy as np\n'), ((6972, 6985), 'numpy.kron', 'np.kron', (['X', 'Z'], {}), '(X, Z)\n', (6979, 6985), True, 'import numpy as np\n'), ((9442, 9451), 'numpy.eye', 'np.eye', 
(['(2)'], {}), '(2)\n', (9448, 9451), True, 'import numpy as np\n'), ((9475, 9491), 'numpy.zeros', 'np.zeros', (['(2, 2)'], {}), '((2, 2))\n', (9483, 9491), True, 'import numpy as np\n'), ((9763, 9772), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9769, 9772), True, 'import numpy as np\n'), ((9836, 9845), 'numpy.eye', 'np.eye', (['(4)'], {}), '(4)\n', (9842, 9845), True, 'import numpy as np\n'), ((2824, 2838), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (2831, 2838), True, 'import numpy as np\n'), ((2887, 2901), 'numpy.sqrt', 'np.sqrt', (['(1 - p)'], {}), '(1 - p)\n', (2894, 2901), True, 'import numpy as np\n')] |
import numpy as np
import imageio
from PoissonTemperature import FiniteDifferenceMatrixConstruction
def ind_sub_conversion(img, ind2sub_fn, sub2ind_fn):
    """Precompute linear-index <-> (row, col) lookup tables for ``img``.

    Saves two int32 arrays:
      * ``ind2sub_fn``: shape (rows*cols, 2), mapping flat index -> (row, col)
      * ``sub2ind_fn``: shape (rows, cols), mapping (row, col) -> flat index
    """
    rows, cols = img.shape[:2]
    flat = np.arange(rows * cols, dtype=np.int32)
    table = np.empty((rows * cols, 2), dtype=np.int32)
    # Row-major decomposition of the flat index.
    table[:, 0] = flat // cols
    table[:, 1] = flat % cols
    np.save(ind2sub_fn, table)
    np.save(sub2ind_fn, flat.reshape((rows, cols)))
def pie(FDMC, background, foreground, mask=None):
    """Poisson image editing: blend *foreground* gradients into *background*.

    :param FDMC: FiniteDifferenceMatrixConstruction helper built for the image grid.
    :param background: float array, reshaped to (-1, 3) internally, values in [0, 1].
    :param foreground: float array of the same shape as *background*.
    :param mask: per-pixel mask selecting the blend region; defaults to the
        module-level ``mask`` set up in ``__main__``. The original read the
        global directly, which made the function untestable in isolation;
        the optional parameter is a backward-compatible fix.
    :returns: uint8 array with the same shape as *background*, values in [0, 255].
    """
    if mask is None:
        # Backward-compatible fallback to the global configured in __main__.
        mask = globals()['mask']
    Lap, Lap_Solver_Array, Rhs, is_unknown, _, _ = \
        FDMC.laplacian_matrix_construction(mask.ravel())
    bg = background.reshape((-1, 3))
    fg = foreground.reshape((-1, 3))
    result = bg.copy()
    # Laplacian of the foreground over the unknown (masked) region.
    lap = Lap.dot(fg[is_unknown, :])
    lap_rhs = Rhs.dot(fg)
    lap_unknown = lap - lap_rhs
    # Solve the Poisson system with background values as boundary conditions.
    poisson_sol = Lap_Solver_Array[0](lap_unknown + Rhs.dot(bg))
    result[is_unknown, :] = poisson_sol
    result = result.reshape(background.shape)
    # Clamp to the displayable range before the uint8 conversion.
    result = np.clip(result, 0.0, 1.0)
    return (result * 255).astype(np.uint8)
if __name__ == '__main__':
    # Poisson image editing demo: blend 'gine.png' into 'mona.png' wherever
    # 'mask.png' is non-zero.
    folder = './data/pie/'
    # Keep a single channel of the mask as float32.
    mask = imageio.imread(folder+'mask.png')[:, :, 0].astype(np.float32)
    # Drop any alpha channel and normalize pixel values to [0, 1].
    background = imageio.imread(folder+'mona.png')[:, :, :3]/255
    foreground = imageio.imread(folder+'gine.png')[:, :, :3]/255
    # Non-zero mask pixels become NaN -- presumably how the finite-difference
    # construction marks "unknown" (to-be-blended) pixels; confirm against
    # FiniteDifferenceMatrixConstruction.
    mask[mask > 0] = np.nan
    # Precompute the index lookup tables the solver loads from disk.
    ind2sub_fn = folder+'ind2sub.npy'
    sub2ind_fn = folder+'sub2ind.npy'
    ind_sub_conversion(mask, ind2sub_fn, sub2ind_fn)
    FDMC = FiniteDifferenceMatrixConstruction(ind2sub_fn, sub2ind_fn)
    result = pie(FDMC, background, foreground)
    imageio.imwrite(folder+'result.png', result)
| [
"numpy.arange",
"imageio.imwrite",
"numpy.floor",
"PoissonTemperature.FiniteDifferenceMatrixConstruction",
"numpy.remainder",
"numpy.empty",
"imageio.imread",
"numpy.save"
] | [((219, 257), 'numpy.arange', 'np.arange', (['(rows * cols)'], {'dtype': 'np.int32'}), '(rows * cols, dtype=np.int32)\n', (228, 257), True, 'import numpy as np\n'), ((270, 304), 'numpy.empty', 'np.empty', (['(num, 2)'], {'dtype': 'np.int32'}), '((num, 2), dtype=np.int32)\n', (278, 304), True, 'import numpy as np\n'), ((325, 348), 'numpy.floor', 'np.floor', (['(arange / cols)'], {}), '(arange / cols)\n', (333, 348), True, 'import numpy as np\n'), ((367, 393), 'numpy.remainder', 'np.remainder', (['arange', 'cols'], {}), '(arange, cols)\n', (379, 393), True, 'import numpy as np\n'), ((442, 470), 'numpy.save', 'np.save', (['ind2sub_fn', 'ind2sub'], {}), '(ind2sub_fn, ind2sub)\n', (449, 470), True, 'import numpy as np\n'), ((475, 503), 'numpy.save', 'np.save', (['sub2ind_fn', 'sub2ind'], {}), '(sub2ind_fn, sub2ind)\n', (482, 503), True, 'import numpy as np\n'), ((1525, 1583), 'PoissonTemperature.FiniteDifferenceMatrixConstruction', 'FiniteDifferenceMatrixConstruction', (['ind2sub_fn', 'sub2ind_fn'], {}), '(ind2sub_fn, sub2ind_fn)\n', (1559, 1583), False, 'from PoissonTemperature import FiniteDifferenceMatrixConstruction\n'), ((1635, 1681), 'imageio.imwrite', 'imageio.imwrite', (["(folder + 'result.png')", 'result'], {}), "(folder + 'result.png', result)\n", (1650, 1681), False, 'import imageio\n'), ((1242, 1277), 'imageio.imread', 'imageio.imread', (["(folder + 'mona.png')"], {}), "(folder + 'mona.png')\n", (1256, 1277), False, 'import imageio\n'), ((1307, 1342), 'imageio.imread', 'imageio.imread', (["(folder + 'gine.png')"], {}), "(folder + 'gine.png')\n", (1321, 1342), False, 'import imageio\n'), ((1163, 1198), 'imageio.imread', 'imageio.imread', (["(folder + 'mask.png')"], {}), "(folder + 'mask.png')\n", (1177, 1198), False, 'import imageio\n')] |
from distutils.version import LooseVersion
import requests
import os
import shutil
import threading
import webbrowser
from zipfile import ZipFile
from pathlib import Path
import traceback
import tempfile
# import concurrent.futures
from flask import Flask, url_for, make_response
from flask.json import dumps
from flask_restx import Api
from mindsdb.__about__ import __version__ as mindsdb_version
from mindsdb.interfaces.datastore.datastore import DataStore
from mindsdb.interfaces.model.model_interface import ModelInterface
from mindsdb.interfaces.database.integrations import IntegrationController
from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true
from mindsdb.utilities.telemetry import inject_telemetry_to_static
from mindsdb.utilities.config import Config
from mindsdb.utilities.log import get_log
from mindsdb.interfaces.storage.db import session
from mindsdb.utilities.json_encoder import CustomJSONEncoder
class Swagger_Api(Api):
    """
    This is a modification of the base Flask Restplus Api class due to the issue described here
    https://github.com/noirbizarre/flask-restplus/issues/223
    """
    @property
    def specs_url(self):
        # Build a relative URL (_external=False) -- presumably so the swagger
        # spec resolves correctly when the API is served behind a proxy or on
        # a non-default host; see the linked issue.
        return url_for(self.endpoint("specs"), _external=False)
def custom_output_json(data, code, headers=None):
    """Serialize *data* as JSON into a Flask response with status *code*.

    Optional *headers* are merged into the response headers.
    """
    response = make_response(dumps(data), code)
    if headers:
        response.headers.extend(headers)
    return response
def get_last_compatible_gui_version() -> LooseVersion:
    """Return the newest GUI version compatible with the running MindsDB.

    Fetches ``compatible-config.json`` from S3 and resolves the best GUI
    version for ``mindsdb_version``. Returns ``False`` (not ``None``) on any
    network or parsing failure -- callers check for that sentinel.

    Fix: the original created ``log = get_log('http')`` but reported every
    error via ``print``, bypassing the configured log routing; all errors now
    go through the logger like the rest of this module.
    """
    log = get_log('http')
    try:
        res = requests.get('https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json', timeout=5)
    except (ConnectionError, requests.exceptions.ConnectionError) as e:
        log.error(f'Is no connection. {e}')
        return False
    except Exception as e:
        log.error(f'Is something wrong with getting compatible-config.json: {e}')
        return False
    if res.status_code != 200:
        log.error(f'Cant get compatible-config.json: returned status code = {res.status_code}')
        return False
    try:
        versions = res.json()
    except Exception as e:
        log.error(f'Cant decode compatible-config.json: {e}')
        return False
    current_mindsdb_lv = LooseVersion(mindsdb_version)
    try:
        # Map each listed mindsdb version to its newest compatible GUI version.
        gui_versions = {}
        max_mindsdb_lv = None
        max_gui_lv = None
        for el in versions['mindsdb']:
            if el['mindsdb_version'] is None:
                gui_lv = LooseVersion(el['gui_version'])
            else:
                mindsdb_lv = LooseVersion(el['mindsdb_version'])
                gui_lv = LooseVersion(el['gui_version'])
                if mindsdb_lv.vstring not in gui_versions or gui_lv > gui_versions[mindsdb_lv.vstring]:
                    gui_versions[mindsdb_lv.vstring] = gui_lv
                if max_mindsdb_lv is None or max_mindsdb_lv < mindsdb_lv:
                    max_mindsdb_lv = mindsdb_lv
            if max_gui_lv is None or max_gui_lv < gui_lv:
                max_gui_lv = gui_lv
        all_mindsdb_lv = [LooseVersion(x) for x in gui_versions.keys()]
        all_mindsdb_lv.sort()
        if current_mindsdb_lv.vstring in gui_versions:
            # Exact match for the running version.
            gui_version_lv = gui_versions[current_mindsdb_lv.vstring]
        elif current_mindsdb_lv > all_mindsdb_lv[-1]:
            # Running version is newer than anything listed: take the newest GUI.
            gui_version_lv = max_gui_lv
        else:
            # Fall back to the closest listed version below the running one.
            lower_versions = {key: value for key, value in gui_versions.items() if LooseVersion(key) < current_mindsdb_lv}
            if len(lower_versions) == 0:
                gui_version_lv = gui_versions[all_mindsdb_lv[0].vstring]
            else:
                all_lower_versions = [LooseVersion(x) for x in lower_versions.keys()]
                gui_version_lv = gui_versions[all_lower_versions[-1].vstring]
    except Exception as e:
        log.error(f'Error in compatible-config.json structure: {e}')
        return False
    return gui_version_lv
def get_current_gui_version() -> LooseVersion:
    """Read ``static/version.txt`` and return the installed GUI version.

    Returns ``None`` when no version file exists yet (fresh install).
    """
    config = Config()
    version_file = Path(config['paths']['static']).joinpath('version.txt')
    version_str = None
    if version_file.is_file():
        with open(version_file, 'rt') as f:
            version_str = f.readline()
    if version_str is None:
        return None
    return LooseVersion(version_str)
def download_gui(destignation, version):
    """Download and unpack GUI static files of *version* into *destignation*.

    Writes the extracted files plus a ``version.txt`` marker.
    Returns ``True`` on success, ``False`` on any download error.

    Fixes: the download is now written through a context manager (the original
    ``open(...).write(...)`` leaked the file handle), and the GET carries a
    timeout so a stalled download cannot hang forever.
    """
    if isinstance(destignation, str):
        destignation = Path(destignation)
    log = get_log('http')
    dist_zip_path = str(destignation.joinpath('dist.zip'))
    bucket = "https://mindsdb-web-builds.s3.amazonaws.com/"
    resources = [{
        'url': bucket + 'dist-V' + version + '.zip',
        'path': dist_zip_path
    }]
    def get_resources(resource):
        response = requests.get(resource['url'], timeout=60)
        if response.status_code != requests.status_codes.codes.ok:
            raise Exception(f"Error {response.status_code} GET {resource['url']}")
        with open(resource['path'], 'wb') as f:
            f.write(response.content)
    try:
        for r in resources:
            get_resources(r)
    except Exception as e:
        log.error(f'Error during downloading files from s3: {e}')
        return False
    static_folder = destignation
    static_folder.mkdir(mode=0o777, exist_ok=True, parents=True)
    ZipFile(dist_zip_path).extractall(static_folder)
    # Some builds wrap everything in a dist/ folder; flatten it.
    if static_folder.joinpath('dist').is_dir():
        shutil.move(str(destignation.joinpath('dist').joinpath('index.html')), static_folder)
        shutil.move(str(destignation.joinpath('dist').joinpath('assets')), static_folder)
        shutil.rmtree(destignation.joinpath('dist'))
    os.remove(dist_zip_path)
    # Record the installed GUI version for update_static()/get_current_gui_version().
    version_txt_path = destignation.joinpath('version.txt')
    with open(version_txt_path, 'wt') as f:
        f.write(version)
    return True
'''
    # to make downloading faster download each resource in a separate thread
    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        future_to_url = {executor.submit(get_resources, r): r for r in resources}
        for future in concurrent.futures.as_completed(future_to_url):
            res = future.result()
            if res is not None:
                raise res
'''
def initialize_static():
    """Refresh the GUI static files, then release the DB session."""
    ok = update_static()
    session.close()
    return ok
def update_static():
    ''' Update GUI static files based on compatible-config.json content.
    Files are downloaded and swapped in only when a newer compatible GUI
    version exists; the installed version lives in static/version.txt.
    Returns True when the static folder is up to date, False on failure.
    '''
    config = Config()
    log = get_log('http')
    static_path = Path(config['paths']['static'])
    last_gui_version_lv = get_last_compatible_gui_version()
    current_gui_version_lv = get_current_gui_version()
    if last_gui_version_lv is False:
        # Could not determine the compatible version (network/parse error).
        return False
    if current_gui_version_lv is not None:
        if current_gui_version_lv >= last_gui_version_lv:
            # Already up to date -- nothing to download.
            return True
    log.info(f'New version of GUI available ({last_gui_version_lv.vstring}). Downloading...')
    # Download into a scratch dir first so a failed download never corrupts
    # the live static folder.
    temp_dir = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
    success = download_gui(temp_dir, last_gui_version_lv.vstring)
    if success is False:
        shutil.rmtree(temp_dir)
        return False
    # mkdtemp + rmtree reserves a unique, not-yet-existing path, which is
    # what shutil.copytree requires for its destination.
    temp_dir_for_rm = tempfile.mkdtemp(prefix='mindsdb_gui_files_')
    shutil.rmtree(temp_dir_for_rm)
    # Back up the old static files, swap in the new ones, then drop the backup.
    shutil.copytree(str(static_path), temp_dir_for_rm)
    shutil.rmtree(str(static_path))
    shutil.copytree(temp_dir, str(static_path))
    shutil.rmtree(temp_dir_for_rm)
    log.info(f'GUI version updated to {last_gui_version_lv.vstring}')
    return True
def initialize_flask(config, init_static_thread, no_studio):
    """Create and configure the Flask app plus its Swagger-enabled REST api.

    :param config: MindsDB config mapping; reads paths.static and api.http.*.
    :param init_static_thread: thread downloading GUI files -- joined by the
        background browser-opener before the browser is launched.
    :param bool no_studio: when True, serve only the API (no GUI statics,
        no browser auto-open).
    :returns: tuple ``(app, api)``.
    """
    # Apparently there's a bug that causes the static path not to work if it's '/' -- https://github.com/pallets/flask/issues/3134, I think '' should achieve the same thing (???)
    if no_studio:
        app = Flask(
            __name__
        )
    else:
        static_path = os.path.join(config['paths']['static'], 'static/')
        if os.path.isabs(static_path) is False:
            # Flask needs an absolute static folder path.
            static_path = os.path.join(os.getcwd(), static_path)
        app = Flask(
            __name__,
            static_url_path='/static',
            static_folder=static_path
        )
    # Short asset cache (60s) so GUI updates become visible quickly.
    app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 60
    app.config['SWAGGER_HOST'] = 'http://localhost:8000/mindsdb'
    app.json_encoder = CustomJSONEncoder
    # Swagger auth scheme: session id passed as a query parameter.
    authorizations = {
        'apikey': {
            'type': 'session',
            'in': 'query',
            'name': 'session'
        }
    }
    api = Swagger_Api(
        app,
        authorizations=authorizations,
        security=['apikey'],
        url_prefix=':8000',
        prefix='/api',
        doc='/doc/'
    )
    api.representations['application/json'] = custom_output_json
    port = config['api']['http']['port']
    host = config['api']['http']['host']
    # NOTE rewrite it, that hotfix to see GUI link
    if not no_studio:
        log = get_log('http')
        if host in ('', '0.0.0.0'):
            # A wildcard bind address is not browsable; point the user at loopback.
            url = f'http://127.0.0.1:{port}/'
        else:
            url = f'http://{host}:{port}/'
        log.info(f' - GUI available at {url}')
        pid = os.getpid()
        # Open the browser from a daemon thread once the HTTP server is listening.
        x = threading.Thread(target=_open_webbrowser, args=(url, pid, port, init_static_thread, config['paths']['static']), daemon=True)
        x.start()
    return app, api
def initialize_interfaces(app):
    """Attach the MindsDB service singletons and config to the Flask app."""
    app.original_data_store = DataStore()
    app.original_model_interface = ModelInterface()
    app.original_integration_controller = IntegrationController()
    app.config_obj = Config()
def _open_webbrowser(url: str, pid: int, port: int, init_static_thread, static_folder):
    """Open webbrowser with url when http service is started.
    If some error then do nothing.

    Fixes: ``logger.exception`` replaces the manual
    ``logger.error(traceback.format_exc())`` pair (same traceback, one record),
    and ``session.close()`` moved into ``finally`` so the DB session is
    released even on unexpected failures inside the try block.
    """
    # Wait until the GUI static files are in place before opening the browser.
    init_static_thread.join()
    inject_telemetry_to_static(static_folder)
    logger = get_log('http')
    try:
        is_http_active = wait_func_is_true(func=is_pid_listen_port, timeout=10,
                                            pid=pid, port=port)
        if is_http_active:
            webbrowser.open(url)
    except Exception as e:
        # logger.exception attaches the current traceback automatically.
        logger.exception(f'Failed to open {url} in webbrowser with exception {e}')
    finally:
        session.close()
| [
"zipfile.ZipFile",
"flask.Flask",
"webbrowser.open",
"mindsdb.utilities.log.get_log",
"mindsdb.utilities.ps.wait_func_is_true",
"os.remove",
"pathlib.Path",
"flask.json.dumps",
"os.getpid",
"distutils.version.LooseVersion",
"mindsdb.utilities.config.Config",
"os.path.isabs",
"requests.get",
... | [((1458, 1473), 'mindsdb.utilities.log.get_log', 'get_log', (['"""http"""'], {}), "('http')\n", (1465, 1473), False, 'from mindsdb.utilities.log import get_log\n'), ((2168, 2197), 'distutils.version.LooseVersion', 'LooseVersion', (['mindsdb_version'], {}), '(mindsdb_version)\n', (2180, 2197), False, 'from distutils.version import LooseVersion\n'), ((3915, 3923), 'mindsdb.utilities.config.Config', 'Config', ([], {}), '()\n', (3921, 3923), False, 'from mindsdb.utilities.config import Config\n'), ((3942, 3973), 'pathlib.Path', 'Path', (["config['paths']['static']"], {}), "(config['paths']['static'])\n", (3946, 3973), False, 'from pathlib import Path\n'), ((4452, 4467), 'mindsdb.utilities.log.get_log', 'get_log', (['"""http"""'], {}), "('http')\n", (4459, 4467), False, 'from mindsdb.utilities.log import get_log\n'), ((5622, 5646), 'os.remove', 'os.remove', (['dist_zip_path'], {}), '(dist_zip_path)\n', (5631, 5646), False, 'import os\n'), ((6313, 6328), 'mindsdb.interfaces.storage.db.session.close', 'session.close', ([], {}), '()\n', (6326, 6328), False, 'from mindsdb.interfaces.storage.db import session\n'), ((6597, 6605), 'mindsdb.utilities.config.Config', 'Config', ([], {}), '()\n', (6603, 6605), False, 'from mindsdb.utilities.config import Config\n'), ((6616, 6631), 'mindsdb.utilities.log.get_log', 'get_log', (['"""http"""'], {}), "('http')\n", (6623, 6631), False, 'from mindsdb.utilities.log import get_log\n'), ((6650, 6681), 'pathlib.Path', 'Path', (["config['paths']['static']"], {}), "(config['paths']['static'])\n", (6654, 6681), False, 'from pathlib import Path\n'), ((7094, 7139), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""mindsdb_gui_files_"""'}), "(prefix='mindsdb_gui_files_')\n", (7110, 7139), False, 'import tempfile\n'), ((7307, 7352), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {'prefix': '"""mindsdb_gui_files_"""'}), "(prefix='mindsdb_gui_files_')\n", (7323, 7352), False, 'import tempfile\n'), ((7357, 7387), 'shutil.rmtree', 
'shutil.rmtree', (['temp_dir_for_rm'], {}), '(temp_dir_for_rm)\n', (7370, 7387), False, 'import shutil\n'), ((7531, 7561), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir_for_rm'], {}), '(temp_dir_for_rm)\n', (7544, 7561), False, 'import shutil\n'), ((9479, 9490), 'mindsdb.interfaces.datastore.datastore.DataStore', 'DataStore', ([], {}), '()\n', (9488, 9490), False, 'from mindsdb.interfaces.datastore.datastore import DataStore\n'), ((9526, 9542), 'mindsdb.interfaces.model.model_interface.ModelInterface', 'ModelInterface', ([], {}), '()\n', (9540, 9542), False, 'from mindsdb.interfaces.model.model_interface import ModelInterface\n'), ((9585, 9608), 'mindsdb.interfaces.database.integrations.IntegrationController', 'IntegrationController', ([], {}), '()\n', (9606, 9608), False, 'from mindsdb.interfaces.database.integrations import IntegrationController\n'), ((9622, 9630), 'mindsdb.utilities.config.Config', 'Config', ([], {}), '()\n', (9628, 9630), False, 'from mindsdb.utilities.config import Config\n'), ((9889, 9930), 'mindsdb.utilities.telemetry.inject_telemetry_to_static', 'inject_telemetry_to_static', (['static_folder'], {}), '(static_folder)\n', (9915, 9930), False, 'from mindsdb.utilities.telemetry import inject_telemetry_to_static\n'), ((9944, 9959), 'mindsdb.utilities.log.get_log', 'get_log', (['"""http"""'], {}), "('http')\n", (9951, 9959), False, 'from mindsdb.utilities.log import get_log\n'), ((10327, 10342), 'mindsdb.interfaces.storage.db.session.close', 'session.close', ([], {}), '()\n', (10340, 10342), False, 'from mindsdb.interfaces.storage.db import session\n'), ((1317, 1328), 'flask.json.dumps', 'dumps', (['data'], {}), '(data)\n', (1322, 1328), False, 'from flask.json import dumps\n'), ((1498, 1600), 'requests.get', 'requests.get', (['"""https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json"""'], {'timeout': '(5)'}), "(\n 'https://mindsdb-web-builds.s3.amazonaws.com/compatible-config.json',\n timeout=5)\n", (1510, 1600), False, 'import 
requests\n'), ((4258, 4291), 'distutils.version.LooseVersion', 'LooseVersion', (['current_gui_version'], {}), '(current_gui_version)\n', (4270, 4291), False, 'from distutils.version import LooseVersion\n'), ((4423, 4441), 'pathlib.Path', 'Path', (['destignation'], {}), '(destignation)\n', (4427, 4441), False, 'from pathlib import Path\n'), ((4750, 4779), 'requests.get', 'requests.get', (["resource['url']"], {}), "(resource['url'])\n", (4762, 4779), False, 'import requests\n'), ((7239, 7262), 'shutil.rmtree', 'shutil.rmtree', (['temp_dir'], {}), '(temp_dir)\n', (7252, 7262), False, 'import shutil\n'), ((7923, 7938), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (7928, 7938), False, 'from flask import Flask, url_for, make_response\n'), ((7993, 8043), 'os.path.join', 'os.path.join', (["config['paths']['static']", '"""static/"""'], {}), "(config['paths']['static'], 'static/')\n", (8005, 8043), False, 'import os\n'), ((8171, 8240), 'flask.Flask', 'Flask', (['__name__'], {'static_url_path': '"""/static"""', 'static_folder': 'static_path'}), "(__name__, static_url_path='/static', static_folder=static_path)\n", (8176, 8240), False, 'from flask import Flask, url_for, make_response\n'), ((9010, 9025), 'mindsdb.utilities.log.get_log', 'get_log', (['"""http"""'], {}), "('http')\n", (9017, 9025), False, 'from mindsdb.utilities.log import get_log\n'), ((9227, 9238), 'os.getpid', 'os.getpid', ([], {}), '()\n', (9236, 9238), False, 'import os\n'), ((9251, 9379), 'threading.Thread', 'threading.Thread', ([], {'target': '_open_webbrowser', 'args': "(url, pid, port, init_static_thread, config['paths']['static'])", 'daemon': '(True)'}), "(target=_open_webbrowser, args=(url, pid, port,\n init_static_thread, config['paths']['static']), daemon=True)\n", (9267, 9379), False, 'import threading\n'), ((9994, 10068), 'mindsdb.utilities.ps.wait_func_is_true', 'wait_func_is_true', ([], {'func': 'is_pid_listen_port', 'timeout': '(10)', 'pid': 'pid', 'port': 'port'}), 
'(func=is_pid_listen_port, timeout=10, pid=pid, port=port)\n', (10011, 10068), False, 'from mindsdb.utilities.ps import is_pid_listen_port, wait_func_is_true\n'), ((2981, 2996), 'distutils.version.LooseVersion', 'LooseVersion', (['x'], {}), '(x)\n', (2993, 2996), False, 'from distutils.version import LooseVersion\n'), ((5278, 5300), 'zipfile.ZipFile', 'ZipFile', (['dist_zip_path'], {}), '(dist_zip_path)\n', (5285, 5300), False, 'from zipfile import ZipFile\n'), ((8055, 8081), 'os.path.isabs', 'os.path.isabs', (['static_path'], {}), '(static_path)\n', (8068, 8081), False, 'import os\n'), ((10151, 10171), 'webbrowser.open', 'webbrowser.open', (['url'], {}), '(url)\n', (10166, 10171), False, 'import webbrowser\n'), ((2400, 2431), 'distutils.version.LooseVersion', 'LooseVersion', (["el['gui_version']"], {}), "(el['gui_version'])\n", (2412, 2431), False, 'from distutils.version import LooseVersion\n'), ((2479, 2514), 'distutils.version.LooseVersion', 'LooseVersion', (["el['mindsdb_version']"], {}), "(el['mindsdb_version'])\n", (2491, 2514), False, 'from distutils.version import LooseVersion\n'), ((2540, 2571), 'distutils.version.LooseVersion', 'LooseVersion', (["el['gui_version']"], {}), "(el['gui_version'])\n", (2552, 2571), False, 'from distutils.version import LooseVersion\n'), ((8131, 8142), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8140, 8142), False, 'import os\n'), ((10299, 10321), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (10319, 10321), False, 'import traceback\n'), ((3584, 3599), 'distutils.version.LooseVersion', 'LooseVersion', (['x'], {}), '(x)\n', (3596, 3599), False, 'from distutils.version import LooseVersion\n'), ((3374, 3391), 'distutils.version.LooseVersion', 'LooseVersion', (['key'], {}), '(key)\n', (3386, 3391), False, 'from distutils.version import LooseVersion\n')] |
# Copyright Contributors to the Pyro-Cov project.
# SPDX-License-Identifier: Apache-2.0
import functools
import io
import logging
import math
import re
import sys
import torch
import torch.multiprocessing as mp
from Bio import AlignIO
from Bio.Phylo.NewickIO import Parser
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from .phylo import Phylogeny
logger = logging.getLogger(__name__)
FILE_FORMATS = {
"nex": "nexus",
"nexus": "nexus",
"fasta": "fasta",
"xml": "beast",
}
def _print_dot():
sys.stdout.write(".")
sys.stdout.flush()
def _handle_translate(lines, context):
map_lines = [line.rstrip(",").split() for line in lines[1:-1]]
context["translate"] = {key: value for key, value in map_lines}
def _handle_tree_count(lines, context):
return 1
def _handle_tree_newick(lines, context):
    """Parse one nexus ``tree <name> = <newick>;`` statement into a Bio.Phylo tree.

    When a ``translate`` block has been seen, each leaf gets its translated
    label attached as ``.comment``.
    """
    assert len(lines) == 1
    keyword, name, equal, newick = lines[0].split()
    assert keyword == "tree"
    assert equal == "="
    parsed = next(Parser.from_string(newick).parse())
    parsed.name = name
    # Add translations as .comment attributes
    if "translate" in context:
        lookup = context["translate"]
        for leaf in parsed.get_terminals():
            leaf.comment = lookup[leaf.name]
    return parsed
def _handle_tree_torch(lines, context):
    """Parse one nexus ``tree`` statement and convert it to a ``Phylogeny``."""
    assert len(lines) == 1
    keyword, _name, equal, newick = lines[0].split()
    assert keyword == "tree"
    assert equal == "="
    bio_tree = next(Parser.from_string(newick).parse())
    phylogeny = Phylogeny.from_bio_phylo(bio_tree)
    _print_dot()  # progress indicator for long files
    return phylogeny
def _handle_raw(lines, context):
return lines, context
def _apply(fn, args):
return fn(*args)
def read_nexus_trees(filename, *, format="newick", max_num_trees=math.inf, processes=0):
    """
    Parse and iterate over newick trees stored in a nexus file.
    This streams the file and thus can handle larger files than
    ``Bio.Phylo.read(..., format="nexus")``.
    Returns an iterator of ``Bio.Phylo`` tree objects.

    :param str filename: Path to the nexus file.
    :param str format: One of "count", "newick", "torch" (the "_raw_*"
        variants are internal and yield unparsed statements for workers).
    :param max_num_trees: Stop after yielding this many trees.
    :param int processes: When nonzero, parse trees in a multiprocessing pool
        of this size.
    """
    # Pick the per-statement handler table for the requested output format.
    if format == "count":
        context = {}
        handlers = {"tree": _handle_tree_count}
    elif format == "newick":
        context = {"translate": {}}
        handlers = {"translate": _handle_translate, "tree": _handle_tree_newick}
    elif format == "_raw_newick":
        context = {"translate": {}}
        handlers = {"translate": _handle_translate, "tree": _handle_raw}
    elif format == "torch":
        context = None
        handlers = {"tree": _handle_tree_torch}
    elif format == "_raw_torch":
        context = None
        handlers = {"tree": _handle_raw}
    else:
        raise ValueError(f"unknown format: {format}")
    if processes != 0:
        # Parallel mode: stream raw statements from this process and fan the
        # actual parsing out to a worker pool via _apply (picklable).
        trees = read_nexus_trees(
            filename, format="_raw_" + format, max_num_trees=max_num_trees
        )
        with mp.Pool(processes) as pool:
            handler = functools.partial(_apply, handlers["tree"])
            yield from pool.imap(handler, trees)
        return
    with open(filename) as f:
        lines = iter(f)
        # Skip the header until the trees section begins.
        for line in lines:
            if line.startswith("Begin trees;"):
                break
        part = []
        # Accumulate physical lines until ';' terminates a logical statement,
        # then dispatch on the statement's first word.
        for line in lines:
            line = line.strip()
            part.append(line)
            if not line.endswith(";"):
                continue
            type_ = part[0].split()[0].lower()
            handle = handlers.get(type_)
            if handle is not None:
                tree = handle(part, context)
                if tree is not None:
                    yield tree
                    max_num_trees -= 1
                    if max_num_trees <= 0:
                        break
            part = []
def count_nexus_trees(filename):
    """Return the number of trees stored in a nexus file."""
    total = 0
    # format="count" yields 1 per tree statement without parsing the newick.
    for one in read_nexus_trees(filename, format="count"):
        total += one
    return total
def stack_nexus_trees(filename, *, max_num_trees=math.inf, processes=0):
    """Load up to ``max_num_trees`` trees from a nexus file as one stacked
    ``Phylogeny`` batch, optionally parsing in parallel."""
    tree_iter = read_nexus_trees(
        filename,
        format="torch",
        max_num_trees=max_num_trees,
        processes=processes,
    )
    return Phylogeny.stack(tree_iter)
def read_newick_tree(filename):
    """
    Parse a single newick tree file and convert it to a ``Phylogeny``.
    """
    with open(filename) as f:
        newick = f.read().strip()
    bio_tree = next(Parser.from_string(newick).parse())
    return Phylogeny.from_bio_phylo(bio_tree)
def read_alignment(
    filename, format=None, *, max_taxa=math.inf, max_characters=math.inf
):
    """
    Reads a single alignment file to a torch tensor of probabilities.
    :param str filename: Name of input file.
    :param str format: Optional input format, e.g. "nexus" or "fasta".
    :param int max_taxa: Optional number of taxa for truncation.
    :param int max_characters: Optional number of characters for truncation.
    :rtype: torch.Tensor
    :returns: A float tensor of shape ``(num_sequences, num_characters,
        num_bases)`` that is normalized along its rightmost dimension. Note
        that ``num_bases`` is 5 = 4 + 1, where the final base denotes a gap or
        indel.
    """
    # Load a Bio.Align.MultipleSeqAlignment object.
    # Fix: the message previously omitted the filename it claimed to report
    # (an f-string with no placeholder); use lazy %-formatting with the path.
    logger.info("Loading data from %s", filename)
    if format is None:
        suffix = filename.split(".")[-1].lower()
        format = FILE_FORMATS.get(suffix)
    if format is None:
        raise ValueError("Please specify a file format, e.g. 'nexus' or 'fasta'")
    elif format == "nexus":
        alignment = _read_alignment_nexus(filename)
    elif format == "beast":
        alignment = _read_alignment_beast(filename)
    else:
        alignment = AlignIO.read(filename, format)
    # Convert to a single torch.Tensor, truncating if requested.
    num_taxa = min(len(alignment), max_taxa)
    if num_taxa < len(alignment):
        alignment = alignment[:num_taxa]
    num_characters = min(len(alignment[0]), max_characters)
    if num_characters < len(alignment[0]):
        alignment = alignment[:, :num_characters]
    logger.info("parsing %s taxa x %s characters", num_taxa, num_characters)
    codebook = _get_codebook()
    # Default every position to uniform "missing" probability 1/5.
    probs = torch.full((num_taxa, num_characters, 5), 1 / 5)
    for i in range(num_taxa):
        seq = alignment[i].seq
        if not VALID_CODES.issuperset(seq):
            raise ValueError(f"Invalid characters: {set(seq) - VALID_CODES}")
        # Replace gaps at ends with missing (leave them at the uniform prior).
        beg, end = 0, probs.size(1)
        if seq[0] in "-.N":
            seq, old = seq.lstrip(seq[0]), seq
            beg += len(old) - len(seq)
        if seq[-1] in "-.N":
            seq, old = seq.rstrip(seq[-1]), seq
            end -= len(old) - len(seq)
        # Vectorized lookup: map each character's ordinal through the codebook.
        probs[i, beg:end] = codebook[list(map(ord, seq))]
    assert torch.isfinite(probs).all()
    return probs
def _read_alignment_nexus(filename):
    # Work around bugs in Bio.Nexus reader: pre-filter the file down to the
    # TAXA and CHARACTERS sections and rewrite ambiguity sets like {AG} into
    # single IUPAC codes, then hand the cleaned text to AlignIO.
    lines = []
    section = None       # name of the BEGIN section currently being read
    done = set()         # sections whose END; has been seen
    with open(filename) as f:
        for line in f:
            if line.startswith("BEGIN"):
                # e.g. "BEGIN CHARACTERS;" -> "CHARACTERS" (strip trailing ';')
                section = line.split()[-1].strip()[:-1]
            elif line.startswith("END;"):
                done.add(section)
                section = None
                if "TAXA" in done and "CHARACTERS" in done:
                    # Keep the closing END; and stop -- later sections are not needed.
                    lines.append(line)
                    break
            elif section == "CHARACTERS":
                if "{" in line:
                    # Replace {ACGT...} ambiguity sets with one-letter IUPAC codes.
                    line = re.sub("{([ATCG]+)}", _encode_ambiguity, line)
            lines.append(line)
    f = io.StringIO("".join(lines))
    alignment = AlignIO.read(f, "nexus")
    return alignment
def _read_alignment_beast(filename):
    """Extract ``<sequence id=... value=...>`` records from a BEAST XML file."""
    records = []
    with open(filename) as f:
        for raw in f:
            stripped = raw.strip()
            if not stripped.startswith("<sequence "):
                continue
            seq_id = re.search(r'\bid="([^"]*)"', stripped).group(1)
            value = re.search(r'\bvalue="([^"]*)"', stripped).group(1)
            records.append(SeqRecord(Seq(value), id=seq_id))
    return records
# See https://www.bioinformatics.org/sms/iupac.html
# Each code maps to a probability row over [A, C, G, T, gap].
# Note the asymmetry: lowercase "n" / "?" spread mass over all 5 slots
# (including gap), while uppercase "N" excludes the gap slot.
NUCLEOTIDE_CODES = {
    # [ A, C, G, T, gap]
    "?": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5],  # missing
    "n": [1 / 5, 1 / 5, 1 / 5, 1 / 5, 1 / 5],  # missing
    "A": [1 / 1, 0.0, 0.0, 0.0, 0.0],  # adenine
    "C": [0.0, 1 / 1, 0.0, 0.0, 0.0],  # cytosine
    "G": [0.0, 0.0, 1 / 1, 0.0, 0.0],  # guanine
    "T": [0.0, 0.0, 0.0, 1 / 1, 0.0],  # thymine
    "U": [0.0, 0.0, 0.0, 1 / 1, 0.0],  # uracil
    # IUPAC two-base ambiguity codes: uniform over the possible bases.
    "R": [1 / 2, 0.0, 1 / 2, 0.0, 0.0],
    "Y": [0.0, 1 / 2, 0.0, 1 / 2, 0.0],
    "S": [0.0, 1 / 2, 1 / 2, 0.0, 0.0],
    "W": [1 / 2, 0.0, 0.0, 1 / 2, 0.0],
    "K": [0.0, 0.0, 1 / 2, 1 / 2, 0.0],
    "M": [1 / 2, 1 / 2, 0.0, 0.0, 0.0],
    # Three-base ambiguity codes.
    "B": [0.0, 1 / 3, 1 / 3, 1 / 3, 0.0],
    "D": [1 / 3, 0.0, 1 / 3, 1 / 3, 0.0],
    "H": [1 / 3, 1 / 3, 0.0, 1 / 3, 0.0],
    "V": [1 / 3, 1 / 3, 1 / 3, 0.0, 0.0],
    "N": [1 / 4, 1 / 4, 1 / 4, 1 / 4, 0.0],
    "-": [0.0, 0.0, 0.0, 0.0, 1 / 1],  # gap
    ".": [0.0, 0.0, 0.0, 0.0, 1 / 1],  # gap
}
# Characters accepted by read_alignment's validity check.
VALID_CODES = set(NUCLEOTIDE_CODES)
# Base-set -> IUPAC letter, used by _encode_ambiguity to rewrite {ACGT...} sets.
AMBIGUOUS_CODES = {
    frozenset("AG"): "R",
    frozenset("CT"): "Y",
    frozenset("CG"): "S",
    frozenset("AT"): "W",
    frozenset("GT"): "K",
    frozenset("AC"): "M",
    frozenset("CGT"): "B",
    frozenset("AGT"): "D",
    frozenset("ACT"): "H",
    frozenset("ACG"): "V",
    frozenset("ACGT"): "N",
}
# 6 pairs + 4 triples + 1 quadruple: the full IUPAC ambiguity set.
assert len(AMBIGUOUS_CODES) == 6 + 4 + 1
def _encode_ambiguity(chars):
    """``re.sub`` callback: replace e.g. ``{AG}`` with its IUPAC code ``R``."""
    bases = frozenset(chars.group(1))
    return AMBIGUOUS_CODES[bases]
def _get_codebook():
    """Build a 256 x 5 table mapping ``ord(char)`` to base probabilities.

    Rows for characters without a nucleotide code stay NaN, so any lookup
    of an unexpected character trips the caller's ``isfinite`` assert.
    """
    codebook = torch.full((256, 5), math.nan)
    rows = torch.tensor(list(NUCLEOTIDE_CODES.values()))
    # Every probability row must sum to 1 (up to float error).
    assert rows.sum(-1).sub(1).abs().le(1e-6).all()
    codebook[list(map(ord, NUCLEOTIDE_CODES.keys()))] = rows
    return codebook
| [
"logging.getLogger",
"re.search",
"Bio.AlignIO.read",
"Bio.Phylo.NewickIO.Parser.from_string",
"torch.full",
"Bio.Seq.Seq",
"torch.isfinite",
"torch.multiprocessing.Pool",
"functools.partial",
"re.sub",
"sys.stdout.flush",
"sys.stdout.write"
] | [((375, 402), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (392, 402), False, 'import logging\n'), ((531, 552), 'sys.stdout.write', 'sys.stdout.write', (['"""."""'], {}), "('.')\n", (547, 552), False, 'import sys\n'), ((557, 575), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (573, 575), False, 'import sys\n'), ((6058, 6106), 'torch.full', 'torch.full', (['(num_taxa, num_characters, 5)', '(1 / 5)'], {}), '((num_taxa, num_characters, 5), 1 / 5)\n', (6068, 6106), False, 'import torch\n'), ((7462, 7486), 'Bio.AlignIO.read', 'AlignIO.read', (['f', '"""nexus"""'], {}), "(f, 'nexus')\n", (7474, 7486), False, 'from Bio import AlignIO\n'), ((9449, 9479), 'torch.full', 'torch.full', (['(256, 5)', 'math.nan'], {}), '((256, 5), math.nan)\n', (9459, 9479), False, 'import torch\n'), ((2812, 2830), 'torch.multiprocessing.Pool', 'mp.Pool', (['processes'], {}), '(processes)\n', (2819, 2830), True, 'import torch.multiprocessing as mp\n'), ((2862, 2905), 'functools.partial', 'functools.partial', (['_apply', "handlers['tree']"], {}), "(_apply, handlers['tree'])\n", (2879, 2905), False, 'import functools\n'), ((6670, 6691), 'torch.isfinite', 'torch.isfinite', (['probs'], {}), '(probs)\n', (6684, 6691), False, 'import torch\n'), ((992, 1018), 'Bio.Phylo.NewickIO.Parser.from_string', 'Parser.from_string', (['newick'], {}), '(newick)\n', (1010, 1018), False, 'from Bio.Phylo.NewickIO import Parser\n'), ((1459, 1485), 'Bio.Phylo.NewickIO.Parser.from_string', 'Parser.from_string', (['newick'], {}), '(newick)\n', (1477, 1485), False, 'from Bio.Phylo.NewickIO import Parser\n'), ((4302, 4326), 'Bio.Phylo.NewickIO.Parser.from_string', 'Parser.from_string', (['line'], {}), '(line)\n', (4320, 4326), False, 'from Bio.Phylo.NewickIO import Parser\n'), ((5596, 5626), 'Bio.AlignIO.read', 'AlignIO.read', (['filename', 'format'], {}), '(filename, format)\n', (5608, 5626), False, 'from Bio import AlignIO\n'), ((7741, 7775), 're.search', 're.search', 
(['"""\\\\bid="([^"]*)\\""""', 'line'], {}), '(\'\\\\bid="([^"]*)"\', line)\n', (7750, 7775), False, 'import re\n'), ((7803, 7840), 're.search', 're.search', (['"""\\\\bvalue="([^"]*)\\""""', 'line'], {}), '(\'\\\\bvalue="([^"]*)"\', line)\n', (7812, 7840), False, 'import re\n'), ((7886, 7894), 'Bio.Seq.Seq', 'Seq', (['seq'], {}), '(seq)\n', (7889, 7894), False, 'from Bio.Seq import Seq\n'), ((7332, 7378), 're.sub', 're.sub', (['"""{([ATCG]+)}"""', '_encode_ambiguity', 'line'], {}), "('{([ATCG]+)}', _encode_ambiguity, line)\n", (7338, 7378), False, 'import re\n')] |
import numpy as np
import pandas as pd
from sklearn.linear_model import LinearRegression
def convert_to_sqft(str):
    """Convert a raw 'total_sqft' cell to a float number of square feet.

    Ranges like ``'1000 - 1200'`` become their midpoint; plain numbers parse
    directly; unparseable entries (e.g. ``'34.46Sq. Meter'``) become NaN so
    ``dropna`` can discard them later.

    NOTE: the parameter shadows the builtin ``str``; the name is kept for
    interface compatibility with existing keyword callers.

    Fixes: ``np.NAN`` (alias removed in NumPy 2.0) replaced by ``np.nan``,
    and the overly broad ``except Exception`` narrowed to ``ValueError``,
    the only error ``float()`` raises for bad text.
    """
    tokens = str.split(' - ')
    if len(tokens) == 2:
        # A range: return the midpoint.
        return (float(tokens[0]) + float(tokens[1])) / 2
    try:
        return float(tokens[0])
    except ValueError:
        # Entries with units like 'Sq. Meter' or 'Acres' are not convertible.
        return np.nan
def convert_to_num(num):
    """Extract the leading number from values like ``'2 BHK'`` -> ``2.0``."""
    first_token = str(num).split(' ')[0]
    return float(first_token)
def train_model(X, Y):
    """Fit and return an ordinary least-squares regression on (X, Y)."""
    model = LinearRegression()
    model.fit(X, Y)
    return model
def get_training_data():
    """Load and clean the Bengaluru housing dataset.

    Returns a tuple ``(X, Y)`` where X holds the numeric/one-hot
    features and Y the price column.
    """
    raw = pd.read_csv("./Bengaluru_House_Data.csv")
    # Drop the columns the model does not use.
    df = raw.drop(columns=["area_type", "balcony", "society", "availability"], axis='columns')
    # Normalize the free-text numeric fields.
    df['total_sqft'] = df['total_sqft'].apply(convert_to_sqft)
    df['size'] = df['size'].apply(convert_to_num)
    # One-hot encode locations and swap them in for the categorical column.
    location_dummies = pd.get_dummies(df["location"])
    merged = pd.concat([df.drop(columns=["location"]), location_dummies], axis='columns')
    merged = merged.drop(columns=["Unnamed: 9"], axis='columns')
    merged = merged.dropna()
    features = merged.drop(['price'], axis='columns')
    prices = merged['price']
    return features, prices
def predict_price(regression, X, location, bhk, total_sqft, bath):
    """Predict a house price for the given features.

    Params
    ======
        regression: fitted model exposing ``predict``.
        X: training feature frame (used only for its column layout).
        location (str): location name; if it is not one of X's one-hot
            columns, the all-zero location encoding is used instead of
            raising IndexError (which the original code did).
        bhk, total_sqft, bath: numeric features occupying columns 0-2.
    """
    matches = np.where(X.columns == location)[0]
    # -1 signals an unknown location; the guard below then leaves the
    # one-hot part of the feature vector untouched.
    location_index = matches[0] if matches.size else -1
    x = np.zeros(len(X.columns))
    x[0] = bhk
    x[1] = total_sqft
    x[2] = bath
    if location_index >= 0:
        x[location_index] = 1
    return regression.predict([x])[0]
| [
"pandas.get_dummies",
"numpy.where",
"sklearn.linear_model.LinearRegression",
"pandas.read_csv"
] | [((441, 459), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (457, 459), False, 'from sklearn.linear_model import LinearRegression\n'), ((549, 590), 'pandas.read_csv', 'pd.read_csv', (['"""./Bengaluru_House_Data.csv"""'], {}), "('./Bengaluru_House_Data.csv')\n", (560, 590), True, 'import pandas as pd\n'), ((821, 851), 'pandas.get_dummies', 'pd.get_dummies', (["df['location']"], {}), "(df['location'])\n", (835, 851), True, 'import pandas as pd\n'), ((1219, 1250), 'numpy.where', 'np.where', (['(X.columns == location)'], {}), '(X.columns == location)\n', (1227, 1250), True, 'import numpy as np\n')] |
import warnings
from typing import Dict, Tuple
from lhotse import CutSet
from lhotse.dataset.sampling.base import CutSampler
def find_pessimistic_batches(
    sampler: CutSampler, batch_tuple_index: int = 0
) -> Tuple[Dict[str, CutSet], Dict[str, float]]:
    """
    Fully iterate ``sampler`` and record, per risk criterion, the batch with
    the highest potential to blow up GPU memory during training:

    - single longest cut
    - single longest supervision
    - largest batch cuts duration
    - largest batch supervisions duration
    - max num cuts
    - max num supervisions

    .. note: It is up to the caller to convert the sampled CutSets into actual
        batches and test them by running forward and backward passes with
        their model (e.g. with :class:`~lhotse.dataset.K2SpeechRecognitionDataset`,
        indexing the dataset with each returned CutSet and checking that the
        forward/backward pass does not raise).

    :param sampler: An instance of a Lhotse :class:`.CutSampler`.
    :param batch_tuple_index: Applicable to samplers that return tuples of
        :class:`~lhotse.cut.CutSet`. Indicates which tuple position holds the
        CutSet to inspect.
    :return: A tuple of dicts, batches and criteria values:
        ``({"<criterion>": <CutSet>, ...}, {"<criterion>": <value>, ...})``
    """

    def longest_cut(cuts):
        return max(c.duration for c in cuts)

    def longest_supervision(cuts):
        return max(sum(s.duration for s in c.supervisions) for c in cuts)

    def total_cut_duration(cuts):
        return sum(c.duration for c in cuts)

    def total_supervision_duration(cuts):
        return sum(s.duration for c in cuts for s in c.supervisions)

    def num_supervisions(cuts):
        return sum(1 for c in cuts for _ in c.supervisions)

    criteria = {
        "single_longest_cut": longest_cut,
        "single_longest_supervision": longest_supervision,
        "largest_batch_cuts_duration": total_cut_duration,
        "largest_batch_supervisions_duration": total_supervision_duration,
        "max_num_cuts": len,
        "max_num_supervisions": num_supervisions,
    }

    def unwrap(batch):
        # Some samplers yield tuples of CutSets; pick the configured slot.
        return batch[batch_tuple_index] if isinstance(batch, tuple) else batch

    batch_iter = iter(sampler)
    try:
        current = unwrap(next(batch_iter))
    except StopIteration:
        warnings.warn("Empty sampler encountered in find_pessimistic_batches()")
        return {}, {}

    # Seed every criterion with the first batch, then keep the running max.
    top_batches = {name: current for name in criteria}
    top_values = {name: measure(current) for name, measure in criteria.items()}

    for batch in batch_iter:
        batch = unwrap(batch)
        for name, measure in criteria.items():
            score = measure(batch)
            if score > top_values[name]:
                top_values[name] = score
                top_batches[name] = batch
    return top_batches, top_values
| [
"warnings.warn"
] | [((2753, 2825), 'warnings.warn', 'warnings.warn', (['"""Empty sampler encountered in find_pessimistic_batches()"""'], {}), "('Empty sampler encountered in find_pessimistic_batches()')\n", (2766, 2825), False, 'import warnings\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-03 08:41
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration for the ``book`` app: adds an
    # ``updated_at`` timestamp column and makes ``name`` unique.

    dependencies = [
        ('book', '0009_book_folder'),
    ]

    operations = [
        # Track the last modification time; auto_now refreshes it on every save.
        migrations.AddField(
            model_name='book',
            name='updated_at',
            field=models.DateTimeField(auto_now=True),
        ),
        # Enforce unique book names at the database level.
        migrations.AlterField(
            model_name='book',
            name='name',
            field=models.CharField(max_length=400, unique=True),
        ),
    ]
| [
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((389, 424), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (409, 424), False, 'from django.db import migrations, models\n'), ((542, 587), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(400)', 'unique': '(True)'}), '(max_length=400, unique=True)\n', (558, 587), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module contains the necessary functions to load a text-corpus from
NLTK, contract all possible sentences, applying POS-tags to the
contracted sentences and compare that with the original text.
The information about which contraction+pos-tag pair gets expanded to
which full form will be saved in a dictionary for use in expander.py
"""
__author__ = "<NAME>"
# standard library imports
import pprint
import yaml
# third-party library imports
import nltk
# local library imports
import utils
# increase the allowed ram size that the models can use
# nltk.internals.config_java(options='-xmx2G')
def _find_sub_list(sublist, full_list):
"""
Args:
- sublist is a list of words that are supposed to be found in
the full list.
- full list is a list of words that is supposed to be searched
in.
Returns:
- List of tuples with the form
(first_index_of_occurence, last_index_of_occurence)
This function finds all occurences of sublist in the full_list.
"""
# this is the output list
results = []
sublist_len = len(sublist)
# loop over all ind if the word in full_list[ind] matches the first
# word of the sublist
for ind in (i for i, word in enumerate(full_list)
if word == sublist[0]):
# check that the complete sublist is matched
if full_list[ind:ind+sublist_len] == sublist:
# then append this to the results
results.append((ind, ind+sublist_len-1))
return results
def _contract_sentences(expansions,
sent_lst,
use_ner,
ner_args):
"""
Args:
- expansions is a dictionary containing the corresponding
contractions to the expanded words
- sent_lst is a list of sentences, which is itself a list of
words, i.e. [["I", "am", "blue"], [...]].
- use_ner is boolean to decide whether to use
named-entity-recognition for a potential increase in
accuracy but with the obvious costs of performance.
- ner_args is a list with an object of StanfordNERTagger and
the tag to be used. This only needs to be
supplied if use_ner is true.
Returns:
- yields tuples of the form
(index of first word that was replaced,
list of words that were replaced,
contracted sentence).
The above example would then give
(0, ["I", "am"], ["I", "'m", "blue"])
Note that uncontractible sentences are not added to the
output.
Since yield is used, iterate over the results. Otherwise it
takes too much time.
This function checks a list of sentences for whether they can be
contracted. It starts with the first two words, then the first three
and then goes on to the second+third, then the second+third+fourth
and so on.
"""
# first find the indices of the sentences that contain contractions
for sent in sent_lst:
if use_ner:
# replace all named entities with the tag in ner_args[1]
# throw away replacement info
sent = utils.sent_to_ner(ner_args[0], sent,
tag=ner_args[1])[0]
# check whether any expansion is present then add the index
# it has a True for every expansion that is present
expansion_bool = [expansion in ' '.join(sent) for expansion
in list(expansions.keys())]
if not any(expansion_bool):
# if no expansions present just continue
continue
# convert the boolean list to a list of indices
expansion_idx = [i for i, boolean in enumerate(expansion_bool)
if boolean]
# the list of relevant expansions for the sentence
relevant_exp = [list(expansions.keys())[i] for i in expansion_idx]
for expansion in relevant_exp:
# first split the contraction up into a list of the same
# length as the expanded string
if len(expansion.split()) in [2, 3, 4]:
# if you contract three or two words,
# just split at apostrophes
contraction = expansions[expansion].split("'")
assert len(contraction) == len(expansion.split())
# add the apostrophes again
contraction[1] = "'" + contraction[1]
if len(contraction) == 3:
contraction[2] = "'" + contraction[2]
if len(contraction) == 4:
contraction[3] = "'" + contraction[3]
else:
# this case is only entered when there is only one word
# input. So assert that this is the case.
assert len(expansion) == 1
# this is a completely pathological case, since
# ambiguous 1-word replacements are not in the common
# list of replacements from wikipedia. But since one can
# openly expand contractions.yaml it is checked.
contraction = expansions[expansion]
# find where the sublist occurs
occurences = _find_sub_list(expansion.split(), sent)
# loop over all first indices of occurences
# and insert the contracted part
for occurence in occurences:
contr_sent = sent[:occurence[0]] + contraction
contr_sent += sent[occurence[0]+len(contraction):]
yield (occurence[0],
sent[occurence[0]:occurence[0]+len(contraction)],
contr_sent)
def _invert_contractions_dict():
    """
    Return the inverted contractions dictionary: maps each ambiguous
    expanded form to its contraction. Contractions with only one
    possible expansion are skipped, since they need no disambiguation.
    """
    with open("contractions.yaml", "r") as stream:
        # safe_load avoids arbitrary object construction and keeps working
        # on PyYAML >= 6.0, where yaml.load() requires an explicit Loader.
        contractions = yaml.safe_load(stream)
    # invert the dictionary for quicker finding of contractions
    expansions = dict()
    for key, value in contractions.items():
        if len(value) == 1:
            # unambiguous contraction: nothing to disambiguate
            continue
        for expansion in value:
            if expansion in expansions:
                # the same expansion maps to two contractions; last one wins
                print("WARNING: As an contraction to {}, {} is replaced with"
                      " {}.".format(expansion,
                                    expansions[expansion],
                                    key))
            expansions[expansion] = key
    return expansions
def write_dictionary(pos_model,
                     sent_lst,
                     add_tags=0,
                     use_ner=False,
                     ner_args=None):
    """
    Args:
        - pos_model is an instance of StanfordPOSTagger
        - sent-lst a list of sentences which themselves are lists of the
          single words.
        - add_tags is the amount of pos tags used after the
          relevant contraction, this can be used to further
          disambiguate but (of course) spreads out the data.
        - use_ner is boolean to decide whether to use
          named-entity-recognition for a potential increase in
          accuracy but with the obvious costs of performance.
        - ner_args is a list with an object of StanfordNERTagger and
          the tag to be used. This only needs to be
          supplied if use_ner is true.
    Returns:
        - None, but writes a disambiguations.yaml file with disambiguations
          for the ambiguous contractions in contractions.yaml.
    Raises:
        ValueError if use_ner is True but no ner_model is supplied.
    Using the provided list of sentences, contract them and pos-tag them.
    Using the pos-tags it is then possible to classify which
    (contraction, pos-tag) combinations get expanded to which ambiguous
    long form.
    """
    # pylint: disable=too-many-locals
    if use_ner and (ner_args is None):
        raise ValueError("The use_ner flag is True but no NER"
                         " model has been supplied!")
    expansions = _invert_contractions_dict()
    # maps (contraction words+POS tuple) -> {expanded form: count}
    output_dict = dict()
    ambiguity_counter = 0
    # each tuple_rslt is (start index, replaced words, contracted sentence)
    for tuple_rslt in _contract_sentences(expansions,
                                         sent_lst,
                                         use_ner=use_ner,
                                         ner_args=ner_args):
        # pos tag the sentence
        if use_ner:
            # first replace the NER tag with "it" so the POS tagger sees a
            # normal pronoun instead of the placeholder token
            pos_sent = [word.replace(ner_args[1], "it") for word
                        in tuple_rslt[2]]
            # tag the sentence
            pos_sent = pos_model.tag(pos_sent)
            # and replace it with the tag again
            pos_sent = [(tuple_rslt[2][i], word_pos[1]) for i, word_pos
                        in enumerate(pos_sent)]
        else:
            pos_sent = pos_model.tag(tuple_rslt[2])
        # extract the pos tags on the contracted part
        contr_word_pos = pos_sent[tuple_rslt[0]:(tuple_rslt[0] +
                                                 len(tuple_rslt[1]))]
        if add_tags == 0:
            contr_pos = tuple(contr_word_pos)
        else:
            # NOTE(review): this slice starts at len(tuple_rslt[1]) rather
            # than tuple_rslt[0] + len(tuple_rslt[1]); it only picks the tags
            # directly *after* the contraction when the contraction starts at
            # index 0 — verify the intended start offset.
            add_pos_list = pos_sent[len(tuple_rslt[1]):(len(tuple_rslt[1]) +
                                                        add_tags)]
            add_pos = [pos_word[1] for pos_word in add_pos_list]
            contr_pos = tuple(contr_word_pos + add_pos)
        # write a dictionary entry connecting the (words, pos) of the
        # contraction to the expanded part
        word = ' '.join(tuple_rslt[1])
        if contr_pos not in output_dict:
            output_dict[contr_pos] = dict()
            output_dict[contr_pos][word] = 1
            # keep track of the progress
            print("\n\n ---- \n\n")
            pprint.pprint(output_dict)
            print("Ambiguity counter is {}.".format(ambiguity_counter))
            print("\n\n ---- \n\n")
        elif word in output_dict[contr_pos].keys():
            # check whether the entry is already there
            output_dict[contr_pos][word] += 1
            continue
        else:
            # if the combination of pos tags with words already occured
            # once then a list has to be made. Ideally this case doesn't
            # occur
            ambiguity_counter += 1
            output_dict[contr_pos][word] = 1
            print("\n\n ---- \n\n")
            print("AMBIGUITY ADDED!")
            pprint.pprint(output_dict)
            print("Ambiguity counter is {}.".format(ambiguity_counter))
            print("\n\n ---- \n\n")
    with open("disambiguations.yaml", "w") as stream:
        yaml.dump(output_dict, stream)
if __name__ == '__main__':
    # if you call this function directly just build the disambiguation
    # dictionary.
    # load a corpus that has the form of list of sentences which is
    # split up into a list of words
    SENT_LST = nltk.corpus.brown.sents()
    SENT_LST += nltk.corpus.gutenberg.sents()
    SENT_LST += nltk.corpus.reuters.sents()
    SENT_LST += nltk.corpus.inaugural.sents()
    # load the Stanford POS and NER taggers via the local utils helper
    POS_MODEL = utils.load_stanford('pos')
    NER_MODEL = utils.load_stanford('ner')
    # use one extra POS tag of context after each contraction; NER is
    # disabled here, so NER_MODEL is passed along but not used.
    write_dictionary(POS_MODEL,
                     SENT_LST,
                     add_tags=1,
                     use_ner=False,
                     ner_args=[NER_MODEL, "<NE>"])
| [
"nltk.corpus.reuters.sents",
"nltk.corpus.inaugural.sents",
"yaml.dump",
"nltk.corpus.brown.sents",
"yaml.load",
"nltk.corpus.gutenberg.sents",
"utils.load_stanford",
"utils.sent_to_ner",
"pprint.pprint"
] | [((11038, 11063), 'nltk.corpus.brown.sents', 'nltk.corpus.brown.sents', ([], {}), '()\n', (11061, 11063), False, 'import nltk\n'), ((11080, 11109), 'nltk.corpus.gutenberg.sents', 'nltk.corpus.gutenberg.sents', ([], {}), '()\n', (11107, 11109), False, 'import nltk\n'), ((11126, 11153), 'nltk.corpus.reuters.sents', 'nltk.corpus.reuters.sents', ([], {}), '()\n', (11151, 11153), False, 'import nltk\n'), ((11170, 11199), 'nltk.corpus.inaugural.sents', 'nltk.corpus.inaugural.sents', ([], {}), '()\n', (11197, 11199), False, 'import nltk\n'), ((11216, 11242), 'utils.load_stanford', 'utils.load_stanford', (['"""pos"""'], {}), "('pos')\n", (11235, 11242), False, 'import utils\n'), ((11259, 11285), 'utils.load_stanford', 'utils.load_stanford', (['"""ner"""'], {}), "('ner')\n", (11278, 11285), False, 'import utils\n'), ((6081, 6098), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (6090, 6098), False, 'import yaml\n'), ((10770, 10800), 'yaml.dump', 'yaml.dump', (['output_dict', 'stream'], {}), '(output_dict, stream)\n', (10779, 10800), False, 'import yaml\n'), ((9919, 9945), 'pprint.pprint', 'pprint.pprint', (['output_dict'], {}), '(output_dict)\n', (9932, 9945), False, 'import pprint\n'), ((3267, 3320), 'utils.sent_to_ner', 'utils.sent_to_ner', (['ner_args[0]', 'sent'], {'tag': 'ner_args[1]'}), '(ner_args[0], sent, tag=ner_args[1])\n', (3284, 3320), False, 'import utils\n'), ((10573, 10599), 'pprint.pprint', 'pprint.pprint', (['output_dict'], {}), '(output_dict)\n', (10586, 10599), False, 'import pprint\n')] |
# pylint: disable = C0111
from setuptools import find_packages, setup
setup(
    name="paperai",
    # NOTE: version/author/description/url metadata was disabled upstream;
    # only the fields below are active.
    license="Apache 2.0: C:\\Users\\sxm\\Desktop\\paperai\\LICENSE",
    packages=find_packages(where="C:\\Users\\sxm\\Desktop\\paperai\\src\\python"),
    package_dir={"": "src\\python"},
    keywords="search embedding machine-learning nlp covid-19 medical scientific papers",
    python_requires=">=3.6",
    # Expose the interactive shell as a console command.
    entry_points={
        "console_scripts": [
            "paperai = paperai.shell:main",
        ],
    },
    install_requires=[
        "html2text>=2020.1.16",
        "networkx>=2.4",
        "PyYAML>=5.3",
        "regex>=2020.5.14",
        "txtai>=1.4.0",
        "txtmarker>=1.0.0",
    ],
    classifiers=[
        "License :: OSI Approved :: Apache Software License",
        "Operating System :: OS Independent",
        "Programming Language :: Python :: 3",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development",
        "Topic :: Text Processing :: Indexing",
        "Topic :: Utilities",
    ],
)
]) | [
"setuptools.find_packages"
] | [((914, 982), 'setuptools.find_packages', 'find_packages', ([], {'where': '"""C:\\\\Users\\\\sxm\\\\Desktop\\\\paperai\\\\src\\\\python"""'}), "(where='C:\\\\Users\\\\sxm\\\\Desktop\\\\paperai\\\\src\\\\python')\n", (927, 982), False, 'from setuptools import find_packages, setup\n')] |
from io import BytesIO
from typing import List, Dict
from PIL import Image
from hit_analysis.commons.config import Config
from hit_analysis.commons.consts import IMAGE, CROP_X, CROP_Y, CROP_SIZE, FRAME_DECODED, CLASSIFIED, CLASS_ARTIFACT, ORIG_IMAGE
def append_to_frame(image: Image, detection: dict):
    """Paste a detection's hit crop back into the full frame image.

    Also re-paints a one-pixel border along the right and bottom edges to
    compensate for a bug in early CREDO Detector App builds where the
    black-filled boundary was one pixel too large.
    """
    crop = detection.get(IMAGE)
    left = detection[CROP_X]
    top = detection[CROP_Y]
    width, height = detection[CROP_SIZE]
    image.paste(crop, (left, top, left + width, top + height))
    # Duplicate the rightmost column one pixel to the right.
    image.paste(image.crop((left + width - 1, top, left + width, top + height)),
                (left + width, top, left + width + 1, top + height))
    # Duplicate the bottom row one pixel down.
    image.paste(image.crop((left, top + height - 1, left + width, top + height)),
                (left, top + height, left + width, top + height + 1))
    # Duplicate the bottom-right corner pixel diagonally.
    image.paste(image.crop((left + width - 1, top + height - 1, left + width, top + height)),
                (left + width, top + height, left + width + 1, top + height + 1))
def replace_from_frame(image: Image, detection: dict):
    """Cut the detection's region out of the (reconstructed) frame.

    The crop becomes the detection's new IMAGE, the previous image is kept
    under ORIG_IMAGE, and the crop's PNG bytes are stored in FRAME_DECODED.
    """
    left, top = detection.get(CROP_X), detection.get(CROP_Y)
    width, height = detection.get(CROP_SIZE)
    crop = image.crop((left, top, left + width, top + height))
    detection[ORIG_IMAGE] = detection[IMAGE]
    detection[IMAGE] = crop
    with BytesIO() as buffer:
        crop.save(buffer, format="png")
        detection[FRAME_DECODED] = buffer.getvalue()
def do_reconstruct(detections: List[dict], config: Config) -> None:
    """
    Reconstruct the frame that was black-filled per crop by the CREDO
    Detector app v2, then re-crop each detection from it.

    Each detection's image is replaced with its crop of the rebuilt frame
    (the previous image is kept under ORIG_IMAGE and the new PNG bytes go
    to FRAME_DECODED — see replace_from_frame).
    No changes are made when the count of detections is less or equal 1.
    :param detections: should be sorted by detection_id
    :param config: config object
    """
    if len(detections) <= 1:
        return
    # Output subpath: device_id/timestamp.
    sp = [str(detections[0].get('device_id')), str(detections[0].get('timestamp'))]
    # Start from an all-black frame of the first detection's dimensions.
    image = Image.new('RGBA', (detections[0].get('width'), detections[0].get('height')), (0, 0, 0))
    edge = 'no_edge'
    for d in detections:
        if d.get('edge'):
            edge = 'edge'
    # Paste in reverse order so earlier detections win on overlap.
    for d in reversed(detections):
        append_to_frame(image, d)
        config.store_png(['recostruct', edge, *sp, 'orig'], d.get('id'), d.get(IMAGE))
    for d in detections:
        replace_from_frame(image, d)
        config.store_png(['recostruct', edge, *sp], d.get('id'), d.get(IMAGE))
    if config.out_dir:
        image.save('%s/recostruct/%s/%s/frame.png' % (config.out_dir, edge, "/".join(sp)))
def check_all_artifacts(detections: List[dict]) -> bool:
    """
    Return True when every detection in the list is classified as an
    artifact (vacuously True for an empty list).
    :param detections: list of detections to check
    """
    return all(d.get(CLASSIFIED) == CLASS_ARTIFACT for d in detections)
def filter_unclassified(by_timestamp: Dict[int, List[dict]]) -> List[int]:
    """
    Return the timestamp keys whose detection group contains at least one
    detection that is NOT classified as an artifact.
    :param by_timestamp: detections grouped by timestamp
    """
    return [timestamp for timestamp, group in by_timestamp.items()
            if not check_all_artifacts(group)]
| [
"io.BytesIO"
] | [((1155, 1164), 'io.BytesIO', 'BytesIO', ([], {}), '()\n', (1162, 1164), False, 'from io import BytesIO\n')] |
import numpy as np
import numpy.testing as npt
import slippy
import slippy.core as core
"""
If you add a material you need to add the properties that it will be tested with to the material_parameters dict,
the key should be the name of the class (what ever it is declared as after the class key word).
The value should be a tuple of dicts:
The first dict in the tuple will be unpacked to instantiate the class,
The second will be used with the displacement from loads method
The third will be used with the loads from displacement method to ensure that the methods are inverses of each other
If there is a limit the applicability of the displacements from loads method (such as for a perfectly plastic material
the _max_load key word should be set in the second dict.
For more complex behaviour please also implement your own tests
"""
# Per-material test parameters, keyed by material_type. Each value is a
# 3-tuple: (constructor kwargs,
#           displacement_from_surface_loads kwargs,
#           loads_from_surface_displacement kwargs).
material_parameters = {
    'Elastic': ({'name': 'steel_5', 'properties': {'E': 200e9, 'v': 0.3}},
                {'grid_spacing': 0.01, 'simple': True},
                {'grid_spacing': 0.01, 'simple': True, 'tol': 1e-9}),
    'Rigid': ({}, {}, {})
}

# Materials excluded from the generic round-trip test below.
exceptions = [core.Rigid]
def test_materials_basic():
    """For every registered _IMMaterial (except the listed exceptions),
    check that displacement_from_surface_loads and
    loads_from_surface_displacement are inverses of each other."""
    # check that one of influence matrix or displacement from loading is given
    for material in core.materials._IMMaterial._subclass_registry:
        if material in exceptions:
            continue
        try:
            mat_params = material_parameters[material.material_type]
        except KeyError:
            raise AssertionError(f"Material test parameters are not specified, for material {material.material_type}")
        mat_instance = material(**mat_params[0])
        # '_max_load' caps the random loads for materials with a limited
        # validity range (e.g. perfectly plastic); defaults to 1.
        max_load = mat_params[1].pop('_max_load', 1)
        # fixed seed so the random load field is reproducible
        np.random.seed(0)
        loads = np.random.rand(16, 16) * max_load
        # check that the loads and displacement functions are inverse of each other
        for direction in {'x', 'y', 'z'}:
            load_in_direction = {direction: loads}
            displacement = mat_instance.displacement_from_surface_loads(load_in_direction, **mat_params[1])
            set_disp = displacement[direction]
            loads_calc = mat_instance.loads_from_surface_displacement(displacements={direction: set_disp},
                                                                      **mat_params[2])
            # allow 2% of the max load as round-trip tolerance
            npt.assert_allclose(loads, slippy.asnumpy(loads_calc[direction]), atol=max_load * 0.02)
def test_elastic_coupled():
    """Round-trip test for the Elastic material with loads applied in two
    directions at once (each adjacent pair of x/y/z), for both the simple
    and the full (coupled) solver paths."""
    mat = core.Elastic('steel_6', {'E': 200e9, 'v': 0.3})
    # fixed seed so both load fields are reproducible
    np.random.seed(0)
    loads1 = np.random.rand(16, 16)
    loads2 = np.random.rand(16, 16)
    # 'xyzx' yields the direction pairs (x,y), (y,z), (z,x)
    directions = 'xyzx'
    for i in range(3):
        dir_1 = directions[i]
        dir_2 = directions[i+1]
        loads_in_direction = {dir_1: loads1, dir_2: loads2}
        displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=True)
        loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
                                                          grid_spacing=0.01, simple=True)
        for direction in [dir_1, dir_2]:
            npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
        # repeat with the coupled (non-simple) formulation
        displacement = mat.displacement_from_surface_loads(loads_in_direction, grid_spacing=0.01, simple=False)
        loads_calc = mat.loads_from_surface_displacement(displacements=displacement,
                                                          grid_spacing=0.01, simple=False)
        for direction in [dir_1, dir_2]:
            npt.assert_allclose(loads_in_direction[direction], slippy.asnumpy(loads_calc[direction]), atol=0.02)
| [
"slippy.asnumpy",
"slippy.core.Elastic",
"numpy.random.rand",
"numpy.random.seed"
] | [((2428, 2484), 'slippy.core.Elastic', 'core.Elastic', (['"""steel_6"""', "{'E': 200000000000.0, 'v': 0.3}"], {}), "('steel_6', {'E': 200000000000.0, 'v': 0.3})\n", (2440, 2484), True, 'import slippy.core as core\n'), ((2480, 2497), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2494, 2497), True, 'import numpy as np\n'), ((2512, 2534), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (2526, 2534), True, 'import numpy as np\n'), ((2548, 2570), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (2562, 2570), True, 'import numpy as np\n'), ((1689, 1706), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (1703, 1706), True, 'import numpy as np\n'), ((1724, 1746), 'numpy.random.rand', 'np.random.rand', (['(16)', '(16)'], {}), '(16, 16)\n', (1738, 1746), True, 'import numpy as np\n'), ((2327, 2364), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_calc[direction]'], {}), '(loads_calc[direction])\n', (2341, 2364), False, 'import slippy\n'), ((3131, 3168), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_calc[direction]'], {}), '(loads_calc[direction])\n', (3145, 3168), False, 'import slippy\n'), ((3573, 3610), 'slippy.asnumpy', 'slippy.asnumpy', (['loads_calc[direction]'], {}), '(loads_calc[direction])\n', (3587, 3610), False, 'import slippy\n')] |
#!/usr/bin/env python
"""Restore files with ending BACKUP_ENDING to original files."""
# The copyright in this software is being made available under the BSD License,
# included below. This software may be subject to other third party and contributor
# rights, including patent rights, and no such rights are granted under this license.
#
# Copyright (c) 2016, Dash Industry Forum.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
# * Neither the name of Dash Industry Forum nor the names of its
# contributors may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS AS IS AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import sys
from backup_handler import BACKUP_ENDING
def main():
    """Command-line entry point: restore each file ending in BACKUP_ENDING
    to its original name, overwriting any existing original."""
    from optparse import OptionParser
    parser = OptionParser()
    (_options, args) = parser.parse_args()
    if len(args) < 1:
        # parser.error() prints the message and exits the process itself,
        # so the unreachable sys.exit(1) that followed it was removed.
        parser.error("Wrong number of arguments")
    for file_name in args:
        # Skip anything that is not a backup file.
        if not file_name.endswith(BACKUP_ENDING):
            continue
        old_name = file_name[:-len(BACKUP_ENDING)]
        print("moving %s to %s" % (file_name, old_name))
        if os.path.exists(old_name):
            os.unlink(old_name)
        os.rename(file_name, old_name)
if __name__ == "__main__":
    # Run only when executed as a script, not when imported.
    main()
| [
"os.path.exists",
"os.rename",
"optparse.OptionParser",
"os.unlink",
"sys.exit"
] | [((2044, 2058), 'optparse.OptionParser', 'OptionParser', ([], {}), '()\n', (2056, 2058), False, 'from optparse import OptionParser\n'), ((2220, 2231), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (2228, 2231), False, 'import sys\n'), ((2436, 2460), 'os.path.exists', 'os.path.exists', (['old_name'], {}), '(old_name)\n', (2450, 2460), False, 'import os\n'), ((2510, 2540), 'os.rename', 'os.rename', (['file_name', 'old_name'], {}), '(file_name, old_name)\n', (2519, 2540), False, 'import os\n'), ((2478, 2497), 'os.unlink', 'os.unlink', (['old_name'], {}), '(old_name)\n', (2487, 2497), False, 'import os\n')] |
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pyglet
import numpy as np
import sklearn.preprocessing
class Joint_extractor:
    """Triangulates 3D joint positions from 2D keypoints by accumulating,
    per joint, the back-projection rays of each observation and then
    intersecting them in a least-squares sense."""

    def __init__(self, num_of_joints=18):
        """
        :param num_of_joints: number of skeleton joints tracked (default 18).
        """
        self.num_of_joints = num_of_joints
        # Per-joint lists of ray start/end points accumulated over frames.
        self.start_points = []
        self.end_points = []
        # BUGFIX: the original always allocated exactly 18 slots
        # (range(18)), ignoring num_of_joints; allocate one slot per
        # tracked joint instead.
        for j in range(self.num_of_joints):
            self.start_points.append([])
            self.end_points.append([])

    def compute_rays(self, cv_kps, image_width, image_height):
        """Unproject each valid 2D keypoint into a 3D ray using the current
        OpenGL modelview/projection matrices and viewport, and append the
        ray endpoints to the per-joint accumulators.

        :param cv_kps: array of 2D keypoints; entries with x == -1 are
            treated as missing and skipped.
        :param image_width: unused here; kept for interface compatibility.
        :param image_height: image height, used to flip the y axis into
            OpenGL's bottom-left convention.
        """
        pmat = (pyglet.gl.GLdouble * 16)()
        mvmat = (pyglet.gl.GLdouble * 16)()
        view = (pyglet.gl.GLint * 4)()
        # Read the current GL matrices and viewport for unprojection.
        pyglet.gl.glGetDoublev(pyglet.gl.GL_MODELVIEW_MATRIX, mvmat)
        pyglet.gl.glGetDoublev(pyglet.gl.GL_PROJECTION_MATRIX, pmat)
        pyglet.gl.glGetIntegerv(pyglet.gl.GL_VIEWPORT, view)
        if cv_kps.size != 0:
            for i, cv_kp in enumerate(cv_kps):
                if cv_kp[0] != -1 and cv_kp[0] != -1:
                    start_x = pyglet.gl.GLdouble()
                    start_y = pyglet.gl.GLdouble()
                    start_z = pyglet.gl.GLdouble()
                    end_x = pyglet.gl.GLdouble()
                    end_y = pyglet.gl.GLdouble()
                    end_z = pyglet.gl.GLdouble()
                    # Unproject at the near (z=0) and far (z=1) planes to
                    # obtain the two endpoints of the viewing ray.
                    pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 0, mvmat, pmat, view, start_x,
                                           start_y, start_z)
                    pyglet.gl.gluUnProject(cv_kp[0], image_height - cv_kp[1], 1, mvmat, pmat, view, end_x, end_y,
                                           end_z)
                    self.start_points[i].append(np.asarray([start_x.value, start_y.value, start_z.value]))
                    self.end_points[i].append(np.asarray([end_x.value, end_y.value, end_z.value]))

    @property
    def compute_3D_positions(self):
        """Least-squares intersection of the accumulated rays per joint,
        with iterative outlier rejection: the ray farthest from the current
        intersection point is dropped until the worst residual is <= 0.05.

        :return: (points_3D, dists_3D) — per-joint 3D positions and the
            residual distances of the surviving rays; ([], []) when any
            joint has no accumulated rays.
        """
        for i in range(self.num_of_joints):
            if len(self.start_points[i]) == 0 or len(self.end_points[i]) == 0:
                print("Failed to estimate the position of the joints...")
                return [[], []]
        points_3D = []
        dists_3D = []
        inds_sorted = None
        for i in range(self.num_of_joints):
            d = 100
            first_time = True
            # Repeat, dropping the worst ray each pass, until the largest
            # point-to-ray distance is within tolerance.
            while d > 0.05:
                if first_time:
                    s = np.asarray(self.start_points[i])
                    e = np.asarray(self.end_points[i])
                else:
                    s = s[inds_sorted[:-1]]
                    e = e[inds_sorted[:-1]]
                v = e - s
                # Unit direction vectors of the rays.
                ni = sklearn.preprocessing.normalize(v, norm="l2")
                nx = ni[:, 0]
                ny = ni[:, 1]
                nz = ni[:, 2]
                # Assemble the normal equations S * p = C for the point p
                # minimizing the summed squared distances to all rays.
                sxx = np.sum(nx * nx - 1)
                syy = np.sum(ny * ny - 1)
                szz = np.sum(nz * nz - 1)
                sxy = np.sum(nx * ny)
                sxz = np.sum(nx * nz)
                syz = np.sum(ny * nz)
                S = np.asarray([np.asarray([sxx, sxy, sxz]), np.asarray([sxy, syy, syz]), np.asarray([sxz, syz, szz])])
                cx = np.sum(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))
                cy = np.sum(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))
                cz = np.sum(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))
                C = np.asarray([cx, cy, cz])
                p_intersect = np.linalg.inv(np.asarray(S)).dot(C)
                N = s.shape[0]
                distances = np.zeros(N, dtype=np.float32)
                for j in range(N):
                    # Perpendicular distance from p_intersect to ray j.
                    ui = ((p_intersect - s[j, :]).dot(np.transpose(v[j, :]))) / (v[j, :].dot(v[j, :]))
                    distances[j] = np.linalg.norm(p_intersect - s[j, :] - ui * v[j, :])
                # for i=1:N %http://mathworld.wolfram.com/Point-LineDistance3-Dimensional.html:
                # distances(i) = norm(cross(p_intersect-PA(i,:),p_intersect-PB(i,:))) / norm(Si(i,:));
                inds_sorted = np.argsort(distances)
                d = distances[inds_sorted[-1]]
                first_time = False
            points_3D.append(p_intersect)
            dists_3D.append(distances)
        points_3D = np.asarray(points_3D, dtype=np.float32)
        dists_3D = np.asarray(dists_3D, dtype=object)
        return points_3D, dists_3D
| [
"pyglet.gl.glGetDoublev",
"numpy.asarray",
"numpy.argsort",
"numpy.sum",
"numpy.zeros",
"pyglet.gl.glGetIntegerv",
"numpy.linalg.norm",
"numpy.transpose",
"pyglet.gl.GLdouble",
"pyglet.gl.gluUnProject"
] | [((1131, 1191), 'pyglet.gl.glGetDoublev', 'pyglet.gl.glGetDoublev', (['pyglet.gl.GL_MODELVIEW_MATRIX', 'mvmat'], {}), '(pyglet.gl.GL_MODELVIEW_MATRIX, mvmat)\n', (1153, 1191), False, 'import pyglet\n'), ((1200, 1260), 'pyglet.gl.glGetDoublev', 'pyglet.gl.glGetDoublev', (['pyglet.gl.GL_PROJECTION_MATRIX', 'pmat'], {}), '(pyglet.gl.GL_PROJECTION_MATRIX, pmat)\n', (1222, 1260), False, 'import pyglet\n'), ((1269, 1321), 'pyglet.gl.glGetIntegerv', 'pyglet.gl.glGetIntegerv', (['pyglet.gl.GL_VIEWPORT', 'view'], {}), '(pyglet.gl.GL_VIEWPORT, view)\n', (1292, 1321), False, 'import pyglet\n'), ((4722, 4761), 'numpy.asarray', 'np.asarray', (['points_3D'], {'dtype': 'np.float32'}), '(points_3D, dtype=np.float32)\n', (4732, 4761), True, 'import numpy as np\n'), ((4781, 4815), 'numpy.asarray', 'np.asarray', (['dists_3D'], {'dtype': 'object'}), '(dists_3D, dtype=object)\n', (4791, 4815), True, 'import numpy as np\n'), ((3225, 3244), 'numpy.sum', 'np.sum', (['(nx * nx - 1)'], {}), '(nx * nx - 1)\n', (3231, 3244), True, 'import numpy as np\n'), ((3267, 3286), 'numpy.sum', 'np.sum', (['(ny * ny - 1)'], {}), '(ny * ny - 1)\n', (3273, 3286), True, 'import numpy as np\n'), ((3309, 3328), 'numpy.sum', 'np.sum', (['(nz * nz - 1)'], {}), '(nz * nz - 1)\n', (3315, 3328), True, 'import numpy as np\n'), ((3351, 3366), 'numpy.sum', 'np.sum', (['(nx * ny)'], {}), '(nx * ny)\n', (3357, 3366), True, 'import numpy as np\n'), ((3389, 3404), 'numpy.sum', 'np.sum', (['(nx * nz)'], {}), '(nx * nz)\n', (3395, 3404), True, 'import numpy as np\n'), ((3427, 3442), 'numpy.sum', 'np.sum', (['(ny * nz)'], {}), '(ny * nz)\n', (3433, 3442), True, 'import numpy as np\n'), ((3584, 3659), 'numpy.sum', 'np.sum', (['(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))'], {}), '(s[:, 0] * (nx * nx - 1) + s[:, 1] * (nx * ny) + s[:, 2] * (nx * nz))\n', (3590, 3659), True, 'import numpy as np\n'), ((3681, 3756), 'numpy.sum', 'np.sum', (['(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * 
(ny * nz))'], {}), '(s[:, 0] * (nx * ny) + s[:, 1] * (ny * ny - 1) + s[:, 2] * (ny * nz))\n', (3687, 3756), True, 'import numpy as np\n'), ((3778, 3853), 'numpy.sum', 'np.sum', (['(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))'], {}), '(s[:, 0] * (nx * nz) + s[:, 1] * (ny * nz) + s[:, 2] * (nz * nz - 1))\n', (3784, 3853), True, 'import numpy as np\n'), ((3874, 3898), 'numpy.asarray', 'np.asarray', (['[cx, cy, cz]'], {}), '([cx, cy, cz])\n', (3884, 3898), True, 'import numpy as np\n'), ((4024, 4053), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'np.float32'}), '(N, dtype=np.float32)\n', (4032, 4053), True, 'import numpy as np\n'), ((4517, 4538), 'numpy.argsort', 'np.argsort', (['distances'], {}), '(distances)\n', (4527, 4538), True, 'import numpy as np\n'), ((1483, 1503), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1501, 1503), False, 'import pyglet\n'), ((1534, 1554), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1552, 1554), False, 'import pyglet\n'), ((1585, 1605), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1603, 1605), False, 'import pyglet\n'), ((1634, 1654), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1652, 1654), False, 'import pyglet\n'), ((1683, 1703), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1701, 1703), False, 'import pyglet\n'), ((1732, 1752), 'pyglet.gl.GLdouble', 'pyglet.gl.GLdouble', ([], {}), '()\n', (1750, 1752), False, 'import pyglet\n'), ((1773, 1883), 'pyglet.gl.gluUnProject', 'pyglet.gl.gluUnProject', (['cv_kp[0]', '(image_height - cv_kp[1])', '(0)', 'mvmat', 'pmat', 'view', 'start_x', 'start_y', 'start_z'], {}), '(cv_kp[0], image_height - cv_kp[1], 0, mvmat, pmat,\n view, start_x, start_y, start_z)\n', (1795, 1883), False, 'import pyglet\n'), ((1943, 2047), 'pyglet.gl.gluUnProject', 'pyglet.gl.gluUnProject', (['cv_kp[0]', '(image_height - cv_kp[1])', '(1)', 'mvmat', 'pmat', 'view', 'end_x', 'end_y', 'end_z'], {}), '(cv_kp[0], 
image_height - cv_kp[1], 1, mvmat, pmat,\n view, end_x, end_y, end_z)\n', (1965, 2047), False, 'import pyglet\n'), ((2822, 2854), 'numpy.asarray', 'np.asarray', (['self.start_points[i]'], {}), '(self.start_points[i])\n', (2832, 2854), True, 'import numpy as np\n'), ((2879, 2909), 'numpy.asarray', 'np.asarray', (['self.end_points[i]'], {}), '(self.end_points[i])\n', (2889, 2909), True, 'import numpy as np\n'), ((4227, 4279), 'numpy.linalg.norm', 'np.linalg.norm', (['(p_intersect - s[j, :] - ui * v[j, :])'], {}), '(p_intersect - s[j, :] - ui * v[j, :])\n', (4241, 4279), True, 'import numpy as np\n'), ((2135, 2192), 'numpy.asarray', 'np.asarray', (['[start_x.value, start_y.value, start_z.value]'], {}), '([start_x.value, start_y.value, start_z.value])\n', (2145, 2192), True, 'import numpy as np\n'), ((2240, 2291), 'numpy.asarray', 'np.asarray', (['[end_x.value, end_y.value, end_z.value]'], {}), '([end_x.value, end_y.value, end_z.value])\n', (2250, 2291), True, 'import numpy as np\n'), ((3475, 3502), 'numpy.asarray', 'np.asarray', (['[sxx, sxy, sxz]'], {}), '([sxx, sxy, sxz])\n', (3485, 3502), True, 'import numpy as np\n'), ((3504, 3531), 'numpy.asarray', 'np.asarray', (['[sxy, syy, syz]'], {}), '([sxy, syy, syz])\n', (3514, 3531), True, 'import numpy as np\n'), ((3533, 3560), 'numpy.asarray', 'np.asarray', (['[sxz, syz, szz]'], {}), '([sxz, syz, szz])\n', (3543, 3560), True, 'import numpy as np\n'), ((3943, 3956), 'numpy.asarray', 'np.asarray', (['S'], {}), '(S)\n', (3953, 3956), True, 'import numpy as np\n'), ((4143, 4164), 'numpy.transpose', 'np.transpose', (['v[j, :]'], {}), '(v[j, :])\n', (4155, 4164), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 28 09:33:53 2020
@author: dhulls
"""
from __future__ import print_function
from __future__ import absolute_import
from argparse import ArgumentParser
import numpy as nm
import sys
sys.path.append('.')
from sfepy.base.base import IndexedStruct, Struct
from sfepy.discrete import (FieldVariable, Material, Integral, Function,
Equation, Equations, Problem)
from sfepy.discrete.fem import Mesh, FEDomain, Field
from sfepy.terms import Term
from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition
from sfepy.solvers.ls import ScipyDirect
from sfepy.solvers.nls import Newton
from sfepy.postprocess.viewer import Viewer
from sfepy.postprocess.probes_vtk import ProbeFromFile, Probe
import numpy as np
# Help strings for the CLI options below.
helps = {
    'show' : 'show the results figure',
}
from sfepy import data_dir
# Command-line interface: only --version and -s/--show are supported.
parser = ArgumentParser()
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('-s', '--show',
                    action="store_true", dest='show',
                    default=False, help=helps['show'])
options = parser.parse_args()
# Mesh and computational domain.
mesh = Mesh.from_file(data_dir + '/meshes/3d/fluid_mesh.inp')
domain = FEDomain('domain', mesh)
omega = domain.create_region('Omega', 'all')
# Approximation fields: 3-component velocity and scalar pressure, both order 1.
field_1 = Field.from_args(name='3_velocity', dtype=nm.float64, shape=3, region=omega, approx_order=1)
field_2 = Field.from_args(name='pressure', dtype=nm.float64, shape=1, region=omega, approx_order=1)
# Boundary regions (facet selections by coordinate).
region_0 = domain.create_region(name='Walls1', select='vertices in (y < -0.049)', kind='facet')
region_1 = domain.create_region(name='Walls2', select='vertices in (y > 0.049)', kind='facet')
region_2 = domain.create_region(name='Inlet', select='vertices in (x < -0.499)', kind='facet')
# NOTE(review): 'x > -0.499' selects nearly the whole domain boundary; an
# outlet on the face opposite the inlet would read 'x > 0.499' -- confirm
# against the mesh extents.
region_3 = domain.create_region(name='Outlet', select='vertices in (x > -0.499)', kind='facet')
# Essential (Dirichlet) boundary conditions: no-slip walls, unit x-inflow.
ebc_1 = EssentialBC(name='Walls1', region=region_0, dofs={'u.[0,1,2]' : 0.0})
ebc_2 = EssentialBC(name='Walls2', region=region_1, dofs={'u.[0,1,2]' : 0.0})
ebc_3 = EssentialBC(name='Inlet', region=region_2, dofs={'u.0' : 1.0, 'u.[1,2]' : 0.0})
# NOTE(review): ebc_4 is defined but never passed to Conditions() below --
# confirm whether leaving the outlet condition unused is intentional.
ebc_4 = EssentialBC(name='Outlet', region=region_3, dofs={'p':0.0, 'u.[1,2]' : 0.0})
viscosity = Material(name='viscosity', value=1.25e-3)
# Unknown/test variable pairs for the mixed velocity-pressure formulation.
variable_1 = FieldVariable('u', 'unknown', field_1)
variable_2 = FieldVariable(name='v', kind='test', field=field_1, primary_var_name='u')
variable_3 = FieldVariable(name='p', kind='unknown', field=field_2)
variable_4 = FieldVariable(name='q', kind='test', field=field_2, primary_var_name='p')
integral_1 = Integral('i1', order=2)
integral_2 = Integral('i2', order=3)
# Weak-form terms: viscous diffusion, convection, pressure gradient, continuity.
t1 = Term.new(name='dw_div_grad(viscosity.value, v, u)',
              integral=integral_2, region=omega, viscosity=viscosity, v=variable_2, u=variable_1)
t2 = Term.new(name='dw_convect(v, u)',
              integral=integral_2, region=omega, v=variable_2, u=variable_1)
t3 = Term.new(name='dw_stokes(v, p)',
              integral=integral_1, region=omega, v=variable_2, p=variable_3)
t4 = Term.new(name='dw_stokes(u, q)',
              integral=integral_1, region=omega, u=variable_1, q=variable_4)
# Momentum balance and incompressibility equations.
eq1 = Equation('balance', t1+t2-t3)
eq2 = Equation('incompressibility', t4)
eqs = Equations([eq1,eq2])
# Linear solver and Newton nonlinear solver with its iteration parameters.
ls = ScipyDirect({})
nls_status = IndexedStruct()
nls = Newton({'i_max' : 20, 'eps_a' : 1e-8, 'eps_r' : 1.0, 'macheps' : 1e-16, 'lin_red' : 1e-2, 'ls_red' : 0.1, 'ls_red_warp' : 0.001, 'ls_on' : 0.99999, 'ls_min' : 1e-5, 'check' : 0, 'delta' : 1e-6}, lin_solver=ls, status=nls_status)
pb = Problem('Navier-Stokes', equations=eqs)
pb.set_bcs(ebcs=Conditions([ebc_1, ebc_2, ebc_3]))
pb.set_solver(nls)
# Solve and write the state out for postprocessing/visualisation.
status = IndexedStruct()
state = pb.solve(status=status, save_results=True)
out = state.create_output_dict()
pb.save_state('Navier_Stokes.vtk', out=out)
view = Viewer('Navier_Stokes.vtk')
view(rel_scaling=2,
     is_scalar_bar=True, is_wireframe=True)
"sfepy.discrete.Equations",
"sfepy.discrete.conditions.EssentialBC",
"sfepy.solvers.ls.ScipyDirect",
"sfepy.discrete.Equation",
"argparse.ArgumentParser",
"sfepy.solvers.nls.Newton",
"sfepy.discrete.conditions.Conditions",
"sfepy.discrete.fem.Mesh.from_file",
"sfepy.discrete.Integral",
"sfepy.disc... | [((253, 273), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (268, 273), False, 'import sys\n'), ((912, 928), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (926, 928), False, 'from argparse import ArgumentParser\n'), ((1183, 1237), 'sfepy.discrete.fem.Mesh.from_file', 'Mesh.from_file', (["(data_dir + '/meshes/3d/fluid_mesh.inp')"], {}), "(data_dir + '/meshes/3d/fluid_mesh.inp')\n", (1197, 1237), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1247, 1271), 'sfepy.discrete.fem.FEDomain', 'FEDomain', (['"""domain"""', 'mesh'], {}), "('domain', mesh)\n", (1255, 1271), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1328, 1423), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', ([], {'name': '"""3_velocity"""', 'dtype': 'nm.float64', 'shape': '(3)', 'region': 'omega', 'approx_order': '(1)'}), "(name='3_velocity', dtype=nm.float64, shape=3, region=omega,\n approx_order=1)\n", (1343, 1423), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1430, 1523), 'sfepy.discrete.fem.Field.from_args', 'Field.from_args', ([], {'name': '"""pressure"""', 'dtype': 'nm.float64', 'shape': '(1)', 'region': 'omega', 'approx_order': '(1)'}), "(name='pressure', dtype=nm.float64, shape=1, region=omega,\n approx_order=1)\n", (1445, 1523), False, 'from sfepy.discrete.fem import Mesh, FEDomain, Field\n'), ((1912, 1980), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Walls1"""', 'region': 'region_0', 'dofs': "{'u.[0,1,2]': 0.0}"}), "(name='Walls1', region=region_0, dofs={'u.[0,1,2]': 0.0})\n", (1923, 1980), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((1990, 2058), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Walls2"""', 'region': 'region_1', 'dofs': "{'u.[0,1,2]': 0.0}"}), "(name='Walls2', region=region_1, dofs={'u.[0,1,2]': 0.0})\n", (2001, 2058), False, 'from 
sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((2068, 2145), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Inlet"""', 'region': 'region_2', 'dofs': "{'u.0': 1.0, 'u.[1,2]': 0.0}"}), "(name='Inlet', region=region_2, dofs={'u.0': 1.0, 'u.[1,2]': 0.0})\n", (2079, 2145), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((2156, 2232), 'sfepy.discrete.conditions.EssentialBC', 'EssentialBC', ([], {'name': '"""Outlet"""', 'region': 'region_3', 'dofs': "{'p': 0.0, 'u.[1,2]': 0.0}"}), "(name='Outlet', region=region_3, dofs={'p': 0.0, 'u.[1,2]': 0.0})\n", (2167, 2232), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n'), ((2246, 2287), 'sfepy.discrete.Material', 'Material', ([], {'name': '"""viscosity"""', 'value': '(0.00125)'}), "(name='viscosity', value=0.00125)\n", (2254, 2287), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2302, 2340), 'sfepy.discrete.FieldVariable', 'FieldVariable', (['"""u"""', '"""unknown"""', 'field_1'], {}), "('u', 'unknown', field_1)\n", (2315, 2340), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2354, 2427), 'sfepy.discrete.FieldVariable', 'FieldVariable', ([], {'name': '"""v"""', 'kind': '"""test"""', 'field': 'field_1', 'primary_var_name': '"""u"""'}), "(name='v', kind='test', field=field_1, primary_var_name='u')\n", (2367, 2427), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2441, 2495), 'sfepy.discrete.FieldVariable', 'FieldVariable', ([], {'name': '"""p"""', 'kind': '"""unknown"""', 'field': 'field_2'}), "(name='p', kind='unknown', field=field_2)\n", (2454, 2495), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2509, 2582), 
'sfepy.discrete.FieldVariable', 'FieldVariable', ([], {'name': '"""q"""', 'kind': '"""test"""', 'field': 'field_2', 'primary_var_name': '"""p"""'}), "(name='q', kind='test', field=field_2, primary_var_name='p')\n", (2522, 2582), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2597, 2620), 'sfepy.discrete.Integral', 'Integral', (['"""i1"""'], {'order': '(2)'}), "('i1', order=2)\n", (2605, 2620), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2634, 2657), 'sfepy.discrete.Integral', 'Integral', (['"""i2"""'], {'order': '(3)'}), "('i2', order=3)\n", (2642, 2657), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((2664, 2803), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_div_grad(viscosity.value, v, u)"""', 'integral': 'integral_2', 'region': 'omega', 'viscosity': 'viscosity', 'v': 'variable_2', 'u': 'variable_1'}), "(name='dw_div_grad(viscosity.value, v, u)', integral=integral_2,\n region=omega, viscosity=viscosity, v=variable_2, u=variable_1)\n", (2672, 2803), False, 'from sfepy.terms import Term\n'), ((2819, 2920), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_convect(v, u)"""', 'integral': 'integral_2', 'region': 'omega', 'v': 'variable_2', 'u': 'variable_1'}), "(name='dw_convect(v, u)', integral=integral_2, region=omega, v=\n variable_2, u=variable_1)\n", (2827, 2920), False, 'from sfepy.terms import Term\n'), ((2935, 3035), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_stokes(v, p)"""', 'integral': 'integral_1', 'region': 'omega', 'v': 'variable_2', 'p': 'variable_3'}), "(name='dw_stokes(v, p)', integral=integral_1, region=omega, v=\n variable_2, p=variable_3)\n", (2943, 3035), False, 'from sfepy.terms import Term\n'), ((3050, 3150), 'sfepy.terms.Term.new', 'Term.new', ([], {'name': '"""dw_stokes(u, q)"""', 'integral': 'integral_1', 
'region': 'omega', 'u': 'variable_1', 'q': 'variable_4'}), "(name='dw_stokes(u, q)', integral=integral_1, region=omega, u=\n variable_1, q=variable_4)\n", (3058, 3150), False, 'from sfepy.terms import Term\n'), ((3166, 3199), 'sfepy.discrete.Equation', 'Equation', (['"""balance"""', '(t1 + t2 - t3)'], {}), "('balance', t1 + t2 - t3)\n", (3174, 3199), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3202, 3235), 'sfepy.discrete.Equation', 'Equation', (['"""incompressibility"""', 't4'], {}), "('incompressibility', t4)\n", (3210, 3235), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3242, 3263), 'sfepy.discrete.Equations', 'Equations', (['[eq1, eq2]'], {}), '([eq1, eq2])\n', (3251, 3263), False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3269, 3284), 'sfepy.solvers.ls.ScipyDirect', 'ScipyDirect', (['{}'], {}), '({})\n', (3280, 3284), False, 'from sfepy.solvers.ls import ScipyDirect\n'), ((3298, 3313), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3311, 3313), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((3320, 3553), 'sfepy.solvers.nls.Newton', 'Newton', (["{'i_max': 20, 'eps_a': 1e-08, 'eps_r': 1.0, 'macheps': 1e-16, 'lin_red': \n 0.01, 'ls_red': 0.1, 'ls_red_warp': 0.001, 'ls_on': 0.99999, 'ls_min': \n 1e-05, 'check': 0, 'delta': 1e-06}"], {'lin_solver': 'ls', 'status': 'nls_status'}), "({'i_max': 20, 'eps_a': 1e-08, 'eps_r': 1.0, 'macheps': 1e-16,\n 'lin_red': 0.01, 'ls_red': 0.1, 'ls_red_warp': 0.001, 'ls_on': 0.99999,\n 'ls_min': 1e-05, 'check': 0, 'delta': 1e-06}, lin_solver=ls, status=\n nls_status)\n", (3326, 3553), False, 'from sfepy.solvers.nls import Newton\n'), ((3554, 3593), 'sfepy.discrete.Problem', 'Problem', (['"""Navier-Stokes"""'], {'equations': 'eqs'}), "('Navier-Stokes', equations=eqs)\n", (3561, 3593), 
False, 'from sfepy.discrete import FieldVariable, Material, Integral, Function, Equation, Equations, Problem\n'), ((3673, 3688), 'sfepy.base.base.IndexedStruct', 'IndexedStruct', ([], {}), '()\n', (3686, 3688), False, 'from sfepy.base.base import IndexedStruct, Struct\n'), ((3826, 3853), 'sfepy.postprocess.viewer.Viewer', 'Viewer', (['"""Navier_Stokes.vtk"""'], {}), "('Navier_Stokes.vtk')\n", (3832, 3853), False, 'from sfepy.postprocess.viewer import Viewer\n'), ((3610, 3643), 'sfepy.discrete.conditions.Conditions', 'Conditions', (['[ebc_1, ebc_2, ebc_3]'], {}), '([ebc_1, ebc_2, ebc_3])\n', (3620, 3643), False, 'from sfepy.discrete.conditions import Conditions, EssentialBC, InitialCondition\n')] |
#!/usr/bin/env python3
from scripts.workflow import get_app_name, is_name_valid
from scripts.workflow import get_args, is_args_valid
from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile
from scripts.manual import print_manual
from scripts.messages import empty_name, success_msg, failure_msg
import sys
# CLI entry point: scaffold a new app directory from positional name plus
# optional flags -d/--debugger, -cj/--css-js, -dc/--docker-container.
app_name = get_app_name()
args = get_args()
# `args` still contains the app name; strip it so only flags remain.
args.remove(app_name)
# validate name of app!!
if (is_name_valid(app_name)):
    # validate all arguments first!!
    if(is_args_valid(args)):
        # Create folder named app_name
        create_dir(app_name)
        # Arguments
        debugger_mode = False
        import_css_js = False
        use_docker = False
        if '-d' in args or '--debugger' in args:
            debugger_mode = True
            print("- Debugger mode on")
            print(" |__ added debug=True")
        else:
            print("- Debugger mode off")
        if '-cj' in args or '--css-js' in args:
            import_css_js = True
            create_static_folder(app_name)
            print("- Css and Js mode on")
            print(" |__ import static/stylesheet/style.css")
            print(" |__ import static/js/app.css")
        else:
            print("- Css and Js mode off")
        if '-dc' in args or '--docker-container' in args:
            use_docker = True
            print("- Docker mode on")
            print(' |__ cd %s' % app_name)
            print(' |__ \"docker-compose up -d\" to start app')
        else:
            print("- Docker mode off")
        # create templates folder to hold index.html
        create_templates_folder(app_name, import_css_js)
        # create app.py in root directory(app_name)
        create_app(app_name, debugger_mode)
        # move application to docker container;
        if (use_docker):
            # generate Dockerfile
            create_dockerfile(app_name)
        success_msg(app_name)
    else:
        print('Unknown argument detected! Please check the help section\n')
        print_manual()
        failure_msg(app_name)
else:
    # A bare -h/--help in the name position shows the manual instead of failing.
    if (app_name == '-h' or app_name == '--help'):
        print_manual()
    else:
        print('Please choose another app name')
        failure_msg(app_name)
"scripts.messages.success_msg",
"scripts.workflow.get_args",
"scripts.workflow.create_app",
"scripts.manual.print_manual",
"scripts.workflow.is_args_valid",
"scripts.workflow.create_dir",
"scripts.workflow.create_templates_folder",
"scripts.workflow.get_app_name",
"scripts.workflow.create_dockerfile... | [((381, 395), 'scripts.workflow.get_app_name', 'get_app_name', ([], {}), '()\n', (393, 395), False, 'from scripts.workflow import get_app_name, is_name_valid\n'), ((403, 413), 'scripts.workflow.get_args', 'get_args', ([], {}), '()\n', (411, 413), False, 'from scripts.workflow import get_args, is_args_valid\n'), ((466, 489), 'scripts.workflow.is_name_valid', 'is_name_valid', (['app_name'], {}), '(app_name)\n', (479, 489), False, 'from scripts.workflow import get_app_name, is_name_valid\n'), ((537, 556), 'scripts.workflow.is_args_valid', 'is_args_valid', (['args'], {}), '(args)\n', (550, 556), False, 'from scripts.workflow import get_args, is_args_valid\n'), ((611, 631), 'scripts.workflow.create_dir', 'create_dir', (['app_name'], {}), '(app_name)\n', (621, 631), False, 'from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile\n'), ((1651, 1699), 'scripts.workflow.create_templates_folder', 'create_templates_folder', (['app_name', 'import_css_js'], {}), '(app_name, import_css_js)\n', (1674, 1699), False, 'from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile\n'), ((1761, 1796), 'scripts.workflow.create_app', 'create_app', (['app_name', 'debugger_mode'], {}), '(app_name, debugger_mode)\n', (1771, 1796), False, 'from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile\n'), ((1955, 1976), 'scripts.messages.success_msg', 'success_msg', (['app_name'], {}), '(app_name)\n', (1966, 1976), False, 'from scripts.messages import empty_name, success_msg, failure_msg\n'), ((2071, 2085), 'scripts.manual.print_manual', 'print_manual', ([], {}), '()\n', (2083, 2085), False, 'from scripts.manual import print_manual\n'), ((2094, 2115), 'scripts.messages.failure_msg', 'failure_msg', (['app_name'], {}), '(app_name)\n', (2105, 2115), False, 'from scripts.messages 
import empty_name, success_msg, failure_msg\n'), ((2181, 2195), 'scripts.manual.print_manual', 'print_manual', ([], {}), '()\n', (2193, 2195), False, 'from scripts.manual import print_manual\n'), ((2262, 2283), 'scripts.messages.failure_msg', 'failure_msg', (['app_name'], {}), '(app_name)\n', (2273, 2283), False, 'from scripts.messages import empty_name, success_msg, failure_msg\n'), ((1056, 1086), 'scripts.workflow.create_static_folder', 'create_static_folder', (['app_name'], {}), '(app_name)\n', (1076, 1086), False, 'from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile\n'), ((1918, 1945), 'scripts.workflow.create_dockerfile', 'create_dockerfile', (['app_name'], {}), '(app_name)\n', (1935, 1945), False, 'from scripts.workflow import create_dir, create_app, create_templates_folder, create_static_folder, create_dockerfile\n')] |
import os
from pathlib import Path
from jinja2 import Template
import parser
from utils import write_to_file
from utils import mkdir_p
# Module-level side effect: parser.init() loads the specification on import.
parser.init()
# parse and assign to vars
spec = parser.spec  # global spec consumed by generate_dockerfile()
def _concat(slice: str) -> str:
"""helper to concatenate each template slice."""
return "{}\n".format(slice)
def slices_filename_content_hash() -> dict:
    """Map each template-slice filename to its text content.

    Reads every regular file in the ``slices`` directory that sits next to
    this module and returns ``{filename: file_text}``.

    Returns:
        dict: filename -> file content for every slice file found.
    """
    # Resolve relative to this module, not the process CWD.  The original
    # Path.cwd().joinpath(<absolute path>) discarded the CWD component anyway,
    # because os.path.join with an absolute second argument is absolute.
    slices_dir = Path(__file__).resolve().parent / "slices"
    # Skip subdirectories: read_text() would raise IsADirectoryError on them.
    return {
        entry.name: entry.read_text()
        for entry in slices_dir.iterdir()
        if entry.is_file()
    }
def concat_slices(component: str = "tensorflow", flavor: str = "mkl") -> str:
    """Concatenate dockerfile template slices for the requested stack.

    Always starts from the base OS slice; for the known component/flavor
    combinations, the framework slice and the horovod slice are appended.

    Args:
        component: deep-learning framework, "tensorflow" or "pytorch".
        flavor: build flavor; only "mkl" selects the framework slices.

    Returns:
        str: the concatenated dockerfile text, one newline after each slice.
    """
    docker_slices = slices_filename_content_hash()
    names = ["os.dockerfile"]
    if flavor == "mkl" and component in ("tensorflow", "pytorch"):
        names.append(f"{component}.dockerfile")
        names.append("horovod.dockerfile")
    # Join once instead of repeated string += accumulation; the original also
    # applied a pointless "".join() to an already-built string.
    return "".join(_concat(docker_slices[name]) for name in names)
def insert_template_values(dockerfile: str, kwargs: dict):
    """Render *dockerfile* as a Jinja2 template using *kwargs* as variables."""
    template = Template(dockerfile)
    return template.render(**kwargs)
def generate_dockerfile(os: str, framework: str, file_name: str = "Dockerfile"):
    """Generate a Dockerfile for *os*/*framework* and write it to *file_name*.

    Versions and package lists come from the module-level ``spec``.
    Note: the ``os`` parameter deliberately shadows the ``os`` module inside
    this function body.
    """
    os_spec = spec["stack"]["dlrs"][os]
    # Ubuntu uses apt-get; all other OSes (clearlinux) use swupd.
    installer = "apt-get install -y" if os == "ubuntu" else "swupd bundle-add"
    context = {
        "os": f"{os}:{os_spec['version']}",
        "pkg_install": "{} {}".format(installer, " ".join(os_spec["os_pkgs"])),
        "tf_version": os_spec["tensorflow"]["mkl"]["version"],
        "hvd_version": os_spec["horovod"]["version"],
        "torch_version": os_spec["pytorch"]["mkl"]["version"],
    }
    rendered = insert_template_values(concat_slices(framework), context)
    write_to_file(file_name, rendered)
def generate_all_dockerfiles(generate: bool = True, build: bool = False) -> None:
    """Generate (and optionally build) dockerfiles for every framework/OS pair."""
    base_dir = "./dockerfiles"
    if generate:
        for fw in ("pytorch", "tensorflow"):
            for distro in ("ubuntu", "clearlinux"):
                # One Dockerfile per ./dockerfiles/<os>/<framework>/ directory.
                target_dir = mkdir_p(os.path.join(base_dir, distro, fw))
                generate_dockerfile(distro, fw, os.path.join(target_dir, "Dockerfile"))
    if build:
        # TODO(unrahul): build the generated dockerfiles
        pass
| [
"pathlib.Path.cwd",
"os.path.join",
"jinja2.Template",
"os.path.realpath",
"utils.write_to_file",
"parser.init"
] | [((138, 151), 'parser.init', 'parser.init', ([], {}), '()\n', (149, 151), False, 'import parser\n'), ((1371, 1391), 'jinja2.Template', 'Template', (['dockerfile'], {}), '(dockerfile)\n', (1379, 1391), False, 'from jinja2 import Template\n'), ((2335, 2371), 'utils.write_to_file', 'write_to_file', (['file_name', 'dockerfile'], {}), '(file_name, dockerfile)\n', (2348, 2371), False, 'from utils import write_to_file\n'), ((455, 465), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (463, 465), False, 'from pathlib import Path\n'), ((513, 539), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (529, 539), False, 'import os\n'), ((2781, 2820), 'os.path.join', 'os.path.join', (['save_to_dir', '"""Dockerfile"""'], {}), "(save_to_dir, 'Dockerfile')\n", (2793, 2820), False, 'import os\n'), ((2710, 2748), 'os.path.join', 'os.path.join', (['base_dir', '_os', 'framework'], {}), '(base_dir, _os, framework)\n', (2722, 2748), False, 'import os\n')] |
import os
from datetime import datetime
import pytest
from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph
@pytest.mark.skipif(
    datetime.utcnow() > datetime(2022, 6, 25),
    reason="Please remove after two months have passed since the fix was merged",
)
def test_issue_204(initialized_project_dir_path, capsys):
    """Regression test for issue 204: a node error must be reported on stderr.

    Runs a one-node graph whose node raises NameError and asserts the error
    message is captured on stderr.  `initialized_project_dir_path` and
    `capsys` are pytest fixtures.
    """
    # arrange
    # remove DB files because when they are present, issue does not occur
    db_files = [".entropy/params.db", ".entropy/entropy.db", ".entropy/entropy.hdf5"]
    for file in db_files:
        full_path = os.path.join(initialized_project_dir_path, file)
        if os.path.exists(full_path):
            os.remove(full_path)
    # experiment to run
    experiment_resources = ExperimentResources(
        SqlAlchemyDB(initialized_project_dir_path)
    )
    def root_node():
        print("root node")
        # error that should be logged to stderr:
        print(a)  # deliberate NameError: 'a' is intentionally undefined
        return {}
    node0 = PyNode(label="root_node", program=root_node)
    experiment = Graph(resources=experiment_resources, graph={node0}, story="run_a")
    # act
    try:
        # The failing node is expected to abort the run with RuntimeError.
        experiment.run()
    except RuntimeError:
        pass
    # assert
    captured = capsys.readouterr()
    assert "message: name 'a' is not defined" in captured.err
| [
"datetime.datetime",
"os.path.exists",
"entropylab.Graph",
"datetime.datetime.utcnow",
"os.path.join",
"entropylab.SqlAlchemyDB",
"entropylab.PyNode",
"os.remove"
] | [((957, 1001), 'entropylab.PyNode', 'PyNode', ([], {'label': '"""root_node"""', 'program': 'root_node'}), "(label='root_node', program=root_node)\n", (963, 1001), False, 'from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph\n'), ((1019, 1086), 'entropylab.Graph', 'Graph', ([], {'resources': 'experiment_resources', 'graph': '{node0}', 'story': '"""run_a"""'}), "(resources=experiment_resources, graph={node0}, story='run_a')\n", (1024, 1086), False, 'from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph\n'), ((561, 609), 'os.path.join', 'os.path.join', (['initialized_project_dir_path', 'file'], {}), '(initialized_project_dir_path, file)\n', (573, 609), False, 'import os\n'), ((621, 646), 'os.path.exists', 'os.path.exists', (['full_path'], {}), '(full_path)\n', (635, 646), False, 'import os\n'), ((762, 804), 'entropylab.SqlAlchemyDB', 'SqlAlchemyDB', (['initialized_project_dir_path'], {}), '(initialized_project_dir_path)\n', (774, 804), False, 'from entropylab import ExperimentResources, SqlAlchemyDB, PyNode, Graph\n'), ((155, 172), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (170, 172), False, 'from datetime import datetime\n'), ((175, 196), 'datetime.datetime', 'datetime', (['(2022)', '(6)', '(25)'], {}), '(2022, 6, 25)\n', (183, 196), False, 'from datetime import datetime\n'), ((660, 680), 'os.remove', 'os.remove', (['full_path'], {}), '(full_path)\n', (669, 680), False, 'import os\n')] |
"""users.country
Revision ID: 429d596c43a7
Revises: <PASSWORD>
Create Date: 2020-10-23 21:26:55.598146
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '429d596c43a7'
down_revision = '77e0c0edaa04'
branch_labels = None
depends_on = None
def upgrade():
    """Apply migration: add the nullable string column ``users.country``."""
    country = sa.Column('country', sa.String(length=4), nullable=True)
    op.add_column('users', country)
def downgrade():
    """Revert migration: drop the ``country`` column from ``users``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('users', 'country')
    # ### end Alembic commands ###
| [
"sqlalchemy.String",
"alembic.op.drop_column"
] | [((587, 621), 'alembic.op.drop_column', 'op.drop_column', (['"""users"""', '"""country"""'], {}), "('users', 'country')\n", (601, 621), False, 'from alembic import op\n'), ((426, 445), 'sqlalchemy.String', 'sa.String', ([], {'length': '(4)'}), '(length=4)\n', (435, 445), True, 'import sqlalchemy as sa\n')] |
import pandas as pd
def get_seasonality_weekly(bills, date_column='dates', group_column='level_4_name',
                           regular_only=False, promo_fact_column=None):
    """Estimate a weekly seasonality profile per product group.

    For every value of ``group_column`` the function computes, for each of the
    52 ISO weeks, a 3-week centred moving average of the share of total sales
    that falls on that week (weeks 1 and 52 wrap around to each other).

    Args:
        bills: transaction-level DataFrame; mutated in place (gains ``week``
            and ``year`` columns).
        date_column: name of the date column in ``bills``.
        group_column: grouping column (e.g. category name).
        regular_only: if True, only non-promo sales are used and counts are
            normalised by the number of distinct PLUs sold regularly.
        promo_fact_column: column with a promo flag (0 = regular sale);
            required when ``regular_only`` is True.

    Returns:
        DataFrame with columns ``[group_column, 'week', 'rolling_average']``.
    """
    # ``Series.dt.week`` was removed in pandas 2.0 -- use isocalendar() instead.
    parsed = pd.to_datetime(bills[date_column])
    bills['week'] = parsed.dt.isocalendar().week.astype(int)
    bills['year'] = parsed.dt.year
    if not regular_only:
        # Count sales per (group, week, year), then average across years so
        # uneven historical coverage does not bias the profile.
        num_per_week = bills.groupby([group_column, 'week', 'year'])[group_column].count().reset_index(name='num_sold')
        num_per_week = num_per_week.groupby([group_column, 'week'])['num_sold'].mean().reset_index(name='num_sold')
    else:
        # Regular (non-promo) sales only: count sales and distinct PLUs that
        # sold regularly in each week.
        num_per_week = bills[bills[promo_fact_column] == 0].groupby([group_column, 'week', 'year']).agg(
            {group_column: 'count', 'PLU_ID': 'nunique'})
        num_per_week = num_per_week.rename(columns={group_column: 'total_sold', 'PLU_ID': 'unique_plu'}).reset_index()
        # Average regular sale / PLU counts across years.
        num_per_week = num_per_week.groupby([group_column, 'week'])[['total_sold', 'unique_plu']].mean().reset_index()
        # Sales per regularly-sold PLU, so a category with many promo items
        # still yields a sensible figure; +10 regularises sparse groups.
        num_per_week['num_sold'] = num_per_week['total_sold'] / (num_per_week['unique_plu'] + 10)
        num_per_week.drop(['total_sold', 'unique_plu'], axis=1, inplace=True)
    # Dense (group x 52 weeks) grid; weeks with no sales become zero.
    # NOTE(review): ISO week 53 is dropped by the merge below -- confirm acceptable.
    new_table = pd.concat(
        [pd.DataFrame({group_column: grp, 'week': list(range(1, 53))}) for grp in bills[group_column].unique()])
    new_table = new_table.merge(num_per_week, on=[group_column, 'week'], how='left').fillna(0)
    # Total sales per group over the whole period.
    total_sold = new_table.groupby([group_column])['num_sold'].sum().reset_index(name='total_sold')
    new_table = new_table.merge(total_sold, on=group_column, how='left')
    # Neighbouring weeks' sales for the centred moving average.
    new_table['num_sold_prev'] = new_table.sort_values('week').groupby([group_column]).num_sold.shift(1)
    new_table['num_sold_next'] = new_table.sort_values('week').groupby([group_column]).num_sold.shift(-1)
    # Wrap the year boundary: week 1's predecessor is week 52 and vice versa.
    plu_52_week_sales = dict(new_table[new_table['week'] == 52].set_index([group_column])['num_sold'])
    plu_1_week_sales = dict(new_table[new_table['week'] == 1].set_index([group_column])['num_sold'])
    new_table.loc[new_table['week'] == 1, 'num_sold_prev'] = new_table[new_table['week'] == 1][group_column].map(
        lambda g: plu_52_week_sales[g])
    new_table.loc[new_table['week'] == 52, 'num_sold_next'] = new_table[new_table['week'] == 52][group_column].map(
        lambda g: plu_1_week_sales[g])
    # 3-week moving average of the weekly share of total sales.
    new_table['rolling_average'] = (new_table['num_sold_prev'] + new_table['num_sold'] + new_table['num_sold_next']) / \
                                   (3 * new_table['total_sold'])
    return new_table[[group_column, 'week', 'rolling_average']]
| [
"pandas.to_datetime"
] | [((198, 232), 'pandas.to_datetime', 'pd.to_datetime', (['bills[date_column]'], {}), '(bills[date_column])\n', (212, 232), True, 'import pandas as pd\n'), ((261, 295), 'pandas.to_datetime', 'pd.to_datetime', (['bills[date_column]'], {}), '(bills[date_column])\n', (275, 295), True, 'import pandas as pd\n')] |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
import copy
import traceback
from telemetry import value as value_module
from telemetry.results import page_run
from telemetry.results import progress_reporter as progress_reporter_module
from telemetry.value import failure
from telemetry.value import skip
class PageTestResults(object):
  """Collects values, failures and skips produced while running page tests.

  Results are grouped into PageRun objects (one per WillRunPage/DidRunPage
  pair); summary values apply to the whole test rather than a single page.
  """

  def __init__(self, output_stream=None, output_formatters=None,
               progress_reporter=None, trace_tag=''):
    """
    Args:
      output_stream: The output stream to use to write test results.
      output_formatters: A list of output formatters. The output
          formatters are typically used to format the test results, such
          as CsvOutputFormatter, which output the test results as CSV.
      progress_reporter: An instance of progress_reporter.ProgressReporter,
          to be used to output test status/results progressively.
      trace_tag: A string to append to the buildbot trace
          name. Currently only used for buildbot.
    """
    # TODO(chrishenry): Figure out if trace_tag is still necessary.
    super(PageTestResults, self).__init__()
    self._output_stream = output_stream
    self._progress_reporter = (
        progress_reporter if progress_reporter is not None
        else progress_reporter_module.ProgressReporter())
    self._output_formatters = (
        output_formatters if output_formatters is not None else [])
    self._trace_tag = trace_tag
    # Run currently in progress (None outside a WillRunPage/DidRunPage pair).
    self._current_page_run = None
    # All completed, non-discarded page runs.
    self._all_page_runs = []
    # First value seen for each value name; used to validate mergeability.
    self._representative_value_for_each_value_name = {}
    self._all_summary_values = []

  def __copy__(self):
    # Shallow copy, but duplicate container attributes so the copy does not
    # share mutable state with the original.
    # NOTE(review): collections.Container moved to collections.abc in
    # Python 3.3 and the old alias was removed in 3.10 -- confirm the
    # targeted interpreter version.
    cls = self.__class__
    result = cls.__new__(cls)
    for k, v in self.__dict__.items():
      if isinstance(v, collections.Container):
        v = copy.copy(v)
      setattr(result, k, v)
    return result

  @property
  def all_page_specific_values(self):
    # Values from completed runs plus the run in progress, if any.
    values = []
    for run in self._all_page_runs:
      values += run.values
    if self._current_page_run:
      values += self._current_page_run.values
    return values

  @property
  def all_summary_values(self):
    return self._all_summary_values

  @property
  def current_page(self):
    assert self._current_page_run, 'Not currently running test.'
    return self._current_page_run.page

  @property
  def current_page_run(self):
    assert self._current_page_run, 'Not currently running test.'
    return self._current_page_run

  @property
  def all_page_runs(self):
    return self._all_page_runs

  @property
  def pages_that_succeeded(self):
    """Returns the set of pages that succeeded."""
    pages = set(run.page for run in self.all_page_runs)
    pages.difference_update(self.pages_that_failed)
    return pages

  @property
  def pages_that_failed(self):
    """Returns the set of failed pages."""
    failed_pages = set()
    for run in self.all_page_runs:
      if run.failed:
        failed_pages.add(run.page)
    return failed_pages

  @property
  def failures(self):
    # All page-specific FailureValues.
    values = self.all_page_specific_values
    return [v for v in values if isinstance(v, failure.FailureValue)]

  @property
  def skipped_values(self):
    # All page-specific SkipValues.
    values = self.all_page_specific_values
    return [v for v in values if isinstance(v, skip.SkipValue)]

  def _GetStringFromExcInfo(self, err):
    # Format a sys.exc_info() triple into a traceback string.
    return ''.join(traceback.format_exception(*err))

  def WillRunPage(self, page):
    # Begin a new page run; must be paired with DidRunPage.
    assert not self._current_page_run, 'Did not call DidRunPage.'
    self._current_page_run = page_run.PageRun(page)
    self._progress_reporter.WillRunPage(self)

  def DidRunPage(self, page, discard_run=False):  # pylint: disable=W0613
    """
    Args:
      page: The current page under test.
      discard_run: Whether to discard the entire run and all of its
          associated results.
    """
    assert self._current_page_run, 'Did not call WillRunPage.'
    self._progress_reporter.DidRunPage(self)
    if not discard_run:
      self._all_page_runs.append(self._current_page_run)
    self._current_page_run = None

  def WillAttemptPageRun(self, attempt_count, max_attempts):
    """To be called when a single attempt on a page run is starting.

    This is called between WillRunPage and DidRunPage and can be
    called multiple times, one for each attempt.

    Args:
      attempt_count: The current attempt number, start at 1
          (attempt_count == 1 for the first attempt, 2 for second
          attempt, and so on).
      max_attempts: Maximum number of page run attempts before failing.
    """
    self._progress_reporter.WillAttemptPageRun(
        self, attempt_count, max_attempts)
    # Clear any values from previous attempts for this page run.
    self._current_page_run.ClearValues()

  def AddValue(self, value):
    # Record a page-specific value for the run in progress.
    assert self._current_page_run, 'Not currently running test.'
    self._ValidateValue(value)
    # TODO(eakuefner/chrishenry): Add only one skip per pagerun assert here
    self._current_page_run.AddValue(value)
    self._progress_reporter.DidAddValue(value)

  def AddSummaryValue(self, value):
    # Summary values must not be tied to any particular page.
    assert value.page is None
    self._ValidateValue(value)
    self._all_summary_values.append(value)

  def _ValidateValue(self, value):
    # Ensure all values sharing a name are mergeable with each other.
    assert isinstance(value, value_module.Value)
    if value.name not in self._representative_value_for_each_value_name:
      self._representative_value_for_each_value_name[value.name] = value
    representative_value = self._representative_value_for_each_value_name[
        value.name]
    assert value.IsMergableWith(representative_value)

  def PrintSummary(self):
    # Flush progress output and run every configured output formatter.
    self._progress_reporter.DidFinishAllTests(self)
    for output_formatter in self._output_formatters:
      output_formatter.Format(self)

  def FindPageSpecificValuesForPage(self, page, value_name):
    # All values recorded for `page` under `value_name`.
    values = []
    for value in self.all_page_specific_values:
      if value.page == page and value.name == value_name:
        values.append(value)
    return values

  def FindAllPageSpecificValuesNamed(self, value_name):
    # All values named `value_name`, regardless of page.
    values = []
    for value in self.all_page_specific_values:
      if value.name == value_name:
        values.append(value)
    return values
| [
"telemetry.results.page_run.PageRun",
"copy.copy",
"traceback.format_exception",
"telemetry.results.progress_reporter.ProgressReporter"
] | [((3594, 3616), 'telemetry.results.page_run.PageRun', 'page_run.PageRun', (['page'], {}), '(page)\n', (3610, 3616), False, 'from telemetry.results import page_run\n'), ((1399, 1442), 'telemetry.results.progress_reporter.ProgressReporter', 'progress_reporter_module.ProgressReporter', ([], {}), '()\n', (1440, 1442), True, 'from telemetry.results import progress_reporter as progress_reporter_module\n'), ((3433, 3465), 'traceback.format_exception', 'traceback.format_exception', (['*err'], {}), '(*err)\n', (3459, 3465), False, 'import traceback\n'), ((1906, 1918), 'copy.copy', 'copy.copy', (['v'], {}), '(v)\n', (1915, 1918), False, 'import copy\n')] |
from tensorflow.keras.models import Model
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.models import load_model
import tensorflow as tf
import os
#---------------------------------------------------------------------------------
# get data
#---------------------------------------------------------------------------------
def data(input_channel, i, val_save_dir, test_save_dir):
    """Load the i-th cached image and its prediction metadata for Grad-CAM.

    Reads the module-level globals ``run_type``, ``pro_data_dir`` and (for
    the 'exval' run type) ``exval_save_dir`` that the __main__ section sets.

    Args:
        input_channel: 1 or 3; selects which cached .npy array to load.
        i: index of the image/row to visualise.
        val_save_dir: folder holding val_pred_df.csv.
        test_save_dir: folder holding test_pred_df.csv.

    Returns:
        (img, label, pred_index, y_pred, ID) where img has shape
        (1, 192, 192, 3).
    """
    ### load train data based on input channels
    if run_type == 'val':
        if input_channel == 1:
            fn = 'val_arr_1ch.npy'
        elif input_channel == 3:
            fn = 'val_arr_3ch.npy'
        data = np.load(os.path.join(pro_data_dir, fn))
        df = pd.read_csv(os.path.join(val_save_dir, 'val_pred_df.csv'))
    elif run_type == 'test':
        if input_channel == 1:
            fn = 'test_arr_1ch.npy'
        elif input_channel == 3:
            fn = 'test_arr_3ch.npy'
        data = np.load(os.path.join(pro_data_dir, fn))
        df = pd.read_csv(os.path.join(test_save_dir, 'test_pred_df.csv'))
    elif run_type == 'exval':
        if input_channel == 1:
            fn = 'exval_arr_1ch.npy'
        elif input_channel == 3:
            fn = 'exval_arr_3ch.npy'
        data = np.load(os.path.join(pro_data_dir, fn))
        df = pd.read_csv(os.path.join(exval_save_dir, 'exval_pred_df.csv'))
    ### load label
    y_true = df['label']
    y_pred_class = df['y_pred_class']
    y_pred = df['y_pred']
    ID = df['fn']
    ### find the ith image to show grad-cam map
    img = data[i, :, :, :]
    # NOTE(review): the reshape assumes 3-channel arrays; the 1-channel
    # branch would fail here -- confirm input_channel is always 3.
    img = img.reshape((1, 192, 192, 3))
    label = y_true[i]
    pred_index = y_pred_class[i]
    y_pred = y_pred[i]
    ID = ID[i]
    return img, label, pred_index, y_pred, ID
#------------------------------------------------------------------------------------
# find last conv layer
#-----------------------------------------------------------------------------------
def find_target_layer(model, saved_model):
    """Return the name of the last layer in ``model`` with a 4D output.

    Walks the layer list back to front and returns the first layer whose
    ``output_shape`` has four dimensions (batch, height, width, channels).

    Raises:
        ValueError: if no layer with a 4D output exists.
    """
    for candidate in model.layers[::-1]:
        rank = len(candidate.output_shape)
        if rank == 4:
            return candidate.name
    raise ValueError("Could not find 4D layer. Cannot apply GradCAM.")
#----------------------------------------------------------------------------------
# calculate gradient class actiavtion map
#----------------------------------------------------------------------------------
def compute_heatmap(model, saved_model, image, pred_index, last_conv_layer):
    """
    construct our gradient model by supplying (1) the inputs
    to our pre-trained model, (2) the output of the (presumably)
    final 4D layer in the network, and (3) the output of the
    softmax activations from the model
    """
    # Grad-CAM: weight the last conv layer's activations by the pooled
    # gradients of the prediction with respect to those activations.
    gradModel = Model(
        inputs=[model.inputs],
        outputs=[model.get_layer(last_conv_layer).output, model.output]
        )
    # record operations for automatic differentiation
    with tf.GradientTape() as tape:
        """
        cast the image tensor to a float-32 data type, pass the
        image through the gradient model, and grab the loss
        associated with the specific class index
        """
        print(pred_index)
        inputs = tf.cast(image, tf.float32)
        print(image.shape)
        last_conv_layer_output, preds = gradModel(inputs)
        print(preds)
        print(preds.shape)
        # class_channel = preds[:, pred_index]
        # NOTE(review): the whole prediction tensor is used as the target;
        # this is only equivalent to selecting pred_index for a single-unit
        # (sigmoid) output head -- confirm the model has one output unit.
        class_channel = preds
    # use automatic differentiation to compute the gradients
    grads = tape.gradient(class_channel, last_conv_layer_output)
    """
    This is a vector where each entry is the mean intensity of the gradient
    over a specific feature map channel
    """
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    """
    We multiply each channel in the feature map array
    by "how important this channel is" with regard to the top predicted class
    then sum all the channels to obtain the heatmap class activation
    """
    last_conv_layer_output = last_conv_layer_output[0]
    heatmap = last_conv_layer_output @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)
    # For visualization purpose, we will also normalize the heatmap between 0 & 1
    heatmap = tf.maximum(heatmap, 0) / tf.math.reduce_max(heatmap)
    heatmap = heatmap.numpy()
    return heatmap
#------------------------------------------------------------------------------------
# save gradcam heat map
#-----------------------------------------------------------------------------------
def save_gradcam(image, heatmap, val_gradcam_dir, test_gradcam_dir, alpha, i):
    """Overlay ``heatmap`` on ``image`` and save the blended PNG to disk.

    Uses the module-level globals ``run_type``, ``re_size``, ``conv_n`` and
    (for the 'exval' run type) ``exval_gradcam_dir``.

    Args:
        image: input array of shape (1, 192, 192, 3).
        heatmap: Grad-CAM map normalised to [0, 1].
        val_gradcam_dir: output folder for the 'val' run type.
        test_gradcam_dir: output folder for the 'test' run type.
        alpha: blending weight of the heatmap over the CT image.
        i: image index, embedded in the output file name.
    """
    # print('heatmap:', heatmap.shape)
    # Rescale heatmap to a range 0-255
    heatmap = np.uint8(255 * heatmap)
    # Use jet colormap to colorize heatmap
    jet = cm.get_cmap("jet")
    # Use RGB values of the colormap
    jet_colors = jet(np.arange(256))[:, :3]
    jet_heatmap = jet_colors[heatmap]
    # resize heatmap
    jet_heatmap = keras.preprocessing.image.array_to_img(jet_heatmap)
    jet_heatmap0 = jet_heatmap.resize(re_size)
    jet_heatmap1 = keras.preprocessing.image.img_to_array(jet_heatmap0)
    # print('jet_heatmap:', jet_heatmap1.shape)
    # resize background CT image
    img = image.reshape((192, 192, 3))
    img = keras.preprocessing.image.array_to_img(img)
    img0 = img.resize(re_size)
    img1 = keras.preprocessing.image.img_to_array(img0)
    # print('img shape:', img1.shape)
    # Superimpose the heatmap on original image
    superimposed_img = jet_heatmap1 * alpha + img1
    superimposed_img = keras.preprocessing.image.array_to_img(superimposed_img)
    # Save the superimposed image
    if run_type == 'val':
        save_dir = val_gradcam_dir
    elif run_type == 'test':
        save_dir = test_gradcam_dir
    elif run_type == 'exval':
        save_dir = exval_gradcam_dir
    fn1 = str(conv_n) + '_' + str(i) + '_' + 'gradcam.png'
    fn2 = str(conv_n) + '_' + str(i) + '_' + 'heatmap.png'
    fn3 = str(conv_n) + '_' + str(i) + '_' + 'heatmap_raw.png'
    fn4 = str(i) + '_' + 'CT.png'
    superimposed_img.save(os.path.join(save_dir, fn1))
    # jet_heatmap0.save(os.path.join(save_dir, fn2))
    # jet_heatmap.save(os.path.join(save_dir, fn3))
    # img0.save(os.path.join(save_dir, fn4))
if __name__ == '__main__':
    # ---- path configuration (cluster-specific). Note the 'constrast'
    # spelling in the data paths vs 'contrast' in model_dir -- both kept
    # exactly as they exist on disk. ----
    train_img_dir = '/media/bhkann/HN_RES1/HN_CONTRAST/train_img_dir'
    val_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val'
    test_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test'
    exval_save_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/exval'
    val_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/val/gradcam'
    test_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
    # NOTE(review): exval reuses the *test* gradcam folder -- confirm intended.
    exval_gradcam_dir = '/mnt/aertslab/USERS/Zezhong/constrast_detection/test/gradcam'
    pro_data_dir = '/home/bhkann/zezhong/git_repo/IV-Contrast-CNN-Project/pro_data'
    model_dir = '/mnt/aertslab/USERS/Zezhong/contrast_detection/model'
    # ---- run parameters ----
    input_channel = 3
    re_size = (192, 192)
    i = 72
    crop = True
    alpha = 0.9  # heatmap overlay weight
    saved_model = 'ResNet_2021_07_18_06_28_40'
    show_network = False
    conv_n = 'conv5'
    run_type = 'val'
    # ---------------------------------------------------------
    # run main function
    # --------------------------------------------------------
    if run_type == 'val':
        save_dir = val_save_dir
    elif run_type == 'test':
        save_dir = test_save_dir
    ## load model and find conv layers
    model = load_model(os.path.join(model_dir, saved_model))
    # model.summary()
    list_i = [100, 105, 110, 115, 120, 125]
    for i in list_i:
        image, label, pred_index, y_pred, ID = data(
            input_channel=input_channel,
            i=i,
            val_save_dir=val_save_dir,
            test_save_dir=test_save_dir
            )
        # Only 'conv4' is visualised; the full list is kept for reference.
        conv_list = ['conv2', 'conv3', 'conv4', 'conv5']
        conv_list = ['conv4']
        for conv_n in conv_list:
            # Map the short stage name to the actual ResNet layer name.
            if conv_n == 'conv2':
                last_conv_layer = 'conv2_block3_1_conv'
            elif conv_n == 'conv3':
                last_conv_layer = 'conv3_block4_1_conv'
            elif conv_n == 'conv4':
                last_conv_layer = 'conv4_block6_1_conv'
            elif conv_n == 'conv5':
                last_conv_layer = 'conv5_block3_out'
            heatmap = compute_heatmap(
                model=model,
                saved_model=saved_model,
                image=image,
                pred_index=pred_index,
                last_conv_layer=last_conv_layer
                )
            save_gradcam(
                image=image,
                heatmap=heatmap,
                val_gradcam_dir=val_gradcam_dir,
                test_gradcam_dir=test_gradcam_dir,
                alpha=alpha,
                i=i
                )
            print('label:', label)
            print('ID:', ID)
            print('y_pred:', y_pred)
            print('prediction:', pred_index)
            print('conv layer:', conv_n)
    # (A large block of commented-out legacy Grad-CAM code that duplicated
    # compute_heatmap's logic was removed here.)
| [
"numpy.uint8",
"os.path.join",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.GradientTape",
"tensorflow.squeeze",
"tensorflow.math.reduce_max",
"tensorflow.maximum",
"tensorflow.reduce_mean",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.cast",
"matplotlib.c... | [((3903, 3940), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['grads'], {'axis': '(0, 1, 2)'}), '(grads, axis=(0, 1, 2))\n', (3917, 3940), True, 'import tensorflow as tf\n'), ((4296, 4315), 'tensorflow.squeeze', 'tf.squeeze', (['heatmap'], {}), '(heatmap)\n', (4306, 4315), True, 'import tensorflow as tf\n'), ((4888, 4911), 'numpy.uint8', 'np.uint8', (['(255 * heatmap)'], {}), '(255 * heatmap)\n', (4896, 4911), True, 'import numpy as np\n'), ((4965, 4983), 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""jet"""'], {}), "('jet')\n", (4976, 4983), True, 'import matplotlib.cm as cm\n'), ((5143, 5194), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['jet_heatmap'], {}), '(jet_heatmap)\n', (5181, 5194), False, 'from tensorflow import keras\n'), ((5261, 5313), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['jet_heatmap0'], {}), '(jet_heatmap0)\n', (5299, 5313), False, 'from tensorflow import keras\n'), ((5444, 5487), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['img'], {}), '(img)\n', (5482, 5487), False, 'from tensorflow import keras\n'), ((5530, 5574), 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['img0'], {}), '(img0)\n', (5568, 5574), False, 'from tensorflow import keras\n'), ((5735, 5791), 'tensorflow.keras.preprocessing.image.array_to_img', 'keras.preprocessing.image.array_to_img', (['superimposed_img'], {}), '(superimposed_img)\n', (5773, 5791), False, 'from tensorflow import keras\n'), ((3126, 3143), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3141, 3143), True, 'import tensorflow as tf\n'), ((3393, 3419), 'tensorflow.cast', 'tf.cast', (['image', 'tf.float32'], {}), '(image, tf.float32)\n', (3400, 3419), True, 'import tensorflow as tf\n'), ((4413, 4435), 'tensorflow.maximum', 'tf.maximum', (['heatmap', '(0)'], {}), '(heatmap, 
0)\n', (4423, 4435), True, 'import tensorflow as tf\n'), ((4438, 4465), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['heatmap'], {}), '(heatmap)\n', (4456, 4465), True, 'import tensorflow as tf\n'), ((6262, 6289), 'os.path.join', 'os.path.join', (['save_dir', 'fn1'], {}), '(save_dir, fn1)\n', (6274, 6289), False, 'import os\n'), ((7719, 7755), 'os.path.join', 'os.path.join', (['model_dir', 'saved_model'], {}), '(model_dir, saved_model)\n', (7731, 7755), False, 'import os\n'), ((771, 801), 'os.path.join', 'os.path.join', (['pro_data_dir', 'fn'], {}), '(pro_data_dir, fn)\n', (783, 801), False, 'import os\n'), ((828, 873), 'os.path.join', 'os.path.join', (['val_save_dir', '"""val_pred_df.csv"""'], {}), "(val_save_dir, 'val_pred_df.csv')\n", (840, 873), False, 'import os\n'), ((5042, 5056), 'numpy.arange', 'np.arange', (['(256)'], {}), '(256)\n', (5051, 5056), True, 'import numpy as np\n'), ((1063, 1093), 'os.path.join', 'os.path.join', (['pro_data_dir', 'fn'], {}), '(pro_data_dir, fn)\n', (1075, 1093), False, 'import os\n'), ((1120, 1167), 'os.path.join', 'os.path.join', (['test_save_dir', '"""test_pred_df.csv"""'], {}), "(test_save_dir, 'test_pred_df.csv')\n", (1132, 1167), False, 'import os\n'), ((1360, 1390), 'os.path.join', 'os.path.join', (['pro_data_dir', 'fn'], {}), '(pro_data_dir, fn)\n', (1372, 1390), False, 'import os\n'), ((1417, 1466), 'os.path.join', 'os.path.join', (['exval_save_dir', '"""exval_pred_df.csv"""'], {}), "(exval_save_dir, 'exval_pred_df.csv')\n", (1429, 1466), False, 'import os\n')] |
import subprocess
import sys
import os
import setup_util
from os.path import expanduser
def start(args, logfile, errfile):
    """Configure and launch the CakePHP benchmark app.

    Rewrites the app's database/redis hosts and deploy paths, then starts
    php-fpm + nginx (or IIS + Wincache on Windows).

    Returns 0 on success, 1 if any subprocess fails.
    """
    fwroot = args.fwroot
    # Point the app config at the benchmark database host and the test root.
    setup_util.replace_text("cakephp/app/Config/database.php", "'host' => '.*',", "'host' => '" + args.database_host + "',")
    setup_util.replace_text("cakephp/app/Config/core.php", "'REDISSERVER'", "'" + args.database_host + "'")
    setup_util.replace_text("cakephp/deploy/cake", "\".*\/FrameworkBenchmarks/cakephp", "\"%s" % args.troot)
    setup_util.replace_text("cakephp/deploy/cake", "Directory .*\/FrameworkBenchmarks/cakephp", "Directory %s" % args.troot)
    setup_util.replace_text("cakephp/deploy/nginx.conf", "root .*\/FrameworkBenchmarks/cakephp", "root %s" % args.troot)
    try:
        if os.name == 'nt':
            # Windows path: switch the cache backend to Wincache and host
            # the app under IIS on port 8080.
            setup_util.replace_text("cakephp/app/Config/core.php", "'Redis'", "'Wincache'")
            subprocess.check_call('icacls "C:\\FrameworkBenchmarks\\cakephp" /grant "IIS_IUSRS:(OI)(CI)F"', shell=True, stderr=errfile, stdout=logfile)
            subprocess.check_call('appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:"C:\\FrameworkBenchmarks\\cakephp\\app\\webroot"', shell=True, stderr=errfile, stdout=logfile)
            return 0
        #subprocess.check_call("sudo cp cake/deploy/cake /etc/apache2/sites-available/", shell=True)
        #subprocess.check_call("sudo a2ensite cake", shell=True)
        subprocess.check_call("sudo chown -R www-data:www-data cakephp", shell=True, stderr=errfile, stdout=logfile)
        # Sudo needed to switch to correct user
        # This is a bit tricky as sudo normally resets the PATH for security
        # To work around that in this one case, we use the full
        # path to the php-fpm binary we setup in bash_profile.sh
        subprocess.check_call("sudo $PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid", shell=True, stderr=errfile, stdout=logfile)
        subprocess.check_call("sudo /usr/local/nginx/sbin/nginx -c $TROOT/deploy/nginx.conf", shell=True, stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1
def stop(logfile, errfile):
    """Stop the CakePHP benchmark app (nginx + php-fpm, or IIS on Windows).

    Returns 0 on success, 1 if any subprocess fails.
    """
    try:
        if os.name == 'nt':
            subprocess.call('appcmd delete site PHP', shell=True, stderr=errfile, stdout=logfile)
            return 0
        # Shut down nginx gracefully, then ask php-fpm to quit via its pidfile.
        subprocess.call("sudo /usr/local/nginx/sbin/nginx -s stop", shell=True, stderr=errfile, stdout=logfile)
        subprocess.call("sudo kill -QUIT $( cat $TROOT/deploy/php-fpm.pid )", shell=True, stderr=errfile, stdout=logfile)
        #subprocess.check_call("sudo a2dissite cake", shell=True)
        #subprocess.check_call("sudo /etc/init.d/apache2 stop", shell=True)
        # Hand file ownership back to the invoking user.
        subprocess.check_call("sudo chown -R $USER:$USER cakephp", shell=True, stderr=errfile, stdout=logfile)
        return 0
    except subprocess.CalledProcessError:
        return 1
| [
"setup_util.replace_text",
"subprocess.call",
"subprocess.check_call"
] | [((150, 274), 'setup_util.replace_text', 'setup_util.replace_text', (['"""cakephp/app/Config/database.php"""', '"""\'host\' => \'.*\',"""', '("\'host\' => \'" + args.database_host + "\',")'], {}), '(\'cakephp/app/Config/database.php\',\n "\'host\' => \'.*\',", "\'host\' => \'" + args.database_host + "\',")\n', (173, 274), False, 'import setup_util\n'), ((273, 380), 'setup_util.replace_text', 'setup_util.replace_text', (['"""cakephp/app/Config/core.php"""', '"""\'REDISSERVER\'"""', '("\'" + args.database_host + "\'")'], {}), '(\'cakephp/app/Config/core.php\', "\'REDISSERVER\'", "\'" +\n args.database_host + "\'")\n', (296, 380), False, 'import setup_util\n'), ((379, 486), 'setup_util.replace_text', 'setup_util.replace_text', (['"""cakephp/deploy/cake"""', '"""".*\\\\/FrameworkBenchmarks/cakephp"""', '(\'"%s\' % args.troot)'], {}), '(\'cakephp/deploy/cake\',\n \'".*\\\\/FrameworkBenchmarks/cakephp\', \'"%s\' % args.troot)\n', (402, 486), False, 'import setup_util\n'), ((486, 611), 'setup_util.replace_text', 'setup_util.replace_text', (['"""cakephp/deploy/cake"""', '"""Directory .*\\\\/FrameworkBenchmarks/cakephp"""', "('Directory %s' % args.troot)"], {}), "('cakephp/deploy/cake',\n 'Directory .*\\\\/FrameworkBenchmarks/cakephp', 'Directory %s' % args.troot)\n", (509, 611), False, 'import setup_util\n'), ((609, 730), 'setup_util.replace_text', 'setup_util.replace_text', (['"""cakephp/deploy/nginx.conf"""', '"""root .*\\\\/FrameworkBenchmarks/cakephp"""', "('root %s' % args.troot)"], {}), "('cakephp/deploy/nginx.conf',\n 'root .*\\\\/FrameworkBenchmarks/cakephp', 'root %s' % args.troot)\n", (632, 730), False, 'import setup_util\n'), ((1354, 1466), 'subprocess.check_call', 'subprocess.check_call', (['"""sudo chown -R www-data:www-data cakephp"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "('sudo chown -R www-data:www-data cakephp', shell=True,\n stderr=errfile, stdout=logfile)\n", (1375, 1466), False, 'import subprocess\n'), ((1717, 1879), 
'subprocess.check_call', 'subprocess.check_call', (['"""sudo $PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "(\n 'sudo $PHP_FPM --fpm-config $FWROOT/config/php-fpm.conf -g $TROOT/deploy/php-fpm.pid'\n , shell=True, stderr=errfile, stdout=logfile)\n", (1738, 1879), False, 'import subprocess\n'), ((1874, 2013), 'subprocess.check_call', 'subprocess.check_call', (['"""sudo /usr/local/nginx/sbin/nginx -c $TROOT/deploy/nginx.conf"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "(\n 'sudo /usr/local/nginx/sbin/nginx -c $TROOT/deploy/nginx.conf', shell=\n True, stderr=errfile, stdout=logfile)\n", (1895, 2013), False, 'import subprocess\n'), ((2240, 2347), 'subprocess.call', 'subprocess.call', (['"""sudo /usr/local/nginx/sbin/nginx -s stop"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "('sudo /usr/local/nginx/sbin/nginx -s stop', shell=True,\n stderr=errfile, stdout=logfile)\n", (2255, 2347), False, 'import subprocess\n'), ((2348, 2466), 'subprocess.call', 'subprocess.call', (['"""sudo kill -QUIT $( cat $TROOT/deploy/php-fpm.pid )"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "('sudo kill -QUIT $( cat $TROOT/deploy/php-fpm.pid )', shell\n =True, stderr=errfile, stdout=logfile)\n", (2363, 2466), False, 'import subprocess\n'), ((2600, 2706), 'subprocess.check_call', 'subprocess.check_call', (['"""sudo chown -R $USER:$USER cakephp"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "('sudo chown -R $USER:$USER cakephp', shell=True,\n stderr=errfile, stdout=logfile)\n", (2621, 2706), False, 'import subprocess\n'), ((764, 843), 'setup_util.replace_text', 'setup_util.replace_text', (['"""cakephp/app/Config/core.php"""', '"""\'Redis\'"""', '"""\'Wincache\'"""'], {}), '(\'cakephp/app/Config/core.php\', "\'Redis\'", "\'Wincache\'")\n', (787, 843), False, 'import setup_util\n'), ((850, 998), 
'subprocess.check_call', 'subprocess.check_call', (['"""icacls "C:\\\\FrameworkBenchmarks\\\\cakephp" /grant "IIS_IUSRS:(OI)(CI)F\\""""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), '(\n \'icacls "C:\\\\FrameworkBenchmarks\\\\cakephp" /grant "IIS_IUSRS:(OI)(CI)F"\',\n shell=True, stderr=errfile, stdout=logfile)\n', (871, 998), False, 'import subprocess\n'), ((996, 1186), 'subprocess.check_call', 'subprocess.check_call', (['"""appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:"C:\\\\FrameworkBenchmarks\\\\cakephp\\\\app\\\\webroot\\""""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), '(\n \'appcmd add site /name:PHP /bindings:http/*:8080: /physicalPath:"C:\\\\FrameworkBenchmarks\\\\cakephp\\\\app\\\\webroot"\'\n , shell=True, stderr=errfile, stdout=logfile)\n', (1017, 1186), False, 'import subprocess\n'), ((2135, 2224), 'subprocess.call', 'subprocess.call', (['"""appcmd delete site PHP"""'], {'shell': '(True)', 'stderr': 'errfile', 'stdout': 'logfile'}), "('appcmd delete site PHP', shell=True, stderr=errfile,\n stdout=logfile)\n", (2150, 2224), False, 'import subprocess\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Common utility functions
Created on Sun May 27 16:37:42 2018
@author: chen
"""
import math
import cv2
import os
from imutils import paths
import numpy as np
import scipy.ndimage
def rotate_cooridinate(cooridinate_og, rotate_angle, rotate_center):
    """Rotate a 2-D point ``rotate_angle`` degrees around ``rotate_center``.

    Args:
        cooridinate_og: (x, y) of the original point.
        rotate_angle: rotation angle in degrees (counter-clockwise).
        rotate_center: (x, y) of the rotation centre.

    Returns:
        numpy int array [x, y] of the rotated point, rounded to integers.
    """
    theta = math.radians(rotate_angle)
    dx = cooridinate_og[0] - rotate_center[0]
    dy = cooridinate_og[1] - rotate_center[1]
    rotated_x = dx * math.cos(theta) - dy * math.sin(theta) + rotate_center[0]
    rotated_y = dx * math.sin(theta) + dy * math.cos(theta) + rotate_center[1]
    # ``np.int`` was removed in NumPy 1.24; the builtin ``int`` is equivalent.
    return np.round(np.array([rotated_x, rotated_y])).astype(int)
def mkdir(path):
    """
    Create directory *path* (including parents) if it does not exist.

    Uses ``os.makedirs(..., exist_ok=True)``, which is a no-op for an
    existing directory and avoids the check-then-create race of the
    original ``os.path.exists`` guard.
    """
    os.makedirs(path, exist_ok=True)
def load_data(path):
    """
    Load every image found under *path* as a grayscale array.

    Returns the images as a list, in the sorted order of their file
    paths.
    """
    print("[INFO] loading images...")
    # grab the image paths in a deterministic (sorted) order
    image_files = sorted(paths.list_images(path))
    # read each file as a single-channel (grayscale) image
    return [cv2.imread(image_file, cv2.IMREAD_GRAYSCALE)
            for image_file in image_files]
def sigmoid(x):
    """
    Logistic sigmoid ``1 / (1 + e**-x)``.

    Evaluated in a numerically stable way: the naive form overflows
    ``math.exp`` for large negative ``x`` (|x| > ~709).  Branching on
    the sign keeps the exponent non-positive, so ``exp`` never
    overflows and the result saturates cleanly at 0.0 / 1.0.
    """
    if x >= 0:
        return 1 / (1 + math.exp(-x))
    z = math.exp(x)
    return z / (1 + z)
def normfun(x, sigma, mu=45):
    """
    Probability density function of a normal distribution.

    Parameters
    ----------
    x : scalar or ndarray
        Point(s) at which to evaluate the density.
    sigma : float
        Standard deviation of the distribution.
    mu : float, optional
        Mean of the distribution.  Defaults to 45 to preserve the
        original hard-coded behaviour while allowing other means.
    """
    return np.exp(-(x - mu) ** 2 / (2 * sigma ** 2)) / (sigma * np.sqrt(2 * np.pi))
def calc_box(box, x_gap, y_gap, rotate_angle, center):
    """
    Axis-aligned bounding box of a doorway plus surrounding margin.

    box: four corner coordinates of the doorway.
    x_gap: space to keep in the vertical direction.
    y_gap: space to keep in the horizontal direction.
    rotate_angle / center: rotation (degrees) applied to the padded
        corners about ``center``.

    Returns ``[row_min, col_min, row_max, col_max]`` of the rotated,
    padded corners.
    """
    # swap each corner to (row, col) order and pad it by the gaps
    padded_corners = np.array([
        box[0][::-1] + [y_gap, x_gap],
        box[1][::-1] + [y_gap, -x_gap],
        box[2][::-1] - [y_gap, x_gap],
        box[3][::-1] - [y_gap, -x_gap],
    ])
    rotated = np.array([rotate_cooridinate(corner, rotate_angle, center)
                        for corner in padded_corners])
    return [np.min(rotated[:, 0]), np.min(rotated[:, 1]),
            np.max(rotated[:, 0]), np.max(rotated[:, 1])]
def calc_IoU(candidateBound, groundTruthBounds):
    """
    Vectorised intersection-over-union of one candidate box against
    many ground-truth boxes.

    Boxes are ``[x1, y1, x2, y2]``; ``groundTruthBounds`` is an (N, 4)
    array.  Returns an array of N IoU values.
    """
    cx1, cy1, cx2, cy2 = (candidateBound[0], candidateBound[1],
                          candidateBound[2], candidateBound[3])
    gx1, gy1 = groundTruthBounds[:, 0], groundTruthBounds[:, 1]
    gx2, gy2 = groundTruthBounds[:, 2], groundTruthBounds[:, 3]
    candidate_area = (cx2 - cx1) * (cy2 - cy1)
    truth_areas = (gx2 - gx1) * (gy2 - gy1)
    # intersection extents, clamped at zero when the boxes are disjoint
    inter_w = np.maximum(0, np.minimum(cx2, gx2) - np.maximum(cx1, gx1))
    inter_h = np.maximum(0, np.minimum(cy2, gy2) - np.maximum(cy1, gy1))
    inter_area = inter_w * inter_h
    return inter_area / (candidate_area + truth_areas - inter_area)
def overlapp(candidateBound, groundTruthBounds):
    """
    Fraction of each ground-truth box covered by the candidate box.

    Boxes are ``[x1, y1, x2, y2]``; ``groundTruthBounds`` is an (N, 4)
    array.  Returns intersection-area / ground-truth-area per box.
    """
    cx1, cy1, cx2, cy2 = (candidateBound[0], candidateBound[1],
                          candidateBound[2], candidateBound[3])
    gx1, gy1 = groundTruthBounds[:, 0], groundTruthBounds[:, 1]
    gx2, gy2 = groundTruthBounds[:, 2], groundTruthBounds[:, 3]
    truth_areas = (gx2 - gx1) * (gy2 - gy1)
    # intersection extents, clamped at zero when the boxes are disjoint
    inter_w = np.maximum(0, np.minimum(cx2, gx2) - np.maximum(cx1, gx1))
    inter_h = np.maximum(0, np.minimum(cy2, gy2) - np.maximum(cy1, gy1))
    return inter_w * inter_h / truth_areas
def calc_corner(door_center, door_size, door_depth, side):
    """
    Derive doorway corner coordinates from centroid, size and depth.

    door_corners_inside: the two corners close to the corridor.
    door_corners_outside: the two corners close to the room; the
    depth offset flips sign with ``side`` via ``(-1)**side``.

    Returns ``(door_corners_inside, door_corners_outside)``; the second
    item is an ndarray of shape (2, 2).
    """
    # Bug fix: int() replaces the np.int alias removed in NumPy 1.24.
    half = int(door_size / 2)
    door_corners_inside = [door_center - np.array([half, 0]),
                           door_center + np.array([door_size - half, 0])]
    depth_offset = np.array([0, np.power(-1, side) * door_depth[side]])
    door_corners_outside = np.array([corner - depth_offset
                                     for corner in door_corners_inside])
    return door_corners_inside, door_corners_outside
def draw_door(mask,complete_map,door,door_depth,side):
    """
    Label the doorway region on the mask and occasionally inject a
    small artificial "error" blob inside it.

    mask: image on which the doorway rectangle is painted (value 255).
    complete_map: image on which the optional error point is drawn
        (value 0).
    door: 2x2 array of the two inside corners, row-major (row, col)
        ordering is assumed from the ``[::-1]`` swaps below --
        TODO confirm against the callers.
    door_depth / side: depth of the doorway and which side of the
        corridor it sits on; the sign ``(-1)**side`` flips the depth
        direction.

    Returns the doorway width ``door_size`` (absolute row span).
    """
    door_size = abs(door[1,0]-door[0,0])
    # corners on the room side: shift by the signed depth
    door_area_inside = door+np.array([0,np.power(-1,side)*door_depth[side]])
    # label the doorway on the mask (filled rectangle, value 255)
    cv2.rectangle(mask,tuple(door[0][::-1]),tuple(door_area_inside[1][::-1]),255,-1)
    # add a small point to emulate the error in the doorway region
    # (only for sufficiently wide doors, with probability 1/4)
    if door_size>20:
        if np.random.randint(4)==0:
            if side ==0:
                # random point strictly inside the doorway rectangle
                pt_center = [np.random.randint(door[0,0]+4,door[1,0]-3),np.random.randint(door[0,1],door_area_inside[0,1])]
            else:
                pt_center = [np.random.randint(door[0,0]+3,door[1,0]-2),np.random.randint(door_area_inside[0,1],door[0,1])]
            # draw the error as a small filled black circle (radius 1-3)
            cv2.circle(complete_map,tuple(pt_center[::-1]),np.random.choice([1,2,3]),0,-1)
    return door_size
def room_division(room_space, num_room):
    """
    Assign room lengths along one side of the corridor.

    room_space: array of the coordinates of the corridor's side
        (one coordinate per row).
    num_room: number of rooms to carve out of that side.

    Returns ``(rooms, rooms_corners)`` where ``rooms`` is a list of
    int coordinate arrays (one per room, separated by a randomly chosen
    wall thickness) and ``rooms_corners`` holds only the first and last
    coordinate of each room, stacked as a (2, 2) array.
    """
    rooms = []
    rooms_corners = []
    a = num_room
    # wall thickness (in cells) between neighbouring rooms
    thickness = np.random.randint(2, 5)
    length = room_space.shape[0] - (num_room - 1) * thickness
    start_point = 0
    for i in range(num_room - 1):
        # draw each room length around the even split length / num_room
        room_size = np.random.randint(length / (a + 0.7), length / (a - 0.7))
        room = room_space[start_point:start_point + room_size, :]
        rooms.append(room)
        start_point += room_size + thickness
    # the last room takes whatever space is left
    room = room_space[start_point:, :]
    rooms.append(room)
    # Bug fix: int replaces the np.int alias removed in NumPy 1.24.
    rooms = [room.astype(int) for room in rooms]
    for x in rooms:
        rooms_corner = np.concatenate((x[0, :][np.newaxis, :], x[-1, :][np.newaxis, :]), axis=0)
        rooms_corners.append(rooms_corner)
    return rooms, rooms_corners
def calc_gradient(gmap):
    """
    Edge response of a map image via an 8-connected Laplacian kernel.

    The image is correlated with ``[[1,1,1],[1,-8,1],[1,1,1]]`` (zero
    response over constant regions), with out-of-image pixels taken as
    the constant value 127.
    """
    laplacian_kernel = np.array([[1, 1, 1],
                                 [1, -8, 1],
                                 [1, 1, 1]])
    # work in int16 so the signed response does not wrap around
    return scipy.ndimage.correlate(gmap.astype(np.int16), laplacian_kernel,
                                   mode='constant', cval=127)
| [
"numpy.sqrt",
"math.cos",
"numpy.array",
"imutils.paths.list_images",
"math.exp",
"os.path.exists",
"numpy.max",
"numpy.exp",
"numpy.concatenate",
"numpy.min",
"numpy.maximum",
"numpy.round",
"numpy.random.choice",
"numpy.int",
"cv2.imread",
"numpy.minimum",
"os.makedirs",
"numpy.p... | [((774, 806), 'numpy.array', 'np.array', (['[rotated_x, rotated_y]'], {}), '([rotated_x, rotated_y])\n', (782, 806), True, 'import numpy as np\n'), ((988, 1008), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1002, 1008), False, 'import os\n'), ((2068, 2209), 'numpy.array', 'np.array', (['[box[0][::-1] + [y_gap, x_gap], box[1][::-1] + [y_gap, -x_gap], box[2][::-1\n ] - [y_gap, x_gap], box[3][::-1] - [y_gap, -x_gap]]'], {}), '([box[0][::-1] + [y_gap, x_gap], box[1][::-1] + [y_gap, -x_gap], \n box[2][::-1] - [y_gap, x_gap], box[3][::-1] - [y_gap, -x_gap]])\n', (2076, 2209), True, 'import numpy as np\n'), ((2407, 2428), 'numpy.array', 'np.array', (['rotated_box'], {}), '(rotated_box)\n', (2415, 2428), True, 'import numpy as np\n'), ((2998, 3018), 'numpy.maximum', 'np.maximum', (['cx1', 'gx1'], {}), '(cx1, gx1)\n', (3008, 3018), True, 'import numpy as np\n'), ((3028, 3048), 'numpy.maximum', 'np.maximum', (['cy1', 'gy1'], {}), '(cy1, gy1)\n', (3038, 3048), True, 'import numpy as np\n'), ((3058, 3078), 'numpy.minimum', 'np.minimum', (['cx2', 'gx2'], {}), '(cx2, gx2)\n', (3068, 3078), True, 'import numpy as np\n'), ((3088, 3108), 'numpy.minimum', 'np.minimum', (['cy2', 'gy2'], {}), '(cy2, gy2)\n', (3098, 3108), True, 'import numpy as np\n'), ((3117, 3139), 'numpy.maximum', 'np.maximum', (['(0)', '(x2 - x1)'], {}), '(0, x2 - x1)\n', (3127, 3139), True, 'import numpy as np\n'), ((3148, 3170), 'numpy.maximum', 'np.maximum', (['(0)', '(y2 - y1)'], {}), '(0, y2 - y1)\n', (3158, 3170), True, 'import numpy as np\n'), ((3667, 3687), 'numpy.maximum', 'np.maximum', (['cx1', 'gx1'], {}), '(cx1, gx1)\n', (3677, 3687), True, 'import numpy as np\n'), ((3697, 3717), 'numpy.maximum', 'np.maximum', (['cy1', 'gy1'], {}), '(cy1, gy1)\n', (3707, 3717), True, 'import numpy as np\n'), ((3727, 3747), 'numpy.minimum', 'np.minimum', (['cx2', 'gx2'], {}), '(cx2, gx2)\n', (3737, 3747), True, 'import numpy as np\n'), ((3757, 3777), 'numpy.minimum', 'np.minimum', 
(['cy2', 'gy2'], {}), '(cy2, gy2)\n', (3767, 3777), True, 'import numpy as np\n'), ((3786, 3808), 'numpy.maximum', 'np.maximum', (['(0)', '(x2 - x1)'], {}), '(0, x2 - x1)\n', (3796, 3808), True, 'import numpy as np\n'), ((3817, 3839), 'numpy.maximum', 'np.maximum', (['(0)', '(y2 - y1)'], {}), '(0, y2 - y1)\n', (3827, 3839), True, 'import numpy as np\n'), ((4545, 4575), 'numpy.array', 'np.array', (['door_corners_outside'], {}), '(door_corners_outside)\n', (4553, 4575), True, 'import numpy as np\n'), ((6006, 6029), 'numpy.random.randint', 'np.random.randint', (['(2)', '(5)'], {}), '(2, 5)\n', (6023, 6029), True, 'import numpy as np\n'), ((6766, 6810), 'numpy.array', 'np.array', (['[[1, 1, 1], [1, -8, 1], [1, 1, 1]]'], {}), '([[1, 1, 1], [1, -8, 1], [1, 1, 1]])\n', (6774, 6810), True, 'import numpy as np\n'), ((1055, 1072), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (1066, 1072), False, 'import os\n'), ((1441, 1484), 'cv2.imread', 'cv2.imread', (['imagePath', 'cv2.IMREAD_GRAYSCALE'], {}), '(imagePath, cv2.IMREAD_GRAYSCALE)\n', (1451, 1484), False, 'import cv2\n'), ((1677, 1718), 'numpy.exp', 'np.exp', (['(-(x - mu) ** 2 / (2 * sigma ** 2))'], {}), '(-(x - mu) ** 2 / (2 * sigma ** 2))\n', (1683, 1718), True, 'import numpy as np\n'), ((2440, 2465), 'numpy.min', 'np.min', (['rotated_box[:, 0]'], {}), '(rotated_box[:, 0])\n', (2446, 2465), True, 'import numpy as np\n'), ((2465, 2490), 'numpy.min', 'np.min', (['rotated_box[:, 1]'], {}), '(rotated_box[:, 1])\n', (2471, 2490), True, 'import numpy as np\n'), ((2490, 2515), 'numpy.max', 'np.max', (['rotated_box[:, 0]'], {}), '(rotated_box[:, 0])\n', (2496, 2515), True, 'import numpy as np\n'), ((2515, 2540), 'numpy.max', 'np.max', (['rotated_box[:, 1]'], {}), '(rotated_box[:, 1])\n', (2521, 2540), True, 'import numpy as np\n'), ((6157, 6214), 'numpy.random.randint', 'np.random.randint', (['(length / (a + 0.7))', '(length / (a - 0.7))'], {}), '(length / (a + 0.7), length / (a - 0.7))\n', (6174, 6214), True, 
'import numpy as np\n'), ((6494, 6567), 'numpy.concatenate', 'np.concatenate', (['(x[0, :][np.newaxis, :], x[-1, :][np.newaxis, :])'], {'axis': '(0)'}), '((x[0, :][np.newaxis, :], x[-1, :][np.newaxis, :]), axis=0)\n', (6508, 6567), True, 'import numpy as np\n'), ((831, 859), 'numpy.round', 'np.round', (['rotated_coordinate'], {}), '(rotated_coordinate)\n', (839, 859), True, 'import numpy as np\n'), ((1293, 1316), 'imutils.paths.list_images', 'paths.list_images', (['path'], {}), '(path)\n', (1310, 1316), False, 'from imutils import paths\n'), ((1562, 1574), 'math.exp', 'math.exp', (['(-x)'], {}), '(-x)\n', (1570, 1574), False, 'import math\n'), ((1724, 1742), 'numpy.sqrt', 'np.sqrt', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (1731, 1742), True, 'import numpy as np\n'), ((5118, 5138), 'numpy.random.randint', 'np.random.randint', (['(4)'], {}), '(4)\n', (5135, 5138), True, 'import numpy as np\n'), ((460, 482), 'math.cos', 'math.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (468, 482), False, 'import math\n'), ((538, 560), 'math.sin', 'math.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (546, 560), False, 'import math\n'), ((631, 653), 'math.sin', 'math.sin', (['rotate_angle'], {}), '(rotate_angle)\n', (639, 653), False, 'import math\n'), ((709, 731), 'math.cos', 'math.cos', (['rotate_angle'], {}), '(rotate_angle)\n', (717, 731), False, 'import math\n'), ((5493, 5520), 'numpy.random.choice', 'np.random.choice', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (5509, 5520), True, 'import numpy as np\n'), ((4271, 4292), 'numpy.int', 'np.int', (['(door_size / 2)'], {}), '(door_size / 2)\n', (4277, 4292), True, 'import numpy as np\n'), ((4861, 4879), 'numpy.power', 'np.power', (['(-1)', 'side'], {}), '(-1, side)\n', (4869, 4879), True, 'import numpy as np\n'), ((5197, 5246), 'numpy.random.randint', 'np.random.randint', (['(door[0, 0] + 4)', '(door[1, 0] - 3)'], {}), '(door[0, 0] + 4, door[1, 0] - 3)\n', (5214, 5246), True, 'import numpy as np\n'), ((5240, 5293), 
'numpy.random.randint', 'np.random.randint', (['door[0, 1]', 'door_area_inside[0, 1]'], {}), '(door[0, 1], door_area_inside[0, 1])\n', (5257, 5293), True, 'import numpy as np\n'), ((5339, 5388), 'numpy.random.randint', 'np.random.randint', (['(door[0, 0] + 3)', '(door[1, 0] - 2)'], {}), '(door[0, 0] + 3, door[1, 0] - 2)\n', (5356, 5388), True, 'import numpy as np\n'), ((5382, 5435), 'numpy.random.randint', 'np.random.randint', (['door_area_inside[0, 1]', 'door[0, 1]'], {}), '(door_area_inside[0, 1], door[0, 1])\n', (5399, 5435), True, 'import numpy as np\n'), ((4355, 4376), 'numpy.int', 'np.int', (['(door_size / 2)'], {}), '(door_size / 2)\n', (4361, 4376), True, 'import numpy as np\n'), ((4422, 4440), 'numpy.power', 'np.power', (['(-1)', 'side'], {}), '(-1, side)\n', (4430, 4440), True, 'import numpy as np\n')] |
import logging
import time
import re
import serial
from threading import Thread, Event
from respeaker import Microphone
from respeaker import BingSpeechAPI
from respeaker import PixelRing,pixel_ring
# NOTE(review): hard-coded API credential checked into source; should be
# loaded from configuration or the environment instead.
BING_KEY = '95e4fe8b3a324389be4595bd1813121c'
# Non-blocking UART link to the motion controller.
ser = serial.Serial('/dev/ttyS1',115200,timeout=0)
# 4-byte command frames: 0xAA <id> <value> 0x55 (presumably a servo id
# and a target position -- TODO confirm against the controller firmware).
# ``data``/``data1`` are reusable scratch frames whose id byte is
# patched by robot() before each burst.
data=[0xAA,0x01,0x64,0x55]
data1=[0xAA,0x01,0x00,0x55]
# two-frame bursts used by the "ShakeHand" gesture
data2=[0xAA,0x01,0x00,0x55,0xAA,0x00,0x00,0x55]
data3=[0xAA,0x01,0x64,0x55,0xAA,0x00,0x64,0x55]
# resting-position frames for each actuator
lefthand = [0xAA,0x00,0x32,0x55]
righthand = [0xAA,0x01,0x32,0x55]
nodhead = [0xAA,0x02,0x32,0x55]
shakehead = [0xAA,0x03,0x32,0x55]
wakeup = [0xAA,0x02,0x64,0x55,0xAA,0x03,0x64,0x55]
origin = [lefthand,righthand,nodhead,shakehead]
def robot(action):
    """
    Drive one of the robot's gestures over the serial link.

    ``action`` selects a predefined motion: "LeftHand", "RightHand",
    "NodHead", "ShakeHead", "ShakeHand" or "WakeUp".  The hand/head
    gestures patch the id byte of the shared scratch frames ``data`` /
    ``data1`` and alternate them three times (raised / lowered) before
    sending the actuator back to its resting frame.  Unknown actions
    are silently ignored.
    """
    if action == "LeftHand":
        # target actuator 0 with the shared scratch frames
        data[1] = 0x00
        data1[1] = 0x00
        for i in range(0,3):
            # alternate between the two positions to produce the wave
            ser.write(data)
            time.sleep(.4)
            ser.write(data1)
            time.sleep(.4)
        # return to the resting position
        ser.write(lefthand)
    elif action == "RightHand":
        data[1] = 0x01
        data1[1] = 0x01
        for i in range(0,3):
            ser.write(data)
            time.sleep(.4)
            ser.write(data1)
            time.sleep(.4)
        ser.write(righthand)
    elif action == "NodHead":
        data[1] = 0x02
        data1[1] = 0x02
        for i in range(0,3):
            ser.write(data)
            time.sleep(.4)
            ser.write(data1)
            time.sleep(.4)
        ser.write(nodhead)
    elif action == "ShakeHead":
        data[1] = 0x03
        data1[1] = 0x03
        for i in range(0,3):
            ser.write(data)
            time.sleep(.4)
            ser.write(data1)
            time.sleep(.4)
        ser.write(shakehead)
    elif action == "ShakeHand":
        # both hands move together using the two-frame burst commands
        for i in range(0,3):
            ser.write(data2)
            time.sleep(.5)
            ser.write(data3)
            time.sleep(.5)
    elif action == "WakeUp":
        # raise the head actuators, then send everything to rest
        ser.write(wakeup)
        time.sleep(.5)
        for i in range(0,4):
            ser.write(origin[i])
def task(quit_event):
    """
    Voice-control worker loop: wake on the hot word, recognise the
    spoken command with the Bing Speech API and dispatch a gesture.

    Runs until ``quit_event`` is set.  Recognition failures are caught
    and reported without stopping the loop.
    """
    mic = Microphone(quit_event=quit_event)
    bing = BingSpeechAPI(BING_KEY)
    while not quit_event.is_set():
        # block until the hot word "respeaker" is detected
        if mic.wakeup('respeaker'):
            print('Wake up')
            pixel_ring.listen()
            robot("WakeUp")
            time.sleep(.1)
            # NOTE: this local ``data`` shadows the module-level frame
            # list of the same name inside this function.
            data = mic.listen()
            try:
                pixel_ring.wait()
                text = bing.recognize(data, language='en-US')
                # spi.write('answer\n')
                # NOTE(review): str + bytes concatenation suggests this
                # was written for Python 2; it raises TypeError on
                # Python 3 -- confirm the target interpreter.
                print('\nBing:' + text.encode('utf-8'))
                # keyword matching, ordered most specific first
                if re.search(r'shake', text) and re.search(r'left hand', text):
                    robot("LeftHand")
                    print("Shake Left hand")
                elif re.search(r'shake',text) and re.search(r'right hand',text):
                    robot("RightHand")
                    print("Shake right hand")
                elif re.search(r'shake.*(head).*',text):
                    robot("ShakeHead")
                    print("Shake head")
                elif re.search(r'head',text) or re.search(r'had',text):
                    robot("NodHead")
                    print("Nod head")
                elif re.search(r'hand',text) :
                    robot("ShakeHand")
                    print("Shake hand")
                elif re.search(r'hello',text):
                    robot("RightHand")
                    print("Hello")
                else:
                    print("Other")
            except Exception as e:
                print("\nCould not request results from Microsoft Bing Voice Recognition service; {0}".format(e))
            # if text:
            #     print('Recognized %s' % text)
            pixel_ring.off()
def main():
    """Configure logging, start the voice-control worker and wait for
    Ctrl-C; on interrupt, signal the worker and close the serial port."""
    logging.basicConfig(level=logging.DEBUG)
    stop_signal = Event()
    worker = Thread(target=task, args=(stop_signal,))
    worker.start()
    try:
        # idle until the user interrupts with Ctrl-C
        while True:
            time.sleep(1)
    except KeyboardInterrupt:
        print('Quit')
        stop_signal.set()
        ser.close()
    worker.join()
# Run the voice-control loop only when executed as a script.
if __name__ == '__main__':
    main()
"logging.basicConfig",
"respeaker.pixel_ring.off",
"respeaker.pixel_ring.wait",
"respeaker.BingSpeechAPI",
"time.sleep",
"respeaker.Microphone",
"threading.Event",
"respeaker.pixel_ring.listen",
"serial.Serial",
"threading.Thread",
"re.search"
] | [((254, 300), 'serial.Serial', 'serial.Serial', (['"""/dev/ttyS1"""', '(115200)'], {'timeout': '(0)'}), "('/dev/ttyS1', 115200, timeout=0)\n", (267, 300), False, 'import serial\n'), ((2031, 2064), 'respeaker.Microphone', 'Microphone', ([], {'quit_event': 'quit_event'}), '(quit_event=quit_event)\n', (2041, 2064), False, 'from respeaker import Microphone\n'), ((2076, 2099), 'respeaker.BingSpeechAPI', 'BingSpeechAPI', (['BING_KEY'], {}), '(BING_KEY)\n', (2089, 2099), False, 'from respeaker import BingSpeechAPI\n'), ((3759, 3799), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (3778, 3799), False, 'import logging\n'), ((3822, 3829), 'threading.Event', 'Event', ([], {}), '()\n', (3827, 3829), False, 'from threading import Thread, Event\n'), ((3843, 3882), 'threading.Thread', 'Thread', ([], {'target': 'task', 'args': '(quit_event,)'}), '(target=task, args=(quit_event,))\n', (3849, 3882), False, 'from threading import Thread, Event\n'), ((852, 867), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (862, 867), False, 'import time\n'), ((908, 923), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (918, 923), False, 'import time\n'), ((2213, 2232), 'respeaker.pixel_ring.listen', 'pixel_ring.listen', ([], {}), '()\n', (2230, 2232), False, 'from respeaker import PixelRing, pixel_ring\n'), ((2286, 2301), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (2296, 2301), False, 'import time\n'), ((3725, 3741), 'respeaker.pixel_ring.off', 'pixel_ring.off', ([], {}), '()\n', (3739, 3741), False, 'from respeaker import PixelRing, pixel_ring\n'), ((3943, 3956), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (3953, 3956), False, 'import time\n'), ((1099, 1114), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1109, 1114), False, 'import time\n'), ((1155, 1170), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1165, 1170), False, 'import time\n'), ((2379, 2396), 
'respeaker.pixel_ring.wait', 'pixel_ring.wait', ([], {}), '()\n', (2394, 2396), False, 'from respeaker import PixelRing, pixel_ring\n'), ((1345, 1360), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1355, 1360), False, 'import time\n'), ((1401, 1416), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1411, 1416), False, 'import time\n'), ((2580, 2604), 're.search', 're.search', (['"""shake"""', 'text'], {}), "('shake', text)\n", (2589, 2604), False, 'import re\n'), ((2610, 2638), 're.search', 're.search', (['"""left hand"""', 'text'], {}), "('left hand', text)\n", (2619, 2638), False, 'import re\n'), ((1593, 1608), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1603, 1608), False, 'import time\n'), ((1649, 1664), 'time.sleep', 'time.sleep', (['(0.4)'], {}), '(0.4)\n', (1659, 1664), False, 'import time\n'), ((2745, 2769), 're.search', 're.search', (['"""shake"""', 'text'], {}), "('shake', text)\n", (2754, 2769), False, 'import re\n'), ((2774, 2803), 're.search', 're.search', (['"""right hand"""', 'text'], {}), "('right hand', text)\n", (2783, 2803), False, 'import re\n'), ((2911, 2945), 're.search', 're.search', (['"""shake.*(head).*"""', 'text'], {}), "('shake.*(head).*', text)\n", (2920, 2945), False, 'import re\n'), ((1795, 1810), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1805, 1810), False, 'import time\n'), ((1851, 1866), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1861, 1866), False, 'import time\n'), ((1929, 1944), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1939, 1944), False, 'import time\n'), ((3047, 3070), 're.search', 're.search', (['"""head"""', 'text'], {}), "('head', text)\n", (3056, 3070), False, 'import re\n'), ((3074, 3096), 're.search', 're.search', (['"""had"""', 'text'], {}), "('had', text)\n", (3083, 3096), False, 'import re\n'), ((3194, 3217), 're.search', 're.search', (['"""hand"""', 'text'], {}), "('hand', text)\n", (3203, 3217), False, 'import re\n'), ((3320, 3344), 
're.search', 're.search', (['"""hello"""', 'text'], {}), "('hello', text)\n", (3329, 3344), False, 'import re\n')] |
#!/usr/bin/env python
# encoding: utf-8
'''
@author: caroline
@license: (C) Copyright 2019-2022, Node Supply Chain Manager Corporation Limited.
@contact: <EMAIL>
@software: pycharm
@file: account_voteCredit.py
@time: 2020/1/8 11:23 上午
@desc:
'''
from apis.API import request_Api
def voteCredit(api_name, params):
    '''
    Cast a vote through the JSON-RPC ``account_voteCredit`` API.

    Example request:
    curl -H "Content-Type: application/json" -X post --data '{"jsonrpc":"2.0","method":"account_voteCredit","params":["0x300fc5a14e578be28c64627c0e7e321771c58cd4","0x0ad472fd967eb77fb6e36ec40901790065155d5e","0xf4240","0x110","0x30000"],"id":1}' http://127.0.0.1:15645

    :param api_name: JSON-RPC method name, e.g. ``account_voteCredit``.
    :param params: vote parameters: from address, to address, amount,
        gas price and fee.
    :return: the API response (transaction hash) on success, or ``None``
        when the request fails.
    '''
    try:
        result = request_Api(api_name, params)
        print("投票api返回值为{}".format(result))
        # Bug fix: the original discarded the API response even though
        # the docstring promises the transaction hash; return it.
        return result
    except Exception as e:
        print("投票api返回错误:{}".format(e))
        return None
# Smoke test: cast a single vote when the module is run directly.
if __name__ == '__main__':
    api_name = "account_voteCredit"
    params = ["0xaD3dC2D8aedef155eabA42Ab72C1FE480699336c", "0xef32f718642426fba949b42e3aff6c56fe08b23c", "0xf4240", "0x110", "0x30000"]
    voteCredit(api_name, params)
"apis.API.request_Api"
] | [((693, 722), 'apis.API.request_Api', 'request_Api', (['api_name', 'params'], {}), '(api_name, params)\n', (704, 722), False, 'from apis.API import request_Api\n')] |
####################
# ES-DOC CIM Questionnaire
# Copyright (c) 2017 ES-DOC. All rights reserved.
#
# University of Colorado, Boulder
# http://cires.colorado.edu/
#
# This project is distributed according to the terms of the MIT license [http://www.opensource.org/licenses/MIT].
####################
from django.db import models
from django.conf import settings
import os
from Q.questionnaire import APP_LABEL, q_logger
from Q.questionnaire.q_fields import QVersionField
from Q.questionnaire.q_utils import EnumeratedType, EnumeratedTypeList
from Q.questionnaire.q_constants import *
###################
# local constants #
###################
# Directory (under MEDIA_ROOT) where publication files are written.
PUBLICATION_UPLOAD_DIR = "publications"
PUBLICATION_UPLOAD_PATH = os.path.join(APP_LABEL, PUBLICATION_UPLOAD_DIR)


class QPublicactionFormat(EnumeratedType):
    """An enumerated publication format; renders as its type key."""

    def __str__(self):
        return "{0}".format(self.get_type())

# Registry of the supported publication formats.
QPublicationFormats = EnumeratedTypeList([
    QPublicactionFormat("CIM2_XML", "CIM2 XML"),
])
####################
# the actual class #
####################
class QPublication(models.Model):
    """
    A published (serialized) snapshot of a model realization.

    Each (name, version) pair is unique; ``content`` holds the
    serialized document and ``write()`` persists it under
    ``MEDIA_ROOT/<PUBLICATION_UPLOAD_PATH>/<project>/``.
    """

    class Meta:
        app_label = APP_LABEL
        abstract = False
        unique_together = ("name", "version")
        verbose_name = "Questionnaire Publication"
        verbose_name_plural = "Questionnaire Publications"

    # identifier shared by all versions of the same publication
    name = models.UUIDField(blank=False)
    created = models.DateTimeField(auto_now_add=True, editable=False)
    modified = models.DateTimeField(auto_now=True, editable=False)
    version = QVersionField(blank=False)
    format = models.CharField(max_length=LIL_STRING, blank=False, choices=[(pf.get_type(), pf.get_name()) for pf in QPublicationFormats])
    model = models.ForeignKey("QModelRealization", blank=False, null=False, related_name="publications")
    content = models.TextField()

    def __str__(self):
        # e.g. "<uuid>_1" -- get_version_major presumably comes from
        # QVersionField; confirm against its implementation.
        return "{0}_{1}".format(self.name, self.get_version_major())

    def get_file_path(self):
        """Return the absolute filesystem path this publication is written to."""
        file_name = "{0}.xml".format(str(self))
        return os.path.join(
            settings.MEDIA_ROOT,
            PUBLICATION_UPLOAD_PATH,
            self.model.project.name,
            file_name
        )

    def write(self):
        """Write ``content`` to disk, creating parent directories as needed."""
        publication_path = self.get_file_path()
        # exist_ok=True avoids the check-then-create race of the
        # original os.path.exists guard (and the dead ``f.closed``
        # statement has been removed -- it was a no-op expression).
        os.makedirs(os.path.dirname(publication_path), exist_ok=True)
        with open(publication_path, "w") as f:
            f.write(self.content)
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"os.path.join",
"os.path.dirname",
"django.db.models.DateTimeField",
"Q.questionnaire.q_fields.QVersionField",
"django.db.models.UUIDField"
] | [((725, 772), 'os.path.join', 'os.path.join', (['APP_LABEL', 'PUBLICATION_UPLOAD_DIR'], {}), '(APP_LABEL, PUBLICATION_UPLOAD_DIR)\n', (737, 772), False, 'import os\n'), ((1323, 1352), 'django.db.models.UUIDField', 'models.UUIDField', ([], {'blank': '(False)'}), '(blank=False)\n', (1339, 1352), False, 'from django.db import models\n'), ((1367, 1422), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)', 'editable': '(False)'}), '(auto_now_add=True, editable=False)\n', (1387, 1422), False, 'from django.db import models\n'), ((1438, 1489), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)', 'editable': '(False)'}), '(auto_now=True, editable=False)\n', (1458, 1489), False, 'from django.db import models\n'), ((1504, 1530), 'Q.questionnaire.q_fields.QVersionField', 'QVersionField', ([], {'blank': '(False)'}), '(blank=False)\n', (1517, 1530), False, 'from Q.questionnaire.q_fields import QVersionField\n'), ((1683, 1779), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""QModelRealization"""'], {'blank': '(False)', 'null': '(False)', 'related_name': '"""publications"""'}), "('QModelRealization', blank=False, null=False,\n related_name='publications')\n", (1700, 1779), False, 'from django.db import models\n'), ((1791, 1809), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (1807, 1809), False, 'from django.db import models\n'), ((2001, 2100), 'os.path.join', 'os.path.join', (['settings.MEDIA_ROOT', 'PUBLICATION_UPLOAD_PATH', 'self.model.project.name', 'file_name'], {}), '(settings.MEDIA_ROOT, PUBLICATION_UPLOAD_PATH, self.model.\n project.name, file_name)\n', (2013, 2100), False, 'import os\n'), ((2279, 2312), 'os.path.dirname', 'os.path.dirname', (['publication_path'], {}), '(publication_path)\n', (2294, 2312), False, 'import os\n'), ((2339, 2372), 'os.path.dirname', 'os.path.dirname', (['publication_path'], {}), '(publication_path)\n', (2354, 2372), False, 'import os\n')] |
# -*- coding: utf-8 -*-
import os
import shutil
from django.conf import settings
from django.core.management import call_command
from django.test import TestCase
from six import StringIO
try:
from unittest.mock import patch
except ImportError:
from mock import patch
class CreateCommandTests(TestCase):
    """Tests for create_command command."""

    def setUp(self): # noqa
        # destination where create_command writes the new management command
        self.management_command_path = os.path.join(
            settings.BASE_DIR, 'tests/testapp/management')
        # template directory the command copies from
        self.command_template_path = os.path.join(
            settings.BASE_DIR, 'django_extensions/conf/command_template')
        # files the command is expected to create, relative to the
        # management directory
        self.files = [
            '__init__.py',
            'commands/__init__.py',
            'commands/sample.py',
        ]

    def tearDown(self): # noqa
        # remove everything the tests may have created so each test
        # starts from a clean filesystem state
        shutil.rmtree(self.management_command_path,
                      ignore_errors=True)
        shutil.rmtree(os.path.join(self.command_template_path, '.hidden'),
                      ignore_errors=True)
        test_pyc_path = os.path.join(self.command_template_path, 'test.pyc')
        if os.path.isfile(test_pyc_path):
            os.remove(test_pyc_path)

    def _create_management_command_with_empty_files(self):
        # pre-create the expected output files as empty files
        os.mkdir(self.management_command_path)
        os.mkdir(os.path.join(self.management_command_path, 'commands'))
        for f in self.files:
            os.mknod(os.path.join(self.management_command_path, f))

    def _create__pycache__in_command_template_directory(self):
        # plant a .pyc in the template dir; the command must skip it
        os.mknod(os.path.join(self.command_template_path, 'test.pyc'))

    def _create_hidden_directory_in_command_template_directory(self):
        # plant a hidden directory in the template dir; the command
        # must skip it
        os.mkdir(os.path.join(self.command_template_path, '.hidden'))

    @patch('sys.stdout', new_callable=StringIO)
    def test_should_print_management_command_files_only_on_dry_run(self, m_stdout): # noqa
        call_command('create_command', 'testapp', '--dry-run', verbosity=2)

        # dry-run prints the paths but must not create any file
        for f in self.files:
            filepath = os.path.join(self.management_command_path, f)
            self.assertIn(filepath, m_stdout.getvalue())
            self.assertFalse(os.path.isfile(filepath))

    @patch('sys.stdout', new_callable=StringIO)
    def test_should_create_management_command_files_and_print_filepaths(self, m_stdout): # noqa
        call_command('create_command', 'testapp', verbosity=2)
        # normal run prints the paths and creates the files
        for f in self.files:
            filepath = os.path.join(self.management_command_path, f)
            self.assertIn(filepath, m_stdout.getvalue())
            self.assertTrue(os.path.isfile(filepath))

    @patch('sys.stdout', new_callable=StringIO)
    def test_should_print_that_filepaths_already_exists(self, m_stdout): # noqa
        self._create_management_command_with_empty_files()
        call_command('create_command', 'testapp', verbosity=2)
        # pre-existing files are reported and must not be overwritten
        # (they stay empty)
        for f in self.files:
            filepath = os.path.join(self.management_command_path, f)
            self.assertIn(
                '{} already exists'.format(filepath), m_stdout.getvalue())
            self.assertTrue(os.path.isfile(filepath))
            self.assertEqual(os.path.getsize(filepath), 0)

    @patch('sys.stderr', new_callable=StringIO)
    @patch('django_extensions.management.commands.create_command._make_writeable') # noqa
    def test_should_print_error_on_OSError_exception(self, m__make_writeable, m_stderr): # noqa
        # force the chmod helper to fail; the command should warn per
        # file on stderr instead of aborting
        m__make_writeable.side_effect = OSError
        self._create__pycache__in_command_template_directory()
        self._create_hidden_directory_in_command_template_directory()
        call_command('create_command', 'testapp')
        for f in self.files:
            filepath = os.path.join(self.management_command_path, f)
            self.assertIn("Notice: Couldn't set permission bits on {}. You're probably using an uncommon filesystem setup. No problem.\n".format(filepath), # noqa
                          m_stderr.getvalue())
| [
"os.path.getsize",
"mock.patch",
"django.core.management.call_command",
"os.path.join",
"os.path.isfile",
"os.mkdir",
"shutil.rmtree",
"os.remove"
] | [((1716, 1758), 'mock.patch', 'patch', (['"""sys.stdout"""'], {'new_callable': 'StringIO'}), "('sys.stdout', new_callable=StringIO)\n", (1721, 1758), False, 'from mock import patch\n'), ((2144, 2186), 'mock.patch', 'patch', (['"""sys.stdout"""'], {'new_callable': 'StringIO'}), "('sys.stdout', new_callable=StringIO)\n", (2149, 2186), False, 'from mock import patch\n'), ((2563, 2605), 'mock.patch', 'patch', (['"""sys.stdout"""'], {'new_callable': 'StringIO'}), "('sys.stdout', new_callable=StringIO)\n", (2568, 2605), False, 'from mock import patch\n'), ((3130, 3172), 'mock.patch', 'patch', (['"""sys.stderr"""'], {'new_callable': 'StringIO'}), "('sys.stderr', new_callable=StringIO)\n", (3135, 3172), False, 'from mock import patch\n'), ((3178, 3255), 'mock.patch', 'patch', (['"""django_extensions.management.commands.create_command._make_writeable"""'], {}), "('django_extensions.management.commands.create_command._make_writeable')\n", (3183, 3255), False, 'from mock import patch\n'), ((428, 487), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""tests/testapp/management"""'], {}), "(settings.BASE_DIR, 'tests/testapp/management')\n", (440, 487), False, 'import os\n'), ((538, 612), 'os.path.join', 'os.path.join', (['settings.BASE_DIR', '"""django_extensions/conf/command_template"""'], {}), "(settings.BASE_DIR, 'django_extensions/conf/command_template')\n", (550, 612), False, 'import os\n'), ((798, 861), 'shutil.rmtree', 'shutil.rmtree', (['self.management_command_path'], {'ignore_errors': '(True)'}), '(self.management_command_path, ignore_errors=True)\n', (811, 861), False, 'import shutil\n'), ((1025, 1077), 'os.path.join', 'os.path.join', (['self.command_template_path', '"""test.pyc"""'], {}), "(self.command_template_path, 'test.pyc')\n", (1037, 1077), False, 'import os\n'), ((1089, 1118), 'os.path.isfile', 'os.path.isfile', (['test_pyc_path'], {}), '(test_pyc_path)\n', (1103, 1118), False, 'import os\n'), ((1225, 1263), 'os.mkdir', 'os.mkdir', 
(['self.management_command_path'], {}), '(self.management_command_path)\n', (1233, 1263), False, 'import os\n'), ((1859, 1926), 'django.core.management.call_command', 'call_command', (['"""create_command"""', '"""testapp"""', '"""--dry-run"""'], {'verbosity': '(2)'}), "('create_command', 'testapp', '--dry-run', verbosity=2)\n", (1871, 1926), False, 'from django.core.management import call_command\n'), ((2292, 2346), 'django.core.management.call_command', 'call_command', (['"""create_command"""', '"""testapp"""'], {'verbosity': '(2)'}), "('create_command', 'testapp', verbosity=2)\n", (2304, 2346), False, 'from django.core.management import call_command\n'), ((2755, 2809), 'django.core.management.call_command', 'call_command', (['"""create_command"""', '"""testapp"""'], {'verbosity': '(2)'}), "('create_command', 'testapp', verbosity=2)\n", (2767, 2809), False, 'from django.core.management import call_command\n'), ((3551, 3592), 'django.core.management.call_command', 'call_command', (['"""create_command"""', '"""testapp"""'], {}), "('create_command', 'testapp')\n", (3563, 3592), False, 'from django.core.management import call_command\n'), ((906, 957), 'os.path.join', 'os.path.join', (['self.command_template_path', '""".hidden"""'], {}), "(self.command_template_path, '.hidden')\n", (918, 957), False, 'import os\n'), ((1132, 1156), 'os.remove', 'os.remove', (['test_pyc_path'], {}), '(test_pyc_path)\n', (1141, 1156), False, 'import os\n'), ((1281, 1335), 'os.path.join', 'os.path.join', (['self.management_command_path', '"""commands"""'], {}), "(self.management_command_path, 'commands')\n", (1293, 1335), False, 'import os\n'), ((1515, 1567), 'os.path.join', 'os.path.join', (['self.command_template_path', '"""test.pyc"""'], {}), "(self.command_template_path, 'test.pyc')\n", (1527, 1567), False, 'import os\n'), ((1657, 1708), 'os.path.join', 'os.path.join', (['self.command_template_path', '""".hidden"""'], {}), "(self.command_template_path, '.hidden')\n", (1669, 1708), 
False, 'import os\n'), ((1980, 2025), 'os.path.join', 'os.path.join', (['self.management_command_path', 'f'], {}), '(self.management_command_path, f)\n', (1992, 2025), False, 'import os\n'), ((2400, 2445), 'os.path.join', 'os.path.join', (['self.management_command_path', 'f'], {}), '(self.management_command_path, f)\n', (2412, 2445), False, 'import os\n'), ((2863, 2908), 'os.path.join', 'os.path.join', (['self.management_command_path', 'f'], {}), '(self.management_command_path, f)\n', (2875, 2908), False, 'import os\n'), ((3645, 3690), 'os.path.join', 'os.path.join', (['self.management_command_path', 'f'], {}), '(self.management_command_path, f)\n', (3657, 3690), False, 'import os\n'), ((1387, 1432), 'os.path.join', 'os.path.join', (['self.management_command_path', 'f'], {}), '(self.management_command_path, f)\n', (1399, 1432), False, 'import os\n'), ((2112, 2136), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (2126, 2136), False, 'import os\n'), ((2531, 2555), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (2545, 2555), False, 'import os\n'), ((3039, 3063), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (3053, 3063), False, 'import os\n'), ((3094, 3119), 'os.path.getsize', 'os.path.getsize', (['filepath'], {}), '(filepath)\n', (3109, 3119), False, 'import os\n')] |
import ADT_of_person as AP
import datetime as dm
#ADT Staff()
# Staff(self, str name, str sex, tuple birthday, tuple entey_date, int salary, str position)
# name(self)
# sex(self)
# en_year(self)
# salary(self)
# set_salary(self, new_salary)
# position(self)
# set_position(self, new_position)
# birthday(self)
# detail(self)
class Staff(AP.Person):
    """Staff record: a Person extended with entry date, position and salary.

    ADT operations: name, sex, en_year, salary, set_salary, position,
    set_position, birthday, detail.
    """
    _id_num = 0  # class-wide counter used to build unique staff ids

    @classmethod
    def _id_gen(cls, birthday):
        """Generate an id string '0<birth_year:04d><counter:05d>'."""
        cls._id_num += 1
        birth_year = dm.date(*birthday).year
        return '0{:04}{:05}'.format(birth_year, cls._id_num)

    def __init__(self, name, sex, birthday, entry_date, salary, position):
        """Create a staff record.

        :param name: person name (str)
        :param sex: 'male' or 'female'
        :param birthday: (year, month, day) tuple
        :param entry_date: (year, month, day) tuple of the hire date
        :param salary: salary amount (int)
        :param position: job title
        :raises AP.PersonValueError: on any invalid argument
        """
        if not isinstance(name, str) or sex not in ('male', 'female') or\
           not isinstance(salary, int):
            raise AP.PersonValueError()
        try:
            birth = dm.date(*birthday)
            # BUG FIX: previously used undefined name 'enter_date'.
            entry = dm.date(*entry_date)
        except (TypeError, ValueError):
            # dm.date raises TypeError/ValueError on malformed tuples;
            # the old bare 'except:' also swallowed unrelated errors.
            raise AP.PersonValueError()
        # NOTE(review): AP.Person.__init__ is not called here; attributes are
        # set directly -- confirm Person's detail() relies only on these.
        self._name = name
        self._sex = sex
        self._birthday = birth
        self._entry_date = entry
        self._position = position
        self._salary = salary

    def name(self):
        """Return the staff member's name."""
        return self._name

    def sex(self):
        """Return the staff member's sex."""
        return self._sex

    def en_year(self):
        """Return the year of entry."""
        return self._entry_date.year

    def salary(self):
        """Return the current salary (accessor listed in the ADT spec)."""
        return self._salary

    def set_salary(self, new_salary):
        """Set a new integer salary; raise TypeError otherwise."""
        if not isinstance(new_salary, int):
            raise TypeError
        # BUG FIX: previously assigned to misspelled 'self._salaey',
        # so the salary was never actually updated.
        self._salary = new_salary

    def position(self):
        """Return the current position."""
        return self._position

    def set_position(self, new_position):
        """Set a new position."""
        self._position = new_position

    def birthday(self):
        """Return the birthday as a datetime.date."""
        return self._birthday

    def detail(self):
        """Return a comma-joined description extending Person.detail()."""
        # BUG FIX: 'super.detail()' called .detail on the builtin 'super'
        # type object; super() must be invoked.
        return ','.join((super().detail(),
                         'entry_date' + str(self._entry_date),
                         'position' + str(self._position),
                         'salary' + str(self._salary)))
| [
"datetime.date",
"ADT_of_person.PersonValueError"
] | [((503, 521), 'datetime.date', 'dm.date', (['*birthday'], {}), '(*birthday)\n', (510, 521), True, 'import datetime as dm\n'), ((806, 827), 'ADT_of_person.PersonValueError', 'AP.PersonValueError', ([], {}), '()\n', (825, 827), True, 'import ADT_of_person as AP\n'), ((863, 881), 'datetime.date', 'dm.date', (['*birthday'], {}), '(*birthday)\n', (870, 881), True, 'import datetime as dm\n'), ((903, 923), 'datetime.date', 'dm.date', (['*enter_date'], {}), '(*enter_date)\n', (910, 923), True, 'import datetime as dm\n'), ((960, 981), 'ADT_of_person.PersonValueError', 'AP.PersonValueError', ([], {}), '()\n', (979, 981), True, 'import ADT_of_person as AP\n')] |
"""
Test signal handlers for completion.
"""
from datetime import datetime
from unittest.mock import patch
import ddt
import pytest
from completion import handlers
from completion.models import BlockCompletion
from completion.test_utils import CompletionSetUpMixin
from django.test import TestCase
from pytz import utc
from xblock.completable import XBlockCompletionMode
from xblock.core import XBlock
from lms.djangoapps.grades.api import signals as grades_signals
from openedx.core.djangolib.testing.utils import skip_unless_lms
class CustomScorableBlock(XBlock):
    """
    A scorable block with a custom completion strategy.

    Declares ``has_custom_completion = True``; the handler tests below use it
    to verify that such blocks are skipped by the scorable-completion handler.
    """
    has_score = True  # block produces a score
    has_custom_completion = True  # block reports its own completion
    completion_mode = XBlockCompletionMode.COMPLETABLE
class ExcludedScorableBlock(XBlock):
    """
    A scorable block that is excluded from completion tracking.

    ``completion_mode = EXCLUDED`` marks it to be ignored by the
    scorable-completion handler, as exercised by the tests below.
    """
    has_score = True  # block produces a score
    has_custom_completion = False
    completion_mode = XBlockCompletionMode.EXCLUDED
@ddt.ddt
@skip_unless_lms
class ScorableCompletionHandlerTestCase(CompletionSetUpMixin, TestCase):
    """
    Test the scorable-block completion signal handler with the
    ENABLE_COMPLETION_TRACKING switch enabled.
    """
    COMPLETION_SWITCH_ENABLED = True
    def setUp(self):
        """Create a problem block key in the test course."""
        super().setUp()
        self.block_key = self.context_key.make_usage_key(block_type='problem', block_id='red')
    def call_scorable_block_completion_handler(self, block_key, score_deleted=None):
        """
        Call the scorable completion signal handler for the specified block.
        Optionally takes a value to pass as score_deleted; when None the
        keyword is omitted entirely so the handler's default applies.
        """
        if score_deleted is None:
            params = {}
        else:
            params = {'score_deleted': score_deleted}
        handlers.scorable_block_completion(
            sender=self,
            user_id=self.user.id,
            course_id=str(self.context_key),
            usage_id=str(block_key),
            weighted_earned=0.0,
            weighted_possible=3.0,
            modified=datetime.utcnow().replace(tzinfo=utc),
            score_db_table='submissions',
            **params
        )
    @ddt.data(
        (True, 0.0),
        (False, 1.0),
        (None, 1.0),
    )
    @ddt.unpack
    def test_handler_submits_completion(self, score_deleted, expected_completion):
        """A deleted score yields completion 0.0; otherwise 1.0 is recorded."""
        self.call_scorable_block_completion_handler(self.block_key, score_deleted)
        completion = BlockCompletion.objects.get(
            user=self.user,
            context_key=self.context_key,
            block_key=self.block_key,
        )
        assert completion.completion == expected_completion
    @XBlock.register_temp_plugin(CustomScorableBlock, 'custom_scorable')
    def test_handler_skips_custom_block(self):
        """Blocks with has_custom_completion=True must not get a completion."""
        custom_block_key = self.context_key.make_usage_key(block_type='custom_scorable', block_id='green')
        self.call_scorable_block_completion_handler(custom_block_key)
        completion = BlockCompletion.objects.filter(
            user=self.user,
            context_key=self.context_key,
            block_key=custom_block_key,
        )
        assert not completion.exists()
    @XBlock.register_temp_plugin(ExcludedScorableBlock, 'excluded_scorable')
    def test_handler_skips_excluded_block(self):
        """Blocks with completion_mode=EXCLUDED must not get a completion."""
        excluded_block_key = self.context_key.make_usage_key(block_type='excluded_scorable', block_id='blue')
        self.call_scorable_block_completion_handler(excluded_block_key)
        completion = BlockCompletion.objects.filter(
            user=self.user,
            context_key=self.context_key,
            block_key=excluded_block_key,
        )
        assert not completion.exists()
    def test_handler_skips_discussion_block(self):
        """Discussion blocks must not get a completion from score events."""
        discussion_block_key = self.context_key.make_usage_key(block_type='discussion', block_id='blue')
        self.call_scorable_block_completion_handler(discussion_block_key)
        completion = BlockCompletion.objects.filter(
            user=self.user,
            context_key=self.context_key,
            block_key=discussion_block_key,
        )
        assert not completion.exists()
    def test_signal_calls_handler(self):
        """PROBLEM_WEIGHTED_SCORE_CHANGED must reach submit_completion."""
        with patch('completion.handlers.BlockCompletion.objects.submit_completion') as mock_handler:
            grades_signals.PROBLEM_WEIGHTED_SCORE_CHANGED.send_robust(
                sender=self,
                user_id=self.user.id,
                course_id=str(self.context_key),
                usage_id=str(self.block_key),
                weighted_earned=0.0,
                weighted_possible=3.0,
                modified=datetime.utcnow().replace(tzinfo=utc),
                score_db_table='submissions',
            )
            mock_handler.assert_called()
@skip_unless_lms
class DisabledCompletionHandlerTestCase(CompletionSetUpMixin, TestCase):
    """
    Test that disabling the ENABLE_COMPLETION_TRACKING waffle switch prevents
    the signal handler from submitting a completion.
    """
    COMPLETION_SWITCH_ENABLED = False
    def setUp(self):
        """Create a problem block key in the test course."""
        super().setUp()
        self.block_key = self.context_key.make_usage_key(block_type='problem', block_id='red')
    def test_disabled_handler_does_not_submit_completion(self):
        """With the switch off, no BlockCompletion row may be created."""
        handlers.scorable_block_completion(
            sender=self,
            user_id=self.user.id,
            course_id=str(self.context_key),
            usage_id=str(self.block_key),
            weighted_earned=0.0,
            weighted_possible=3.0,
            modified=datetime.utcnow().replace(tzinfo=utc),
            score_db_table='submissions',
        )
        with pytest.raises(BlockCompletion.DoesNotExist):
            BlockCompletion.objects.get(
                user=self.user,
                context_key=self.context_key,
                block_key=self.block_key
            )
| [
"datetime.datetime.utcnow",
"completion.models.BlockCompletion.objects.get",
"xblock.core.XBlock.register_temp_plugin",
"pytest.raises",
"completion.models.BlockCompletion.objects.filter",
"ddt.data",
"unittest.mock.patch"
] | [((2066, 2114), 'ddt.data', 'ddt.data', (['(True, 0.0)', '(False, 1.0)', '(None, 1.0)'], {}), '((True, 0.0), (False, 1.0), (None, 1.0))\n', (2074, 2114), False, 'import ddt\n'), ((2562, 2629), 'xblock.core.XBlock.register_temp_plugin', 'XBlock.register_temp_plugin', (['CustomScorableBlock', '"""custom_scorable"""'], {}), "(CustomScorableBlock, 'custom_scorable')\n", (2589, 2629), False, 'from xblock.core import XBlock\n'), ((3072, 3143), 'xblock.core.XBlock.register_temp_plugin', 'XBlock.register_temp_plugin', (['ExcludedScorableBlock', '"""excluded_scorable"""'], {}), "(ExcludedScorableBlock, 'excluded_scorable')\n", (3099, 3143), False, 'from xblock.core import XBlock\n'), ((2349, 2452), 'completion.models.BlockCompletion.objects.get', 'BlockCompletion.objects.get', ([], {'user': 'self.user', 'context_key': 'self.context_key', 'block_key': 'self.block_key'}), '(user=self.user, context_key=self.context_key,\n block_key=self.block_key)\n', (2376, 2452), False, 'from completion.models import BlockCompletion\n'), ((2875, 2983), 'completion.models.BlockCompletion.objects.filter', 'BlockCompletion.objects.filter', ([], {'user': 'self.user', 'context_key': 'self.context_key', 'block_key': 'custom_block_key'}), '(user=self.user, context_key=self.context_key,\n block_key=custom_block_key)\n', (2905, 2983), False, 'from completion.models import BlockCompletion\n'), ((3396, 3506), 'completion.models.BlockCompletion.objects.filter', 'BlockCompletion.objects.filter', ([], {'user': 'self.user', 'context_key': 'self.context_key', 'block_key': 'excluded_block_key'}), '(user=self.user, context_key=self.context_key,\n block_key=excluded_block_key)\n', (3426, 3506), False, 'from completion.models import BlockCompletion\n'), ((3841, 3953), 'completion.models.BlockCompletion.objects.filter', 'BlockCompletion.objects.filter', ([], {'user': 'self.user', 'context_key': 'self.context_key', 'block_key': 'discussion_block_key'}), '(user=self.user, context_key=self.context_key,\n 
block_key=discussion_block_key)\n', (3871, 3953), False, 'from completion.models import BlockCompletion\n'), ((4091, 4161), 'unittest.mock.patch', 'patch', (['"""completion.handlers.BlockCompletion.objects.submit_completion"""'], {}), "('completion.handlers.BlockCompletion.objects.submit_completion')\n", (4096, 4161), False, 'from unittest.mock import patch\n'), ((5515, 5558), 'pytest.raises', 'pytest.raises', (['BlockCompletion.DoesNotExist'], {}), '(BlockCompletion.DoesNotExist)\n', (5528, 5558), False, 'import pytest\n'), ((5572, 5675), 'completion.models.BlockCompletion.objects.get', 'BlockCompletion.objects.get', ([], {'user': 'self.user', 'context_key': 'self.context_key', 'block_key': 'self.block_key'}), '(user=self.user, context_key=self.context_key,\n block_key=self.block_key)\n', (5599, 5675), False, 'from completion.models import BlockCompletion\n'), ((1948, 1965), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (1963, 1965), False, 'from datetime import datetime\n'), ((5411, 5428), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (5426, 5428), False, 'from datetime import datetime\n'), ((4513, 4530), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (4528, 4530), False, 'from datetime import datetime\n')] |
from typing import Tuple
import pygame
import numpy as np
from .wall import Wall
class MovingWall(Wall):
    """A Wall whose rectangle can be moved and resized after creation."""

    def __init__(self,
                 top: int = 0,
                 left: int = 0,
                 bottom: int = 1,
                 right: int = 1):
        super().__init__(top, left, bottom, right)

    def update(self, left, top, right, bottom):
        """Rebuild the wall to occupy the rectangle (left, top, right, bottom)."""
        new_width = right - left
        new_height = bottom - top
        # Fresh surface at the new size, painted translucent green.
        surface = pygame.Surface((new_width, new_height)).convert_alpha()
        surface.fill((0, 255, 0, 90))
        self.surf = surface
        # Collision mask derived from the surface (alpha threshold 50).
        self.mask = pygame.mask.from_surface(surface, 50)
        # Fit the surface's rect into the requested bounding rectangle.
        target = pygame.Rect(left, top, new_width, new_height)
        self.rect = surface.get_rect().fit(target)
| [
"pygame.Surface",
"pygame.mask.from_surface",
"pygame.Rect"
] | [((633, 672), 'pygame.mask.from_surface', 'pygame.mask.from_surface', (['self.surf', '(50)'], {}), '(self.surf, 50)\n', (657, 672), False, 'import pygame\n'), ((810, 847), 'pygame.Rect', 'pygame.Rect', (['left', 'top', 'width', 'height'], {}), '(left, top, width, height)\n', (821, 847), False, 'import pygame\n'), ((480, 511), 'pygame.Surface', 'pygame.Surface', (['(width, height)'], {}), '((width, height))\n', (494, 511), False, 'import pygame\n')] |
import tensorflow as tf
import os
from bayes_filter import logging
from bayes_filter.filters import FreeTransitionVariationalBayes
from bayes_filter.feeds import DatapackFeed, IndexFeed
from bayes_filter.misc import make_example_datapack, maybe_create_posterior_solsets, get_screen_directions
from bayes_filter.datapack import DataPack, _load_array_file
import numpy as np
if __name__ == '__main__':
    # Output directory for this run of the variational-Bayes filter.
    output_folder = os.path.join(os.path.abspath('test_filter_vi_P126+65'), 'run15')
    os.makedirs(output_folder, exist_ok=True)
    # datapack = make_example_datapack(5, 10, 2, name=os.path.join(output_folder, 'test_data.h5'), gain_noise=0.3,
    #                                  index_n=1, obs_type='DTEC', clobber=True,
    #                                  kernel_hyperparams={'variance': 3.5 ** 2, 'lengthscales': 15., 'a': 250.,
    #                                                      'b': 100., 'timescale': 50.})
    # Load the observed datapack (hard-coded cluster path).
    datapack = DataPack('/net/lofar1/data1/albert/imaging/data/P126+65_compact_raw/P126+65_full_compact_raw.h5')
    datapack.current_solset = 'sol000'
    actual_antenna_labels, _ = datapack.antennas
    # Re-order the reference LOFAR array antennas to match the datapack's labels.
    antenna_labels, antennas = _load_array_file(DataPack.lofar_array)
    antennas = np.stack([antennas[list(antenna_labels).index(a.astype(antenna_labels.dtype)),:] for a in actual_antenna_labels],axis=0)
    datapack.set_antennas(antenna_labels, antennas)
    patch_names, _ = datapack.directions
    # NOTE(review): this result is immediately overwritten by the next line,
    # so the get_directions call below is effectively dead code.
    _, screen_directions = datapack.get_directions(patch_names)
    screen_directions = get_screen_directions('/home/albert/ftp/image.pybdsm.srl.fits', max_N=None)
    maybe_create_posterior_solsets(datapack, 'sol000', posterior_name='posterior', screen_directions=screen_directions)
    # config = tf.ConfigProto(allow_soft_placement = True)
    sess = tf.Session(graph=tf.Graph())#,config=config)
    # from tensorflow.python import debug as tf_debug
    # sess = tf_debug.LocalCLIDebugWrapperSession(sess)
    with sess:
        with tf.device('/device:CPU:0'):
            logging.info("Setting up the index and datapack feeds.")
            # NOTE(review): 'postieror_name' looks like a typo for
            # 'posterior_name' -- it must match DatapackFeed's signature; verify.
            datapack_feed = DatapackFeed(datapack,
                                         selection={'ant': list(range(1,7,2)) + list(range(45, 62, 1)),'dir':None, 'pol':slice(0,1,1), 'time':slice(0,None,1)},
                                         solset='sol000',
                                         postieror_name='posterior',
                                         index_n=1)
            logging.info("Setting up the filter.")
            free_transition = FreeTransitionVariationalBayes(datapack_feed=datapack_feed, output_folder=output_folder)
            free_transition.init_filter()
            # Build the filtering op; parameters control kernel choice,
            # parallelism and the variational solver schedule.
            filter_op = free_transition.filter(
                parallel_iterations=10,
                kernel_params={'resolution': 4, 'fed_kernel': 'M52', 'obs_type': 'DTEC'},
                num_parallel_filters=10,
                solver_params=dict(iters=200,
                                   learning_rate=0.1,
                                   gamma=0.3,
                                   stop_patience=6),
                num_mcmc_param_samples_learn=50,
                num_mcmc_param_samples_infer=100,
                minibatch_size=None,
                y_sigma=0.1)
            logging.info("Initializing the filter")
            sess.run(free_transition.initializer)
            # print(sess.run([free_transition.full_block_size, free_transition.datapack_feed.time_feed.slice_size, free_transition.datapack_feed.index_feed.step]))
            logging.info("Running the filter")
            sess.run(filter_op)
| [
"tensorflow.Graph",
"bayes_filter.misc.get_screen_directions",
"tensorflow.device",
"bayes_filter.filters.FreeTransitionVariationalBayes",
"os.makedirs",
"os.path.abspath",
"bayes_filter.logging.info",
"bayes_filter.datapack._load_array_file",
"bayes_filter.misc.maybe_create_posterior_solsets",
"b... | [((490, 531), 'os.makedirs', 'os.makedirs', (['output_folder'], {'exist_ok': '(True)'}), '(output_folder, exist_ok=True)\n', (501, 531), False, 'import os\n'), ((945, 1052), 'bayes_filter.datapack.DataPack', 'DataPack', (['"""/net/lofar1/data1/albert/imaging/data/P126+65_compact_raw/P126+65_full_compact_raw.h5"""'], {}), "(\n '/net/lofar1/data1/albert/imaging/data/P126+65_compact_raw/P126+65_full_compact_raw.h5'\n )\n", (953, 1052), False, 'from bayes_filter.datapack import DataPack, _load_array_file\n'), ((1162, 1200), 'bayes_filter.datapack._load_array_file', '_load_array_file', (['DataPack.lofar_array'], {}), '(DataPack.lofar_array)\n', (1178, 1200), False, 'from bayes_filter.datapack import DataPack, _load_array_file\n'), ((1519, 1594), 'bayes_filter.misc.get_screen_directions', 'get_screen_directions', (['"""/home/albert/ftp/image.pybdsm.srl.fits"""'], {'max_N': 'None'}), "('/home/albert/ftp/image.pybdsm.srl.fits', max_N=None)\n", (1540, 1594), False, 'from bayes_filter.misc import make_example_datapack, maybe_create_posterior_solsets, get_screen_directions\n'), ((1599, 1719), 'bayes_filter.misc.maybe_create_posterior_solsets', 'maybe_create_posterior_solsets', (['datapack', '"""sol000"""'], {'posterior_name': '"""posterior"""', 'screen_directions': 'screen_directions'}), "(datapack, 'sol000', posterior_name=\n 'posterior', screen_directions=screen_directions)\n", (1629, 1719), False, 'from bayes_filter.misc import make_example_datapack, maybe_create_posterior_solsets, get_screen_directions\n'), ((434, 475), 'os.path.abspath', 'os.path.abspath', (['"""test_filter_vi_P126+65"""'], {}), "('test_filter_vi_P126+65')\n", (449, 475), False, 'import os\n'), ((3257, 3296), 'bayes_filter.logging.info', 'logging.info', (['"""Initializing the filter"""'], {}), "('Initializing the filter')\n", (3269, 3296), False, 'from bayes_filter import logging\n'), ((3511, 3545), 'bayes_filter.logging.info', 'logging.info', (['"""Running the filter"""'], {}), "('Running the 
filter')\n", (3523, 3545), False, 'from bayes_filter import logging\n'), ((1802, 1812), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1810, 1812), True, 'import tensorflow as tf\n'), ((1968, 1994), 'tensorflow.device', 'tf.device', (['"""/device:CPU:0"""'], {}), "('/device:CPU:0')\n", (1977, 1994), True, 'import tensorflow as tf\n'), ((2008, 2064), 'bayes_filter.logging.info', 'logging.info', (['"""Setting up the index and datapack feeds."""'], {}), "('Setting up the index and datapack feeds.')\n", (2020, 2064), False, 'from bayes_filter import logging\n'), ((2468, 2506), 'bayes_filter.logging.info', 'logging.info', (['"""Setting up the filter."""'], {}), "('Setting up the filter.')\n", (2480, 2506), False, 'from bayes_filter import logging\n'), ((2537, 2630), 'bayes_filter.filters.FreeTransitionVariationalBayes', 'FreeTransitionVariationalBayes', ([], {'datapack_feed': 'datapack_feed', 'output_folder': 'output_folder'}), '(datapack_feed=datapack_feed, output_folder=\n output_folder)\n', (2567, 2630), False, 'from bayes_filter.filters import FreeTransitionVariationalBayes\n')] |
from typing import Dict, Any, Optional, List
import gym
import numpy as np
from collections import defaultdict
from flatland.core.grid.grid4_utils import get_new_position
from flatland.envs.agent_utils import EnvAgent, RailAgentStatus
from flatland.envs.rail_env import RailEnv, RailEnvActions
from envs.flatland.observations.segment_graph import Graph
from envs.flatland.utils.gym_env import StepOutput
def available_actions(env: RailEnv, agent: EnvAgent, allow_noop=False) -> List[int]:
    """Return a 0/1 availability mask for the agent's movement actions.

    The returned list has len(RailEnvActions) - 1 entries; given the standard
    RailEnvActions ordering it covers [MOVE_LEFT, MOVE_FORWARD, MOVE_RIGHT,
    STOP_MOVING] (DO_NOTHING is dropped by the trailing [1:] slice).

    NOTE(review): when allow_noop is True, available_acts[DO_NOTHING] is set
    but that index is then removed by the [1:] slice, so the flag currently
    has no effect on the returned mask -- confirm intended behavior.
    """
    if agent.position is None:
        # Agent not yet placed on the grid: only FORWARD and STOP make sense
        # (mask order per the standard RailEnvActions enum ordering).
        return [0, 1, 0, 1]
    else:
        possible_transitions = env.rail.get_transitions(*agent.position, agent.direction)
        # some actions are always available:
        available_acts = [0] * len(RailEnvActions)
        available_acts[RailEnvActions.MOVE_FORWARD] = 1
        available_acts[RailEnvActions.STOP_MOVING] = 1
        if allow_noop:
            available_acts[RailEnvActions.DO_NOTHING] = 1
        # check if turn left/right are available:
        for movement in range(4):
            if possible_transitions[movement]:
                # A transition one step clockwise from the heading is a right
                # turn; one step counter-clockwise is a left turn.
                if movement == (agent.direction + 1) % 4:
                    available_acts[RailEnvActions.MOVE_RIGHT] = 1
                elif movement == (agent.direction - 1) % 4:
                    available_acts[RailEnvActions.MOVE_LEFT] = 1
        return available_acts[1:]
def potential_deadlock_action_masking(env: "RailEnv", potential_deadlock: List) -> List[int]:
    """Build an availability mask [LEFT, FORWARD, RIGHT, STOP] from
    per-direction potential-deadlock flags.

    :param env: unused; kept for signature compatibility with callers.
    :param potential_deadlock: three flags for (left, forward, right):
        1  -> moving that way risks a deadlock,
        -1 -> that direction does not exist,
        0  -> safe to move.
    :return: 0/1 mask over the four actions; STOP_MOVING is always available.
    """
    # STOP_MOVING (last entry) is always allowed.
    available_actions = [0, 0, 0, 1]
    for idx in range(3):
        # BUG FIX: the branches were inverted -- a direction is available only
        # when it is neither a potential deadlock (1) nor nonexistent (-1).
        flag = potential_deadlock[idx]
        available_actions[idx] = 1 if flag != 1 and flag != -1 else 0
    return available_actions
def priority_dist_action_masking(dist_ind, priority) -> List[int]:
    """Mask allowing only STOP_MOVING when the agent has no priority,
    otherwise only the single action at index ``dist_ind``.

    :param dist_ind: index (0-3) of the action to allow when prioritized.
    :param priority: 0 means the agent must stop; anything else lets it move.
    :return: 0/1 availability mask over the four actions.
    """
    if priority == 0:
        # No priority: the agent may only stop.
        return [0, 0, 0, 1]
    mask = [0, 0, 0, 0]
    mask[dist_ind] = 1
    return mask
class AvailableActionsWrapper(gym.Wrapper):
    """Augments every per-agent observation with an action-availability mask.

    Each observation becomes ``{'obs': <original obs>, 'available_actions':
    <0/1 ndarray of length action_space.n>}``.
    """

    def __init__(self, env, allow_noop=False, potential_deadlock_masking=False) -> None:
        super().__init__(env)
        self._allow_noop = allow_noop
        self._potential_deadlock_masking = potential_deadlock_masking
        # Extend the observation space with the mask component.
        mask_space = gym.spaces.Box(low=0, high=1, shape=(self.action_space.n,), dtype=np.int32)
        self.observation_space = gym.spaces.Dict({
            'obs': self.env.observation_space,
            'available_actions': mask_space,
        })

    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        """Step the wrapped env and attach masks to the observations."""
        obs, reward, done, info = self.env.step(action_dict)
        return StepOutput(self._transform_obs(obs), reward, done, info)

    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        """Reset the wrapped env and attach masks to the initial observations."""
        return self._transform_obs(self.env.reset(random_seed))

    def _transform_obs(self, obs):
        """Wrap each agent's obs in a dict carrying its availability mask."""
        rail_env = self.unwrapped.rail_env
        transformed = {}
        for handle, agent_obs in obs.items():
            if self._potential_deadlock_masking:
                # Mask derived from the first two observation features.
                mask = priority_dist_action_masking(agent_obs[0], agent_obs[1])
            else:
                # Mask derived from the rail transitions at the agent's cell.
                mask = available_actions(rail_env, rail_env.agents[handle], self._allow_noop)
            transformed[handle] = {
                'obs': agent_obs,
                'available_actions': np.asarray(mask),
            }
        return transformed
def find_all_cells_where_agent_can_choose(rail_env: "RailEnv"):
    """Classify grid cells relevant to decision making.

    :param rail_env: the flatland RailEnv whose grid is scanned.
    :return: tuple of three sets ``(switches, switches_neighbors,
        decision_cells)`` of (row, column) positions, where decision_cells is
        the union of the first two.
    """
    switches = []
    switches_neighbors = []
    directions = list(range(4))
    for h in range(rail_env.height):
        for w in range(rail_env.width):
            # BUG FIX: flatland positions are (row, column) = (h, w); the
            # previous (w, h) transposed coordinates, so the collected cells
            # could never match agent.position on non-symmetric grids.
            pos = (h, w)
            is_switch = False
            # Check for switch: if there is more than one outgoing transition
            for orientation in directions:
                possible_transitions = rail_env.rail.get_transitions(*pos, orientation)
                num_transitions = np.count_nonzero(possible_transitions)
                if num_transitions > 1:
                    switches.append(pos)
                    is_switch = True
                    break
            if is_switch:
                # Add all neighbouring rails, if pos is a switch
                for orientation in directions:
                    possible_transitions = rail_env.rail.get_transitions(*pos, orientation)
                    for movement in directions:
                        if possible_transitions[movement]:
                            switches_neighbors.append(get_new_position(pos, movement))
    decision_cells = switches + switches_neighbors
    return tuple(map(set, (switches, switches_neighbors, decision_cells)))
class SkipNoChoiceCellsWrapper(gym.Wrapper):
    """Repeats env steps internally until some agent reaches a cell where a
    decision matters (a switch, its neighborhood, or its start), optionally
    folding the rewards of the skipped steps into the next returned reward.
    """
    def __init__(self, env, accumulate_skipped_rewards: bool, discounting: float) -> None:
        """
        :param env: wrapped multi-agent flatland gym env.
        :param accumulate_skipped_rewards: if True, rewards earned during
            skipped steps are discounted into the next emitted reward.
        :param discounting: discount factor applied per skipped step.
        """
        super().__init__(env)
        # Cell sets are filled in reset() from the current rail grid.
        self._switches = None
        self._switches_neighbors = None
        self._decision_cells = None
        self._accumulate_skipped_rewards = accumulate_skipped_rewards
        self._discounting = discounting
        # Per-agent list of rewards collected while steps were skipped.
        self._skipped_rewards = defaultdict(list)
    def _on_decision_cell(self, agent: EnvAgent):
        # Off-grid agents and agents at their start count as decision points.
        return agent.position is None \
               or agent.position == agent.initial_position \
               or agent.position in self._decision_cells
    def _on_switch(self, agent: EnvAgent):
        # True if the agent currently stands on a switch cell.
        return agent.position in self._switches
    def _next_to_switch(self, agent: EnvAgent):
        # True if the agent stands on a cell adjacent to a switch.
        return agent.position in self._switches_neighbors
    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        """Step repeatedly until at least one agent is done or on a decision
        cell; only those agents appear in the returned StepOutput."""
        o, r, d, i = {}, {}, {}, {}
        # Loop until at least one agent produces an observation to emit.
        while len(o) == 0:
            obs, reward, done, info = self.env.step(action_dict)
            for agent_id, agent_obs in obs.items():
                if done[agent_id] or self._on_decision_cell(self.unwrapped.rail_env.agents[agent_id]):
                    o[agent_id] = agent_obs
                    r[agent_id] = reward[agent_id]
                    d[agent_id] = done[agent_id]
                    i[agent_id] = info[agent_id]
                    if self._accumulate_skipped_rewards:
                        # Fold skipped rewards into this step's reward,
                        # discounting from the most recent skip backwards.
                        discounted_skipped_reward = r[agent_id]
                        for skipped_reward in reversed(self._skipped_rewards[agent_id]):
                            discounted_skipped_reward = self._discounting * discounted_skipped_reward + skipped_reward
                        r[agent_id] = discounted_skipped_reward
                        self._skipped_rewards[agent_id] = []
                elif self._accumulate_skipped_rewards:
                    # Not a decision point yet: remember the reward for later.
                    self._skipped_rewards[agent_id].append(reward[agent_id])
            d['__all__'] = done['__all__']
            # Subsequent internal steps send an empty action dict so agents
            # keep their default behavior while being skipped.
            action_dict = {}
        return StepOutput(o, r, d, i)
    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        """Reset the env and recompute the decision-cell sets for its grid."""
        obs = self.env.reset(random_seed)
        self._switches, self._switches_neighbors, self._decision_cells = \
            find_all_cells_where_agent_can_choose(self.unwrapped.rail_env)
        return obs
class RewardWrapperShortestPathObs(gym.Wrapper):
    """Reward-shaping wrapper that replaces the env's per-agent rewards with a
    hand-crafted signal based on shortest-path progress, deadlock flags and
    priority features read directly from the observation vector.

    NOTE(review): the reward logic indexes the observation by fixed positions
    (e.g. [7] deadlock, [9]/[15] path distances, [19-21] per-direction
    potential-deadlock flags, [30] on-switch, [39] priority) -- these meanings
    are inferred from the variable names here; confirm against the observation
    builder.
    """
    def __init__(self, env, rewards) -> None:
        """
        :param env: wrapped multi-agent flatland gym env.
        :param rewards: dict of reward-shaping coefficients (see keys below).
        """
        super().__init__(env)
        self._finished_reward = rewards['finished_reward']
        self._invalid_action_reward = rewards['invalid_action_reward']
        self._not_finished_reward = rewards['not_finished_reward']
        self._step_reward = rewards['step_reward']
        self._step_shortest_path = rewards['step_shortest_path']
        self._step_second_shortest_path = rewards['step_second_shortest_path']
        self._deadlock_reward = rewards['deadlock_reward']
        self._dont_move_reward = rewards['dont_move_reward']
        self._deadlock_avoidance_reward = rewards['deadlock_avoidance_reward']
        self._stop_on_switch_reward = rewards['stop_on_switch_reward']
        self._stop_potential_deadlock_reward = rewards['stop_potential_deadlock_reward']
        self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
        self._priority_reward = rewards['priority_reward']
        self._priority_reward_shortest_path = rewards['priority_reward_shortest_path']
        self._priority_reward_alternative_path = rewards['priority_reward_alternative_path']
        self._priority_penalty = rewards['priority_penalty']
        self._priority_no_path_penalty = rewards['priority_no_path_penalty']
        rail_env: RailEnv = self.unwrapped.rail_env
        # Per-agent state from the previous step, used by reward_function.
        self._prev_dist = {agent.handle: [-1, -1] for agent in rail_env.agents}
        self._prev_action_mask = {agent.handle: available_actions(rail_env, agent, False) for agent in rail_env.agents}
        self._prev_pos = {agent.handle: Graph.get_virtual_position(agent.handle) for agent in rail_env.agents}
        self._prev_potential_deadlock = {agent.handle: (0, 0, 0) for agent in rail_env.agents}
        self._prev_on_switch = {agent.handle: 0 for agent in rail_env.agents}
    @staticmethod
    def reward_function(handle, agent_obs, agent_action, agent_done, agent_status, agent_virtual_pos,
                        _prev_potential_deadlock, _prev_dist, _prev_action_mask, _prev_pos, _prev_on_switch,
                        _finished_reward, _invalid_action_reward, _not_finished_reward, _step_reward,
                        _step_shortest_path, _step_second_shortest_path, _deadlock_reward, _dont_move_reward,
                        _deadlock_avoidance_reward, _stop_on_switch_reward, _stop_potential_deadlock_reward,
                        _deadlock_unusable_switch_avoidance_reward, _priority_reward, _priority_reward_shortest_path,
                        _priority_reward_alternative_path, _priority_penalty, _priority_no_path_penalty):
        """Compute the shaped reward for one agent from its observation, the
        action taken, its done/status flags and the previous-step state.

        Note: agent_virtual_pos, _prev_action_mask and _prev_pos are accepted
        but not used in the current formula.
        """
        if agent_done: # done
            if agent_status in [RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED]:
                reward = _finished_reward
            elif agent_obs[7] == 1:
                # Episode ended while the agent was in a deadlock.
                reward = _deadlock_reward
            else:
                reward = _not_finished_reward
        elif agent_obs[7] == 1: # deadlock
            reward = _deadlock_reward
        else:
            # Per-direction (left, forward, right) potential-deadlock flags.
            potential_deadlock = [agent_obs[19], agent_obs[20], agent_obs[21]]
            available_dirs = sum(1 for d in potential_deadlock if d != -1)
            deadlock_dirs = sum(1 for d in potential_deadlock if d == 1)
            if agent_action == RailEnvActions.STOP_MOVING:
                if agent_obs[30] == 1:
                    # Stopping while standing on a switch is discouraged.
                    reward = _stop_on_switch_reward
                elif agent_obs[36] == 1:
                    #TODO think about this
                    reward = _deadlock_unusable_switch_avoidance_reward * 1 / agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
                # elif (deadlock_dirs / available_dirs) == 1. and agent_action == RailEnvActions.STOP_MOVING:
                #     reward = _stop_potential_deadlock_reward * 1/agent_obs[35] if agent_obs[35] >= 1 else _stop_on_switch_reward
                elif agent_obs[39] == 0:
                    # No priority: stopping is the desired behavior.
                    reward = _priority_reward
                else:
                    reward = _dont_move_reward
            elif agent_action in [RailEnvActions.MOVE_LEFT, RailEnvActions.MOVE_RIGHT, RailEnvActions.MOVE_FORWARD]:
                # Indices (+1 = action value) that led into a deadlock or were
                # unavailable in the previous step.
                deadlock_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == 1]
                unavaliable_actions = [idx + 1 for idx, action in enumerate(_prev_potential_deadlock) if action == -1]
                if _prev_on_switch == 1 and (agent_action not in deadlock_actions and len(deadlock_actions) > 0) and (
                        agent_action not in unavaliable_actions):
                    # On a switch, a valid move that dodged a known deadlock.
                    reward = _deadlock_avoidance_reward
                elif agent_obs[39] == 1:
                    if agent_obs[9] < _prev_dist[0] and agent_obs[9] < 5000:
                        # Progress along the shortest path (5000 ~ "no path").
                        reward = _priority_reward_shortest_path
                    elif agent_obs[9] < _prev_dist[1] < 5000:
                        reward = _priority_reward_alternative_path
                    else:
                        reward = _priority_no_path_penalty
                elif agent_obs[39] == 0:
                    # Moving without priority is penalized.
                    reward = _priority_penalty
                else:
                    reward = _step_reward
            else:
                # Any other action value (e.g. DO_NOTHING) is penalized flat.
                reward = -1
        return reward
    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        """Step the env, replace the rewards with reward_function's output and
        refresh the per-agent previous-step caches."""
        rail_env: RailEnv = self.unwrapped.rail_env
        # Shift incoming actions by one; presumably the policy emits actions
        # in the masked 0-3 range while the env expects RailEnvActions 1-4 --
        # confirm against the action wrapper.
        for handle in action_dict:
            action_dict[handle] += 1
        obs, reward, done, info = self.env.step(action_dict)
        o, r, d, i = {}, {}, {}, {}
        for agent_id, agent_obs in obs.items():
            o[agent_id] = obs[agent_id]
            d[agent_id] = done[agent_id]
            i[agent_id] = info[agent_id]
            r[agent_id] = self.reward_function(handle=agent_id,
                                               agent_obs=agent_obs,
                                               agent_action=action_dict[agent_id],
                                               agent_done=done[agent_id],
                                               agent_status=rail_env.agents[agent_id].status,
                                               agent_virtual_pos=Graph.get_virtual_position(agent_id),
                                               _prev_potential_deadlock=self._prev_potential_deadlock[agent_id],
                                               _prev_dist=self._prev_dist[agent_id],
                                               _prev_pos=self._prev_pos[agent_id],
                                               _prev_action_mask=self._prev_action_mask[agent_id],
                                               _prev_on_switch=self._prev_on_switch[agent_id],
                                               _finished_reward=self._finished_reward,
                                               _invalid_action_reward=self._invalid_action_reward,
                                               _not_finished_reward=self._not_finished_reward,
                                               _step_reward=self._step_reward,
                                               _step_shortest_path=self._step_shortest_path,
                                               _step_second_shortest_path=self._step_second_shortest_path,
                                               _deadlock_reward=self._deadlock_reward,
                                               _dont_move_reward=self._dont_move_reward,
                                               _deadlock_avoidance_reward=self._deadlock_avoidance_reward,
                                               _stop_on_switch_reward=self._stop_on_switch_reward,
                                               _stop_potential_deadlock_reward=self._stop_potential_deadlock_reward,
                                               _deadlock_unusable_switch_avoidance_reward=self._deadlock_unusable_switch_avoidance_reward,
                                               _priority_penalty=self._priority_penalty,
                                               _priority_reward=self._priority_reward,
                                               _priority_reward_alternative_path=self._priority_reward_alternative_path,
                                               _priority_reward_shortest_path=self._priority_reward_shortest_path,
                                               _priority_no_path_penalty=self._priority_no_path_penalty
                                               )
            # set prev_states to the length of shortest path if you go L, then F, then R (L,F,R). That corresponds to
            # features 9, 10, 11 in the feature vector
            # NOTE(review): the code actually stores features 9 and 15, not
            # 9/10/11 as the comment above claims -- reconcile with the
            # observation builder.
            # print(f"obs: {o}, reward: {r}, prev_dist: {self._prev_dist}")
            self._prev_dist[agent_id] = (agent_obs[9], agent_obs[15])
            self._prev_action_mask[agent_id] = available_actions(rail_env, rail_env.agents[agent_id], False)
            # update potential_deadlock attribute
            self._prev_potential_deadlock[agent_id] = (agent_obs[19], agent_obs[20], agent_obs[21])
            # update prev_pos
            self._prev_pos[agent_id] = Graph.get_virtual_position(agent_id)
            self._prev_on_switch[agent_id] = agent_obs[30]
        d['__all__'] = done['__all__'] or all(d.values())
        return StepOutput(o, r, d, info={agent: {
            'max_episode_steps': int(4 * 2 * (
                    self.rail_env.width + self.rail_env.height + self.rail_env.get_num_agents() / self.num_cities)),
            'num_agents': self.rail_env.get_num_agents(),
            'agent_done': d[agent] and agent not in self.rail_env.active_agents,
        } for agent in o.keys()})
    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        """Reset the env and seed the previous-distance cache from the first
        observation's distance features (indices 9 and 15)."""
        obs = self.env.reset(random_seed=random_seed)
        self._prev_dist = {k: (o[9], o[15]) for k, o in obs.items()}
        return obs
class RewardWrapper(gym.Wrapper):
    def __init__(self, env, rewards) -> None:
        """Reward-shaping wrapper (reduced variant).

        Only a subset of the coefficients used by RewardWrapperShortestPathObs
        is read from ``rewards``; the remaining keys (kept below as commented
        code) are unused in this variant.

        :param env: wrapped multi-agent flatland gym env.
        :param rewards: dict of reward-shaping coefficients.
        """
        super().__init__(env)
        # self._finished_reward = rewards['finished_reward']
        # self._invalid_action_reward = rewards['invalid_action_reward']
        # self._not_finished_reward = rewards['not_finished_reward']
        # self._step_reward = rewards['step_reward']
        # self._step_shortest_path = rewards['step_shortest_path']
        # self._step_second_shortest_path = rewards['step_second_shortest_path']
        # self._deadlock_reward = rewards['deadlock_reward']
        # self._dont_move_reward = rewards['dont_move_reward']
        # self._deadlock_avoidance_reward = rewards['deadlock_avoidance_reward']
        # self._stop_on_switch_reward = rewards['stop_on_switch_reward']
        # self._stop_potential_deadlock_reward = rewards['stop_potential_deadlock_reward']
        # self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
        # self._priority_reward = rewards['priority_reward']
        # self._priority_reward_shortest_path = rewards['priority_reward_shortest_path']
        # self._priority_reward_alternative_path = rewards['priority_reward_alternative_path']
        # self._priority_penalty = rewards['priority_penalty']
        # self._priority_no_path_penalty = rewards['priority_no_path_penalty']
        # Active coefficients for this variant:
        self._finished_reward = rewards['finished_reward']
        self._deadlock_reward = rewards['deadlock_reward']
        self._step_reward = rewards['step_reward']
        self._deadlock_unusable_switch_avoidance_reward = rewards['deadlock_unusable_switch_avoidance']
        self._stop_priority_depart = rewards['stop_priority_depart']
        self._stop_no_deadlocks_reward = rewards['stop_no_deadlocks_reward']
        rail_env: RailEnv = self.unwrapped.rail_env
        # self._prev_dist = {}
        # self._prev_action_mask = {agent.handle: available_actions(rail_env, agent, False) for agent in rail_env.agents}
        # self._prev_pos = {agent.handle: Graph.get_virtual_position(agent.handle) for agent in rail_env.agents}
        #
        # self._prev_potential_deadlock = {}
        # self._prev_on_switch = {}
        # self._prev_deadlock_unusable = {}
        # Per-agent previous-step caches used by reward_function.
        self._prev_shortest_action = {}
        self._prev_priority = {}
def reward_function(self, handle, agent_obs, agent_done, agent_action=None, agent_status=None, **_ignored):
    """Compute the shaped reward for one agent transition.

    Removed ~120 lines of dead, commented-out reward-shaping variants that
    obscured the live logic; behavior of the live branches is unchanged.

    :param handle: the agent's handle (id), used to look up its cached priority
    :param agent_obs: the agent's observation vector; index 5 is read as a
        "must stand still" flag (1 means the agent should stop) -- TODO confirm
    :param agent_action: the ``RailEnvActions`` value the agent chose
    :param agent_done: whether the environment reported the agent as done
    :param agent_status: the agent's ``RailAgentStatus``
    :return: the numeric reward
    """
    if agent_done:
        # A done agent earns the full reward only if it actually reached its
        # target; otherwise (e.g. episode timeout) it gets the plain step reward.
        if agent_status in (RailAgentStatus.DONE, RailAgentStatus.DONE_REMOVED):
            return self._finished_reward
        return self._step_reward
    # The agent is expected to stand still when the observation flags it
    # (agent_obs[5] == 1) or when it had no priority on the previous step.
    # Short-circuit order matters: the priority cache is only consulted when
    # the observation flag is not set (mirrors the original branch order).
    should_stop = agent_obs[5] == 1 or self._prev_priority[handle] == 0
    did_stop = agent_action == RailEnvActions.STOP_MOVING
    # Matching the expected behavior is neutral (0); mismatching costs -10.
    return 0 if did_stop == should_stop else -10
def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
    """Translate reduced actions, step the wrapped env and shape the rewards.

    Incoming actions are shifted by +1 to align with ``RailEnvActions``;
    every resulting movement action (< 4) is replaced by the agent's
    concrete shortest-path action.

    Removed the per-agent ``i`` accumulator that was filled but never
    returned (``info`` is passed through directly), plus dead comments.

    :param action_dict: mapping from agent handle to the policy's action
    :return: the shaped ``StepOutput`` for all observed agents
    """
    rail_env: RailEnv = self.unwrapped.rail_env
    for handle in action_dict:
        # Align the reduced action space with RailEnvActions values.
        action_dict[handle] += 1
        if action_dict[handle] < 4:
            # Movement action: substitute the closest shortest-path action.
            action_dict[handle] = possible_actions_sorted_by_distance(rail_env, handle)[0][0]
    obs, reward, done, info = self.env.step(action_dict)
    o, r, d = {}, {}, {}
    for agent_id, agent_obs in obs.items():
        o[agent_id] = obs[agent_id]
        d[agent_id] = done[agent_id]
        # Shaped reward based on the *previous* step's priority bookkeeping.
        r[agent_id] = self.reward_function(handle=agent_id,
                                           agent_obs=agent_obs,
                                           agent_action=action_dict[agent_id],
                                           agent_done=done[agent_id],
                                           agent_status=rail_env.agents[agent_id].status,
                                           )
        # Cache the observation parts the next reward computation needs:
        # the [left, forward, right] distances and the priority flag.
        self._prev_shortest_action[agent_id] = [agent_obs[0], agent_obs[1], agent_obs[2]]
        self._prev_priority[agent_id] = agent_obs[3]
    d['__all__'] = done['__all__'] or all(d.values())
    return StepOutput(o, r, d, info={agent: {
        # upper bound on episode length, as used by flatland's schedules
        'max_episode_steps': int(4 * 2 * (
            self.rail_env.width + self.rail_env.height + self.rail_env.get_num_agents() / self.num_cities)),
        'num_agents': self.rail_env.get_num_agents(),
        'agent_done': d[agent] and agent not in self.rail_env.active_agents,
    } for agent in o.keys()})
def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
    """Reset the wrapped environment and re-seed the per-agent caches.

    :param random_seed: optional seed forwarded to the wrapped env
    :return: the initial per-agent observations
    """
    obs = self.env.reset(random_seed=random_seed)
    # Seed the caches consulted by reward_function on the first step:
    # the [left, forward, right] distance triple and the priority flag.
    self._prev_shortest_action = {}
    self._prev_priority = {}
    for agent_id, agent_obs in obs.items():
        self._prev_shortest_action[agent_id] = [agent_obs[0], agent_obs[1], agent_obs[2]]
        self._prev_priority[agent_id] = agent_obs[3]
    return obs
class SparseRewardWrapper(gym.Wrapper):
    """Replaces the wrapped env's rewards with a sparse terminal signal.

    An agent receives ``finished_reward`` once it truly reaches its target,
    ``not_finished_reward`` when its episode ends without arriving, and 0
    on every other step.
    """

    def __init__(self, env, finished_reward=1, not_finished_reward=-1) -> None:
        super().__init__(env)
        self._finished_reward = finished_reward
        self._not_finished_reward = not_finished_reward

    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        rail_env: RailEnv = self.unwrapped.rail_env
        # Align the policy's shifted action space with RailEnvActions.
        for handle in action_dict:
            action_dict[handle] += 1
        obs, reward, done, info = self.env.step(action_dict)
        o, r, d, i = {}, {}, {}, {}
        for agent_id in obs:
            o[agent_id] = obs[agent_id]
            d[agent_id] = done[agent_id]
            i[agent_id] = info[agent_id]
            if not done[agent_id]:
                r[agent_id] = 0
            elif rail_env.agents[agent_id].status in (RailAgentStatus.DONE,
                                                      RailAgentStatus.DONE_REMOVED):
                # really arrived at its target
                r[agent_id] = self._finished_reward
            else:
                # episode over without reaching the target
                r[agent_id] = self._not_finished_reward
        d['__all__'] = done['__all__'] or all(d.values())
        return StepOutput(o, r, d, i)

    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        return self.env.reset(random_seed)
class DeadlockWrapper(gym.Wrapper):
    """Detects deadlocks between agents and terminates the agents involved,
    adding ``deadlock_reward`` to their reward one last time."""

    def __init__(self, env, deadlock_reward=-1) -> None:
        super().__init__(env)
        self._deadlock_reward = deadlock_reward
        # handles of agents already found deadlocked in this episode
        self._deadlocked_agents = []

    def check_deadlock(self):  # -> Set[int]:
        """Scan all active agents and return the handles that became
        deadlocked since the last check (also recording them in
        ``self._deadlocked_agents``)."""
        rail_env: RailEnv = self.unwrapped.rail_env
        new_deadlocked_agents = []
        for agent in rail_env.agents:
            if agent.status == RailAgentStatus.ACTIVE and agent.handle not in self._deadlocked_agents:
                position = agent.position
                direction = agent.direction
                # Walk along the track while each cell allows exactly one
                # transition; setting position to None ends the walk.
                while position is not None:
                    possible_transitions = rail_env.rail.get_transitions(*position, direction)
                    num_transitions = np.count_nonzero(possible_transitions)
                    if num_transitions == 1:
                        new_direction_me = np.argmax(possible_transitions)
                        new_cell_me = get_new_position(position, new_direction_me)
                        opp_agent = rail_env.agent_positions[new_cell_me]
                        if opp_agent != -1:
                            # next cell is occupied -- inspect the occupant
                            opp_position = rail_env.agents[opp_agent].position
                            opp_direction = rail_env.agents[opp_agent].direction
                            opp_possible_transitions = rail_env.rail.get_transitions(*opp_position, opp_direction)
                            opp_num_transitions = np.count_nonzero(opp_possible_transitions)
                            if opp_num_transitions == 1:
                                if opp_direction != direction:
                                    # occupant is also on single-option track
                                    # heading a different way -> deadlock
                                    self._deadlocked_agents.append(agent.handle)
                                    new_deadlocked_agents.append(agent.handle)
                                    position = None
                                else:
                                    # occupant moves the same way: keep walking
                                    position = new_cell_me
                                    direction = new_direction_me
                            else:
                                # occupant can branch away: keep walking
                                position = new_cell_me
                                direction = new_direction_me
                        else:
                            # next cell is free -> no deadlock on this track
                            position = None
                    else:
                        # branching cell reached -> the agent can escape
                        position = None
        return new_deadlocked_agents

    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        obs, reward, done, info = self.env.step(action_dict)
        # Only pay the detection cost if deadlocks affect the reward at all.
        if self._deadlock_reward != 0:
            new_deadlocked_agents = self.check_deadlock()
        else:
            new_deadlocked_agents = []
        o, r, d, i = {}, {}, {}, {}
        for agent_id, agent_obs in obs.items():
            # Agents deadlocked in an earlier step are dropped from the
            # outputs; freshly deadlocked ones are reported one last time.
            if agent_id not in self._deadlocked_agents or agent_id in new_deadlocked_agents:
                o[agent_id] = obs[agent_id]
                d[agent_id] = done[agent_id]
                i[agent_id] = info[agent_id]
                r[agent_id] = reward[agent_id]
                if agent_id in new_deadlocked_agents:
                    # agent is in deadlocked (and was not before) -> give deadlock reward and set to done
                    r[agent_id] += self._deadlock_reward
                    d[agent_id] = True
        d['__all__'] = done['__all__'] or all(d.values())
        return StepOutput(o, r, d, i)

    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        self._deadlocked_agents = []
        return self.env.reset(random_seed)
def possible_actions_sorted_by_distance(env: RailEnv, handle: int):
    """Return the agent's possible (action, distance-to-target) pairs, nearest first.

    :param env: the rail environment
    :param handle: the agent's handle
    :return: list of ``(RailEnvActions, distance)`` tuples sorted by distance,
        or ``None`` if the agent has no usable position (e.g. already removed).
        A single possible step is duplicated so callers can always index both
        the best ([0]) and second-best ([1]) option.
    """
    agent = env.agents[handle]
    if agent.status == RailAgentStatus.READY_TO_DEPART:
        agent_virtual_position = agent.initial_position
    elif agent.status == RailAgentStatus.ACTIVE:
        agent_virtual_position = agent.position
    elif agent.status == RailAgentStatus.DONE:
        agent_virtual_position = agent.target
    else:
        return None

    possible_transitions = env.rail.get_transitions(*agent_virtual_position, agent.direction)
    distance_map = env.distance_map.get()[handle]
    possible_steps = []
    for movement in range(4):
        if not possible_transitions[movement]:
            continue
        if movement == agent.direction:
            action = RailEnvActions.MOVE_FORWARD
        elif movement == (agent.direction + 1) % 4:
            action = RailEnvActions.MOVE_RIGHT
        elif movement == (agent.direction - 1) % 4:
            action = RailEnvActions.MOVE_LEFT
        else:
            # A cell can never offer a 180-degree turn; replaced the original
            # unprofessional error message with a diagnostic one.
            raise ValueError(
                "Unexpected transition %s for agent direction %s" % (movement, agent.direction)
            )
        distance = distance_map[get_new_position(agent_virtual_position, movement) + (movement,)]
        possible_steps.append((action, distance))
    possible_steps = sorted(possible_steps, key=lambda step: step[1])
    if len(possible_steps) == 1:
        # Duplicate the single option so callers can safely index [1].
        return possible_steps * 2
    return possible_steps
class ShortestPathActionWrapper(gym.Wrapper):
    """Reduces the action space to {stop, shortest path, alternative path}."""

    def __init__(self, env) -> None:
        super().__init__(env)
        print("Apply ShortestPathActionWrapper")
        # 0 = stop, 1 = follow the shortest path, 2 = take the other direction
        self.action_space = gym.spaces.Discrete(n=3)

    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        rail_env: RailEnv = self.env.unwrapped.rail_env
        transformed_action_dict = {}
        for agent_id, action in action_dict.items():
            if action == 0:
                # "stop" passes through unchanged
                transformed_action_dict[agent_id] = action
                continue
            assert action in [1, 2]
            # Map 1/2 onto the concrete rail action of the (second-)shortest path.
            ranked = possible_actions_sorted_by_distance(rail_env, agent_id)
            transformed_action_dict[agent_id] = ranked[action - 1][0]
        return self.env.step(transformed_action_dict)

    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        return self.env.reset(random_seed)
class DeadlockResolutionWrapper(gym.Wrapper):
    """Resolves detected deadlocks by swapping the positions of the blocked
    agent pair, optionally adding ``deadlock_reward`` to both agents."""

    def __init__(self, env, deadlock_reward=0) -> None:
        super().__init__(env)
        self._deadlock_reward = deadlock_reward
        # per-agent count of how often it took part in a resolving swap
        self._num_swaps = defaultdict(int)

    def get_deadlocks(self, agent: EnvAgent, seen: List[int]) -> List[Optional[EnvAgent]]:
        """Recursively collect the agents blocking *agent*.

        :param agent: the agent to check
        :param seen: agent handles already visited on this recursion path;
            also used to detect circular blocking structures
        :return: a list with one slot per direction holding the blocking
            agent (or None), or an empty list if the agent is not deadlocked
        """
        # abort if agent already checked
        if agent.handle in seen:
            # handle circular deadlock
            seen.append(agent.handle)
            # return
            return []
        # add agent to seen agents
        seen.append(agent.handle)
        # get rail environment
        rail_env: RailEnv = self.unwrapped.rail_env
        # get transitions for agent's position and direction
        transitions = rail_env.rail.get_transitions(*agent.position, agent.direction)
        num_possible_transitions = np.count_nonzero(transitions)
        # initialize list to assign deadlocked agents to directions
        deadlocked_agents = [None] * len(transitions)
        # check if all possible transitions are blocked
        for direction, transition in enumerate(transitions):
            # only check transitions > 0 but iterate through all to get direction
            if transition > 0:
                # get opposite agent in direction of travel if cell is occuppied
                new_position = get_new_position(agent.position, direction)
                i_opp_agent = rail_env.agent_positions[new_position]
                if i_opp_agent != -1:
                    opp_agent = rail_env.agents[i_opp_agent]
                    # get blocking agents of opposite agent
                    blocking_agents = self.get_deadlocks(opp_agent, seen)
                    # add opposite agent to deadlocked agents if blocked by
                    # checking agent. also add opposite agent if it is part
                    # of a circular blocking structure.
                    if agent in blocking_agents or seen[0] == seen[-1]:
                        deadlocked_agents[direction] = opp_agent
        # return deadlocked agents if applicable
        num_deadlocked_agents = np.count_nonzero(deadlocked_agents)
        if num_deadlocked_agents > 0:
            # deadlock has to be resolved only if no transition is possible
            if num_deadlocked_agents == num_possible_transitions:
                return deadlocked_agents
            # workaround for already commited agent inside cell that is blocked by at least one agent
            if agent.speed_data['position_fraction'] > 1:
                return deadlocked_agents
        return []

    def step(self, action_dict: Dict[int, RailEnvActions]) -> StepOutput:
        """Step the wrapped env, then detect and resolve deadlocks by swapping."""
        obs, reward, done, info = self.env.step(action_dict)
        # get rail environment
        rail_env: RailEnv = self.unwrapped.rail_env
        # check agents that have status ACTIVE for deadlocks, env.active_agents contains also other agents
        active_agents = [agent for agent in rail_env.agents if agent.status == RailAgentStatus.ACTIVE]
        for agent in active_agents:
            deadlocked_agents = self.get_deadlocks(agent, [])
            if len(deadlocked_agents) > 0:
                # favor transition in front as most natural
                d_agent = deadlocked_agents[agent.direction]
                # get most likely transition if straight forward is no valid transition
                if d_agent is None:
                    transitions = rail_env.rail.get_transitions(*agent.position, agent.direction)
                    agent.direction = np.argmax(transitions)
                    d_agent = deadlocked_agents[agent.direction]
                # already commited agent can have only one transition blocked
                if d_agent is None:
                    d_agent = [a for a in deadlocked_agents if a is not None][0]
                # swap the deadlocked pair
                agent.position, d_agent.position = d_agent.position, agent.position
                rail_env.agent_positions[agent.position] = agent.handle
                rail_env.agent_positions[d_agent.position] = d_agent.handle
                # set direction of blocking agent because of corners
                d_agent.direction = (agent.direction + 2) % 4
                # position is exact after swap
                agent.speed_data['position_fraction'] = 0.0
                d_agent.speed_data['position_fraction'] = 0.0
                # punish agents for deadlock
                reward[agent.handle] += self._deadlock_reward
                reward[d_agent.handle] += self._deadlock_reward
                # increase swap counter in info dict
                self._num_swaps[agent.handle] += 1
                self._num_swaps[d_agent.handle] += 1
        for i_agent in info:
            info[i_agent]['num_swaps'] = self._num_swaps[i_agent]
        return obs, reward, done, info

    def reset(self, random_seed: Optional[int] = None) -> Dict[int, Any]:
        self._num_swaps = defaultdict(int)
        return self.env.reset(random_seed)
class FlatlandRenderWrapper(RailEnv, gym.Env):
    """RailEnv variant that satisfies gym's rendering contract.

    When ``use_renderer`` is truthy, episodes can be drawn through
    flatland's ``RenderTool`` into a window ('human') or as an RGB array.
    """

    def __init__(self, use_renderer=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.use_renderer = use_renderer
        self.renderer = None
        self.metadata = {
            'render.modes': ['human', 'rgb_array'],
            'video.frames_per_second': 10,
            'semantics.autoreset': True
        }
        if self.use_renderer:
            self.initialize_renderer()

    def reset(self, *args, **kwargs):
        # TODO: Errors with RLLib with renderer as None.
        if self.use_renderer and self.renderer:
            self.renderer.reset()
        return super().reset(*args, **kwargs)

    def render(self, mode='human'):
        """
        Render the environment's behavior; with mode 'human' the output is
        drawn into a window readable to the human eye.
        """
        if not self.use_renderer:
            return
        if not self.renderer:
            self.initialize_renderer(mode=mode)
        return self.update_renderer(mode=mode)

    def initialize_renderer(self, mode="human"):
        # Import lazily so headless runs never touch the rendering stack.
        from flatland.utils.rendertools import RenderTool, AgentRenderVariant
        # Adjust screen_height/screen_width to fit your resolution.
        self.renderer = RenderTool(self, gl="PGL",  # gl="TKPILSVG",
                                   agent_render_variant=AgentRenderVariant.ONE_STEP_BEHIND,
                                   show_debug=False,
                                   screen_height=600,
                                   screen_width=800)

    def update_renderer(self, mode='human'):
        frame = self.renderer.render_env(show=True, show_observations=False,
                                         show_predictions=False, return_image=True)
        # keep only the first three channels of the rendered image
        return frame[:, :, :3]

    def set_renderer(self, renderer):
        self.use_renderer = renderer
        if self.use_renderer:
            self.initialize_renderer(mode=self.use_renderer)

    def close(self):
        super().close()
        if not self.renderer:
            return
        try:
            self.renderer.close_window()
            self.renderer = None
        except Exception as e:
            # The last step (due to a stopping criteria) can be skipped by
            # RLLib, so done never becomes true and the env is only closed
            # when RLLib exits -- by then there is no window, hence the error.
            print("Could Not close window due to:", e)
| [
"gym.spaces.Discrete",
"envs.flatland.observations.segment_graph.Graph.get_virtual_position",
"gym.spaces.Box",
"numpy.count_nonzero",
"numpy.argmax",
"collections.defaultdict",
"envs.flatland.utils.gym_env.StepOutput",
"flatland.utils.rendertools.RenderTool",
"flatland.core.grid.grid4_utils.get_new... | [((5246, 5263), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (5257, 5263), False, 'from collections import defaultdict\n'), ((6896, 6918), 'envs.flatland.utils.gym_env.StepOutput', 'StepOutput', (['o', 'r', 'd', 'i'], {}), '(o, r, d, i)\n', (6906, 6918), False, 'from envs.flatland.utils.gym_env import StepOutput\n'), ((31423, 31445), 'envs.flatland.utils.gym_env.StepOutput', 'StepOutput', (['o', 'r', 'd', 'i'], {}), '(o, r, d, i)\n', (31433, 31445), False, 'from envs.flatland.utils.gym_env import StepOutput\n'), ((34838, 34860), 'envs.flatland.utils.gym_env.StepOutput', 'StepOutput', (['o', 'r', 'd', 'i'], {}), '(o, r, d, i)\n', (34848, 34860), False, 'from envs.flatland.utils.gym_env import StepOutput\n'), ((36611, 36635), 'gym.spaces.Discrete', 'gym.spaces.Discrete', ([], {'n': '(3)'}), '(n=3)\n', (36630, 36635), False, 'import gym\n'), ((37601, 37617), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (37612, 37617), False, 'from collections import defaultdict\n'), ((38223, 38252), 'numpy.count_nonzero', 'np.count_nonzero', (['transitions'], {}), '(transitions)\n', (38239, 38252), True, 'import numpy as np\n'), ((39490, 39525), 'numpy.count_nonzero', 'np.count_nonzero', (['deadlocked_agents'], {}), '(deadlocked_agents)\n', (39506, 39525), True, 'import numpy as np\n'), ((42341, 42357), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (42352, 42357), False, 'from collections import defaultdict\n'), ((43835, 43978), 'flatland.utils.rendertools.RenderTool', 'RenderTool', (['self'], {'gl': '"""PGL"""', 'agent_render_variant': 'AgentRenderVariant.ONE_STEP_BEHIND', 'show_debug': '(False)', 'screen_height': '(600)', 'screen_width': '(800)'}), "(self, gl='PGL', agent_render_variant=AgentRenderVariant.\n ONE_STEP_BEHIND, show_debug=False, screen_height=600, screen_width=800)\n", (43845, 43978), False, 'from flatland.utils.rendertools import RenderTool, 
AgentRenderVariant\n'), ((8860, 8900), 'envs.flatland.observations.segment_graph.Graph.get_virtual_position', 'Graph.get_virtual_position', (['agent.handle'], {}), '(agent.handle)\n', (8886, 8900), False, 'from envs.flatland.observations.segment_graph import Graph\n'), ((16333, 16369), 'envs.flatland.observations.segment_graph.Graph.get_virtual_position', 'Graph.get_virtual_position', (['agent_id'], {}), '(agent_id)\n', (16359, 16369), False, 'from envs.flatland.observations.segment_graph import Graph\n'), ((2400, 2475), 'gym.spaces.Box', 'gym.spaces.Box', ([], {'low': '(0)', 'high': '(1)', 'shape': '(self.action_space.n,)', 'dtype': 'np.int32'}), '(low=0, high=1, shape=(self.action_space.n,), dtype=np.int32)\n', (2414, 2475), False, 'import gym\n'), ((4095, 4133), 'numpy.count_nonzero', 'np.count_nonzero', (['possible_transitions'], {}), '(possible_transitions)\n', (4111, 4133), True, 'import numpy as np\n'), ((38717, 38760), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['agent.position', 'direction'], {}), '(agent.position, direction)\n', (38733, 38760), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((13406, 13442), 'envs.flatland.observations.segment_graph.Graph.get_virtual_position', 'Graph.get_virtual_position', (['agent_id'], {}), '(agent_id)\n', (13432, 13442), False, 'from envs.flatland.observations.segment_graph import Graph\n'), ((32313, 32351), 'numpy.count_nonzero', 'np.count_nonzero', (['possible_transitions'], {}), '(possible_transitions)\n', (32329, 32351), True, 'import numpy as np\n'), ((36120, 36170), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['agent_virtual_position', 'movement'], {}), '(agent_virtual_position, movement)\n', (36136, 36170), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((40918, 40940), 'numpy.argmax', 'np.argmax', (['transitions'], {}), '(transitions)\n', (40927, 40940), True, 'import numpy as np\n'), ((32440, 32471), 
'numpy.argmax', 'np.argmax', (['possible_transitions'], {}), '(possible_transitions)\n', (32449, 32471), True, 'import numpy as np\n'), ((32510, 32554), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['position', 'new_direction_me'], {}), '(position, new_direction_me)\n', (32526, 32554), False, 'from flatland.core.grid.grid4_utils import get_new_position\n'), ((32998, 33040), 'numpy.count_nonzero', 'np.count_nonzero', (['opp_possible_transitions'], {}), '(opp_possible_transitions)\n', (33014, 33040), True, 'import numpy as np\n'), ((4669, 4700), 'flatland.core.grid.grid4_utils.get_new_position', 'get_new_position', (['pos', 'movement'], {}), '(pos, movement)\n', (4685, 4700), False, 'from flatland.core.grid.grid4_utils import get_new_position\n')] |
import torch
from torch import nn
class CBOWClassifier(nn.Module):
    """
    Continuous bag of words classifier.

    Encodes a batch of word-embedding sequences by either max-pooling or
    summing over the sequence dimension, then applies a one-hidden-layer
    MLP with a sigmoid output for binary classification.
    """

    def __init__(self, hidden_size, input_size, max_pool, dropout=0.5):
        """Initialize parameters and build model.

        :param hidden_size: width of the hidden layer
        :param input_size: dimensionality of the word embeddings
        :param max_pool: if true then max pool over word embeddings,
            else sum word embeddings
        :param dropout: dropout probability applied to the encoding and hidden layer
        """
        super(CBOWClassifier, self).__init__()
        self.hidden_size = hidden_size
        self.input_size = input_size
        self.max_pool = max_pool
        self.dropout = nn.Dropout(p=dropout)
        self.i2h = nn.Linear(self.input_size, self.hidden_size)
        self.h2o = nn.Linear(self.hidden_size, 1)
        self.sigmoid = nn.Sigmoid()
        self.tanh = nn.Tanh()

    def forward(self, x):
        """
        :param x: float tensor of shape (batch, seq_len, input_size)
        :return: sigmoid scores of shape (batch, 1)
        """
        if self.max_pool:
            # (batch, seq, emb) -> (batch, emb, seq) so the pool runs over seq,
            # yielding (batch, emb, 1).
            encoding = nn.functional.max_pool1d(x.transpose(1, 2), x.shape[1])
            # BUG FIX: squeeze only the pooled (length-1) dimension. The
            # original bare .squeeze() also dropped the batch dimension for
            # batch size 1, making this branch inconsistent with the sum branch.
            encoding = encoding.squeeze(2)
        else:
            encoding = x.sum(1)
        encoding = self.dropout(encoding)
        hidden = self.tanh(self.dropout(self.i2h(encoding)))
        out = self.sigmoid(self.h2o(hidden))
        return out
| [
"torch.nn.Sigmoid",
"torch.nn.Dropout",
"torch.nn.Tanh",
"torch.nn.Linear"
] | [((573, 594), 'torch.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (583, 594), False, 'from torch import nn\n'), ((614, 658), 'torch.nn.Linear', 'nn.Linear', (['self.input_size', 'self.hidden_size'], {}), '(self.input_size, self.hidden_size)\n', (623, 658), False, 'from torch import nn\n'), ((678, 708), 'torch.nn.Linear', 'nn.Linear', (['self.hidden_size', '(1)'], {}), '(self.hidden_size, 1)\n', (687, 708), False, 'from torch import nn\n'), ((732, 744), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (742, 744), False, 'from torch import nn\n'), ((765, 774), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (772, 774), False, 'from torch import nn\n')] |
import numpy as np
import cv2 as cv
# Load the source image in colour (BGR channel order).
img = cv.imread('1.jpeg',cv.IMREAD_COLOR)
# A polygon needs a set of vertices, supplied as a numpy int32 array of (x, y) points.
pts = np.array([[20,33],[300,120], [67,79], [123,111], [144,134]], np.int32)
# cv.polylines draws the polygon. Parameters: 1. the image to draw on,
# 2. the list of point arrays, 3. whether to close the shape by connecting
# the first and last points (bool), 4. the BGR colour, 5. the line width.
cv.polylines(img, [pts], True,(0,231,123), 1)
cv.imshow('image',img)
# Block until a key is pressed, then tear down all OpenCV windows.
cv.waitKey(0)
cv.destroyAllWindows()
"cv2.polylines",
"cv2.imshow",
"numpy.array",
"cv2.destroyAllWindows",
"cv2.waitKey",
"cv2.imread"
] | [((43, 79), 'cv2.imread', 'cv.imread', (['"""1.jpeg"""', 'cv.IMREAD_COLOR'], {}), "('1.jpeg', cv.IMREAD_COLOR)\n", (52, 79), True, 'import cv2 as cv\n'), ((180, 256), 'numpy.array', 'np.array', (['[[20, 33], [300, 120], [67, 79], [123, 111], [144, 134]]', 'np.int32'], {}), '([[20, 33], [300, 120], [67, 79], [123, 111], [144, 134]], np.int32)\n', (188, 256), True, 'import numpy as np\n'), ((475, 523), 'cv2.polylines', 'cv.polylines', (['img', '[pts]', '(True)', '(0, 231, 123)', '(1)'], {}), '(img, [pts], True, (0, 231, 123), 1)\n', (487, 523), True, 'import cv2 as cv\n'), ((524, 547), 'cv2.imshow', 'cv.imshow', (['"""image"""', 'img'], {}), "('image', img)\n", (533, 547), True, 'import cv2 as cv\n'), ((547, 560), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (557, 560), True, 'import cv2 as cv\n'), ((561, 583), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (581, 583), True, 'import cv2 as cv\n')] |
#!/usr/bin/env python2
from BeautifulSoup import BeautifulSoup, NavigableString
import urllib
import string
import re
class Entry(object):
def __init__(self, name, value, description, url):
self.name = name
self.value = value
self.description = description
self.url = url
def __str__(self):
fields = [
self.name, # title
'A', # type
'', # redirect
'', # otheruses
'', # categories
'', # references
'', # see_also
'', # further_reading
'', # external_links
'', # disambiguation
'', # images
self.description, # abstract
self.url # source_url
]
return '%s' % ('\t'.join(fields))
class Parser(object):
    """Extracts about:config preference entries from a saved copy of the
    MozillaZine "About:config entries" wiki page (Python 2 / BeautifulSoup 3)."""

    def __init__(self, input='download/About:config_entries'):
        # NOTE(review): parameter `input` shadows the builtin of the same name.
        self.soup = BeautifulSoup(open(input))
        # Requires trailing / for relative link replacement
        self.baseURL = "http://kb.mozillazine.org/"

    def findEntries(self):
        """Populate self.entries with one Entry per preference table row."""
        # h1 headings (skipping the first two) name the preference namespace
        # anchors, e.g. "Browser." -- used for deep links below.
        self.entries = []
        headers = map(lambda x: x.string, self.soup.findAll('h1')[2:])
        table = self.soup.findAll('div', id="bodyContent")[0]
        for table in table.findAll('table'):
            header = True
            for tr in table.findAll('tr'):
                if header:
                    # skip each table's column-header row
                    header = False
                    continue
                i = 0
                # cells cycle through: 0 = name, 1 = value type, 2 = description
                for th in tr.findAll('td'):
                    description = ''
                    if i == 0:
                        name = ''.join(th.b.findAll(text=True)).replace(' ','')
                        # anchor is the capitalized first name component + '.',
                        # e.g. "Browser." -- link to it only if a heading exists
                        anchor = string.capitalize(urllib.quote(name.split('.')[0])) + "."
                        if anchor in headers:
                            url = self.baseURL + 'About:config_entries#' + anchor
                        else:
                            url = self.baseURL + 'About:config_entries'
                    elif i == 1:
                        value = th.text
                    elif i == 2:
                        if value:
                            article = 'a'
                            if value[0] == 'I': article += 'n'  # "an Integer"
                            optionType = "it accepts " + article + " " + value.lower() + "."
                            synopsis = '"' + name + '"' + ' is a configuration option ' \
                                'for the Firefox web browser; ' + optionType + "<br>"
                        # <br> tags would vanish in findAll(text=True); insert
                        # newlines first so line breaks survive extraction.
                        for tag in th.findAll('br'):
                            tag.insert(0, NavigableString("\n"))
                        description = ''.join(th.findAll(text=True))
                        description = description.rstrip().replace('\n', '<br>').strip()
                        expandedURL = 'href="' + self.baseURL
                        description = description.replace('href="/', expandedURL)
                        description = re.sub('<\s*b\s*>', '<i>', description)
                        description = re.sub('<\s*/\s*b\s*>', '</i>', description)
                        description = '<blockquote>' + description + '</blockquote>'
                        description = synopsis + description
                        # reset the counter so any further cell in the row is
                        # parsed as a name again (presumably rows can carry
                        # several name/value/description triples -- TODO confirm)
                        i = -1
                        self.entries.append(Entry(name, value, description.strip(), url))
                    i += 1
if __name__ == "__main__":
    # Parse the saved wiki page and emit one tab-separated record per entry.
    parser = Parser()
    parser.findEntries()
    # Renamed the handle: the original `file` shadowed the builtin.
    with open('output.txt', 'w') as output_file:
        for entry in parser.entries:
            # str() invokes __str__; avoid calling the dunder directly.
            output_file.write(str(entry).encode('UTF-8') + '\n')
| [
"BeautifulSoup.NavigableString",
"re.sub"
] | [((3236, 3277), 're.sub', 're.sub', (['"""<\\\\s*b\\\\s*>"""', '"""<i>"""', 'description'], {}), "('<\\\\s*b\\\\s*>', '<i>', description)\n", (3242, 3277), False, 'import re\n'), ((3314, 3361), 're.sub', 're.sub', (['"""<\\\\s*/\\\\s*b\\\\s*>"""', '"""</i>"""', 'description'], {}), "('<\\\\s*/\\\\s*b\\\\s*>', '</i>', description)\n", (3320, 3361), False, 'import re\n'), ((2873, 2894), 'BeautifulSoup.NavigableString', 'NavigableString', (['"""\n"""'], {}), "('\\n')\n", (2888, 2894), False, 'from BeautifulSoup import BeautifulSoup, NavigableString\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
#
# Copyright 2018-2019 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
"""Portable pipe implementation for Linux, MacOS, and Windows."""
import asyncio
import errno
import logging
import os
import socket
import struct
import tempfile
from abc import ABC, abstractmethod
from asyncio import AbstractEventLoop
from asyncio.streams import StreamWriter
from shutil import rmtree
from typing import IO, Optional
from aea.exceptions import enforce
_default_logger = logging.getLogger(__name__)
PIPE_CONN_TIMEOUT = 10.0
PIPE_CONN_ATTEMPTS = 10
TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS = 5
class IPCChannelClient(ABC):
    """Multi-platform interprocess communication channel for the client side."""
    # Concrete implementations exchange length-prefixed byte frames:
    # write() sends a size header followed by the payload, and read()
    # returns exactly one such payload (or None when the channel is gone).
    @abstractmethod
    async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
        """
        Connect to communication channel
        :param timeout: timeout for other end to connect
        :return: connection status
        """
    @abstractmethod
    async def write(self, data: bytes) -> None:
        """
        Write `data` bytes to the other end of the channel
        Will first write the size than the actual data
        :param data: bytes to write
        """
    @abstractmethod
    async def read(self) -> Optional[bytes]:
        """
        Read bytes from the other end of the channel
        Will first read the size than the actual data
        :return: read bytes
        """
    @abstractmethod
    async def close(self) -> None:
        """Close the communication channel."""
class IPCChannel(IPCChannelClient):
    """Multi-platform interprocess communication channel."""
    # Server-side channel: additionally exposes the rendezvous points the
    # other process needs in order to connect back (fifo paths for the posix
    # implementation, a TCP port rendered as a string for the socket one).
    @property
    @abstractmethod
    def in_path(self) -> str:
        """
        Rendezvous point for incoming communication.
        :return: path
        """
    @property
    @abstractmethod
    def out_path(self) -> str:
        """
        Rendezvous point for outgoing communication.
        :return: path
        """
class PosixNamedPipeProtocol:
    """Posix named pipes async wrapper communication protocol."""
    # Framing: every message is a 4-byte big-endian length prefix followed
    # by the payload (see write()/read()).
    def __init__(
        self,
        in_path: str,
        out_path: str,
        logger: logging.Logger = _default_logger,
        loop: Optional[AbstractEventLoop] = None,
    ) -> None:
        """
        Initialize a new posix named pipe.
        :param in_path: rendezvous point for incoming data
        :param out_path: rendezvous point for outgoing data
        :param logger: the logger
        :param loop: the event loop
        """
        self.logger = logger
        self._loop = loop
        self._in_path = in_path
        self._out_path = out_path
        # -1 marks "not yet opened" file descriptors
        self._in = -1
        self._out = -1
        self._stream_reader = None  # type: Optional[asyncio.StreamReader]
        self._reader_protocol = None  # type: Optional[asyncio.StreamReaderProtocol]
        self._fileobj = None  # type: Optional[IO[str]]
        self._connection_attempts = PIPE_CONN_ATTEMPTS
        self._connection_timeout = PIPE_CONN_TIMEOUT
    async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
        """
        Connect to the other end of the pipe
        :param timeout: timeout before failing
        :return: connection success
        """
        if self._loop is None:
            self._loop = asyncio.get_event_loop()
        # spread the total timeout evenly over the retry budget
        self._connection_timeout = timeout / PIPE_CONN_ATTEMPTS if timeout > 0 else 0
        if self._connection_attempts <= 1:  # pragma: no cover
            return False
        self._connection_attempts -= 1
        self.logger.debug(
            "Attempt opening pipes {}, {}...".format(self._in_path, self._out_path)
        )
        # open the read end first: O_RDONLY|O_NONBLOCK succeeds even when no
        # writer is attached to the fifo yet
        self._in = os.open(self._in_path, os.O_RDONLY | os.O_NONBLOCK | os.O_SYNC)
        try:
            self._out = os.open(self._out_path, os.O_WRONLY | os.O_NONBLOCK)
        except OSError as e:  # pragma: no cover
            # ENXIO: no reader on the outgoing fifo yet -- sleep and retry;
            # the recursive call consumes one attempt per retry
            if e.errno == errno.ENXIO:
                self.logger.debug("Sleeping for {}...".format(self._connection_timeout))
                await asyncio.sleep(self._connection_timeout)
                return await self.connect(timeout)
            raise e
        # setup reader
        enforce(
            self._in != -1 and self._out != -1 and self._loop is not None,
            "Incomplete initialization.",
        )
        self._stream_reader = asyncio.StreamReader(loop=self._loop)
        self._reader_protocol = asyncio.StreamReaderProtocol(
            self._stream_reader, loop=self._loop
        )
        self._fileobj = os.fdopen(self._in, "r")
        # hook the fifo's read end into the event loop via the stream protocol
        await self._loop.connect_read_pipe(
            lambda: self.__reader_protocol, self._fileobj
        )
        return True
    @property
    def __reader_protocol(self) -> asyncio.StreamReaderProtocol:
        """Get reader protocol."""
        if self._reader_protocol is None:
            raise ValueError("reader protocol not set!")  # pragma: nocover
        return self._reader_protocol
    async def write(self, data: bytes) -> None:
        """
        Write to pipe.
        :param data: bytes to write to pipe
        """
        self.logger.debug("writing {}...".format(len(data)))
        # 4-byte big-endian length prefix, then the payload
        size = struct.pack("!I", len(data))
        os.write(self._out, size + data)
        # yield control so the event loop (and the peer) get a chance to run
        await asyncio.sleep(0.0)
    async def read(self) -> Optional[bytes]:
        """
        Read from pipe.
        :return: read bytes
        """
        if self._stream_reader is None:  # pragma: nocover
            raise ValueError("StreamReader not set, call connect first!")
        try:
            self.logger.debug("waiting for messages (in={})...".format(self._in_path))
            # read the 4-byte size header, then exactly that many payload bytes
            buf = await self._stream_reader.readexactly(4)
            if not buf:  # pragma: no cover
                return None
            size = struct.unpack("!I", buf)[0]
            if size <= 0:  # pragma: no cover
                return None
            data = await self._stream_reader.readexactly(size)
            if not data:  # pragma: no cover
                return None
            return data
        except asyncio.IncompleteReadError as e:  # pragma: no cover
            self.logger.info(
                "Connection disconnected while reading from pipe ({}/{})".format(
                    len(e.partial), e.expected
                )
            )
            return None
        except asyncio.CancelledError:  # pragma: no cover
            return None
    async def close(self) -> None:
        """Disconnect pipe."""
        self.logger.debug("closing pipe (in={})...".format(self._in_path))
        if self._fileobj is None:
            raise ValueError("Pipe not connected")  # pragma: nocover
        try:
            # hack for MacOSX: send a zero-length frame so a blocked reader
            # on the other end wakes up before we close the descriptor
            size = struct.pack("!I", 0)
            os.write(self._out, size)
            os.close(self._out)
            self._fileobj.close()
        except OSError:  # pragma: no cover
            pass
        await asyncio.sleep(0)
class TCPSocketProtocol:
    """Length-prefixed message protocol over an established TCP stream pair."""

    def __init__(
        self,
        reader: asyncio.StreamReader,
        writer: asyncio.StreamWriter,
        logger: logging.Logger = _default_logger,
        loop: Optional[AbstractEventLoop] = None,
    ) -> None:
        """
        Initialize the tcp socket protocol.

        :param reader: established asyncio reader
        :param writer: established asyncio writer
        :param logger: the logger
        :param loop: the event loop
        """
        self.logger = logger
        if loop is None:
            loop = asyncio.get_event_loop()
        self.loop = loop
        self._reader = reader
        self._writer = writer

    @property
    def writer(self) -> StreamWriter:
        """Get a writer associated with protocol."""
        return self._writer

    async def write(self, data: bytes) -> None:
        """
        Write to socket.

        :param data: bytes to write
        """
        if self._writer is None:
            raise ValueError("writer not set!")  # pragma: nocover
        self.logger.debug("writing {}...".format(len(data)))
        # frame = 4-byte big-endian length prefix + payload
        header = struct.pack("!I", len(data))
        self._writer.write(header + data)
        await self._writer.drain()

    async def read(self) -> Optional[bytes]:
        """
        Read from socket.

        :return: read bytes
        """
        try:
            self.logger.debug("waiting for messages...")
            # first the 4-byte size header, then exactly `size` payload bytes
            header = await self._reader.readexactly(4)
            if not header:  # pragma: no cover
                return None
            (size,) = struct.unpack("!I", header)
            payload = await self._reader.readexactly(size)
            if not payload:  # pragma: no cover
                return None
            if len(payload) != size:  # pragma: no cover
                raise ValueError(
                    f"Incomplete Read Error! Expected size={size}, got: {len(payload)}"
                )
            return payload
        except asyncio.IncompleteReadError as e:  # pragma: no cover
            self.logger.info(
                "Connection disconnected while reading from pipe ({}/{})".format(
                    len(e.partial), e.expected
                )
            )
            return None
        except asyncio.CancelledError:  # pragma: no cover
            return None

    async def close(self) -> None:
        """Disconnect socket."""
        if self._writer.can_write_eof():
            self._writer.write_eof()
            await self._writer.drain()
        self._writer.close()
        wait_closed = getattr(self._writer, "wait_closed", None)
        if wait_closed:
            # in py3.6 writer does not have the coroutine
            await wait_closed()  # pragma: nocover
class TCPSocketChannel(IPCChannel):
    """Interprocess communication channel implementation using tcp sockets."""

    def __init__(
        self,
        logger: logging.Logger = _default_logger,
        loop: Optional[AbstractEventLoop] = None,
    ) -> None:
        """Initialize tcp socket interprocess communication channel."""
        self.logger = logger
        self._loop = loop
        self._server = None  # type: Optional[asyncio.AbstractServer]
        self._connected = None  # type: Optional[asyncio.Event]
        self._sock = None  # type: Optional[TCPSocketProtocol]
        # Ask the OS for a free ephemeral port, remember its number, and
        # release the probe socket; connect() re-binds the same port.
        probe = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        probe.bind(("127.0.0.1", 0))
        probe.listen(1)
        self._port = probe.getsockname()[1]
        probe.close()

    async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
        """
        Setup communication channel and wait for other end to connect.

        :param timeout: timeout for the connection to be established
        :return: connection status
        """
        if self._loop is None:
            self._loop = asyncio.get_event_loop()
        self._connected = asyncio.Event()
        self._server = await asyncio.start_server(
            self._handle_connection, host="127.0.0.1", port=self._port
        )
        if self._server.sockets is None:
            raise ValueError("Server sockets is None!")  # pragma: nocover
        self._port = self._server.sockets[0].getsockname()[1]
        self.logger.debug("socket pipe rdv point: {}".format(self._port))
        try:
            await asyncio.wait_for(self._connected.wait(), timeout)
        except asyncio.TimeoutError:  # pragma: no cover
            return False
        # a single client is all we serve: stop listening once it arrived
        self._server.close()
        await self._server.wait_closed()
        return True

    async def _handle_connection(
        self, reader: asyncio.StreamReader, writer: asyncio.StreamWriter
    ) -> None:
        """Accept the incoming client and wrap the streams into a protocol."""
        if self._connected is None:
            raise ValueError("Connected is None!")  # pragma: nocover
        self._connected.set()
        self._sock = TCPSocketProtocol(
            reader, writer, logger=self.logger, loop=self._loop
        )

    async def write(self, data: bytes) -> None:
        """
        Write to channel.

        :param data: bytes to write
        """
        if self._sock is None:
            raise ValueError("Socket pipe not connected.")  # pragma: nocover
        await self._sock.write(data)

    async def read(self) -> Optional[bytes]:
        """
        Read from channel.

        :return: read bytes
        """
        if self._sock is None:
            raise ValueError("Socket pipe not connected.")  # pragma: nocover
        return await self._sock.read()

    async def close(self) -> None:
        """Disconnect from channel and clean it up."""
        if self._sock is None:
            raise ValueError("Socket pipe not connected.")  # pragma: nocover
        await self._sock.close()

    @property
    def in_path(self) -> str:
        """Rendezvous point for incoming communication."""
        return str(self._port)

    @property
    def out_path(self) -> str:
        """Rendezvous point for outgoing communication."""
        return str(self._port)
class PosixNamedPipeChannel(IPCChannel):
    """Interprocess communication channel implementation using Posix named pipes."""

    def __init__(
        self,
        logger: logging.Logger = _default_logger,
        loop: Optional[AbstractEventLoop] = None,
    ) -> None:
        """Initialize posix named pipe interprocess communication channel."""
        self.logger = logger
        self._loop = loop
        # The fifo pair lives in a private temp directory removed on close().
        self._pipe_dir = tempfile.mkdtemp()
        self._in_path = "{}/process_to_aea".format(self._pipe_dir)
        self._out_path = "{}/aea_to_process".format(self._pipe_dir)
        # setup fifos
        self.logger.debug(
            "Creating pipes ({}, {})...".format(self._in_path, self._out_path)
        )
        for stale_path in (self._in_path, self._out_path):
            if os.path.exists(stale_path):
                os.remove(stale_path)  # pragma: no cover
        os.mkfifo(self._in_path)
        os.mkfifo(self._out_path)
        self._pipe = PosixNamedPipeProtocol(
            self._in_path, self._out_path, logger=logger, loop=loop
        )

    async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
        """
        Setup communication channel and wait for other end to connect.

        :param timeout: timeout for connection to be established
        :return: bool, indicating success
        """
        if self._loop is None:
            self._loop = asyncio.get_event_loop()
        return await self._pipe.connect(timeout)

    async def write(self, data: bytes) -> None:
        """
        Write to the channel.

        :param data: data to write to channel
        """
        await self._pipe.write(data)

    async def read(self) -> Optional[bytes]:
        """
        Read from the channel.

        :return: read bytes
        """
        return await self._pipe.read()

    async def close(self) -> None:
        """Close the channel and clean it up."""
        await self._pipe.close()
        rmtree(self._pipe_dir)

    @property
    def in_path(self) -> str:
        """Rendezvous point for incoming communication."""
        return self._in_path

    @property
    def out_path(self) -> str:
        """Rendezvous point for outgoing communication."""
        return self._out_path
class TCPSocketChannelClient(IPCChannelClient):
    """Interprocess communication channel client using tcp sockets."""

    def __init__(  # pylint: disable=unused-argument
        self,
        in_path: str,
        out_path: str,
        logger: logging.Logger = _default_logger,
        loop: Optional[AbstractEventLoop] = None,
    ) -> None:
        """
        Initialize a tcp socket communication channel client.

        :param in_path: rendezvous point for incoming data
        :param out_path: rendezvous point for outgoing data
        :param logger: the logger
        :param loop: the event loop
        """
        self.logger = logger
        self._loop = loop
        # in_path is either "<port>" or "<host>:<port>"
        parts = in_path.split(":")
        if len(parts) == 1:
            self._host = "127.0.0.1"
            self._port = int(in_path)
        else:  # pragma: nocover
            self._host = parts[0]
            self._port = int(parts[1])
        self._sock = None  # type: Optional[TCPSocketProtocol]
        self._attempts = TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS
        self._timeout = PIPE_CONN_TIMEOUT / self._attempts
        # Last exception raised during connect(), kept for callers to inspect.
        self.last_exception: Optional[Exception] = None

    async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
        """
        Connect to the other end of the communication channel.

        :param timeout: timeout for connection to be established
        :return: connection status
        """
        if self._loop is None:
            self._loop = asyncio.get_event_loop()
        self._timeout = timeout / TCP_SOCKET_PIPE_CLIENT_CONN_ATTEMPTS
        self.logger.debug(
            "Attempting to connect to {}:{}.....".format("127.0.0.1", self._port)
        )
        success = False
        while self._attempts > 0:
            self._attempts -= 1
            try:
                self._sock = await self._open_connection()
                success = True
                break
            except ConnectionRefusedError:
                # other end is not listening yet: back off and retry
                await asyncio.sleep(self._timeout)
            except Exception as e:  # pylint: disable=broad-except # pragma: nocover
                self.last_exception = e
                return False
        return success

    async def _open_connection(self) -> TCPSocketProtocol:
        """Open one TCP connection and wrap it into a TCPSocketProtocol."""
        # NOTE: the explicit `loop` argument is deprecated in modern asyncio.
        reader, writer = await asyncio.open_connection(
            self._host, self._port, loop=self._loop,  # pylint: disable=protected-access
        )
        return TCPSocketProtocol(reader, writer, logger=self.logger, loop=self._loop)

    async def write(self, data: bytes) -> None:
        """
        Write data to channel.

        :param data: bytes to write
        """
        if self._sock is None:
            raise ValueError("Socket pipe not connected.")  # pragma: nocover
        await self._sock.write(data)

    async def read(self) -> Optional[bytes]:
        """
        Read data from channel.

        :return: read bytes
        """
        if self._sock is None:
            raise ValueError("Socket pipe not connected.")  # pragma: nocover
        return await self._sock.read()

    async def close(self) -> None:
        """Disconnect from communication channel."""
        if self._sock is None:
            raise ValueError("Socket pipe not connected.")  # pragma: nocover
        await self._sock.close()
class PosixNamedPipeChannelClient(IPCChannelClient):
    """Interprocess communication channel client using Posix named pipes."""

    def __init__(
        self,
        in_path: str,
        out_path: str,
        logger: logging.Logger = _default_logger,
        loop: Optional[AbstractEventLoop] = None,
    ) -> None:
        """
        Initialize a posix named pipe communication channel client.

        :param in_path: rendezvous point for incoming data
        :param out_path: rendezvous point for outgoing data
        :param logger: the logger
        :param loop: the event loop
        """
        self.logger = logger
        self._loop = loop
        self._in_path = in_path
        self._out_path = out_path
        self._pipe = None  # type: Optional[PosixNamedPipeProtocol]
        # Last exception raised during connect(), kept for callers to inspect.
        self.last_exception: Optional[Exception] = None

    async def connect(self, timeout: float = PIPE_CONN_TIMEOUT) -> bool:
        """
        Connect to the other end of the communication channel.

        :param timeout: timeout for connection to be established
        :return: connection status
        """
        if self._loop is None:
            self._loop = asyncio.get_event_loop()
        self._pipe = PosixNamedPipeProtocol(
            self._in_path, self._out_path, logger=self.logger, loop=self._loop
        )
        try:
            # BUGFIX: forward the caller-supplied timeout; previously it was
            # dropped and the protocol default was always used (compare
            # PosixNamedPipeChannel.connect, which forwards it).
            return await self._pipe.connect(timeout)
        except Exception as e:  # pragma: nocover # pylint: disable=broad-except
            self.last_exception = e
            return False

    async def write(self, data: bytes) -> None:
        """
        Write data to channel.

        :param data: bytes to write
        """
        if self._pipe is None:
            raise ValueError("Pipe not connected.")  # pragma: nocover
        await self._pipe.write(data)

    async def read(self) -> Optional[bytes]:
        """
        Read data from channel.

        :return: read bytes
        """
        if self._pipe is None:
            raise ValueError("Pipe not connected.")  # pragma: nocover
        return await self._pipe.read()

    async def close(self) -> None:
        """Disconnect from communication channel."""
        if self._pipe is None:
            raise ValueError("Pipe not connected.")  # pragma: nocover
        return await self._pipe.close()
def make_ipc_channel(
    logger: logging.Logger = _default_logger, loop: Optional[AbstractEventLoop] = None
) -> IPCChannel:
    """
    Build a portable bidirectional InterProcess Communication channel

    :param logger: the logger
    :param loop: the loop

    :return: IPCChannel
    """
    # dispatch on the host OS: named pipes on posix, TCP sockets on Windows
    channel_cls = {"posix": PosixNamedPipeChannel, "nt": TCPSocketChannel}.get(os.name)
    if channel_cls is None:
        raise NotImplementedError(  # pragma: nocover
            "make ipc channel is not supported on platform {}".format(os.name)
        )
    return channel_cls(logger=logger, loop=loop)
def make_ipc_channel_client(
    in_path: str,
    out_path: str,
    logger: logging.Logger = _default_logger,
    loop: Optional[AbstractEventLoop] = None,
) -> IPCChannelClient:
    """
    Build a portable bidirectional InterProcess Communication client channel

    :param in_path: rendezvous point for incoming communication
    :param out_path: rendezvous point for outgoing outgoing
    :param logger: the logger
    :param loop: the loop

    :return: IPCChannel
    """
    # dispatch on the host OS: named pipes on posix, TCP sockets on Windows
    client_cls = {
        "posix": PosixNamedPipeChannelClient,
        "nt": TCPSocketChannelClient,
    }.get(os.name)
    if client_cls is None:
        raise NotImplementedError(  # pragma: nocover
            "make ip channel client is not supported on platform {}".format(os.name)
        )
    return client_cls(in_path, out_path, logger=logger, loop=loop)
| [
"logging.getLogger",
"os.open",
"os.remove",
"os.path.exists",
"asyncio.StreamReader",
"asyncio.StreamReaderProtocol",
"asyncio.sleep",
"asyncio.get_event_loop",
"os.close",
"os.write",
"struct.pack",
"aea.exceptions.enforce",
"asyncio.open_connection",
"tempfile.mkdtemp",
"struct.unpack... | [((1210, 1237), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1227, 1237), False, 'import logging\n'), ((4405, 4468), 'os.open', 'os.open', (['self._in_path', '(os.O_RDONLY | os.O_NONBLOCK | os.O_SYNC)'], {}), '(self._in_path, os.O_RDONLY | os.O_NONBLOCK | os.O_SYNC)\n', (4412, 4468), False, 'import os\n'), ((4902, 5006), 'aea.exceptions.enforce', 'enforce', (['(self._in != -1 and self._out != -1 and self._loop is not None)', '"""Incomplete initialization."""'], {}), "(self._in != -1 and self._out != -1 and self._loop is not None,\n 'Incomplete initialization.')\n", (4909, 5006), False, 'from aea.exceptions import enforce\n'), ((5068, 5105), 'asyncio.StreamReader', 'asyncio.StreamReader', ([], {'loop': 'self._loop'}), '(loop=self._loop)\n', (5088, 5105), False, 'import asyncio\n'), ((5138, 5204), 'asyncio.StreamReaderProtocol', 'asyncio.StreamReaderProtocol', (['self._stream_reader'], {'loop': 'self._loop'}), '(self._stream_reader, loop=self._loop)\n', (5166, 5204), False, 'import asyncio\n'), ((5251, 5275), 'os.fdopen', 'os.fdopen', (['self._in', '"""r"""'], {}), "(self._in, 'r')\n", (5260, 5275), False, 'import os\n'), ((5933, 5965), 'os.write', 'os.write', (['self._out', '(size + data)'], {}), '(self._out, size + data)\n', (5941, 5965), False, 'import os\n'), ((10998, 11047), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (11011, 11047), False, 'import socket\n'), ((11542, 11557), 'asyncio.Event', 'asyncio.Event', ([], {}), '()\n', (11555, 11557), False, 'import asyncio\n'), ((14099, 14117), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (14115, 14117), False, 'import tempfile\n'), ((14403, 14432), 'os.path.exists', 'os.path.exists', (['self._in_path'], {}), '(self._in_path)\n', (14417, 14432), False, 'import os\n'), ((14502, 14532), 'os.path.exists', 'os.path.exists', (['self._out_path'], {}), '(self._out_path)\n', (14516, 
14532), False, 'import os\n'), ((14600, 14624), 'os.mkfifo', 'os.mkfifo', (['self._in_path'], {}), '(self._in_path)\n', (14609, 14624), False, 'import os\n'), ((14633, 14658), 'os.mkfifo', 'os.mkfifo', (['self._out_path'], {}), '(self._out_path)\n', (14642, 14658), False, 'import os\n'), ((15674, 15696), 'shutil.rmtree', 'rmtree', (['self._pipe_dir'], {}), '(self._pipe_dir)\n', (15680, 15696), False, 'from shutil import rmtree\n'), ((4024, 4048), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (4046, 4048), False, 'import asyncio\n'), ((4507, 4559), 'os.open', 'os.open', (['self._out_path', '(os.O_WRONLY | os.O_NONBLOCK)'], {}), '(self._out_path, os.O_WRONLY | os.O_NONBLOCK)\n', (4514, 4559), False, 'import os\n'), ((5980, 5998), 'asyncio.sleep', 'asyncio.sleep', (['(0.0)'], {}), '(0.0)\n', (5993, 5998), False, 'import asyncio\n'), ((7442, 7462), 'struct.pack', 'struct.pack', (['"""!I"""', '(0)'], {}), "('!I', 0)\n", (7453, 7462), False, 'import struct\n'), ((7475, 7500), 'os.write', 'os.write', (['self._out', 'size'], {}), '(self._out, size)\n', (7483, 7500), False, 'import os\n'), ((7514, 7533), 'os.close', 'os.close', (['self._out'], {}), '(self._out)\n', (7522, 7533), False, 'import os\n'), ((7643, 7659), 'asyncio.sleep', 'asyncio.sleep', (['(0)'], {}), '(0)\n', (7656, 7659), False, 'import asyncio\n'), ((8275, 8299), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (8297, 8299), False, 'import asyncio\n'), ((11490, 11514), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (11512, 11514), False, 'import asyncio\n'), ((11587, 11672), 'asyncio.start_server', 'asyncio.start_server', (['self._handle_connection'], {'host': '"""127.0.0.1"""', 'port': 'self._port'}), "(self._handle_connection, host='127.0.0.1', port=self._port\n )\n", (11607, 11672), False, 'import asyncio\n'), ((14446, 14470), 'os.remove', 'os.remove', (['self._in_path'], {}), '(self._in_path)\n', (14455, 14470), False, 'import os\n'), 
((14546, 14571), 'os.remove', 'os.remove', (['self._out_path'], {}), '(self._out_path)\n', (14555, 14571), False, 'import os\n'), ((15117, 15141), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15139, 15141), False, 'import asyncio\n'), ((17448, 17472), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (17470, 17472), False, 'import asyncio\n'), ((18255, 18319), 'asyncio.open_connection', 'asyncio.open_connection', (['self._host', 'self._port'], {'loop': 'self._loop'}), '(self._host, self._port, loop=self._loop)\n', (18278, 18319), False, 'import asyncio\n'), ((20435, 20459), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (20457, 20459), False, 'import asyncio\n'), ((6505, 6529), 'struct.unpack', 'struct.unpack', (['"""!I"""', 'buf'], {}), "('!I', buf)\n", (6518, 6529), False, 'import struct\n'), ((9248, 9272), 'struct.unpack', 'struct.unpack', (['"""!I"""', 'buf'], {}), "('!I', buf)\n", (9261, 9272), False, 'import struct\n'), ((4759, 4798), 'asyncio.sleep', 'asyncio.sleep', (['self._connection_timeout'], {}), '(self._connection_timeout)\n', (4772, 4798), False, 'import asyncio\n'), ((17954, 17982), 'asyncio.sleep', 'asyncio.sleep', (['self._timeout'], {}), '(self._timeout)\n', (17967, 17982), False, 'import asyncio\n')] |
import scipy, numpy, typing, numbers
from tequila.objective import Objective
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from .optimizer_base import Optimizer
from ._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from collections import namedtuple
from tequila.utils.exceptions import TequilaException
from tequila.circuit.noise import NoiseModel
from tequila.tools.qng import get_qng_combos
class TequilaScipyException(TequilaException):
    """Exception type raised by the SciPy optimizer wrapper."""
# Result bundle of a scipy optimization run: final energy, final angles,
# the recorded optimization history, and the raw scipy OptimizeResult.
SciPyReturnType = namedtuple(
    "SciPyReturnType", ["energy", "angles", "history", "scipy_output"]
)
class OptimizerSciPy(Optimizer):
    """SciPy-backed optimizer for tequila Objectives.

    Wraps ``scipy.optimize.minimize`` and feeds it compiled tequila
    objectives and, when the chosen method needs them, compiled gradients
    and hessians.
    """
    # methods that only require objective evaluations
    gradient_free_methods = ['NELDER-MEAD', 'COBYLA', 'POWELL', 'SLSQP']
    # methods that additionally use a gradient (scipy `jac`)
    gradient_based_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC']
    # methods that additionally use a hessian (scipy `hess`)
    hessian_based_methods = ["TRUST-KRYLOV", "NEWTON-CG", "DOGLEG", "TRUST-NCG", "TRUST-EXACT", "TRUST-CONSTR"]
    @classmethod
    def available_methods(cls):
        """:return: All tested available methods"""
        return cls.gradient_free_methods + cls.gradient_based_methods + cls.hessian_based_methods
    def __init__(self, method: str = "L-BFGS-B",
                 tol: numbers.Real = None,
                 method_options=None,
                 method_bounds=None,
                 method_constraints=None,
                 silent: bool = True,
                 **kwargs):
        """
        Optimize a circuit to minimize a given objective using scipy
        See the Optimizer class for all other parameters to initialize
        :param method: The scipy method passed as string
        :param use_gradient: do gradient based optimization
        :param tol: See scipy documentation for the method you picked
        :param method_options: See scipy documentation for the method you picked
        :param method_bounds: See scipy documentation for the method you picked
        :param method_constraints: See scipy documentation for the method you picked
        :param silent: if False the optimizer print out all evaluated energies
        :param use_gradient: select if gradients shall be used. Can be done automatically for most methods
        """
        super().__init__(**kwargs)
        # scipy method names are matched upper-case; non-string methods pass through
        if hasattr(method, "upper"):
            self.method = method.upper()
        else:
            self.method = method
        self.tol = tol
        self.method_options = method_options
        # normalize bound keys to tequila Variables
        if method_bounds is not None:
            method_bounds = {assign_variable(k): v for k, v in method_bounds.items()}
        self.method_bounds = method_bounds
        self.silent = silent
        if method_options is None:
            self.method_options = {'maxiter': self.maxiter}
        else:
            self.method_options = method_options
            if 'maxiter' not in method_options:
                self.method_options['maxiter'] = self.maxiter
        # scipy's `disp` mirrors the inverse of `silent`
        self.method_options['disp'] = not silent
        if method_constraints is None:
            self.method_constraints = ()
        else:
            self.method_constraints = method_constraints
    def __call__(self, objective: Objective,
                 variables: typing.List[Variable] = None,
                 initial_values: typing.Dict[Variable, numbers.Real] = None,
                 gradient: typing.Dict[Variable, Objective] = None,
                 hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
                 reset_history: bool = True,
                 *args,
                 **kwargs) -> SciPyReturnType:
        """
        Optimizes with scipy and gives back the optimized angles
        Get the optimized energies over the history
        :param objective: The tequila Objective to minimize
        :param initial_values: initial values for the objective
        :param return_scipy_output: chose if the full scipy output shall be returned
        :param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
        :return: tuple of optimized energy ,optimized angles and scipy output
        """
        # --- assemble a human-readable summary printed below (unless silent)
        infostring = "{:15} : {}\n".format("Method", self.method)
        infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
        if gradient is not None:
            infostring += "{:15} : {}\n".format("grad instr", gradient)
        if hessian is not None:
            infostring += "{:15} : {}\n".format("hess_instr", hessian)
        if self.save_history and reset_history:
            self.reset_history()
        # split variables into actively optimized and fixed (passive) ones
        active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
        # Transform the initial value directory into (ordered) arrays
        param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
        # process and initialize scipy bounds
        bounds = None
        if self.method_bounds is not None:
            bounds = {k: None for k in active_angles}
            for k, v in self.method_bounds.items():
                if k in bounds:
                    bounds[k] = v
            infostring += "{:15} : {}\n".format("bounds", self.method_bounds)
            names, bounds = zip(*bounds.items())
            assert (names == param_keys)  # make sure the bounds are not shuffled
        # do the compilation here to avoid costly recompilation during the optimization
        compiled_objective = self.compile_objective(objective=objective)
        # E wraps the compiled objective as the scalar function scipy minimizes
        E = _EvalContainer(objective=compiled_objective,
                           param_keys=param_keys,
                           samples=self.samples,
                           passive_angles=passive_angles,
                           save_history=self.save_history,
                           backend_options=self.backend_options,
                           print_level=self.print_level)
        compile_gradient = self.method in (self.gradient_based_methods + self.hessian_based_methods)
        compile_hessian = self.method in self.hessian_based_methods
        dE = None
        ddE = None
        # detect if numerical gradients shall be used
        # switch off compiling if so
        if isinstance(gradient, str):
            if gradient.lower() == 'qng':
                compile_gradient = False
                if compile_hessian:
                    raise TequilaException('Sorry, QNG and hessian not yet tested together.')
                combos = get_qng_combos(objective, initial_values=initial_values, backend=self.backend,
                                        samples=self.samples, noise=self.noise,
                                        backend_options=self.backend_options)
                dE = _QngContainer(combos=combos, param_keys=param_keys, passive_angles=passive_angles)
                infostring += "{:15} : QNG {}\n".format("gradient", dE)
            else:
                # e.g. '2-point'/'3-point'/'cs': hand the string straight to scipy
                dE = gradient
                compile_gradient = False
                if compile_hessian:
                    compile_hessian = False
                    if hessian is None:
                        hessian = gradient
                infostring += "{:15} : scipy numerical {}\n".format("gradient", dE)
                infostring += "{:15} : scipy numerical {}\n".format("hessian", ddE)
        if isinstance(hessian, str):
            ddE = hessian
            compile_hessian = False
        if compile_gradient:
            grad_obj, comp_grad_obj = self.compile_gradient(objective=objective, variables=variables, gradient=gradient)
            expvals = sum([o.count_expectationvalues() for o in comp_grad_obj.values()])
            infostring += "{:15} : {} expectationvalues\n".format("gradient", expvals)
            dE = _GradContainer(objective=comp_grad_obj,
                                param_keys=param_keys,
                                samples=self.samples,
                                passive_angles=passive_angles,
                                save_history=self.save_history,
                                print_level=self.print_level,
                                backend_options=self.backend_options)
        if compile_hessian:
            # note: requires grad_obj/comp_grad_obj from the branch above,
            # i.e. compile_hessian implies compile_gradient here
            hess_obj, comp_hess_obj = self.compile_hessian(variables=variables,
                                                           hessian=hessian,
                                                           grad_obj=grad_obj,
                                                           comp_grad_obj=comp_grad_obj)
            expvals = sum([o.count_expectationvalues() for o in comp_hess_obj.values()])
            infostring += "{:15} : {} expectationvalues\n".format("hessian", expvals)
            ddE = _HessContainer(objective=comp_hess_obj,
                                 param_keys=param_keys,
                                 samples=self.samples,
                                 passive_angles=passive_angles,
                                 save_history=self.save_history,
                                 print_level=self.print_level,
                                 backend_options=self.backend_options)
        if self.print_level > 0:
            print(self)
            print(infostring)
            print("{:15} : {}\n".format("active variables", len(active_angles)))
        # extra positional payload forwarded to E via scipy's `args`;
        # presumably consumed by _EvalContainer.__call__ — confirm there
        Es = []
        # Records per-iteration history. The class-level lists are effectively
        # per-optimization because the class object is re-created on each call.
        class SciPyCallback:
            energies = []
            gradients = []
            hessians = []
            angles = []
            real_iterations = 0
            def __call__(self, *args, **kwargs):
                self.energies.append(E.history[-1])
                self.angles.append(E.history_angles[-1])
                if dE is not None and not isinstance(dE, str):
                    self.gradients.append(dE.history[-1])
                if ddE is not None and not isinstance(ddE, str):
                    self.hessians.append(ddE.history[-1])
                self.real_iterations += 1
        callback = SciPyCallback()
        res = scipy.optimize.minimize(E, x0=param_values, jac=dE, hess=ddE,
                                      args=(Es,),
                                      method=self.method, tol=self.tol,
                                      bounds=bounds,
                                      constraints=self.method_constraints,
                                      options=self.method_options,
                                      callback=callback)
        # failsafe since callback is not implemented everywhere
        # NOTE(review): this assigns an unused local and therefore has no
        # effect; it likely intended to rebuild the callback history from
        # E.history (the `len(self.history.energies) == 0` fallback below
        # covers the energies/angles case).
        if callback.real_iterations == 0:
            real_iterations = range(len(E.history))
        if self.save_history:
            self.history.energies = callback.energies
            self.history.energy_evaluations = E.history
            self.history.angles = callback.angles
            self.history.angles_evaluations = E.history_angles
            self.history.gradients = callback.gradients
            self.history.hessians = callback.hessians
            if dE is not None and not isinstance(dE, str):
                self.history.gradients_evaluations = dE.history
            if ddE is not None and not isinstance(ddE, str):
                self.history.hessians_evaluations = ddE.history
            # some methods like "cobyla" do not support callback functions
            if len(self.history.energies) == 0:
                self.history.energies = E.history
                self.history.angles = E.history_angles
        E_final = res.fun
        angles_final = dict((param_keys[i], res.x[i]) for i in range(len(param_keys)))
        # merge optimized actives back with the untouched passive angles
        angles_final = {**angles_final, **passive_angles}
        return SciPyReturnType(energy=E_final, angles=format_variable_dictionary(angles_final), history=self.history,
                               scipy_output=res)
def available_methods(energy=True, gradient=True, hessian=True) -> typing.List[str]:
    """Convenience helper listing the available scipy optimizer methods.

    Parameters
    ----------
    energy :
        include gradient-free methods (Default value = True)
    gradient :
        include gradient-based methods (Default value = True)
    hessian :
        include hessian-based methods (Default value = True)

    Returns
    -------
    The selected method names, ordered gradient-free, gradient-based,
    hessian-based.
    """
    selected: typing.List[str] = []
    if energy:
        selected.extend(OptimizerSciPy.gradient_free_methods)
    if gradient:
        selected.extend(OptimizerSciPy.gradient_based_methods)
    if hessian:
        selected.extend(OptimizerSciPy.hessian_based_methods)
    return selected
def minimize(objective: Objective,
gradient: typing.Union[str, typing.Dict[Variable, Objective]] = None,
hessian: typing.Union[str, typing.Dict[typing.Tuple[Variable, Variable], Objective]] = None,
initial_values: typing.Dict[typing.Hashable, numbers.Real] = None,
variables: typing.List[typing.Hashable] = None,
samples: int = None,
maxiter: int = 100,
backend: str = None,
backend_options: dict = None,
noise: NoiseModel = None,
method: str = "BFGS",
tol: float = 1.e-3,
method_options: dict = None,
method_bounds: typing.Dict[typing.Hashable, numbers.Real] = None,
method_constraints=None,
silent: bool = False,
save_history: bool = True,
*args,
**kwargs) -> SciPyReturnType:
"""
Parameters
----------
objective: Objective :
The tequila objective to optimize
gradient: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
'2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
dictionary of variables and tequila objective to define own gradient,
None for automatic construction (default)
Other options include 'qng' to use the quantum natural gradient.
hessian: typing.Union[str, typing.Dict[Variable, Objective], None] : (Default value = None) :
'2-point', 'cs' or '3-point' for numerical gradient evaluation (does not work in combination with all optimizers),
dictionary (keys:tuple of variables, values:tequila objective) to define own gradient,
None for automatic construction (default)
initial_values: typing.Dict[typing.Hashable, numbers.Real]: (Default value = None):
Initial values as dictionary of Hashable types (variable keys) and floating point numbers. If given None they will all be set to zero
variables: typing.List[typing.Hashable] :
(Default value = None)
List of Variables to optimize
samples: int :
(Default value = None)
samples/shots to take in every run of the quantum circuits (None activates full wavefunction simulation)
maxiter: int :
(Default value = 100)
backend: str :
(Default value = None)
Simulator backend, will be automatically chosen if set to None
backend_options: dict:
(Default value = None)
Additional options for the backend
Will be unpacked and passed to the compiled objective in every call
noise: NoiseModel:
(Default value =None)
a NoiseModel to apply to all expectation values in the objective.
method: str :
(Default value = "BFGS")
Optimization method (see scipy documentation, or 'available methods')
tol: float :
(Default value = 1.e-3)
Convergence tolerance for optimization (see scipy documentation)
method_options: dict :
(Default value = None)
Dictionary of options
(see scipy documentation)
method_bounds: typing.Dict[typing.Hashable, typing.Tuple[float, float]]:
(Default value = None)
bounds for the variables (see scipy documentation)
method_constraints :
(Default value = None)
(see scipy documentation
silent: bool :
(Default value = False)
No printout if True
save_history: bool:
(Default value = True)
Save the history throughout the optimization
Returns
-------
"""
if isinstance(gradient, dict) or hasattr(gradient, "items"):
if all([isinstance(x, Objective) for x in gradient.values()]):
gradient = format_variable_dictionary(gradient)
if isinstance(hessian, dict) or hasattr(hessian, "items"):
if all([isinstance(x, Objective) for x in hessian.values()]):
hessian = {(assign_variable(k[0]), assign_variable([k[1]])): v for k, v in hessian.items()}
method_bounds = format_variable_dictionary(method_bounds)
# set defaults
optimizer = OptimizerSciPy(save_history=save_history,
maxiter=maxiter,
method=method,
method_options=method_options,
method_bounds=method_bounds,
method_constraints=method_constraints,
silent=silent,
backend=backend,
backend_options=backend_options,
samples=samples,
noise_model=noise,
tol=tol,
*args,
**kwargs)
if initial_values is not None:
initial_values = {assign_variable(k): v for k, v in initial_values.items()}
return optimizer(objective=objective,
gradient=gradient,
hessian=hessian,
initial_values=initial_values,
variables=variables, *args, **kwargs)
| [
"collections.namedtuple",
"tequila.objective.objective.format_variable_dictionary",
"tequila.tools.qng.get_qng_combos",
"scipy.optimize.minimize",
"numpy.array",
"tequila.utils.exceptions.TequilaException",
"tequila.objective.objective.assign_variable"
] | [((587, 654), 'collections.namedtuple', 'namedtuple', (['"""SciPyReturnType"""', '"""energy angles history scipy_output"""'], {}), "('SciPyReturnType', 'energy angles history scipy_output')\n", (597, 654), False, 'from collections import namedtuple\n'), ((16511, 16552), 'tequila.objective.objective.format_variable_dictionary', 'format_variable_dictionary', (['method_bounds'], {}), '(method_bounds)\n', (16537, 16552), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((4853, 4878), 'numpy.array', 'numpy.array', (['param_values'], {}), '(param_values)\n', (4864, 4878), False, 'import scipy, numpy, typing, numbers\n'), ((9994, 10210), 'scipy.optimize.minimize', 'scipy.optimize.minimize', (['E'], {'x0': 'param_values', 'jac': 'dE', 'hess': 'ddE', 'args': '(Es,)', 'method': 'self.method', 'tol': 'self.tol', 'bounds': 'bounds', 'constraints': 'self.method_constraints', 'options': 'self.method_options', 'callback': 'callback'}), '(E, x0=param_values, jac=dE, hess=ddE, args=(Es,),\n method=self.method, tol=self.tol, bounds=bounds, constraints=self.\n method_constraints, options=self.method_options, callback=callback)\n', (10017, 10210), False, 'import scipy, numpy, typing, numbers\n'), ((16217, 16253), 'tequila.objective.objective.format_variable_dictionary', 'format_variable_dictionary', (['gradient'], {}), '(gradient)\n', (16243, 16253), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((17354, 17372), 'tequila.objective.objective.assign_variable', 'assign_variable', (['k'], {}), '(k)\n', (17369, 17372), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((2505, 2523), 'tequila.objective.objective.assign_variable', 'assign_variable', (['k'], {}), '(k)\n', (2520, 2523), False, 'from tequila.objective.objective import assign_variable, 
Variable, format_variable_dictionary, format_variable_list\n'), ((6505, 6671), 'tequila.tools.qng.get_qng_combos', 'get_qng_combos', (['objective'], {'initial_values': 'initial_values', 'backend': 'self.backend', 'samples': 'self.samples', 'noise': 'self.noise', 'backend_options': 'self.backend_options'}), '(objective, initial_values=initial_values, backend=self.\n backend, samples=self.samples, noise=self.noise, backend_options=self.\n backend_options)\n', (6519, 6671), False, 'from tequila.tools.qng import get_qng_combos\n'), ((11659, 11699), 'tequila.objective.objective.format_variable_dictionary', 'format_variable_dictionary', (['angles_final'], {}), '(angles_final)\n', (11685, 11699), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((6411, 6478), 'tequila.utils.exceptions.TequilaException', 'TequilaException', (['"""Sorry, QNG and hessian not yet tested together."""'], {}), "('Sorry, QNG and hessian not yet tested together.')\n", (6427, 6478), False, 'from tequila.utils.exceptions import TequilaException\n'), ((16411, 16432), 'tequila.objective.objective.assign_variable', 'assign_variable', (['k[0]'], {}), '(k[0])\n', (16426, 16432), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n'), ((16434, 16457), 'tequila.objective.objective.assign_variable', 'assign_variable', (['[k[1]]'], {}), '([k[1]])\n', (16449, 16457), False, 'from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list\n')] |
#
# This file is part of the chi repository
# (https://github.com/DavAug/chi/) which is released under the
# BSD 3-clause license. See accompanying LICENSE.md for copyright notice and
# full license details.
#
import copy
import myokit
import myokit.formats.sbml as sbml
import numpy as np
class MechanisticModel(object):
    """
    A base class for models that are specified by sbml files.
    Parameters
    ----------
    sbml_file
        A path to the SBML model file that specifies the model.
    """
    def __init__(self, sbml_file):
        super(MechanisticModel, self).__init__()
        # Import model
        self._model = sbml.SBMLImporter().model(sbml_file)
        # Set default number and names of states, parameters and outputs.
        self._set_number_and_names()
        # Get time unit
        self._time_unit = self._get_time_unit()
        # Create simulator without sensitivities
        # (intentionally public property)
        self.simulator = myokit.Simulation(self._model)
        self._has_sensitivities = False
    def _get_time_unit(self):
        """
        Gets the model's time unit.
        """
        # Get bound variables
        bound_variables = [var for var in self._model.variables(bound=True)]
        # Get the variable that is bound to time
        # (only one can exist in myokit.Model)
        # NOTE(review): this reads the private ``_binding`` attribute; the
        # public myokit API would be ``var.binding()`` — confirm before
        # changing.
        for var in bound_variables:
            if var._binding == 'time':
                return var.unit()
    def _set_const(self, parameters):
        """
        Sets values of constant model parameters.
        """
        # Constant parameters follow the same (sorted) order as
        # self._const_names.
        for id_var, var in enumerate(self._const_names):
            self.simulator.set_constant(var, float(parameters[id_var]))
    def _set_state(self, parameters):
        """
        Sets initial values of states.
        """
        parameters = np.array(parameters)
        # Map the sorted (display) order back to myokit's internal state
        # order before handing the values to the simulator.
        parameters = parameters[self._original_order]
        self.simulator.set_state(parameters)
    def _set_number_and_names(self):
        """
        Sets the number of states, parameters and outputs, as well as their
        names. If the model is ``None`` the self._model is taken.
        """
        # Get the number of states and parameters
        self._n_states = self._model.count_states()
        n_const = self._model.count_variables(const=True)
        self._n_parameters = self._n_states + n_const
        # Get constant variable names and state names
        names = [var.qname() for var in self._model.states()]
        self._state_names = sorted(names)
        self._const_names = sorted(
            [var.qname() for var in self._model.variables(const=True)])
        # Remember original order of state names for simulation
        # (argsort of an argsort yields the inverse permutation, i.e. the
        # position of each original state within the sorted name list)
        order_after_sort = np.argsort(names)
        self._original_order = np.argsort(order_after_sort)
        # Set default parameter names
        self._parameter_names = self._state_names + self._const_names
        # Set default outputs
        self._output_names = self._state_names
        self._n_outputs = self._n_states
        # Create references of displayed parameter and output names to
        # orginal myokit names (defaults to identity map)
        # (Key: myokit name, value: displayed name)
        self._parameter_name_map = dict(
            zip(self._parameter_names, self._parameter_names))
        self._output_name_map = dict(
            zip(self._output_names, self._output_names))
    def copy(self):
        """
        Returns a deep copy of the mechanistic model.
        .. note::
            Copying the model resets the sensitivity settings.
        """
        # Copy model manually and get protocol
        myokit_model = self._model.clone()
        protocol = self.simulator._protocol
        # Copy the mechanistic model
        model = copy.deepcopy(self)
        # Replace myokit model by safe copy and create simulator
        model._model = myokit_model
        model.simulator = myokit.Simulation(myokit_model, protocol)
        return model
    def enable_sensitivities(self, enabled, parameter_names=None):
        """
        Enables the computation of the model output sensitivities to the model
        parameters if set to ``True``.
        The sensitivities are computed using the forward sensitivities method,
        where an ODE for each sensitivity is derived. The sensitivities are
        returned together with the solution to the original system of ODEs when
        simulating the mechanistic model :meth:`simulate`.
        The optional parameter names argument can be used to set which
        sensitivities are computed. By default the sensitivities to all
        parameters are computed.
        :param enabled: A boolean flag which enables (``True``) / disables
            (``False``) the computation of sensitivities.
        :type enabled: bool
        :param parameter_names: A list of parameter names of the model. If
            ``None`` sensitivities for all parameters are computed.
        :type parameter_names: list[str], optional
        """
        enabled = bool(enabled)
        # Get dosing regimen from existing simulator
        protocol = self.simulator._protocol
        if not enabled:
            if self._has_sensitivities:
                # Disable sensitivities
                sim = myokit.Simulation(self._model, protocol)
                self.simulator = sim
                self._has_sensitivities = False
                return None
            # Sensitivities are already disabled
            return None
        # Get parameters whose output sensitivities are computed
        parameters = []
        for param_id, param in enumerate(self._parameter_names):
            if param_id < self._n_states:
                # Convert initial value parameters to the correct syntax
                parameters.append('init(' + param + ')')
                continue
            # Other parameters can be appended without modification
            parameters.append(param)
        if parameter_names is not None:
            # Get myokit names for input parameter names
            # (relies on dict preserving the insertion order of
            # self._parameter_names, so indices line up)
            container = []
            for index, public_name in enumerate(
                    self._parameter_name_map.values()):
                if public_name in parameter_names:
                    container.append(parameters[index])
            parameters = container
        if not parameters:
            raise ValueError(
                'None of the parameters could be identified. The valid '
                'parameter names are <' + str(self._parameter_names) + '>.')
        # Create simulator
        sensitivities = (self._output_names, parameters)
        sim = myokit.Simulation(self._model, protocol, sensitivities)
        # Update simulator and sensitivity state
        self.simulator = sim
        self._has_sensitivities = True
    def has_sensitivities(self):
        """
        Returns a boolean indicating whether sensitivities have been enabled.
        """
        return self._has_sensitivities
    def n_outputs(self):
        """
        Returns the number of output dimensions.
        By default this is the number of states.
        """
        return self._n_outputs
    def n_parameters(self):
        """
        Returns the number of parameters in the model.
        Parameters of the model are initial state values and structural
        parameter values.
        """
        return self._n_parameters
    def outputs(self):
        """
        Returns the output names of the model.
        """
        # Get user specified output names
        output_names = [
            self._output_name_map[name] for name in self._output_names]
        return output_names
    def parameters(self):
        """
        Returns the parameter names of the model.
        """
        # Get user specified parameter names
        parameter_names = [
            self._parameter_name_map[name] for name in self._parameter_names]
        return parameter_names
    def set_outputs(self, outputs):
        """
        Sets outputs of the model.
        The outputs can be set to any quantifiable variable name of the
        :class:`myokit.Model`, e.g. `compartment.variable`.
        .. note::
            Setting outputs resets the sensitivity settings (by default
            sensitivities are disabled.)
        :param outputs:
            A list of output names.
        :type outputs: list[str]
        """
        outputs = list(outputs)
        # Translate public names to myokit names, if set previously
        for myokit_name, public_name in self._output_name_map.items():
            if public_name in outputs:
                # Replace public name by myokit name
                index = outputs.index(public_name)
                outputs[index] = myokit_name
        # Check that outputs are valid
        for output in outputs:
            try:
                var = self.simulator._model.get(output)
                if not (var.is_state() or var.is_intermediary()):
                    raise ValueError(
                        'Outputs have to be state or intermediary variables.')
            except KeyError:
                raise KeyError(
                    'The variable <' + str(output) + '> does not exist in the '
                    'model.')
        # Remember outputs
        self._output_names = outputs
        self._n_outputs = len(outputs)
        # Create an updated output name map
        output_name_map = {}
        for myokit_name in self._output_names:
            try:
                output_name_map[myokit_name] = self._output_name_map[
                    myokit_name]
            except KeyError:
                # The output did not exist before, so create an identity map
                output_name_map[myokit_name] = myokit_name
        self._output_name_map = output_name_map
        # Disable sensitivities
        self.enable_sensitivities(False)
    def set_output_names(self, names):
        """
        Assigns names to the model outputs. By default the
        :class:`myokit.Model` names are assigned to the outputs.
        :param names: A dictionary that maps the current output names to new
            names.
        :type names: dict[str, str]
        """
        if not isinstance(names, dict):
            # Bug fix: the two adjacent literals previously concatenated to
            # "output namesas keys" (missing separating space).
            raise TypeError(
                'Names has to be a dictionary with the current output names '
                'as keys and the new output names as values.')
        # Check that new output names are unique
        new_names = list(names.values())
        n_unique_new_names = len(set(names.values()))
        if len(new_names) != n_unique_new_names:
            raise ValueError(
                'The new output names have to be unique.')
        # Check that new output names do not exist already
        for new_name in new_names:
            if new_name in list(self._output_name_map.values()):
                raise ValueError(
                    'The output names cannot coincide with existing '
                    'output names. One output is already called '
                    '<' + str(new_name) + '>.')
        # Replace currently displayed names by new names
        for myokit_name in self._output_names:
            old_name = self._output_name_map[myokit_name]
            try:
                new_name = names[old_name]
                self._output_name_map[myokit_name] = str(new_name)
            except KeyError:
                # KeyError indicates that the current output is not being
                # renamed.
                pass
    def set_parameter_names(self, names):
        """
        Assigns names to the parameters. By default the :class:`myokit.Model`
        names are assigned to the parameters.
        :param names: A dictionary that maps the current parameter names to new
            names.
        :type names: dict[str, str]
        """
        if not isinstance(names, dict):
            # Bug fix: the two adjacent literals previously concatenated to
            # "parameter namesas keys" (missing separating space).
            raise TypeError(
                'Names has to be a dictionary with the current parameter '
                'names as keys and the new parameter names as values.')
        # Check that new parameter names are unique
        new_names = list(names.values())
        n_unique_new_names = len(set(names.values()))
        if len(new_names) != n_unique_new_names:
            raise ValueError(
                'The new parameter names have to be unique.')
        # Check that new parameter names do not exist already
        for new_name in new_names:
            if new_name in list(self._parameter_name_map.values()):
                raise ValueError(
                    'The parameter names cannot coincide with existing '
                    'parameter names. One parameter is already called '
                    '<' + str(new_name) + '>.')
        # Replace currently displayed names by new names
        for myokit_name in self._parameter_names:
            old_name = self._parameter_name_map[myokit_name]
            try:
                new_name = names[old_name]
                self._parameter_name_map[myokit_name] = str(new_name)
            except KeyError:
                # KeyError indicates that the current parameter is not being
                # renamed.
                pass
    def simulate(self, parameters, times):
        """
        Returns the numerical solution of the model outputs (and optionally
        the sensitivites) for the specified parameters and times.
        The model outputs are returned as a 2 dimensional NumPy array of shape
        (n_outputs, n_times). If sensitivities are enabled, a tuple is returned
        with the NumPy array of the model outputs and a NumPy array of the
        sensitivities of shape (n_times, n_outputs, n_parameters).
        :param parameters: An array-like object with values for the model
            parameters.
        :type parameters: list, numpy.ndarray
        :param times: An array-like object with time points at which the output
            values are returned.
        :type times: list, numpy.ndarray
        """
        # Reset simulation
        self.simulator.reset()
        # Set initial conditions
        self._set_state(parameters[:self._n_states])
        # Set constant model parameters
        self._set_const(parameters[self._n_states:])
        # Simulate
        # (run just past the final time point; assumes ``times`` is ordered
        # ascending — TODO confirm against callers)
        if not self._has_sensitivities:
            output = self.simulator.run(
                times[-1] + 1, log=self._output_names, log_times=times)
            output = np.array([output[name] for name in self._output_names])
            return output
        output, sensitivities = self.simulator.run(
            times[-1] + 1, log=self._output_names, log_times=times)
        output = np.array([output[name] for name in self._output_names])
        sensitivities = np.array(sensitivities)
        return output, sensitivities
    def time_unit(self):
        """
        Returns the model's unit of time.
        """
        return self._time_unit
class PharmacodynamicModel(MechanisticModel):
    """
    Converts a pharmacodynamic model specified by an SBML file into a forward
    model that can be solved numerically.
    Extends :class:`MechanisticModel`.
    Parameters
    ----------
    sbml_file
        A path to the SBML model file that specifies the pharmacodynamic model.
    """
    def __init__(self, sbml_file):
        super(PharmacodynamicModel, self).__init__(sbml_file)
        # Pre-select the conventional drug concentration variable as the
        # pharmacokinetic input, if the model defines one; otherwise leave
        # the input unset.
        candidate = 'myokit.drug_concentration'
        self._pk_input = (
            candidate if self._model.has_variable(candidate) else None)
    def pk_input(self):
        """
        Returns the pharmacokinetic input variable. In most models this will be
        the concentration of the drug.
        Defaults to ``None`` or ``myokit.drug_concentration`` if the latter is
        among the model parameters.
        """
        return self._pk_input
    def set_pk_input(self, name):
        """
        Sets the pharmacokinetic input variable. In most models this will be
        the concentration of the drug.
        The name has to match a parameter of the model.
        """
        # Accept only names that are known model parameters.
        if name in self._parameter_names:
            self._pk_input = name
            return
        raise ValueError(
            'The name does not match a model parameter.')
class PharmacokineticModel(MechanisticModel):
    """
    Converts a pharmacokinetic model specified by an SBML file into a forward
    model that can be solved numerically.
    Extends :class:`MechanisticModel`.
    Parameters
    ----------
    sbml_file
        A path to the SBML model file that specifies the pharmacokinetic model.
    """
    def __init__(self, sbml_file):
        super(PharmacokineticModel, self).__init__(sbml_file)
        # Set default dose administration
        self._administration = None
        # Safe vanilla model
        # (an unmodified clone, so set_administration can always rebuild the
        # dosing machinery starting from a clean model)
        self._vanilla_model = self._model.clone()
        # Set default output variable that interacts with the pharmacodynamic
        # model
        # (Typically drug concentration in central compartment)
        self._pd_output = None
        if self._model.has_variable('central.drug_concentration'):
            self._pd_output = 'central.drug_concentration'
        # Set default output to pd output if not None
        if self._pd_output is not None:
            self.set_outputs([self._pd_output])
    def _add_dose_compartment(self, model, drug_amount):
        """
        Adds a dose compartment to the model with a linear absorption rate to
        the connected compartment.
        Returns the modified model and the drug amount variable of the newly
        added dose compartment.
        """
        # Add a dose compartment to the model
        dose_comp = model.add_component_allow_renaming('dose')
        # Create a state variable for the drug amount in the dose compartment
        dose_drug_amount = dose_comp.add_variable('drug_amount')
        dose_drug_amount.set_rhs(0)
        dose_drug_amount.set_unit(drug_amount.unit())
        # NOTE(review): the rhs set before promotion presumably acts as the
        # initial state value — confirm against the myokit documentation.
        dose_drug_amount.promote()
        # Create an absorption rate variable
        absorption_rate = dose_comp.add_variable('absorption_rate')
        absorption_rate.set_rhs(1)
        absorption_rate.set_unit(1 / self.time_unit())
        # Add outflow expression to dose compartment
        # (dA_d/dt = -k_a * A_d)
        dose_drug_amount.set_rhs(
            myokit.Multiply(
                myokit.PrefixMinus(myokit.Name(absorption_rate)),
                myokit.Name(dose_drug_amount)
            )
        )
        # Add inflow expression to connected compartment
        # (dA/dt = RHS + k_a * A_d)
        rhs = drug_amount.rhs()
        drug_amount.set_rhs(
            myokit.Plus(
                rhs,
                myokit.Multiply(
                    myokit.Name(absorption_rate),
                    myokit.Name(dose_drug_amount)
                )
            )
        )
        # Update number of parameters and states, as well as their names
        self._model = model
        self._set_number_and_names()
        # Set default output to pd_output if it is not None
        # (the output list was reset by _set_number_and_names)
        if self._pd_output is not None:
            self.set_outputs([self._pd_output])
        return model, dose_drug_amount
    def _add_dose_rate(self, compartment, drug_amount):
        """
        Adds a dose rate variable to the state variable, which is bound to the
        dosing regimen.
        """
        # Register a dose rate variable to the compartment and bind it to
        # pace, i.e. tell myokit that its value is set by the dosing regimen/
        # myokit.Protocol
        dose_rate = compartment.add_variable_allow_renaming(
            str('dose_rate'))
        dose_rate.set_binding('pace')
        # Set initial value to 0 and unit to unit of drug amount over unit of
        # time
        dose_rate.set_rhs(0)
        dose_rate.set_unit(drug_amount.unit() / self.time_unit())
        # Add the dose rate to the rhs of the drug amount variable
        # (dA/dt = RHS + r_d)
        rhs = drug_amount.rhs()
        drug_amount.set_rhs(
            myokit.Plus(
                rhs,
                myokit.Name(dose_rate)
            )
        )
    def administration(self):
        """
        Returns the mode of administration in form of a dictionary.
        The dictionary has the keys 'compartment' and 'direct'. The former
        provides information about which compartment is dosed, and the latter
        whether the dose is administered directly ot indirectly to the
        compartment.
        Returns ``None`` if the administration has not been set.
        """
        return self._administration
    def dosing_regimen(self):
        """
        Returns the dosing regimen of the compound in form of a
        :class:`myokit.Protocol`. If the protocol has not been set, ``None`` is
        returned.
        """
        return self.simulator._protocol
    def set_administration(
            self, compartment, amount_var='drug_amount', direct=True):
        r"""
        Sets the route of administration of the compound.
        The compound is administered to the selected compartment either
        directly or indirectly. If it is administered directly, a dose rate
        variable is added to the drug amount's rate of change expression
        .. math ::
            \frac{\text{d}A}{\text{d}t} = \text{RHS} + r_d,
        where :math:`A` is the drug amount in the selected compartment, RHS is
        the rate of change of :math:`A` prior to adding the dose rate, and
        :math:`r_d` is the dose rate.
        The dose rate can be set by :meth:`set_dosing_regimen`.
        If the route of administration is indirect, a dosing compartment
        is added to the model, which is connected to the selected compartment.
        The dose rate variable is then added to the rate of change expression
        of the dose amount variable in the dosing compartment. The drug amount
        in the dosing compartment flows at a linear absorption rate into the
        selected compartment
        .. math ::
            \frac{\text{d}A_d}{\text{d}t} = -k_aA_d + r_d \\
            \frac{\text{d}A}{\text{d}t} = \text{RHS} + k_aA_d,
        where :math:`A_d` is the amount of drug in the dose compartment and
        :math:`k_a` is the absorption rate.
        Setting an indirect administration route changes the number of
        parameters of the model, and resets the parameter names to their
        defaults.
        .. note:
            Setting the route of administration will reset the sensitivity
            settings.
        :param compartment: Compartment to which doses are either directly or
            indirectly administered.
        :type compartment: str
        :param amount_var: Drug amount variable in the compartment. By default
            the drug amount variable is assumed to be 'drug_amount'.
        :type amount_var: str, optional
        :param direct: A boolean flag that indicates whether the dose is
            administered directly or indirectly to the compartment.
        :type direct: bool, optional
        """
        # Check inputs
        # (always start from the unmodified model, so repeated calls do not
        # stack dose compartments or dose rate variables)
        model = self._vanilla_model.clone()
        if not model.has_component(compartment):
            raise ValueError(
                'The model does not have a compartment named <'
                + str(compartment) + '>.')
        comp = model.get(compartment, class_filter=myokit.Component)
        if not comp.has_variable(amount_var):
            raise ValueError(
                'The drug amount variable <' + str(amount_var) + '> could not '
                'be found in the compartment.')
        drug_amount = comp.get(amount_var)
        if not drug_amount.is_state():
            raise ValueError(
                'The variable <' + str(drug_amount) + '> is not a state '
                'variable, and can therefore not be dosed.')
        # If administration is indirect, add a dosing compartment and update
        # the drug amount variable to the one in the dosing compartment
        if not direct:
            model, drug_amount = self._add_dose_compartment(model, drug_amount)
            comp = model.get(compartment, class_filter=myokit.Component)
        # Add dose rate variable to the right hand side of the drug amount
        self._add_dose_rate(comp, drug_amount)
        # Update model and simulator
        # (otherwise simulator won't know about pace bound variable)
        self._model = model
        self.simulator = myokit.Simulation(model)
        # Recreating the simulator discards any sensitivity configuration.
        self._has_sensitivities = False
        # Remember type of administration
        self._administration = dict(
            {'compartment': compartment, 'direct': direct})
    def set_dosing_regimen(
            self, dose, start, duration=0.01, period=None, num=None):
        """
        Sets the dosing regimen with which the compound is administered.
        The route of administration can be set with :meth:`set_administration`.
        However, the type of administration, e.g. bolus injection or infusion,
        may be controlled with the duration input.
        By default the dose is administered as a bolus injection (duration on
        a time scale that is 100 fold smaller than the basic time unit). To
        model an infusion of the dose over a longer time period, the
        ``duration`` can be adjusted to the appropriate time scale.
        By default the dose is administered once. To apply multiple doses
        provide a dose administration period.
        Parameters
        ----------
        dose
            The amount of the compound that is injected at each administration.
        start
            Start time of the treatment.
        duration
            Duration of dose administration. For a bolus injection, a dose
            duration of 1% of the time unit should suffice. By default the
            duration is set to 0.01 (bolus).
        period
            Periodicity at which doses are administered. If ``None`` the dose
            is administered only once.
        num
            Number of administered doses. If ``None`` and the periodicity of
            the administration is not ``None``, doses are administered
            indefinitely.
        """
        if self._administration is None:
            raise ValueError(
                'The route of administration of the dose has not been set.')
        if num is None:
            # Myokits default is zero, i.e. infinitely many doses
            num = 0
        if period is None:
            # If period is not provided, we administer a single dose
            # Myokits defaults are 0s for that.
            period = 0
            num = 0
        # Translate dose to dose rate
        # (the pace-bound dose_rate variable carries amount/time)
        dose_rate = dose / duration
        # Set dosing regimen
        dosing_regimen = myokit.pacing.blocktrain(
            period=period, duration=duration, offset=start, level=dose_rate,
            limit=num)
        self.simulator.set_protocol(dosing_regimen)
    def pd_output(self):
        """
        Returns the variable which interacts with the pharmacodynamic model.
        In most models this will be the concentration of the drug in the
        central compartment.
        This variable is mapped to the
        :meth:`chi.PharmacodynamicModel.pk_input` variable when a
        :class:`PKPDModel` is instantiated.
        Defaults to ``None`` or ``central.drug_concentration`` if the latter is
        among the model parameters.
        """
        return self._pd_output
    def set_pd_output(self, name):
        """
        Sets the variable which interacts with the pharmacodynamic model.
        In most models this will be the concentration of the drug in the
        central compartment.
        The name has to match a parameter of the model.
        This variable is mapped to the
        :meth:`chi.PharmacodynamicModel.pk_input` variable when a
        :class:`PKPDModel` is instantiated.
        """
        # Get intermediate variable names
        # (intermediary variables, e.g. concentrations, are also valid
        # outputs, so they are accepted in addition to parameters)
        inter_names = [
            var.qname() for var in self._model.variables(inter=True)]
        names = inter_names + self._parameter_names
        if name not in names:
            raise ValueError(
                'The name does not match a model variable.')
        self._pd_output = name
class ReducedMechanisticModel(object):
    """
    A class that can be used to permanently fix model parameters of a
    :class:`MechanisticModel` instance.

    This may be useful to explore simplified versions of a model before
    defining a new SBML file.

    Parameters
    ----------
    mechanistic_model
        An instance of a :class:`MechanisticModel`.
    """

    def __init__(self, mechanistic_model):
        super(ReducedMechanisticModel, self).__init__()

        # Check input
        if not isinstance(mechanistic_model, MechanisticModel):
            raise ValueError(
                'The mechanistic model has to be an instance of a '
                'chi.MechanisticModel')

        self._mechanistic_model = mechanistic_model
        self.simulator = mechanistic_model.simulator

        # Set defaults: no parameters are fixed initially.
        # _fixed_params_mask is a boolean array over all model parameters
        # (True = fixed); _fixed_params_values holds the fixed values.
        self._fixed_params_mask = None
        self._fixed_params_values = None
        self._n_parameters = mechanistic_model.n_parameters()
        self._parameter_names = mechanistic_model.parameters()

    def copy(self):
        """
        Returns a deep copy of the reduced model.

        .. note::
            Copying the model resets the sensitivity settings.
        """
        # Get a safe copy of the mechanistic model
        mechanistic_model = self._mechanistic_model.copy()

        # Copy the reduced model
        # (this possibly corrupts the mechanistic model and the simulator)
        model = copy.deepcopy(self)

        # Replace mechanistic model and simulator with the safe copies
        model._mechanistic_model = mechanistic_model
        model.simulator = mechanistic_model.simulator

        return model

    def dosing_regimen(self):
        """
        Returns the dosing regimen of the compound in form of a
        :class:`myokit.Protocol`. If the protocol has not been set, ``None`` is
        returned.

        If the model does not support dose administration, ``None`` is
        returned.
        """
        try:
            return self._mechanistic_model.dosing_regimen()
        except AttributeError:
            # Wrapped model has no dosing support
            return None

    def enable_sensitivities(self, enabled):
        """
        Enables the computation of the output sensitivities with respect to
        the free model parameters.
        """
        if not enabled:
            self._mechanistic_model.enable_sensitivities(enabled)
            return None

        # Get free parameters (all parameters minus the fixed ones)
        free_parameters = np.array(self._parameter_names)
        if self._fixed_params_mask is not None:
            free_parameters = free_parameters[~self._fixed_params_mask]

        # Set sensitivities for the free parameters only
        self._mechanistic_model.enable_sensitivities(
            enabled, free_parameters)

    def fix_parameters(self, name_value_dict):
        """
        Fixes the value of model parameters, and effectively removes them as a
        parameter from the model. Fixing the value of a parameter at ``None``,
        sets the parameter free again.

        Parameters
        ----------
        name_value_dict
            A dictionary with model parameter names as keys, and parameter
            values as values.
        """
        # Check type
        try:
            name_value_dict = dict(name_value_dict)
        except (TypeError, ValueError):
            raise ValueError(
                'The name-value dictionary has to be convertable to a python '
                'dictionary.')

        # If no model parameters have been fixed before, instantiate a mask
        # and values
        if self._fixed_params_mask is None:
            self._fixed_params_mask = np.zeros(
                shape=self._n_parameters, dtype=bool)

        if self._fixed_params_values is None:
            self._fixed_params_values = np.empty(shape=self._n_parameters)

        # Update the mask and values
        for index, name in enumerate(self._parameter_names):
            try:
                value = name_value_dict[name]
            except KeyError:
                # KeyError indicates that parameter name is not being fixed
                continue

            # Fix parameter if value is not None, else unfix it
            self._fixed_params_mask[index] = value is not None
            self._fixed_params_values[index] = value

        # If all parameters are free, set mask and values to None again
        # (np.all replaces np.alltrue, which was removed in NumPy 2.0)
        if np.all(~self._fixed_params_mask):
            self._fixed_params_mask = None
            self._fixed_params_values = None

        # Re-map sensitivities to the remaining free parameters
        if self.has_sensitivities():
            self.enable_sensitivities(True)

    def has_sensitivities(self):
        """
        Returns a boolean indicating whether sensitivities have been enabled.
        """
        return self._mechanistic_model.has_sensitivities()

    def mechanistic_model(self):
        """
        Returns the original mechanistic model.
        """
        return self._mechanistic_model

    def n_fixed_parameters(self):
        """
        Returns the number of fixed model parameters.
        """
        if self._fixed_params_mask is None:
            return 0

        return int(np.sum(self._fixed_params_mask))

    def n_outputs(self):
        """
        Returns the number of output dimensions.

        By default this is the number of states.
        """
        return self._mechanistic_model.n_outputs()

    def n_parameters(self):
        """
        Returns the number of parameters in the model.

        Parameters of the model are initial state values and structural
        parameter values.
        """
        # Subtract fixed parameters from total number
        return self._n_parameters - self.n_fixed_parameters()

    def outputs(self):
        """
        Returns the output names of the model.
        """
        return self._mechanistic_model.outputs()

    def parameters(self):
        """
        Returns the parameter names of the model.
        """
        # Remove fixed model parameters from the returned names
        names = self._parameter_names
        if self._fixed_params_mask is not None:
            names = np.array(names)
            names = names[~self._fixed_params_mask]
            names = list(names)

        # Return a copy so callers cannot mutate internal state
        return copy.copy(names)

    def pd_output(self):
        """
        Returns the variable which interacts with the pharmacodynamic model.
        In most models this will be the concentration of the drug in the
        central compartment.

        This variable is mapped to the
        :meth:`chi.PharmacodynamicModel.pk_input` variable when a
        :class:`PKPDModel` is instantiated.

        Defaults to ``None`` or ``central.drug_concentration`` if the latter is
        among the model parameters.

        If the model does not support a pd output, ``None`` is returned.
        """
        try:
            return self._mechanistic_model.pd_output()
        except AttributeError:
            return None

    def pk_input(self):
        """
        Returns the pharmacokinetic input variable. In most models this will be
        the concentration of the drug.

        Defaults to ``None`` or ``myokit.drug_concentration`` if the latter is
        among the model parameters.

        If the model does not support a pk input, ``None`` is returned.
        """
        try:
            return self._mechanistic_model.pk_input()
        except AttributeError:
            return None

    def set_dosing_regimen(
            self, dose, start, duration=0.01, period=None, num=None):
        """
        Sets the dosing regimen with which the compound is administered.

        The route of administration can be set with :meth:`set_administration`.
        However, the type of administration, e.g. bolus injection or infusion,
        may be controlled with the duration input.

        By default the dose is administered as a bolus injection (duration on
        a time scale that is 100 fold smaller than the basic time unit). To
        model an infusion of the dose over a longer time period, the
        ``duration`` can be adjusted to the appropriate time scale.

        By default the dose is administered once. To apply multiple doses
        provide a dose administration period.

        Parameters
        ----------
        dose
            The amount of the compound that is injected at each administration.
        start
            Start time of the treatment.
        duration
            Duration of dose administration. For a bolus injection, a dose
            duration of 1% of the time unit should suffice. By default the
            duration is set to 0.01 (bolus).
        period
            Periodicity at which doses are administered. If ``None`` the dose
            is administered only once.
        num
            Number of administered doses. If ``None`` and the periodicity of
            the administration is not ``None``, doses are administered
            indefinitely.
        """
        try:
            self._mechanistic_model.set_dosing_regimen(
                dose, start, duration, period, num)
        except AttributeError:
            raise AttributeError(
                'The mechanistic model does not support dosing regimens.')

    def set_outputs(self, outputs):
        """
        Sets outputs of the model.

        Parameters
        ----------
        outputs
            A list of quantifiable variable names of the :class:`myokit.Model`,
            e.g. `compartment.variable`.
        """
        self._mechanistic_model.set_outputs(outputs)

    def set_output_names(self, names):
        """
        Assigns names to the outputs. By default the :class:`myokit.Model`
        names are assigned to the outputs.

        Parameters
        ----------
        names
            A dictionary that maps the current output names to new names.
        """
        self._mechanistic_model.set_output_names(names)

    def set_parameter_names(self, names):
        """
        Assigns names to the parameters. By default the :class:`myokit.Model`
        names are assigned to the parameters.

        Parameters
        ----------
        names
            A dictionary that maps the current parameter names to new names.
        """
        # Set parameter names and refresh the cached name list
        self._mechanistic_model.set_parameter_names(names)
        self._parameter_names = self._mechanistic_model.parameters()

    def simulate(self, parameters, times):
        """
        Returns the numerical solution of the model outputs (and optionally
        the sensitivites) for the specified parameters and times.

        The model outputs are returned as a 2 dimensional NumPy array of shape
        (n_outputs, n_times). If sensitivities are enabled, a tuple is returned
        with the NumPy array of the model outputs and a NumPy array of the
        sensitivities of shape (n_times, n_outputs, n_parameters).

        :param parameters: An array-like object with values for the model
            parameters.
        :type parameters: list, numpy.ndarray
        :param times: An array-like object with time points at which the output
            values are returned.
        :type times: list, numpy.ndarray
        """
        # Insert fixed parameter values into the full parameter vector
        if self._fixed_params_mask is not None:
            self._fixed_params_values[
                ~self._fixed_params_mask] = parameters
            parameters = self._fixed_params_values

        return self._mechanistic_model.simulate(parameters, times)

    def time_unit(self):
        """
        Returns the model's unit of time.
        """
        return self._mechanistic_model.time_unit()
| [
"numpy.alltrue",
"myokit.formats.sbml.SBMLImporter",
"myokit.pacing.blocktrain",
"myokit.Simulation",
"numpy.argsort",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.empty",
"copy.deepcopy",
"copy.copy",
"myokit.Name"
] | [((984, 1014), 'myokit.Simulation', 'myokit.Simulation', (['self._model'], {}), '(self._model)\n', (1001, 1014), False, 'import myokit\n'), ((1824, 1844), 'numpy.array', 'np.array', (['parameters'], {}), '(parameters)\n', (1832, 1844), True, 'import numpy as np\n'), ((2721, 2738), 'numpy.argsort', 'np.argsort', (['names'], {}), '(names)\n', (2731, 2738), True, 'import numpy as np\n'), ((2770, 2798), 'numpy.argsort', 'np.argsort', (['order_after_sort'], {}), '(order_after_sort)\n', (2780, 2798), True, 'import numpy as np\n'), ((3777, 3796), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (3790, 3796), False, 'import copy\n'), ((3925, 3966), 'myokit.Simulation', 'myokit.Simulation', (['myokit_model', 'protocol'], {}), '(myokit_model, protocol)\n', (3942, 3966), False, 'import myokit\n'), ((6650, 6705), 'myokit.Simulation', 'myokit.Simulation', (['self._model', 'protocol', 'sensitivities'], {}), '(self._model, protocol, sensitivities)\n', (6667, 6705), False, 'import myokit\n'), ((14685, 14740), 'numpy.array', 'np.array', (['[output[name] for name in self._output_names]'], {}), '([output[name] for name in self._output_names])\n', (14693, 14740), True, 'import numpy as np\n'), ((14765, 14788), 'numpy.array', 'np.array', (['sensitivities'], {}), '(sensitivities)\n', (14773, 14788), True, 'import numpy as np\n'), ((24314, 24338), 'myokit.Simulation', 'myokit.Simulation', (['model'], {}), '(model)\n', (24331, 24338), False, 'import myokit\n'), ((26633, 26737), 'myokit.pacing.blocktrain', 'myokit.pacing.blocktrain', ([], {'period': 'period', 'duration': 'duration', 'offset': 'start', 'level': 'dose_rate', 'limit': 'num'}), '(period=period, duration=duration, offset=start,\n level=dose_rate, limit=num)\n', (26657, 26737), False, 'import myokit\n'), ((29580, 29599), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (29593, 29599), False, 'import copy\n'), ((30567, 30598), 'numpy.array', 'np.array', (['self._parameter_names'], {}), 
'(self._parameter_names)\n', (30575, 30598), True, 'import numpy as np\n'), ((32466, 32502), 'numpy.alltrue', 'np.alltrue', (['(~self._fixed_params_mask)'], {}), '(~self._fixed_params_mask)\n', (32476, 32502), True, 'import numpy as np\n'), ((34550, 34566), 'copy.copy', 'copy.copy', (['names'], {}), '(names)\n', (34559, 34566), False, 'import copy\n'), ((14464, 14519), 'numpy.array', 'np.array', (['[output[name] for name in self._output_names]'], {}), '([output[name] for name in self._output_names])\n', (14472, 14519), True, 'import numpy as np\n'), ((31723, 31769), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self._n_parameters', 'dtype': 'bool'}), '(shape=self._n_parameters, dtype=bool)\n', (31731, 31769), True, 'import numpy as np\n'), ((31874, 31908), 'numpy.empty', 'np.empty', ([], {'shape': 'self._n_parameters'}), '(shape=self._n_parameters)\n', (31882, 31908), True, 'import numpy as np\n'), ((33275, 33306), 'numpy.sum', 'np.sum', (['self._fixed_params_mask'], {}), '(self._fixed_params_mask)\n', (33281, 33306), True, 'import numpy as np\n'), ((34434, 34449), 'numpy.array', 'np.array', (['names'], {}), '(names)\n', (34442, 34449), True, 'import numpy as np\n'), ((645, 664), 'myokit.formats.sbml.SBMLImporter', 'sbml.SBMLImporter', ([], {}), '()\n', (662, 664), True, 'import myokit.formats.sbml as sbml\n'), ((5283, 5323), 'myokit.Simulation', 'myokit.Simulation', (['self._model', 'protocol'], {}), '(self._model, protocol)\n', (5300, 5323), False, 'import myokit\n'), ((18403, 18432), 'myokit.Name', 'myokit.Name', (['dose_drug_amount'], {}), '(dose_drug_amount)\n', (18414, 18432), False, 'import myokit\n'), ((20004, 20026), 'myokit.Name', 'myokit.Name', (['dose_rate'], {}), '(dose_rate)\n', (20015, 20026), False, 'import myokit\n'), ((33874, 33905), 'numpy.sum', 'np.sum', (['self._fixed_params_mask'], {}), '(self._fixed_params_mask)\n', (33880, 33905), True, 'import numpy as np\n'), ((18356, 18384), 'myokit.Name', 'myokit.Name', (['absorption_rate'], {}), 
'(absorption_rate)\n', (18367, 18384), False, 'import myokit\n'), ((18683, 18711), 'myokit.Name', 'myokit.Name', (['absorption_rate'], {}), '(absorption_rate)\n', (18694, 18711), False, 'import myokit\n'), ((18733, 18762), 'myokit.Name', 'myokit.Name', (['dose_drug_amount'], {}), '(dose_drug_amount)\n', (18744, 18762), False, 'import myokit\n')] |
## Copyright (c) 2022, Team FirmWire
## SPDX-License-Identifier: BSD-3-Clause
from enum import Enum, auto
from .hw.soc import SOCPeripheral
class MemoryMapEntryType(Enum):
    """Kind of region described by a :class:`MemoryMapEntry`."""

    GENERIC = auto()      # plain memory range (see MemoryMap.add_memory_range)
    FILE_BACKED = auto()  # contents loaded from a file (add_file_backed_memory)
    PERIPHERAL = auto()   # region emulated by a peripheral class (create_peripheral)
    ANNOTATION = auto()   # named annotation of an address range (add_memory_annotation)
class MemoryMapEntry:
    """A single region of a :class:`MemoryMap`.

    Holds the entry kind, the start address and size of the region, and any
    extra keyword arguments (e.g. ``file=``, ``emulate=``, ``name=``) that
    the consumer of the map interprets according to the entry type.
    """

    def __init__(self, ty, start, size, **kwargs):
        # isinstance is the idiomatic type check (accepts subclasses and
        # reads better than `type(x) == T`).
        assert isinstance(ty, MemoryMapEntryType), ty
        assert isinstance(start, int), start
        assert isinstance(size, int), size

        self.ty = ty
        self.start = start
        self.size = size
        self.kwargs = kwargs

    def __repr__(self):
        # Address range is rendered as [start - end) in hex.
        return "<MemoryMapEntry %s [%x - %x]>" % (
            self.ty,
            self.start,
            self.start + self.size,
        )
class MemoryMap:
    """An ordered collection of :class:`MemoryMapEntry` records describing
    a device's address space."""

    def __init__(self):
        self.memory_map = []

    def add_file_backed_memory(self, start, size, file, **kwargs):
        """Register a region whose contents are loaded from *file*."""
        entry = MemoryMapEntry(
            MemoryMapEntryType.FILE_BACKED, start, size, file=file, **kwargs
        )
        self.memory_map.append(entry)

    def add_memory_range(self, start, size, **kwargs):
        """Register a plain memory region.

        For backwards compatibility, an ``emulate=<cls>`` keyword turns the
        request into a peripheral region instead.
        """
        if "emulate" in kwargs:
            peripheral_cls = kwargs.pop("emulate")
            return self.create_peripheral(peripheral_cls, start, size, **kwargs)

        entry = MemoryMapEntry(MemoryMapEntryType.GENERIC, start, size, **kwargs)
        self.memory_map.append(entry)

    def add_memory_annotation(self, start, size, name):
        """Attach a named annotation to an address range."""
        entry = MemoryMapEntry(
            MemoryMapEntryType.ANNOTATION, start, size, name=name
        )
        self.memory_map.append(entry)

    def create_peripheral(self, peripheral_cls, start, size, **kwargs):
        """Register a region emulated by *peripheral_cls*."""
        entry = MemoryMapEntry(
            MemoryMapEntryType.PERIPHERAL,
            start,
            size,
            emulate=peripheral_cls,
            **kwargs
        )
        self.memory_map.append(entry)

    def create_soc_peripheral(self, peripheral):
        """Register a pre-configured :class:`SOCPeripheral` instance."""
        assert isinstance(peripheral, SOCPeripheral)

        # The SOCPeripheral class captures the reference
        self.create_peripheral(
            peripheral, peripheral._address, peripheral._size, **peripheral._attr
        )
| [
"enum.auto"
] | [((188, 194), 'enum.auto', 'auto', ([], {}), '()\n', (192, 194), False, 'from enum import Enum, auto\n'), ((213, 219), 'enum.auto', 'auto', ([], {}), '()\n', (217, 219), False, 'from enum import Enum, auto\n'), ((237, 243), 'enum.auto', 'auto', ([], {}), '()\n', (241, 243), False, 'from enum import Enum, auto\n'), ((261, 267), 'enum.auto', 'auto', ([], {}), '()\n', (265, 267), False, 'from enum import Enum, auto\n')] |
import functools

import javascript

from vue.bridge import Object
class VueDecorator:
    """Base class for decorators that merge a value into a Vue options dict.

    Subclasses set ``__key__`` (target section), ``__parents__`` (ancestor
    sections), ``__id__`` (optional sub-entry name) and ``__value__``
    (payload to install).
    """

    __key__ = None
    __parents__ = ()
    __id__ = None
    __value__ = None

    def update(self, vue_dict):
        """Merge this decorator's value into *vue_dict*."""
        # NOTE(review): setdefault is invoked on vue_dict itself for every
        # parent — not on the previously resolved section — so parent chains
        # longer than one level end up flattened onto the top-level dict.
        # Presumably only single-parent chains occur in practice; confirm.
        section = vue_dict
        for parent in self.__parents__:
            section = vue_dict.setdefault(parent, {})

        if self.__id__ is None:
            section[self.__key__] = self.__value__
            return

        # Keyed entry: merge dict payloads, overwrite anything else.
        slot = section.setdefault(self.__key__, {})
        payload = self.__value__
        if isinstance(slot.get(self.__id__), dict):
            slot[self.__id__].update(payload)
        else:
            slot[self.__id__] = payload
def pyjs_bridge(fn, inject_vue_instance=False):
    """Wrap *fn* so it can be called from JavaScript.

    Arguments and keyword arguments are converted from their JS
    representation before the call, and the return value is converted back.
    If ``inject_vue_instance`` is true, the current Vue instance
    (``javascript.this()``) is prepended to the positional arguments.

    ``functools.wraps`` preserves the wrapped function's metadata
    (``__name__``, ``__doc__``, ``__module__``, ...) instead of copying
    only ``__name__`` by hand.
    """
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        if inject_vue_instance:
            args = (javascript.this(), *args)
        args = tuple(Object.from_js(arg) for arg in args)
        kwargs = {key: Object.from_js(value) for key, value in kwargs.items()}
        return Object.to_js(fn(*args, **kwargs))

    return wrapper
| [
"vue.bridge.Object.from_js",
"javascript.this"
] | [((882, 899), 'vue.bridge.Object.from_js', 'Object.from_js', (['v'], {}), '(v)\n', (896, 899), False, 'from vue.bridge import Object\n'), ((744, 761), 'javascript.this', 'javascript.this', ([], {}), '()\n', (759, 761), False, 'import javascript\n'), ((824, 843), 'vue.bridge.Object.from_js', 'Object.from_js', (['arg'], {}), '(arg)\n', (838, 843), False, 'from vue.bridge import Object\n')] |
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import keras
from keras.models import Model, load_model
from keras import backend as K
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # mute deprecation warnings
from keras.optimizers import Adam, SGD
from tensorflow import ConfigProto
from tensorflow import InteractiveSession
import numpy as np
import sys
from PIL import Image
import argparse
from matplotlib import pyplot as plt
from .dataloader import *
from .model import *
from .metrics import *
def train(args):
    """Train a BiONet segmentation model.

    Loads training and validation data, builds the model from the CLI
    arguments, trains with on-the-fly augmentation, and checkpoints the
    best model (by validation IoU) under ``checkpoints/<exp>/``.

    :param args: parsed CLI namespace; fields used here include
        ``train_data``, ``valid_data``, ``valid_dataset``, ``num_class``,
        ``iter``, ``multiplier``, ``integrate``, ``batch_size``, ``lr``,
        ``lr_decay``, ``save_weight``, ``exp``, ``steps`` and ``epochs``.
    """
    # load data
    x_val, y_val = load_data(args.valid_data, args.valid_dataset)
    # NOTE(review): the training dataset name is hard-coded to 'monuseg',
    # unlike the configurable validation dataset — confirm this is intended.
    x_train, y_train = load_data(args.train_data, 'monuseg')
    print('data loading finished.')
    # Reset any previous Keras/TF graph state and enable GPU memory growth
    # so TensorFlow does not claim all GPU memory up front.
    K.clear_session()
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)
    K.set_learning_phase(1)  # run layers in training mode (e.g. dropout active)
    input_shape = x_train[0].shape
    # create model
    model = BiONet(
        input_shape,
        num_classes=args.num_class,
        num_layers=4,
        iterations=args.iter,
        multiplier=args.multiplier,
        integrate=args.integrate
    ).build()
    # augmentation
    train_gen = get_augmented(
        x_train, y_train, batch_size=args.batch_size,
        data_gen_args = dict(
            rotation_range=15.,
            width_shift_range=0.05,
            height_shift_range=0.05,
            shear_range=50,
            zoom_range=0.2,
            horizontal_flip=True,
            vertical_flip=True,
            fill_mode='constant'
        ))
    model.compile(
        optimizer=Adam(lr=args.lr,decay=args.lr_decay),
        loss = 'binary_crossentropy',
        metrics=[iou, dice_coef]
    )
    print('model successfully built and compiled.')
    # The checkpoint file name encodes the architecture hyper-parameters.
    integrate = '_int' if args.integrate else ''
    weights = '_weights' if args.save_weight else ''
    cpt_name = 'iter_'+str(args.iter)+'_mul_'+str(args.multiplier)+integrate+'_best'+weights+'.h5'
    # Keep only the best checkpoint according to validation IoU.
    callbacks = [keras.callbacks.ModelCheckpoint("checkpoints/"+args.exp+"/"+cpt_name,monitor='val_iou', mode='max',verbose=0, save_weights_only=args.save_weight, save_best_only=True)]
    if not os.path.isdir("checkpoints/"+args.exp):
        os.mkdir("checkpoints/"+args.exp)
    print('\nStart training...')
    history = model.fit_generator(
        train_gen,
        steps_per_epoch=args.steps,
        epochs=args.epochs,
        validation_data=(x_val, y_val),
        callbacks=callbacks
    )
    # (typo 'fininshed' left as-is: it is a runtime string)
    print('\nTraining fininshed!')
    K.clear_session()
def evaluate(args):
    """Evaluate a trained BiONet model on the validation set.

    Restores a checkpoint (either a full saved model or a weights-only
    file), reports loss/IoU/Dice on the validation data, and optionally
    saves the metrics and binarised segmentation maps under
    ``checkpoints/<exp>/outputs/``.

    :param args: parsed CLI namespace; fields used here include
        ``valid_data``, ``valid_dataset``, ``model_path``, ``save_weight``,
        ``num_class``, ``iter``, ``multiplier``, ``integrate``, ``lr``,
        ``lr_decay``, ``batch_size``, ``exp`` and ``save_result``.
    """
    # load data
    x_val, y_val = load_data(args.valid_data, args.valid_dataset)
    print('data loading finished.')

    # Reset TF state and enable GPU memory growth.
    K.clear_session()
    K.set_learning_phase(1)
    config = ConfigProto()
    config.gpu_options.allow_growth = True
    session = tf.Session(config=config)

    # Locate the checkpoint: either an explicit path, or the default name
    # derived from the architecture hyper-parameters.
    if args.model_path is None:
        integrate = '_int' if args.integrate else ''
        weights = '_weights' if args.save_weight else ''
        cpt_name = 'iter_'+str(args.iter)+'_mul_'+str(args.multiplier)+integrate+'_best'+weights+'.h5'
        model_path = "checkpoints/"+args.exp+"/"+cpt_name
    else:
        model_path = args.model_path

    print('Restoring model from path: '+model_path)

    if args.save_weight:
        # Weights-only checkpoint: rebuild the architecture, then load the
        # weights into it. Fixes two bugs in the original: `input_shape` was
        # never defined in this function (NameError), and chaining
        # `.build().load_weights(...)` bound `model` to None because
        # load_weights returns None.
        input_shape = x_val[0].shape
        model = BiONet(
            input_shape,
            num_classes=args.num_class,
            num_layers=4,
            iterations=args.iter,
            multiplier=args.multiplier,
            integrate=args.integrate
        ).build()
        model.load_weights(model_path)
    else:
        model = load_model(model_path, compile=False)

    model.compile(
        optimizer=Adam(lr=args.lr,decay=args.lr_decay),
        loss='binary_crossentropy',
        metrics=[iou, dice_coef]
    )

    print('\nStart evaluation...')
    result = model.evaluate(x_val,y_val,batch_size=args.batch_size)
    print('Validation loss:\t', result[0])
    print('Validation iou:\t', result[1])
    print('Validation dice:\t', result[2])
    print('\nEvaluation finished!')

    if args.save_result:
        # save metrics
        if not os.path.exists("checkpoints/"+args.exp+"/outputs"):
            os.mkdir("checkpoints/"+args.exp+"/outputs")
        with open("checkpoints/"+args.exp+"/outputs/result.txt", 'w+') as f:
            f.write('Validation loss:\t'+str(result[0])+'\n')
            f.write('Validation iou:\t'+str(result[1])+'\n')
            f.write('Validation dice:\t'+str(result[2])+'\n')
        print('Metrics have been saved to:', "checkpoints/"+args.exp+"/outputs/result.txt")

        # predict and save segmentations
        results = model.predict(x_val,batch_size=args.batch_size,verbose=1)
        # Binarization. Comment out this line to keep raw probabilities.
        results = (results > 0.5).astype(np.float32)
        print('\nPrediction finished!')
        print('Saving segmentations...')
        if not os.path.exists("checkpoints/"+args.exp+"/outputs/segmentations"):
            os.mkdir("checkpoints/"+args.exp+"/outputs/segmentations")
        for i in range(results.shape[0]):
            # Save each binary mask as a grayscale PNG.
            plt.imsave("checkpoints/"+args.exp+"/outputs/segmentations/"+str(i)+".png",results[i,:,:,0],cmap='gray')
        print('A total of '+str(results.shape[0])+' segmentation results have been saved to:', "checkpoints/"+args.exp+"/outputs/segmentations/")

    K.clear_session()
def get_augmented(
        X_train,
        Y_train,
        X_val=None,
        Y_val=None,
        batch_size=32,
        seed=0,
        data_gen_args=None):
    """Build synchronised image/mask augmentation generators.

    Image and mask generators share the same seed so that each augmented
    image stays aligned with its augmented mask.

    :param X_train: training images.
    :param Y_train: training masks.
    :param X_val: optional validation images.
    :param Y_val: optional validation masks.
    :param batch_size: batch size of the returned generators.
    :param seed: random seed shared by image and mask augmentation.
    :param data_gen_args: keyword arguments for
        ``keras.preprocessing.image.ImageDataGenerator``; if ``None`` a
        default augmentation config is used.
    :returns: ``train_generator`` or ``(train_generator, val_generator)``
        when validation data is provided.
    """
    # Avoid the mutable-default-argument pitfall: the original signature used
    # `data_gen_args=dict(...)`, which is created once and shared between
    # calls. Build the default configuration afresh per call instead.
    if data_gen_args is None:
        data_gen_args = dict(
            rotation_range=10.,
            # width_shift_range=0.02,
            height_shift_range=0.02,
            shear_range=5,
            # zoom_range=0.3,
            horizontal_flip=True,
            vertical_flip=False,
            fill_mode='constant'
        )

    # Train data: provide the same seed and keyword arguments to the fit and
    # flow methods so image and mask augmentation stay in lockstep.
    X_datagen = ImageDataGenerator(**data_gen_args)
    Y_datagen = ImageDataGenerator(**data_gen_args)
    X_datagen.fit(X_train, augment=True, seed=seed)
    Y_datagen.fit(Y_train, augment=True, seed=seed)
    X_train_augmented = X_datagen.flow(X_train, batch_size=batch_size, shuffle=True, seed=seed)
    Y_train_augmented = Y_datagen.flow(Y_train, batch_size=batch_size, shuffle=True, seed=seed)

    train_generator = zip(X_train_augmented, Y_train_augmented)

    if not (X_val is None) and not (Y_val is None):
        # Validation data: no extra augmentation intent, but a generator is
        # created anyway for a uniform interface.
        X_datagen_val = ImageDataGenerator(**data_gen_args)
        Y_datagen_val = ImageDataGenerator(**data_gen_args)
        X_datagen_val.fit(X_val, augment=True, seed=seed)
        Y_datagen_val.fit(Y_val, augment=True, seed=seed)
        X_val_augmented = X_datagen_val.flow(X_val, batch_size=batch_size, shuffle=True, seed=seed)
        Y_val_augmented = Y_datagen_val.flow(Y_val, batch_size=batch_size, shuffle=True, seed=seed)

        # combine generators into one which yields image and masks
        val_generator = zip(X_val_augmented, Y_val_augmented)
        return train_generator, val_generator

    return train_generator
| [
"keras.optimizers.Adam",
"os.path.exists",
"keras.models.load_model",
"keras.callbacks.ModelCheckpoint",
"tensorflow.Session",
"tensorflow.compat.v1.logging.set_verbosity",
"keras.preprocessing.image.ImageDataGenerator",
"os.path.isdir",
"keras.backend.clear_session",
"os.mkdir",
"tensorflow.Con... | [((220, 282), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (254, 282), True, 'import tensorflow as tf\n'), ((797, 814), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (812, 814), True, 'from keras import backend as K\n'), ((826, 839), 'tensorflow.ConfigProto', 'ConfigProto', ([], {}), '()\n', (837, 839), False, 'from tensorflow import ConfigProto\n'), ((893, 918), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (903, 918), True, 'import tensorflow as tf\n'), ((921, 944), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), '(1)\n', (941, 944), True, 'from keras import backend as K\n'), ((2442, 2459), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2457, 2459), True, 'from keras import backend as K\n'), ((2596, 2613), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (2611, 2613), True, 'from keras import backend as K\n'), ((2616, 2639), 'keras.backend.set_learning_phase', 'K.set_learning_phase', (['(1)'], {}), '(1)\n', (2636, 2639), True, 'from keras import backend as K\n'), ((2651, 2664), 'tensorflow.ConfigProto', 'ConfigProto', ([], {}), '()\n', (2662, 2664), False, 'from tensorflow import ConfigProto\n'), ((2718, 2743), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (2728, 2743), True, 'import tensorflow as tf\n'), ((5101, 5118), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5116, 5118), True, 'from keras import backend as K\n'), ((5569, 5604), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (5587, 5604), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((5619, 5654), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (5637, 
5654), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1939, 2122), 'keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (["('checkpoints/' + args.exp + '/' + cpt_name)"], {'monitor': '"""val_iou"""', 'mode': '"""max"""', 'verbose': '(0)', 'save_weights_only': 'args.save_weight', 'save_best_only': '(True)'}), "('checkpoints/' + args.exp + '/' + cpt_name,\n monitor='val_iou', mode='max', verbose=0, save_weights_only=args.\n save_weight, save_best_only=True)\n", (1970, 2122), False, 'import keras\n'), ((2116, 2156), 'os.path.isdir', 'os.path.isdir', (["('checkpoints/' + args.exp)"], {}), "('checkpoints/' + args.exp)\n", (2129, 2156), False, 'import os\n'), ((2160, 2195), 'os.mkdir', 'os.mkdir', (["('checkpoints/' + args.exp)"], {}), "('checkpoints/' + args.exp)\n", (2168, 2195), False, 'import os\n'), ((3416, 3453), 'keras.models.load_model', 'load_model', (['model_path'], {'compile': '(False)'}), '(model_path, compile=False)\n', (3426, 3453), False, 'from keras.models import Model, load_model\n'), ((6161, 6196), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (6179, 6196), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((6219, 6254), 'keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), '(**data_gen_args)\n', (6237, 6254), False, 'from keras.preprocessing.image import ImageDataGenerator\n'), ((1565, 1602), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr', 'decay': 'args.lr_decay'}), '(lr=args.lr, decay=args.lr_decay)\n', (1569, 1602), False, 'from keras.optimizers import Adam, SGD\n'), ((3488, 3525), 'keras.optimizers.Adam', 'Adam', ([], {'lr': 'args.lr', 'decay': 'args.lr_decay'}), '(lr=args.lr, decay=args.lr_decay)\n', (3492, 3525), False, 'from keras.optimizers import Adam, SGD\n'), ((3909, 3963), 'os.path.exists', 'os.path.exists', (["('checkpoints/' + args.exp + '/outputs')"], {}), "('checkpoints/' + args.exp + 
'/outputs')\n", (3923, 3963), False, 'import os\n'), ((3967, 4015), 'os.mkdir', 'os.mkdir', (["('checkpoints/' + args.exp + '/outputs')"], {}), "('checkpoints/' + args.exp + '/outputs')\n", (3975, 4015), False, 'import os\n'), ((4653, 4721), 'os.path.exists', 'os.path.exists', (["('checkpoints/' + args.exp + '/outputs/segmentations')"], {}), "('checkpoints/' + args.exp + '/outputs/segmentations')\n", (4667, 4721), False, 'import os\n'), ((4725, 4787), 'os.mkdir', 'os.mkdir', (["('checkpoints/' + args.exp + '/outputs/segmentations')"], {}), "('checkpoints/' + args.exp + '/outputs/segmentations')\n", (4733, 4787), False, 'import os\n')] |
from setuptools import setup, find_packages

# Package metadata. Plain UPPER_SNAKE constants replace the previous
# __dunder__ names: rebinding __name__ in particular shadowed the module's
# own __name__ attribute, which would break an
# `if __name__ == "__main__"` guard and confuses tooling.
NAME = "appJar"
VERSION = "0.94.0"
AUTHOR = "<NAME>"
DESCRIPTION = "An easy-to-use, feature-rich GUI wrapper for tKinter. Designed specifically for use in the classroom, but powerful enough to be used anywhere."
AUTHOR_EMAIL = "<EMAIL>"
LICENSE = "Apache 2.0"
URL = "http://appJar.info"
KEYWORDS = ["python", "gui", "tkinter", "appJar", "interface"]
PACKAGES = ["appJar"]
CLASSIFIERS = [
    'Development Status :: 4 - Beta',
    'Intended Audience :: Developers',
    'Intended Audience :: Education',
    'Programming Language :: Python :: 2',
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3.3',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'Topic :: Education',
    'Topic :: Software Development',
    'Topic :: Software Development :: User Interfaces',
    'Topic :: Software Development :: Libraries :: Python Modules',
    'License :: OSI Approved :: Apache Software License',
]
LONG_DESCRIPTION = """# appJar
Simple tKinter GUIs in Python.
"""

setup(
    name=NAME,
    packages=PACKAGES,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    long_description_content_type="text/markdown",
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    keywords=KEYWORDS,
    license=LICENSE,
    classifiers=CLASSIFIERS,
    # Bundle the helper libraries, icons and showcase example with the package.
    package_data = {
        "appJar": ["lib/*.py", "lib/*.txt", "lib/tkdnd2.8/*.tcl", "lib/tkdnd2.8/tcl_files/*.tcl", "lib/tkdnd2.8/tcl_libs/*", "resources/icons/*", "examples/showcase.py", "PYPI.md"]
    }
)
| [
"setuptools.setup"
] | [((1268, 1794), 'setuptools.setup', 'setup', ([], {'name': '__name__', 'packages': '__packages__', 'version': '__version__', 'description': '__desc__', 'long_description': '__long_description__', 'long_description_content_type': '"""text/markdown"""', 'author': '__author__', 'author_email': '__author_email__', 'url': '__url__', 'keywords': '__keywords__', 'license': '__license__', 'classifiers': '__classifiers__', 'package_data': "{'appJar': ['lib/*.py', 'lib/*.txt', 'lib/tkdnd2.8/*.tcl',\n 'lib/tkdnd2.8/tcl_files/*.tcl', 'lib/tkdnd2.8/tcl_libs/*',\n 'resources/icons/*', 'examples/showcase.py', 'PYPI.md']}"}), "(name=__name__, packages=__packages__, version=__version__,\n description=__desc__, long_description=__long_description__,\n long_description_content_type='text/markdown', author=__author__,\n author_email=__author_email__, url=__url__, keywords=__keywords__,\n license=__license__, classifiers=__classifiers__, package_data={\n 'appJar': ['lib/*.py', 'lib/*.txt', 'lib/tkdnd2.8/*.tcl',\n 'lib/tkdnd2.8/tcl_files/*.tcl', 'lib/tkdnd2.8/tcl_libs/*',\n 'resources/icons/*', 'examples/showcase.py', 'PYPI.md']})\n", (1273, 1794), False, 'from setuptools import setup, find_packages\n')] |
import shapefile
class Andir:
def __init__(self):
self.kelurahan = shapefile.Writer(
'kelurahan_andir', shapeType=shapefile.POLYGON)
self.kelurahan.shapeType
self.kelurahan.field('kelurahan_di_andir', 'C')
self.kantor = shapefile.Writer(
'kantor_kelurahan_andir', shapeType=shapefile.POINT)
self.kantor.shapeType
self.kantor.field('kantor_kelurahan_di_andir', 'C')
self.jalan = shapefile.Writer(
'jalan_andir', shapeType=shapefile.POLYLINE)
self.jalan.shapeType
self.jalan.field('jalan_di_andir', 'C')
# Kelurahan
    def kelurahanCampaka(self, nama):
        """Append the Campaka village boundary as one polygon feature.

        Writes *nama* into the 'kelurahan_di_andir' attribute field and a
        single closed ring of coordinate pairs to the polygon writer.
        """
        self.kelurahan.record(nama)
        # One outer ring; pairs appear to be (longitude, latitude) -- TODO confirm.
        self.kelurahan.poly([[
            [107.5688412, -6.9100128],
            [107.5691201, -6.9097865],
            [107.5691094, -6.9097226],
            [107.569163, -6.9097412],
            [107.5691523, -6.9096667],
            [107.5692167, -6.9096507],
            [107.5693562, -6.9094057],
            [107.5693213, -6.9093125],
            [107.5693401, -6.9092593],
            [107.5693937, -6.909254],
            [107.5693588, -6.9092193],
            [107.5694259, -6.9090809],
            [107.5694232, -6.9089158],
            [107.5694956, -6.9086948],
            [107.5695412, -6.9086335],
            [107.5695734, -6.9085164],
            [107.5696029, -6.9084791],
            [107.5696002, -6.9084285],
            [107.5696888, -6.908298],
            [107.5697504, -6.9080557],
            [107.5697183, -6.9079758],
            [107.5699838, -6.9070998],
            [107.570016, -6.9070679],
            [107.5701353, -6.9069387],
            [107.5701655, -6.906761],
            [107.5701471, -6.9066948],
            [107.5701499, -6.906643],
            [107.570264, -6.9064906],
            [107.5702372, -6.9063532],
            [107.5701916, -6.9062912],
            [107.5701796, -6.9062082],
            [107.5702621, -6.9061069],
            [107.5702705, -6.9060224],
            [107.5702735, -6.9059272],
            [107.5702582, -6.9058752],
            [107.5702812, -6.9058353],
            [107.570331, -6.9053586],
            [107.5703968, -6.9051163],
            [107.5703898, -6.9050277],
            [107.5704728, -6.9047611],
            [107.570537, -6.9044838],
            [107.5705486, -6.9044575],
            [107.5705852, -6.904431],
            [107.5706371, -6.9043805],
            [107.5706415, -6.9043086],
            [107.5706277, -6.9042101],
            [107.570663, -6.9041775],
            [107.5706565, -6.9041212],
            [107.5707491, -6.9039007],
            [107.5707861, -6.9038517],
            [107.5708515, -6.903726],
            [107.571029, -6.9033889],
            [107.5711822, -6.9031591],
            [107.5712829, -6.9031001],
            [107.5713198, -6.9030174],
            [107.5713183, -6.9029548],
            [107.5712337, -6.9028629],
            [107.5709224, -6.9026604],
            [107.5709029, -6.9026318],
            [107.5708951, -6.9026101],
            [107.570902, -6.9025687],
            [107.5709255, -6.902512],
            [107.5699489, -6.9018673],
            [107.5693062, -6.9014289],
            [107.5681693, -6.9006596],
            [107.567794, -6.9004107],
            [107.5674402, -6.9001352],
            [107.5671834, -6.8999973],
            [107.5668738, -6.8997586],
            [107.5673784, -6.8990922],
            [107.5674804, -6.898867],
            [107.5674938, -6.8987944],
            [107.5676575, -6.8983809],
            [107.5678829, -6.8980119],
            [107.5679768, -6.8978061],
            [107.5680828, -6.897634],
            [107.5682431, -6.8973935],
            [107.5683634, -6.897188],
            [107.5684758, -6.8969693],
            [107.5685892, -6.8969163],
            [107.5686339, -6.8968247],
            [107.5686241, -6.8967202],
            [107.5686192, -6.8965988],
            [107.5685952, -6.8964875],
            [107.5685484, -6.8964145],
            [107.5685499, -6.8963149],
            [107.5685818, -6.896162],
            [107.5685851, -6.8960259],
            [107.5684635, -6.8959323],
            [107.5684081, -6.8959108],
            [107.5683958, -6.8958547],
            [107.5683944, -6.8957056],
            [107.568337, -6.8955877],
            [107.5682957, -6.895382],
            [107.5681746, -6.8949567],
            [107.5682117, -6.894851],
            [107.5680265, -6.8945171],
            [107.5680009, -6.8944163],
            [107.5678567, -6.8942487],
            [107.56782, -6.8941755],
            [107.5678101, -6.894097],
            [107.5678994, -6.8939173],
            [107.5679365, -6.8938042],
            [107.5679396, -6.8937164],
            [107.5678969, -6.8936179],
            [107.5677161, -6.8934741],
            [107.5671385, -6.8930081],
            [107.5671064, -6.8928797],
            [107.5669697, -6.8927356],
            [107.5669268, -6.8925901],
            [107.5670463, -6.8923704],
            [107.5668794, -6.8920902],
            [107.5668064, -6.892011],
            [107.56655, -6.8918584],
            [107.5662883, -6.891759],
            [107.5660598, -6.8918053],
            [107.5648658, -6.8913936],
            [107.5648508, -6.8916644],
            [107.5646481, -6.8921482],
            [107.564771, -6.8922401],
            [107.5649719, -6.892278],
            [107.5651165, -6.8924518],
            [107.5647567, -6.8931561],
            [107.5645768, -6.8931904],
            [107.5645606, -6.8932806],
            [107.5642931, -6.8933683],
            [107.5639611, -6.8939379],
            [107.5639608, -6.8940531],
            [107.5650172, -6.894951],
            [107.5649834, -6.8950079],
            [107.5650354, -6.895118],
            [107.5650229, -6.8952121],
            [107.5648317, -6.8955468],
            [107.5646603, -6.8960059],
            [107.5645372, -6.8961801],
            [107.5644152, -6.8963125],
            [107.5643791, -6.896405],
            [107.5642962, -6.8964301],
            [107.5642355, -6.8965784],
            [107.563983, -6.8967268],
            [107.5639292, -6.8968649],
            [107.563634, -6.8969206],
            [107.5630975, -6.896794],
            [107.5630412, -6.8968213],
            [107.5628963, -6.8970962],
            [107.5617891, -6.8964114],
            [107.5614757, -6.8962167],
            [107.5611903, -6.8960248],
            [107.5608727, -6.8958382],
            [107.5602911, -6.8954171],
            [107.5601647, -6.8953644],
            [107.5599015, -6.8953304],
            [107.5621585, -6.8990243],
            [107.5648286, -6.9034798],
            [107.5675416, -6.9078873],
            [107.568771, -6.909903],
            [107.5688412, -6.9100128],
        ]])
    def kelurahanCiroyom(self, nama):
        """Append the Ciroyom village boundary as one polygon feature.

        Writes *nama* into the 'kelurahan_di_andir' attribute field and a
        single closed ring of coordinate pairs to the polygon writer.
        """
        self.kelurahan.record(nama)
        # One outer ring; pairs appear to be (longitude, latitude) -- TODO confirm.
        self.kelurahan.poly([[
            [107.5835421, -6.9108951],
            [107.5834349, -6.9110762],
            [107.5831184, -6.9118271],
            [107.5827777, -6.912759],
            [107.5825149, -6.913691],
            [107.5830862, -6.9138388],
            [107.5833719, -6.9138914],
            [107.5835388, -6.9138911],
            [107.5836441, -6.9139414],
            [107.5833732, -6.9150303],
            [107.5835301, -6.9150849],
            [107.5847384, -6.9154724],
            [107.5852265, -6.9156322],
            [107.5851635, -6.9173336],
            [107.5851589, -6.9176585],
            [107.5850925, -6.9185052],
            [107.592997, -6.9194399],
            [107.5933806, -6.9166693],
            [107.5933698, -6.9164523],
            [107.593343, -6.9163811],
            [107.593351, -6.9162309],
            [107.5932464, -6.9160569],
            [107.5931579, -6.9159776],
            [107.5928159, -6.9157064],
            [107.5928199, -6.9154431],
            [107.5925933, -6.91527],
            [107.5926596, -6.9150317],
            [107.5926606, -6.9149552],
            [107.5928542, -6.9143204],
            [107.5929564, -6.9143133],
            [107.5929807, -6.9142711],
            [107.592966, -6.9140969],
            [107.5929728, -6.9140132],
            [107.5916209, -6.9139234],
            [107.5901283, -6.9139075],
            [107.5891399, -6.9137692],
            [107.588118, -6.9135059],
            [107.5868466, -6.9130123],
            [107.5852239, -6.9120075],
            [107.5847424, -6.9116616],
            [107.5835421, -6.9108951],
        ]])
    def kelurahanDungusCariang(self, nama):
        """Append the Dungus Cariang village boundary as one polygon feature.

        Writes *nama* into the 'kelurahan_di_andir' attribute field and a
        single closed ring of coordinate pairs to the polygon writer.
        """
        self.kelurahan.record(nama)
        # One outer ring; pairs appear to be (longitude, latitude) -- TODO confirm.
        self.kelurahan.poly([[
            [107.5791752, -6.9079447],
            [107.57897, -6.9082596],
            [107.5787527, -6.9087209],
            [107.5769161, -6.9168328],
            [107.5768158, -6.9174711],
            [107.57737, -6.9175158],
            [107.578077, -6.9176377],
            [107.5808391, -6.9179676],
            [107.581166, -6.918042],
            [107.5850925, -6.9185052],
            [107.5851589, -6.9176585],
            [107.5851635, -6.9173336],
            [107.5851729, -6.9170687],
            [107.5852265, -6.9156322],
            [107.5847384, -6.9154724],
            [107.5835301, -6.9150849],
            [107.5833732, -6.9150303],
            [107.5836441, -6.9139414],
            [107.5835388, -6.9138911],
            [107.5833719, -6.9138914],
            [107.5830862, -6.9138388],
            [107.5825149, -6.913691],
            [107.5827777, -6.912759],
            [107.5831184, -6.9118271],
            [107.5834349, -6.9110762],
            [107.5835421, -6.9108951],
            [107.5791752, -6.9079447],
        ]])
    def kelurahanGaruda(self, nama):
        """Append the Garuda village boundary as one polygon feature.

        Writes *nama* into the 'kelurahan_di_andir' attribute field and a
        single closed ring of coordinate pairs to the polygon writer.
        """
        self.kelurahan.record(nama)
        # One outer ring; pairs appear to be (longitude, latitude) -- TODO confirm.
        self.kelurahan.poly([[
            [107.5768158, -6.9174711],
            [107.5769161, -6.9168328],
            [107.5772109, -6.91553],
            [107.5773243, -6.9150268],
            [107.5774343, -6.9145455],
            [107.5775151, -6.9141873],
            [107.5775811, -6.9138958],
            [107.5777239, -6.9132647],
            [107.5779033, -6.9124703],
            [107.57808, -6.9116905],
            [107.578135, -6.9114494],
            [107.5781879, -6.9112091],
            [107.5785809, -6.9094757],
            [107.5786521, -6.9091642],
            [107.5787527, -6.9087209],
            [107.57897, -6.9082596],
            [107.5791752, -6.9079447],
            [107.5752133, -6.9053072],
            [107.5749994, -6.9056383],
            [107.5748938, -6.905937],
            [107.5746716, -6.9063303],
            [107.5749331, -6.9064624],
            [107.574801, -6.9066536],
            [107.5747065, -6.9068635],
            [107.5744047, -6.9067304],
            [107.5742339, -6.9071245],
            [107.5739906, -6.9076517],
            [107.5739769, -6.9079446],
            [107.5740325, -6.9080411],
            [107.5737868, -6.9084887],
            [107.5734708, -6.9094448],
            [107.5733059, -6.909819],
            [107.5737331, -6.9100447],
            [107.5749462, -6.910534],
            [107.5760434, -6.9109361],
            [107.5760824, -6.9109827],
            [107.5761964, -6.9110346],
            [107.5760496, -6.9114401],
            [107.5763302, -6.9115669],
            [107.5760199, -6.9123399],
            [107.5758189, -6.9122928],
            [107.5755187, -6.9124428],
            [107.5753446, -6.912919],
            [107.5753835, -6.9129355],
            [107.5750134, -6.9137162],
            [107.5746031, -6.9135943],
            [107.5745069, -6.913683],
            [107.5743743, -6.9139497],
            [107.5739091, -6.9150764],
            [107.5736437, -6.9150009],
            [107.5734682, -6.9149867],
            [107.5730218, -6.9153425],
            [107.5729636, -6.9155205],
            [107.572743, -6.9155911],
            [107.5725292, -6.9158665],
            [107.5724994, -6.9158819],
            [107.573832, -6.9170058],
            [107.5753239, -6.9173423],
            [107.5768158, -6.9174711],
        ]])
    def kelurahanKebonJeruk(self, nama):
        """Append the Kebon Jeruk village boundary as one polygon feature.

        Writes *nama* into the 'kelurahan_di_andir' attribute field and a
        single closed ring of coordinate pairs to the polygon writer.
        """
        self.kelurahan.record(nama)
        # One outer ring; pairs appear to be (longitude, latitude) -- TODO confirm.
        self.kelurahan.poly([[
            [107.592997, -6.9194399],
            [107.6041614, -6.9207853],
            [107.6045322, -6.9159498],
            [107.6045972, -6.9154333],
            [107.6046274, -6.9148208],
            [107.6046281, -6.9145413],
            [107.6022538, -6.9144167],
            [107.5983239, -6.9143134],
            [107.5952277, -6.914224],
            [107.5934297, -6.9139855],
            [107.5929728, -6.9140132],
            [107.592966, -6.9140969],
            [107.5929807, -6.9142711],
            [107.5929564, -6.9143133],
            [107.5928542, -6.9143204],
            [107.5926606, -6.9149552],
            [107.5926596, -6.9150317],
            [107.5925933, -6.91527],
            [107.5928199, -6.9154431],
            [107.5928159, -6.9157064],
            [107.5931579, -6.9159776],
            [107.5932464, -6.9160569],
            [107.593351, -6.9162309],
            [107.593343, -6.9163811],
            [107.5933698, -6.9164523],
            [107.5933806, -6.9166693],
            [107.592997, -6.9194399],
        ]])
    def kelurahanMaleber(self, nama):
        """Append the Maleber village boundary as one polygon feature.

        Writes *nama* into the 'kelurahan_di_andir' attribute field and a
        single closed ring of coordinate pairs to the polygon writer.
        Much of this ring is the shared border with Campaka traced in the
        opposite direction.
        """
        self.kelurahan.record(nama)
        # One outer ring; pairs appear to be (longitude, latitude) -- TODO confirm.
        self.kelurahan.poly([[
            [107.5709255, -6.902512],
            [107.570902, -6.9025687],
            [107.5708951, -6.9026101],
            [107.5709029, -6.9026318],
            [107.5709224, -6.9026604],
            [107.5712337, -6.9028629],
            [107.5713183, -6.9029548],
            [107.5713198, -6.9030174],
            [107.5712829, -6.9031001],
            [107.5711822, -6.9031591],
            [107.571029, -6.9033889],
            [107.5707861, -6.9038517],
            [107.5707491, -6.9039007],
            [107.5706565, -6.9041212],
            [107.570663, -6.9041775],
            [107.5706277, -6.9042101],
            [107.5706415, -6.9043086],
            [107.5706371, -6.9043805],
            [107.5705852, -6.904431],
            [107.5705486, -6.9044575],
            [107.570537, -6.9044838],
            [107.5704728, -6.9047611],
            [107.5703898, -6.9050277],
            [107.5703968, -6.9051163],
            [107.570331, -6.9053586],
            [107.5702812, -6.9058353],
            [107.5702582, -6.9058752],
            [107.5702735, -6.9059272],
            [107.5702705, -6.9060224],
            [107.5702621, -6.9061069],
            [107.5701796, -6.9062082],
            [107.5701916, -6.9062912],
            [107.5702372, -6.9063532],
            [107.570264, -6.9064906],
            [107.5701499, -6.906643],
            [107.5701471, -6.9066948],
            [107.5701655, -6.906761],
            [107.5701353, -6.9069387],
            [107.570016, -6.9070679],
            [107.5699838, -6.9070998],
            [107.5697183, -6.9079758],
            [107.5697504, -6.9080557],
            [107.5696888, -6.908298],
            [107.5696002, -6.9084285],
            [107.5696029, -6.9084791],
            [107.5695734, -6.9085164],
            [107.5695412, -6.9086335],
            [107.5694956, -6.9086948],
            [107.5694232, -6.9089158],
            [107.5694259, -6.9090809],
            [107.5693588, -6.9092193],
            [107.5693937, -6.909254],
            [107.5693401, -6.9092593],
            [107.5693213, -6.9093125],
            [107.5693403, -6.9093623],
            [107.5693562, -6.9094057],
            [107.5692167, -6.9096507],
            [107.5691523, -6.9096667],
            [107.569163, -6.9097412],
            [107.5691094, -6.9097226],
            [107.5691201, -6.9097865],
            [107.5688412, -6.9100128],
            [107.5700534, -6.9120367],
            [107.5714501, -6.9143263],
            [107.5715541, -6.9146136],
            [107.5716975, -6.914762],
            [107.5720703, -6.9154531],
            [107.5724994, -6.9158819],
            [107.5725292, -6.9158665],
            [107.572743, -6.9155911],
            [107.5729636, -6.9155205],
            [107.5730218, -6.9153425],
            [107.5734682, -6.9149867],
            [107.5736437, -6.9150009],
            [107.5739091, -6.9150764],
            [107.5742, -6.9143704],
            [107.5743743, -6.9139497],
            [107.5745069, -6.913683],
            [107.5746031, -6.9135943],
            [107.5750134, -6.9137162],
            [107.5753835, -6.9129355],
            [107.5753446, -6.912919],
            [107.5755187, -6.9124428],
            [107.5758189, -6.9122928],
            [107.5760199, -6.9123399],
            [107.5763302, -6.9115669],
            [107.5760496, -6.9114401],
            [107.5761964, -6.9110346],
            [107.5760824, -6.9109827],
            [107.5760434, -6.9109361],
            [107.5749462, -6.910534],
            [107.5737331, -6.9100447],
            [107.5733059, -6.909819],
            [107.5734708, -6.9094448],
            [107.5737868, -6.9084887],
            [107.5740325, -6.9080411],
            [107.5739769, -6.9079446],
            [107.5739906, -6.9076517],
            [107.5742339, -6.9071245],
            [107.5744047, -6.9067304],
            [107.5747065, -6.9068635],
            [107.574801, -6.9066536],
            [107.5749331, -6.9064624],
            [107.5746716, -6.9063303],
            [107.5748938, -6.905937],
            [107.5749994, -6.9056383],
            [107.5752133, -6.9053072],
            [107.5709255, -6.902512],
        ]])
# Kantor Kelurahan
def kantorKelurahanCampaka(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5631291, -6.8977897)
def kantorKelurahanCiroyom(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5875214, -6.9144205)
def kantorKelurahanDungusCariang(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5806731, -6.9125569)
def kantorKelurahanGaruda(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5764865, -6.9160994)
def kantorKelurahanKebonJeruk(self, nama):
self.kantor.record(nama)
self.kantor.point(107.6011905, -6.918966)
def kantorKelurahanMaleber(self, nama):
self.kantor.record(nama)
self.kantor.point(107.5734513, -6.9073058)
# Jalan
    def jalanKelurahanCampaka(self, nama):
        """Write the road network of Campaka village as polyline features.

        Each record()/line() pair below appends one polyline feature; the
        attribute value of every feature is the same *nama*.  Many vertex
        sequences double back on themselves (A, B, A) to trace road spurs
        as part of a single polyline.
        """
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5696727,-6.903724],
            [107.5697268,-6.9035733],
            [107.5696674,-6.9035376],
            [107.5697241,-6.9035733],
            [107.5699168,-6.9036068],
            [107.5701264,-6.9035813],
            [107.5702467,-6.9034683],
            [107.5703674,-6.9031728],
            [107.5704988,-6.9029597],
            [107.5707939,-6.9027334],
            [107.5708904,-6.902808],
            [107.5709445,-6.902873],
            [107.5710889,-6.9029757],
            [107.5709418,-6.9028703],
            [107.5708904,-6.902808],
            [107.5707939,-6.9027334],
            [107.5707916,-6.9025375],
            [107.5709002,-6.902592],
            [107.5707862,-6.9025348],
            [107.5705998,-6.9023923],
            [107.5704737,-6.902576],
            [107.5705998,-6.9023923],
            [107.5704469,-6.9022991],
            [107.570117,-6.9028236],
            [107.5699051,-6.9027145],
            [107.5702619,-6.9021713],
            [107.5704469,-6.9022991],
            [107.5702619,-6.9021713],
            [107.5697415,-6.9018784],
            [107.5696905,-6.9020248],
            [107.5697415,-6.9018784],
            [107.5694612,-6.9017094],
            [107.5693271,-6.9019916],
            [107.5694639,-6.901704],
            [107.5692372,-6.9016014],
            [107.569192,-6.9015038],
            [107.5679511,-6.9007108],
            [107.5667267,-6.8999008],
            [107.5679511,-6.9007108],
            [107.569192,-6.9015038],
            [107.5692372,-6.9016014],
            [107.5691916,-6.9016361],
            [107.5690106,-6.9015496],
            [107.5677473,-6.9007854],
            [107.5676952,-6.9007974],
            [107.5672929,-6.9015004],
            [107.5673036,-6.9015696],
            [107.5678079,-6.9018359],
            [107.5678814,-6.9018904],
            [107.5678814,-6.9019703],
            [107.567817,-6.9020156],
            [107.567722,-6.9019957],
            [107.567773,-6.9018186],
            [107.567722,-6.9019957],
            [107.5676416,-6.9022779],
            [107.5676843,-6.902328],
            [107.5675397,-6.9027626],
            [107.5677621,-6.9028579],
            [107.5675397,-6.9027626],
            [107.5676816,-6.9023226],
            [107.5684194,-6.9025176],
            [107.5684326,-6.9024771],
            [107.5688217,-6.9014312],
            [107.5690106,-6.9015496],
            [107.5691916,-6.9016361],
            [107.5689154,-6.9023657],
            [107.5699051,-6.9027145],
            [107.5689154,-6.9023657],
            [107.5688493,-6.902523],
            [107.5703674,-6.9031728],
            [107.5702467,-6.9034683],
            [107.569681,-6.9032161],
            [107.5697827,-6.9029278],
            [107.5688493,-6.902523],
            [107.5687598,-6.9027677],
            [107.569426,-6.9030878],
            [107.5687598,-6.9027677],
            [107.5686428,-6.9029574],
            [107.5689566,-6.9030772],
            [107.5692597,-6.9032343],
            [107.5689566,-6.9030772],
            [107.5686428,-6.9029574],
            [107.5684084,-6.9033349],
            [107.5680066,-6.9031487],
            [107.5684084,-6.9033349],
            [107.5681912,-6.9035905],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5688412,-6.9100128],
            [107.5684508,-6.9093516],
            [107.5687715,-6.9091011],
            [107.5688948,-6.9088615],
            [107.5689996,-6.9083494],
            [107.5688948,-6.9088615],
            [107.5687715,-6.9091011],
            [107.5684508,-6.9093516],
            [107.5682389,-6.9089975],
            [107.568412,-6.908723],
            [107.5682967,-6.9089014],
            [107.5681156,-6.9087791],
            [107.5683933,-6.908313],
            [107.5683718,-6.9082517],
            [107.5683933,-6.908313],
            [107.5685354,-6.9083875],
            [107.5687366,-6.9081665],
            [107.5688761,-6.9082224],
            [107.5687366,-6.9081665],
            [107.5685354,-6.9083875],
            [107.5683933,-6.908313],
            [107.5681867,-6.9086591],
            [107.5680821,-6.9085792],
            [107.5681867,-6.9086591],
            [107.5681156,-6.9087791],
            [107.5682389,-6.9089975],
            [107.5681156,-6.9087791],
            [107.5675416,-6.9078873],
            [107.5665894,-6.9062815],
            [107.5675416,-6.9078873],
            [107.5680661,-6.9075027],
            [107.5683826,-6.9072897],
            [107.5689941,-6.9070767],
            [107.5685623,-6.9072264],
            [107.568726,-6.9067624],
            [107.5687947,-6.9066466],
            [107.5689887,-6.9064269],
            [107.5689083,-6.9063497],
            [107.5686454,-6.9062326],
            [107.5689083,-6.9063497],
            [107.5691032,-6.9059463],
            [107.5693821,-6.9055415],
            [107.5693419,-6.905435],
            [107.569248,-6.9052673],
            [107.5691914,-6.905117],
            [107.5691941,-6.9050238],
            [107.5692531,-6.9048854],
            [107.5691941,-6.9050238],
            [107.5691914,-6.905117],
            [107.569248,-6.9052726],
            [107.5693419,-6.9054377],
            [107.5693658,-6.905322],
            [107.5698378,-6.9049999],
            [107.5699505,-6.90488],
            [107.5700098,-6.9047161],
            [107.5699505,-6.90488],
            [107.5698378,-6.9049999],
            [107.5693658,-6.905322],
            [107.5693419,-6.9054404],
            [107.5693821,-6.9055442],
            [107.5695592,-6.9057013],
            [107.5696459,-6.9058065],
            [107.5689887,-6.9064269],
            [107.5696459,-6.9058065],
            [107.5700026,-6.9054763],
            [107.5701421,-6.9055136],
            [107.570185,-6.9054923],
            [107.5702655,-6.9051222],
            [107.5702145,-6.9050184],
            [107.5701957,-6.9049278],
            [107.5702923,-6.904784],
            [107.5703826,-6.9046682],
            [107.5704148,-6.9045137],
            [107.5703746,-6.9044711],
            [107.569957,-6.9042754],
            [107.5698873,-6.9043047],
            [107.5697764,-6.9042687],
            [107.5696459,-6.9041396],
            [107.5696539,-6.9040651],
            [107.5696459,-6.9041396],
            [107.5697737,-6.9042687],
            [107.5698873,-6.9043047],
            [107.569957,-6.9042754],
            [107.5699454,-6.9042022],
            [107.569983,-6.9039732],
            [107.5700151,-6.9038534],
            [107.5700876,-6.9036909],
            [107.5701492,-6.903659],
            [107.5702512,-6.9036084],
            [107.5703692,-6.903433],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5654548,-6.9044202],
            [107.565461,-6.9044548],
            [107.5665894,-6.9062815],
            [107.5669547,-6.9055636],
            [107.5676279,-6.9058565],
            [107.5669547,-6.9055636],
            [107.5669681,-6.9054997],
            [107.5666784,-6.9053719],
            [107.5669681,-6.9054997],
            [107.5670673,-6.9052734],
            [107.5668984,-6.9052042],
            [107.5670673,-6.9052734],
            [107.5670888,-6.9052121],
            [107.5673731,-6.9053639],
            [107.5678103,-6.905537],
            [107.5677298,-6.9057314],
            [107.5676467,-6.9057048],
            [107.5677298,-6.9057314],
            [107.5678103,-6.905537],
            [107.5679069,-6.9053666],
            [107.5680946,-6.9054278],
            [107.5679069,-6.9053666],
            [107.5678103,-6.905537],
            [107.5673731,-6.9053639],
            [107.5670888,-6.9052121],
            [107.5671639,-6.9050471],
            [107.5673976,-6.9051418],
            [107.5671639,-6.9050471],
            [107.5673141,-6.9046796],
            [107.5669654,-6.9045518],
            [107.5673141,-6.9046796],
            [107.5673838,-6.9045358],
            [107.5676574,-6.9046423],
            [107.5676789,-6.9047302],
            [107.5678589,-6.9049181],
            [107.5681107,-6.9050271],
            [107.5684379,-6.9051376],
            [107.5681107,-6.9050271],
            [107.567982,-6.9049718],
            [107.5678961,-6.9049349],
            [107.5679954,-6.9046343],
            [107.5682126,-6.9046876],
            [107.5682475,-6.9045491],
            [107.568336,-6.9045624],
            [107.5684084,-6.9043068],
            [107.5684648,-6.9042882],
            [107.5686207,-6.9043856],
            [107.5688376,-6.9044905],
            [107.5690575,-6.9039127],
            [107.568787,-6.9037945],
            [107.5687437,-6.9038062],
            [107.5686874,-6.9039207],
            [107.5686096,-6.9039154],
            [107.5684648,-6.9042882],
            [107.5686096,-6.9039154],
            [107.5686874,-6.9039207],
            [107.5687437,-6.9038062],
            [107.568556,-6.9037343],
            [107.568784,-6.9032177],
            [107.568556,-6.9037343],
            [107.5680624,-6.9035426],
            [107.5681322,-6.9034095],
            [107.5680624,-6.9035426],
            [107.567864,-6.9035053],
            [107.5676682,-6.9039234],
            [107.5679954,-6.9040672],
            [107.5676682,-6.9039234],
            [107.5674589,-6.9043707],
            [107.5679954,-6.9046343],
            [107.5674589,-6.9043707],
            [107.5673838,-6.9045358],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5652,-6.9039968],
            [107.5654548,-6.9044202],
            [107.5658421,-6.9040966],
            [107.5657401,-6.9039155],
            [107.5658421,-6.9040966],
            [107.5659385,-6.9041193],
            [107.5660191,-6.9041552],
            [107.5663946,-6.9039155],
            [107.5665502,-6.9037025],
            [107.5666735,-6.9036812],
            [107.5667326,-6.9035641],
            [107.5667326,-6.9034575],
            [107.5665931,-6.9034096],
            [107.5667326,-6.9034575],
            [107.5667326,-6.9035641],
            [107.5672553,-6.9037584],
            [107.5674243,-6.9037904],
            [107.5672553,-6.9037584],
            [107.5675265,-6.9032179],
            [107.5675855,-6.9031939],
            [107.5676123,-6.9030981],
            [107.5675855,-6.9031939],
            [107.5676821,-6.9032179],
            [107.567733,-6.9031966],
            [107.5678108,-6.902933],
            [107.567733,-6.9031966],
            [107.5676821,-6.9032179],
            [107.5675855,-6.9031939],
            [107.5675265,-6.9032179],
            [107.5670491,-6.9041925],
            [107.5663946,-6.9039155],
            [107.5667755,-6.9040806],
            [107.5665984,-6.9044587],
            [107.5664697,-6.9046984],
            [107.5665984,-6.9044587],
            [107.5660191,-6.9041552],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5671064,-6.8928797],
            [107.5670844,-6.8929687],
            [107.5671826,-6.8930938],
            [107.567197,-6.8931524],
            [107.5671853,-6.8930912],
            [107.5672721,-6.8931284],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5655931,-6.8933281],
            [107.5659042,-6.8934826],
            [107.5658211,-6.8936264],
            [107.5659042,-6.8934826],
            [107.5663951,-6.8937382],
            [107.5665453,-6.8933974],
            [107.5663951,-6.8937382],
            [107.5663887,-6.8937888],
            [107.5661386,-6.8941398],
            [107.5661306,-6.8942223],
            [107.5659696,-6.8944886],
            [107.5661306,-6.8942223],
            [107.5661386,-6.8941398],
            [107.5663914,-6.8937888],
            [107.5663951,-6.8937382],
            [107.5666767,-6.8939246],
            [107.5665345,-6.8941536],
            [107.5666767,-6.8939246],
            [107.5667062,-6.8939379],
            [107.5670378,-6.8933335],
            [107.5671129,-6.893171],
            [107.567197,-6.8931524],
            [107.5675913,-6.8934347],
            [107.5675618,-6.8934853],
            [107.5673945,-6.8934293],
            [107.5673473,-6.8934426],
            [107.567197,-6.8937036],
            [107.5674331,-6.8938474],
            [107.567197,-6.8937036],
            [107.5670468,-6.8939539],
            [107.567197,-6.8937036],
            [107.5673473,-6.8934426],
            [107.5673918,-6.8934267],
            [107.5675618,-6.8934853],
            [107.5677549,-6.8935971],
            [107.5677818,-6.893677],
            [107.5676852,-6.8938634],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5648649,-6.8914296],
            [107.5654767,-6.8916706],
            [107.5654982,-6.8916147],
            [107.5654767,-6.8916706],
            [107.5658814,-6.891821],
            [107.5658496,-6.8919396],
            [107.5658814,-6.891821],
            [107.5660856,-6.891857],
            [107.5661017,-6.8918038],
            [107.5660933,-6.8918396],
            [107.566096,-6.8919515],
            [107.5658559,-6.8926917],
            [107.5657691,-6.8926799],
            [107.5655824,-6.8926145],
            [107.5657691,-6.8926825],
            [107.5658559,-6.8926917],
            [107.5655931,-6.8933281],
            [107.5654161,-6.8932989],
            [107.5655555,-6.8928994],
            [107.5654161,-6.8932989],
            [107.5652122,-6.8935172],
            [107.5654161,-6.8932989],
            [107.5655931,-6.8933281],
            [107.5657084,-6.8930539],
            [107.5661698,-6.893227],
            [107.5663522,-6.8928116],
            [107.5664836,-6.8929127],
            [107.5663522,-6.8928116],
            [107.5664809,-6.8924894],
            [107.5660035,-6.8922524],
            [107.5664809,-6.8924894],
            [107.5665332,-6.8923775],
            [107.5669328,-6.8925346],
            [107.5665332,-6.8923775],
            [107.5665842,-6.8922337],
            [107.5665577,-6.8921633],
            [107.566096,-6.8919515],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5645095,-6.8932962],
            [107.564906,-6.8936092],
            [107.5650572,-6.8937553],
            [107.5652261,-6.893517],
            [107.5650853,-6.8934371],
            [107.5650424,-6.8933612],
            [107.5650531,-6.8932494],
            [107.5651076,-6.8930699],
            [107.5648662,-6.8929607],
            [107.5651076,-6.8930699],
            [107.5652712,-6.892713],
            [107.5652364,-6.8927836],
            [107.5650003,-6.8926851],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5650859,-6.894871],
            [107.5649973,-6.8950414],
            [107.5650724,-6.8950787],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5647506,-6.8957444],
            [107.5650142,-6.8958335],
            [107.565051,-6.8958136],
            [107.565338,-6.8952704],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5599402,-6.8953401],
            [107.5601682,-6.8956996],
            [107.5602335,-6.8957136],
            [107.5606604,-6.8959371],
            [107.5607914,-6.8960384],
            [107.5608343,-6.8961183],
            [107.5608396,-6.8962195],
            [107.5606697,-6.8965144],
            [107.5601682,-6.8956996],
            [107.5606697,-6.8965144],
            [107.561225,-6.8974171],
            [107.5615853,-6.8971515],
            [107.5618964,-6.8967574],
            [107.5615853,-6.8971515],
            [107.561225,-6.8974171],
            [107.5613001,-6.8975396],
            [107.5615048,-6.8974124],
            [107.5613001,-6.8975396],
            [107.5615254,-6.897915],
            [107.5622451,-6.8974817],
            [107.562449,-6.8975935],
            [107.5624651,-6.8976841],
            [107.562288,-6.8978598],
            [107.5624651,-6.8976841],
            [107.562449,-6.8975935],
            [107.5622451,-6.8974817],
            [107.5615254,-6.897915],
            [107.5619116,-6.8985328],
            [107.5621378,-6.8983551],
            [107.5619116,-6.8985328],
            [107.5620162,-6.8987299],
            [107.5621088,-6.8986744],
            [107.5620162,-6.8987299],
            [107.5625634,-6.8996299],
            [107.5629886,-6.8992762],
            [107.5632032,-6.8989993],
            [107.5634816,-6.8983231],
            [107.56331,-6.8982086],
            [107.5631624,-6.898198],
            [107.5630498,-6.8980702],
            [107.5628835,-6.8981447],
            [107.5627816,-6.8983311],
            [107.5628835,-6.8981447],
            [107.5630498,-6.8980702],
            [107.5631281,-6.8979182],
            [107.5631651,-6.8978904],
            [107.5633797,-6.8980076],
            [107.56331,-6.8982086],
            [107.5633797,-6.8980076],
            [107.5631651,-6.8978904],
            [107.5632751,-6.8976588],
            [107.564702,-6.8986493],
            [107.5641661,-6.8996427],
            [107.5642117,-6.8997129],
            [107.564378,-6.8998451],
            [107.5646194,-6.8994931],
            [107.5643512,-6.8993173],
            [107.5646194,-6.8994931],
            [107.5647642,-6.8992588],
            [107.5644906,-6.899083],
            [107.5647642,-6.8992588],
            [107.5649922,-6.8988972],
            [107.564702,-6.8986493],
            [107.5649922,-6.8988972],
            [107.5652202,-6.8990516],
            [107.5649834,-6.8994079],
            [107.5647642,-6.8992588],
            [107.5649861,-6.8994079],
            [107.5648574,-6.8996475],
            [107.5646194,-6.8994931],
            [107.5648574,-6.8996502],
            [107.5646033,-6.9000368],
            [107.5643699,-6.8998744],
            [107.564378,-6.8998451],
            [107.5643699,-6.8998744],
            [107.5644263,-6.8999223],
            [107.56404,-6.9004975],
            [107.5645309,-6.9007957],
            [107.5647079,-6.9005507],
            [107.5646489,-6.9004629],
            [107.5642385,-6.9001993],
            [107.5646489,-6.9004629],
            [107.5647079,-6.9005507],
            [107.5649171,-6.9002392],
            [107.5646033,-6.9000368],
            [107.5649171,-6.9002392],
            [107.5655255,-6.8992671],
            [107.5652202,-6.8990516],
            [107.5655255,-6.8992671],
            [107.5651183,-6.8999223],
            [107.5655742,-6.9002392],
            [107.5652999,-6.9000443],
            [107.5656735,-6.8995149],
            [107.5654796,-6.8993493],
            [107.5656735,-6.8995149],
            [107.5658688,-6.8996505],
            [107.5658237,-6.899688],
            [107.5654991,-6.9001913],
            [107.5658237,-6.899688],
            [107.5658688,-6.8996505],
            [107.5659544,-6.8996715],
            [107.5664589,-6.9000073],
            [107.5660753,-6.899757],
            [107.5662121,-6.8996239],
            [107.5667267,-6.8999008],
            [107.566869,-6.8996741],
            [107.5667993,-6.8995809],
            [107.5664962,-6.8993839],
            [107.5665605,-6.8993014],
            [107.5664935,-6.8993839],
            [107.566287,-6.8992534],
            [107.5663781,-6.8992028],
            [107.5662789,-6.8992561],
            [107.5659168,-6.8990324],
            [107.5659544,-6.8988939],
            [107.5663325,-6.8980099],
            [107.5676199,-6.8984603],
            [107.5663325,-6.8980099],
            [107.5651229,-6.8975945],
            [107.5649592,-6.8979407],
            [107.5649405,-6.8980845],
            [107.5653052,-6.8982549],
            [107.5649405,-6.8980845],
            [107.5648198,-6.8984067],
            [107.5659168,-6.8990324],
            [107.5648036,-6.8984017],
            [107.563999,-6.8978315],
            [107.5648036,-6.8984017],
            [107.5649405,-6.8980845],
            [107.5649592,-6.8979407],
            [107.5651229,-6.8975945],
            [107.5636133,-6.8970598],
            [107.5649061,-6.8975232],
            [107.5654991,-6.8962457],
            [107.5655847,-6.896221],
            [107.5683769,-6.897161],
            [107.5669875,-6.896703],
            [107.5671217,-6.8963249],
            [107.5669849,-6.8962583],
            [107.5669527,-6.8962264],
            [107.5666764,-6.8960719],
            [107.5667461,-6.8958616],
            [107.5665664,-6.895779],
            [107.5663358,-6.8961145],
            [107.5662124,-6.896253],
            [107.5663358,-6.8961145],
            [107.5665664,-6.895779],
            [107.566612,-6.8956272],
            [107.5667087,-6.8956643],
            [107.5668133,-6.8956696],
            [107.5668749,-6.8956991],
            [107.566808,-6.8956696],
            [107.5667087,-6.8956669],
            [107.566612,-6.8956272],
            [107.5665745,-6.8957338],
            [107.5662687,-6.895582],
            [107.5665745,-6.8957338],
            [107.5665986,-6.8956619],
            [107.5666469,-6.8955607],
            [107.5664591,-6.8954329],
            [107.5664053,-6.8953507],
            [107.5661614,-6.8952065],
            [107.5661105,-6.8952518],
            [107.5660675,-6.8953796],
            [107.5665986,-6.8956619],
            [107.5660675,-6.8953796],
            [107.5658852,-6.8960134],
            [107.5659442,-6.8960293],
            [107.5660622,-6.896048],
            [107.5659442,-6.8960293],
            [107.5658852,-6.8960134],
            [107.5657618,-6.8962716],
            [107.5658852,-6.8960134],
            [107.56603,-6.8955234],
            [107.5655177,-6.8952598],
            [107.5656006,-6.895143],
            [107.5656275,-6.8950498],
            [107.5656572,-6.8949509],
            [107.5656275,-6.8950524],
            [107.5656033,-6.8951456],
            [107.5655177,-6.8952598],
            [107.5653834,-6.8955823],
            [107.5652627,-6.8957927],
            [107.5653807,-6.8955823],
            [107.5655177,-6.8952598],
            [107.56603,-6.8955234],
            [107.5660675,-6.8953796],
            [107.5661105,-6.8952518],
            [107.5661614,-6.8952065],
            [107.566384,-6.8948391],
            [107.5662164,-6.8947043],
            [107.5658945,-6.8951064],
            [107.5662164,-6.8947043],
            [107.5659696,-6.8944886],
            [107.5659015,-6.8944306],
            [107.5660249,-6.8942122],
            [107.5659015,-6.8944306],
            [107.5658372,-6.8943746],
            [107.5659364,-6.8941909],
            [107.5658372,-6.8943746],
            [107.565408,-6.8940311],
            [107.5652632,-6.8942175],
            [107.565408,-6.8940311],
            [107.5652685,-6.8939273],
            [107.5650996,-6.8941802],
            [107.5652685,-6.8939273],
            [107.5651934,-6.8939033],
            [107.5652873,-6.8937995],
            [107.5651934,-6.8939033],
            [107.565003,-6.8937116],
            [107.5649279,-6.8937995],
            [107.565003,-6.8937116],
            [107.564906,-6.8936092],
            [107.5647509,-6.8938474],
            [107.5646677,-6.8940684],
            [107.5644421,-6.8944663],
            [107.5646677,-6.8940684],
            [107.5643137,-6.8938607],
            [107.5644075,-6.8936983],
            [107.5643137,-6.8938607],
            [107.5641259,-6.8941163],
            [107.5643137,-6.8938607],
            [107.5641179,-6.8937276],
            [107.5639596,-6.8940045],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5662883,-6.891759],
            [107.5668064,-6.892011],
            [107.5668571,-6.8920704],
            [107.5668544,-6.8921343],
            [107.566892,-6.8922196],
            [107.5668893,-6.8922835],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5667062,-6.8939379],
            [107.566732,-6.8939459],
            [107.5671595,-6.8942308],
            [107.5672909,-6.8939912],
            [107.5671595,-6.8942308],
            [107.5674331,-6.8944066],
            [107.5675672,-6.8942016],
            [107.5676959,-6.8940631],
            [107.5675672,-6.8942016],
            [107.5674331,-6.8944066],
            [107.5680124,-6.8948007],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.566384,-6.8948391],
            [107.5667287,-6.8951037],
            [107.5668977,-6.894896],
            [107.5670774,-6.8946936],
            [107.5668548,-6.8945498],
            [107.5670774,-6.8946936],
            [107.5672866,-6.8948241],
            [107.5671659,-6.895101],
            [107.5672165,-6.8951513],
            [107.5670533,-6.8953726],
            [107.5667287,-6.8951037],
            [107.5674365,-6.8956839],
            [107.5671217,-6.8963249],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5651612,-6.9012271],
            [107.565019,-6.9014638],
            [107.5653543,-6.9016398],
            [107.5656493,-6.9017889],
            [107.5656433,-6.9018904],
            [107.5653463,-6.9023827],
            [107.5650861,-6.9022496],
            [107.5650378,-6.9021324],
            [107.5651337,-6.9020022],
            [107.5653543,-6.9016398],
            [107.5651987,-6.9019114],
            [107.5655126,-6.9020739],
            [107.5651987,-6.9019114],
            [107.565078,-6.9020632],
            [107.564834,-6.9017274],
            [107.565019,-6.9014638],
            [107.564834,-6.9017274],
            [107.5646569,-6.9018845],
            [107.5647749,-6.9020579],
            [107.5646569,-6.9018845],
            [107.5641137,-6.9021941],
            [107.5637918,-6.9016483],
            [107.5641137,-6.9021941],
            [107.5644034,-6.9026734],
            [107.5647165,-6.9024614],
            [107.5648849,-6.9026914],
            [107.5648795,-6.9027712],
            [107.5645911,-6.9029797],
            [107.5644034,-6.9026734],
            [107.5645911,-6.9029797],
            [107.5652,-6.9039968],
            [107.5652759,-6.9039315],
            [107.5655119,-6.9035215],
            [107.5656982,-6.9031164],
            [107.565843,-6.9027277],
            [107.5659732,-6.9023392],
            [107.5669281,-6.9003421],
            [107.5668208,-6.9005605],
            [107.5675236,-6.9008747],
            [107.567309,-6.9007815],
            [107.5669227,-6.9015643],
            [107.5664721,-6.901322],
            [107.5669227,-6.9015643],
            [107.5664453,-6.9025416],
            [107.5659732,-6.9023392],
            [107.5664453,-6.9025416],
            [107.5668798,-6.9027439],
            [107.5670339,-6.9027383],
            [107.5670622,-6.9025629],
            [107.5674485,-6.9017427],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5672263,-6.8961199],
            [107.5676715,-6.8964101],
            [107.5676956,-6.8964767],
            [107.5677788,-6.8965273],
            [107.5678603,-6.8965123],
            [107.5680296,-6.8964031],
            [107.5676993,-6.8959798],
            [107.5674529,-6.8956975],
            [107.567702,-6.8959771],
            [107.5680296,-6.8964031],
            [107.5683001,-6.8968212],
            [107.56844,-6.8969251],
            [107.5685553,-6.8968878],
            [107.5685925,-6.8968132],
            [107.5686032,-6.8967493],
            [107.5686036,-6.8967892],
            [107.5684423,-6.8965895],
            [107.5682733,-6.8963392],
            [107.5682254,-6.896254],
            [107.56798,-6.8959758],
            [107.5681017,-6.8958999],
            [107.56798,-6.8959758],
            [107.5676742,-6.8956216],
            [107.5673184,-6.8953753],
            [107.5673067,-6.8952994],
            [107.567642,-6.8947802],
            [107.5673067,-6.8952994],
            [107.5673184,-6.895378],
            [107.5676742,-6.8956216],
            [107.5680292,-6.8950265],
            [107.5679219,-6.8952075],
            [107.5681017,-6.8958999],
            [107.5682254,-6.8961502],
            [107.5682254,-6.896254],
        ]])
        self.jalan.record(nama)
        self.jalan.line([[
            [107.5637852,-6.8990676],
            [107.5635867,-6.898977],
            [107.5634794,-6.8989717],
            [107.5632751,-6.8988477],
            [107.5634794,-6.8989717],
            [107.5629859,-6.899664],
            [107.5630771,-6.899925],
            [107.5628772,-6.9001332],
            [107.5625634,-6.8996299],
            [107.5628772,-6.9001332],
            [107.5637918,-6.9016483],
            [107.5639374,-6.9015708],
            [107.5642378,-6.9011448],
            [107.5645309,-6.9007957],
            [107.5651612,-6.9012271],
            [107.5655152,-6.900657],
            [107.5655126,-6.9006064],
            [107.5649171,-6.9002392],
            [107.5655126,-6.9006064],
            [107.5656708,-6.9004946],
            [107.5657566,-6.9004946],
            [107.5662609,-6.9007475],
            [107.5661911,-6.9007103],
            [107.5664406,-6.9001564],
            [107.5665184,-6.9002097],
            [107.5664406,-6.9001564],
            [107.5661214,-6.8999221],
            [107.566159,-6.8999514],
            [107.5657566,-6.9004946],
            [107.5659578,-6.900223],
            [107.5660544,-6.9002709],
            [107.5661885,-6.8999753],
            [107.5660544,-6.9002709],
            [107.5659149,-6.9005745],
        ]])
def jalanKelurahanCiroyom(self, nama):
    """Write the road polylines of Kelurahan Ciroyom to the output layer.

    Each ``self.jalan.record(nama)`` / ``self.jalan.line([[...]])`` pair
    emits one polyline feature whose attribute record is *nama* and whose
    geometry is the list of ``[x, y]`` vertices that follows.

    NOTE(review): ``self.jalan`` appears to follow the pyshp
    ``shapefile.Writer`` API (``record`` then ``line``) — confirm.
    Coordinates (x ≈ 107.58, y ≈ -6.91) look like WGS84 lon/lat around
    Bandung, Indonesia — TODO confirm the CRS.  Vertex order, including
    deliberately repeated points where the trace backtracks, is part of
    the digitised geometry; do not deduplicate or reorder.

    :param nama: attribute value written for every segment (presumably
        the street/kelurahan name — verify against the writer's fields).
    """
    # Segment 1
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5840308,-6.915249],
        [107.5842615,-6.9142664],
        [107.5845351,-6.9142771],
        [107.5845431,-6.9142478],
        [107.5845807,-6.9137019],
        [107.5848891,-6.9136514],
        [107.5848918,-6.9135901],
        [107.584983,-6.9135795],
        [107.5850367,-6.913204],
        [107.584924,-6.9132067],
        [107.5850367,-6.913204],
        [107.5851118,-6.9129031],
        [107.5851359,-6.9128153],
        [107.5852083,-6.9126022],
        [107.58516,-6.9125836],
        [107.5852405,-6.9123386],
        [107.5844621,-6.9118109],
        [107.5852405,-6.9123386],
        [107.5857367,-6.9126875],
        [107.5855785,-6.9130576],
        [107.585439,-6.9135209],
        [107.5849991,-6.9134596],
        [107.585439,-6.9135235],
        [107.5854014,-6.9137259],
        [107.5854014,-6.9139815],
        [107.5852915,-6.9139815],
        [107.5854309,-6.9147564],
        [107.5852888,-6.9148203],
        [107.585321,-6.9151558],
        [107.5853103,-6.9154833],
        [107.5851842,-6.9154966],
        [107.5851815,-6.9154327],
        [107.5851842,-6.9154966],
        [107.5851815,-6.9156191],
    ]])
    # Segment 2
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5850876,-6.9184706],
        [107.586536,-6.9186623],
        [107.5878127,-6.9187848],
        [107.5878556,-6.918199],
        [107.5879307,-6.918199],
        [107.5878556,-6.918199],
        [107.5878127,-6.9187848],
        [107.5884028,-6.9188594],
        [107.5889982,-6.9189233],
        [107.5901462,-6.9190724],
        [107.5917019,-6.9192747],
        [107.5923242,-6.919344],
        [107.5929357,-6.9194292],
        [107.5930215,-6.9188221],
        [107.5917829,-6.918673],
        [107.5915737,-6.9185186],
        [107.5914717,-6.9183109],
        [107.5917185,-6.9181937],
        [107.591461,-6.9183162],
        [107.5914825,-6.9181511],
        [107.5914771,-6.9183109],
        [107.5909299,-6.9183748],
        [107.5907529,-6.9181937],
        [107.5901896,-6.9185186],
        [107.590136,-6.9186624],
        [107.5901462,-6.9190724],
        [107.590136,-6.9186624],
        [107.5901896,-6.9185186],
        [107.590256,-6.91848],
        [107.5901574,-6.9182363],
        [107.5893079,-6.9181977],
        [107.5893474,-6.9178156],
        [107.5892938,-6.9183269],
        [107.5895995,-6.9183641],
        [107.5896371,-6.918215],
        [107.5895995,-6.9183641],
        [107.5895942,-6.9185399],
        [107.5895995,-6.9183641],
        [107.5892938,-6.9183269],
        [107.5890631,-6.9183002],
        [107.5889982,-6.9189233],
        [107.5890631,-6.9183002],
        [107.5885267,-6.9182363],
        [107.5884028,-6.9188594],
        [107.5885267,-6.9182363],
        [107.5881994,-6.9181724],
        [107.58806,-6.9181937],
        [107.5881994,-6.9181724],
        [107.5882316,-6.9177357],
        [107.5879098,-6.9176931],
        [107.5882316,-6.9177357],
        [107.5881994,-6.9181724],
        [107.5885267,-6.9182363],
        [107.5887124,-6.9182576],
        [107.5888163,-6.9172991],
        [107.5889773,-6.9172298],
        [107.5889987,-6.9171074],
        [107.5889773,-6.9172298],
        [107.5888163,-6.9172991],
        [107.5887626,-6.9172586],
        [107.5884622,-6.9172347],
        [107.5878239,-6.9171415],
        [107.5872043,-6.9170935],
        [107.5870353,-6.9171415],
        [107.5867617,-6.9171441],
        [107.5866705,-6.9171388],
        [107.5866437,-6.9173145],
        [107.5865364,-6.9173465],
        [107.5864774,-6.9174743],
        [107.5865042,-6.9175968],
        [107.5865525,-6.9177672],
        [107.5865632,-6.918108],
        [107.586536,-6.9186623],
    ]])
    # Segment 3
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5851589,-6.9176585],
        [107.5852342,-6.9176629],
        [107.5852879,-6.9171677],
        [107.5852342,-6.9176629],
        [107.5861998,-6.9177375],
        [107.586232,-6.9173647],
        [107.5854917,-6.9172955],
        [107.5853844,-6.9180304],
        [107.5861569,-6.9180996],
        [107.5861998,-6.9177375],
    ]])
    # Segment 4
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5930215,-6.9188221],
        [107.5931774,-6.9177051],
        [107.5931077,-6.9177158],
        [107.5931774,-6.9177051],
        [107.5932954,-6.9170661],
        [107.5933169,-6.9165122],
        [107.5927697,-6.9164803],
        [107.5928233,-6.9160383],
        [107.592877,-6.915969],
        [107.5928877,-6.915772],
        [107.592877,-6.915969],
        [107.5929521,-6.915985],
        [107.5929574,-6.9158226],
        [107.5929521,-6.915985],
        [107.5932149,-6.9160289],
        [107.5929521,-6.915985],
        [107.592877,-6.915969],
        [107.5928233,-6.9160383],
        [107.5927697,-6.9164803],
        [107.5913213,-6.9164962],
        [107.5910531,-6.9168104],
        [107.5909136,-6.917966],
        [107.5907529,-6.9181937],
        [107.5909136,-6.917966],
        [107.590974,-6.9174628],
        [107.5910531,-6.9168104],
        [107.5910531,-6.9164962],
        [107.5913213,-6.9164962],
        [107.5910531,-6.9164962],
        [107.5904737,-6.9164536],
        [107.5904415,-6.9166986],
        [107.5904737,-6.9164536],
        [107.590066,-6.9164004],
        [107.5899534,-6.9171459],
        [107.5898944,-6.9177424],
        [107.5898622,-6.9182163],
        [107.589889,-6.9177371],
        [107.589948,-6.9171353],
        [107.5900017,-6.9168477],
        [107.5898085,-6.9168477],
        [107.5898085,-6.9167359],
        [107.589712,-6.9165921],
        [107.5897066,-6.9163738],
        [107.590066,-6.9164004],
        [107.5897066,-6.9163738],
        [107.5894223,-6.9163205],
        [107.5894062,-6.9164749],
        [107.5893794,-6.9166187],
        [107.5894545,-6.9166773],
        [107.5894598,-6.9167146],
        [107.5894116,-6.9167998],
        [107.5894759,-6.9168158],
        [107.5894545,-6.9169223],
        [107.5896261,-6.916885],
        [107.5898085,-6.9168477],
    ]])
    # Segment 5
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5890883,-6.9137554],
        [107.589024,-6.9145329],
        [107.5891151,-6.9147299],
        [107.5900646,-6.9159441],
        [107.5902953,-6.9157844],
        [107.5908371,-6.915827],
        [107.5902953,-6.9157844],
        [107.5904884,-6.9155501],
        [107.5903972,-6.9156619],
        [107.5899627,-6.91514],
        [107.5903972,-6.9156619],
        [107.5902953,-6.9157844],
        [107.5900646,-6.9159441],
        [107.5905314,-6.9164554],
        [107.5910531,-6.9164962],
        [107.5913213,-6.9164962],
        [107.5918456,-6.9164926],
        [107.5916793,-6.9158696],
        [107.5913467,-6.9155394],
        [107.5915238,-6.9153264],
        [107.5915881,-6.915124],
        [107.5917705,-6.9149057],
        [107.5920924,-6.9152039],
        [107.5917652,-6.914911],
        [107.5913414,-6.9146021],
        [107.5906923,-6.9142666],
        [107.590923,-6.914224],
        [107.5916203,-6.9142453],
        [107.5909283,-6.9142187],
        [107.5906869,-6.914272],
        [107.5905367,-6.9142933],
        [107.5904026,-6.9146075],
        [107.5904455,-6.9145169],
        [107.5911965,-6.9149589],
        [107.5910249,-6.9152838],
        [107.5913414,-6.9155501],
        [107.5910356,-6.9152891],
        [107.5905099,-6.914895],
        [107.590585,-6.9149589],
        [107.5907459,-6.9147033],
        [107.5904509,-6.9145169],
        [107.5901183,-6.9143465],
        [107.5894048,-6.9140909],
        [107.5890669,-6.9139844],
        [107.589024,-6.9145436],
        [107.5885358,-6.9141388],
        [107.5880315,-6.9145649],
        [107.5880047,-6.914927],
        [107.5886163,-6.9150761],
        [107.5891205,-6.9147299],
        [107.5886216,-6.9150708],
        [107.5881335,-6.9154702],
        [107.5884178,-6.916237],
        [107.5894223,-6.9163205],
        [107.5890642,-6.9162929],
        [107.5890964,-6.9164634],
        [107.5890695,-6.9166178],
        [107.5890481,-6.9168255],
        [107.5890374,-6.9168628],
        [107.5886833,-6.9169586],
    ]])
    # Segment 6
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5884622,-6.9172347],
        [107.5884771,-6.9170196],
        [107.5883752,-6.916945],
        [107.5883591,-6.9167347],
        [107.5883645,-6.9162341],
        [107.5884178,-6.916237],
        [107.5883672,-6.9162288],
        [107.5879675,-6.9161729],
        [107.5879756,-6.916314],
        [107.5879407,-6.9166016],
        [107.5879622,-6.9168359],
        [107.5879434,-6.9166016],
        [107.5879783,-6.9163113],
        [107.5879648,-6.9161702],
        [107.5876081,-6.9161409],
        [107.5876403,-6.916543],
        [107.587702,-6.9168332],
        [107.5877127,-6.9170169],
        [107.5878415,-6.9170382],
        [107.5878239,-6.9171415],
        [107.5878361,-6.9170382],
        [107.5877047,-6.9170089],
        [107.5877047,-6.9168279],
        [107.5876376,-6.916535],
        [107.5876081,-6.9161382],
        [107.5875732,-6.9161329],
        [107.5875893,-6.9158533],
        [107.5883001,-6.9159252],
        [107.5878924,-6.9158853],
        [107.58793,-6.9157388],
        [107.5881335,-6.9154702],
        [107.58793,-6.9157335],
        [107.5878897,-6.915888],
        [107.5875866,-6.915856],
        [107.587769,-6.9154752],
        [107.5880399,-6.9149427],
        [107.5880047,-6.914927],
        [107.5880315,-6.9145649],
        [107.586959,-6.9154167],
        [107.5869912,-6.9154353],
        [107.5869885,-6.9154726],
        [107.5870127,-6.9155152],
        [107.5869697,-6.9158693],
        [107.5869697,-6.9160797],
        [107.5875786,-6.9161356],
        [107.5869697,-6.916077],
        [107.5867739,-6.9160664],
        [107.5867364,-6.91633],
        [107.5867364,-6.916559],
        [107.5866908,-6.9166415],
        [107.5867203,-6.9168572],
        [107.5867471,-6.9170888],
        [107.5867617,-6.9171441],
        [107.5867444,-6.9170888],
        [107.5867176,-6.9168519],
        [107.5863153,-6.9168998],
        [107.5862643,-6.9167879],
        [107.5863153,-6.9169078],
        [107.5863421,-6.9169956],
        [107.5859881,-6.9169983],
        [107.5853631,-6.9169424],
        [107.58517,-6.9168998],
        [107.5853631,-6.9169424],
        [107.5853792,-6.9166974],
        [107.5855213,-6.9167081],
        [107.5853846,-6.9166974],
        [107.5851861,-6.9167001],
        [107.5853792,-6.9166974],
        [107.5853872,-6.916543],
        [107.5857359,-6.9165989],
        [107.5853872,-6.9165377],
        [107.5851941,-6.916527],
        [107.5853899,-6.916543],
        [107.5854087,-6.9163859],
        [107.5851995,-6.9163513],
        [107.5854141,-6.9163832],
        [107.5854275,-6.9162208],
        [107.5855991,-6.9158294],
        [107.5852236,-6.9157149],
        [107.5856018,-6.9158294],
        [107.5861222,-6.9159865],
        [107.586149,-6.9163273],
        [107.5862268,-6.916322],
        [107.5862965,-6.9163646],
        [107.5864521,-6.9163486],
        [107.5865674,-6.9163752],
        [107.5867418,-6.916338],
        [107.5865647,-6.9163779],
        [107.5864494,-6.9163486],
        [107.5862938,-6.9163646],
        [107.5862187,-6.916322],
        [107.5861436,-6.9163246],
        [107.5861302,-6.9164737],
        [107.5859344,-6.9164498],
        [107.5857735,-6.9164072],
        [107.5854087,-6.9163859],
        [107.5854302,-6.9162155],
        [107.5856233,-6.9162794],
    ]])
    # Segment 7
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5834349,-6.9110762],
        [107.5836306,-6.9112092],
        [107.5834267,-6.9116352],
        [107.583239,-6.9115606],
        [107.5834267,-6.9116352],
        [107.5836306,-6.9112092],
        [107.5838023,-6.9113423],
        [107.583636,-6.9116299],
        [107.5838023,-6.9113423],
        [107.5844621,-6.9118109],
        [107.5844192,-6.9119707],
        [107.584505,-6.9122316],
        [107.5845694,-6.9123754],
        [107.5845425,-6.9126044],
        [107.5840544,-6.9125512],
        [107.5839471,-6.9128334],
        [107.5845479,-6.9128387],
        [107.5839471,-6.9128334],
        [107.5837969,-6.9131689],
        [107.5847035,-6.9132062],
        [107.5837969,-6.9131689],
        [107.5836441,-6.9139414],
    ]])
    # Segment 8 (short two-point link)
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5830862,-6.9138388],
        [107.5831746,-6.913342],
    ]])
    # Segment 9
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5853103,-6.9154833],
        [107.5856881,-6.9154143],
        [107.5855915,-6.91514],
        [107.5856881,-6.9154143],
        [107.5856881,-6.9155794],
        [107.5856291,-6.9158377],
        [107.5857927,-6.9158829],
        [107.5859697,-6.9153903],
        [107.5856881,-6.9154143],
        [107.5859697,-6.9153903],
        [107.5859992,-6.9152945],
        [107.586077,-6.9152732],
        [107.5866376,-6.9153664],
        [107.586959,-6.9154167],
        [107.5866376,-6.9153664],
        [107.5868307,-6.9147646],
        [107.5871445,-6.913798],
        [107.5879277,-6.9140909],
        [107.5883381,-6.9142986],
        [107.5885358,-6.9141388],
        [107.5882496,-6.9139285],
        [107.5878311,-6.9137395],
        [107.5876434,-6.9136915],
        [107.5872169,-6.9135105],
        [107.5871445,-6.913798],
        [107.5872169,-6.9135105],
        [107.5868736,-6.9133587],
        [107.5867851,-6.9136543],
        [107.5868092,-6.9137315],
        [107.5866376,-6.9145409],
    ]])
    # Segment 10
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5867739,-6.9160664],
        [107.5861222,-6.9159865],
        [107.5861651,-6.9159929],
        [107.5861503,-6.9157705],
        [107.5861288,-6.9152753],
    ]])
    # Segment 11
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5857367,-6.9126875],
        [107.5863523,-6.9130875],
        [107.5867492,-6.9133005],
        [107.5868736,-6.9133587],
        [107.5867546,-6.9133005],
        [107.5866473,-6.9135562],
        [107.5867546,-6.9132952],
        [107.5863523,-6.9130822],
        [107.5861162,-6.9132846],
        [107.5861377,-6.9135189],
        [107.5860411,-6.9135402],
        [107.5859285,-6.9148236],
        [107.5857783,-6.9148449],
        [107.585789,-6.9149887],
    ]])
def jalanKelurahanDungusCariang(self, nama):
    """Write the road polylines of Kelurahan Dungus Cariang.

    Each ``self.jalan.record(nama)`` / ``self.jalan.line([[...]])`` pair
    emits one polyline feature with attribute record *nama* and the
    following ``[x, y]`` vertex list as geometry.

    NOTE(review): ``self.jalan`` appears to follow the pyshp
    ``shapefile.Writer`` API — confirm.  Coordinates look like WGS84
    lon/lat around Bandung, Indonesia — TODO confirm the CRS.  Repeated
    vertices are deliberate backtracks in the digitised trace; keep the
    order exactly as written.

    :param nama: attribute value written for every segment.
    """
    # Segment 1
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5767156,-6.9174426],
        [107.5770075,-6.9174589],
        [107.5770182,-6.9173577],
        [107.5770075,-6.9174589],
        [107.5780053,-6.917576],
        [107.5781233,-6.9171447],
        [107.5780053,-6.917576],
        [107.5781286,-6.9175973],
        [107.5787509,-6.9176825],
        [107.5788689,-6.9176879],
        [107.5789923,-6.9177305],
        [107.580607,-6.9179169],
        [107.5812024,-6.9180074],
        [107.5820017,-6.9180926],
        [107.5820178,-6.9178796],
        [107.5820822,-6.9177092],
        [107.5820178,-6.9178796],
        [107.5820017,-6.9180926],
        [107.5826938,-6.9181831],
        [107.5830907,-6.9182417],
        [107.5843996,-6.9183802],
        [107.5850876,-6.9184706],
        [107.5849951,-6.9184494],
        [107.5850648,-6.9176399],
        [107.5851589,-6.9176585],
        [107.5850648,-6.9176399],
        [107.5850809,-6.9172938],
        [107.5844104,-6.9173257],
        [107.5839222,-6.9172885],
        [107.5844104,-6.9173257],
        [107.5844265,-6.9170808],
        [107.5844104,-6.9173257],
        [107.5843996,-6.9183802],
    ]])
    # Segment 2
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5850809,-6.9172938],
        [107.5851171,-6.9168794],
        [107.58517,-6.9168998],
        [107.5851171,-6.9168794],
        [107.5851117,-6.9166984],
        [107.5851861,-6.9167001],
        [107.5851117,-6.9166984],
        [107.5851225,-6.9165173],
        [107.5851941,-6.916527],
        [107.5851225,-6.9165173],
        [107.5851332,-6.9163362],
        [107.5851995,-6.9163513],
        [107.5851332,-6.9163362],
        [107.5851868,-6.9157025],
        [107.5852236,-6.9157149],
        [107.5843232,-6.9154256],
        [107.5842749,-6.9155747],
        [107.5842856,-6.9157025],
        [107.5842749,-6.9155747],
        [107.5843232,-6.9154256],
        [107.5838725,-6.9152552],
        [107.5838243,-6.9156812],
        [107.5838296,-6.915596],
        [107.5836687,-6.9155747],
        [107.5836633,-6.9154416],
        [107.5836204,-6.915399],
        [107.5836687,-6.9151966],
        [107.5838725,-6.9152552],
        [107.5836687,-6.9151966],
        [107.5835399,-6.9151806],
        [107.583497,-6.915367],
        [107.5834058,-6.915383],
        [107.5833576,-6.9156013],
        [107.5834058,-6.915383],
        [107.583497,-6.915367],
        [107.5835399,-6.9151806],
        [107.5832878,-6.9150741],
        [107.5834756,-6.9144138],
        [107.5835882,-6.9139931],
        [107.5836366,-6.9139975],
        [107.5835882,-6.9139931],
        [107.5830303,-6.9138653],
        [107.5824724,-6.9137268],
        [107.5824724,-6.9136576],
        [107.5825368,-6.91337],
        [107.5826173,-6.9131463],
        [107.5827138,-6.9127842],
        [107.5828587,-6.9123102],
        [107.5830089,-6.9118842],
        [107.5831913,-6.9115221],
        [107.583239,-6.9115606],
        [107.5831913,-6.9115221],
        [107.5833844,-6.9110481],
        [107.5834349,-6.9110762],
        [107.5833844,-6.9110481],
        [107.5826226,-6.9105635],
        [107.5824402,-6.9109043],
        [107.5826226,-6.9105635],
        [107.5821613,-6.9102386],
        [107.5821291,-6.9103398],
        [107.5821184,-6.910654],
        [107.5819789,-6.9108883],
        [107.5819789,-6.9110747],
        [107.5819789,-6.9108883],
        [107.5821184,-6.910654],
        [107.5821291,-6.9103398],
        [107.5821613,-6.9102386],
        [107.581657,-6.9099351],
        [107.5815229,-6.9098446],
        [107.5813566,-6.9101801],
        [107.581244,-6.9105315],
        [107.5813674,-6.9105688],
        [107.581244,-6.9105315],
        [107.5813566,-6.9101801],
        [107.5815229,-6.9098446],
        [107.5813888,-6.9096901],
        [107.5811528,-6.9100097],
        [107.5809811,-6.9103185],
        [107.5810348,-6.9104037],
        [107.5810026,-6.9104676],
        [107.5806217,-6.9102173],
        [107.5810079,-6.9096262],
        [107.5810723,-6.9096262],
        [107.5811313,-6.909525],
        [107.5813888,-6.9096901],
        [107.5811313,-6.909525],
        [107.5807236,-6.9092907],
        [107.5804983,-6.9091096],
        [107.5801443,-6.9088807],
        [107.5798814,-6.9087369],
        [107.5790499,-6.9081351],
        [107.5798814,-6.9087369],
        [107.5798653,-6.9088487],
        [107.5797419,-6.9088221],
        [107.579581,-6.9087102],
        [107.5793289,-6.9085451],
        [107.5791519,-6.9084493],
        [107.578889,-6.908428],
        [107.5788085,-6.908412],
    ]])
    # Segment 3 (short two-point link)
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5788757,-6.9112659],
        [107.5781462,-6.9110636],
    ]])
    # Segment 4 (short two-point link)
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5782803,-6.9117612],
        [107.5780013,-6.9117026],
    ]])
    # Segment 5
    self.jalan.record(nama)
    self.jalan.line([[
        [107.576847,-6.9168861],
        [107.5773318,-6.9169331],
        [107.5774338,-6.9168479],
        [107.5775947,-6.9164166],
        [107.5776752,-6.9162408],
        [107.5777663,-6.9157562],
        [107.57782,-6.9154633],
        [107.5772031,-6.9153408],
        [107.57782,-6.9154633],
        [107.5778736,-6.9154101],
        [107.5782062,-6.915458],
        [107.5785549,-6.9156124],
        [107.5784798,-6.9159053],
        [107.5781633,-6.9158468],
        [107.5780507,-6.9158042],
        [107.5777663,-6.9157562],
        [107.5780507,-6.9158042],
        [107.5781687,-6.9158468],
        [107.5784798,-6.9159053],
        [107.5784369,-6.9160544],
        [107.5782277,-6.9160065],
        [107.5784369,-6.9160544],
        [107.578394,-6.9163154],
        [107.5790324,-6.9164805],
        [107.5791128,-6.9164059],
        [107.5791718,-6.9162621],
        [107.5791128,-6.9164059],
        [107.5790324,-6.9164805],
        [107.578394,-6.9163154],
        [107.5782491,-6.9169065],
        [107.5781286,-6.9175973],
        [107.5787509,-6.9176825],
        [107.5788689,-6.9176879],
        [107.5789465,-6.9173006],
        [107.5789519,-6.9170556],
        [107.5782491,-6.9169065],
        [107.5789519,-6.9170556],
        [107.5789465,-6.9173006],
        [107.5790377,-6.9171941],
        [107.5793274,-6.9163527],
        [107.5796546,-6.9164539],
        [107.5796707,-6.9165391],
        [107.5798907,-6.9166083],
        [107.5799336,-6.9165178],
        [107.5801213,-6.9165444],
        [107.5806578,-6.9167254],
        [107.5807597,-6.9165444],
    ]])
    # Segment 6
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5775873,-6.9136243],
        [107.5787955,-6.9139343],
        [107.5786346,-6.9142752],
        [107.5784951,-6.914813],
        [107.5779962,-6.9147385],
        [107.5778736,-6.9154101],
        [107.5782062,-6.915458],
        [107.5784951,-6.914813],
        [107.5784522,-6.9149089],
        [107.5786453,-6.9149834],
        [107.5785702,-6.9153136],
        [107.5785112,-6.9155905],
        [107.5785549,-6.9156124],
        [107.5791362,-6.9156811],
        [107.5793561,-6.9157317],
        [107.579289,-6.9159607],
        [107.5793561,-6.9157317],
        [107.5795412,-6.915753],
        [107.5793274,-6.9163527],
        [107.5794634,-6.9159793],
        [107.5802734,-6.9162056],
        [107.5802224,-6.9163654],
        [107.5801313,-6.9164133],
        [107.5800991,-6.9165464],
        [107.5801313,-6.9164133],
        [107.5802224,-6.9163654],
        [107.5802734,-6.9162056],
        [107.580724,-6.9163521],
        [107.5807616,-6.9160938],
        [107.5808179,-6.9158435],
        [107.5803941,-6.9157184],
        [107.5803244,-6.9159394],
        [107.5807616,-6.9160938],
        [107.5803244,-6.9159394],
        [107.5795412,-6.915753],
        [107.579686,-6.9151405],
        [107.5793936,-6.915058],
        [107.5793695,-6.9151166],
        [107.5791013,-6.9150553],
        [107.5790423,-6.9152311],
        [107.5791013,-6.9150553],
        [107.5793695,-6.9151166],
        [107.5792971,-6.9153722],
        [107.5792408,-6.9154161],
        [107.5791388,-6.9156811],
        [107.5792408,-6.9154161],
        [107.5785729,-6.915311],
    ]])
    # Segment 7
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5805838,-6.9179197],
        [107.5806643,-6.917696],
        [107.5807501,-6.917704],
        [107.5808225,-6.9175176],
        [107.5808252,-6.9174084],
        [107.5808494,-6.9173445],
        [107.5807635,-6.9173099],
        [107.5810344,-6.9166682],
        [107.5808157,-6.9164871],
        [107.580821,-6.9163966],
        [107.580724,-6.9163521],
        [107.580821,-6.9163966],
        [107.5810249,-6.9160664],
        [107.5814219,-6.9162155],
        [107.5812234,-6.9166469],
        [107.5811697,-6.9167427],
        [107.5810344,-6.9166682],
        [107.5811697,-6.9167427],
        [107.5812234,-6.9166469],
        [107.5817705,-6.9169078],
        [107.5818564,-6.9169078],
        [107.5819958,-6.9169558],
        [107.5820441,-6.9168386],
        [107.5819958,-6.9169558],
        [107.5822426,-6.9170303],
        [107.5821729,-6.9172593],
        [107.5822426,-6.9170303],
        [107.5825055,-6.9171102],
        [107.5828166,-6.9171475],
        [107.5827361,-6.9177066],
        [107.5826938,-6.9181831],
        [107.5830907,-6.9182417],
        [107.5831224,-6.9178877],
        [107.5829829,-6.9178717],
        [107.5829346,-6.9177705],
        [107.5830634,-6.9168812],
        [107.5831331,-6.9168066],
        [107.5834496,-6.9168546],
        [107.5837768,-6.9168919],
        [107.5838144,-6.9169717],
        [107.5837071,-6.9180634],
        [107.5833745,-6.9180315],
        [107.5833289,-6.9179503],
        [107.5833316,-6.9179023],
        [107.5831224,-6.9178877],
        [107.5833316,-6.9179023],
        [107.583455,-6.9168546],
    ]])
    # Segment 8
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5851107,-6.9168255],
        [107.5844186,-6.9166924],
        [107.5836247,-6.9165273],
        [107.5833779,-6.9164634],
        [107.5827449,-6.9164101],
        [107.5827289,-6.9165273],
        [107.582863,-6.9166125],
        [107.5833833,-6.9166125],
        [107.5835872,-6.9166604],
        [107.5836247,-6.9165273],
        [107.5844186,-6.9166924],
        [107.5844669,-6.9163302],
        [107.5846171,-6.9161012],
        [107.5844669,-6.9163302],
        [107.5837159,-6.9161598],
        [107.5836408,-6.9161066],
        [107.5834316,-6.9160746],
        [107.5835764,-6.9154249],
        [107.5836204,-6.915399],
        [107.5835764,-6.9154249],
        [107.5834316,-6.9160746],
        [107.5833779,-6.9164634],
        [107.5834316,-6.9160746],
        [107.5830078,-6.9160001],
        [107.5829059,-6.9160001],
        [107.582525,-6.9158882],
        [107.5824499,-6.9163462],
        [107.5825481,-6.916367],
        [107.5824499,-6.9163462],
        [107.5822426,-6.9170303],
        [107.5824499,-6.9163462],
        [107.5819054,-6.9162051],
        [107.5820422,-6.9157311],
        [107.5820207,-6.9156992],
        [107.5817042,-6.9156459],
        [107.5816882,-6.915606],
        [107.5816935,-6.9155421],
        [107.5816399,-6.9154888],
        [107.5816506,-6.9154356],
        [107.5816962,-6.9152811],
        [107.5816372,-6.9152172],
        [107.5817203,-6.9147885],
        [107.5820181,-6.914887],
        [107.5820663,-6.9148657],
        [107.5822541,-6.9149084],
        [107.582187,-6.9152944],
        [107.5822541,-6.9149084],
        [107.5822863,-6.9147726],
        [107.5825277,-6.9148444],
        [107.5824901,-6.9150681],
        [107.5825277,-6.9148444],
        [107.5825786,-6.9148365],
        [107.5825974,-6.9146075],
        [107.5825786,-6.9148365],
        [107.5830588,-6.9150015],
        [107.583107,-6.9150042],
        [107.5832116,-6.914674],
        [107.583107,-6.9150042],
        [107.5832878,-6.9150741],
        [107.583107,-6.9150042],
        [107.5830185,-6.9151187],
        [107.58293,-6.9154888],
        [107.5826511,-6.9154249],
        [107.5825921,-6.9155927],
        [107.582525,-6.9158882],
        [107.5825062,-6.9159921],
        [107.582187,-6.9159175],
    ]])
    # Segment 9
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5824724,-6.9137268],
        [107.5823858,-6.9138174],
        [107.5823192,-6.9143792],
        [107.5822863,-6.9147726],
        [107.5811283,-6.9144964],
        [107.5811604,-6.9141396],
        [107.5812302,-6.9137615],
        [107.5811604,-6.9141396],
        [107.5811283,-6.9144964],
        [107.5810263,-6.9150023],
        [107.5812785,-6.9150236],
        [107.5812892,-6.9150822],
        [107.5813965,-6.9151035],
        [107.5812892,-6.9150822],
        [107.5811229,-6.9157478],
        [107.5815735,-6.9158544],
        [107.5811229,-6.9157478],
        [107.5810249,-6.9160664],
    ]])
    # Segment 10
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5798653,-6.9088487],
        [107.5798571,-6.9090062],
        [107.5792724,-6.9088518],
        [107.5794226,-6.9088891],
        [107.5793904,-6.9090968],
        [107.5793904,-6.9092778],
        [107.5792885,-6.9092778],
        [107.5793904,-6.9092778],
        [107.5796748,-6.9092778],
        [107.579723,-6.9093258],
        [107.5798411,-6.9093311],
        [107.5798571,-6.9090062],
        [107.5798411,-6.9093311],
        [107.5798303,-6.9094536],
        [107.5796157,-6.9094163],
        [107.5795567,-6.9093577],
        [107.5795567,-6.9092778],
        [107.5795567,-6.9093577],
        [107.5796157,-6.9094163],
        [107.5798303,-6.9094536],
        [107.5798089,-6.9097891],
        [107.5796587,-6.9097731],
        [107.5796372,-6.909837],
        [107.5795192,-6.9098051],
        [107.5796372,-6.909837],
        [107.5796587,-6.9097731],
        [107.5798089,-6.9097891],
        [107.5798035,-6.9100766],
        [107.5792992,-6.9099808],
        [107.5793743,-6.9097252],
        [107.5792992,-6.9099808],
        [107.5798035,-6.9100766],
        [107.5797713,-6.9104494],
        [107.578913,-6.9101885],
        [107.5797713,-6.9104494],
        [107.5798357,-6.9105027],
        [107.5801844,-6.9106305],
        [107.5802595,-6.9107157],
        [107.5803453,-6.910737],
        [107.5805545,-6.9102098],
        [107.5806217,-6.9102173],
        [107.5805545,-6.9102098],
        [107.5802434,-6.9099968],
        [107.5798089,-6.9097891],
        [107.5798035,-6.9100766],
        [107.5797713,-6.9104494],
        [107.579723,-6.9109926],
        [107.5796587,-6.9112482],
        [107.5799001,-6.9113494],
        [107.5796587,-6.9112482],
        [107.5795246,-6.9116423],
        [107.5796587,-6.9117062],
        [107.5795246,-6.9116423],
        [107.5794655,-6.9117595],
        [107.5790847,-6.9115678],
        [107.5794655,-6.9117595],
        [107.5793851,-6.9120098],
        [107.5789291,-6.9118127],
        [107.5793851,-6.9120098],
        [107.5793314,-6.9121163],
        [107.5788379,-6.9119352],
        [107.5793314,-6.9121163],
        [107.5792456,-6.9123187],
        [107.5789935,-6.9122122],
        [107.5792456,-6.9123187],
        [107.5792027,-6.9124465],
        [107.5794226,-6.912521],
        [107.5792027,-6.9124465],
        [107.5790471,-6.9127074],
        [107.5792134,-6.912782],
        [107.5790471,-6.9127074],
        [107.5789774,-6.9129897],
        [107.5793422,-6.9130908],
        [107.5789774,-6.9129897],
        [107.5789345,-6.9132932],
        [107.5793958,-6.9134263],
        [107.5789345,-6.9132932],
        [107.5788594,-6.9135755],
        [107.5787955,-6.9139343],
        [107.5797797,-6.9141558],
        [107.5796748,-6.9141346],
        [107.5800261,-6.9130243],
    ]])
    # Segment 11
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5810263,-6.9150023],
        [107.5804959,-6.914912],
        [107.5804422,-6.9149653],
        [107.5803618,-6.9149413],
        [107.5804047,-6.9147789],
        [107.5803886,-6.9148375],
        [107.5803618,-6.9149413],
        [107.5803537,-6.9149893],
        [107.580182,-6.9149307],
        [107.5803537,-6.9149893],
        [107.5803081,-6.915149],
        [107.5797475,-6.9149919],
        [107.579686,-6.9151405],
        [107.5793936,-6.915058],
        [107.5795088,-6.9147097],
        [107.5795893,-6.9147283],
        [107.5796161,-6.9146964],
        [107.5797797,-6.9141558],
        [107.5803027,-6.914289],
        [107.5811283,-6.9144964],
        [107.5807155,-6.9143921],
        [107.5807963,-6.9141612],
        [107.5807104,-6.9141079],
        [107.5806407,-6.9141052],
        [107.5807104,-6.9141079],
        [107.5807963,-6.9141612],
        [107.5808472,-6.9141159],
        [107.5808633,-6.9139987],
        [107.5805549,-6.9138922],
        [107.5808633,-6.9139987],
        [107.5808472,-6.9141159],
        [107.5807963,-6.9141612],
        [107.5807155,-6.9143921],
        [107.5808855,-6.9144348],
        [107.5809331,-6.9142304],
        [107.5808855,-6.9144348],
        [107.5807641,-6.9149546],
        [107.5808855,-6.9144348],
        [107.5803027,-6.914289],
        [107.580233,-6.9144461],
        [107.5801445,-6.9147762],
        [107.5803886,-6.9148375],
        [107.5801445,-6.9147762],
        [107.5800426,-6.9148748],
        [107.5797663,-6.9148295],
        [107.5797207,-6.9148535],
    ]])
    # Segment 12
    self.jalan.record(nama)
    self.jalan.line([[
        [107.581657,-6.9099351],
        [107.5813715,-6.9107838],
        [107.5811623,-6.9114814],
        [107.580878,-6.9114441],
        [107.5807599,-6.911721],
        [107.5804542,-6.9116358],
        [107.5804864,-6.9115453],
        [107.5804542,-6.9116358],
        [107.5802986,-6.9121684],
        [107.5803898,-6.912195],
        [107.5802986,-6.9121684],
        [107.5802611,-6.9122163],
        [107.5794081,-6.9119607],
        [107.5797568,-6.9120672],
        [107.5796871,-6.9122323],
        [107.5797568,-6.9120672],
        [107.5802611,-6.9122163],
        [107.5801377,-6.9126743],
        [107.579907,-6.9126184],
        [107.5801377,-6.9126743],
        [107.5800261,-6.9130243],
        [107.5797595,-6.9129512],
        [107.5800261,-6.9130243],
        [107.5801377,-6.9126743],
        [107.5801752,-6.9125358],
        [107.5804274,-6.9126876],
        [107.5807358,-6.9127648],
        [107.5809772,-6.911998],
        [107.5805856,-6.9118542],
        [107.5809826,-6.911998],
        [107.5811703,-6.9119287],
        [107.5813018,-6.9117983],
        [107.5813339,-6.9117503],
        [107.5813822,-6.9116598],
        [107.5813339,-6.9117503],
        [107.5810308,-6.9116598],
    ]])
    # Segment 13
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5799214,-6.9141892],
        [107.5804274,-6.9126876],
        [107.5807358,-6.9127648],
        [107.580879,-6.9128259],
        [107.5808548,-6.9129057],
        [107.5807878,-6.912959],
        [107.5808548,-6.9129057],
        [107.580879,-6.9128232],
        [107.5809192,-6.912714],
        [107.5812572,-6.9127832],
        [107.5811874,-6.9130415],
        [107.5812572,-6.9127832],
        [107.5826173,-6.9131463],
        [107.5825368,-6.91337],
        [107.5822335,-6.9133105],
        [107.5821611,-6.9135661],
        [107.5822335,-6.9133105],
        [107.5813323,-6.9130522],
        [107.5812089,-6.9130761],
        [107.5811338,-6.9133318],
    ]])
    # Segment 14
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5827138,-6.9127842],
        [107.5821254,-6.9126154],
        [107.5810471,-6.9123038],
        [107.5821227,-6.912618],
        [107.5822756,-6.9122452],
        [107.5815406,-6.9120322],
        [107.5822756,-6.9122452],
        [107.5824285,-6.9118911],
        [107.5816989,-6.9116595],
    ]])
def jalanKelurahanGaruda(self, nama):
    """Write the road polylines of Kelurahan Garuda.

    Each ``self.jalan.record(nama)`` / ``self.jalan.line([[...]])`` pair
    emits one polyline feature with attribute record *nama* and the
    following ``[x, y]`` vertex list as geometry.

    NOTE(review): ``self.jalan`` appears to follow the pyshp
    ``shapefile.Writer`` API — confirm.  Coordinates look like WGS84
    lon/lat around Bandung, Indonesia — TODO confirm the CRS.  Repeated
    vertices are deliberate backtracks in the digitised trace; keep the
    order exactly as written.

    :param nama: attribute value written for every segment.
    """
    # Segment 1
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5729636,-6.9155205],
        [107.5731415,-6.9155822],
        [107.5732327,-6.9156515],
        [107.5733587,-6.9156834],
        [107.5735492,-6.9157713],
        [107.573348,-6.9159337],
        [107.5730303,-6.9163057],
        [107.5733507,-6.9159337],
        [107.5735492,-6.9157713],
        [107.5737476,-6.9158778],
        [107.5734472,-6.9162719],
        [107.5736323,-6.9164529],
        [107.5737262,-6.9164503],
        [107.5739113,-6.916184],
        [107.5739971,-6.9162266],
        [107.5739113,-6.916184],
        [107.5737262,-6.9164503],
        [107.5736323,-6.9164529],
        [107.573446,-6.9166438],
    ]])
    # Segment 2
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5725292,-6.9158665],
        [107.5729203,-6.9162178],
        [107.5729766,-6.9161486],
        [107.5729203,-6.9162178],
        [107.5730303,-6.9163057],
        [107.573446,-6.9166438],
        [107.5738617,-6.9169766],
        [107.5742453,-6.9170805],
        [107.5743445,-6.9170166],
        [107.5744813,-6.9170459],
        [107.5745457,-6.9171391],
        [107.5752914,-6.9173095],
        [107.5758546,-6.9173521],
        [107.5759914,-6.9169021],
        [107.5758546,-6.9173521],
        [107.5767156,-6.9174426],
        [107.576847,-6.9168861],
        [107.5765171,-6.9168409],
        [107.5764528,-6.9167743],
        [107.5764367,-6.9166438],
        [107.5764528,-6.9167743],
        [107.5765171,-6.9168409],
        [107.576847,-6.9168861],
        [107.5770321,-6.916074],
        [107.5774452,-6.9142767],
        [107.5775873,-6.9136243],
        [107.577987,-6.9117897],
        [107.5784376,-6.9098406],
        [107.5783115,-6.909782],
        [107.5783384,-6.9097048],
        [107.5784751,-6.9097154],
        [107.5784376,-6.9098406],
        [107.5784751,-6.9097154],
        [107.5786978,-6.9086637],
        [107.5788963,-6.908227],
        [107.5787271,-6.908121],
        [107.5788963,-6.908227],
        [107.5791012,-6.9079],
        [107.5790556,-6.9079719],
        [107.5781562,-6.9074106],
        [107.5780409,-6.9076289],
        [107.5780087,-6.9077701],
        [107.5780409,-6.9076289],
        [107.5778719,-6.9075544],
        [107.5780409,-6.9076289],
        [107.5781562,-6.9074106],
        [107.5777807,-6.9071816],
        [107.5776788,-6.9073813],
        [107.5777807,-6.9071816],
        [107.5775983,-6.9070431],
        [107.5773999,-6.907344],
        [107.5775983,-6.9070431],
        [107.5771665,-6.9067955],
        [107.5765281,-6.9063428],
        [107.5761902,-6.9069686],
        [107.5758039,-6.9068088],
        [107.5761902,-6.9069686],
        [107.5765281,-6.9063428],
        [107.5761285,-6.9060872],
        [107.5758549,-6.9065905],
        [107.5761285,-6.9060872],
        [107.5754177,-6.9056052],
        [107.5751361,-6.9054375],
        [107.5754177,-6.9056052],
        [107.5751253,-6.9061591],
        [107.5748947,-6.9064467],
        [107.5750127,-6.9063002],
        [107.5753909,-6.9065292],
        [107.5754713,-6.9064307],
        [107.5756403,-6.9064973],
        [107.5759381,-6.9059607],
        [107.5756832,-6.9057836],
        [107.5755491,-6.9059967],
        [107.5753936,-6.9062816],
        [107.5751253,-6.9061591],
    ]])
    # Segment 3
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5737476,-6.9158778],
        [107.5739852,-6.915599],
        [107.5741636,-6.9152395],
        [107.5735333,-6.9149932],
        [107.5741636,-6.9152395],
        [107.5743219,-6.9152648],
        [107.5749656,-6.9137098],
        [107.5743219,-6.9152648],
        [107.5749334,-6.915595],
        [107.5747617,-6.9160316],
        [107.5741287,-6.9157707],
        [107.5747617,-6.9160316],
        [107.5743445,-6.9170166],
        [107.5747617,-6.9160316],
        [107.5744813,-6.9170459],
        [107.5747617,-6.9160316],
        [107.5749334,-6.915595],
        [107.576017,-6.9158559],
        [107.5765374,-6.9159731],
        [107.5770321,-6.916074],
        [107.5765374,-6.9159731],
        [107.577106,-6.9135074],
        [107.5775873,-6.9136243],
        [107.577106,-6.9135074],
        [107.5774118,-6.9121707],
        [107.5773688,-6.9123465],
        [107.5767197,-6.9121121],
        [107.5770202,-6.9114784],
        [107.5774547,-6.9116435],
        [107.577987,-6.9117897],
        [107.5774547,-6.9116435],
        [107.577519,-6.9113133],
        [107.5774547,-6.9116435],
        [107.5770202,-6.9114784],
        [107.5775459,-6.9105518],
        [107.5775888,-6.91044],
        [107.577562,-6.9103494],
        [107.577283,-6.9101897],
        [107.577562,-6.909721],
        [107.5774064,-6.909982],
        [107.577224,-6.9098861],
        [107.5774815,-6.9094974],
        [107.5776639,-6.9091938],
        [107.5779643,-6.9087039],
        [107.5781467,-6.9086133],
        [107.5780179,-6.9085015],
        [107.5779267,-6.9084589],
        [107.5778034,-6.9083311],
        [107.5779267,-6.9084589],
        [107.5780233,-6.9085015],
        [107.5781413,-6.9086133],
        [107.5782915,-6.908608],
        [107.5786978,-6.9086637],
    ]])
    # Segment 4
    self.jalan.record(nama)
    self.jalan.line([[
        [107.576017,-6.9158559],
        [107.5761265,-6.9153624],
        [107.5750911,-6.9150828],
        [107.5761265,-6.9153624],
        [107.5764025,-6.9140311],
        [107.5758017,-6.9137967],
        [107.5759612,-6.9138593],
        [107.5762308,-6.9133175],
        [107.577106,-6.9135074],
        [107.5762308,-6.9133175],
        [107.5764883,-6.9127583],
        [107.577234,-6.91295],
        [107.5764883,-6.9127583],
        [107.5767197,-6.9121121],
    ]])
    # Segment 5
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5749334,-6.915595],
        [107.5758022,-6.9131228],
        [107.5753516,-6.9129897],
        [107.575483,-6.9130269],
        [107.5756734,-6.9125423],
        [107.575483,-6.9130269],
        [107.5758022,-6.9131228],
        [107.5762308,-6.9133175],
        [107.5758022,-6.9131228],
        [107.5758558,-6.9130323],
        [107.5766337,-6.9112536],
        [107.5761723,-6.9110832],
        [107.5766337,-6.9112536],
        [107.5770202,-6.9114784],
        [107.5766337,-6.9112536],
        [107.5768107,-6.9109553],
        [107.5767678,-6.9108701],
        [107.5770306,-6.9104175],
        [107.5767678,-6.9108701],
        [107.5768107,-6.9109553],
        [107.577283,-6.9101897],
    ]])
    # Segment 6
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5737868,-6.9084887],
        [107.5746753,-6.9087599],
        [107.5752063,-6.908909],
        [107.574627,-6.9099368],
        [107.5742139,-6.9097664],
        [107.5746753,-6.9087599],
        [107.5742139,-6.9097664],
        [107.5734708,-6.9094448],
        [107.5742139,-6.9097664],
        [107.574627,-6.9099368],
        [107.5750132,-6.9100859],
        [107.5755121,-6.9092498],
        [107.5756999,-6.9092924],
        [107.5757857,-6.9090634],
        [107.5754316,-6.9089196],
        [107.5757857,-6.9090634],
        [107.5756999,-6.9092924],
        [107.5755121,-6.9092498],
        [107.5750132,-6.9100859],
        [107.5754263,-6.9101924],
        [107.5760164,-6.9092764],
        [107.5754263,-6.9101924],
        [107.5752653,-6.9106504],
        [107.5754263,-6.9101924],
        [107.5766225,-6.9107037],
        [107.5759024,-6.9103908],
        [107.5762685,-6.9097983],
        [107.576365,-6.9094788],
        [107.5761934,-6.9093936],
        [107.576306,-6.9090528],
        [107.5764026,-6.9086054],
        [107.5767567,-6.9086693],
        [107.5768586,-6.9084457],
        [107.5767567,-6.9086693],
        [107.5770302,-6.9087119],
        [107.5773253,-6.9082699],
        [107.5770302,-6.9087119],
        [107.5772448,-6.9087758],
        [107.577336,-6.9087652],
        [107.577395,-6.908648],
        [107.577336,-6.9087652],
        [107.5775238,-6.9087545],
        [107.5779643,-6.9087039],
        [107.5775238,-6.9087545],
        [107.5770839,-6.9090208],
        [107.5770517,-6.9092818],
        [107.5770034,-6.9098516],
        [107.577224,-6.9098861],
        [107.5770034,-6.9098516],
        [107.5770517,-6.9092764],
        [107.5770839,-6.9090208],
        [107.5769873,-6.9090155],
        [107.5768532,-6.9089516],
        [107.5767674,-6.9089835],
        [107.5767298,-6.9090847],
        [107.5766011,-6.9096492],
    ]])
    # Segment 7
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5771665,-6.9067955],
        [107.5770051,-6.907072],
        [107.5768174,-6.9069868],
        [107.5770051,-6.907072],
        [107.5766135,-6.9077324],
        [107.5762756,-6.907514],
        [107.5766135,-6.9077324],
        [107.5765009,-6.9078868],
        [107.5763936,-6.9080732],
        [107.5764901,-6.9081477],
        [107.5764026,-6.9086054],
        [107.5759966,-6.9084939],
        [107.5760932,-6.9082117],
        [107.5759966,-6.9084939],
        [107.575723,-6.9084619],
        [107.5756586,-6.9086483],
        [107.575723,-6.9084619],
        [107.5756426,-6.9084513],
        [107.5757713,-6.9080892],
        [107.5757713,-6.9079134],
        [107.576002,-6.9074182],
        [107.5751919,-6.9070241],
        [107.5752992,-6.9067045],
        [107.575428,-6.9067844],
        [107.5752992,-6.9067045],
        [107.5753909,-6.9065292],
        [107.5752992,-6.9067045],
        [107.5751919,-6.9070241],
        [107.57509,-6.9071359],
        [107.5748701,-6.90704],
        [107.5746931,-6.9069229],
        [107.5743926,-6.9067631],
        [107.5744879,-6.9068124],
        [107.574339,-6.9071199],
        [107.5742478,-6.907072],
    ]])
    # Segment 8
    self.jalan.record(nama)
    self.jalan.line([[
        [107.5739905,-6.9081123],
        [107.5742721,-6.9081522],
        [107.5742453,-6.9079925],
        [107.5742158,-6.9079073],
        [107.5742426,-6.9077182],
        [107.5742158,-6.9079073],
        [107.5742453,-6.9079925],
        [107.5742721,-6.9081522],
        [107.5749856,-6.9082801],
        [107.5751519,-6.9079579],
        [107.5749856,-6.9082801],
        [107.5754228,-6.9083946],
        [107.5755006,-6.9082082],
        [107.5754228,-6.9083946],
        [107.5755301,-6.9084265],
        [107.5754282,-6.9086901],
        [107.5755301,-6.9084265],
        [107.5756426,-6.9084513],
    ]])
def jalanKelurahanKebonJeruk(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.592997,-6.9194399],
[107.5938698,-6.9195477],
[107.5951519,-6.9197021],
[107.5954952,-6.9197421],
[107.5956937,-6.9197687],
[107.5972386,-6.9199498],
[107.5976544,-6.9200004],
[107.5983705,-6.9200829],
[107.5997385,-6.9202507],
[107.6004761,-6.9203385],
[107.6005619,-6.9200589],
[107.6005673,-6.9198033],
[107.6005619,-6.9200589],
[107.6004761,-6.9203385],
[107.6018038,-6.9204983],
[107.6021498,-6.9205462],
[107.6031502,-6.9206607],
[107.6041051,-6.9207779],
[107.6041775,-6.9198513],
[107.6041909,-6.9194519],
[107.6042151,-6.9191164],
[107.6042419,-6.9186477],
[107.6042875,-6.9183176],
[107.6043438,-6.9183309],
[107.6042875,-6.9183202],
[107.6043948,-6.91713],
[107.6044752,-6.9158253],
[107.604545,-6.9158066],
[107.6044752,-6.9158253],
[107.6045745,-6.9148294],
[107.6046228,-6.9147629],
[107.6045745,-6.9148268],
[107.6030376,-6.9147602],
[107.6027881,-6.9147842],
[107.6027881,-6.9151463],
[107.6027881,-6.9147842],
[107.6025628,-6.9148241],
[107.6015168,-6.9147708],
[107.6004385,-6.9146777],
[107.5994568,-6.9146537],
[107.5992235,-6.9147309],
[107.5990786,-6.9146191],
[107.5989258,-6.9147069],
[107.5982847,-6.9146723],
[107.5981855,-6.9147149],
[107.5982847,-6.9146723],
[107.5989258,-6.9147069],
[107.5990786,-6.9146191],
[107.5992262,-6.9147336],
[107.5990089,-6.9148081],
[107.5980702,-6.9148107],
[107.5981185,-6.9142994],
[107.5980702,-6.9148107],
[107.5973567,-6.9148426],
[107.5973031,-6.9146829],
[107.5979307,-6.9147521],
[107.5973031,-6.9146829],
[107.595506,-6.9146349],
[107.5953773,-6.9145764],
[107.5936124,-6.9143421],
[107.5929579,-6.9143474],
[107.5928399,-6.9143634],
[107.5927862,-6.914406],
[107.5927916,-6.9145071],
[107.592856,-6.9145604],
[107.5929365,-6.914603],
[107.5929472,-6.9148107],
[107.5928877,-6.915772],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5929574,-6.9158226],
[107.5930423,-6.9146035],
[107.5931067,-6.914513],
[107.5932301,-6.9144757],
[107.5935922,-6.9144837],
[107.5954965,-6.9147686],
[107.595506,-6.9146349],
[107.5954992,-6.9147686],
[107.5954617,-6.9151574],
[107.5972724,-6.915159],
[107.5971812,-6.9152921],
[107.5978732,-6.9152921],
[107.5971812,-6.9152921],
[107.5956201,-6.9153134],
[107.5954592,-6.9152602],
[107.5954592,-6.915159],
[107.5954592,-6.9152602],
[107.5954914,-6.9160536],
[107.5932149,-6.9160289],
[107.5954914,-6.9160536],
[107.5954806,-6.9163838],
[107.5933169,-6.9165122],
[107.5942522,-6.916453],
[107.5942951,-6.9171347],
[107.5942522,-6.916453],
[107.5954806,-6.9163838],
[107.5955987,-6.9163838],
[107.5955504,-6.9172945],
[107.5955987,-6.9163838],
[107.5958562,-6.9163838],
[107.5958722,-6.9166501],
[107.5960546,-6.9169803],
[107.5959634,-6.9174329],
[107.5960546,-6.9169803],
[107.5958722,-6.9166554],
[107.5958562,-6.9163838],
[107.5959259,-6.9163785],
[107.5958883,-6.9161495],
[107.5959259,-6.9163785],
[107.5966984,-6.9163732],
[107.5966876,-6.9166767],
[107.5967252,-6.9167353],
[107.5967145,-6.9169962],
[107.5967252,-6.9167353],
[107.5966876,-6.9166767],
[107.5966984,-6.9163732],
[107.5974011,-6.9163465],
[107.5973904,-6.9164956],
[107.5972831,-6.9170069],
[107.5973904,-6.9164956],
[107.5974011,-6.9163465],
[107.598195,-6.9163252],
[107.5982165,-6.9170921],
[107.598195,-6.9163199],
[107.598136,-6.9155318],
[107.5972187,-6.915585],
[107.598136,-6.9155318],
[107.5980702,-6.9148107],
[107.5973567,-6.9148426],
[107.5972724,-6.915159],
[107.5973567,-6.9148426],
[107.5980702,-6.9148107],
[107.598136,-6.9155318],
[107.598195,-6.9163252],
[107.5995703,-6.9162367],
[107.5992753,-6.9147988],
[107.5995489,-6.9147829],
[107.5992753,-6.9147988],
[107.5992235,-6.9147309],
[107.5992753,-6.9147988],
[107.5995703,-6.9162367],
[107.5999405,-6.9162047],
[107.5998976,-6.9160343],
[107.5998815,-6.915816],
[107.5996937,-6.9148787],
[107.5998815,-6.915816],
[107.5998976,-6.916029],
[107.5999405,-6.9162047],
[107.6013406,-6.9161994],
[107.6012923,-6.9156722],
[107.6014533,-6.9156562],
[107.6014586,-6.9155604],
[107.6018019,-6.9155497],
[107.6018127,-6.9152994],
[107.601582,-6.9153473],
[107.6015168,-6.9147708],
[107.601582,-6.9153473],
[107.6018127,-6.9152994],
[107.6018019,-6.9155497],
[107.6014586,-6.9155604],
[107.6014533,-6.9156562],
[107.6012923,-6.9156722],
[107.6013406,-6.9161994],
[107.6016142,-6.9162047],
[107.6016356,-6.9156349],
[107.6016142,-6.9162047],
[107.6023008,-6.9161195],
[107.6023116,-6.9153686],
[107.6024779,-6.915374],
[107.6024832,-6.9158266],
[107.6024779,-6.915374],
[107.6027514,-6.9153846],
[107.6027193,-6.9161089],
[107.6023008,-6.9161195],
[107.6027193,-6.9161089],
[107.6031055,-6.9160982],
[107.6044752,-6.9158253],
[107.6031055,-6.9160982],
[107.6031484,-6.9168012],
[107.6027407,-6.9168065],
[107.60273,-6.916929],
[107.6026388,-6.9169343],
[107.60273,-6.916929],
[107.6027407,-6.9168065],
[107.6028748,-6.9168065],
[107.6028802,-6.9169503],
[107.6028748,-6.9168065],
[107.6031484,-6.9168012],
[107.6031699,-6.9170089],
[107.6043948,-6.91713],
[107.6042875,-6.9183176],
[107.6034971,-6.9180633],
[107.6030948,-6.9179302],
[107.6031699,-6.9170089],
[107.6030948,-6.9179302],
[107.603025,-6.9182284],
[107.6030948,-6.9179302],
[107.6034971,-6.9180633],
[107.6033684,-6.9185692],
[107.6033684,-6.9186437],
[107.6037921,-6.9186704],
[107.6038619,-6.9185905],
[107.6042419,-6.9186477],
[107.6042151,-6.9191164],
[107.6036419,-6.9191017],
[107.6032986,-6.9190751],
[107.6033684,-6.9186437],
[107.6033684,-6.9185692],
[107.6027032,-6.9185266],
[107.6027032,-6.9189526],
[107.6029982,-6.9189633],
[107.6033093,-6.9190005],
[107.6029982,-6.9189633],
[107.6029928,-6.9191017],
[107.602907,-6.9191337],
[107.6029124,-6.9193627],
[107.6029928,-6.9194053],
[107.6030197,-6.9196609],
[107.6032342,-6.9196662],
[107.6032986,-6.9190751],
[107.6032705,-6.9193414],
[107.6041909,-6.9194519],
[107.6041775,-6.9198513],
[107.6032503,-6.919794],
[107.6032342,-6.9196662],
[107.6032503,-6.919794],
[107.6032369,-6.9198792],
[107.6021948,-6.9198239],
[107.6032369,-6.9198792],
[107.6032141,-6.9201819],
[107.6026696,-6.9200807],
[107.6032168,-6.9201819],
[107.6031658,-6.9204348],
[107.6031502,-6.9206607],
[107.6021498,-6.9205462],
[107.6021895,-6.9198677],
[107.6018515,-6.9198464],
[107.6021895,-6.9198677],
[107.6021948,-6.9198239],
[107.6022351,-6.9193085],
[107.6018837,-6.9193085],
[107.6022378,-6.9193112],
[107.6022538,-6.9184511],
[107.6027032,-6.9185266],
[107.6022512,-6.9184485],
[107.6022485,-6.9179799],
[107.6025757,-6.9179692],
[107.6022485,-6.9179799],
[107.6022458,-6.917884],
[107.6016182,-6.917892],
[107.6022458,-6.9178867],
[107.6022726,-6.9173115],
[107.6026776,-6.9173594],
[107.6026723,-6.9175805],
[107.6026803,-6.9173594],
[107.6031363,-6.9173861],
[107.6026776,-6.9173648],
[107.6022753,-6.9173142],
[107.6022699,-6.9169467],
[107.6022109,-6.9161266],
[107.6022699,-6.9169441],
[107.602278,-6.9173168],
[107.6015162,-6.9172955],
[107.6015484,-6.9168296],
[107.6015189,-6.9172955],
[107.6012882,-6.9172876],
[107.6008323,-6.9172663],
[107.6007894,-6.9177242],
[107.6008296,-6.9172609],
[107.6007974,-6.9172583],
[107.6008135,-6.9171837],
[107.6008189,-6.917],
[107.6007062,-6.9168855],
[107.600505,-6.9166432],
[107.6004004,-6.9163982],
[107.6003924,-6.9162038],
[107.5999405,-6.9162047],
[107.5995703,-6.9162367],
[107.5991908,-6.9162571],
[107.5992095,-6.9165447],
[107.5994456,-6.9166325],
[107.5998211,-6.9168935],
[107.5998586,-6.9166911],
[107.5998211,-6.9168935],
[107.5998104,-6.9171864],
[107.5982165,-6.9170921],
[107.5972831,-6.9170069],
[107.5967145,-6.9169962],
[107.5964254,-6.9169893],
[107.5963315,-6.9170639],
[107.5962886,-6.9169893],
[107.5963154,-6.9167337],
[107.5962886,-6.9169973],
[107.5963369,-6.9170612],
[107.5960043,-6.9174526],
[107.5959634,-6.9174329],
[107.5955504,-6.9172945],
[107.5952291,-6.9171864],
[107.5942951,-6.9171347],
[107.5941616,-6.9171411],
[107.5932954,-6.9170661],
[107.5941616,-6.9171411],
[107.5941643,-6.9181529],
[107.5941482,-6.918334],
[107.5931129,-6.9181849],
[107.5941536,-6.9183313],
[107.5940248,-6.9187041],
[107.5933757,-6.9186242],
[107.5940248,-6.9187041],
[107.593939,-6.9190076],
[107.5938698,-6.9195477],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5941643,-6.9181529],
[107.5944015,-6.9181847],
[107.5944444,-6.9178652],
[107.5950291,-6.9178652],
[107.5950559,-6.9177427],
[107.5952008,-6.9177054],
[107.5955226,-6.9177534],
[107.5955504,-6.9172945],
[107.5955226,-6.917748],
[107.5954904,-6.9183179],
[107.5943961,-6.91819],
[107.5954958,-6.9183179],
[107.5961503,-6.9183072],
[107.5954958,-6.9183179],
[107.5954422,-6.9187812],
[107.5953992,-6.9189409],
[107.5951632,-6.9193882],
[107.5951519,-6.9197021],
[107.5954952,-6.9197421],
[107.5956085,-6.9194095],
[107.5956889,-6.9190314],
[107.5957908,-6.9186054],
[107.5956889,-6.9190314],
[107.5956085,-6.9194149],
[107.5954952,-6.9197421],
[107.5956937,-6.9197687],
[107.5958713,-6.9192711],
[107.5959947,-6.9191113],
[107.596279,-6.9192019],
[107.5964453,-6.9192338],
[107.5966867,-6.9192871],
[107.5967564,-6.91909],
[107.5965794,-6.9190527],
[107.5965526,-6.9191539],
[107.5965794,-6.9190527],
[107.596456,-6.9190368],
[107.5964507,-6.9192338],
[107.596456,-6.9190368],
[107.5963273,-6.9190527],
[107.596279,-6.9192019],
[107.5959947,-6.9191113],
[107.5961503,-6.9187332],
[107.5964829,-6.9188557],
[107.5967779,-6.9188983],
[107.5967564,-6.91909],
[107.5967779,-6.9188983],
[107.5968289,-6.9185868],
[107.5965875,-6.9185149],
[107.5962119,-6.9184856],
[107.5961503,-6.9187306],
[107.5962093,-6.9184909],
[107.5962441,-6.9183152],
[107.5961476,-6.9183072],
[107.5962441,-6.9183179],
[107.5965821,-6.918443],
[107.5965875,-6.9185149],
[107.5965821,-6.9184403],
[107.596743,-6.9176256],
[107.5963407,-6.9175137],
[107.5962978,-6.9175723],
[107.5960043,-6.9174526],
[107.5959705,-6.9174924],
[107.5961503,-6.9183072],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5972831,-6.9170069],
[107.597159,-6.9175675],
[107.5974702,-6.91769],
[107.5973951,-6.9183557],
[107.5969123,-6.9182439],
[107.597159,-6.9175675],
[107.5969123,-6.9182439],
[107.5968289,-6.9185868],
[107.597379,-6.9186699],
[107.5973951,-6.9183557],
[107.597379,-6.9186699],
[107.5973575,-6.9190118],
[107.5967779,-6.9188983],
[107.5973575,-6.9190118],
[107.5972386,-6.9199498],
[107.5976544,-6.9200004],
[107.5977706,-6.9193739],
[107.5977116,-6.9193153],
[107.5977867,-6.9186656],
[107.597379,-6.9186699],
[107.5977867,-6.9186656],
[107.5982802,-6.9187082],
[107.5983705,-6.9200829],
[107.5982802,-6.9187082],
[107.5982641,-6.9177869],
[107.5981193,-6.9177923],
[107.5979154,-6.9177656],
[107.597733,-6.9177443],
[107.5979154,-6.9177656],
[107.5981193,-6.9177923],
[107.5982641,-6.9177869],
[107.5982165,-6.9170921],
[107.5982641,-6.9177869],
[107.5982748,-6.9180532],
[107.5989722,-6.9181224],
[107.5990795,-6.9180745],
[107.5990956,-6.9178562],
[107.5989668,-6.9178455],
[107.5989132,-6.9177656],
[107.5987254,-6.9177071],
[107.5989132,-6.9177656],
[107.5989668,-6.9178455],
[107.5990956,-6.9178562],
[107.5991868,-6.9174994],
[107.5994765,-6.9175473],
[107.5997822,-6.9175739],
[107.5998104,-6.9171864],
[107.6007974,-6.9172583],
[107.5998104,-6.9171864],
[107.5997822,-6.9175739],
[107.6000612,-6.9176378],
[107.5997822,-6.9175739],
[107.599734,-6.9177443],
[107.6003831,-6.9178455],
[107.6004206,-6.9174994],
[107.600544,-6.9174727],
[107.6005547,-6.9172384],
[107.600544,-6.9174727],
[107.6004206,-6.9174994],
[107.6003831,-6.9178455],
[107.6004004,-6.9182369],
[107.6005667,-6.9182795],
[107.6005399,-6.9185458],
[107.6004433,-6.9186097],
[107.6002395,-6.9186097],
[107.6002234,-6.9188973],
[107.5996387,-6.9188281],
[107.599628,-6.9183168],
[107.5999713,-6.9183168],
[107.599628,-6.9183168],
[107.5996119,-6.9180293],
[107.5999874,-6.9180825],
[107.6004004,-6.9182369],
[107.6003831,-6.9178455],
[107.599734,-6.9177443],
[107.5996119,-6.9180293],
[107.5996199,-6.9181631],
[107.5993812,-6.9181304],
[107.5994765,-6.9175473],
[107.5993812,-6.9181304],
[107.5990795,-6.9180745],
[107.5989722,-6.9181224],
[107.5989198,-6.9187535],
[107.5993812,-6.9187855],
[107.5993812,-6.9181304],
[107.5993812,-6.9187855],
[107.5996387,-6.9188281],
[107.5998157,-6.9190783],
[107.5995421,-6.919105],
[107.5998157,-6.9190783],
[107.5997621,-6.9197706],
[107.5997385,-6.9202507],
[107.5997621,-6.9197706],
[107.5993436,-6.919744],
[107.5992793,-6.9198026],
[107.5992846,-6.9199304],
[107.5992793,-6.9198026],
[107.5993436,-6.919744],
[107.5997621,-6.9197706],
[107.5997889,-6.9194085],
[107.5991291,-6.9193499],
[107.5991291,-6.9190996],
[107.5987804,-6.919073],
[107.5987643,-6.9187482],
[107.5989198,-6.9187535],
[107.5987643,-6.9187482],
[107.5982802,-6.9187082],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.6018038,-6.9204983],
[107.6018515,-6.9198464],
[107.6018837,-6.9193085],
[107.6015508,-6.9193359],
[107.6015616,-6.9196714],
[107.6011592,-6.9196608],
[107.6011217,-6.919762],
[107.6011592,-6.9198951],
[107.6011217,-6.919762],
[107.6011592,-6.9196608],
[107.6011807,-6.919304],
[107.6015508,-6.9193359],
[107.6015562,-6.9189099],
[107.6017761,-6.9189099],
[107.6015562,-6.9189099],
[107.6015776,-6.9185318],
[107.60117,-6.9185265],
[107.6015776,-6.9185318],
[107.6016182,-6.917892],
[107.6012451,-6.9178981],
[107.6012882,-6.9172876],
[107.6012451,-6.9178981],
[107.60117,-6.9185265],
[107.6010788,-6.9192667],
[107.6011807,-6.919304],
[107.6010788,-6.9192667],
[107.6006872,-6.9190111],
[107.6002234,-6.9188973],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.6004385,-6.9146777],
[107.6004271,-6.9149108],
[107.5999765,-6.9150493],
[107.6001857,-6.915912],
[107.6002501,-6.915928],
[107.6002554,-6.9162049],
[107.600588,-6.9161996],
[107.600529,-6.9158641],
[107.6002501,-6.9159227],
[107.600529,-6.9158641],
[107.6006739,-6.9158321],
[107.6007382,-6.9157842],
[107.6007114,-6.9156457],
[107.6009528,-6.9156085],
[107.6010011,-6.9155339],
[107.6015053,-6.9154274],
[107.6014732,-6.9152357],
[107.6013551,-6.9152517],
[107.6013716,-6.9154557],
[107.6010011,-6.9155339],
[107.6008777,-6.9153315],
[107.6006041,-6.9154061],
[107.6004271,-6.9149055],
]])
def jalanKelurahanMaleber(self, nama):
self.jalan.record(nama)
self.jalan.line([[
[107.5688412,-6.9100128],
[107.569164,-6.9105174],
[107.569341,-6.9106346],
[107.5692847,-6.9107491],
[107.5696974,-6.9114267],
[107.5699509,-6.9108835],
[107.5696974,-6.9114267],
[107.5700515,-6.9119965],
[107.5701373,-6.9119139],
[107.5700515,-6.9119965],
[107.5704029,-6.9125876],
[107.5705826,-6.9123799],
[107.5706255,-6.9122947],
[107.5706952,-6.9120551],
[107.5706523,-6.9120471],
[107.5706952,-6.9120551],
[107.5708293,-6.912063],
[107.5709715,-6.9120737],
[107.5708293,-6.912063],
[107.5710466,-6.9113122],
[107.5708293,-6.912063],
[107.5706952,-6.9120551],
[107.5706255,-6.9122947],
[107.5705826,-6.9123799],
[107.5704029,-6.9125876],
[107.5706335,-6.9129604],
[107.5707784,-6.9128432],
[107.571009,-6.9125743],
[107.5710546,-6.9125743],
[107.5711968,-6.9126062],
[107.5712799,-6.9123719],
[107.5711968,-6.9126062],
[107.5710546,-6.9125743],
[107.571009,-6.9125743],
[107.5707784,-6.9128432],
[107.5706335,-6.9129604],
[107.5709473,-6.9134849],
[107.571229,-6.9133278],
[107.5714865,-6.9133997],
[107.571229,-6.9133278],
[107.5709473,-6.9134849],
[107.5716769,-6.9147284],
[107.5717761,-6.9146565],
[107.5720175,-6.9138817],
[107.5721946,-6.9135195],
[107.5720256,-6.9134477],
[107.5721946,-6.9135195],
[107.5722482,-6.913421],
[107.5723825,-6.9130857],
[107.5721198,-6.9129207],
[107.5722113,-6.9127933],
[107.5721959,-6.9127141],
[107.5718432,-6.9126089],
[107.5721959,-6.9127141],
[107.5722113,-6.9127933],
[107.5721198,-6.9129207],
[107.5723825,-6.9130857],
[107.5733039,-6.9134641],
[107.5723825,-6.9130857],
[107.5722482,-6.913421],
[107.5721946,-6.9135195],
[107.5720175,-6.9138817],
[107.5717761,-6.9146565],
[107.5716769,-6.9147284],
[107.5718647,-6.9150186],
[107.572157,-6.9148296],
[107.5725674,-6.9148882],
[107.5725754,-6.9148296],
[107.5725674,-6.9148882],
[107.572157,-6.9148296],
[107.5718647,-6.9150186],
[107.5721892,-6.9154953],
[107.572318,-6.9153142],
[107.5721892,-6.9154953],
[107.5724226,-6.9157908],
[107.572554,-6.9155805],
[107.572503,-6.9154846],
[107.5723662,-6.915426],
[107.572503,-6.9154846],
[107.572554,-6.9155805],
[107.572664,-6.9156071],
[107.5728008,-6.9155565],
[107.5729214,-6.9155139],
[107.5729636,-6.9155205],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5735333,-6.9149932],
[107.5731403,-6.9148526],
[107.5733817,-6.9149379],
[107.5735078,-6.9142189],
[107.5735641,-6.9139686],
[107.5742373,-6.9141444],
[107.5738457,-6.9151136],
[107.5742373,-6.9141417],
[107.574468,-6.9136757],
[107.57424,-6.914147],
[107.5735587,-6.9139713],
[107.5731537,-6.9138568],
[107.5722482,-6.913421],
[107.5731537,-6.9138568],
[107.5733039,-6.9134641],
[107.5739476,-6.9137037],
[107.5733039,-6.9134641],
[107.5734675,-6.9129448],
[107.5740469,-6.9131525],
[107.5734675,-6.9129448],
[107.5737223,-6.9123404],
[107.5745994,-6.9126759],
[107.5745833,-6.9127717],
[107.5745941,-6.9128383],
[107.574645,-6.9128783],
[107.5747738,-6.912849],
[107.5748086,-6.9127904],
[107.5748355,-6.9127584],
[107.5753516,-6.9129897],
[107.5748355,-6.9127611],
[107.5747604,-6.9127345],
[107.5749052,-6.9124735],
[107.574645,-6.9123244],
[107.5749079,-6.9124735],
[107.5747604,-6.9127345],
[107.5745967,-6.9126759],
[107.5740871,-6.9124762],
[107.574181,-6.9122259],
[107.5740871,-6.9124762],
[107.5737223,-6.9123404],
[107.5732932,-6.9121726],
[107.5736097,-6.9113685],
[107.5732932,-6.9121726],
[107.5729794,-6.9120448],
[107.5727299,-6.9126386],
[107.5729794,-6.9120422],
[107.5729016,-6.9120129],
[107.5731576,-6.9113992],
[107.5733064,-6.9109505],
[107.5731576,-6.9113992],
[107.5729016,-6.9120129],
[107.5719977,-6.9116561],
[107.5721237,-6.911451],
[107.5719977,-6.9116561],
[107.5718662,-6.9119729],
[107.5719977,-6.9116561],
[107.571771,-6.9115695],
[107.5717187,-6.9116827],
[107.571771,-6.9115695],
[107.5717974,-6.9115781],
[107.5719453,-6.911215],
[107.5717981,-6.9112033],
[107.5719453,-6.911215],
[107.5717974,-6.9115781],
[107.5715459,-6.9114949],
[107.5713824,-6.9119103],
[107.5715459,-6.9114949],
[107.57131,-6.9114123],
[107.5714226,-6.9110928],
[107.57131,-6.9114123],
[107.5710466,-6.9113122],
[107.5711517,-6.9109676],
[107.5710466,-6.9113122],
[107.5706314,-6.9111354],
[107.5705402,-6.9113484],
[107.5705617,-6.9114496],
[107.5705053,-6.9115987],
[107.5704436,-6.9115774],
[107.5704597,-6.9115108],
[107.5703363,-6.9114443],
[107.5704517,-6.9110848],
[107.5706314,-6.9111354],
[107.5705821,-6.9111211],
[107.5709613,-6.9101795],
[107.5708916,-6.9101555],
[107.5709613,-6.9101795],
[107.5705821,-6.9111211],
[107.5704463,-6.9110821],
[107.5699509,-6.9108835],
[107.5696658,-6.9107653],
[107.5703524,-6.909125],
[107.5705804,-6.909133],
[107.570854,-6.9085925],
[107.5705804,-6.909133],
[107.5703524,-6.909125],
[107.5696658,-6.9107653],
[107.569341,-6.9106346],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5696075,-6.9094825],
[107.5696906,-6.9092908],
[107.5693588,-6.9092193],
[107.5696906,-6.9092908],
[107.5697684,-6.9090911],
[107.5695056,-6.9090351],
[107.5697684,-6.9090911],
[107.5698569,-6.9088914],
[107.5695834,-6.9088354],
[107.5698569,-6.9088914],
[107.5699481,-6.9087023],
[107.5697282,-6.9086331],
[107.5699481,-6.9087023],
[107.5700366,-6.9084973],
[107.5696569,-6.9084019],
[107.5700366,-6.9084973],
[107.5701129,-6.9083221],
[107.5698607,-6.9082528],
[107.5701129,-6.9083221],
[107.570854,-6.9085925],
[107.5701129,-6.9083221],
[107.5701665,-6.9082049],
[107.5698661,-6.9081197],
[107.5701665,-6.9082049],
[107.5702041,-6.908125],
[107.5703113,-6.908157],
[107.5702041,-6.908125],
[107.5702631,-6.9079652],
[107.5700002,-6.9078694],
[107.5702631,-6.9079652],
[107.5702952,-6.9079173],
[107.5705125,-6.9079985],
[107.570719,-6.9080877],
[107.5705125,-6.9079985],
[107.5702952,-6.9079173],
[107.5703435,-6.9077203],
[107.569968,-6.9076297],
[107.5703435,-6.9077203],
[107.5704133,-6.9075499],
[107.5700753,-6.9074806],
[107.5704133,-6.9075499],
[107.5706332,-6.9076138],
[107.5705125,-6.9079985],
[107.5706332,-6.9076138],
[107.5704133,-6.9075499],
[107.570483,-6.907257],
[107.5700807,-6.9071558],
[107.570483,-6.907257],
[107.5709604,-6.9073794],
[107.570483,-6.907257],
[107.5705098,-6.9070013],
[107.5702309,-6.9069641],
[107.5705098,-6.9070013],
[107.5705366,-6.9069215],
[107.5707995,-6.9069854],
[107.5705366,-6.9069215],
[107.570542,-6.9068256],
[107.5702255,-6.9067777],
[107.570542,-6.9068256],
[107.5705742,-6.9067138],
[107.5708585,-6.9067777],
[107.5705742,-6.9067138],
[107.570593,-6.9065859],
[107.5702094,-6.9065487],
[107.570593,-6.9065859],
[107.5705957,-6.906522],
[107.5707727,-6.9065487],
[107.5705957,-6.906522],
[107.5706225,-6.9063889],
[107.5702952,-6.9063463],
[107.5706225,-6.9063889],
[107.5706654,-6.9061972],
[107.5702952,-6.9061493],
[107.5706654,-6.9061972],
[107.570719,-6.9059629],
[107.5702952,-6.9058936],
[107.570719,-6.9059629],
[107.5708961,-6.9059788],
[107.570719,-6.9059629],
[107.5707459,-6.9057392],
[107.570365,-6.9056487],
[107.5707459,-6.9057392],
[107.5707673,-6.9055368],
[107.5703274,-6.9054569],
[107.5707673,-6.9055368],
[107.5708371,-6.9053664],
[107.5706654,-6.9053025],
[107.5708371,-6.9053664],
[107.5708961,-6.9052066],
[107.570542,-6.9051108],
[107.5708961,-6.9052066],
[107.5709765,-6.9049776],
[107.5709578,-6.9050296],
[107.5705313,-6.9048871],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5758411,-6.9122926],
[107.5737543,-6.9114911],
[107.5736578,-6.9113633],
[107.5736097,-6.9113685],
[107.5736604,-6.9113659],
[107.5736953,-6.9112754],
[107.5737892,-6.9112434],
[107.5739179,-6.9109692],
[107.5742505,-6.9110704],
[107.5760771,-6.9117866],
[107.5742532,-6.9110704],
[107.5739179,-6.9109665],
[107.5742183,-6.9102769],
[107.5761723,-6.9110832],
[107.574221,-6.9102769],
[107.5737194,-6.9100665],
[107.5733064,-6.9109505],
[107.5737221,-6.9100665],
[107.5732742,-6.9098349],
[107.5730677,-6.909739],
[107.5732715,-6.9098322],
[107.5734512,-6.9094301],
[107.5737248,-6.908602],
[107.5737868,-6.9084887],
[107.5739448,-6.9081067],
[107.5739905,-6.9081123],
[107.5739448,-6.9081067],
[107.5736658,-6.9080375],
[107.5737838,-6.907694],
[107.5739367,-6.907694],
[107.5742478,-6.907072],
[107.5739367,-6.907694],
[107.5737838,-6.907694],
[107.5736658,-6.9080375],
[107.5731535,-6.9079523],
[107.5737731,-6.9064718],
[107.5741084,-6.906597],
[107.5743926,-6.9067631],
[107.5741084,-6.906597],
[107.5737731,-6.9064718],
[107.5731535,-6.9079523],
[107.5728906,-6.9078964],
[107.5729577,-6.9078218],
[107.5730382,-6.907505],
[107.5729577,-6.9078218],
[107.5728906,-6.9078964],
[107.5726573,-6.9085807],
[107.5728906,-6.9078964],
[107.5726788,-6.9078511],
[107.5729657,-6.9069831],
[107.5732527,-6.9070576],
[107.5729657,-6.9069831],
[107.5729604,-6.9068979],
[107.5730623,-6.9064692],
[107.5730221,-6.9064239],
[107.5730757,-6.9062135],
[107.5733627,-6.90632],
[107.5734325,-6.9062934],
[107.5734566,-6.9061763],
[107.5734995,-6.9060724],
[107.5737007,-6.905476],
[107.5737999,-6.9055079],
[107.5743256,-6.9057928],
[107.5742291,-6.9060538],
[107.5741888,-6.9061283],
[107.5743095,-6.9061922],
[107.5744436,-6.9062082],
[107.5745643,-6.9058993],
[107.5745107,-6.9058461],
[107.5745831,-6.9055905],
[107.5747682,-6.9052044],
[107.575074,-6.9053961],
[107.574685,-6.9060218],
[107.575074,-6.9053961],
[107.5751361,-6.9054375],
[107.575074,-6.9053961],
[107.5747682,-6.9052044],
[107.5741271,-6.904781],
[107.5746166,-6.9051038],
[107.5745375,-6.9052363],
[107.5745053,-6.9052549],
[107.5744705,-6.9053455],
[107.5745322,-6.9054067],
[107.5744061,-6.9057023],
[107.5743551,-6.9057209],
[107.5743256,-6.9057928],
[107.5737999,-6.9055079],
[107.5741271,-6.904781],
[107.573816,-6.9045014],
[107.573706,-6.9044401],
[107.5735478,-6.9046958],
[107.573706,-6.9044401],
[107.5734485,-6.9042697],
[107.5733439,-6.9044481],
[107.5734485,-6.9042697],
[107.5731079,-6.9040407],
[107.5729684,-6.904291],
[107.5731079,-6.9040407],
[107.5728772,-6.9038836],
[107.572778,-6.9040194],
[107.5728772,-6.9038836],
[107.572609,-6.9037798],
[107.5722684,-6.9042751],
[107.5723542,-6.9041526],
[107.5725098,-6.9042617],
[107.5723542,-6.9041526],
[107.572609,-6.9037798],
[107.5723327,-6.9035428],
[107.572204,-6.9037958],
[107.5720645,-6.9041233],
[107.572204,-6.9037958],
[107.5723327,-6.9035428],
[107.5719251,-6.9032739],
[107.5717802,-6.9035295],
[107.5719251,-6.9032739],
[107.571799,-6.903194],
[107.5715388,-6.9037239],
[107.571799,-6.903194],
[107.5709002,-6.902592],
[107.5715469,-6.9030262],
[107.5711499,-6.9036679],
[107.5709434,-6.9041313],
[107.5713511,-6.9043043],
[107.5714369,-6.9040194],
[107.5715388,-6.9037239],
[107.571681,-6.9037904],
[107.571858,-6.9039129],
[107.5718741,-6.9040061],
[107.5714369,-6.9040194],
[107.5718741,-6.9040061],
[107.5720645,-6.9041233],
[107.5722684,-6.9042751],
[107.5721557,-6.9044721],
[107.5722281,-6.9045067],
[107.5721557,-6.9044721],
[107.5720404,-6.9046825],
[107.5713913,-6.9043629],
[107.5712947,-6.904544],
[107.5713913,-6.9043629],
[107.5713511,-6.9043043],
[107.5713913,-6.9043629],
[107.5720404,-6.9046825],
[107.5717829,-6.9051271],
[107.57163,-6.9050339],
[107.5717829,-6.9051271],
[107.5717078,-6.9052842],
[107.5715576,-6.9056331],
[107.5713242,-6.9055718],
[107.5711472,-6.9054893],
[107.5712813,-6.9051618],
[107.5711472,-6.9054893],
[107.570997,-6.9054307],
[107.5708371,-6.9053664],
[107.570997,-6.9054307],
[107.5708961,-6.9059788],
[107.5707727,-6.9065487],
[107.5708897,-6.9066209],
[107.5708585,-6.9067777],
[107.5707995,-6.9069854],
[107.5708585,-6.9067777],
[107.5708897,-6.9066209],
[107.5709353,-6.9066183],
[107.5710989,-6.9066289],
[107.5713752,-6.9066795],
[107.5712733,-6.9073905],
[107.5712384,-6.9074491],
[107.5709604,-6.9073794],
[107.570719,-6.9080877],
[107.5706564,-6.9082532],
[107.570719,-6.9080877],
[107.5709604,-6.9073794],
[107.5712384,-6.9074491],
[107.5710158,-6.908176],
[107.570938,-6.9083783],
[107.5708736,-6.9083517],
[107.570938,-6.9083783],
[107.570854,-6.9085925],
[107.570938,-6.9083783],
[107.5710158,-6.908176],
[107.5713913,-6.9083411],
[107.5710936,-6.9093289],
[107.571343,-6.9094088],
[107.5714101,-6.9094275],
[107.5713135,-6.9098349],
[107.5715388,-6.9099227],
[107.5713913,-6.9104047],
[107.5714825,-6.910426],
[107.5713913,-6.9104047],
[107.5709434,-6.9102263],
[107.5709613,-6.9101795],
[107.5710936,-6.909771],
[107.5710667,-6.9098615],
[107.5712169,-6.9099147],
[107.5710667,-6.9098615],
[107.5709434,-6.9102263],
[107.5713913,-6.9104047],
[107.5712786,-6.9107242],
[107.571233,-6.9107242],
[107.5711517,-6.9109676],
[107.570989,-6.9109079],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5731576,-6.9113992],
[107.5722645,-6.9110155],
[107.5721814,-6.9112551],
[107.5719453,-6.911215],
[107.5722404,-6.9104856],
[107.5728948,-6.9107652],
[107.5730987,-6.9103897],
[107.5730424,-6.9104909],
[107.5728439,-6.9103977],
[107.5730424,-6.9104909],
[107.5728948,-6.9107652],
[107.5733064,-6.9109505],
[107.5728948,-6.9107652],
[107.5722404,-6.9104856],
[107.5723369,-6.9102166],
[107.5715388,-6.9099227],
[107.5723369,-6.9102166],
[107.5725703,-6.9097054],
[107.5727473,-6.9097959],
[107.5725703,-6.9097054],
[107.5717737,-6.9094924],
[107.5714101,-6.9094275],
[107.571343,-6.9094088],
[107.5719588,-6.907687],
[107.5716503,-6.9076018],
[107.5716074,-6.9075033],
[107.5712733,-6.9073905],
[107.5716074,-6.9075033],
[107.5716503,-6.9076018],
[107.5713913,-6.9083411],
[107.5716503,-6.9076018],
[107.5719588,-6.907687],
[107.5724201,-6.9077829],
[107.5726788,-6.9078511],
[107.5724201,-6.9077829],
[107.5724201,-6.9076311],
[107.5721626,-6.9075699],
[107.5723209,-6.9067471],
[107.5721626,-6.9075699],
[107.5716583,-6.9074527],
[107.5721626,-6.9075699],
[107.5724201,-6.9076311],
[107.5724201,-6.9077829],
[107.5719614,-6.9089492],
[107.5717737,-6.9094924],
[107.5719614,-6.9089492],
[107.5729351,-6.9093033],
[107.5729914,-6.9092847],
[107.5732918,-6.9084992],
[107.5737248,-6.908602],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.5708961,-6.9059788],
[107.5710438,-6.9060206],
[107.5711472,-6.9054893],
[107.5710438,-6.9060206],
[107.5709353,-6.9066183],
[107.5710438,-6.9060206],
[107.5712101,-6.9060499],
[107.5713242,-6.9055718],
[107.5712101,-6.9060499],
[107.5710989,-6.9066289],
[107.5712101,-6.9060499],
[107.5714622,-6.9060952],
[107.5714729,-6.9060605],
[107.5715576,-6.9056331],
[107.5714729,-6.9060605],
[107.5714622,-6.9060952],
[107.5714434,-6.9062922],
[107.5713752,-6.9066795],
[107.5714434,-6.9062922],
[107.5718323,-6.906428],
[107.5717572,-6.9067475],
[107.5718323,-6.906428],
[107.5720308,-6.9064839],
[107.5723876,-6.9066969],
[107.5729604,-6.9068979],
[107.5730623,-6.9064692],
[107.5730221,-6.9064239],
[107.5722615,-6.9060632],
[107.5721113,-6.9060179],
[107.5720711,-6.9062603],
[107.5723151,-6.9063455],
[107.5726102,-6.9064946],
[107.5723125,-6.9063428],
[107.5720711,-6.9062603],
[107.5714729,-6.9060605],
[107.5720711,-6.9062603],
[107.5720308,-6.9064839],
[107.5720711,-6.9062603],
[107.5721113,-6.9060179],
[107.5721515,-6.9058129],
[107.57224,-6.9055706],
[107.5717078,-6.9052842],
[107.5717829,-6.9051271],
[107.5720094,-6.9047425],
[107.5725297,-6.9050354],
[107.5724332,-6.9052378],
[107.57224,-6.9055706],
[107.5724332,-6.9052378],
[107.572527,-6.9053496],
[107.5723285,-6.905757],
[107.5725834,-6.9058848],
[107.5726531,-6.9058555],
[107.5727604,-6.9055733],
[107.5729642,-6.9051099],
[107.5733439,-6.9044481],
[107.5731386,-6.9043377],
[107.5730688,-6.9043298],
[107.5727094,-6.9049688],
[107.572527,-6.9053496],
[107.5727094,-6.9049688],
[107.5729642,-6.9051099],
]])
self.jalan.record(nama)
self.jalan.line([[
[107.573816,-6.9045014],
[107.5735225,-6.9049559],
[107.5732355,-6.9056402],
[107.5736057,-6.9057613],
[107.5734995,-6.9060724],
[107.5730934,-6.9059863],
[107.5732355,-6.9056402],
[107.5730934,-6.9059863],
[107.5730558,-6.9060902],
[107.5734566,-6.9061763],
[107.5731222,-6.9061048],
[107.5730757,-6.9062135],
[107.5728386,-6.9061328],
[107.5726642,-6.9060822],
[107.5721515,-6.9058129],
]])
def close(self):
# Finalise all three shapefile writers so their .shp/.shx/.dbf files are
# flushed to disk. Call exactly once, after every record/shape has been
# added; the writers cannot be reused afterwards.
self.kelurahan.close()
self.kantor.close()
self.jalan.close()
| [
"shapefile.Writer"
] | [((81, 145), 'shapefile.Writer', 'shapefile.Writer', (['"""kelurahan_andir"""'], {'shapeType': 'shapefile.POLYGON'}), "('kelurahan_andir', shapeType=shapefile.POLYGON)\n", (97, 145), False, 'import shapefile\n'), ((271, 340), 'shapefile.Writer', 'shapefile.Writer', (['"""kantor_kelurahan_andir"""'], {'shapeType': 'shapefile.POINT'}), "('kantor_kelurahan_andir', shapeType=shapefile.POINT)\n", (287, 340), False, 'import shapefile\n'), ((466, 527), 'shapefile.Writer', 'shapefile.Writer', (['"""jalan_andir"""'], {'shapeType': 'shapefile.POLYLINE'}), "('jalan_andir', shapeType=shapefile.POLYLINE)\n", (482, 527), False, 'import shapefile\n')] |
# Copyright 2016-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch, glob, os
from .sparseConvNetTensor import SparseConvNetTensor
from .metadata import Metadata
import sparseconvnet as scn
import pdb
def toLongTensor(dimension, x):
    """Coerce *x* into a CPU ``torch.LongTensor`` of length ``dimension``.

    Accepts an existing ``torch.LongTensor`` (returned unchanged), a
    list/tuple giving one size per spatial dimension, or a single scalar
    that is replicated across every dimension.
    """
    # Already the right tensor type: hand it back untouched.
    if hasattr(x, 'type') and x.type() == 'torch.LongTensor':
        return x
    # A sequence must supply exactly one entry per spatial dimension.
    if isinstance(x, (list, tuple)):
        assert len(x) == dimension
        return torch.LongTensor(x)
    # Otherwise treat x as a scalar broadcast to all dimensions.
    return torch.LongTensor(dimension).fill_(x)
def optionalTensor(a, b):
    """Return attribute ``b`` of ``a``, or an empty ``torch.Tensor`` if absent."""
    return getattr(a, b, torch.Tensor())
def optionalTensorReturn(a):
    """Map an empty tensor to ``None``; non-empty tensors pass through unchanged."""
    if a.numel():
        return a
    return None
def threadDatasetIterator(d, nThreads=8, queueSize=16):
    """Return a factory for an iterator that reads dataset ``d`` with worker threads.

    Each of ``nThreads`` producer threads fetches the items whose index is
    congruent to its id modulo ``nThreads`` and pushes them onto a bounded
    queue; the consumer side yields exactly ``len(d)`` items. Item order is
    NOT guaranteed to match dataset order (workers interleave).

    Args:
        d: indexable dataset supporting ``len(d)`` and ``d[k]``.
        nThreads: number of producer threads (default 8, the previous
            hard-coded value, so existing callers are unaffected).
        queueSize: bound on the producer/consumer queue (default 16,
            the previous hard-coded value).

    Returns:
        A zero-argument callable; each call produces a fresh generator
        over the dataset items.
    """
    try:
        import queue
    except BaseException:  # Python 2 fallback
        import Queue as queue
    import threading

    def iterator():
        def worker(i):
            # Thread i handles indices i, i + nThreads, i + 2*nThreads, ...
            for k in range(i, len(d), nThreads):
                q.put(d[k])
        q = queue.Queue(queueSize)
        for i in range(nThreads):
            t = threading.Thread(target=worker, args=(i,))
            # Daemonize so producers blocked on a full queue cannot keep the
            # interpreter alive if the consumer abandons the generator early.
            t.daemon = True
            t.start()
        for _ in range(len(d)):
            item = q.get()
            yield item
            q.task_done()
        q.join()
    return iterator
def concatenate_feature_planes(input):
    """Join several SparseConvNetTensors along the feature (channel) axis.

    The inputs are assumed to describe the same active sites: the first
    tensor's metadata and spatial size are reused for the result, and the
    per-site feature matrices are concatenated column-wise.
    """
    merged = SparseConvNetTensor()
    first = input[0]
    merged.metadata = first.metadata
    merged.spatial_size = first.spatial_size
    merged.features = torch.cat([t.features for t in input], 1)
    return merged
def extract_featrue(input, start, end):
    """Slice feature channels [start, end) out of a SparseConvNetTensor.

    The misspelled name is kept deliberately -- callers depend on it.
    """
    result = SparseConvNetTensor()
    result.metadata = input.metadata
    result.spatial_size = input.spatial_size
    result.features = input.features[:, start:end]
    return result
def upsample_feature(lr, hr, stride, bilinear = False):
    """Scatter features from a low-resolution SparseConvNetTensor *lr* onto
    the active sites of a high-resolution tensor *hr*.

    With ``bilinear=False`` each high-res site copies the feature of its
    corresponding low-res site (nearest assignment via
    ResolutionBasedScattering). With ``bilinear=True`` each high-res site
    blends the features of its 8 surrounding low-res corners, weighted by
    distance (trilinear-style interpolation).

    NOTE(review): locations are moved to CUDA unconditionally, so this
    assumes GPU tensors -- confirm before CPU use.
    """
    loc_lr = lr.get_spatial_locations().cuda().int()
    loc_hr = hr.get_spatial_locations().cuda().int()
    # Batch index lives in column 3 of the location tensors.
    batch_size = torch.max(loc_hr[:,3]).item() + 1
    output = SparseConvNetTensor()
    output.metadata = hr.metadata
    output.spatial_size = hr.spatial_size
    lr_start_index = 0
    hr_start_index = 0
    # One batch sample at a time; each sample's feature rows are contiguous,
    # hence the running *_start_index offsets.
    for k in range(batch_size):
        if not bilinear:
            correspondence = scn.SCN.ResolutionBasedScattering(lr.metadata,loc_lr[loc_lr[:,3] == k,0:3], loc_hr[loc_hr[:,3] == k,0:3], stride)
            correspondence += lr_start_index
            # Negative indices mean "no matching lr site"; clamp to row 0.
            correspondence[correspondence < 0] = 0
            output_feature = torch.index_select(lr.features,0,correspondence.long())
        else:
            location_lr = loc_lr[loc_lr[:,3] == k,0:3]
            location_hr = loc_hr[loc_hr[:,3] == k,0:3]
            # Project hr coordinates into lr coordinate space.
            candidate = location_hr.clone().float()
            candidate = (candidate - (stride - 1) / 2) / stride
            ceil_candidate = torch.ceil(candidate)
            floor_candidate = torch.floor(candidate)
            diff = [ceil_candidate - candidate, candidate - floor_candidate]
            anchors = [ceil_candidate, floor_candidate]
            # ceil, up
            # floor, bottom
            # Enumerate the 8 corner combinations (ceil/floor per axis),
            # stacking their coordinate queries and interpolation weights.
            for x in [0,1]:
                for y in [0,1]:
                    for z in [0,1]:
                        w = (1-diff[x][:,0])*(1-diff[y][:,1])*(1-diff[z][:,2])
                        query = location_hr.clone()
                        query[:,0] = anchors[x][:,0]
                        query[:,1] = anchors[y][:,1]
                        query[:,2] = anchors[z][:,2]
                        if x==0 and y ==0 and z==0:
                            weight = w
                            lr_candidates = query
                        else:
                            weight = torch.cat([weight,w],0)
                            lr_candidates = torch.cat([lr_candidates ,query],0)
            neighbor_correspondence = scn.SCN.ResolutionBasedScattering(lr.metadata,location_lr, lr_candidates, 1).long()
            # Corners with no active lr site get zero weight, and their
            # indices are clamped so index_select stays in bounds.
            weight[neighbor_correspondence < 0] = 0
            neighbor_correspondence.requires_grad = False
            weight.requires_grad = False
            neighbor_correspondence[neighbor_correspondence < 0] = 0
            hr_feature = torch.index_select(lr.features,0,neighbor_correspondence + lr_start_index) * weight.view([-1,1]).expand(-1,lr.features.shape[1])
            # The 8 corner contributions are stacked end-to-end; fold them
            # back to one row per hr site and normalise by total weight.
            output_feature = sum([hr_feature[i * location_hr.shape[0]: i * location_hr.shape[0] + location_hr.shape[0]] for i in range(8)])
            total_weight = sum([weight[i * location_hr.shape[0]: i * location_hr.shape[0] + location_hr.shape[0]] for i in range(8)])
            output_feature /= total_weight.view([-1,1]).expand(-1,lr.features.shape[1])
        if k == 0:
            output.features = output_feature
        else:
            output.features = torch.cat([output.features, output_feature ], 0)
        lr_start_index = lr_start_index + torch.sum(loc_lr[:,3] == k)
        hr_start_index = hr_start_index + torch.sum(loc_hr[:,3] == k)
    return output
def add_feature_planes(input):
    """Element-wise sum of the feature planes of several SparseConvNetTensors
    sharing the same active sites; metadata comes from the first tensor."""
    summed = sum(t.features for t in input)
    result = SparseConvNetTensor()
    result.metadata = input[0].metadata
    result.spatial_size = input[0].spatial_size
    result.features = summed
    return result
def append_tensors(tensors):
    """Merge several SparseConvNetTensors into a single batched tensor.

    Features are stacked along dim 0 and each input's metadata is appended
    to a fresh Metadata object of the same spatial dimensionality.
    """
    spatial_size = tensors[0].spatial_size
    merged = SparseConvNetTensor(
        features=torch.cat([t.features for t in tensors], 0),
        metadata=Metadata(len(spatial_size)),
        spatial_size=spatial_size)
    for t in tensors:
        merged.metadata.appendMetadata(t.metadata, spatial_size)
    return merged
class AddCoords(torch.nn.Module):
    """Append normalised spatial coordinates (scaled to [-1, 1]) to the
    feature planes of a SparseConvNetTensor."""

    def forward(self, input):
        result = SparseConvNetTensor()
        result.metadata = input.metadata
        result.spatial_size = input.spatial_size
        if not input.features.numel():
            # Nothing active: pass the empty feature tensor through.
            result.features = input.features
            return result
        with torch.no_grad():
            locs = input.get_spatial_locations()
            half = (input.spatial_size.type_as(input.features) - 1) / 2
            # Drop the batch column, then rescale each axis to [-1, 1].
            normed = locs[:, :-1].type_as(input.features) / half[None, :] - 1
        result.features = torch.cat([input.features, normed], 1)
        return result
def compare_sparse(x, y):
    """Mean squared feature difference between two SparseConvNetTensors.

    Sites active in both tensors contribute their squared difference; sites
    active in only one tensor contribute that tensor's squared features.
    The sum is normalised by the total number of indexed sites.
    """
    both_x, both_y, only_x, only_y = x.metadata.compareSparseHelper(
        y.metadata, x.spatial_size)
    if x.features.is_cuda:
        both_x, both_y = both_x.cuda(), both_y.cuda()
        only_x, only_y = only_x.cuda(), only_y.cuda()
    err = 0
    if both_x.numel():
        err += (x.features[both_x] - y.features[both_y]).pow(2).sum()
    if only_x.numel():
        err += x.features[only_x].pow(2).sum()
    if only_y.numel():
        err += y.features[only_y].pow(2).sum()
    return err / (both_x.numel() + only_x.numel() + only_y.numel())
def spectral_norm_svd(module):
    """Largest singular value of ``module.weight``.

    3-D weight tensors are flattened to 2-D (merging the first two dims)
    before the SVD.
    """
    weight = module.weight
    if weight.ndimension() == 3:
        weight = weight.view(-1, weight.size(2))
    singular_values = torch.svd(weight)[1]
    return singular_values[0]
def pad_with_batch_idx(x, idx):
    """Append a constant batch-index column *idx* to coordinate tensor *x*."""
    batch_column = torch.LongTensor(x.size(0), 1).fill_(idx)
    return torch.cat([x, batch_column], 1)
def batch_location_tensors(location_tensors):
    """Merge per-sample coordinate tensors into one batched tensor.

    Each non-empty tensor gets a trailing column holding its sample index
    (its position in *location_tensors*); empty tensors are skipped.
    Returns an empty LongTensor when every input is empty -- the original
    implementation crashed there, because ``torch.cat`` rejects an empty
    list of tensors.
    """
    padded = [
        pad_with_batch_idx(lt, batch_idx)
        for batch_idx, lt in enumerate(location_tensors)
        if lt.numel()
    ]
    if not padded:
        return torch.LongTensor()
    return torch.cat(padded, 0)
def checkpoint_restore(model,exp_name,name2,use_cuda=True,epoch=0):
    """Load model weights from ``<exp_name>-<epoch>-<name2>.pth``.

    With ``epoch > 0`` the exact file must exist; with ``epoch == 0`` the
    lexicographically last matching checkpoint is used and the epoch number
    is parsed back out of its filename. Returns the next epoch to train
    (restored epoch + 1).

    NOTE(review): this path formats the epoch with ``'-%09d-'`` while
    checkpoint_save below writes ``'-%04d-'`` -- an explicit-epoch restore
    cannot find files written by checkpoint_save; confirm which zero
    padding is intended.
    """
    if use_cuda:
        model.cpu()  # load on CPU, move back to GPU at the end
    if epoch>0:
        f=exp_name+'-%09d-'%epoch+name2+'.pth'
        assert os.path.isfile(f)
        print('Restore from ' + f)
        model.load_state_dict(torch.load(f))
    else:
        # No epoch given: pick the newest checkpoint matching the pattern.
        f=sorted(glob.glob(exp_name+'-*-'+name2+'.pth'))
        if len(f)>0:
            f=f[-1]
            print('Restore from ' + f)
            model.load_state_dict(torch.load(f))
            # Recover the epoch number embedded in the filename.
            epoch=int(f[len(exp_name)+1:-len(name2)-5])
    if use_cuda:
        model.cuda()
    return epoch+1
def is_power2(num):
    """True iff *num* is a power of two (0 is not).

    Uses the classic bit trick: a power of two has a single set bit, so
    ``num & (num - 1)`` clears it and leaves zero.
    """
    if num == 0:
        return False
    return (num & (num - 1)) == 0
def checkpoint_save(model, exp_name, name2, epoch, use_cuda=True):
    """Write the model's state dict to ``<exp_name>-<epoch:04d>-<name2>.pth``.

    Afterwards the previous epoch's checkpoint is deleted unless its epoch
    number is a power of two, keeping a sparse history on disk.
    """
    path = exp_name + '-%04d-' % epoch + name2 + '.pth'
    model.cpu()
    torch.save(model.state_dict(), path)
    if use_cuda:
        model.cuda()
    # Prune the immediately preceding checkpoint unless it is a power of 2.
    previous = epoch - 1
    previous_path = exp_name + '-%04d-' % previous + name2 + '.pth'
    if os.path.isfile(previous_path) and not is_power2(previous):
        os.remove(previous_path)
| [
"torch.index_select",
"sparseconvnet.SCN.ResolutionBasedScattering",
"torch.LongTensor",
"torch.load",
"torch.floor",
"torch.Tensor",
"torch.max",
"os.path.isfile",
"torch.ceil",
"torch.no_grad",
"torch.svd",
"torch.sum",
"glob.glob",
"threading.Thread",
"Queue.Queue",
"torch.cat",
"... | [((1504, 1545), 'torch.cat', 'torch.cat', (['[i.features for i in input]', '(1)'], {}), '([i.features for i in input], 1)\n', (1513, 1545), False, 'import torch, glob, os\n'), ((6751, 6763), 'torch.svd', 'torch.svd', (['w'], {}), '(w)\n', (6760, 6763), False, 'import torch, glob, os\n'), ((7126, 7141), 'torch.cat', 'torch.cat', (['a', '(0)'], {}), '(a, 0)\n', (7135, 7141), False, 'import torch, glob, os\n'), ((8149, 8166), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (8163, 8166), False, 'import torch, glob, os\n'), ((701, 715), 'torch.Tensor', 'torch.Tensor', ([], {}), '()\n', (713, 715), False, 'import torch, glob, os\n'), ((1048, 1063), 'Queue.Queue', 'queue.Queue', (['(16)'], {}), '(16)\n', (1059, 1063), True, 'import Queue as queue\n'), ((7325, 7342), 'os.path.isfile', 'os.path.isfile', (['f'], {}), '(f)\n', (7339, 7342), False, 'import torch, glob, os\n'), ((544, 563), 'torch.LongTensor', 'torch.LongTensor', (['x'], {}), '(x)\n', (560, 563), False, 'import torch, glob, os\n'), ((1107, 1149), 'threading.Thread', 'threading.Thread', ([], {'target': 'worker', 'args': '(i,)'}), '(target=worker, args=(i,))\n', (1123, 1149), False, 'import threading\n'), ((2274, 2397), 'sparseconvnet.SCN.ResolutionBasedScattering', 'scn.SCN.ResolutionBasedScattering', (['lr.metadata', 'loc_lr[loc_lr[:, 3] == k, 0:3]', 'loc_hr[loc_hr[:, 3] == k, 0:3]', 'stride'], {}), '(lr.metadata, loc_lr[loc_lr[:, 3] == k, 0:\n 3], loc_hr[loc_hr[:, 3] == k, 0:3], stride)\n', (2307, 2397), True, 'import sparseconvnet as scn\n'), ((2839, 2860), 'torch.ceil', 'torch.ceil', (['candidate'], {}), '(candidate)\n', (2849, 2860), False, 'import torch, glob, os\n'), ((2891, 2913), 'torch.floor', 'torch.floor', (['candidate'], {}), '(candidate)\n', (2902, 2913), False, 'import torch, glob, os\n'), ((4763, 4810), 'torch.cat', 'torch.cat', (['[output.features, output_feature]', '(0)'], {}), '([output.features, output_feature], 0)\n', (4772, 4810), False, 'import torch, glob, os\n'), ((4854, 
4882), 'torch.sum', 'torch.sum', (['(loc_lr[:, 3] == k)'], {}), '(loc_lr[:, 3] == k)\n', (4863, 4882), False, 'import torch, glob, os\n'), ((4924, 4952), 'torch.sum', 'torch.sum', (['(loc_hr[:, 3] == k)'], {}), '(loc_hr[:, 3] == k)\n', (4933, 4952), False, 'import torch, glob, os\n'), ((5346, 5389), 'torch.cat', 'torch.cat', (['[t.features for t in tensors]', '(0)'], {}), '([t.features for t in tensors], 0)\n', (5355, 5389), False, 'import torch, glob, os\n'), ((5960, 5998), 'torch.cat', 'torch.cat', (['[input.features, coords]', '(1)'], {}), '([input.features, coords], 1)\n', (5969, 5998), False, 'import torch, glob, os\n'), ((7408, 7421), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (7418, 7421), False, 'import torch, glob, os\n'), ((7450, 7494), 'glob.glob', 'glob.glob', (["(exp_name + '-*-' + name2 + '.pth')"], {}), "(exp_name + '-*-' + name2 + '.pth')\n", (7459, 7494), False, 'import torch, glob, os\n'), ((8213, 8225), 'os.remove', 'os.remove', (['f'], {}), '(f)\n', (8222, 8225), False, 'import torch, glob, os\n'), ((1995, 2018), 'torch.max', 'torch.max', (['loc_hr[:, 3]'], {}), '(loc_hr[:, 3])\n', (2004, 2018), False, 'import torch, glob, os\n'), ((4160, 4236), 'torch.index_select', 'torch.index_select', (['lr.features', '(0)', '(neighbor_correspondence + lr_start_index)'], {}), '(lr.features, 0, neighbor_correspondence + lr_start_index)\n', (4178, 4236), False, 'import torch, glob, os\n'), ((5713, 5728), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5726, 5728), False, 'import torch, glob, os\n'), ((7604, 7617), 'torch.load', 'torch.load', (['f'], {}), '(f)\n', (7614, 7617), False, 'import torch, glob, os\n'), ((589, 616), 'torch.LongTensor', 'torch.LongTensor', (['dimension'], {}), '(dimension)\n', (605, 616), False, 'import torch, glob, os\n'), ((3831, 3908), 'sparseconvnet.SCN.ResolutionBasedScattering', 'scn.SCN.ResolutionBasedScattering', (['lr.metadata', 'location_lr', 'lr_candidates', '(1)'], {}), '(lr.metadata, location_lr, lr_candidates, 
1)\n', (3864, 3908), True, 'import sparseconvnet as scn\n'), ((3688, 3713), 'torch.cat', 'torch.cat', (['[weight, w]', '(0)'], {}), '([weight, w], 0)\n', (3697, 3713), False, 'import torch, glob, os\n'), ((3756, 3792), 'torch.cat', 'torch.cat', (['[lr_candidates, query]', '(0)'], {}), '([lr_candidates, query], 0)\n', (3765, 3792), False, 'import torch, glob, os\n')] |
import json
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from treebeard.mp_tree import MP_Node
try:
from wagtail.core.blocks import StreamValue
except ImportError: # pragma: no cover; fallback for Wagtail < 2.0
from wagtail.wagtailcore.blocks import StreamValue
def get_page(page_cls, slug):
    """Return the page of *page_cls* with the given slug.

    Propagates the model's ``DoesNotExist`` (an ``ObjectDoesNotExist``
    subclass) when no page matches -- callers in this module catch it.
    """
    return page_cls.objects.get(slug=slug)
def get_free_path(apps, parent_page):
    """Find an unused treebeard materialised path for a new child of
    *parent_page*, probing successive child positions until a free one
    is found."""
    Page = apps.get_model('wagtailcore', 'Page')
    candidate_child = parent_page.numchild + 1
    while True:
        candidate = MP_Node._get_path(
            parent_page.path,
            parent_page.depth + 1,
            candidate_child,
        )
        try:
            Page.objects.get(path=candidate)
        except Page.DoesNotExist:
            # Nothing occupies this path, so it is free to use.
            return candidate
        candidate_child += 1
@transaction.atomic
def get_or_create_page(apps, page_cls_app, page_cls_name, title, slug,
                       parent_page, live=False, **kwargs):
    """Fetch the page with *slug*, or create it under *parent_page*.

    Runs inside a transaction. *apps* is the migration app registry; the
    page model is resolved from ``(page_cls_app, page_cls_name)``. Extra
    ``kwargs`` are forwarded to ``objects.create``. Returns the page.
    """
    page_cls = apps.get_model(page_cls_app, page_cls_name)
    try:
        return get_page(page_cls, slug)
    except ObjectDoesNotExist:
        # Not there yet -- fall through and create it.
        pass
    ContentType = apps.get_model('contenttypes.ContentType')
    page_content_type = ContentType.objects.get_for_model(page_cls)
    # Re-fetch the parent as its most specific class so depth/path are right.
    parent_page = get_page(parent_page.specific_class, parent_page.slug)
    page = page_cls.objects.create(
        title=title,
        slug=slug,
        depth=parent_page.depth + 1,
        path=get_free_path(apps, parent_page),
        content_type=page_content_type,
        live=live,
        **kwargs
    )
    # Keep treebeard's cached child count in sync by hand.
    parent_page.numchild += 1
    parent_page.save()
    return page
def is_page(page_or_revision):
    """Distinguish a live Page from a PageRevision.

    Revisions carry their serialized state in a ``content_json`` attribute;
    pages do not.
    """
    has_serialized_content = hasattr(page_or_revision, 'content_json')
    return not has_serialized_content
def get_stream_data(page_or_revision, field_name):
    """Fetch the raw stream-field data for *field_name*.

    Live pages expose it directly on the field; revisions store the whole
    page serialized as JSON in ``content_json``, so two decode steps are
    needed (the revision blob, then the field's own JSON, defaulting to an
    empty list when the field is absent).
    """
    if not hasattr(page_or_revision, 'content_json'):  # a live page
        return getattr(page_or_revision, field_name).stream_data
    content = json.loads(page_or_revision.content_json)
    return json.loads(content.get(field_name, "[]"))
def set_stream_data(page_or_revision, field_name, stream_data, commit=True):
    """Write *stream_data* back into *field_name* on a page or a revision.

    Pages get a lazy StreamValue assigned to the field; revisions get the
    data re-serialized into their ``content_json`` blob. The object is
    saved unless ``commit`` is False.
    """
    if is_page(page_or_revision):
        block_def = getattr(page_or_revision, field_name).stream_block
        setattr(page_or_revision, field_name,
                StreamValue(block_def, stream_data, is_lazy=True))
    else:
        content = json.loads(page_or_revision.content_json)
        content[field_name] = json.dumps(stream_data)
        page_or_revision.content_json = json.dumps(content)
    if commit:
        page_or_revision.save()
def migrate_stream_data(page_or_revision, block_path, stream_data, mapper):
    """Apply *mapper* to every block in *stream_data* addressed by *block_path*.

    ``block_path`` is a block-type name or a sequence of nested names; the
    mapper is called as ``mapper(page_or_revision, value)`` on each matching
    leaf value. Matching fields are updated in place. Returns the (mutated)
    stream data and a flag saying whether anything changed.
    """
    if isinstance(block_path, str):
        block_path = [block_path]
    if not block_path:
        return stream_data, False
    target, remainder = block_path[0], block_path[1:]
    changed = False
    for field in stream_data:
        if field['type'] != target:
            continue
        if remainder:
            # Descend into nested blocks.
            new_value, touched = migrate_stream_data(
                page_or_revision, remainder, field['value'], mapper)
        else:
            new_value, touched = mapper(page_or_revision, field['value']), True
        if touched:
            field['value'] = new_value
            changed = True
    return stream_data, changed
def migrate_stream_field(page_or_revision, field_name, block_path, mapper):
    """Run *mapper* over one StreamField of a page or revision, writing the
    result back only when something actually changed."""
    data = get_stream_data(page_or_revision, field_name)
    data, changed = migrate_stream_data(
        page_or_revision, block_path, data, mapper)
    if changed:
        set_stream_data(page_or_revision, field_name, data)
@transaction.atomic
def migrate_page_types_and_fields(apps, page_types_and_fields, mapper):
    """ Migrate Wagtail StreamFields using the given mapper function.
    page_types_and_fields should be a list of 4-tuples
    providing ('app', 'PageType', 'field_name', ('block_path', )).
    'field_name' is the field on the 'PageType' model.
    'block path' is a tuple containing block names to access the
    StreamBlock type to migrate.

    Runs inside a single transaction; both live pages and all of their
    stored revisions are migrated."""
    for app, page_type, field_name, block_path in page_types_and_fields:
        page_model = apps.get_model(app, page_type)
        revision_model = apps.get_model('wagtailcore.PageRevision')
        for page in page_model.objects.all():
            # Migrate the live page first, then every stored revision
            # (newest first).
            migrate_stream_field(page, field_name, block_path, mapper)
            revisions = revision_model.objects.filter(
                page=page).order_by('-id')
            for revision in revisions:
                migrate_stream_field(revision, field_name, block_path, mapper)
| [
"json.dumps",
"json.loads",
"treebeard.mp_tree.MP_Node._get_path",
"wagtail.wagtailcore.blocks.StreamValue"
] | [((538, 632), 'treebeard.mp_tree.MP_Node._get_path', 'MP_Node._get_path', (['parent_page.path', '(parent_page.depth + 1)', '(parent_page.numchild + offset)'], {}), '(parent_page.path, parent_page.depth + 1, parent_page.\n numchild + offset)\n', (555, 632), False, 'from treebeard.mp_tree import MP_Node\n'), ((2104, 2145), 'json.loads', 'json.loads', (['page_or_revision.content_json'], {}), '(page_or_revision.content_json)\n', (2114, 2145), False, 'import json\n'), ((2216, 2233), 'json.loads', 'json.loads', (['field'], {}), '(field)\n', (2226, 2233), False, 'import json\n'), ((2637, 2689), 'wagtail.wagtailcore.blocks.StreamValue', 'StreamValue', (['stream_block', 'stream_data'], {'is_lazy': '(True)'}), '(stream_block, stream_data, is_lazy=True)\n', (2648, 2689), False, 'from wagtail.wagtailcore.blocks import StreamValue\n'), ((2787, 2828), 'json.loads', 'json.loads', (['page_or_revision.content_json'], {}), '(page_or_revision.content_json)\n', (2797, 2828), False, 'import json\n'), ((2868, 2891), 'json.dumps', 'json.dumps', (['stream_data'], {}), '(stream_data)\n', (2878, 2891), False, 'import json\n'), ((2932, 2960), 'json.dumps', 'json.dumps', (['revision_content'], {}), '(revision_content)\n', (2942, 2960), False, 'import json\n')] |
from collections import OrderedDict
from unittest import TestCase
from frozenordereddict import FrozenOrderedDict
class TestFrozenOrderedDict(TestCase):
    """Behavioural checks for FrozenOrderedDict construction, immutability
    and the various copy() call styles."""

    ITEMS_1 = (
        ("b", 2),
        ("a", 1),
    )
    ITEMS_2 = (
        ("d", 4),
        ("c", 3),
    )
    ODICT_1 = OrderedDict(ITEMS_1)
    ODICT_2 = OrderedDict(ITEMS_2)

    def test_init_from_items(self):
        frozen = FrozenOrderedDict(self.ITEMS_1)
        self.assertEqual(list(self.ITEMS_1), list(frozen.items()))

    def test_init_from_ordereddict(self):
        frozen = FrozenOrderedDict(self.ODICT_1)
        self.assertEqual(list(self.ITEMS_1), list(frozen.items()))

    def test_setitem(self):
        frozen = FrozenOrderedDict()
        with self.assertRaises(TypeError):
            frozen[1] = "b"

    def test_delitem(self):
        frozen = FrozenOrderedDict(self.ITEMS_1)
        with self.assertRaises(TypeError):
            del frozen[1]

    def test_copy_no_items(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        clone = original.copy()
        self.assertNotEqual(id(original), id(clone))
        self.assertEqual(original.items(), clone.items())
        self.assertEqual(repr(original), repr(clone))
        self.assertEqual(len(original), len(clone))
        self.assertEqual(hash(original), hash(clone))

    def test_copy_tuple_items(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        extended = original.copy(self.ITEMS_2)
        self.assertNotEqual(id(original), id(extended))
        self.assertEqual(list(original.items()) + list(self.ITEMS_2),
                         list(extended.items()))

    def test_copy_ordereddict_items(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        extended = original.copy(self.ODICT_2)
        self.assertNotEqual(id(original), id(extended))
        self.assertEqual(list(original.items()) + list(self.ITEMS_2),
                         list(extended.items()))

    def test_copy_kwargs(self):
        original = FrozenOrderedDict(self.ITEMS_1)
        extended = original.copy(**self.ODICT_2)
        self.assertNotEqual(id(original), id(extended))
        self.assertEqual(
            dict(list(original.items()) + list(self.ODICT_2.items())),
            extended)
| [
"collections.OrderedDict",
"frozenordereddict.FrozenOrderedDict"
] | [((287, 307), 'collections.OrderedDict', 'OrderedDict', (['ITEMS_1'], {}), '(ITEMS_1)\n', (298, 307), False, 'from collections import OrderedDict\n'), ((322, 342), 'collections.OrderedDict', 'OrderedDict', (['ITEMS_2'], {}), '(ITEMS_2)\n', (333, 342), False, 'from collections import OrderedDict\n'), ((394, 425), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ITEMS_1'], {}), '(self.ITEMS_1)\n', (411, 425), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((547, 578), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ODICT_1'], {}), '(self.ODICT_1)\n', (564, 578), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((1015, 1046), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ITEMS_1'], {}), '(self.ITEMS_1)\n', (1032, 1046), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((1374, 1405), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ITEMS_1'], {}), '(self.ITEMS_1)\n', (1391, 1405), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((1639, 1670), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ITEMS_1'], {}), '(self.ITEMS_1)\n', (1656, 1670), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((1893, 1924), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ITEMS_1'], {}), '(self.ITEMS_1)\n', (1910, 1924), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((710, 729), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', ([], {}), '()\n', (727, 729), False, 'from frozenordereddict import FrozenOrderedDict\n'), ((866, 897), 'frozenordereddict.FrozenOrderedDict', 'FrozenOrderedDict', (['self.ITEMS_1'], {}), '(self.ITEMS_1)\n', (883, 897), False, 'from frozenordereddict import FrozenOrderedDict\n')] |
import random
import unittest
from serial.serialutil import SerialException
from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string
from tests.base import BaseTestCase
class DummySerialTest(BaseTestCase):
    """Unit tests for DummySerial covering open/close semantics, reads,
    writes and buffer behaviour.

    The original version repeated the same random-data and port-creation
    boilerplate in every test; that setup now lives in the private helpers
    ``_random_response_pair`` and ``_make_port``.
    """

    def setUp(self):
        """Create a fresh random serial port name and baud rate per test."""
        self.random_serial_port = random_string()
        self.random_baudrate = random_string(5, constants.NUMBERS)

    def _make_port(self, responses=None):
        """Build a DummySerial on the random port, optionally with canned
        write->response mappings."""
        if responses is None:
            return DummySerial(port=self.random_serial_port,
                               baudrate=self.random_baudrate)
        return DummySerial(port=self.random_serial_port,
                           baudrate=self.random_baudrate,
                           responses=responses)

    def _random_response_pair(self):
        """Return two random byte strings usable as a write/response pair."""
        request = random_string(random.randint(0, 1024)).encode()
        response = random_string(random.randint(0, 1024)).encode()
        return request, response

    def test_write_closed_port(self):
        """Writing to a closed DummySerial port raises SerialException."""
        request, response = self._random_response_pair()
        port = self._make_port({request: response})
        self.assertTrue(port._isOpen)  # pylint: disable=W0212
        port.close()
        self.assertFalse(port._isOpen)  # pylint: disable=W0212
        with self.assertRaises(SerialException):
            port.write(request)
        self.assertFalse(port._isOpen)  # pylint: disable=W0212

    def test_write_and_read_to_closed_port(self):
        """Reading from a closed DummySerial port raises SerialException."""
        request, response = self._random_response_pair()
        port = self._make_port({request: response})
        self.assertTrue(port._isOpen)  # pylint: disable=W0212
        port.write(request)
        port.close()
        self.assertFalse(port._isOpen)  # pylint: disable=W0212
        with self.assertRaises(SerialException):
            port.read(len(response))
        self.assertFalse(port._isOpen)  # pylint: disable=W0212

    def test_repr_port(self):
        """The port's string representation mentions the port name."""
        request, response = self._random_response_pair()
        port = self._make_port({request: response})
        self.assertTrue(self.random_serial_port in str(port))

    def test_open_port(self):
        """Opening an open port raises; a closed one can be reopened."""
        request, response = self._random_response_pair()
        port = self._make_port({request: response})
        self.assertTrue(port._isOpen)  # pylint: disable=W0212
        with self.assertRaises(SerialException):
            port.open()
        port.close()
        self.assertFalse(port._isOpen)  # pylint: disable=W0212
        port.open()
        self.assertTrue(port._isOpen)  # pylint: disable=W0212

    def test_close(self):
        """close() returns a falsy value and marks the port closed."""
        request, response = self._random_response_pair()
        port = self._make_port({request: response})
        self.assertTrue(port._isOpen)  # pylint: disable=W0212
        self.assertFalse(port.close())
        self.assertFalse(port._isOpen)  # pylint: disable=W0212

    def test_write_and_read_no_data_present(self):  # pylint: disable=C0103
        """Reading past the buffered response yields NO_DATA_PRESENT."""
        payload = random_bytes(random.randint(256, 1024))
        chunk_size = random.randint(1, 16)  # orders of magnitude smaller
        port = self._make_port()
        port.write(payload)
        while 1:
            port.read(chunk_size)  # discard this data
            if not port.in_waiting:
                empty_data = port.read(chunk_size)
                break
        self.assertEqual(constants.NO_DATA_PRESENT, empty_data)

    def test_writing_non_bytes_data_raises_type_error(self):
        """Writing str (not bytes) data raises TypeError."""
        port = self._make_port()
        with self.assertRaises(TypeError):
            port.write(random_string(random.randint(256, 1024)))

    def test_negative_read_size(self):
        """Reading a negative number of bytes raises DSIOError."""
        port = self._make_port()
        port.write(random_bytes(random.randint(256, 1024)))
        with self.assertRaises(exceptions.DSIOError):
            port.read(-1)

    def test_timeout_with_large_read_size(self):
        """Asking for more bytes than buffered returns only what exists."""
        length = random.randint(256, 1024)
        payload = random_bytes(length)
        port = self._make_port({payload: payload})
        port.write(payload)
        result = port.read(length + 2)
        self.assertEqual(len(result), length)

    def test_partial_read(self):
        """A short read leaves the remaining bytes waiting in the buffer."""
        length = random.randint(256, 1024)
        payload = random_bytes(length)
        port = self._make_port({payload: payload})
        port.write(payload)
        result = port.read(length - 2)
        self.assertEqual(len(result), length - 2)
        self.assertEqual(port.in_waiting, 2)

    def test_write_bytearray(self):
        """bytearray payloads are accepted by write()."""
        port = self._make_port()
        port.write(bytearray(random_bytes(random.randint(256, 1024))))
ds.write(rand_write_bytearray)
if __name__ == "__main__":
unittest.main()
| [
"data_gateway.dummy_serial.random_bytes",
"data_gateway.dummy_serial.random_string",
"data_gateway.dummy_serial.DummySerial",
"unittest.main",
"random.randint"
] | [((7544, 7559), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7557, 7559), False, 'import unittest\n'), ((446, 461), 'data_gateway.dummy_serial.random_string', 'random_string', ([], {}), '()\n', (459, 461), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((493, 528), 'data_gateway.dummy_serial.random_string', 'random_string', (['(5)', 'constants.NUMBERS'], {}), '(5, constants.NUMBERS)\n', (506, 528), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((652, 675), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (666, 675), False, 'import random\n'), ((702, 725), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (716, 725), False, 'import random\n'), ((881, 1003), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_str1: rand_write_str2}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n responses={rand_write_str1: rand_write_str2})\n', (892, 1003), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((1511, 1534), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (1525, 1534), False, 'import random\n'), ((1561, 1584), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (1575, 1584), False, 'import random\n'), ((1740, 1862), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_str1: rand_write_str2}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n responses={rand_write_str1: rand_write_str2})\n', (1751, 1862), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, 
random_string\n'), ((2368, 2391), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (2382, 2391), False, 'import random\n'), ((2418, 2441), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (2432, 2441), False, 'import random\n'), ((2597, 2719), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_str1: rand_write_str2}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n responses={rand_write_str1: rand_write_str2})\n', (2608, 2719), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((2927, 2950), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (2941, 2950), False, 'import random\n'), ((2977, 3000), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (2991, 3000), False, 'import random\n'), ((3156, 3278), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_str1: rand_write_str2}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n responses={rand_write_str1: rand_write_str2})\n', (3167, 3278), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((3745, 3768), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (3759, 3768), False, 'import random\n'), ((3795, 3818), 'random.randint', 'random.randint', (['(0)', '(1024)'], {}), '(0, 1024)\n', (3809, 3818), False, 'import random\n'), ((3974, 4096), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_str1: rand_write_str2}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n responses={rand_write_str1: 
rand_write_str2})\n', (3985, 4096), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((4476, 4501), 'random.randint', 'random.randint', (['(256)', '(1024)'], {}), '(256, 1024)\n', (4490, 4501), False, 'import random\n'), ((4527, 4548), 'random.randint', 'random.randint', (['(1)', '(16)'], {}), '(1, 16)\n', (4541, 4548), False, 'import random\n'), ((4618, 4647), 'data_gateway.dummy_serial.random_bytes', 'random_bytes', (['rand_write_len1'], {}), '(rand_write_len1)\n', (4630, 4647), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((4671, 4743), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate'}), '(port=self.random_serial_port, baudrate=self.random_baudrate)\n', (4682, 4743), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((5236, 5261), 'random.randint', 'random.randint', (['(256)', '(1024)'], {}), '(256, 1024)\n', (5250, 5261), False, 'import random\n'), ((5290, 5319), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len'], {}), '(rand_write_len)\n', (5303, 5319), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((5334, 5406), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate'}), '(port=self.random_serial_port, baudrate=self.random_baudrate)\n', (5345, 5406), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((5661, 5686), 'random.randint', 'random.randint', (['(256)', '(1024)'], {}), '(256, 1024)\n', (5675, 5686), False, 'import random\n'), ((5714, 5742), 'data_gateway.dummy_serial.random_bytes', 'random_bytes', (['rand_write_len'], {}), 
'(rand_write_len)\n', (5726, 5742), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((5757, 5829), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate'}), '(port=self.random_serial_port, baudrate=self.random_baudrate)\n', (5768, 5829), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((6125, 6150), 'random.randint', 'random.randint', (['(256)', '(1024)'], {}), '(256, 1024)\n', (6139, 6150), False, 'import random\n'), ((6178, 6206), 'data_gateway.dummy_serial.random_bytes', 'random_bytes', (['rand_write_len'], {}), '(rand_write_len)\n', (6190, 6206), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((6221, 6345), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_bytes: rand_write_bytes}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n responses={rand_write_bytes: rand_write_bytes})\n', (6232, 6345), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((6664, 6689), 'random.randint', 'random.randint', (['(256)', '(1024)'], {}), '(256, 1024)\n', (6678, 6689), False, 'import random\n'), ((6717, 6745), 'data_gateway.dummy_serial.random_bytes', 'random_bytes', (['rand_write_len'], {}), '(rand_write_len)\n', (6729, 6745), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((6760, 6884), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate', 'responses': '{rand_write_bytes: rand_write_bytes}'}), '(port=self.random_serial_port, baudrate=self.random_baudrate,\n 
responses={rand_write_bytes: rand_write_bytes})\n', (6771, 6884), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((7253, 7278), 'random.randint', 'random.randint', (['(256)', '(1024)'], {}), '(256, 1024)\n', (7267, 7278), False, 'import random\n'), ((7364, 7436), 'data_gateway.dummy_serial.DummySerial', 'DummySerial', ([], {'port': 'self.random_serial_port', 'baudrate': 'self.random_baudrate'}), '(port=self.random_serial_port, baudrate=self.random_baudrate)\n', (7375, 7436), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((7320, 7348), 'data_gateway.dummy_serial.random_bytes', 'random_bytes', (['rand_write_len'], {}), '(rand_write_len)\n', (7332, 7348), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((752, 782), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len1'], {}), '(rand_write_len1)\n', (765, 782), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((818, 848), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len2'], {}), '(rand_write_len2)\n', (831, 848), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((1611, 1641), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len1'], {}), '(rand_write_len1)\n', (1624, 1641), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((1677, 1707), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len2'], {}), '(rand_write_len2)\n', (1690, 1707), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((2468, 2498), 'data_gateway.dummy_serial.random_string', 'random_string', 
(['rand_write_len1'], {}), '(rand_write_len1)\n', (2481, 2498), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((2534, 2564), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len2'], {}), '(rand_write_len2)\n', (2547, 2564), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((3027, 3057), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len1'], {}), '(rand_write_len1)\n', (3040, 3057), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((3093, 3123), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len2'], {}), '(rand_write_len2)\n', (3106, 3123), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((3845, 3875), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len1'], {}), '(rand_write_len1)\n', (3858, 3875), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n'), ((3911, 3941), 'data_gateway.dummy_serial.random_string', 'random_string', (['rand_write_len2'], {}), '(rand_write_len2)\n', (3924, 3941), False, 'from data_gateway.dummy_serial import DummySerial, constants, exceptions, random_bytes, random_string\n')] |
# -*- coding: utf-8 -*-
"""
Created on %(date)s
@author: %Christian
"""
"""
#BASE +BN层
#dropout改为0.15
"""
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
import paddlenlp as ppnlp
class QuestionMatching_base(nn.Layer):
'''
base模型
dropout改为0.15
'''
def __init__(self, pretrained_model, dropout=None, rdrop_coef=0.0):
super().__init__()
self.ptm = pretrained_model
self.dropout = nn.Dropout(dropout if dropout is not None else 0.15)
#线性变换层,Out=XW+b
self.classifier = nn.Linear(self.ptm.config["hidden_size"], 2)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None,
do_evaluate=False):
_, cls_embedding1 = self.ptm(input_ids, token_type_ids, position_ids,
attention_mask)
cls_embedding1 = self.dropout(cls_embedding1)
logits1 = self.classifier(cls_embedding1)
kl_loss = 0.0
return logits1, kl_loss
class QuestionMatching_BN(nn.Layer):
'''
base模型+BN
dropout改为0.15
'''
def __init__(self, pretrained_model, dropout=None, rdrop_coef=0.0):
super().__init__()
self.ptm = pretrained_model
self.dropout = nn.Dropout(dropout if dropout is not None else 0.15)
self.linear=nn.Linear(self.ptm.config["hidden_size"], self.ptm.config["max_position_embeddings"])
self.batchnorm1d=nn.BatchNorm1D(self.ptm.config["max_position_embeddings"])
self.relu=nn.ReLU()
# self.relu=nn.GELU()
#线性变换层,Out=XW+b
self.classifier = nn.Linear(self.ptm.config["max_position_embeddings"], 2)
def forward(self,
input_ids,
token_type_ids=None,
position_ids=None,
attention_mask=None,
do_evaluate=False):
#pretrained_model返回
#sequence_output[batch_size, sequence_length, hidden_size],
#pooled_output [batch_size, hidden_size] The output of first token ([CLS]) in sequence
_, cls_embedding1 = self.ptm(input_ids,
token_type_ids, #用于区分当前token是属于哪个句子的
position_ids,#明确每个token是在什么位置上,从0到最后依次编号。
attention_mask,#指定对哪些词进行self-Attention操作(padding的位置是不需要参与attention计算的)
)
cls_embedding1 = self.dropout(cls_embedding1)
cls_embedding1=self.linear(cls_embedding1)
cls_embedding1=self.batchnorm1d(cls_embedding1)
cls_embedding1=self.relu(cls_embedding1)
cls_embedding1=self.dropout(cls_embedding1)
logits1 = self.classifier(cls_embedding1)
kl_loss = 0.0
return logits1, kl_loss
| [
"paddle.nn.BatchNorm1D",
"paddle.nn.Dropout",
"paddle.nn.Linear",
"paddle.nn.ReLU"
] | [((475, 527), 'paddle.nn.Dropout', 'nn.Dropout', (['(dropout if dropout is not None else 0.15)'], {}), '(dropout if dropout is not None else 0.15)\n', (485, 527), True, 'import paddle.nn as nn\n'), ((582, 626), 'paddle.nn.Linear', 'nn.Linear', (["self.ptm.config['hidden_size']", '(2)'], {}), "(self.ptm.config['hidden_size'], 2)\n", (591, 626), True, 'import paddle.nn as nn\n'), ((1391, 1443), 'paddle.nn.Dropout', 'nn.Dropout', (['(dropout if dropout is not None else 0.15)'], {}), '(dropout if dropout is not None else 0.15)\n', (1401, 1443), True, 'import paddle.nn as nn\n'), ((1471, 1561), 'paddle.nn.Linear', 'nn.Linear', (["self.ptm.config['hidden_size']", "self.ptm.config['max_position_embeddings']"], {}), "(self.ptm.config['hidden_size'], self.ptm.config[\n 'max_position_embeddings'])\n", (1480, 1561), True, 'import paddle.nn as nn\n'), ((1583, 1641), 'paddle.nn.BatchNorm1D', 'nn.BatchNorm1D', (["self.ptm.config['max_position_embeddings']"], {}), "(self.ptm.config['max_position_embeddings'])\n", (1597, 1641), True, 'import paddle.nn as nn\n'), ((1661, 1670), 'paddle.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (1668, 1670), True, 'import paddle.nn as nn\n'), ((1756, 1812), 'paddle.nn.Linear', 'nn.Linear', (["self.ptm.config['max_position_embeddings']", '(2)'], {}), "(self.ptm.config['max_position_embeddings'], 2)\n", (1765, 1812), True, 'import paddle.nn as nn\n')] |
from functools import partialmethod
import pandas as pd
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
import sqlite3
import click
import json
import pkg_resources
from itertools import combinations
from q2_mlab.db.schema import RegressionScore
from q2_mlab.plotting.components import (
Mediator,
ComponentMixin,
Plottable,
ButtonComponent,
ScatterComponent,
SegmentComponent,
DataSourceComponent,
SelectComponent,
)
from bokeh.plotting import figure
from bokeh.transform import factor_cmap
from bokeh.models import (
ColumnDataSource,
CheckboxButtonGroup,
TextInput,
Legend,
LegendItem,
)
from bokeh.models.widgets import (
Div,
)
from bokeh.palettes import (
Category20,
Set3,
)
from bokeh.layouts import column, row
from bokeh.server.server import Server
groups = ['parameters_id', 'dataset', 'target', 'level', 'algorithm']
drop_cols = ['artifact_uuid', 'datetime', 'CV_IDX', 'id']
target_map = {
'age_v2': 'age',
'BL_AGE': 'age',
'age': 'age',
'bmi_v2': 'bmi',
'BMI': 'bmi',
'bmi': 'bmi'
}
with pkg_resources.resource_stream(
__name__, "standard_deviations.json"
) as f:
TARGET_SD = json.load(f)
def _get_standardized_mae(df_row, norm_dict):
"""
"""
mae = df_row['MAE']
target = df_row['target']
dataset = df_row['dataset']
cv_fold = df_row['CV_IDX']
level = df_row['level']
key = f"({dataset}, {target}, {level}, {cv_fold})"
sd = norm_dict.get(key, 1)
standardized_mae = mae / sd
return standardized_mae
def process_db_df(df):
# remap values for consistency
df['level'] = df['level'].replace('none', 'MG')
df['target'] = df['target'].map(target_map)
df['standardized_MAE'] = df.apply(_get_standardized_mae, axis=1,
args=(TARGET_SD,))
group_stats = df.drop(
drop_cols, axis=1
).groupby(
groups
).agg(
['var', 'mean']
)
group_stats.columns = agg_columns = ['_'.join(col).strip() for
col in group_stats.columns.values]
group_stats.reset_index(inplace=True)
min_by = ['dataset', 'target']
group_mins = group_stats[agg_columns + min_by].groupby(min_by).min()
indices = group_stats[['dataset', 'target']].to_records(
index=False).tolist()
expanded_group_mins = group_mins.loc[indices]
expanded_group_mins.index = group_stats.index
relative_group_stats = (group_stats / expanded_group_mins)[agg_columns]
relative_group_stats.columns = ['relative_' + col for
col in relative_group_stats]
group_stats = group_stats.join(relative_group_stats)
return group_stats
def find_segments(group_stats, across, groupby):
"""
TODO makes some assumptions about the guarantees on pairs when there are
more than 2 categories
"""
seg_cols = groupby.copy()
seg_cols.remove(across)
group_counts = group_stats[seg_cols + [across]].groupby(seg_cols).count()
max_n_pairs = group_counts[across].max()
category_values = group_stats[across].unique()
where = (group_counts[across] == max_n_pairs)
keep_repeats = group_stats.set_index(seg_cols).loc[where]
keep_repeats_parts = []
for i, sub_group in enumerate(category_values):
where = keep_repeats[across] == sub_group
keep_repeats_parts.append(keep_repeats.loc[where])
keep_repeats_parts[i].columns = [col + '_' + sub_group for
col in keep_repeats_parts[i].columns]
segment_df = pd.concat(keep_repeats_parts,
axis=1
)
return segment_df
class TextInputComponent(ComponentMixin):
def __init__(self, text_input_kwargs):
super().__init__()
self.text_input = TextInput(**text_input_kwargs)
self.layout = self.text_input
self.input_text_callback = None
def set_mediator(self, mediator):
super().set_mediator(mediator)
event_name = 'text-change'
text_change = self.make_attr_old_new_callback(event_name)
self.input_text_callback = text_change
self.text_input.on_change('value', self.input_text_callback)
class CheckboxButtonGroupComponent(ComponentMixin):
def __init__(self, checkbox_kwargs):
super().__init__()
self.checkbox = CheckboxButtonGroup(**checkbox_kwargs)
self.checkbox_change = None
self.layout = self.checkbox
def set_mediator(self, mediator):
super().set_mediator(mediator)
event_name = 'checkbox-change'
self.checkbox_change = self.make_attr_old_new_callback(event_name)
self.checkbox.on_change('active', self.checkbox_change)
class SegmentComponentExt(SegmentComponent):
def redraw(self, x, y, seg_0, seg_1, data):
self.data_source.data = data
self.segment.glyph.x0 = '_'.join([x, seg_0])
self.segment.glyph.x1 = '_'.join([x, seg_1])
self.segment.glyph.y0 = '_'.join([y, seg_0])
self.segment.glyph.y1 = '_'.join([y, seg_1])
palettes = {
'Category20': Category20,
'Set3': Set3,
}
DEFAULTS = {
'segment_variable': 'dataset',
'x': 'MAE_mean',
'y': 'MAE_var',
'x_axis_type': 'log',
'y_axis_type': 'log',
'cmap': 'Category20'
}
class AlgorithmScatter(Mediator, Plottable):
def __init__(self, x, y, engine, cmap=None):
super().__init__()
self.x = x
self.y = y
self.engine = engine
self.data = None
self.scatter = None
if cmap is None:
self.cmap = Category20
else:
self.cmap = cmap
self.line_segment_variable = DEFAULTS['segment_variable']
self.data_raw = None
self.data_static = None
self.data = None
self.seg_0, self.seg_1 = None, None
self.scatter_source = None
self.x_axis_type = DEFAULTS['x_axis_type']
self.y_axis_type = DEFAULTS['y_axis_type']
self.axis_types = ['linear', 'log']
self.line_segment_pairs = {
'dataset': ['finrisk', 'sol'],
'level': ['16S', 'MG'],
'target': ['age', 'bmi'],
}
self.scatter_tools = 'pan,wheel_zoom,box_select,lasso_select,'\
'reset,box_zoom,save'
self.segment = None
self.segment_source = None
self.segment_button = None
self.segment_variable_select = None
self.x_var_select = None
self.y_var_select = None
self.dataset_bars = None
self.dataset_bars_source = None
self.dataset_bars_figure = None
self.level_bars = None
self.level_bars_source = None
self.level_bars_figure = None
self.target_bars = None
self.target_bars_source = None
self.target_bars_figure = None
self.query_button = None
self.query_input = None
self.query_row = None
self.layout = None
def notify(self,
component,
event_name,
*args, **kwargs,
):
if (event_name == 'dropdown-select') and \
(component is self.x_var_select):
self.x = component.select.value
self.scatter.scatter.glyph.x = self.x
self.scatter.layout.xaxis.axis_label = self.x
self.segment.segment.glyph.x0 = '_'.join([self.x, self.seg_0])
self.segment.segment.glyph.x1 = '_'.join([self.x, self.seg_1])
if (event_name == 'dropdown-select') and \
(component is self.y_var_select):
self.y = component.select.value
self.scatter.scatter.glyph.y = self.y
self.scatter.layout.yaxis.axis_label = self.y
self.segment.segment.glyph.y0 = '_'.join([self.y, self.seg_0])
self.segment.segment.glyph.y1 = '_'.join([self.y, self.seg_1])
if (event_name == 'selected-indices') and \
(component is self.scatter_source):
selected_indices = self.scatter_source.data_source.selected.indices
self.dataset_bars_source.data = self.get_dataset_counts(
indices=selected_indices,
)
self.level_bars_source.data = self.get_level_counts(
indices=selected_indices,
)
self.target_bars_source.data = self.get_target_counts(
indices=selected_indices,
)
if (event_name == 'button-click') and \
(component is self.query_button):
df = self.handle_query(self.query_input.text_input.value)
# need to update self.data due to how the hbars are currently
# written
self.data = df
self.scatter_source.data_source.data = df.to_dict(orient='list')
segment_source = find_segments(
df,
across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level', 'dataset',
'target'],
)
self.segment.segment.data_source.data = segment_source.to_dict(
orient='list',
)
selected_indices = self.scatter_source.data_source.selected.indices
self.dataset_bars_source.data = self.get_dataset_counts(
indices=selected_indices,
)
self.level_bars_source.data = self.get_level_counts(
indices=selected_indices,
)
self.target_bars_source.data = self.get_target_counts(
indices=selected_indices,
)
if (event_name == 'checkbox-change') and \
(component is self.segment_button):
active = self.segment_button.checkbox.active
if 0 in active:
self.segment.segment.visible = True
else:
self.segment.segment.visible = False
if (event_name == 'dropdown-select') and \
(component is self.segment_variable_select):
new_segment_variable = self.segment_variable_select.select.value
self.line_segment_variable = new_segment_variable
new_segment_data = find_segments(
self.data,
across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level', 'dataset',
'target']
)
line_segment_ends = self.line_segment_pairs[new_segment_variable]
self.segment.redraw(
self.x,
self.y,
*line_segment_ends,
new_segment_data
)
def plot(self):
self.data_raw = pd.read_sql_table(RegressionScore.__tablename__,
con=self.engine,
)
# TODO this is temporary
self.data_raw = self.data_raw.loc[
self.data_raw['algorithm'] != 'MLPRegressor'
]
self.data = df = process_db_df(self.data_raw)
self.data_static = df
self.seg_0, self.seg_1 = self.line_segment_pairs[
self.line_segment_variable
]
# ## Data Setup
scatter_source = ColumnDataSource(df)
self.scatter_source = DataSourceComponent(scatter_source)
self.scatter_source.set_mediator(self)
# ## General Setup
algorithms = sorted(df['algorithm'].unique())
levels = sorted(df['level'].unique())
datasets = sorted(df['dataset'].unique())
targets = sorted(df['target'].unique())
plot_width = 600
self.line_segment_pairs = {
'level': ['16S', 'MG'],
'target': ['age', 'bmi'],
}
dataset_combinations = combinations(["finrisk", "imsms", "sol"], r=2)
for dataset_pair in dataset_combinations:
d1, d2 = dataset_pair
self.line_segment_pairs[f"{d1}-to-{d2}"] = [d1, d2]
categorical_variables = ['parameters_id', 'target', 'algorithm',
'level', 'dataset']
plottable_variables = list(sorted(
df.columns.drop(categorical_variables)
))
color_scheme = self.cmap[len(algorithms)]
algorithm_cmap = factor_cmap('algorithm', palette=color_scheme,
factors=algorithms,
)
figure_kwargs = dict(x_axis_type=self.x_axis_type,
y_axis_type=self.y_axis_type,
plot_height=400,
tools=self.scatter_tools,
output_backend='webgl',
)
# ## Segment Plot
segment_source = ColumnDataSource(
find_segments(self.data, across=self.line_segment_variable,
groupby=['parameters_id', 'algorithm', 'level',
'dataset']
)
)
self.segment_source = DataSourceComponent(scatter_source)
self.segment = SegmentComponentExt(data_source=segment_source)
segment_kwargs = {
'x0': self.x + '_' + self.seg_0,
'x1': self.x + '_' + self.seg_1,
'y0': self.y + '_' + self.seg_0,
'y1': self.y + '_' + self.seg_1,
'line_width': 0.1,
'line_color': '#A9A9A9',
}
self.segment.plot(
figure_kwargs=figure_kwargs,
segment_kwargs=segment_kwargs,
)
# ## Segment Visible button
self.segment_button = CheckboxButtonGroupComponent(
checkbox_kwargs=dict(
labels=['Segments'],
active=[0],
)
)
self.segment_button.set_mediator(self)
self.segment_variable_select = SelectComponent(
select_kwargs=dict(
value=self.line_segment_variable,
title='Segment Variable',
options=list(self.line_segment_pairs.keys()),
)
)
self.segment_variable_select.set_mediator(self)
# ## Scatter plot
self.scatter = ScatterComponent()
scatter_kwargs = dict(x=self.x, y=self.y, source=scatter_source,
# legend_field='algorithm',
fill_color=algorithm_cmap,
name='scatter',
)
self.scatter.plot(
figure=self.segment.layout,
scatter_kwargs=scatter_kwargs,
)
scatter = self.scatter.layout
scatter.toolbar.logo = None
scatter.xaxis.axis_label = self.x
scatter.yaxis.axis_label = self.y
self.scatter.scatter.glyph.line_color = 'white'
self.scatter.scatter.glyph.line_width = 0.1
self.scatter.scatter.nonselection_glyph.line_color = 'white'
transform = algorithm_cmap['transform']
legend_fig = figure(outline_line_alpha=0, toolbar_location=None)
legend_items = []
for i, (alg, color) in enumerate(zip(transform.factors,
transform.palette)):
legend_fig.circle(fill_color=color, name=f'circ{i}',
line_color='white',
)
renderers = legend_fig.select(name=f'circ{i}')
legend_item = LegendItem(
label=alg,
renderers=renderers,
)
legend_items.append(legend_item)
legend = Legend(
items=legend_items,
location='top_left',
)
legend_fig.add_layout(legend)
scatter.plot_width = plot_width
scatter.plot_height = 500
# ## Variable Selection
self.x_var_select = SelectComponent(
select_kwargs=dict(
value=self.x,
title='X variable',
options=plottable_variables
)
)
self.x_var_select.set_mediator(self)
x_select = self.x_var_select.select
self.y_var_select = SelectComponent(
select_kwargs=dict(
value=self.y,
title='Y variable',
options=plottable_variables
)
)
self.y_var_select.set_mediator(self)
y_select = self.y_var_select.select
# ## Dataset Stacked Hbars
data_getter = self.get_dataset_counts
self.dataset_bars_source = ColumnDataSource(data_getter())
self.dataset_bars_figure = figure(y_range=datasets, plot_height=100)
self.dataset_bars = self.dataset_bars_figure.hbar_stack(
algorithms, y='dataset',
height=0.9,
color=color_scheme,
source=self.dataset_bars_source,
)
self.dataset_bars_figure.toolbar_location = None
self.dataset_bars_figure.plot_width = plot_width
# ## Level Stacked Hbars
data_getter = self.get_level_counts
self.level_bars_source = ColumnDataSource(data_getter())
self.level_bars_figure = figure(y_range=levels, plot_height=100)
self.level_bars = self.level_bars_figure.hbar_stack(
algorithms, y='level',
height=0.9,
color=color_scheme,
source=self.level_bars_source,
)
self.level_bars_figure.toolbar_location = None
self.level_bars_figure.plot_width = plot_width
# ## Target Stacked Hbars
data_getter = self.get_target_counts
self.target_bars_source = ColumnDataSource(data_getter())
self.target_bars_figure = figure(y_range=targets, plot_height=100)
self.target_bars = self.target_bars_figure.hbar_stack(
algorithms, y='target',
height=0.9,
color=color_scheme,
source=self.target_bars_source,
)
self.target_bars_figure.toolbar_location = None
self.target_bars_figure.plot_width = plot_width
# ## Text input
button_width = 100
self.query_input = TextInputComponent(
text_input_kwargs=dict(
title='Enter query',
width=plot_width - button_width
)
)
self.query_button = ButtonComponent(
button_kwargs=dict(
label='Execute',
width=button_width,
)
)
self.query_button.set_mediator(self)
self.query_row = row(self.query_input.layout,
column(
Div(text="", height=8),
self.query_button.layout,
))
# ## Layout
variable_selection = row(x_select, y_select,
)
segment_selection = row(
self.segment_variable_select.layout,
column(
Div(text="", height=8),
self.segment_button.layout,
)
)
self.layout = row(
column(
self.query_row,
variable_selection,
segment_selection,
row(
scatter,
column(
self.dataset_bars_figure,
self.level_bars_figure,
self.target_bars_figure,
legend_fig,
),
),
),
)
return self
def handle_query(self, text):
if text != '':
df = self.data_static.query(text).reset_index(drop=True)
else:
df = self.data_static
return df
def get_counts_by(self, category, by, indices=None):
# TODO consider switching orientation of counts and by
data = self.subset_selected(indices)
counts = pd.crosstab(data[by], data[category])
# algorithms = list(counts.index.values)
counts_dict = counts.to_dict(orient='list')
levels = sorted(self.data[by].unique())
counts_dict[by] = list(filter(lambda x: x in counts.index, levels))
return counts_dict
def subset_selected(self, indices):
# should handle None and empty list
if not indices:
# might want to grab data from the scatter plot instead
data = self.data
else:
data = self.data.reindex(indices)
return data
get_level_counts = partialmethod(get_counts_by, 'algorithm', 'level')
get_dataset_counts = partialmethod(get_counts_by, 'algorithm', 'dataset')
get_target_counts = partialmethod(get_counts_by, 'algorithm', 'target')
def app(self, doc):
doc.add_root(self.layout)
@click.group('mlab-plotting')
def mlab_plotting():
pass
@mlab_plotting.command()
@click.option(
'--db',
help='Path to SQLite database file.',
type=click.Path(exists=False),
)
@click.option(
'--color-scheme',
help='Color scheme to plot with',
type=click.Choice(
list(palettes.keys()),
),
default=DEFAULTS['cmap'],
)
def metric_scatter(db, color_scheme):
run_app(db, color_scheme)
def run_app(db, color_scheme):
# thanks https://github.com/sqlalchemy/sqlalchemy/issues/4863
def connect():
return sqlite3.connect(f"file:{db}?mode=ro", uri=True)
engine = create_engine("sqlite://", creator=connect)
bkapp = AlgorithmScatter(
DEFAULTS['x'], DEFAULTS['y'],
engine=engine,
cmap=palettes.get(color_scheme),
).plot().app
server = Server({'/': bkapp})
server.start()
server.io_loop.add_callback(server.show, "/")
server.io_loop.start()
| [
"bokeh.transform.factor_cmap",
"q2_mlab.plotting.components.DataSourceComponent",
"bokeh.layouts.column",
"bokeh.layouts.row",
"bokeh.plotting.figure",
"bokeh.models.LegendItem",
"bokeh.models.widgets.Div",
"q2_mlab.plotting.components.ScatterComponent",
"pandas.read_sql_table",
"click.group",
"... | [((20971, 20999), 'click.group', 'click.group', (['"""mlab-plotting"""'], {}), "('mlab-plotting')\n", (20982, 20999), False, 'import click\n'), ((1122, 1189), 'pkg_resources.resource_stream', 'pkg_resources.resource_stream', (['__name__', '"""standard_deviations.json"""'], {}), "(__name__, 'standard_deviations.json')\n", (1151, 1189), False, 'import pkg_resources\n'), ((1218, 1230), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1227, 1230), False, 'import json\n'), ((3637, 3674), 'pandas.concat', 'pd.concat', (['keep_repeats_parts'], {'axis': '(1)'}), '(keep_repeats_parts, axis=1)\n', (3646, 3674), True, 'import pandas as pd\n'), ((20704, 20754), 'functools.partialmethod', 'partialmethod', (['get_counts_by', '"""algorithm"""', '"""level"""'], {}), "(get_counts_by, 'algorithm', 'level')\n", (20717, 20754), False, 'from functools import partialmethod\n'), ((20780, 20832), 'functools.partialmethod', 'partialmethod', (['get_counts_by', '"""algorithm"""', '"""dataset"""'], {}), "(get_counts_by, 'algorithm', 'dataset')\n", (20793, 20832), False, 'from functools import partialmethod\n'), ((20857, 20908), 'functools.partialmethod', 'partialmethod', (['get_counts_by', '"""algorithm"""', '"""target"""'], {}), "(get_counts_by, 'algorithm', 'target')\n", (20870, 20908), False, 'from functools import partialmethod\n'), ((21594, 21637), 'sqlalchemy.create_engine', 'create_engine', (['"""sqlite://"""'], {'creator': 'connect'}), "('sqlite://', creator=connect)\n", (21607, 21637), False, 'from sqlalchemy import create_engine\n'), ((21800, 21820), 'bokeh.server.server.Server', 'Server', (["{'/': bkapp}"], {}), "({'/': bkapp})\n", (21806, 21820), False, 'from bokeh.server.server import Server\n'), ((3892, 3922), 'bokeh.models.TextInput', 'TextInput', ([], {}), '(**text_input_kwargs)\n', (3901, 3922), False, 'from bokeh.models import ColumnDataSource, CheckboxButtonGroup, TextInput, Legend, LegendItem\n'), ((4442, 4480), 'bokeh.models.CheckboxButtonGroup', 
'CheckboxButtonGroup', ([], {}), '(**checkbox_kwargs)\n', (4461, 4480), False, 'from bokeh.models import ColumnDataSource, CheckboxButtonGroup, TextInput, Legend, LegendItem\n'), ((10818, 10883), 'pandas.read_sql_table', 'pd.read_sql_table', (['RegressionScore.__tablename__'], {'con': 'self.engine'}), '(RegressionScore.__tablename__, con=self.engine)\n', (10835, 10883), True, 'import pandas as pd\n'), ((11358, 11378), 'bokeh.models.ColumnDataSource', 'ColumnDataSource', (['df'], {}), '(df)\n', (11374, 11378), False, 'from bokeh.models import ColumnDataSource, CheckboxButtonGroup, TextInput, Legend, LegendItem\n'), ((11409, 11444), 'q2_mlab.plotting.components.DataSourceComponent', 'DataSourceComponent', (['scatter_source'], {}), '(scatter_source)\n', (11428, 11444), False, 'from q2_mlab.plotting.components import Mediator, ComponentMixin, Plottable, ButtonComponent, ScatterComponent, SegmentComponent, DataSourceComponent, SelectComponent\n'), ((11894, 11940), 'itertools.combinations', 'combinations', (["['finrisk', 'imsms', 'sol']"], {'r': '(2)'}), "(['finrisk', 'imsms', 'sol'], r=2)\n", (11906, 11940), False, 'from itertools import combinations\n'), ((12397, 12463), 'bokeh.transform.factor_cmap', 'factor_cmap', (['"""algorithm"""'], {'palette': 'color_scheme', 'factors': 'algorithms'}), "('algorithm', palette=color_scheme, factors=algorithms)\n", (12408, 12463), False, 'from bokeh.transform import factor_cmap\n'), ((13173, 13208), 'q2_mlab.plotting.components.DataSourceComponent', 'DataSourceComponent', (['scatter_source'], {}), '(scatter_source)\n', (13192, 13208), False, 'from q2_mlab.plotting.components import Mediator, ComponentMixin, Plottable, ButtonComponent, ScatterComponent, SegmentComponent, DataSourceComponent, SelectComponent\n'), ((14330, 14348), 'q2_mlab.plotting.components.ScatterComponent', 'ScatterComponent', ([], {}), '()\n', (14346, 14348), False, 'from q2_mlab.plotting.components import Mediator, ComponentMixin, Plottable, ButtonComponent, 
ScatterComponent, SegmentComponent, DataSourceComponent, SelectComponent\n'), ((15144, 15195), 'bokeh.plotting.figure', 'figure', ([], {'outline_line_alpha': '(0)', 'toolbar_location': 'None'}), '(outline_line_alpha=0, toolbar_location=None)\n', (15150, 15195), False, 'from bokeh.plotting import figure\n'), ((15736, 15783), 'bokeh.models.Legend', 'Legend', ([], {'items': 'legend_items', 'location': '"""top_left"""'}), "(items=legend_items, location='top_left')\n", (15742, 15783), False, 'from bokeh.models import ColumnDataSource, CheckboxButtonGroup, TextInput, Legend, LegendItem\n'), ((16750, 16791), 'bokeh.plotting.figure', 'figure', ([], {'y_range': 'datasets', 'plot_height': '(100)'}), '(y_range=datasets, plot_height=100)\n', (16756, 16791), False, 'from bokeh.plotting import figure\n'), ((17299, 17338), 'bokeh.plotting.figure', 'figure', ([], {'y_range': 'levels', 'plot_height': '(100)'}), '(y_range=levels, plot_height=100)\n', (17305, 17338), False, 'from bokeh.plotting import figure\n'), ((17838, 17878), 'bokeh.plotting.figure', 'figure', ([], {'y_range': 'targets', 'plot_height': '(100)'}), '(y_range=targets, plot_height=100)\n', (17844, 17878), False, 'from bokeh.plotting import figure\n'), ((18957, 18980), 'bokeh.layouts.row', 'row', (['x_select', 'y_select'], {}), '(x_select, y_select)\n', (18960, 18980), False, 'from bokeh.layouts import column, row\n'), ((20102, 20139), 'pandas.crosstab', 'pd.crosstab', (['data[by]', 'data[category]'], {}), '(data[by], data[category])\n', (20113, 20139), True, 'import pandas as pd\n'), ((21135, 21159), 'click.Path', 'click.Path', ([], {'exists': '(False)'}), '(exists=False)\n', (21145, 21159), False, 'import click\n'), ((21532, 21579), 'sqlite3.connect', 'sqlite3.connect', (['f"""file:{db}?mode=ro"""'], {'uri': '(True)'}), "(f'file:{db}?mode=ro', uri=True)\n", (21547, 21579), False, 'import sqlite3\n'), ((15584, 15626), 'bokeh.models.LegendItem', 'LegendItem', ([], {'label': 'alg', 'renderers': 'renderers'}), 
'(label=alg, renderers=renderers)\n', (15594, 15626), False, 'from bokeh.models import ColumnDataSource, CheckboxButtonGroup, TextInput, Legend, LegendItem\n'), ((18792, 18814), 'bokeh.models.widgets.Div', 'Div', ([], {'text': '""""""', 'height': '(8)'}), "(text='', height=8)\n", (18795, 18814), False, 'from bokeh.models.widgets import Div\n'), ((19134, 19156), 'bokeh.models.widgets.Div', 'Div', ([], {'text': '""""""', 'height': '(8)'}), "(text='', height=8)\n", (19137, 19156), False, 'from bokeh.models.widgets import Div\n'), ((19447, 19545), 'bokeh.layouts.column', 'column', (['self.dataset_bars_figure', 'self.level_bars_figure', 'self.target_bars_figure', 'legend_fig'], {}), '(self.dataset_bars_figure, self.level_bars_figure, self.\n target_bars_figure, legend_fig)\n', (19453, 19545), False, 'from bokeh.layouts import column, row\n')] |
from AIPS import AIPS
from AIPSTask import AIPSTask
from AIPSData import AIPSImage
from ObitTask import ObitTask
# Generate a Mandelbrot test image with the AIPS MANDL task, then run the
# Obit "Template" task against it; the scratch image is removed afterwards.
AIPS.userno = 103  # AIPS user number for this session
image = AIPSImage('MANDELBROT', 'MANDL', 1, 1)  # (name, class, disk, seq)
mandl = AIPSTask('mandl')
mandl.outdata = image  # MANDL writes its fractal into the image above
mandl.imsize[1:] = [ 512, 512 ]  # 512x512 pixel output
mandl.go()
try:
    # Point Obit's Template task at the image MANDL just produced.
    template = ObitTask('Template')
    template.DataType = 'AIPS'
    template.inName = image.name
    template.inClass = image.klass
    template.inDisk = image.disk
    template.inSeq = image.seq
    template.go()
finally:
    # Always delete the scratch image, even if Template fails.
    image.zap()
| [
"AIPSTask.AIPSTask",
"ObitTask.ObitTask",
"AIPSData.AIPSImage"
] | [((141, 179), 'AIPSData.AIPSImage', 'AIPSImage', (['"""MANDELBROT"""', '"""MANDL"""', '(1)', '(1)'], {}), "('MANDELBROT', 'MANDL', 1, 1)\n", (150, 179), False, 'from AIPSData import AIPSImage\n'), ((189, 206), 'AIPSTask.AIPSTask', 'AIPSTask', (['"""mandl"""'], {}), "('mandl')\n", (197, 206), False, 'from AIPSTask import AIPSTask\n'), ((293, 313), 'ObitTask.ObitTask', 'ObitTask', (['"""Template"""'], {}), "('Template')\n", (301, 313), False, 'from ObitTask import ObitTask\n')] |
import bpy
import bmesh
import numpy
from random import randint
import time
# pointsToVoxels() has been modified from the function generate_blocks() in https://github.com/cagcoach/BlenderPlot/blob/master/blendplot.py
# Some changes to accomodate Blender 2.8's API changes were made,
# and the function has been made much more efficient through creative usage of numpy.
def pointsToVoxels(points, name="VoxelMesh"):
    """Build a Blender mesh object with one unit cube centred on each voxel point.

    Args:
        points: dict mapping view names to (N, 3) numpy arrays of voxel
            centres.  All views are merged and de-duplicated before meshing.
        name: name given to the new Blender object.

    Returns:
        The newly created object, linked into the active collection.
    """
    # Merge the voxels from every view into one array and drop duplicates.
    # (Could later be restructured to colour voxels per source face instead.)
    points = numpy.concatenate(tuple(points.values()))
    points = numpy.unique(points, axis=0)
    print("Number of points:", len(points))

    mesh = bpy.data.meshes.new("mesh")            # new mesh datablock
    obj = bpy.data.objects.new(name, mesh)        # object wrapping the mesh
    bpy.context.collection.objects.link(obj)      # put the object into the scene
    bpy.context.view_layer.objects.active = obj
    obj.select_set(state=True)                    # select object
    mesh = obj.data
    bm = bmesh.new()

    # Offsets of a unit cube's 8 corners relative to its centre:
    #                     0          1          2         3         4          5         6        7
    block = numpy.array([[-1,-1,-1],[-1,-1,1],[-1,1,-1],[-1,1,1],[1,-1,-1],[1,-1,1],[1,1,-1],[1,1,1]]).astype(float)
    block *= 0.5

    print("Creating vertices...")
    # Vectorised corner generation: duplicate every point 8 times
    # (numpy.tile makes each row 24 floats), reshape back to (N, 8, 3),
    # then add the corner offsets in one broadcasted operation.
    pointsTiled = numpy.tile(points, (1, 8))
    pointsDuplicated = numpy.reshape(pointsTiled, (pointsTiled.shape[0], 8, 3))
    pointsBlockerized = pointsDuplicated + block
    # Flatten (N, 8, 3) back to a plain (8N, 3) vertex list.
    verts = numpy.reshape(pointsBlockerized,
                          (pointsBlockerized.shape[0] * pointsBlockerized.shape[1], 3))

    printAfterCount = 100000   # progress report granularity
    nextThreshold = 0
    pointsDone = 0
    for v in verts:
        bm.verts.new(v)
        pointsDone += 1
        if pointsDone > nextThreshold:
            print(pointsDone, "vertices have been added so far.")
            nextThreshold += printAfterCount

    print("Calling to_mesh().")
    bm.to_mesh(mesh)
    print("Ensuring lookup table.")
    bm.verts.ensure_lookup_table()

    nextThreshold = 0
    cubesDone = 0
    # Every consecutive group of 8 vertices is one cube; make its 6 quad faces.
    for i in range(0, len(bm.verts), 8):
        bm.faces.new([bm.verts[i+0], bm.verts[i+1], bm.verts[i+3], bm.verts[i+2]])
        bm.faces.new([bm.verts[i+4], bm.verts[i+5], bm.verts[i+1], bm.verts[i+0]])
        bm.faces.new([bm.verts[i+6], bm.verts[i+7], bm.verts[i+5], bm.verts[i+4]])
        bm.faces.new([bm.verts[i+2], bm.verts[i+3], bm.verts[i+7], bm.verts[i+6]])
        bm.faces.new([bm.verts[i+5], bm.verts[i+7], bm.verts[i+3], bm.verts[i+1]])  # top
        bm.faces.new([bm.verts[i+0], bm.verts[i+2], bm.verts[i+6], bm.verts[i+4]])  # bottom
        cubesDone += 1
        if cubesDone > nextThreshold:
            print(cubesDone, "cubes have been made so far.")
            nextThreshold += printAfterCount

    if bpy.context.mode == 'EDIT_MESH':
        bmesh.update_edit_mesh(obj.data)
    else:
        bm.to_mesh(obj.data)
    obj.data.update()
    # BUG FIX: was `bm.free` (a bare attribute access, i.e. a no-op) —
    # call it so the BMesh's memory is actually released.
    bm.free()
    return obj
# Given a 3D array of 0 and 1's it'll place a voxel in every cell that has a 1 in it
def imagesToVoxelsInefficient(image3D):
    """Walk a 3D array and create one cube per flagged cell (one bpy op per voxel, hence slow)."""
    for xValue in range(len(image3D)):
        for yValue in range(len(image3D[xValue])):
            for zValue in range(len(image3D[xValue][yValue])):
                # NOTE(review): the header comment says a voxel goes where the
                # cell holds a 1, but this test fires on 0 — one of the two is
                # inverted; confirm the intended polarity before relying on it.
                if(image3D[xValue][yValue][zValue]==0):
                    createVoxel((xValue,yValue,zValue))
# place a voxel at a given position, using mesh.primitive_cube_add is really slow so it might be worth making this faster
def createVoxel(position):
    """Add a 1x1x1 cube centred at *position* via the (slow) bpy primitive operator."""
    bpy.ops.mesh.primitive_cube_add(location=position,size=1)
    # print(position)
if __name__ == "__main__":
# calculate the runtime of this script
startTime = time.time()
# createVoxel((1,2,3))
# Generate a 10*10*10 3D texture
testImageArray = []
for x in range(10):
yArray = []
for y in range(10):
zArray = []
for z in range(10):
zArray.append(0)
# zArray.append(randint(0,1))
yArray.append(zArray)
testImageArray.append(yArray)
# print(testImageArray)
# place voxels based on that 10*10*10 array
imagesToVoxelsInefficient(testImageArray)
# testImage = [[[0,0],[1,1]],[[1,1],[1,0]]]
stopTime = time.time()
print("Script took:",stopTime-startTime) | [
"numpy.tile",
"numpy.reshape",
"numpy.unique",
"bmesh.update_edit_mesh",
"bpy.data.objects.new",
"bpy.data.meshes.new",
"bpy.context.collection.objects.link",
"bmesh.new",
"numpy.array",
"time.time",
"bpy.ops.mesh.primitive_cube_add"
] | [((721, 749), 'numpy.unique', 'numpy.unique', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (733, 749), False, 'import numpy\n'), ((806, 833), 'bpy.data.meshes.new', 'bpy.data.meshes.new', (['"""mesh"""'], {}), "('mesh')\n", (825, 833), False, 'import bpy\n'), ((862, 894), 'bpy.data.objects.new', 'bpy.data.objects.new', (['name', 'mesh'], {}), '(name, mesh)\n', (882, 894), False, 'import bpy\n'), ((899, 939), 'bpy.context.collection.objects.link', 'bpy.context.collection.objects.link', (['obj'], {}), '(obj)\n', (934, 939), False, 'import bpy\n'), ((1105, 1116), 'bmesh.new', 'bmesh.new', ([], {}), '()\n', (1114, 1116), False, 'import bmesh\n'), ((1590, 1616), 'numpy.tile', 'numpy.tile', (['points', '(1, 8)'], {}), '(points, (1, 8))\n', (1600, 1616), False, 'import numpy\n'), ((1776, 1832), 'numpy.reshape', 'numpy.reshape', (['pointsTiled', '(pointsTiled.shape[0], 8, 3)'], {}), '(pointsTiled, (pointsTiled.shape[0], 8, 3))\n', (1789, 1832), False, 'import numpy\n'), ((2158, 2256), 'numpy.reshape', 'numpy.reshape', (['pointsBlockerized', '(pointsBlockerized.shape[0] * pointsBlockerized.shape[1], 3)'], {}), '(pointsBlockerized, (pointsBlockerized.shape[0] *\n pointsBlockerized.shape[1], 3))\n', (2171, 2256), False, 'import numpy\n'), ((4383, 4441), 'bpy.ops.mesh.primitive_cube_add', 'bpy.ops.mesh.primitive_cube_add', ([], {'location': 'position', 'size': '(1)'}), '(location=position, size=1)\n', (4414, 4441), False, 'import bpy\n'), ((4559, 4570), 'time.time', 'time.time', ([], {}), '()\n', (4568, 4570), False, 'import time\n'), ((5130, 5141), 'time.time', 'time.time', ([], {}), '()\n', (5139, 5141), False, 'import time\n'), ((3716, 3748), 'bmesh.update_edit_mesh', 'bmesh.update_edit_mesh', (['obj.data'], {}), '(obj.data)\n', (3738, 3748), False, 'import bmesh\n'), ((1221, 1339), 'numpy.array', 'numpy.array', (['[[-1, -1, -1], [-1, -1, 1], [-1, 1, -1], [-1, 1, 1], [1, -1, -1], [1, -1, 1\n ], [1, 1, -1], [1, 1, 1]]'], {}), '([[-1, -1, -1], [-1, -1, 1], [-1, 1, 
-1], [-1, 1, 1], [1, -1, -1\n ], [1, -1, 1], [1, 1, -1], [1, 1, 1]])\n', (1232, 1339), False, 'import numpy\n')] |
#!/usr/bin/python
#-*-coding:utf-8-*-
import json
import sys
import time
# TBD: auto discovery
# data_path = "/proc/fs/lustre/llite/nvmefs-ffff883f8a4f2800/stats"
data_path = "/proc/fs/lustre/lmv/shnvme3-clilmv-ffff8859d3e2d000/md_stats"
# use a dic1/dic2 to hold sampling data
def load_data(dic, path=None):
    """Parse a Lustre stats file into *dic* as ``{counter_name: first_value}``.

    Each stats line looks like ``name value [more fields]``; only the first
    numeric field is kept.  Malformed lines (fewer than two fields) are
    reported and skipped.

    Args:
        dic: dict updated in place with the parsed counters.
        path: stats file to read; defaults to the module-level ``data_path``
            (backward-compatible generalization for testing/reuse).
    """
    if path is None:
        path = data_path
    # `with` guarantees the handle is closed even if parsing raises.
    with open(path, "r") as fileHandler:
        for line in fileHandler:
            words = line.split()
            if len(words) < 2:
                # BUG FIX: this used to call the undefined `println`, which
                # raised NameError instead of skipping the bad line.
                print("got error line, to skip")
                continue
            dic[words[0]] = float(words[1])
# put "next - prev" into delta
def calc_delta(prev, next, delta):
for key in prev:
delta[key] = next[key] - prev[key]
# Pretty-print a dictionary as indented, key-sorted JSON.
def print_dict(dic):
    """Dump *dic* to stdout as 2-space-indented JSON with sorted keys (UTF-8 kept as-is)."""
    rendered = json.dumps(dic, indent=2, sort_keys=True, ensure_ascii=False)
    print(rendered)
# calculate iops for each category except snapshot_time, all divided by snapshot_time
def calc_iops_from_delta(delta):
    """Convert raw counter deltas to per-second rates, in place.

    Every entry except ``snapshot_time`` is divided by the elapsed
    ``snapshot_time`` and truncated to an int.  A (near-)zero interval
    leaves the dict untouched and reports an error instead.
    """
    elapsed = delta['snapshot_time']
    # Guard against a degenerate sampling interval.
    if elapsed < 0.000001:
        print("error: time gap too small")
        return
    for key in delta:
        if key != 'snapshot_time':
            delta[key] = int(delta[key] / elapsed)
if __name__ == '__main__':
    # Two dicts hold the previous/next samples alternately: each pass loads
    # fresh counters into one of them, diffs against the other, converts the
    # deltas to per-second rates (iops) and prints the result.
    dic1 = {}
    dic2 = {}
    delta = {}
    load_data(dic1)
    prev_dic, next_dic = dic1, dic2
    while True:
        time.sleep(2)  # TBD: configurable sampling interval
        load_data(next_dic)
        calc_delta(prev_dic, next_dic, delta)
        calc_iops_from_delta(delta)
        print_dict(delta)
        # Swap roles: the sample just taken becomes the next baseline.
        prev_dic, next_dic = next_dic, prev_dic
| [
"json.dumps",
"time.sleep"
] | [((942, 1003), 'json.dumps', 'json.dumps', (['dic'], {'indent': '(2)', 'sort_keys': '(True)', 'ensure_ascii': '(False)'}), '(dic, indent=2, sort_keys=True, ensure_ascii=False)\n', (952, 1003), False, 'import json\n'), ((1868, 1881), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (1878, 1881), False, 'import time\n')] |
import unittest
from hashlib import sha1
import pickle
import numpy as np
from datasketch.lsh import MinHashLSH
from datasketch.minhash import MinHash
from datasketch.weighted_minhash import WeightedMinHashGenerator
class TestMinHashLSH(unittest.TestCase):
    """Tests for MinHashLSH backed by plain MinHash signatures."""

    def _populated_lsh(self):
        """Return (lsh, m1, m2) with keys "a" and "b" already inserted."""
        lsh = MinHashLSH(threshold=0.5, num_perm=16)
        m1 = MinHash(16)
        m1.update("a".encode("utf8"))
        m2 = MinHash(16)
        m2.update("b".encode("utf8"))
        lsh.insert("a", m1)
        lsh.insert("b", m2)
        return lsh, m1, m2

    def test_init(self):
        """A fresh index is empty; heavier recall weight shifts the b/r split."""
        lsh = MinHashLSH(threshold=0.8)
        self.assertTrue(lsh.is_empty())
        b1, r1 = lsh.b, lsh.r
        lsh = MinHashLSH(threshold=0.8, weights=(0.2, 0.8))
        b2, r2 = lsh.b, lsh.r
        self.assertTrue(b1 < b2)
        self.assertTrue(r1 > r2)

    def test_insert(self):
        """Inserted keys appear in every hashtable and in the key index."""
        lsh, _m1, _m2 = self._populated_lsh()
        for t in lsh.hashtables:
            self.assertTrue(len(t) >= 1)
            items = []
            for H in t:
                items.extend(t[H])
            self.assertTrue("a" in items)
            self.assertTrue("b" in items)
        self.assertTrue("a" in lsh)
        self.assertTrue("b" in lsh)
        for i, H in enumerate(lsh.keys["a"]):
            self.assertTrue("a" in lsh.hashtables[i][H])
        # A signature with a mismatched number of permutations is rejected.
        m3 = MinHash(18)
        self.assertRaises(ValueError, lsh.insert, "c", m3)

    def test_query(self):
        """Each key is retrievable by its own signature; bad sizes raise."""
        lsh, m1, m2 = self._populated_lsh()
        self.assertTrue("a" in lsh.query(m1))
        self.assertTrue("b" in lsh.query(m2))
        m3 = MinHash(18)
        self.assertRaises(ValueError, lsh.query, m3)

    def test_remove(self):
        """Removed keys vanish from the key index and every hashtable."""
        lsh, _m1, _m2 = self._populated_lsh()
        lsh.remove("a")
        self.assertTrue("a" not in lsh.keys)
        for table in lsh.hashtables:
            for H in table:
                self.assertGreater(len(table[H]), 0)
                self.assertTrue("a" not in table[H])
        self.assertRaises(ValueError, lsh.remove, "c")

    def test_pickle(self):
        """A pickle round-trip preserves query results.

        BUG FIX: the original queried the pre-pickle ``lsh``, so the
        restored copy was never actually exercised.
        """
        lsh, m1, m2 = self._populated_lsh()
        lsh2 = pickle.loads(pickle.dumps(lsh))
        self.assertTrue("a" in lsh2.query(m1))
        self.assertTrue("b" in lsh2.query(m2))
class TestWeightedMinHashLSH(unittest.TestCase):
    """Tests for MinHashLSH backed by weighted MinHash signatures."""

    def _populated_lsh(self):
        """Return (lsh, m1, m2) with keys "a" and "b" already inserted."""
        lsh = MinHashLSH(threshold=0.5, num_perm=4)
        mg = WeightedMinHashGenerator(10, 4)
        m1 = mg.minhash(np.random.uniform(1, 10, 10))
        m2 = mg.minhash(np.random.uniform(1, 10, 10))
        lsh.insert("a", m1)
        lsh.insert("b", m2)
        return lsh, m1, m2

    def test_init(self):
        """A fresh index is empty; heavier recall weight shifts the b/r split."""
        lsh = MinHashLSH(threshold=0.8)
        self.assertTrue(lsh.is_empty())
        b1, r1 = lsh.b, lsh.r
        lsh = MinHashLSH(threshold=0.8, weights=(0.2, 0.8))
        b2, r2 = lsh.b, lsh.r
        self.assertTrue(b1 < b2)
        self.assertTrue(r1 > r2)

    def test_insert(self):
        """Inserted keys appear in every hashtable and in the key index."""
        lsh, _m1, _m2 = self._populated_lsh()
        for t in lsh.hashtables:
            self.assertTrue(len(t) >= 1)
            items = []
            for H in t:
                items.extend(t[H])
            self.assertTrue("a" in items)
            self.assertTrue("b" in items)
        self.assertTrue("a" in lsh)
        self.assertTrue("b" in lsh)
        for i, H in enumerate(lsh.keys["a"]):
            self.assertTrue("a" in lsh.hashtables[i][H])
        # A signature built with a different sample size is rejected.
        mg = WeightedMinHashGenerator(10, 5)
        m3 = mg.minhash(np.random.uniform(1, 10, 10))
        self.assertRaises(ValueError, lsh.insert, "c", m3)

    def test_query(self):
        """Each key is retrievable by its own signature; bad sizes raise."""
        lsh, m1, m2 = self._populated_lsh()
        self.assertTrue("a" in lsh.query(m1))
        self.assertTrue("b" in lsh.query(m2))
        mg = WeightedMinHashGenerator(10, 5)
        m3 = mg.minhash(np.random.uniform(1, 10, 10))
        self.assertRaises(ValueError, lsh.query, m3)

    def test_remove(self):
        """Removed keys vanish from the key index and every hashtable."""
        lsh, _m1, _m2 = self._populated_lsh()
        lsh.remove("a")
        self.assertTrue("a" not in lsh.keys)
        for table in lsh.hashtables:
            for H in table:
                self.assertGreater(len(table[H]), 0)
                self.assertTrue("a" not in table[H])
        self.assertRaises(ValueError, lsh.remove, "c")

    def test_pickle(self):
        """A pickle round-trip preserves query results.

        BUG FIX: the original queried the pre-pickle ``lsh``, so the
        restored copy was never actually exercised.
        """
        lsh, m1, m2 = self._populated_lsh()
        lsh2 = pickle.loads(pickle.dumps(lsh))
        self.assertTrue("a" in lsh2.query(m1))
        self.assertTrue("b" in lsh2.query(m2))
if __name__ == "__main__":
unittest.main()
| [
"datasketch.lsh.MinHashLSH",
"datasketch.weighted_minhash.WeightedMinHashGenerator",
"pickle.dumps",
"datasketch.minhash.MinHash",
"numpy.random.uniform",
"unittest.main"
] | [((5700, 5715), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5713, 5715), False, 'import unittest\n'), ((299, 324), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)'}), '(threshold=0.8)\n', (309, 324), False, 'from datasketch.lsh import MinHashLSH\n'), ((409, 454), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)', 'weights': '(0.2, 0.8)'}), '(threshold=0.8, weights=(0.2, 0.8))\n', (419, 454), False, 'from datasketch.lsh import MinHashLSH\n'), ((592, 630), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (602, 630), False, 'from datasketch.lsh import MinHashLSH\n'), ((644, 655), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (651, 655), False, 'from datasketch.minhash import MinHash\n'), ((707, 718), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (714, 718), False, 'from datasketch.minhash import MinHash\n'), ((1242, 1253), 'datasketch.minhash.MinHash', 'MinHash', (['(18)'], {}), '(18)\n', (1249, 1253), False, 'from datasketch.minhash import MinHash\n'), ((1354, 1392), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (1364, 1392), False, 'from datasketch.lsh import MinHashLSH\n'), ((1406, 1417), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1413, 1417), False, 'from datasketch.minhash import MinHash\n'), ((1469, 1480), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1476, 1480), False, 'from datasketch.minhash import MinHash\n'), ((1729, 1740), 'datasketch.minhash.MinHash', 'MinHash', (['(18)'], {}), '(18)\n', (1736, 1740), False, 'from datasketch.minhash import MinHash\n'), ((1836, 1874), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (1846, 1874), False, 'from datasketch.lsh import MinHashLSH\n'), 
((1888, 1899), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1895, 1899), False, 'from datasketch.minhash import MinHash\n'), ((1951, 1962), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (1958, 1962), False, 'from datasketch.minhash import MinHash\n'), ((2404, 2442), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(16)'}), '(threshold=0.5, num_perm=16)\n', (2414, 2442), False, 'from datasketch.lsh import MinHashLSH\n'), ((2456, 2467), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (2463, 2467), False, 'from datasketch.minhash import MinHash\n'), ((2519, 2530), 'datasketch.minhash.MinHash', 'MinHash', (['(16)'], {}), '(16)\n', (2526, 2530), False, 'from datasketch.minhash import MinHash\n'), ((2903, 2928), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)'}), '(threshold=0.8)\n', (2913, 2928), False, 'from datasketch.lsh import MinHashLSH\n'), ((3013, 3058), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.8)', 'weights': '(0.2, 0.8)'}), '(threshold=0.8, weights=(0.2, 0.8))\n', (3023, 3058), False, 'from datasketch.lsh import MinHashLSH\n'), ((3196, 3233), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (3206, 3233), False, 'from datasketch.lsh import MinHashLSH\n'), ((3247, 3278), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (3271, 3278), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((3872, 3903), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(5)'], {}), '(10, 5)\n', (3896, 3903), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((4058, 4095), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (4068, 
4095), False, 'from datasketch.lsh import MinHashLSH\n'), ((4109, 4140), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (4133, 4140), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((4459, 4490), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(5)'], {}), '(10, 5)\n', (4483, 4490), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((4640, 4677), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (4650, 4677), False, 'from datasketch.lsh import MinHashLSH\n'), ((4691, 4722), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (4715, 4722), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((5234, 5271), 'datasketch.lsh.MinHashLSH', 'MinHashLSH', ([], {'threshold': '(0.5)', 'num_perm': '(4)'}), '(threshold=0.5, num_perm=4)\n', (5244, 5271), False, 'from datasketch.lsh import MinHashLSH\n'), ((5285, 5316), 'datasketch.weighted_minhash.WeightedMinHashGenerator', 'WeightedMinHashGenerator', (['(10)', '(4)'], {}), '(10, 4)\n', (5309, 5316), False, 'from datasketch.weighted_minhash import WeightedMinHashGenerator\n'), ((2653, 2670), 'pickle.dumps', 'pickle.dumps', (['lsh'], {}), '(lsh)\n', (2665, 2670), False, 'import pickle\n'), ((3303, 3331), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (3320, 3331), True, 'import numpy as np\n'), ((3357, 3385), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (3374, 3385), True, 'import numpy as np\n'), ((3928, 3956), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (3945, 3956), True, 'import numpy as np\n'), ((4165, 4193), 'numpy.random.uniform', 
'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4182, 4193), True, 'import numpy as np\n'), ((4219, 4247), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4236, 4247), True, 'import numpy as np\n'), ((4515, 4543), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4532, 4543), True, 'import numpy as np\n'), ((4747, 4775), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4764, 4775), True, 'import numpy as np\n'), ((4801, 4829), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (4818, 4829), True, 'import numpy as np\n'), ((5341, 5369), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (5358, 5369), True, 'import numpy as np\n'), ((5395, 5423), 'numpy.random.uniform', 'np.random.uniform', (['(1)', '(10)', '(10)'], {}), '(1, 10, 10)\n', (5412, 5423), True, 'import numpy as np\n'), ((5509, 5526), 'pickle.dumps', 'pickle.dumps', (['lsh'], {}), '(lsh)\n', (5521, 5526), False, 'import pickle\n')] |